# NOTE(review): dataset-dump table-header residue ("gt ... | context ...");
# not part of the original source files below.
# This file contains test code for the formatting of parsed statements back to
# makefile "source." It essentially verifies the to_source() functions
# scattered across the tree.
import glob
import logging
import os.path
import unittest
from pymake.data import Expansion
from pymake.data import StringExpansion
from pymake.functions import BasenameFunction
from pymake.functions import SubstitutionRef
from pymake.functions import VariableRef
from pymake.functions import WordlistFunction
from pymake.parserdata import Include
from pymake.parserdata import SetVariable
from pymake.parser import parsestring
from pymake.parser import SyntaxError
class TestBase(unittest.TestCase):
    """Common base class for all to_source() formatting tests."""
    pass
class VariableRefTest(TestBase):
    """Formatting of $(...) variable references."""

    def test_string_name(self):
        e = StringExpansion('foo', None)
        v = VariableRef(None, e)
        self.assertEqual(v.to_source(), '$(foo)')

    def test_special_variable(self):
        # Single-character automatic variables are printed without parens.
        e = StringExpansion('<', None)
        v = VariableRef(None, e)
        self.assertEqual(v.to_source(), '$<')

    def test_expansion_simple(self):
        e = Expansion()
        e.appendstr('foo')
        e.appendstr('bar')
        v = VariableRef(None, e)
        self.assertEqual(v.to_source(), '$(foobar)')
class StandardFunctionTest(TestBase):
    """Formatting of built-in make functions."""

    def test_basename(self):
        e1 = StringExpansion('foo', None)
        v = VariableRef(None, e1)
        e2 = Expansion(None)
        e2.appendfunc(v)
        b = BasenameFunction(None)
        b.append(e2)
        self.assertEqual(b.to_source(), '$(basename $(foo))')

    def test_wordlist(self):
        # Whitespace inside arguments must survive the round trip.
        e1 = StringExpansion('foo', None)
        e2 = StringExpansion('bar ', None)
        e3 = StringExpansion(' baz', None)
        w = WordlistFunction(None)
        w.append(e1)
        w.append(e2)
        w.append(e3)
        self.assertEqual(w.to_source(), '$(wordlist foo,bar , baz)')

    def test_curly_brackets(self):
        # An unbalanced '(' in an argument forces ${...} delimiters.
        e1 = Expansion(None)
        e1.appendstr('foo')
        e2 = Expansion(None)
        e2.appendstr('foo ( bar')
        f = WordlistFunction(None)
        f.append(e1)
        f.append(e2)
        self.assertEqual(f.to_source(), '${wordlist foo,foo ( bar}')
class StringExpansionTest(TestBase):
    """Formatting of plain string expansions, with and without escaping."""

    def test_simple(self):
        e = StringExpansion('foobar', None)
        self.assertEqual(e.to_source(), 'foobar')

        e = StringExpansion('$var', None)
        self.assertEqual(e.to_source(), '$var')

    def test_escaping(self):
        e = StringExpansion('$var', None)
        self.assertEqual(e.to_source(escape_variables=True), '$$var')

        e = StringExpansion('this is # not a comment', None)
        self.assertEqual(e.to_source(escape_comments=True),
                         'this is \# not a comment')

    def test_empty(self):
        e = StringExpansion('', None)
        self.assertEqual(e.to_source(), '')

        e = StringExpansion(' ', None)
        self.assertEqual(e.to_source(), ' ')
class ExpansionTest(TestBase):
    """Formatting of compound expansions built from multiple parts."""

    def test_single_string(self):
        e = Expansion()
        e.appendstr('foo')
        self.assertEqual(e.to_source(), 'foo')

    def test_multiple_strings(self):
        e = Expansion()
        e.appendstr('hello')
        e.appendstr('world')
        self.assertEqual(e.to_source(), 'helloworld')

    def test_string_escape(self):
        e = Expansion()
        e.appendstr('$var')
        self.assertEqual(e.to_source(), '$var')
        self.assertEqual(e.to_source(escape_variables=True), '$$var')

        e = Expansion()
        e.appendstr('foo')
        e.appendstr(' $bar')
        self.assertEqual(e.to_source(escape_variables=True), 'foo $$bar')
class SubstitutionRefTest(TestBase):
    """Formatting of $(var:pat=subst) substitution references."""

    def test_simple(self):
        name = StringExpansion('foo', None)
        c = StringExpansion('%.c', None)
        o = StringExpansion('%.o', None)
        s = SubstitutionRef(None, name, c, o)
        self.assertEqual(s.to_source(), '$(foo:%.c=%.o)')
class SetVariableTest(TestBase):
    """Formatting of variable assignments, including define/endef blocks."""

    def test_simple(self):
        v = SetVariable(StringExpansion('foo', None), '=', 'bar', None, None)
        self.assertEqual(v.to_source(), 'foo = bar')

    def test_multiline(self):
        # A value containing a newline must be emitted as define/endef.
        s = 'hello\nworld'
        foo = StringExpansion('FOO', None)
        v = SetVariable(foo, '=', s, None, None)
        self.assertEqual(v.to_source(), 'define FOO\nhello\nworld\nendef')

    def test_multiline_immediate(self):
        source = 'define FOO :=\nhello\nworld\nendef'
        statements = parsestring(source, 'foo.mk')
        self.assertEqual(statements.to_source(), source)

    def test_target_specific(self):
        foo = StringExpansion('FOO', None)
        bar = StringExpansion('BAR', None)
        v = SetVariable(foo, '+=', 'value', None, bar)
        self.assertEqual(v.to_source(), 'BAR: FOO += value')
class IncludeTest(TestBase):
    """Formatting of include and optional (-include) directives."""

    def test_include(self):
        e = StringExpansion('rules.mk', None)
        i = Include(e, True, False)
        self.assertEqual(i.to_source(), 'include rules.mk')

        i = Include(e, False, False)
        self.assertEqual(i.to_source(), '-include rules.mk')
class IfdefTest(TestBase):
    """Round-tripping of ifdef/ifndef condition blocks."""

    def test_simple(self):
        source = 'ifdef FOO\nbar := $(value)\nendif'
        statements = parsestring(source, 'foo.mk')
        self.assertEqual(statements[0].to_source(), source)

    def test_nested(self):
        source = 'ifdef FOO\nifdef BAR\nhello = world\nendif\nendif'
        statements = parsestring(source, 'foo.mk')
        self.assertEqual(statements[0].to_source(), source)

    def test_negation(self):
        source = 'ifndef FOO\nbar += value\nendif'
        statements = parsestring(source, 'foo.mk')
        self.assertEqual(statements[0].to_source(), source)
class IfeqTest(TestBase):
    """Round-tripping of ifeq/ifneq condition blocks."""

    def test_simple(self):
        source = 'ifeq ($(foo),bar)\nhello = $(world)\nendif'
        statements = parsestring(source, 'foo.mk')
        self.assertEqual(statements[0].to_source(), source)

    def test_negation(self):
        source = 'ifneq (foo,bar)\nhello = world\nendif'
        statements = parsestring(source, 'foo.mk')
        self.assertEqual(statements.to_source(), source)
class ConditionBlocksTest(TestBase):
    """Round-tripping of mixed and nested condition blocks."""

    def test_mixed_conditions(self):
        source = 'ifdef FOO\nifeq ($(FOO),bar)\nvar += $(value)\nendif\nendif'
        statements = parsestring(source, 'foo.mk')
        self.assertEqual(statements.to_source(), source)

    def test_extra_statements(self):
        source = 'ifdef FOO\nF := 1\nifdef BAR\nB += 1\nendif\nC = 1\nendif'
        statements = parsestring(source, 'foo.mk')
        self.assertEqual(statements.to_source(), source)

    def test_whitespace_preservation(self):
        # Quoted arguments keep their whitespace verbatim...
        source = "ifeq ' x' 'x '\n$(error stripping)\nendif"
        statements = parsestring(source, 'foo.mk')
        self.assertEqual(statements.to_source(), source)

        # ...but parenthesized arguments are whitespace-stripped on output.
        source = 'ifneq (x , x)\n$(error stripping)\nendif'
        statements = parsestring(source, 'foo.mk')
        self.assertEqual(statements.to_source(),
                         'ifneq (x,x)\n$(error stripping)\nendif')
class MakefileCorupusTest(TestBase):
    """Runs the make files from the pymake corpus through the formatter.

    All the above tests are child's play compared to this.
    """

    # Our reformatting isn't perfect. We ignore files with known failures until
    # we make them work.
    # TODO Address these formatting corner cases.
    _IGNORE_FILES = [
        # We are thrown off by backslashes at end of lines.
        'comment-parsing.mk',
        'escape-chars.mk',
        'include-notfound.mk',
    ]

    def _get_test_files(self):
        """Yield (path, source, parsed statements) for each corpus makefile."""
        ourdir = os.path.dirname(os.path.abspath(__file__))
        for makefile in glob.glob(os.path.join(ourdir, '*.mk')):
            if os.path.basename(makefile) in self._IGNORE_FILES:
                continue
            source = None
            with open(makefile, 'rU') as fh:
                source = fh.read()
            try:
                yield (makefile, source, parsestring(source, makefile))
            except SyntaxError:
                # Corpus files that intentionally fail to parse are skipped;
                # this is pymake's SyntaxError, not the builtin.
                continue

    def test_reparse_consistency(self):
        for filename, source, statements in self._get_test_files():
            reformatted = statements.to_source()

            # We should be able to parse the reformatted source fine.
            new_statements = parsestring(reformatted, filename)

            # If we do the formatting again, the representation shouldn't
            # change. i.e. the only lossy change should be the original
            # (whitespace and some semantics aren't preserved).
            reformatted_again = new_statements.to_source()
            self.assertEqual(reformatted, reformatted_again,
                             '%s has lossless reformat.' % filename)
            self.assertEqual(len(statements), len(new_statements))

            for i in range(0, len(statements)):
                original = statements[i]
                formatted = new_statements[i]
                self.assertEqual(original, formatted, '%s %d: %s != %s' % (
                    filename, i, original, formatted))
if __name__ == '__main__':
    # Verbose logging helps when diagnosing corpus round-trip failures.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
# ---- (file boundary marker from dataset dump) ----
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for network API."""
import contextlib
import itertools
import mock
import mox
from nova.compute import flavors
from nova import context
from nova import exception
from nova import network
from nova.network import api
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import fields
from nova import policy
from nova import test
from nova.tests import fake_instance
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_flavor
from nova.tests.objects import test_virtual_interface
from nova import utils
FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
class NetworkPolicyTestCase(test.TestCase):
    """Verifies that network API calls are routed through policy checks."""

    def setUp(self):
        super(NetworkPolicyTestCase, self).setUp()
        policy.reset()
        policy.init()
        self.context = context.get_admin_context()

    def tearDown(self):
        super(NetworkPolicyTestCase, self).tearDown()
        # Leave no policy state behind for other tests.
        policy.reset()

    def test_check_policy(self):
        self.mox.StubOutWithMock(policy, 'enforce')
        target = {
            'project_id': self.context.project_id,
            'user_id': self.context.user_id,
        }
        policy.enforce(self.context, 'network:get_all', target)
        self.mox.ReplayAll()
        api.check_policy(self.context, 'get_all')
class ApiTestCase(test.TestCase):
    """Tests for the nova-network flavor of the network API."""

    def setUp(self):
        super(ApiTestCase, self).setUp()
        self.network_api = network.API()
        self.context = context.RequestContext('fake-user',
                                              'fake-project')

    @mock.patch('nova.objects.NetworkList.get_all')
    def test_get_all(self, mock_get_all):
        mock_get_all.return_value = mock.sentinel.get_all
        self.assertEqual(mock.sentinel.get_all,
                         self.network_api.get_all(self.context))
        mock_get_all.assert_called_once_with(self.context,
                                             project_only=True)

    @mock.patch('nova.objects.NetworkList.get_all')
    def test_get_all_no_networks(self, mock_get_all):
        # NoNetworksFound is swallowed and reported as an empty list.
        mock_get_all.side_effect = exception.NoNetworksFound
        self.assertEqual([], self.network_api.get_all(self.context))
        mock_get_all.assert_called_once_with(self.context,
                                             project_only=True)

    @mock.patch('nova.objects.Network.get_by_uuid')
    def test_get(self, mock_get):
        mock_get.return_value = mock.sentinel.get_by_uuid
        with mock.patch.object(self.context, 'elevated') as elevated:
            elevated.return_value = mock.sentinel.elevated_context
            self.assertEqual(mock.sentinel.get_by_uuid,
                             self.network_api.get(self.context, 'fake-uuid'))
        # get() must look the network up with the elevated context.
        mock_get.assert_called_once_with(mock.sentinel.elevated_context,
                                         'fake-uuid')

    @mock.patch('nova.objects.Network.get_by_id')
    @mock.patch('nova.db.virtual_interface_get_by_instance')
    def test_get_vifs_by_instance(self, mock_get_by_instance,
                                  mock_get_by_id):
        mock_get_by_instance.return_value = [
            dict(test_virtual_interface.fake_vif,
                 network_id=123)]
        mock_get_by_id.return_value = objects.Network()
        mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid
        instance = objects.Instance(uuid=mock.sentinel.inst_uuid)
        vifs = self.network_api.get_vifs_by_instance(self.context,
                                                     instance)
        self.assertEqual(1, len(vifs))
        self.assertEqual(123, vifs[0].network_id)
        self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid)
        mock_get_by_instance.assert_called_once_with(
            self.context, str(mock.sentinel.inst_uuid), use_slave=False)
        mock_get_by_id.assert_called_once_with(self.context, 123,
                                               project_only='allow_none')

    @mock.patch('nova.objects.Network.get_by_id')
    @mock.patch('nova.db.virtual_interface_get_by_address')
    def test_get_vif_by_mac_address(self, mock_get_by_address,
                                    mock_get_by_id):
        mock_get_by_address.return_value = dict(
            test_virtual_interface.fake_vif, network_id=123)
        mock_get_by_id.return_value = objects.Network(
            uuid=mock.sentinel.network_uuid)
        vif = self.network_api.get_vif_by_mac_address(self.context,
                                                      mock.sentinel.mac)
        self.assertEqual(123, vif.network_id)
        self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid)
        mock_get_by_address.assert_called_once_with(self.context,
                                                    mock.sentinel.mac)
        mock_get_by_id.assert_called_once_with(self.context, 123,
                                               project_only='allow_none')

    def test_allocate_for_instance_handles_macs_passed(self):
        # If a macs argument is supplied to the 'nova-network' API, it is just
        # ignored. This test checks that the call down to the rpcapi layer
        # doesn't pass macs down: nova-network doesn't support hypervisor
        # mac address limits (today anyhow).
        macs = set(['ab:cd:ef:01:23:34'])
        self.mox.StubOutWithMock(
            self.network_api.network_rpcapi, "allocate_for_instance")
        kwargs = dict(zip(['host', 'instance_id', 'project_id',
                           'requested_networks', 'rxtx_factor', 'vpn', 'macs',
                           'dhcp_options'],
                          itertools.repeat(mox.IgnoreArg())))
        self.network_api.network_rpcapi.allocate_for_instance(
            mox.IgnoreArg(), **kwargs).AndReturn([])
        self.mox.ReplayAll()
        flavor = flavors.get_default_flavor()
        flavor['rxtx_factor'] = 0
        sys_meta = flavors.save_flavor_info({}, flavor)
        instance = dict(id='id', uuid='uuid', project_id='project_id',
                        host='host',
                        system_metadata=utils.dict_to_metadata(sys_meta))
        self.network_api.allocate_for_instance(
            self.context, instance, 'vpn', 'requested_networks', macs=macs)

    def _do_test_associate_floating_ip(self, orig_instance_uuid):
        """Test post-association logic."""
        new_instance = {'uuid': 'new-uuid'}

        def fake_associate(*args, **kwargs):
            return orig_instance_uuid

        self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
                       fake_associate)

        def fake_instance_get_by_uuid(context, instance_uuid,
                                      columns_to_join=None,
                                      use_slave=None):
            return fake_instance.fake_db_instance(uuid=instance_uuid)

        self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
                       fake_instance_get_by_uuid)

        def fake_get_nw_info(ctxt, instance):
            class FakeNWInfo(object):
                def json(self):
                    pass
            return FakeNWInfo()

        self.stubs.Set(self.network_api, '_get_instance_nw_info',
                       fake_get_nw_info)

        # Both the new instance and, if any, the previously associated one
        # must have their info caches refreshed.
        if orig_instance_uuid:
            expected_updated_instances = [new_instance['uuid'],
                                          orig_instance_uuid]
        else:
            expected_updated_instances = [new_instance['uuid']]

        def fake_instance_info_cache_update(context, instance_uuid, cache):
            self.assertEqual(instance_uuid,
                             expected_updated_instances.pop())

        self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
                       fake_instance_info_cache_update)

        def fake_update_instance_cache_with_nw_info(api, context, instance,
                                                    nw_info=None,
                                                    update_cells=True):
            return

        self.stubs.Set(base_api, "update_instance_cache_with_nw_info",
                       fake_update_instance_cache_with_nw_info)

        self.network_api.associate_floating_ip(self.context,
                                               new_instance,
                                               '172.24.4.225',
                                               '10.0.0.2')

    def test_associate_preassociated_floating_ip(self):
        self._do_test_associate_floating_ip('orig-uuid')

    def test_associate_unassociated_floating_ip(self):
        self._do_test_associate_floating_ip(None)

    def test_get_floating_ip_invalid_id(self):
        self.assertRaises(exception.InvalidID,
                          self.network_api.get_floating_ip,
                          self.context, '123zzz')

    @mock.patch('nova.objects.FloatingIP.get_by_id')
    def test_get_floating_ip(self, mock_get):
        floating = mock.sentinel.floating
        mock_get.return_value = floating
        self.assertEqual(floating,
                         self.network_api.get_floating_ip(self.context, 123))
        mock_get.assert_called_once_with(self.context, 123)

    @mock.patch('nova.objects.FloatingIP.get_pool_names')
    def test_get_floating_ip_pools(self, mock_get):
        pools = ['foo', 'bar']
        mock_get.return_value = pools
        self.assertEqual(pools,
                         self.network_api.get_floating_ip_pools(
                             self.context))

    @mock.patch('nova.objects.FloatingIP.get_by_address')
    def test_get_floating_ip_by_address(self, mock_get):
        floating = mock.sentinel.floating
        mock_get.return_value = floating
        self.assertEqual(floating,
                         self.network_api.get_floating_ip_by_address(
                             self.context, mock.sentinel.address))
        mock_get.assert_called_once_with(self.context,
                                         mock.sentinel.address)

    @mock.patch('nova.objects.FloatingIPList.get_by_project')
    def test_get_floating_ips_by_project(self, mock_get):
        floatings = mock.sentinel.floating_ips
        mock_get.return_value = floatings
        self.assertEqual(floatings,
                         self.network_api.get_floating_ips_by_project(
                             self.context))
        mock_get.assert_called_once_with(self.context,
                                         self.context.project_id)

    @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
    def test_get_floating_ips_by_fixed_address(self, mock_get):
        floatings = [objects.FloatingIP(id=1, address='1.2.3.4'),
                     objects.FloatingIP(id=2, address='5.6.7.8')]
        mock_get.return_value = floatings
        # Only the string addresses are returned, not the objects.
        self.assertEqual(['1.2.3.4', '5.6.7.8'],
                         self.network_api.get_floating_ips_by_fixed_address(
                             self.context, mock.sentinel.fixed_address))
        mock_get.assert_called_once_with(self.context,
                                         mock.sentinel.fixed_address)

    def _stub_migrate_instance_calls(self, method, multi_host, info):
        """Stub the rpcapi migrate call and return (instance, migration,
        expected kwargs) for the given multi_host setting.
        """
        fake_flavor = flavors.get_default_flavor()
        fake_flavor['rxtx_factor'] = 1.21
        sys_meta = utils.dict_to_metadata(
            flavors.save_flavor_info({}, fake_flavor))
        fake_instance = {'uuid': 'fake_uuid',
                         'instance_type_id': fake_flavor['id'],
                         'project_id': 'fake_project_id',
                         'system_metadata': sys_meta}
        fake_migration = {'source_compute': 'fake_compute_source',
                          'dest_compute': 'fake_compute_dest'}

        def fake_mig_inst_method(*args, **kwargs):
            info['kwargs'] = kwargs

        def fake_is_multi_host(*args, **kwargs):
            return multi_host

        def fake_get_floaters(*args, **kwargs):
            return ['fake_float1', 'fake_float2']

        self.stubs.Set(network_rpcapi.NetworkAPI, method,
                       fake_mig_inst_method)
        self.stubs.Set(self.network_api, '_is_multi_host',
                       fake_is_multi_host)
        self.stubs.Set(self.network_api, '_get_floating_ip_addresses',
                       fake_get_floaters)

        expected = {'instance_uuid': 'fake_uuid',
                    'source_compute': 'fake_compute_source',
                    'dest_compute': 'fake_compute_dest',
                    'rxtx_factor': 1.21,
                    'project_id': 'fake_project_id',
                    'floating_addresses': None}
        if multi_host:
            expected['floating_addresses'] = ['fake_float1', 'fake_float2']
        return fake_instance, fake_migration, expected

    def test_migrate_instance_start_with_multhost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_start', True, info)
        expected['host'] = 'fake_compute_source'
        self.network_api.migrate_instance_start(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)

    def test_migrate_instance_start_without_multhost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_start', False, info)
        self.network_api.migrate_instance_start(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)

    def test_migrate_instance_finish_with_multhost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_finish', True, info)
        expected['host'] = 'fake_compute_dest'
        self.network_api.migrate_instance_finish(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)

    def test_migrate_instance_finish_without_multhost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_finish', False, info)
        self.network_api.migrate_instance_finish(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)

    def test_is_multi_host_instance_has_no_fixed_ip(self):
        def fake_fixed_ip_get_by_instance(ctxt, uuid):
            raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid)

        self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
                       fake_fixed_ip_get_by_instance)
        instance = {'uuid': FAKE_UUID}
        self.assertFalse(self.network_api._is_multi_host(self.context,
                                                         instance))

    @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
    @mock.patch('nova.objects.network.Network.get_by_id')
    def _test_is_multi_host_network_has_no_project_id(self, is_multi_host,
                                                      net_get, fip_get):
        net_get.return_value = objects.Network(id=123,
                                               project_id=None,
                                               multi_host=is_multi_host)
        fip_get.return_value = [objects.FixedIP(
            network_id=123, instance_uuid=FAKE_UUID)]
        instance = {'uuid': FAKE_UUID}
        result = self.network_api._is_multi_host(self.context, instance)
        self.assertEqual(is_multi_host, result)

    def test_is_multi_host_network_has_no_project_id_multi(self):
        self._test_is_multi_host_network_has_no_project_id(True)

    def test_is_multi_host_network_has_no_project_id_non_multi(self):
        self._test_is_multi_host_network_has_no_project_id(False)

    @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
    @mock.patch('nova.objects.network.Network.get_by_id')
    def _test_is_multi_host_network_has_project_id(self, is_multi_host,
                                                   net_get, fip_get):
        net_get.return_value = objects.Network(
            id=123, project_id=self.context.project_id,
            multi_host=is_multi_host)
        fip_get.return_value = [
            objects.FixedIP(network_id=123, instance_uuid=FAKE_UUID)]
        instance = {'uuid': FAKE_UUID}
        result = self.network_api._is_multi_host(self.context, instance)
        self.assertEqual(is_multi_host, result)

    def test_is_multi_host_network_has_project_id_multi(self):
        self._test_is_multi_host_network_has_project_id(True)

    def test_is_multi_host_network_has_project_id_non_multi(self):
        self._test_is_multi_host_network_has_project_id(False)

    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.disassociate')
    def test_network_disassociate_project(self, mock_disassociate, mock_get):
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        # project=None means "disassociate the project".
        self.network_api.associate(self.context, FAKE_UUID, project=None)
        mock_disassociate.assert_called_once_with(self.context, net_obj.id,
                                                  host=False, project=True)

    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.disassociate')
    def test_network_disassociate_host(self, mock_disassociate, mock_get):
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        # host=None means "disassociate the host".
        self.network_api.associate(self.context, FAKE_UUID, host=None)
        mock_disassociate.assert_called_once_with(self.context, net_obj.id,
                                                  host=True, project=False)

    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.associate')
    def test_network_associate_project(self, mock_associate, mock_get):
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        project = mock.sentinel.project
        self.network_api.associate(self.context, FAKE_UUID, project=project)
        mock_associate.assert_called_once_with(self.context, project,
                                               network_id=net_obj.id,
                                               force=True)

    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.save')
    def test_network_associate_host(self, mock_save, mock_get):
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        host = str(mock.sentinel.host)
        self.network_api.associate(self.context, FAKE_UUID, host=host)
        mock_save.assert_called_once_with()
        self.assertEqual(host, net_obj.host)

    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.disassociate')
    def test_network_disassociate(self, mock_disassociate, mock_get):
        mock_get.return_value = objects.Network(context=self.context, id=123)
        self.network_api.disassociate(self.context, FAKE_UUID)
        mock_disassociate.assert_called_once_with(self.context, 123,
                                                  project=True, host=True)

    def _test_refresh_cache(self, method, *args, **kwargs):
        # This test verifies that no call to get_instance_nw_info() is made
        # from the @refresh_cache decorator for the tested method.
        with contextlib.nested(
            mock.patch.object(self.network_api.network_rpcapi, method),
            mock.patch.object(self.network_api.network_rpcapi,
                              'get_instance_nw_info'),
        ) as (
            method_mock, nwinfo_mock
        ):
            method_mock.return_value = network_model.NetworkInfo([])
            getattr(self.network_api, method)(*args, **kwargs)
            self.assertFalse(nwinfo_mock.called)

    def test_allocate_for_instance_refresh_cache(self):
        sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
        instance = fake_instance.fake_instance_obj(
            self.context, expected_attrs=['system_metadata'],
            system_metadata=sys_meta)
        vpn = 'fake-vpn'
        requested_networks = 'fake-networks'
        self._test_refresh_cache('allocate_for_instance', self.context,
                                 instance, vpn, requested_networks)

    def test_add_fixed_ip_to_instance_refresh_cache(self):
        sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
        instance = fake_instance.fake_instance_obj(
            self.context, expected_attrs=['system_metadata'],
            system_metadata=sys_meta)
        network_id = 'fake-network-id'
        self._test_refresh_cache('add_fixed_ip_to_instance', self.context,
                                 instance, network_id)

    def test_remove_fixed_ip_from_instance_refresh_cache(self):
        sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
        instance = fake_instance.fake_instance_obj(
            self.context, expected_attrs=['system_metadata'],
            system_metadata=sys_meta)
        address = 'fake-address'
        self._test_refresh_cache('remove_fixed_ip_from_instance', self.context,
                                 instance, address)

    @mock.patch('nova.db.fixed_ip_get_by_address')
    def test_get_fixed_ip_by_address(self, fip_get):
        fip_get.return_value = test_fixed_ip.fake_fixed_ip
        fip = self.network_api.get_fixed_ip_by_address(self.context,
                                                       'fake-addr')
        self.assertIsInstance(fip, objects.FixedIP)

    @mock.patch('nova.objects.FixedIP.get_by_id')
    def test_get_fixed_ip(self, mock_get_by_id):
        mock_get_by_id.return_value = mock.sentinel.fixed_ip
        self.assertEqual(mock.sentinel.fixed_ip,
                         self.network_api.get_fixed_ip(self.context,
                                                       mock.sentinel.id))
        mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id)

    @mock.patch('nova.objects.FixedIP.get_by_floating_address')
    def test_get_instance_by_floating_address(self, mock_get_by_floating):
        mock_get_by_floating.return_value = objects.FixedIP(
            instance_uuid=mock.sentinel.instance_uuid)
        self.assertEqual(str(mock.sentinel.instance_uuid),
                         self.network_api.get_instance_id_by_floating_address(
                             self.context, mock.sentinel.floating))
        mock_get_by_floating.assert_called_once_with(self.context,
                                                     mock.sentinel.floating)

    @mock.patch('nova.objects.FixedIP.get_by_floating_address')
    def test_get_instance_by_floating_address_none(self,
                                                   mock_get_by_floating):
        mock_get_by_floating.return_value = None
        self.assertIsNone(
            self.network_api.get_instance_id_by_floating_address(
                self.context, mock.sentinel.floating))
        mock_get_by_floating.assert_called_once_with(self.context,
                                                     mock.sentinel.floating)
@mock.patch('nova.network.api.API')
@mock.patch('nova.db.instance_info_cache_update')
class TestUpdateInstanceCache(test.TestCase):
    """Tests for base_api.update_instance_cache_with_nw_info and the
    @refresh_cache decorator.
    """

    def setUp(self):
        super(TestUpdateInstanceCache, self).setUp()
        self.context = context.get_admin_context()
        self.instance = {'uuid': FAKE_UUID}
        vifs = [network_model.VIF(id='super_vif')]
        self.nw_info = network_model.NetworkInfo(vifs)
        self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info',
                                                        self.nw_info)

    def test_update_nw_info_none(self, db_mock, api_mock):
        # nw_info=None triggers a fresh fetch from the network API.
        api_mock._get_instance_nw_info.return_value = self.nw_info
        base_api.update_instance_cache_with_nw_info(api_mock, self.context,
                                                    self.instance, None)
        api_mock._get_instance_nw_info.assert_called_once_with(self.context,
                                                               self.instance)
        db_mock.assert_called_once_with(self.context, self.instance['uuid'],
                                        {'network_info': self.nw_json})

    def test_update_nw_info_one_network(self, db_mock, api_mock):
        api_mock._get_instance_nw_info.return_value = self.nw_info
        base_api.update_instance_cache_with_nw_info(api_mock, self.context,
                                                    self.instance,
                                                    self.nw_info)
        self.assertFalse(api_mock._get_instance_nw_info.called)
        db_mock.assert_called_once_with(self.context, self.instance['uuid'],
                                        {'network_info': self.nw_json})

    def test_update_nw_info_empty_list(self, db_mock, api_mock):
        # An explicit empty NetworkInfo is cached as-is, not re-fetched.
        api_mock._get_instance_nw_info.return_value = self.nw_info
        base_api.update_instance_cache_with_nw_info(api_mock, self.context,
                                                    self.instance,
                                                    network_model.NetworkInfo(
                                                        []))
        self.assertFalse(api_mock._get_instance_nw_info.called)
        db_mock.assert_called_once_with(self.context, self.instance['uuid'],
                                        {'network_info': '[]'})

    def test_decorator_return_object(self, db_mock, api_mock):
        @base_api.refresh_cache
        def func(self, context, instance):
            return network_model.NetworkInfo([])
        func(api_mock, self.context, self.instance)
        self.assertFalse(api_mock._get_instance_nw_info.called)
        db_mock.assert_called_once_with(self.context, self.instance['uuid'],
                                        {'network_info': '[]'})

    def test_decorator_return_none(self, db_mock, api_mock):
        @base_api.refresh_cache
        def func(self, context, instance):
            pass
        api_mock._get_instance_nw_info.return_value = self.nw_info
        func(api_mock, self.context, self.instance)
        api_mock._get_instance_nw_info.assert_called_once_with(self.context,
                                                               self.instance)
        db_mock.assert_called_once_with(self.context, self.instance['uuid'],
                                        {'network_info': self.nw_json})
class NetworkHooksTestCase(test.BaseHookTestCase):
    """Verifies the instance_network_info hook is wired to the cache update."""

    def test_instance_network_info_hook(self):
        info_func = base_api.update_instance_cache_with_nw_info
        self.assert_has_hook('instance_network_info', info_func)
# ---- (file boundary marker from dataset dump) ----
# Compute the date of Easter according to various calendars.
#
# Reference: http://www.newadvent.org/cathen/05480b.htm
#
# Note that the table at the end of the section "Inaccuracy of the Metonic
# Cycle" has a misprint: all "+1" indications on or after year 4500 should be
# 100 years later than shown. (The text above the table gets it right.)
import hebrew
####################################
# Main entry point for the computus.
def gregorian_easter(year):
    """Return Gregorian Easter data for *year* as Julian-day numbers.

    NOTE(review): easter(), gregorian_year() and gregorian_to_jd() are
    provided elsewhere (not visible in this module) — confirm their
    signatures against the caller.
    """
    easter_data = easter(gregorian_year(year))
    easter_day = gregorian_to_jd((year, ) + easter_data['easter'])
    equinox = gregorian_to_jd((year, ) + easter_data['equinox'])
    new_moon = gregorian_to_jd((year, ) + easter_data['new_moon'])
    # The ecclesiastical full moon falls 13 days after the new moon.
    full_moon = new_moon + 13
    return {
        'gregorian_easter': easter_day,
        'gregorian_equinox': equinox,
        'gregorian_new_moon': new_moon,
        'gregorian_full_moon': full_moon,
    }
def julian_easter(year):
    """Return Julian Easter data for *year*, corrected to fall after Passover.

    NOTE(review): easter(), julian_year() and julian_to_jd() are provided
    elsewhere (not visible in this module).
    """
    easter_data = easter(julian_year(year))
    presumptive_easter = julian_to_jd((year, ) + easter_data['easter'])
    equinox = julian_to_jd((year, ) + easter_data['equinox'])
    new_moon = julian_to_jd((year, ) + easter_data['new_moon'])
    passover_begins = hebrew.pesach_jd(hebrew.ad_to_am_at_pesach(year))
    easter_day = presumptive_easter
    needed_passover_correction = False
    # Push Easter back a week at a time until it falls on/after Passover.
    # BUG FIX: the original condition compared against presumptive_easter,
    # which never changes inside the loop, so any needed correction spun
    # forever; the moving easter_day is what must be tested.
    while passover_begins > easter_day:
        needed_passover_correction = True
        easter_day += 7
    return {
        'julian_easter': easter_day,
        'julian_uncorrected_easter': presumptive_easter,
        'julian_equinox': equinox,
        'julian_new_moon': new_moon,
        'julian_full_moon': new_moon + 13,
        'julian_passover_correction': needed_passover_correction,
        'passover': passover_begins,
        'passover_prep': passover_begins - 1,
        'nissan': passover_begins - 14
    }
def easter(year_data):
    """Apply the Easter rule to *year_data*.

    year_data is a day-indexed sequence of (month, day, weekday, new_moon)
    tuples; weekday 0 is Sunday. Returns the (month, day) of Easter, the
    vernal equinox, and the Paschal new moon.
    """
    i_vernal_equinox = find_vernal_equinox(year_data)
    # The Paschal new moon may fall up to 13 days before the equinox.
    i_paschal_new_moon = find_new_moon_after(i_vernal_equinox - 13, year_data)
    # (Actually this is the day *after* the Paschal full moon, just as the rule
    # calls for.)
    i_paschal_full_moon = i_paschal_new_moon + 14
    i_easter = find_first_sunday_after(i_paschal_full_moon, year_data)
    return {
        'easter': year_data[i_easter][0:2],
        'equinox': year_data[i_vernal_equinox][0:2],
        'new_moon': year_data[i_paschal_new_moon][0:2]
    }


########################################
# Boring functions to scan the calendar.
def find_vernal_equinox(year_data):
    """Return the index of the ecclesiastical vernal equinox (March 21)."""
    return find_day(year_data, 3, 21)


def find_day(year_data, target_month, target_day):
    """Return the first index whose entry matches (target_month, target_day)."""
    i = 0
    while 1:
        (month, day, weekday, new_moon) = year_data[i]
        if month == target_month and day == target_day:
            return i
        i = i + 1


def find_new_moon_after(i, year_data):
    """Return the first index at or after *i* flagged as a new moon."""
    while 1:
        (month, day, weekday, new_moon) = year_data[i]
        if new_moon:
            return i
        i = i + 1


def find_first_sunday_after(i, year_data):
    """Return the first index at or after *i* whose weekday is Sunday (0)."""
    while 1:
        (month, day, weekday, new_moon) = year_data[i]
        if weekday == 0:
            return i
        i = i + 1
##################################################
# The Metonic cycle and its Gregorian refinements.
def golden_number(year):
r = (1 + year) % 19
if r == 0: return 19
else: return r
# The Gregorian epact is a base value determined in 1584, plus some
# refinements to correct for "jitter" in the lunar and solar calendars.
def gregorian_epact(year):
raw = base_greg_epact(year) + lunar_equation(year) + solar_equation(year)
return raw % 30
# The Metonic cycle merely counts by 11 mod 30, but somewhat arbitrarily
# loops back to the beginning after 19 entries, not the expected 30.
metonic_cycle = [ None,
0, 11, 22, 3, 14, 25, 6, 17, 28, 9, 20, 1, 12, 23, 4, 15, 26, 7, 18 ]
def metonic_epact(year):
    # Raw epact from the 19-entry Metonic table, keyed by Golden Number.
    return metonic_cycle[golden_number(year)]
# By the time of the Gregorian calendar, the epacts had already slipped by
# one day.
def base_greg_epact(year):
    # +1 compensates for the one-day slippage already accumulated by the
    # time of the Gregorian reform (see comment above).
    return metonic_epact(year) + 1
# Compensate for the Metonic Cycle's built-in inaccuracy: the ratio of a lunar
# to a solar year isn't really a rational number!
def lunar_equation(year):
    """Lunar correction to the epact: +8 days per 2500 years, stepped.

    Floor division is written explicitly (``divmod``): this code was
    written for Python 2, where ``/`` on ints floors; under Python 3 the
    original would produce floats and a wrong fractional correction.
    """
    cycles, remainder = divmod(year - 1500, 2500)
    return (8 * cycles) + (remainder // 300)
# Compensate for the "missing" leap years in the Gregorian calendar.
def solar_equation(year):
    """Solar correction: -1 per dropped Gregorian leap century (3 per 400y).

    Floor division is written explicitly (``divmod``): the original used
    Python 2 integer ``/``, which floats (and breaks) under Python 3.
    Years before the reform era get no correction.
    """
    if year < 1600:
        return 0
    centuries, remainder = divmod(year - 1600, 400)
    return (-3 * centuries) - (remainder // 100)
###########################################
# Construct the static liturgical calendar.
def month_length(month):
    """Number of days in the given month (February is always 28 here)."""
    if month == 2:
        return 28
    return 31 if month in (1, 3, 5, 7, 8, 10, 12) else 30
def build_calendarium():
    """Build the static 365-day liturgical calendar.

    Each entry is a (month, day, dominical_number, epact) tuple.  Some
    dates appear more than once because extra epact labels are attached
    to them.  February always has 28 days here (see the bissextile notes
    further down in this file).
    """
    calendarium = []
    month = 1
    day = 1
    dominical = 1
    epact = 0
    # NOTE(review): this parity flips every day; presumably it tracks
    # the alternating 30/29-day lunation bookkeeping -- confirm.
    lunation_parity = 1
    for x in range(365):
        calendarium += [ (month, day, dominical, epact) ]
        # We'll use -25 to indicate the special label for epact 24/25
        # conflict resolution. (See hack25, below.)
        if ((lunation_parity == 1 and epact == 25) or
            (lunation_parity == 0 and epact == 26)):
            calendarium += [ (month, day, dominical, -25) ]
        if lunation_parity == 0 and epact == 25:
            epact -= 1
            calendarium += [ (month, day, dominical, epact) ] # Again.
        day += 1
        if day > month_length(month):
            day = 1
            month += 1
        lunation_parity = not lunation_parity
        # Dominical numbers cycle A..G (1..7) with the days of the week.
        dominical += 1
        if dominical == 8: dominical = 1
        # Epacts count down daily, wrapping from 0 back to 29.
        epact -= 1
        if epact == -1: epact = 29
    return calendarium
# Shared static calendar used by generic_year() and the printers below.
calendarium = build_calendarium()
# Month names, 1-indexed (index 0 unused).
months = [
    None, 'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December' ]
# Roman-numeral labels for epacts 0 ('*') through 30.
numerals = [
    '*', 'i', 'ii', 'iii', 'iv', 'v', 'vi', 'vii', 'viii', 'ix', 'x',
    'xi', 'xii', 'xiii', 'xiv', 'xv', 'xvi', 'xvii', 'xviii', 'xix', 'xx',
    'xxi', 'xxii', 'xxiii', 'xxiv', 'xxv', 'xxvi', 'xxvii', 'xxviii',
    'xxix', 'xxx' ]
# Dominical Letters, 1-indexed (index 0 unused).
letters = [
    None, 'A', 'B', 'C', 'D', 'E', 'F', 'G' ]
# Weekday names; index 0 is Sunday.
weekdays = [
    'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday',
    'Friday', 'Saturday' ]
def name_month(month): return months[month]  # 1-indexed month-name lookup
def name_epact(epact):
    """Roman-numeral label for an epact; -25 is the special '25' label."""
    return '25' if epact == -25 else numerals[epact]
#######################################
# Build the yearly liturgical calendar.
# The "second" Dominical Letter is the one that takes effect after the leap
# day of a leap year. This formula finds the (only) Dominical Letter of a
# non-leap year, or the second Dominical Letter of a leap year.
#
# Reference: http://www.newadvent.org/cathen/05109a.htm
def second_gregorian_dominical(year):
    """Dominical Letter number (1=A .. 7=G) of a Gregorian year.

    For a leap year this is the *second* letter -- the one in force
    after the intercalary day; for a common year it is the only letter.
    Floor division is made explicit with ``//``: the original relied on
    Python 2 integer ``/`` and breaks under Python 3.

    Reference: http://www.newadvent.org/cathen/05109a.htm
    """
    a = year + 1
    b = year // 4
    c = year // 100 - 16
    d = c // 4
    e = a + b + d - c
    f = e % 7
    return 7 - f
def second_julian_dominical(year):
    """Dominical Letter number (1=A .. 7=G) of a Julian-calendar year.

    The 28-branch dispatch is folded into a table indexed by the year's
    solar number; solar_number(year) is inlined as ((year + 8) % 28) + 1,
    so the index below is simply (year + 8) % 28.
    """
    letters_by_solar_number = (
        6, 5, 4, 3, 1, 7, 6, 5, 3, 2, 1, 7, 5, 4,
        3, 2, 7, 6, 5, 4, 2, 1, 7, 6, 4, 3, 2, 1,
    )
    return letters_by_solar_number[(year + 8) % 28]
def solar_number(year):
    """Position (1-28) of the year within the Julian 28-year solar cycle."""
    offset = (year + 8) % 28
    return offset + 1
# Currently a hack: we ignore the Jan-Feb Dominical Letter for a leap year.
# Thus, we return the wrong values for those months. That's fine if Easter
# is all we care about.
#
# Note that we're not inserting the leap day. This is *not* a bug. In the
# liturgical calendar, like the Roman calendar on which it's based, the
# intercalary day is treated as the first "half" of a 48-hour day, which is
# February 24, of all things. (See Wikipedia s.v. "bissextile.") The day of
# the week *does* change at the 24-hour mark, however, and that's when the leap
# year's second Dominical Letter takes effect.
#
# This is all very silly, but the important thing is that for the sake of
# the computation of moons, we always count 28 days in February. When
# February 24 is doubled, its moon phase is doubled with it.
def gregorian_year(year):
    """Build the day-by-day Gregorian liturgical year for ``year``."""
    dominical = second_gregorian_dominical(year)
    epact = gregorian_epact(year)
    # Aloysius Lilius's hack, not mine: it was considered important that
    # the same new moon never fall on the same date two years running,
    # so for epact 25 with a late Golden Number the second new moon is
    # shifted (the -25 calendarium entries).
    shift_25 = (epact == 25 and golden_number(year) > 11)
    return generic_year(dominical, epact, shift_25)
def julian_year(year):
    """Build the day-by-day Julian liturgical year for ``year``."""
    dominical = second_julian_dominical(year)
    # TODO: I have no idea why the -2 is necessary.
    epact = metonic_epact(year - 2)
    return generic_year(dominical, epact, hack25=False)
def generic_year(year_dom, year_epact, hack25):
    """Expand the static calendarium into one year's day list.

    Returns a list of (month, day, weekday, new_moon) tuples, with
    weekday 0 = Sunday.  When hack25 is true the special -25 entries
    mark the new moons instead of the year's own epact (Lilius's
    24/25 conflict rule).
    """
    days = []
    this_month = None
    this_day = None
    this_weekday = None
    this_new_moon = None
    for cal in calendarium:
        (month, day, dominical, epact) = cal
        # The calendarium lists some dates more than once (extra epact
        # labels); emit each date only once, when it changes.
        if (this_month != month) or (this_day != day):
            if this_month != None:
                days += [(this_month, this_day, this_weekday, this_new_moon)]
            this_month = month
            this_day = day
            this_weekday = (dominical - year_dom) % 7
            this_new_moon = False
        if hack25:
            if epact == -25: this_new_moon = True
        else:
            if epact == year_epact: this_new_moon = True
    # Don't forget the last day of the year.
    days += [(this_month, this_day, this_weekday, this_new_moon)]
    return days
def julian_to_gregorian(date):
    # Convert a Julian-calendar (year, month, day) to Gregorian via the
    # Julian Day number as an intermediate.
    return jd_to_gregorian(julian_to_jd(date))
def julian_to_jd(date):
    """Julian Day number of a (year, month, day) Julian-calendar date.

    The tuple parameter of the original is unpacked in the body instead:
    tuple parameters in signatures were removed in Python 3 (PEP 3113)
    and are a SyntaxError there.  Callers still pass one 3-tuple.
    """
    year, month, day = date
    return 367 * year - int(7 * (year + 5001 + int((month - 9) / 7.0)) / 4.0) \
        + int(275 * month / 9.0) + day + 1729777
def gregorian_to_jd(date):
    """Julian Day number of a (year, month, day) Gregorian date.

    Tuple parameter unpacked in the body (PEP 3113 removed tuple
    parameters in Python 3); callers still pass one 3-tuple.
    """
    year, month, day = date
    return 367 * year - int(7 * (year + int((month + 9) / 12.0)) / 4.0) \
        - int(3 * (int((year + (month - 9) / 7.0) / 100.0) + 1) / 4.0) \
        + int(275 * month / 9.0) + day + 1721029
def jd_to_julian(jd):
    """Convert a Julian Day number to a Julian-calendar date (unimplemented).

    The original raised a bare string ('wheeeee!'); string exceptions
    are invalid in Python 3 (they raise TypeError instead of the
    intended signal).  Raise the canonical stub exception.
    """
    raise NotImplementedError('jd_to_julian is not implemented')
def jd_to_gregorian(jd):
    """Convert a Julian Day number to a Gregorian (year, month, day) tuple.

    Standard integer calendar-conversion algorithm.  Floor division is
    written explicitly with ``//``: the original Python 2 ``/`` would
    yield floats and wrong results under Python 3.
    """
    j = jd + 32044
    g = j // 146097                   # whole 400-year Gregorian cycles
    dg = j % 146097
    c = (dg // 36524 + 1) * 3 // 4    # centuries within the cycle
    dc = dg - c * 36524
    b = dc // 1461                    # 4-year groups within the century
    db = dc % 1461
    a = (db // 365 + 1) * 3 // 4      # years within the group
    da = db - a * 365
    y = g * 400 + c * 100 + b * 4 + a
    m = (da * 5 + 308) // 153 - 2     # March-based month index
    d = da - (m + 4) * 153 // 5 + 122
    year = y - 4800 + (m + 2) // 12
    month = (m + 2) % 12 + 1
    day = d + 1
    return (year, month, day)
##################
# Output niceties.
def format_cal_item(entry):
    """One printable line for a static calendarium entry."""
    month, day, dominical, epact = entry
    return '%s %2d %s %s' % (
        name_month(month)[:3], day, letters[dominical], name_epact(epact))
def print_calendarium():
    """Print the whole static calendarium, one day per line.

    print() is called as a function: the original Python 2 print
    statement is a SyntaxError under Python 3, while the parenthesized
    single-argument form behaves identically on both.
    """
    for cal in calendarium:
        print(format_cal_item(cal))
def format_day(day_tuple):
    """Printable 'Www Mmm dd' line, with a new-moon marker when set."""
    month, day, weekday, new_moon = day_tuple
    base = '%s %s %2d' % (weekdays[weekday][:3], name_month(month)[:3], day)
    return base + ' (new moon)' if new_moon else base
def print_year(year):
    """Print each formatted day of a year list (as built by generic_year).

    print() is called as a function: the original Python 2 print
    statement fails to parse under Python 3; this form behaves
    identically on both.
    """
    for day in year:
        print(format_day(day))
def format_easter_entry(day, year):
    """Formatted day plus the year, e.g. 'Sun Apr  4, 1999'."""
    formatted_day = format_day(day)
    return '%s, %d' % (formatted_day, year)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine.cfn import functions as cfn_funcs
from heat.engine.cfn import template as cfn_template
from heat.engine import function
from heat.engine.hot import functions as hot_funcs
from heat.engine.hot import parameters
from heat.engine import rsrc_defn
from heat.engine import template
# Keys recognised inside a HOT resource definition.  The chained
# tuple-unpacking assignment also exposes each key as its own
# module-level constant (RES_TYPE, RES_PROPERTIES, ...).
_RESOURCE_KEYS = (
    RES_TYPE, RES_PROPERTIES, RES_METADATA, RES_DEPENDS_ON,
    RES_DELETION_POLICY, RES_UPDATE_POLICY,
) = (
    'type', 'properties', 'metadata', 'depends_on',
    'deletion_policy', 'update_policy',
)
class HOTemplate20130523(template.Template):
    """
    A Heat Orchestration Template format stack template.

    Implements the 2013-05-23 HOT revision on top of the generic
    Template machinery, translating HOT sections and resource attribute
    names into the CFN-style structures the rest of the engine expects.
    """
    SECTIONS = (
        VERSION, DESCRIPTION, PARAMETER_GROUPS,
        PARAMETERS, RESOURCES, OUTPUTS, MAPPINGS
    ) = (
        'heat_template_version', 'description', 'parameter_groups',
        'parameters', 'resources', 'outputs', '__undefined__'
    )
    # Sections that callers may not read through __getitem__.
    SECTIONS_NO_DIRECT_ACCESS = set([PARAMETERS, VERSION])
    # CFN section names mapped to their HOT equivalents.
    _CFN_TO_HOT_SECTIONS = {cfn_template.CfnTemplate.VERSION: VERSION,
                            cfn_template.CfnTemplate.DESCRIPTION: DESCRIPTION,
                            cfn_template.CfnTemplate.PARAMETERS: PARAMETERS,
                            cfn_template.CfnTemplate.MAPPINGS: MAPPINGS,
                            cfn_template.CfnTemplate.RESOURCES: RESOURCES,
                            cfn_template.CfnTemplate.OUTPUTS: OUTPUTS}
    # HOT resource/output keys mapped to their CFN spellings.
    _RESOURCE_HOT_TO_CFN_ATTRS = {'type': 'Type',
                                  'properties': 'Properties',
                                  'metadata': 'Metadata',
                                  'depends_on': 'DependsOn',
                                  'deletion_policy': 'DeletionPolicy',
                                  'update_policy': 'UpdatePolicy',
                                  'description': 'Description',
                                  'value': 'Value'}
    # Intrinsic functions available in this template version (both the
    # HOT spellings and the legacy CFN ones).
    functions = {
        'Fn::GetAZs': cfn_funcs.GetAZs,
        'get_param': hot_funcs.GetParam,
        'get_resource': cfn_funcs.ResourceRef,
        'Ref': cfn_funcs.Ref,
        'get_attr': hot_funcs.GetAttThenSelect,
        'Fn::Select': cfn_funcs.Select,
        'Fn::Join': cfn_funcs.Join,
        'list_join': hot_funcs.Join,
        'Fn::Split': cfn_funcs.Split,
        'str_replace': hot_funcs.Replace,
        'Fn::Replace': cfn_funcs.Replace,
        'Fn::Base64': cfn_funcs.Base64,
        'Fn::MemberListToMap': cfn_funcs.MemberListToMap,
        'resource_facade': hot_funcs.ResourceFacade,
        'Fn::ResourceFacade': cfn_funcs.ResourceFacade,
        'get_file': hot_funcs.GetFile,
    }
    def __getitem__(self, section):
        """Get the relevant section in the template.

        Accepts either HOT or CFN section names; resources and outputs
        are returned translated into CFN format.

        :raises KeyError: for unknown sections and for sections that may
            not be accessed directly (parameters, version).
        """
        # first translate from CFN into HOT terminology if necessary
        if section not in self.SECTIONS:
            section = HOTemplate20130523._translate(
                section, self._CFN_TO_HOT_SECTIONS,
                _('"%s" is not a valid template section'))
        if section not in self.SECTIONS:
            raise KeyError(_('"%s" is not a valid template section') % section)
        if section in self.SECTIONS_NO_DIRECT_ACCESS:
            raise KeyError(
                _('Section %s can not be accessed directly.') % section)
        # HOT has no mappings section; always present it as empty.
        if section == self.MAPPINGS:
            return {}
        if section == self.DESCRIPTION:
            default = 'No description'
        else:
            default = {}
        # if a section is None (empty yaml section) return {}
        # to be consistent with an empty json section.
        the_section = self.t.get(section) or default
        # In some cases (e.g. parameters), also translate each entry of
        # a section into CFN format (case, naming, etc) so the rest of the
        # engine can cope with it.
        # This is a shortcut for now and might be changed in the future.
        if section == self.RESOURCES:
            return self._translate_resources(the_section)
        if section == self.OUTPUTS:
            return self._translate_outputs(the_section)
        return the_section
    @staticmethod
    def _translate(value, mapping, err_msg=None):
        """Look ``value`` up in ``mapping``.

        On a miss, raise KeyError(err_msg % value) when err_msg is
        given, otherwise re-raise the original KeyError.
        """
        try:
            return mapping[value]
        except KeyError as ke:
            if err_msg:
                raise KeyError(err_msg % value)
            else:
                raise ke
    def _translate_section(self, section, sub_section, data, mapping):
        """Translate one section's entries from HOT to CFN attribute names.

        :param section: plural section name (e.g. 'resources'); the
            trailing 's' is stripped for error messages.
        :param sub_section: key each entry must contain (e.g. 'type').
        :param data: mapping of entry name to its attribute map.
        :param mapping: HOT-to-CFN attribute-name mapping to apply.
        :raises exception.StackValidationFailed: on empty or malformed
            entries, or unknown keywords.
        """
        cfn_objects = {}
        obj_name = section[:-1]
        err_msg = _('"%%s" is not a valid keyword inside a %s '
                    'definition') % obj_name
        for name, attrs in six.iteritems(data):
            cfn_object = {}
            if not attrs:
                args = {'object_name': obj_name, 'sub_section': sub_section}
                message = _('Each %(object_name)s must contain a '
                            '%(sub_section)s key.') % args
                raise exception.StackValidationFailed(message=message)
            try:
                for attr, attr_value in six.iteritems(attrs):
                    cfn_attr = self._translate(attr, mapping, err_msg)
                    cfn_object[cfn_attr] = attr_value
                cfn_objects[name] = cfn_object
            except AttributeError:
                # attrs was not a mapping at all.
                message = _('"%(section)s" must contain a map of '
                            '%(obj_name)s maps. Found a [%(_type)s] '
                            'instead') % {'section': section,
                                          '_type': type(attrs),
                                          'obj_name': obj_name}
                raise exception.StackValidationFailed(message=message)
            except KeyError as e:
                # an invalid keyword was found
                raise exception.StackValidationFailed(message=six.text_type(e))
        return cfn_objects
    def _translate_resources(self, resources):
        """Get the resources of the template translated into CFN format."""
        return self._translate_section('resources', 'type', resources,
                                       self._RESOURCE_HOT_TO_CFN_ATTRS)
    def get_section_name(self, section):
        """Return the HOT spelling of a (possibly CFN-named) attribute key."""
        cfn_to_hot_attrs = dict(
            zip(six.itervalues(self._RESOURCE_HOT_TO_CFN_ATTRS),
                six.iterkeys(self._RESOURCE_HOT_TO_CFN_ATTRS)))
        return cfn_to_hot_attrs.get(section, section)
    def _translate_outputs(self, outputs):
        """Get the outputs of the template translated into CFN format."""
        HOT_TO_CFN_ATTRS = {'description': 'Description',
                            'value': 'Value'}
        return self._translate_section('outputs', 'value', outputs,
                                       HOT_TO_CFN_ATTRS)
    def param_schemata(self, param_defaults=None):
        """Return a dict of parameter name to HOTParamSchema.

        Entries in ``param_defaults`` override template-declared
        defaults.
        """
        parameter_section = self.t.get(self.PARAMETERS) or {}
        pdefaults = param_defaults or {}
        for name, schema in six.iteritems(parameter_section):
            if name in pdefaults:
                parameter_section[name]['default'] = pdefaults[name]
        params = six.iteritems(parameter_section)
        return dict((name, parameters.HOTParamSchema.from_dict(name, schema))
                    for name, schema in params)
    def parameters(self, stack_identifier, user_params, param_defaults=None):
        """Return a HOTParameters object for this template."""
        return parameters.HOTParameters(stack_identifier, self,
                                        user_params=user_params,
                                        param_defaults=param_defaults)
    def validate_resource_definitions(self, stack):
        """Validate the types of all keys in every resource definition.

        :raises exception.StackValidationFailed: when a key has the
            wrong type or the mandatory 'type' key is missing.
        """
        resources = self.t.get(self.RESOURCES) or {}
        allowed_keys = set(_RESOURCE_KEYS)
        try:
            for name, snippet in resources.items():
                data = self.parse(stack, snippet)
                if not self.validate_resource_key_type(RES_TYPE,
                                                       six.string_types,
                                                       'string',
                                                       allowed_keys,
                                                       name, data):
                    args = {'name': name, 'type_key': RES_TYPE}
                    msg = _('Resource %(name)s is missing '
                            '"%(type_key)s"') % args
                    raise KeyError(msg)
                self.validate_resource_key_type(
                    RES_PROPERTIES,
                    (collections.Mapping, function.Function),
                    'object', allowed_keys, name, data)
                self.validate_resource_key_type(
                    RES_METADATA,
                    (collections.Mapping, function.Function),
                    'object', allowed_keys, name, data)
                self.validate_resource_key_type(
                    RES_DEPENDS_ON,
                    collections.Sequence,
                    'list or string', allowed_keys, name, data)
                self.validate_resource_key_type(
                    RES_DELETION_POLICY,
                    six.string_types,
                    'string', allowed_keys, name, data)
                self.validate_resource_key_type(
                    RES_UPDATE_POLICY,
                    (collections.Mapping, function.Function),
                    'object', allowed_keys, name, data)
        except (TypeError, ValueError) as ex:
            raise exception.StackValidationFailed(message=six.text_type(ex))
    def resource_definitions(self, stack):
        """Return a dict of resource name to ResourceDefinition."""
        resources = self.t.get(self.RESOURCES) or {}
        def rsrc_defn_item(name, snippet):
            # Build one (name, ResourceDefinition) pair from a parsed
            # resource snippet; depends_on is normalised to a list.
            data = self.parse(stack, snippet)
            depends = data.get(RES_DEPENDS_ON)
            if not depends:
                depends = []
            elif isinstance(depends, six.string_types):
                depends = [depends]
            kwargs = {
                'resource_type': data.get(RES_TYPE),
                'properties': data.get(RES_PROPERTIES),
                'metadata': data.get(RES_METADATA),
                'depends': depends,
                'deletion_policy': data.get(RES_DELETION_POLICY),
                'update_policy': data.get(RES_UPDATE_POLICY),
                'description': None
            }
            defn = rsrc_defn.ResourceDefinition(name, **kwargs)
            return name, defn
        return dict(rsrc_defn_item(name, data)
                    for name, data in resources.items())
    def add_resource(self, definition, name=None):
        """Add (or replace) a resource definition in the template dict."""
        if name is None:
            name = definition.name
        if self.t.get(self.RESOURCES) is None:
            self.t[self.RESOURCES] = {}
        self.t[self.RESOURCES][name] = definition.render_hot()
class HOTemplate20141016(HOTemplate20130523):
    # 2014-10-16 HOT revision: the legacy CFN-style intrinsics are
    # rejected (hot_funcs.Removed) and get_attr no longer needs the
    # GetAttThenSelect shim.
    functions = {
        'get_attr': hot_funcs.GetAtt,
        'get_file': hot_funcs.GetFile,
        'get_param': hot_funcs.GetParam,
        'get_resource': cfn_funcs.ResourceRef,
        'list_join': hot_funcs.Join,
        'resource_facade': hot_funcs.ResourceFacade,
        'str_replace': hot_funcs.Replace,
        'Fn::Select': cfn_funcs.Select,
        # functions removed from 20130523
        'Fn::GetAZs': hot_funcs.Removed,
        'Fn::Join': hot_funcs.Removed,
        'Fn::Split': hot_funcs.Removed,
        'Fn::Replace': hot_funcs.Removed,
        'Fn::Base64': hot_funcs.Removed,
        'Fn::MemberListToMap': hot_funcs.Removed,
        'Fn::ResourceFacade': hot_funcs.Removed,
        'Ref': hot_funcs.Removed,
    }
class HOTemplate20150430(HOTemplate20141016):
    # 2015-04-30 HOT revision: adds the digest and repeat intrinsics on
    # top of the 2014-10-16 set.
    functions = {
        'digest': hot_funcs.Digest,
        'get_attr': hot_funcs.GetAtt,
        'get_file': hot_funcs.GetFile,
        'get_param': hot_funcs.GetParam,
        'get_resource': cfn_funcs.ResourceRef,
        'list_join': hot_funcs.Join,
        'repeat': hot_funcs.Repeat,
        'resource_facade': hot_funcs.ResourceFacade,
        'str_replace': hot_funcs.Replace,
        'Fn::Select': cfn_funcs.Select,
        # functions removed from 20130523
        'Fn::GetAZs': hot_funcs.Removed,
        'Fn::Join': hot_funcs.Removed,
        'Fn::Split': hot_funcs.Removed,
        'Fn::Replace': hot_funcs.Removed,
        'Fn::Base64': hot_funcs.Removed,
        'Fn::MemberListToMap': hot_funcs.Removed,
        'Fn::ResourceFacade': hot_funcs.Removed,
        'Ref': hot_funcs.Removed,
    }
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import unittest
from glob import glob
from shutil import move
from tempfile import mkdtemp
from airflow.utils import db as db_utils
from airflow import models, AirflowException, LoggingMixin
from airflow.utils.timezone import datetime
from tests.contrib.utils.gcp_authenticator import GcpAuthenticator
from tests.contrib.utils.run_once_decorator import run_once
# Repository root: three directory levels up from this test file.
AIRFLOW_MAIN_FOLDER = os.path.realpath(os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    os.pardir, os.pardir, os.pardir))
# Directory expected to sit three levels above the airflow checkout.
AIRFLOW_PARENT_FOLDER = os.path.realpath(os.path.join(AIRFLOW_MAIN_FOLDER,
    os.pardir, os.pardir, os.pardir))
ENV_FILE_RETRIEVER = os.path.join(AIRFLOW_PARENT_FOLDER,
    "get_system_test_environment_variables.py")
# Retrieve environment variables from parent directory retriever - it should be
# in the path ${AIRFLOW_SOURCE_DIR}/../../get_system_test_environment_variables.py
# and it should print all the variables in form of key=value to the stdout
class RetrieveVariables:
    """Retrieve system-test environment variables from the parent directory.

    Runs the retriever script (when present) and copies every KEY=VALUE
    line it prints into os.environ.  Guarded by @run_once so the
    subprocess is only spawned a single time per test session.
    """
    @staticmethod
    @run_once
    def retrieve_variables():
        if os.path.isfile(ENV_FILE_RETRIEVER):
            if os.environ.get('AIRFLOW__CORE__UNIT_TEST_MODE'):
                raise Exception("Please unset the AIRFLOW__CORE__UNIT_TEST_MODE")
            variables = subprocess.check_output([ENV_FILE_RETRIEVER]).decode("utf-8")
            print("Applying variables retrieved")
            for line in variables.split("\n"):
                try:
                    # Split on the first '=' only: values that themselves
                    # contain '=' (e.g. base64-encoded keys) previously
                    # raised ValueError and were silently dropped.
                    name, value = line.split("=", 1)
                except ValueError:
                    # Blank line or line without '=': skip it.
                    continue
                print("{}={}".format(name, value))
                os.environ[name] = value
RetrieveVariables.retrieve_variables()
DEFAULT_DATE = datetime(2015, 1, 1)
CONTRIB_OPERATORS_EXAMPLES_DAG_FOLDER = os.path.join(
AIRFLOW_MAIN_FOLDER, "airflow", "contrib", "example_dags")
OPERATORS_EXAMPLES_DAG_FOLDER = os.path.join(
AIRFLOW_MAIN_FOLDER, "airflow", "example_dags")
AIRFLOW_HOME = os.environ.get('AIRFLOW_HOME',
os.path.join(os.path.expanduser('~'), 'airflow'))
DAG_FOLDER = os.path.join(AIRFLOW_HOME, "dags")
SKIP_TEST_WARNING = """
The test is only run when the test is run in with GCP-system-tests enabled
environment. You can enable it in one of two ways:
* Set GCP_CONFIG_DIR environment variable to point to the GCP configuration
directory which keeps variables.env file with environment variables to set
and keys directory which keeps service account keys in .json format
* Run this test within automated environment variable workspace where
config directory is checked out next to the airflow one.
""".format(__file__)
class BaseGcpSystemTestCase(unittest.TestCase, LoggingMixin):
    """Base class for GCP system tests: manages GCP authentication state."""
    def __init__(self,
                 method_name,
                 gcp_key,
                 project_extra=None):
        super(BaseGcpSystemTestCase, self).__init__(methodName=method_name)
        self.gcp_authenticator = GcpAuthenticator(gcp_key=gcp_key,
                                                  project_extra=project_extra)
        # Set to True once setUp() has run; subclasses check it before
        # actually running DAGs.
        self.setup_called = False
    @staticmethod
    def skip_check(key_name):
        # True when no service-account key is available, i.e. the test
        # should be skipped.
        return GcpAuthenticator(key_name).full_key_path is None
    def setUp(self):
        self.gcp_authenticator.gcp_store_authentication()
        self.gcp_authenticator.gcp_authenticate()
        # We checked that authentication works. Now we revoke it to make
        # sure we are not relying on the default authentication
        self.gcp_authenticator.gcp_revoke_authentication()
        self.setup_called = True
    # noinspection PyPep8Naming
    def tearDown(self):
        # Put back whatever credentials were active before the test.
        self.gcp_authenticator.gcp_restore_authentication()
class DagGcpSystemTestCase(BaseGcpSystemTestCase):
    """Runs an example DAG end-to-end as a GCP system test.

    Symlinks the example DAG (plus any files sharing its base name) into
    the DAG folder, resets the database, runs the DAG, and cleans the
    symlinks up again afterwards.
    """
    def __init__(self,
                 method_name,
                 dag_id,
                 gcp_key,
                 dag_name=None,
                 require_local_executor=False,
                 example_dags_folder=CONTRIB_OPERATORS_EXAMPLES_DAG_FOLDER,
                 project_extra=None):
        super(DagGcpSystemTestCase, self).__init__(method_name=method_name,
                                                   gcp_key=gcp_key,
                                                   project_extra=project_extra)
        self.dag_id = dag_id
        # Default the DAG file name to '<dag_id>.py' when not given.
        self.dag_name = self.dag_id + '.py' if not dag_name else dag_name
        self.example_dags_folder = example_dags_folder
        self.require_local_executor = require_local_executor
        # Holds the temp dir used to park existing DAGs during DB reset.
        self.temp_dir = None
    @staticmethod
    def _get_dag_folder():
        return DAG_FOLDER
    @staticmethod
    def _get_files_to_link(path):
        """
        Returns all file names (note - file names not paths)
        that have the same base name as the .py dag file (for example dag_name.sql etc.)
        :param path: path to the dag file.
        :return: list of files matching the base name
        """
        prefix, ext = os.path.splitext(path)
        assert ext == '.py', "Dag name should be a .py file and is {} file".format(ext)
        files_to_link = []
        for file in glob(prefix + ".*"):
            files_to_link.append(os.path.basename(file))
        return files_to_link
    def _symlink_dag_and_associated_files(self, remove=False):
        # Create (or with remove=True, delete) symlinks in the DAG
        # folder for the DAG file and its associated files.
        target_folder = self._get_dag_folder()
        source_path = os.path.join(self.example_dags_folder, self.dag_name)
        for file_name in self._get_files_to_link(source_path):
            source_path = os.path.join(self.example_dags_folder, file_name)
            target_path = os.path.join(target_folder, file_name)
            if remove:
                try:
                    self.log.info("Remove symlink: {} -> {} ".format(
                        target_path, source_path))
                    os.remove(target_path)
                except OSError:
                    # Already gone - nothing to clean up.
                    pass
            else:
                if not os.path.exists(target_path):
                    self.log.info("Symlink: {} -> {} ".format(target_path, source_path))
                    os.symlink(source_path, target_path)
                else:
                    self.log.info("Symlink {} already exists. Not symlinking it.".
                                  format(target_path))
    def _store_dags_to_temporary_directory(self):
        # Move everything out of the DAG folder into a fresh temp dir.
        dag_folder = self._get_dag_folder()
        self.temp_dir = mkdtemp()
        self.log.info("Storing DAGS from {} to temporary directory {}".
                      format(dag_folder, self.temp_dir))
        try:
            os.mkdir(dag_folder)
        except OSError:
            # The folder already exists.
            pass
        for file in os.listdir(dag_folder):
            move(os.path.join(dag_folder, file), os.path.join(self.temp_dir, file))
    def _restore_dags_from_temporary_directory(self):
        # Move the previously parked DAG files back into the DAG folder.
        dag_folder = self._get_dag_folder()
        self.log.info("Restoring DAGS to {} from temporary directory {}"
                      .format(dag_folder, self.temp_dir))
        for file in os.listdir(self.temp_dir):
            move(os.path.join(self.temp_dir, file), os.path.join(dag_folder, file))
    def _run_dag(self):
        # Load the DAG from the DAG folder, clear its state and run it.
        self.log.info("Attempting to run DAG: {}".format(self.dag_id))
        if not self.setup_called:
            raise AirflowException("Please make sure to call super.setUp() in your "
                                   "test class!")
        dag_folder = self._get_dag_folder()
        dag_bag = models.DagBag(dag_folder=dag_folder, include_examples=False)
        self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        dag = dag_bag.get_dag(self.dag_id)
        if dag is None:
            raise AirflowException(
                "The Dag {} could not be found. It's either an import problem or "
                "the dag {} was not symlinked to the DAGs folder. "
                "The content of the {} folder is {}".
                format(self.dag_id,
                       self.dag_id + ".py",
                       dag_folder,
                       os.listdir(dag_folder)))
        dag.clear(reset_dag_runs=True)
        dag.run(ignore_first_depends_on_past=True, verbose=True)
    @staticmethod
    def _check_local_executor_setup():
        # Verify AIRFLOW_CONFIG points at the Postgres/LocalExecutor
        # configuration required by some DAGs.
        postgres_path = os.path.realpath(os.path.join(
            AIRFLOW_MAIN_FOLDER,
            "tests", "contrib", "operators", "postgres_local_executor.cfg"))
        if postgres_path != os.environ.get('AIRFLOW_CONFIG'):
            raise AirflowException(
                """
                Please set AIRFLOW_CONFIG variable to '{}'
                and make sure you have a Postgres server running locally and
                airflow/airflow.db database created.
                You can create the database via these commands:
                'createuser root'
                'createdb airflow/airflow.db`
                """.format(postgres_path))
    # noinspection PyPep8Naming
    def setUp(self):
        if self.require_local_executor:
            self._check_local_executor_setup()
        try:
            # We want to avoid random errors while database got reset - those
            # Are apparently triggered by parser trying to parse DAGs while
            # The tables are dropped. We move the dags temporarily out of the dags folder
            # and move them back after reset
            self._store_dags_to_temporary_directory()
            try:
                db_utils.upgradedb()
                db_utils.resetdb()
            finally:
                self._restore_dags_from_temporary_directory()
            self._symlink_dag_and_associated_files()
            super(DagGcpSystemTestCase, self).setUp()
        except Exception as e:
            # In case of any error during setup - restore the authentication
            self.gcp_authenticator.gcp_restore_authentication()
            raise e
    def tearDown(self):
        self._symlink_dag_and_associated_files(remove=True)
        super(DagGcpSystemTestCase, self).tearDown()
| |
"""Test the Smappee component config flow module."""
from http import HTTPStatus
from unittest.mock import patch
from homeassistant import data_entry_flow, setup
from homeassistant.components import zeroconf
from homeassistant.components.smappee.const import (
CONF_SERIALNUMBER,
DOMAIN,
ENV_CLOUD,
ENV_LOCAL,
TOKEN_URL,
)
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from tests.common import MockConfigEntry
# Dummy OAuth client credentials used by the config-flow tests.
CLIENT_ID = "1234"
CLIENT_SECRET = "5678"
async def test_show_user_form(hass):
    """Test that the user set up form is served."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_USER},
    )
    # The first step asks which environment (cloud or local) to use.
    assert result["step_id"] == "environment"
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_show_user_host_form(hass):
    """Test that the host form is served after choosing the local option."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_USER},
    )
    assert result["step_id"] == "environment"
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # Choosing the local environment advances to the host-entry step.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"environment": ENV_LOCAL}
    )
    assert result["step_id"] == ENV_LOCAL
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_show_zeroconf_connection_error_form(hass):
    """Test that the zeroconf confirmation form is served."""
    # logon() returning None simulates a device that cannot be reached.
    with patch("pysmappee.api.SmappeeLocalApi.logon", return_value=None):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_ZEROCONF},
            data=zeroconf.ZeroconfServiceInfo(
                host="1.2.3.4",
                addresses=["1.2.3.4"],
                port=22,
                hostname="Smappee1006000212.local.",
                type="_ssh._tcp.local.",
                name="Smappee1006000212._ssh._tcp.local.",
                properties={"_raw": {}},
            ),
        )
        # The serial number is extracted from the mDNS hostname.
        assert result["description_placeholders"] == {CONF_SERIALNUMBER: "1006000212"}
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "zeroconf_confirm"
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"host": "1.2.3.4"}
        )
        # Unreachable device aborts the flow without creating an entry.
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "cannot_connect"
        assert len(hass.config_entries.async_entries(DOMAIN)) == 0
async def test_show_zeroconf_connection_error_form_next_generation(hass):
    """Test that the zeroconf confirmation form is served."""
    # Serial prefix 50 marks a next-generation (MQTT-based) device.
    with patch("pysmappee.mqtt.SmappeeLocalMqtt.start_attempt", return_value=False):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_ZEROCONF},
            data=zeroconf.ZeroconfServiceInfo(
                host="1.2.3.4",
                addresses=["1.2.3.4"],
                port=22,
                hostname="Smappee5001000212.local.",
                type="_ssh._tcp.local.",
                name="Smappee5001000212._ssh._tcp.local.",
                properties={"_raw": {}},
            ),
        )
        assert result["description_placeholders"] == {CONF_SERIALNUMBER: "5001000212"}
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "zeroconf_confirm"
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"host": "1.2.3.4"}
        )
        # A failed MQTT start attempt aborts without creating an entry.
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "cannot_connect"
        assert len(hass.config_entries.async_entries(DOMAIN)) == 0
async def test_connection_error(hass):
    """Test we show user form on Smappee connection error."""
    # Both the HTTP logon and the MQTT start attempt fail.
    with patch("pysmappee.api.SmappeeLocalApi.logon", return_value=None), patch(
        "pysmappee.mqtt.SmappeeLocalMqtt.start_attempt", return_value=None
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_USER},
        )
        assert result["step_id"] == "environment"
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"environment": ENV_LOCAL}
        )
        assert result["step_id"] == ENV_LOCAL
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"host": "1.2.3.4"}
        )
        # Flow aborts when neither connection method succeeds.
        assert result["reason"] == "cannot_connect"
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_user_local_connection_error(hass):
    """Test we show user form on Smappee connection error in local next generation option."""
    # MQTT connects but the device config never becomes ready.
    with patch("pysmappee.api.SmappeeLocalApi.logon", return_value=None), patch(
        "pysmappee.mqtt.SmappeeLocalMqtt.start_attempt", return_value=True
    ), patch("pysmappee.mqtt.SmappeeLocalMqtt.start", return_value=True), patch(
        "pysmappee.mqtt.SmappeeLocalMqtt.stop", return_value=True
    ), patch(
        "pysmappee.mqtt.SmappeeLocalMqtt.is_config_ready", return_value=None
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_USER},
        )
        assert result["step_id"] == "environment"
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"environment": ENV_LOCAL}
        )
        assert result["step_id"] == ENV_LOCAL
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"host": "1.2.3.4"}
        )
        assert result["reason"] == "cannot_connect"
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_zeroconf_wrong_mdns(hass):
    """Test we abort if unsupported mDNS name is discovered."""
    # Hostname does not match the expected 'Smappee<serial>' pattern.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_ZEROCONF},
        data=zeroconf.ZeroconfServiceInfo(
            host="1.2.3.4",
            addresses=["1.2.3.4"],
            port=22,
            hostname="example.local.",
            type="_ssh._tcp.local.",
            name="example._ssh._tcp.local.",
            properties={"_raw": {}},
        ),
    )
    assert result["reason"] == "invalid_mdns"
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_full_user_wrong_mdns(hass):
    """Test we abort user flow if unsupported mDNS name got resolved."""
    # The device answers, but reports an mDNS host name whose serial
    # prefix (51...) is not a supported model.
    with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
        "pysmappee.api.SmappeeLocalApi.load_advanced_config",
        return_value=[{"key": "mdnsHostName", "value": "Smappee5100000001"}],
    ), patch(
        "pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
    ), patch(
        "pysmappee.api.SmappeeLocalApi.load_instantaneous",
        return_value=[{"key": "phase0ActivePower", "value": 0}],
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_USER},
        )
        assert result["step_id"] == "environment"
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"environment": ENV_LOCAL}
        )
        assert result["step_id"] == ENV_LOCAL
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"host": "1.2.3.4"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "invalid_mdns"
async def test_user_device_exists_abort(hass):
    """Test we abort user flow if Smappee device already configured."""
    with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
        "pysmappee.api.SmappeeLocalApi.load_advanced_config",
        return_value=[{"key": "mdnsHostName", "value": "Smappee1006000212"}],
    ), patch(
        "pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
    ), patch(
        "pysmappee.api.SmappeeLocalApi.load_instantaneous",
        return_value=[{"key": "phase0ActivePower", "value": 0}],
    ):
        # An entry for the same serial number already exists.
        config_entry = MockConfigEntry(
            domain=DOMAIN,
            data={"host": "1.2.3.4"},
            unique_id="1006000212",
            source=SOURCE_USER,
        )
        config_entry.add_to_hass(hass)
        assert len(hass.config_entries.async_entries(DOMAIN)) == 1
        # Walk the user flow to the local host step.
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_USER},
        )
        assert result["step_id"] == "environment"
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"environment": ENV_LOCAL}
        )
        assert result["step_id"] == ENV_LOCAL
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        # Submitting the host resolves the same serial number and aborts.
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"host": "1.2.3.4"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "already_configured"
        # No duplicate entry was created.
        assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_zeroconf_device_exists_abort(hass):
    """Test we abort zeroconf flow if Smappee device already configured."""
    with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
        "pysmappee.api.SmappeeLocalApi.load_advanced_config",
        return_value=[{"key": "mdnsHostName", "value": "Smappee1006000212"}],
    ), patch(
        "pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
    ), patch(
        "pysmappee.api.SmappeeLocalApi.load_instantaneous",
        return_value=[{"key": "phase0ActivePower", "value": 0}],
    ):
        # An entry for the same serial number already exists.
        config_entry = MockConfigEntry(
            domain=DOMAIN,
            data={"host": "1.2.3.4"},
            unique_id="1006000212",
            source=SOURCE_USER,
        )
        config_entry.add_to_hass(hass)
        assert len(hass.config_entries.async_entries(DOMAIN)) == 1
        # Discovery of the same device must abort immediately.
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_ZEROCONF},
            data=zeroconf.ZeroconfServiceInfo(
                host="1.2.3.4",
                addresses=["1.2.3.4"],
                port=22,
                hostname="Smappee1006000212.local.",
                type="_ssh._tcp.local.",
                name="Smappee1006000212._ssh._tcp.local.",
                properties={"_raw": {}},
            ),
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "already_configured"
        # No duplicate entry was created.
        assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_cloud_device_exists_abort(hass):
    """Test we abort cloud flow if Smappee Cloud device already configured."""
    # Pre-seed the registry with an existing cloud entry.
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="smappeeCloud",
        source=SOURCE_USER,
    ).add_to_hass(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    flow_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert flow_result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow_result["reason"] == "already_configured_device"
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_zeroconf_abort_if_cloud_device_exists(hass):
    """Test we abort zeroconf flow if Smappee Cloud device already configured."""
    # Pre-seed the registry with an existing cloud entry.
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="smappeeCloud",
        source=SOURCE_USER,
    ).add_to_hass(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    discovery_info = zeroconf.ZeroconfServiceInfo(
        host="1.2.3.4",
        addresses=["1.2.3.4"],
        port=22,
        hostname="Smappee1006000212.local.",
        type="_ssh._tcp.local.",
        name="Smappee1006000212._ssh._tcp.local.",
        properties={"_raw": {}},
    )
    flow_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )
    assert flow_result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow_result["reason"] == "already_configured_device"
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_zeroconf_confirm_abort_if_cloud_device_exists(hass):
    """Test we abort zeroconf confirm flow if Smappee Cloud device already configured."""
    discovery_info = zeroconf.ZeroconfServiceInfo(
        host="1.2.3.4",
        addresses=["1.2.3.4"],
        port=22,
        hostname="Smappee1006000212.local.",
        type="_ssh._tcp.local.",
        name="Smappee1006000212._ssh._tcp.local.",
        properties={"_raw": {}},
    )
    flow_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )
    # A cloud entry appears while the confirm step is still pending.
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="smappeeCloud",
        source=SOURCE_USER,
    ).add_to_hass(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    flow_result = await hass.config_entries.flow.async_configure(
        flow_result["flow_id"]
    )
    assert flow_result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow_result["reason"] == "already_configured_device"
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_abort_cloud_flow_if_local_device_exists(hass):
    """Test we abort the cloud flow if a Smappee local device already configured."""
    # Pre-seed the registry with an existing local entry.
    MockConfigEntry(
        domain=DOMAIN,
        data={"host": "1.2.3.4"},
        unique_id="1006000212",
        source=SOURCE_USER,
    ).add_to_hass(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    flow_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    # Choosing the cloud environment must abort because a local entry exists.
    flow_result = await hass.config_entries.flow.async_configure(
        flow_result["flow_id"], {"environment": ENV_CLOUD}
    )
    assert flow_result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow_result["reason"] == "already_configured_local_device"
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_full_user_flow(
    hass, hass_client_no_auth, aioclient_mock, current_request_with_host
):
    """Check full flow."""
    # Set up the component with OAuth client credentials.
    assert await setup.async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {CONF_CLIENT_ID: CLIENT_ID, CONF_CLIENT_SECRET: CLIENT_SECRET},
            "http": {"base_url": "https://example.com"},
        },
    )
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_USER},
    )
    # Choosing the cloud environment hands off to the external OAuth2 step.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"environment": ENV_CLOUD}
    )
    state = config_entry_oauth2_flow._encode_jwt(
        hass,
        {
            "flow_id": result["flow_id"],
            "redirect_uri": "https://example.com/auth/external/callback",
        },
    )
    # Simulate the OAuth provider redirecting back with an auth code.
    client = await hass_client_no_auth()
    resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
    assert resp.status == HTTPStatus.OK
    assert resp.headers["content-type"] == "text/html; charset=utf-8"
    # Mock the token exchange endpoint.
    aioclient_mock.post(
        TOKEN_URL["PRODUCTION"],
        json={
            "refresh_token": "mock-refresh-token",
            "access_token": "mock-access-token",
            "type": "Bearer",
            "expires_in": 60,
        },
    )
    with patch(
        "homeassistant.components.smappee.async_setup_entry", return_value=True
    ) as mock_setup:
        await hass.config_entries.flow.async_configure(result["flow_id"])
    # Finishing the flow creates exactly one entry and sets it up once.
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert len(mock_setup.mock_calls) == 1
async def test_full_zeroconf_flow(hass):
    """Test the full zeroconf flow."""
    with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
        "pysmappee.api.SmappeeLocalApi.load_advanced_config",
        return_value=[{"key": "mdnsHostName", "value": "Smappee1006000212"}],
    ), patch(
        "pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
    ), patch(
        "pysmappee.api.SmappeeLocalApi.load_instantaneous",
        return_value=[{"key": "phase0ActivePower", "value": 0}],
    ), patch(
        "homeassistant.components.smappee.async_setup_entry", return_value=True
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_ZEROCONF},
            data=zeroconf.ZeroconfServiceInfo(
                host="1.2.3.4",
                addresses=["1.2.3.4"],
                port=22,
                hostname="Smappee1006000212.local.",
                type="_ssh._tcp.local.",
                name="Smappee1006000212._ssh._tcp.local.",
                properties={"_raw": {}},
            ),
        )
        # Discovery shows the confirm form with the parsed serial number.
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "zeroconf_confirm"
        assert result["description_placeholders"] == {CONF_SERIALNUMBER: "1006000212"}
        # Confirming creates the entry keyed by the serial number.
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"host": "1.2.3.4"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "smappee1006000212"
        assert len(hass.config_entries.async_entries(DOMAIN)) == 1
        entry = hass.config_entries.async_entries(DOMAIN)[0]
        assert entry.unique_id == "1006000212"
async def test_full_user_local_flow(hass):
    """Test the full user flow for a local device."""
    # NOTE: the original docstring said "zeroconf flow"; this test walks the
    # user-initiated flow with the local environment.
    with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
        "pysmappee.api.SmappeeLocalApi.load_advanced_config",
        return_value=[{"key": "mdnsHostName", "value": "Smappee1006000212"}],
    ), patch(
        "pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
    ), patch(
        "pysmappee.api.SmappeeLocalApi.load_instantaneous",
        return_value=[{"key": "phase0ActivePower", "value": 0}],
    ), patch(
        "homeassistant.components.smappee.async_setup_entry", return_value=True
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_USER},
        )
        assert result["step_id"] == "environment"
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["description_placeholders"] is None
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"environment": ENV_LOCAL},
        )
        assert result["step_id"] == ENV_LOCAL
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        # Submitting the host creates the entry keyed by the serial number.
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"host": "1.2.3.4"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "smappee1006000212"
        assert len(hass.config_entries.async_entries(DOMAIN)) == 1
        entry = hass.config_entries.async_entries(DOMAIN)[0]
        assert entry.unique_id == "1006000212"
async def test_full_zeroconf_flow_next_generation(hass):
    """Test the full zeroconf flow for a next-generation (local MQTT) device."""
    # 50xx serials take the MQTT path, so the MQTT client is patched instead
    # of the local HTTP API.
    with patch(
        "pysmappee.mqtt.SmappeeLocalMqtt.start_attempt", return_value=True
    ), patch("pysmappee.mqtt.SmappeeLocalMqtt.start", return_value=None,), patch(
        "pysmappee.mqtt.SmappeeLocalMqtt.is_config_ready",
        return_value=None,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_ZEROCONF},
            data=zeroconf.ZeroconfServiceInfo(
                host="1.2.3.4",
                addresses=["1.2.3.4"],
                port=22,
                hostname="Smappee5001000212.local.",
                type="_ssh._tcp.local.",
                name="Smappee5001000212._ssh._tcp.local.",
                properties={"_raw": {}},
            ),
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "zeroconf_confirm"
        assert result["description_placeholders"] == {CONF_SERIALNUMBER: "5001000212"}
        # Confirming creates the entry keyed by the serial number.
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"host": "1.2.3.4"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "smappee5001000212"
        assert len(hass.config_entries.async_entries(DOMAIN)) == 1
        entry = hass.config_entries.async_entries(DOMAIN)[0]
        assert entry.unique_id == "5001000212"
| |
"""
dogleg algorithm with rectangular trust regions for least-squares minimization.
The description of the algorithm can be found in [Voglis]_. The algorithm does
trust-region iterations, but the shape of trust regions is rectangular as
opposed to conventional elliptical. The intersection of a trust region and
an initial feasible region is again some rectangle. Thus on each iteration a
bound-constrained quadratic optimization problem is solved.
A quadratic problem is solved by well-known dogleg approach, where the
function is minimized along piecewise-linear "dogleg" path [NumOpt]_,
Chapter 4. If Jacobian is not rank-deficient then the function is decreasing
along this path, and optimization amounts to simply following along this
path as long as a point stays within the bounds. A constrained Cauchy step
(along the anti-gradient) is considered for safety in rank-deficient cases;
in these situations the convergence might be slow.
If during iterations some variable hit the initial bound and the component
of anti-gradient points outside the feasible region, then a next dogleg step
won't make any progress. At this state such variables satisfy first-order
optimality conditions and they are excluded before computing a next dogleg
step.
Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense
Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for
dense and sparse matrices, or Jacobian being LinearOperator). The second
option allows solving very large problems (up to a couple of million
residuals on a regular PC), provided the Jacobian matrix is sufficiently
sparse. But note that dogbox is not very good for solving problems with
large number of constraints, because of variables exclusion-inclusion on each
iteration (a required number of function evaluations might be high or accuracy
of a solution will be poor), thus its large-scale usage is probably limited
to unconstrained problems.
References
----------
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
Approach for Unconstrained and Bound Constrained Nonlinear
Optimization", WSEAS International Conference on Applied
Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
"""
import numpy as np
from numpy.linalg import lstsq, norm
from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
from scipy.optimize import OptimizeResult
from .common import (
step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
build_quadratic_1d, minimize_quadratic_1d, compute_grad,
compute_jac_scaling, check_termination, scale_for_robust_loss_function,
print_header, print_iteration)
def lsmr_operator(Jop, d, active_set):
    """Compute a LinearOperator for LSMR as used by the dogbox algorithm.

    The operator works in scaled variables (scaling vector ``d``) and
    emulates slicing the Jacobian to its free columns without actually
    slicing: components in ``active_set`` are excluded from both products.

    Parameters
    ----------
    Jop : LinearOperator
        Operator wrapping the Jacobian.
    d : ndarray
        Scaling vector applied on the way in (matvec) and out (rmatvec).
    active_set : ndarray of bool
        Mask of active (bound-constrained) variables to exclude.
    """
    m, n = Jop.shape

    def matvec(x):
        x_free = x.ravel().copy()
        x_free[active_set] = 0
        # Fix: the original multiplied the unmasked `x`, leaving matvec
        # inconsistent with rmatvec (which zeroes active components).
        return Jop.matvec(x_free * d)

    def rmatvec(x):
        r = d * Jop.rmatvec(x)
        r[active_set] = 0
        return r

    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
def find_intersection(x, tr_bounds, lb, ub):
    """Find intersection of trust-region bounds and initial bounds.

    Returns
    -------
    lb_total, ub_total : ndarray with shape of x
        Lower and upper bounds of the intersection region.
    orig_l, orig_u : ndarray of bool with shape of x
        True where an original bound is the binding one in the
        intersection region.
    tr_l, tr_u : ndarray of bool with shape of x
        True where a trust-region bound is the binding one in the
        intersection region.
    """
    # Shift the original bounds so the current point is the origin.
    shifted_lb = lb - x
    shifted_ub = ub - x
    # Tighter of the two bounds on each side.
    lb_total = np.maximum(shifted_lb, -tr_bounds)
    ub_total = np.minimum(shifted_ub, tr_bounds)
    # Record which bound supplied each side.
    orig_l = np.equal(lb_total, shifted_lb)
    orig_u = np.equal(ub_total, shifted_ub)
    tr_l = np.equal(lb_total, -tr_bounds)
    tr_u = np.equal(ub_total, tr_bounds)
    return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
    """Find dogleg step in a rectangular region.

    Parameters
    ----------
    x : ndarray, shape (n,)
        Current point (free variables only).
    newton_step : ndarray, shape (n,)
        Gauss-Newton step computed by the caller.
    g : ndarray, shape (n,)
        Gradient of the cost with respect to the free variables.
    a, b : float
        Coefficients of the 1-D quadratic model along -g, as produced by
        ``build_quadratic_1d`` in the caller.
    tr_bounds : ndarray, shape (n,)
        Half-widths of the rectangular trust region.
    lb, ub : ndarray, shape (n,)
        Original lower and upper bounds on the free variables.

    Returns
    -------
    step : ndarray, shape (n,)
        Computed dogleg step.
    bound_hits : ndarray of int, shape (n,)
        Each component shows whether a corresponding variable hits the
        initial bound after the step is taken:
            * 0 - a variable doesn't hit the bound.
            * -1 - lower bound is hit.
            * 1 - upper bound is hit.
    tr_hit : bool
        Whether the step hit the boundary of the trust-region.
    """
    lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
        x, tr_bounds, lb, ub
    )
    bound_hits = np.zeros_like(x, dtype=int)
    # Full Gauss-Newton step fits in the region: take it unchanged.
    if in_bounds(newton_step, lb_total, ub_total):
        return newton_step, bound_hits, False
    # Largest feasible step size along the anti-gradient.
    to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)
    # The classical dogleg algorithm would check if Cauchy step fits into
    # the bounds, and just return its constrained version if not. But in a
    # rectangular trust region it makes sense to try to improve constrained
    # Cauchy step too. Thus we don't distinguish these two cases.
    cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g
    step_diff = newton_step - cauchy_step
    # Move from the Cauchy point toward the Newton point until a bound.
    step_size, hits = step_size_to_bound(cauchy_step, step_diff,
                                         lb_total, ub_total)
    bound_hits[(hits < 0) & orig_l] = -1
    bound_hits[(hits > 0) & orig_u] = 1
    tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)
    return cauchy_step + step_size * step_diff, bound_hits, tr_hit
def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, scaling,
           loss_function, tr_solver, tr_options, verbose):
    """Minimize a least-squares cost with the rectangular-trust-region
    dogleg algorithm described in the module docstring.

    Returns an `OptimizeResult` with the final point, cost, true residuals,
    Jacobian, full gradient, optimality measure, active-bound mask, the
    function/Jacobian evaluation counters and a termination status code.
    """
    f = f0
    f_true = f.copy()
    nfev = 1
    J = J0
    njev = 1
    # Optionally apply a robust loss: cost and (J, f) are reweighted.
    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)
    g = compute_grad(J, f)
    if scaling == 'jac':
        scale, scale_inv = compute_jac_scaling(J)
    else:
        scale, scale_inv = scaling, 1 / scaling
    # Initial trust-region radius in scaled variables.
    Delta = norm(x0 * scale, ord=np.inf)
    if Delta == 0:
        Delta = 1.0
    # -1 / 0 / 1: variable sits on lower bound / is free / on upper bound.
    on_bound = np.zeros_like(x0, dtype=int)
    on_bound[np.equal(x0, lb)] = -1
    on_bound[np.equal(x0, ub)] = 1
    x = x0
    step = np.empty_like(x0)
    if max_nfev is None:
        max_nfev = x0.size * 100
    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None
    if verbose == 2:
        print_header()
    while True:
        # Variables on a bound with the anti-gradient pointing outward are
        # first-order optimal and excluded from this iteration.
        active_set = on_bound * g < 0
        free_set = ~active_set
        g_free = g[free_set]
        g_full = g.copy()
        g[active_set] = 0
        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1
        if verbose == 2:
            print_iteration(iteration, nfev, cost, actual_reduction,
                            step_norm, g_norm)
        if termination_status is not None or nfev == max_nfev:
            break
        x_free = x[free_set]
        lb_free = lb[free_set]
        ub_free = ub[free_set]
        scale_inv_free = scale_inv[free_set]
        # Compute (Gauss-)Newton and build quadratic model for Cauchy step.
        if tr_solver == 'exact':
            J_free = J[:, free_set]
            newton_step = lstsq(J_free, -f)[0]
            # Coefficients for the quadratic model along the anti-gradient.
            a, b = build_quadratic_1d(J_free, g_free, -g_free)
        elif tr_solver == 'lsmr':
            Jop = aslinearoperator(J)
            # We compute lsmr step in scaled variables and then
            # transform back to normal variables, if lsmr would give exact lsq
            # solution this would be equivalent to not doing any
            # transformations, but from experience it's better this way.
            # We pass active_set to make computations as if we selected
            # the free subset of J columns, but without actually doing any
            # slicing, which is expensive for sparse matrices and impossible
            # for LinearOperator.
            lsmr_op = lsmr_operator(Jop, scale_inv, active_set)
            newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
            newton_step *= scale_inv_free
            # Components of g for active variables were zeroed, so this call
            # is correct and equivalent to using J_free and g_free.
            a, b = build_quadratic_1d(Jop, g, -g)
        actual_reduction = -1.0
        # Inner loop: shrink the trust region until the cost decreases.
        while actual_reduction <= 0 and nfev < max_nfev:
            tr_bounds = Delta * scale_inv_free
            step_free, on_bound_free, tr_hit = dogleg_step(
                x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)
            step.fill(0.0)
            step[free_set] = step_free
            if tr_solver == 'exact':
                predicted_reduction = -evaluate_quadratic(J_free, g_free,
                                                          step_free)
            elif tr_solver == 'lsmr':
                predicted_reduction = -evaluate_quadratic(Jop, g, step)
            x_new = x + step
            f_new = fun(x_new)
            nfev += 1
            step_h_norm = norm(step * scale)
            # Non-finite residuals: shrink the region and retry.
            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue
            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new
            Delta, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, tr_hit
            )
            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
            if termination_status is not None:
                break
        if actual_reduction > 0:
            # Accept the step and refresh residuals/Jacobian/gradient.
            on_bound[free_set] = on_bound_free
            x = x_new
            # Set variables exactly at the boundary.
            mask = on_bound == -1
            x[mask] = lb[mask]
            mask = on_bound == 1
            x[mask] = ub[mask]
            f = f_new
            f_true = f.copy()
            cost = cost_new
            J = jac(x, f)
            njev += 1
            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)
            g = compute_grad(J, f)
            if scaling == 'jac':
                scale, scale_inv = compute_jac_scaling(J, scale)
        else:
            step_norm = 0
            actual_reduction = 0
        iteration += 1
    if termination_status is None:
        termination_status = 0
    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
        active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
| |
#!/usr/bin/env python3
import json
import logging
import os
import re
from enum import Enum
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.error import HTTPError, URLError
from urllib.parse import parse_qsl, urlparse
from urllib.request import Request, urlopen
import gen_universe
# Binds to all available interfaces
HOST_NAME = ''
# Gets the port number from the $PORT_UNIVERSECONVERTER environment variable
PORT_NUMBER = int(os.environ['PORT_UNIVERSECONVERTER'])
# Maximum accepted upstream repo size, in MiB (default 20)
MAX_REPO_SIZE = int(os.environ.get('MAX_REPO_SIZE', '20'))
# Constants
MAX_TIMEOUT = 60  # seconds; used as the urlopen timeout
MAX_BYTES = MAX_REPO_SIZE * 1024 * 1024
# HTTP header names
header_user_agent = 'User-Agent'
header_accept = 'Accept'
header_content_type = 'Content-Type'
header_content_length = 'Content-Length'
param_charset = 'charset'
default_charset = 'utf-8'
# JSON keys, query parameters and routes
json_key_packages = 'packages'
param_url = 'url'
transform_url_path = '/transform'
logger = logging.getLogger(__name__)
logging.basicConfig(
    level=os.environ.get('LOGLEVEL', 'DEBUG'),
    format="[%(asctime)s|%(threadName)s-%(funcName)s(%(lineno)d)|%(levelname)s]: %(message)s",
)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each request in a separate thread."""
class Handler(BaseHTTPRequestHandler):
    """Request handler; the only supported route is GET /transform."""

    def log_message(self, format, *args):
        """Override the default behavior of writing to stderr with `logging`"""
        logger.info("[%s] %s", self.address_string(), format % args)

    def do_GET(self):
        """Dispatch the request; any error surfaces as 400 Bad Request."""
        logger.debug('\n{}\n{}'.format(self.requestline, self.headers).rstrip())
        url_path = urlparse(self.path).path
        try:
            if url_path == transform_url_path:
                self.handle_transform()
            else:
                raise ValueError(ErrorResponse.INVALID_PATH.to_msg(url_path))
        except Exception as e:
            self.send_error(
                HTTPStatus.BAD_REQUEST,
                explain=e.message if hasattr(e, 'message') else str(e)
            )

    def handle_transform(self):
        """
        Respond to the GET request. The expected format of this request is:
        http://<host>:<port>/transform?url=<url> with `User-Agent`
        and `Accept` headers
        """
        errors = _validate_request(self)
        if errors:
            self.send_error(HTTPStatus.BAD_REQUEST, explain=errors)
            return
        query = dict(parse_qsl(urlparse(self.path).query))
        if param_url not in query:
            self.send_error(
                HTTPStatus.BAD_REQUEST,
                explain=ErrorResponse.PARAM_NOT_PRESENT.to_msg(param_url)
            )
            return
        user_agent = self.headers.get(header_user_agent)
        accept = self.headers.get(header_accept)
        decoded_url = query.get(param_url)
        try:
            content_type, json_response = handle(decoded_url, user_agent, accept)
        except ValueError as e:
            # Bad input or invalid upstream data: client-visible 400.
            self.send_error(HTTPStatus.BAD_REQUEST, explain=str(e))
            return
        except HTTPError as e:
            logger.info(
                'Upstream error :\nURL: [%s]\nReason: [%s %s]\nBody:\n[%s]',
                decoded_url,
                e.code,
                e.reason,
                e.read(),
                exc_info=True
            )
            self.send_error(HTTPStatus.BAD_GATEWAY, explain=str(e))
            return
        except URLError as e:
            logger.info(
                'Route error :\nURL: [%s]\nReason: [%s]',
                decoded_url,
                e.reason,
                exc_info=True
            )
            self.send_error(HTTPStatus.BAD_GATEWAY, explain=str(e))
            return
        except Exception as e:
            logger.exception(
                'Unhandled exception for [{}] UA [{}] Accept [{}]'.format(
                    query,
                    user_agent,
                    accept
                ),
                e
            )
            raise e
        # Encode once: Content-Length must count bytes, not characters.
        # The previous len(json_response) undercounted non-ASCII payloads.
        body = json_response.encode()
        self.send_response(HTTPStatus.OK)
        self.send_header(header_content_type, content_type)
        self.send_header(header_content_length, str(len(body)))
        self.end_headers()
        self.wfile.write(body)
def run_server():
    """Start the threaded HTTP server and serve until interrupted.

    :return: None
    """
    server = ThreadedHTTPServer((HOST_NAME, PORT_NUMBER), Handler)
    logger.warning('Server Starts on port - %s', PORT_NUMBER)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C: close the listening socket before exiting.
        server.server_close()
        logger.warning('Server Stops on port - %s', PORT_NUMBER)
def handle(decoded_url, user_agent, accept) -> (str, str):
    """Returns the requested json data. May raise an error instead, if it fails.

    :param decoded_url: The url to be fetched from
    :type decoded_url: str
    :param user_agent: User-Agent header value; parsed for the DC/OS version
    :type user_agent: str
    :param accept: Accept header value; parsed for the repo version
    :return Tuple of (content type to respond with, requested json data)
    :rtype (str, str) where the second element is a valid json object
    """
    # Forward the client's User-Agent/Accept headers to the upstream repo.
    req = Request(decoded_url)
    req.add_header(header_user_agent, user_agent)
    req.add_header(header_accept, accept)
    logger.debug('\n{}\n{}\n{}'.format(
        '<--- Upstream Request --->',
        req.full_url,
        _format_dict(req.headers)
    ))
    with urlopen(req, timeout=MAX_TIMEOUT) as res:
        charset = res.info().get_param(param_charset) or default_charset
        # NOTE(review): the size guard trusts the advertised Content-Length;
        # a misbehaving upstream could still send a larger body on read().
        if header_content_length not in res.headers:
            raise ValueError(ErrorResponse.ENDPOINT_HEADER_MISS.to_msg())
        if int(res.headers.get(header_content_length)) > MAX_BYTES:
            raise ValueError(ErrorResponse.MAX_SIZE.to_msg())
        resp_content = res.read().decode(charset)
        # Body is only logged for non-2xx/3xx response codes.
        logger.debug('\n{}\n{} {}\n{}\n{}'.format(
            '<--- Upstream Response --->',
            res.getcode(),
            res.reason,
            _format_dict(res.headers),
            resp_content if res.getcode() // 200 != 1 else ''
        ))
    content_type, repo_version = _get_repo_version(accept)
    dcos_version = _get_dcos_version(user_agent)
    logger.debug('Version [%s] DC/OS [%s]', repo_version, dcos_version)
    try:
        json_body = json.loads(resp_content)
    except ValueError as e:
        logger.exception(e)
        raise ValueError(ErrorResponse.INVALID_JSON_FROM_UPSTREAM.to_msg(decoded_url))
    assert json_key_packages in json_body, 'Expected key [{}] is not present in response'.format(json_key_packages)
    return content_type, render_json(
        json_body[json_key_packages],
        dcos_version,
        repo_version
    )
def render_json(packages, dcos_version, repo_version):
    """Filter packages for the DC/OS version, validate, and serialize.

    :param packages: packages list
    :type packages: list
    :param dcos_version: version of dcos
    :type dcos_version: str
    :param repo_version: version of universe repo
    :type repo_version: str
    :return filtered json data based on parameters
    :rtype str
    """
    filtered = gen_universe.filter_and_downgrade_packages_by_version(
        packages,
        dcos_version
    )
    repo = {json_key_packages: filtered}
    # Reject output that does not conform to the repo schema.
    validation_errors = gen_universe.validate_repo_with_schema(repo, repo_version)
    if validation_errors:
        logger.error(validation_errors)
        raise ValueError(ErrorResponse.VALIDATION_ERROR.to_msg(validation_errors))
    return json.dumps(repo)
def _validate_request(s):
    """
    :param s: The in built base http request handler
    :type s: BaseHTTPRequestHandler
    :return Error message (if any)
    :rtype String or None
    """
    # Both headers are required; report the first missing one.
    for required_header in (header_user_agent, header_accept):
        if required_header not in s.headers:
            return ErrorResponse.HEADER_NOT_PRESENT.to_msg(required_header)
    return None
def _get_repo_version(accept_headers) -> (str, str):
"""Returns the version of the universe repo parsed.
:param accept_headers: String
:return A tuple of (matched_header, repo_version) or raises Error
"""
version_regex = r'(version=)(\b\w+\b)'
filtered_headers = list(filter(
lambda x: len(re.findall(version_regex, x)) > 0,
accept_headers.split(",")
))
if not filtered_headers:
raise ValueError(ErrorResponse.UNABLE_PARSE.to_msg(
header_accept, accept_headers
))
headers = {}
for h in filtered_headers:
headers[h] = re.findall(version_regex, h)[0][1]
return sorted(headers.items(), key=lambda x: x[1], reverse=True)[0]
def _get_dcos_version(user_agent_header):
"""Parses the version of dcos from the specified header.
:param user_agent_header: String
:return dcos version as a string or raises an Error
:rtype str or raises an Error
"""
result = re.search(r'\bdcos/\b\d\.\d{1,2}', user_agent_header)
if result is None:
raise ValueError(ErrorResponse.UNABLE_PARSE.to_msg(
header_user_agent, user_agent_header
))
return str(result.group().split('/')[1])
def _format_dict(d):
"""Takes a dictionary and returns it in a pretty formatted string
:param d: dict
:return pretty formatted dictionary
:rtype str
"""
return '\n'.join('{}: {}'.format(k, v) for k, v in d.items())
class ErrorResponse(Enum):
    """Templates for user-facing error messages; render with `to_msg`."""

    INVALID_PATH = 'URL Path {} is invalid. Expected path /transform'
    HEADER_NOT_PRESENT = 'Header {} is missing'
    PARAM_NOT_PRESENT = 'Request parameter {} is missing'
    UNABLE_PARSE = 'Unable to parse header {}:{}'
    VALIDATION_ERROR = 'Validation errors during processing {}'
    MAX_SIZE = 'Endpoint response exceeds maximum content size'
    ENDPOINT_HEADER_MISS = 'Endpoint doesn\'t return Content-Length header'
    INVALID_JSON_FROM_UPSTREAM = 'Upstream [{}] did not return a json body'

    def to_msg(self, *args):
        """Format the message template with *args* and return it."""
        return self.value.format(*args)
# Script entry point: start the converter HTTP server.
if __name__ == '__main__':
    run_server()
| |
# Copyright 2010-present Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import csv
import six
from cgi import parse_header
from email import message_from_string
from email.utils import parsedate_tz, mktime_tz
from xml.etree import ElementTree
from riak import RiakError
from riak.content import RiakContent
from riak.riak_object import VClock
from riak.multidict import MultiDict
from riak.transports.http.search import XMLSearchResult
from riak.util import decode_index_value, bytes_to_str
if six.PY2:
from urllib import unquote_plus
else:
from urllib.parse import unquote_plus
# subtract length of "Link: " header string and newline
MAX_LINK_HEADER_SIZE = 8192 - 8
class HttpCodec(object):
"""
Methods for HTTP transport that marshals and unmarshals HTTP
messages.
"""
    def _parse_body(self, robj, response, expected_statuses):
        """
        Parse the body of an object response and populate the object.

        :param robj: the RiakObject to populate
        :param response: (status, headers, data) tuple, or None
        :param expected_statuses: status codes accepted for this request
        :return: the populated object, or None for empty/404 responses
        """
        # If no response given, then return.
        if response is None:
            return None
        status, headers, data = response
        # Check if the server is down(status==0)
        if not status:
            m = 'Could not contact Riak Server: http://{0}:{1}!'.format(
                self._node.host, self._node.http_port)
            raise RiakError(m)
        # Make sure expected code came back
        self.check_http_code(status, expected_statuses)
        if 'x-riak-vclock' in headers:
            robj.vclock = VClock(headers['x-riak-vclock'], 'base64')
        # If 404(Not Found), then clear the object.
        if status == 404:
            robj.siblings = []
            return None
        # If 201 Created, we need to extract the location and set the
        # key on the object.
        elif status == 201:
            robj.key = headers['location'].strip().split('/')[-1]
        # If 300(Siblings), apply the siblings to the object
        elif status == 300:
            ctype, params = parse_header(headers['content-type'])
            if ctype == 'multipart/mixed':
                if six.PY3:
                    data = bytes_to_str(data)
                # Split the multipart body on the MIME boundary; the first
                # and last fragments are the preamble/epilogue.
                boundary = re.compile('\r?\n--%s(?:--)?\r?\n' %
                                      re.escape(params['boundary']))
                parts = [message_from_string(p)
                         for p in re.split(boundary, data)[1:-1]]
                robj.siblings = [self._parse_sibling(RiakContent(robj),
                                                     part.items(),
                                                     part.get_payload())
                                 for part in parts]
                # Invoke sibling-resolution logic
                if robj.resolver is not None:
                    robj.resolver(robj)
                return robj
            else:
                raise Exception('unexpected sibling response format: {0}'.
                                format(ctype))
        # Single-content response: one sibling built from the response.
        robj.siblings = [self._parse_sibling(RiakContent(robj),
                                             headers.items(),
                                             data)]
        return robj
    def _parse_sibling(self, sibling, headers, data):
        """
        Parses a single sibling out of a response.

        :param sibling: the RiakContent to populate
        :param headers: iterable of (header, value) pairs for this sibling
        :param data: the raw payload for this sibling
        :return: the populated sibling
        """
        sibling.exists = True
        # Parse the headers...
        for header, value in headers:
            header = header.lower()
            if header == 'content-type':
                sibling.content_type, sibling.charset = \
                    self._parse_content_type(value)
            elif header == 'etag':
                sibling.etag = value
            elif header == 'link':
                sibling.links = self._parse_links(value)
            elif header == 'last-modified':
                # Stored as a Unix timestamp.
                sibling.last_modified = mktime_tz(parsedate_tz(value))
            elif header.startswith('x-riak-meta-'):
                metakey = header.replace('x-riak-meta-', '')
                sibling.usermeta[metakey] = value
            elif header.startswith('x-riak-index-'):
                field = header.replace('x-riak-index-', '')
                # Index values arrive comma-separated; csv handles quoting
                # and leading whitespace.
                reader = csv.reader([value], skipinitialspace=True)
                for line in reader:
                    for token in line:
                        token = decode_index_value(field, token)
                        sibling.add_index(field, token)
            elif header == 'x-riak-deleted':
                # Tombstone marker: the sibling is a deleted value.
                sibling.exists = False
        sibling.encoded_data = data
        return sibling
def _to_link_header(self, link):
"""
Convert the link tuple to a link header string. Used internally.
"""
try:
bucket, key, tag = link
except ValueError:
raise RiakError("Invalid link tuple %s" % link)
tag = tag if tag is not None else bucket
url = self.object_path(bucket, key)
header = '<%s>; riaktag="%s"' % (url, tag)
return header
def _parse_links(self, linkHeaders):
links = []
oldform = "</([^/]+)/([^/]+)/([^/]+)>; ?riaktag=\"([^\"]+)\""
newform = "</(buckets)/([^/]+)/keys/([^/]+)>; ?riaktag=\"([^\"]+)\""
for linkHeader in linkHeaders.strip().split(','):
linkHeader = linkHeader.strip()
matches = (re.match(oldform, linkHeader) or
re.match(newform, linkHeader))
if matches is not None:
link = (unquote_plus(matches.group(2)),
unquote_plus(matches.group(3)),
unquote_plus(matches.group(4)))
links.append(link)
return links
    def _add_links_for_riak_object(self, robject, headers):
        """
        Add 'Link' headers for the object's links, batching multiple
        links into comma-separated header lines so each stays under
        MAX_LINK_HEADER_SIZE. Returns the (mutated) headers.
        """
        links = robject.links
        if links:
            current_header = ''
            for link in links:
                header = self._to_link_header(link)
                # Flush the accumulated line before it would exceed the
                # per-header size limit, then start a fresh one.
                if len(current_header + header) > MAX_LINK_HEADER_SIZE:
                    headers.add('Link', current_header)
                    current_header = ''
                # Separate entries within one header line with ', '.
                if current_header != '':
                    header = ', ' + header
                current_header += header
            # Emit whatever is left (always non-empty here).
            headers.add('Link', current_header)
        return headers
def _build_put_headers(self, robj, if_none_match=False):
"""Build the headers for a POST/PUT request."""
# Construct the headers...
if robj.charset is not None:
content_type = ('%s; charset="%s"' %
(robj.content_type, robj.charset))
else:
content_type = robj.content_type
headers = MultiDict({'Content-Type': content_type,
'X-Riak-ClientId': self._client_id})
# Add the vclock if it exists...
if robj.vclock is not None:
headers['X-Riak-Vclock'] = robj.vclock.encode('base64')
# Create the header from metadata
self._add_links_for_riak_object(robj, headers)
for key in robj.usermeta.keys():
headers['X-Riak-Meta-%s' % key] = robj.usermeta[key]
for field, value in robj.indexes:
key = 'X-Riak-Index-%s' % field
if key in headers:
headers[key] += ", " + str(value)
else:
headers[key] = str(value)
if if_none_match:
headers['If-None-Match'] = '*'
return headers
def _normalize_json_search_response(self, json):
"""
Normalizes a JSON search response so that PB and HTTP have the
same return value
"""
result = {}
if 'facet_counts' in json:
result['facet_counts'] = json[u'facet_counts']
if 'grouped' in json:
result['grouped'] = json[u'grouped']
if 'stats' in json:
result['stats'] = json[u'stats']
if u'response' in json:
result['num_found'] = json[u'response'][u'numFound']
result['max_score'] = float(json[u'response'][u'maxScore'])
docs = []
for doc in json[u'response'][u'docs']:
resdoc = {}
if u'_yz_rk' in doc:
# Is this a Riak 2.0 result?
resdoc = doc
else:
# Riak Search 1.0 Legacy assumptions about format
resdoc[u'id'] = doc[u'id']
if u'fields' in doc:
for k, v in six.iteritems(doc[u'fields']):
resdoc[k] = v
docs.append(resdoc)
result['docs'] = docs
return result
    def _normalize_xml_search_response(self, xml):
        """
        Normalizes an XML search response so that PB and HTTP have the
        same return value
        """
        # Stream the XML through a custom target; XMLSearchResult.close()
        # produces the normalized result object.
        target = XMLSearchResult()
        parser = ElementTree.XMLParser(target=target)
        parser.feed(xml)
        return parser.close()
def _parse_content_type(self, value):
"""
Split the content-type header into two parts:
1) Actual main/sub encoding type
2) charset
:param value: Complete MIME content-type string
"""
content_type, params = parse_header(value)
if 'charset' in params:
charset = params['charset']
else:
charset = None
return content_type, charset
def _decode_datatype(self, dtype, value):
if not dtype == 'map':
return value
map = {}
for key in value:
field = self._map_key_to_pair(key)
map[field] = self._decode_datatype(field[1], value[key])
return map
def _map_key_to_pair(self, key):
name, _, type = key.rpartition('_')
return (name, type)
    def _map_pair_to_key(self, pair):
        # Inverse of _map_key_to_pair: re-join a (name, type) pair into
        # the wire-format 'name_type' key.
        return "_".join(pair)
def _encode_dt_op(self, dtype, op):
if dtype in ('counter', 'register'):
# ('increment', some_int)
# ('assign', some_str)
return dict([op])
elif dtype == 'flag':
return op
elif dtype == 'set':
set_op = {}
if 'adds' in op:
set_op['add_all'] = op['adds']
if 'removes' in op:
set_op['remove_all'] = op['removes']
return set_op
elif dtype == 'hll':
hll_op = {}
if 'adds' in op:
hll_op['add_all'] = op['adds']
return hll_op
elif dtype == 'map':
map_op = {}
for fop in op:
fopname = fop[0]
fopkey = self._map_pair_to_key(fop[1])
if fopname in ('add', 'remove'):
map_op.setdefault(fopname, []).append(fopkey)
elif fopname == 'update':
updates = map_op.setdefault(fopname, {})
updates[fopkey] = self._encode_dt_op(fop[1][1], fop[2])
return map_op
| |
"Implements Model"
import numpy as np
from .costed import CostedConstraintSet
from ..nomials import Monomial
from .prog_factories import _progify_fctry, _solve_fctry
from .gp import GeometricProgram
from .sgp import SequentialGeometricProgram
from ..small_scripts import mag
from ..tools.autosweep import autosweep_1d
from ..exceptions import InvalidGPConstraint
from .. import NamedVariables
from ..tools.docstring import expected_unbounded
from .set import add_meq_bounds
class Model(CostedConstraintSet):
    """Symbolic representation of an optimization problem.

    The Model class is used both directly to create models with constants and
    sweeps, and indirectly inherited to create custom model classes.

    Arguments
    ---------
    cost : Posynomial (optional)
        Defaults to `Monomial(1)`.

    constraints : ConstraintSet or list of constraints (optional)
        Defaults to an empty list.

    substitutions : dict (optional)
        This dictionary will be substituted into the problem before solving,
        and also allows the declaration of sweeps and linked sweeps.

    name : str (optional)
        Allows "naming" a model in a way similar to inherited instances,
        and overrides the inherited name if there is one.

    Attributes with side effects
    ----------------------------
    `program` is set during a solve
    `solution` is set at the end of a solve
    """
    # name and num identify a model uniquely
    name = None
    num = None
    # naming holds the name and num environment in which a model was created
    # this includes its own name and num, and those of models containing it
    naming = None
    program = None
    solution = None

    def __init__(self, cost=None, constraints=None, *args, **kwargs):
        setup_vars = None
        substitutions = kwargs.pop("substitutions", None)  # reserved keyword
        if hasattr(self, "setup"):
            self.cost = None
            with NamedVariables(self.__class__.__name__):
                start_args = [cost, constraints]
                args = tuple(a for a in start_args if a is not None) + args
                cs = self.setup(*args, **kwargs)  # pylint: disable=no-member
                if (isinstance(cs, tuple) and len(cs) == 2
                        and isinstance(cs[1], dict)):
                    constraints, substitutions = cs  # TODO: remove
                else:
                    constraints = cs
            from .. import NAMEDVARS, MODELS, MODELNUMS
            setup_vars = NAMEDVARS[tuple(MODELS), tuple(MODELNUMS)]
            self.name, self.num = MODELS[-1], MODELNUMS[-1]
            self.naming = (tuple(MODELS), tuple(MODELNUMS))
            cost = self.cost  # TODO: remove
        elif args and not substitutions:
            # backwards compatibility: substitutions as third arg
            substitutions, = args
        cost = cost or Monomial(1)
        constraints = constraints or []
        if setup_vars:
            # add all the vars created in .setup to the Model's varkeys
            # even if they aren't used in any constraints
            self.unique_varkeys = frozenset(v.key for v in setup_vars)
        CostedConstraintSet.__init__(self, cost, constraints, substitutions)
        if hasattr(self, "setup") and self.__class__.__doc__:
            if (("Unbounded" in self.__class__.__doc__ or
                 "Bounded by" in self.__class__.__doc__) and
                    "SKIP VERIFICATION" not in self.__class__.__doc__):
                self.verify_docstring()

    gp = _progify_fctry(GeometricProgram)
    sp = _progify_fctry(SequentialGeometricProgram)
    solve = _solve_fctry(_progify_fctry(GeometricProgram, "solve"))
    localsolve = _solve_fctry(_progify_fctry(SequentialGeometricProgram,
                                             "localsolve"))

    def verify_docstring(self):  # pylint:disable=too-many-locals,too-many-branches,too-many-statements
        "Verifies docstring bounds are sufficient but not excessive."
        err = "while verifying %s:\n" % self.__class__.__name__
        bounded, meq_bounded = self.bounded.copy(), self.meq_bounded.copy()
        doc = self.__class__.__doc__
        exp_unbounds = expected_unbounded(self, doc)
        unexp_bounds = bounded.intersection(exp_unbounds)
        if unexp_bounds:  # anything bounded that shouldn't be? err!
            for direction in ["lower", "upper"]:
                badvks = [v for v, d in unexp_bounds if d == direction]
                if not badvks:
                    continue
                # BUG FIX: pluralize on the number of variables, not on
                # the length of the joined string (which was >1 for any
                # variable with a multi-character name).
                plural = len(badvks) > 1
                badvks = ", ".join(str(v) for v in badvks)
                badvks += (" were" if plural else " was")
                err += (" %s %s-bounded; expected %s-unbounded"
                        "\n" % (badvks, direction, direction))
            raise ValueError(err)
        bounded.update(exp_unbounds)  # if not, treat expected as bounded
        add_meq_bounds(bounded, meq_bounded)  # and add more meqs
        self.missingbounds = {}  # now let's figure out what's missing
        for bound in meq_bounded:  # first add the un-dealt-with meq bounds
            for condition in list(meq_bounded[bound]):
                meq_bounded[bound].remove(condition)
                newcond = condition - bounded
                if newcond and not any(c.issubset(newcond)
                                       for c in meq_bounded[bound]):
                    meq_bounded[bound].add(newcond)
            bsets = " or ".join(str(list(c)) for c in meq_bounded[bound])
            self.missingbounds[bound] = (", but would gain it from any of"
                                         " these sets of bounds: " + bsets)
        # then add everything that's not in bounded
        if len(bounded)+len(self.missingbounds) != 2*len(self.varkeys):
            for key in self.varkeys:
                for bound in ("upper", "lower"):
                    if (key, bound) not in bounded:
                        if (key, bound) not in self.missingbounds:
                            self.missingbounds[(key, bound)] = ""
        if self.missingbounds:  # anything unbounded? err!
            boundstrs = "\n".join("  %s has no %s bound%s" % (v, b, x)
                                  for (v, b), x
                                  in self.missingbounds.items())
            docstring = ("To fix this add the following to %s's"
                         " docstring (you may not need it all):"
                         " \n" % self.__class__.__name__)
            for direction in ["upper", "lower"]:
                mb = [k for (k, b) in self.missingbounds if b == direction]
                if mb:
                    docstring += """
                    %s Unbounded
                    ---------------
                    %s
                    """ % (direction.title(), ", ".join(set(k.name for k in mb)))
            raise ValueError(err + boundstrs + "\n\n" + docstring)

    def as_gpconstr(self, x0):
        "Returns approximating constraint, keeping name and num"
        cs = CostedConstraintSet.as_gpconstr(self, x0)
        cs.name, cs.num = self.name, self.num
        return cs

    def subconstr_str(self, excluded=None):
        "The collapsed appearance of a ConstraintBase"
        return "%s_%s" % (self.name, self.num) if self.name else None

    def subconstr_latex(self, excluded=None):
        "The collapsed appearance of a ConstraintBase"
        return "%s_{%s}" % (self.name, self.num) if self.name else None

    def sweep(self, sweeps, **solveargs):
        "Sweeps {var: values} pairs in sweeps. Returns swept solutions."
        sols = []
        for sweepvar, sweepvals in sweeps.items():
            original_val = self.substitutions.get(sweepvar, None)
            self.substitutions.update({sweepvar: ('sweep', sweepvals)})
            try:
                sols.append(self.solve(**solveargs))
            except InvalidGPConstraint:
                sols.append(self.localsolve(**solveargs))
            # BUG FIX: test against None, so a falsy-but-present original
            # substitution (e.g. 0) is restored instead of deleted.
            if original_val is not None:
                self.substitutions[sweepvar] = original_val
            else:
                del self.substitutions[sweepvar]
        if len(sols) == 1:
            return sols[0]
        return sols

    def autosweep(self, sweeps, tol=0.01, samplepoints=100, **solveargs):
        """Autosweeps {var: (start, end)} pairs in sweeps to tol.

        Returns swept and sampled solutions.
        The original simplex tree can be accessed at sol.bst
        """
        sols = []
        for sweepvar, sweepvals in sweeps.items():
            sweepvar = self[sweepvar].key
            start, end = sweepvals
            bst = autosweep_1d(self, tol, sweepvar, [start, end], **solveargs)
            sols.append(bst.sample_at(np.linspace(start, end, samplepoints)))
        if len(sols) == 1:
            return sols[0]
        return sols

    # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    def debug(self, solver=None, verbosity=1, **solveargs):
        """Attempts to diagnose infeasible models.

        If a model debugs but errors in a process_result call, debug again
        with `process_results=False`
        """
        from .relax import ConstantsRelaxed, ConstraintsRelaxed
        from .bounded import Bounded
        sol = None
        solveargs["solver"] = solver
        solveargs["verbosity"] = verbosity - 1
        solveargs["process_result"] = False
        if verbosity:
            print("< DEBUGGING >")
            print("> Trying with bounded variables and relaxed constants:")
        bounded = Bounded(self)
        if self.substitutions:
            constsrelaxed = ConstantsRelaxed(bounded)
            feas = Model(constsrelaxed.relaxvars.prod()**30 * self.cost,
                         constsrelaxed)
            # NOTE: It hasn't yet been seen but might be possible that
            #       the self.cost component above could cause infeasibility
        else:
            feas = Model(self.cost, bounded)
        try:
            try:
                sol = feas.solve(**solveargs)
            except InvalidGPConstraint:
                sol = feas.localsolve(**solveargs)
            sol["boundedness"] = bounded.check_boundaries(sol)
            if self.substitutions:
                relaxed = get_relaxed([sol(r) for r in constsrelaxed.relaxvars],
                                      constsrelaxed.origvars,
                                      min_return=0 if sol["boundedness"] else 1)
                if verbosity and relaxed:
                    if sol["boundedness"]:
                        print("and these constants relaxed:")
                    else:
                        print("\nSolves with these constants relaxed:")
                    for (_, orig) in relaxed:
                        print("  %s: relaxed from %-.4g to %-.4g"
                              % (orig, mag(constsrelaxed.constants[orig.key]),
                                 mag(sol(orig))))
                    # BUG FIX: bare `print` is a no-op expression on
                    # Python 3; call it to emit the intended blank line.
                    print()
            if verbosity:
                print(">> Success!")
        except (ValueError, RuntimeWarning):
            if verbosity:
                print(">> Failure.")
                print("> Trying with relaxed constraints:")
            try:
                constrsrelaxed = ConstraintsRelaxed(self)
                feas = Model(constrsrelaxed.relaxvars.prod()**30 * self.cost,
                             constrsrelaxed)
                try:
                    sol = feas.solve(**solveargs)
                except InvalidGPConstraint:
                    sol = feas.localsolve(**solveargs)
                relaxed = get_relaxed(sol(constrsrelaxed.relaxvars),
                                      range(len(feas[0][0])))
                if verbosity and relaxed:
                    print("\nSolves with these constraints relaxed:")
                    for relaxval, i in relaxed:
                        constraint = feas[0][0][i][0]
                        # substitutions of the final relax value
                        conleft = constraint.left.sub(
                            {constrsrelaxed.relaxvars[i]: relaxval})
                        conright = constraint.right.sub(
                            {constrsrelaxed.relaxvars[i]: relaxval})
                        origconstraint = constrsrelaxed.origconstrs[i]
                        relax_percent = "%i%%" % (0.5+(relaxval-1)*100)
                        print(" %3i: %5s relaxed, from %s %s %s \n"
                              "          to %s %s %s "
                              % (i, relax_percent, origconstraint.left,
                                 origconstraint.oper, origconstraint.right,
                                 conleft, constraint.oper, conright))
                if verbosity:
                    print("\n>> Success!")
            except (ValueError, RuntimeWarning):
                if verbosity:
                    print(">> Failure")
        if verbosity:
            # BUG FIX: bare `print` is a no-op on Python 3; call it.
            print()
        return sol
def get_relaxed(relaxvals, mapped_list, min_return=1):
    """Determines which relaxvars are considered 'relaxed'.

    Pairs each relaxation value with its mapped item, sorts by decreasing
    relaxation, and truncates to the entries that count as meaningfully
    relaxed -- always keeping at least `min_return` entries when any exist.
    Returns a (possibly empty) list of (relaxval, mapped_item) tuples.
    """
    sortrelaxed = sorted(zip(relaxvals, mapped_list), key=lambda x: x[0],
                         reverse=True)
    # BUG FIX: guard empty input, which previously raised IndexError below.
    if not sortrelaxed:
        return sortrelaxed
    # arbitrarily 1.01 is the min that counts as "relaxed"
    mostrelaxed = max(sortrelaxed[0][0], 1.01)
    for i, (val, _) in enumerate(sortrelaxed):
        if i >= min_return and val <= 1.01 and (val-1) <= (mostrelaxed-1)/10:
            return sortrelaxed[:i]
    return sortrelaxed
| |
"""
This module is executed in remote subprocesses and helps to
control a remote testing session and relay back information.
It assumes that 'py' is importable and does not have dependencies
on the rest of the xdist code. This means that the xdist-plugin
needs not to be installed in remote environments.
"""
import sys
import os
import time
import py
import pytest
from execnet.gateway_base import dumps, DumpError
from _pytest.config import _prepareconfig, Config
try:
    from setproctitle import setproctitle
except ImportError:
    # setproctitle is an optional dependency: fall back to a no-op so
    # callers can set the process title unconditionally.
    def setproctitle(title):
        pass
def worker_title(title):
    """Best-effort update of this worker's OS process title."""
    try:
        setproctitle(title)
    except Exception:
        # changing the process name is very optional, no errors please
        pass
class WorkerInteractor:
    """Worker-side controller liaison.

    Registers itself as a pytest plugin, receives commands from the
    controller over the execnet channel, runs the scheduled test items,
    and relays session/test events back as (name, kwargs) tuples.
    """

    def __init__(self, config, channel):
        self.config = config
        self.workerid = config.workerinput.get("workerid", "?")
        self.testrunuid = config.workerinput["testrunuid"]
        self.log = py.log.Producer("worker-%s" % self.workerid)
        if not config.option.debug:
            # silence this worker's logger unless --debug was given
            py.log.setconsumer(self.log._keywords, None)
        self.channel = channel
        config.pluginmanager.register(self)

    def sendevent(self, name, **kwargs):
        """Send one (name, kwargs) event to the controller."""
        self.log("sending", name, kwargs)
        self.channel.send((name, kwargs))

    @pytest.hookimpl
    def pytest_internalerror(self, excrepr):
        formatted_error = str(excrepr)
        for line in formatted_error.split("\n"):
            self.log("IERROR>", line)
        # BUG FIX: previously called `interactor.sendevent(...)`, relying
        # on the module-level global created only under __channelexec__;
        # use self so the hook works wherever the plugin is registered.
        self.sendevent("internal_error", formatted_error=formatted_error)

    @pytest.hookimpl
    def pytest_sessionstart(self, session):
        self.session = session
        workerinfo = getinfodict()
        self.sendevent("workerready", workerinfo=workerinfo)

    @pytest.hookimpl(hookwrapper=True)
    def pytest_sessionfinish(self, exitstatus):
        # in pytest 5.0+, exitstatus is an IntEnum object
        self.config.workeroutput["exitstatus"] = int(exitstatus)
        yield
        self.sendevent("workerfinished", workeroutput=self.config.workeroutput)

    @pytest.hookimpl
    def pytest_collection(self, session):
        self.sendevent("collectionstart")

    @pytest.hookimpl
    def pytest_runtestloop(self, session):
        """Main loop: consume controller commands until shutdown/EOF."""
        self.log("entering main loop")
        torun = []
        while 1:
            try:
                name, kwargs = self.channel.receive()
            except EOFError:
                return True
            self.log("received command", name, kwargs)
            if name == "runtests":
                torun.extend(kwargs["indices"])
            elif name == "runtests_all":
                torun.extend(range(len(session.items)))
            self.log("items to run:", torun)
            # only run if we have an item and a next item
            while len(torun) >= 2:
                self.run_one_test(torun)
            if name == "shutdown":
                if torun:
                    self.run_one_test(torun)
                break
        return True

    def run_one_test(self, torun):
        """Run the next queued item and report its wall-clock duration."""
        items = self.session.items
        self.item_index = torun.pop(0)
        item = items[self.item_index]
        if torun:
            nextitem = items[torun[0]]
        else:
            nextitem = None

        worker_title("[pytest-xdist running] %s" % item.nodeid)

        start = time.time()
        self.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
        duration = time.time() - start

        worker_title("[pytest-xdist idle]")

        self.sendevent(
            "runtest_protocol_complete", item_index=self.item_index, duration=duration
        )

    @pytest.hookimpl
    def pytest_collection_finish(self, session):
        try:
            topdir = str(self.config.rootpath)
        except AttributeError:  # pytest <= 6.1.0
            topdir = str(self.config.rootdir)
        self.sendevent(
            "collectionfinish",
            topdir=topdir,
            ids=[item.nodeid for item in session.items],
        )

    @pytest.hookimpl
    def pytest_runtest_logstart(self, nodeid, location):
        self.sendevent("logstart", nodeid=nodeid, location=location)

    @pytest.hookimpl
    def pytest_runtest_logfinish(self, nodeid, location):
        self.sendevent("logfinish", nodeid=nodeid, location=location)

    @pytest.hookimpl
    def pytest_runtest_logreport(self, report):
        data = self.config.hook.pytest_report_to_serializable(
            config=self.config, report=report
        )
        data["item_index"] = self.item_index
        data["worker_id"] = self.workerid
        data["testrun_uid"] = self.testrunuid
        assert self.session.items[self.item_index].nodeid == report.nodeid
        self.sendevent("testreport", data=data)

    @pytest.hookimpl
    def pytest_collectreport(self, report):
        # send only reports that have not passed to controller as optimization (#330)
        if not report.passed:
            data = self.config.hook.pytest_report_to_serializable(
                config=self.config, report=report
            )
            self.sendevent("collectreport", data=data)

    @pytest.hookimpl
    def pytest_warning_recorded(self, warning_message, when, nodeid, location):
        self.sendevent(
            "warning_recorded",
            warning_message_data=serialize_warning_message(warning_message),
            when=when,
            nodeid=nodeid,
            location=location,
        )
def serialize_warning_message(warning_message):
    """Flatten a warnings.WarningMessage into a channel-serializable dict,
    falling back to string/repr forms for values execnet cannot dump."""
    msg = warning_message.message
    if isinstance(msg, Warning):
        message_module = type(msg).__module__
        message_class_name = type(msg).__name__
        message_str = str(msg)
        # check now if we can serialize the warning arguments (#349)
        # if not, we will just use the exception message on the controller node
        try:
            dumps(msg.args)
        except DumpError:
            message_args = None
        else:
            message_args = msg.args
    else:
        message_str = msg
        message_module = None
        message_class_name = None
        message_args = None

    category = warning_message.category
    if category:
        category_module = category.__module__
        category_class_name = category.__name__
    else:
        category_module = None
        category_class_name = None

    result = {
        "message_str": message_str,
        "message_module": message_module,
        "message_class_name": message_class_name,
        "message_args": message_args,
        "category_module": category_module,
        "category_class_name": category_class_name,
    }
    # access private _WARNING_DETAILS because the attributes vary between Python versions
    for attr_name in warning_message._WARNING_DETAILS:
        if attr_name in ("message", "category"):
            continue
        attr = getattr(warning_message, attr_name)
        # Check if we can serialize the warning detail, marking `None` otherwise
        # Note that we need to define the attr (even as `None`) to allow deserializing
        try:
            dumps(attr)
        except DumpError:
            result[attr_name] = repr(attr)
        else:
            result[attr_name] = attr
    return result
def getinfodict():
    """Collect interpreter and platform facts to report to the controller."""
    import platform

    return {
        "version": sys.version,
        "version_info": tuple(sys.version_info),
        "sysplatform": sys.platform,
        "platform": platform.platform(),
        "executable": sys.executable,
        "cwd": os.getcwd(),
    }
def remote_initconfig(option_dict, args):
    # Build this worker's Config from the controller-supplied option dict;
    # used as the fallback path when Config lacks InvocationParams (see the
    # __channelexec__ block below). Terminal reporting is disabled because
    # output is relayed over the channel instead.
    option_dict["plugins"].append("no:terminal")
    return Config.fromdictargs(option_dict, args)
def setup_config(config, basetemp):
    """Force worker-local options: no looponfail/pdb, no nested
    distribution, and a controller-assigned basetemp."""
    option = config.option
    option.looponfail = False
    option.usepdb = False
    option.dist = "no"
    option.distload = False
    option.numprocesses = None
    option.maxprocesses = None
    option.basetemp = basetemp
if __name__ == "__channelexec__":
    # Entry point when execnet executes this module remotely: the
    # controller sends the setup payload over the channel, then this
    # process configures itself and runs a full pytest session.
    channel = channel  # type: ignore[name-defined] # noqa: F821
    workerinput, args, option_dict, change_sys_path = channel.receive()  # type: ignore[name-defined]

    # Either inherit the controller's sys.path or add the remote cwd so
    # the test package is importable in this process and subprocesses.
    if change_sys_path is None:
        importpath = os.getcwd()
        sys.path.insert(0, importpath)
        os.environ["PYTHONPATH"] = (
            importpath + os.pathsep + os.environ.get("PYTHONPATH", "")
        )
    else:
        sys.path = change_sys_path

    # Expose worker identity to the tests via environment variables.
    os.environ["PYTEST_XDIST_TESTRUNUID"] = workerinput["testrunuid"]
    os.environ["PYTEST_XDIST_WORKER"] = workerinput["workerid"]
    os.environ["PYTEST_XDIST_WORKER_COUNT"] = str(workerinput["workercount"])

    # Modern pytest (with Config.InvocationParams) can build the config
    # from args directly; older pytest goes through remote_initconfig().
    if hasattr(Config, "InvocationParams"):
        config = _prepareconfig(args, None)
    else:
        config = remote_initconfig(option_dict, args)
        config.args = args

    setup_config(config, option_dict.get("basetemp"))
    config._parser.prog = os.path.basename(workerinput["mainargv"][0])
    config.workerinput = workerinput  # type: ignore[attr-defined]
    config.workeroutput = {}  # type: ignore[attr-defined]
    interactor = WorkerInteractor(config, channel)  # type: ignore[name-defined]
    config.hook.pytest_cmdline_main(config=config)
| |
"""
Form classes
"""
from __future__ import unicode_literals
from collections import OrderedDict
import copy
import datetime
import warnings
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS
from django.forms.fields import Field, FileField
from django.forms.utils import flatatt, ErrorDict, ErrorList
from django.forms.widgets import Media, MediaDefiningClass, TextInput, Textarea
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import smart_text, force_text, python_2_unicode_compatible
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils import six
__all__ = ('BaseForm', 'Form')
def pretty_name(name):
    """Converts 'first_name' to 'First name'"""
    return name.replace('_', ' ').capitalize() if name else ''
def get_declared_fields(bases, attrs, with_base_fields=True):
    """
    Create a list of form field instances from the passed in 'attrs', plus any
    similar fields on the base classes (in 'bases'). This is used by both the
    Form and ModelForm metaclasses.

    If 'with_base_fields' is True, all fields from the bases are used.
    Otherwise, only fields in the 'declared_fields' attribute on the bases are
    used. The distinction is useful in ModelForm subclassing.
    Also integrates any additional media definitions.
    """
    # Deprecated shim: DeclarativeFieldsMetaclass below supersedes this.
    warnings.warn(
        "get_declared_fields is deprecated and will be removed in Django 1.9.",
        RemovedInDjango19Warning,
        stacklevel=2,
    )
    # Pop declared fields out of attrs so they don't remain as plain
    # class attributes, keeping declaration order via creation_counter.
    fields = [
        (field_name, attrs.pop(field_name))
        for field_name, obj in list(six.iteritems(attrs)) if isinstance(obj, Field)
    ]
    fields.sort(key=lambda x: x[1].creation_counter)
    # If this class is subclassing another Form, add that Form's fields.
    # Note that we loop over the bases in *reverse*. This is necessary in
    # order to preserve the correct order of fields.
    if with_base_fields:
        for base in bases[::-1]:
            if hasattr(base, 'base_fields'):
                fields = list(six.iteritems(base.base_fields)) + fields
    else:
        for base in bases[::-1]:
            if hasattr(base, 'declared_fields'):
                fields = list(six.iteritems(base.declared_fields)) + fields
    return OrderedDict(fields)
class DeclarativeFieldsMetaclass(MediaDefiningClass):
    """
    Metaclass that collects Fields declared on the base classes.
    """
    def __new__(mcs, name, bases, attrs):
        # Pull the Field instances out of this class's own namespace,
        # preserving declaration order via creation_counter.
        own_fields = []
        for key, value in list(attrs.items()):
            if isinstance(value, Field):
                own_fields.append((key, attrs.pop(key)))
        own_fields.sort(key=lambda pair: pair[1].creation_counter)
        attrs['declared_fields'] = OrderedDict(own_fields)

        new_class = (super(DeclarativeFieldsMetaclass, mcs)
                     .__new__(mcs, name, bases, attrs))

        # Walk the MRO from the root down so nearer classes win.
        declared_fields = OrderedDict()
        for base in reversed(new_class.__mro__):
            if hasattr(base, 'declared_fields'):
                declared_fields.update(base.declared_fields)
            # Field shadowing: assigning None removes an inherited field.
            for attr, value in base.__dict__.items():
                if value is None and attr in declared_fields:
                    declared_fields.pop(attr)

        new_class.base_fields = declared_fields
        new_class.declared_fields = declared_fields
        return new_class
@python_2_unicode_compatible
class BaseForm(object):
# This is the main implementation of all the Form logic. Note that this
# class is different than Form. See the comments by the Form class for more
# information. Any improvements to the form API should be made to *this*
# class, not to the Form class.
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=None,
                 empty_permitted=False):
        # A form is "bound" whenever any data or files were supplied.
        self.is_bound = data is not None or files is not None
        self.data = data or {}
        self.files = files or {}
        self.auto_id = auto_id
        self.prefix = prefix
        self.initial = initial or {}
        self.error_class = error_class
        # Translators: This is the default suffix added to form field labels
        self.label_suffix = label_suffix if label_suffix is not None else _(':')
        self.empty_permitted = empty_permitted
        self._errors = None  # Stores the errors after clean() has been called.
        self._changed_data = None
        # The base_fields class attribute is the *class-wide* definition of
        # fields. Because a particular *instance* of the class might want to
        # alter self.fields, we create self.fields here by copying base_fields.
        # Instances should always modify self.fields; they should not modify
        # self.base_fields.
        self.fields = copy.deepcopy(self.base_fields)
    def __str__(self):
        # Default string rendering is the <tr>-based table layout.
        return self.as_table()
def __repr__(self):
if self._errors is None:
is_valid = "Unknown"
else:
is_valid = self.is_bound and not bool(self._errors)
return '<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>' % {
'cls': self.__class__.__name__,
'bound': self.is_bound,
'valid': is_valid,
'fields': ';'.join(self.fields),
}
    def __iter__(self):
        # Iterating a form yields its BoundFields in field order.
        for name in self.fields:
            yield self[name]
def __getitem__(self, name):
"Returns a BoundField with the given name."
try:
field = self.fields[name]
except KeyError:
raise KeyError(
"Key %r not found in '%s'" % (name, self.__class__.__name__))
return BoundField(self, field, name)
    @property
    def errors(self):
        "Returns an ErrorDict for the data provided for the form"
        if self._errors is None:
            # Validate lazily on first access.
            self.full_clean()
        return self._errors
def is_valid(self):
"""
Returns True if the form has no errors. Otherwise, False. If errors are
being ignored, returns False.
"""
return self.is_bound and not self.errors
def add_prefix(self, field_name):
"""
Returns the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name
    def add_initial_prefix(self, field_name):
        """
        Add a 'initial' prefix for checking dynamic initial values
        """
        # Used by hidden initial widgets to distinguish submitted values
        # from the originally-rendered initial values.
        return 'initial-%s' % self.add_prefix(field_name)
    def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
        "Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
        top_errors = self.non_field_errors()  # Errors that should be displayed above all fields.
        output, hidden_fields = [], []
        for name, field in self.fields.items():
            html_class_attr = ''
            bf = self[name]
            # Escape and cache in local variable.
            bf_errors = self.error_class([conditional_escape(error) for error in bf.errors])
            if bf.is_hidden:
                # Hidden fields render no row of their own: surface their
                # errors at the top and stash their HTML for the last row.
                if bf_errors:
                    top_errors.extend(
                        [_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': force_text(e)}
                         for e in bf_errors])
                hidden_fields.append(six.text_type(bf))
            else:
                # Create a 'class="..."' attribute if the row should have any
                # CSS classes applied.
                css_classes = bf.css_classes()
                if css_classes:
                    html_class_attr = ' class="%s"' % css_classes
                if errors_on_separate_row and bf_errors:
                    output.append(error_row % force_text(bf_errors))
                if bf.label:
                    label = conditional_escape(force_text(bf.label))
                    label = bf.label_tag(label) or ''
                else:
                    label = ''
                if field.help_text:
                    help_text = help_text_html % force_text(field.help_text)
                else:
                    help_text = ''
                # Interpolate this field into the caller-supplied row template.
                output.append(normal_row % {
                    'errors': force_text(bf_errors),
                    'label': force_text(label),
                    'field': six.text_type(bf),
                    'help_text': help_text,
                    'html_class_attr': html_class_attr,
                    'field_name': bf.html_name,
                })
        if top_errors:
            output.insert(0, error_row % force_text(top_errors))
        if hidden_fields:  # Insert any hidden fields in the last row.
            str_hidden = ''.join(hidden_fields)
            if output:
                last_row = output[-1]
                # Chop off the trailing row_ender (e.g. '</td></tr>') and
                # insert the hidden fields.
                if not last_row.endswith(row_ender):
                    # This can happen in the as_p() case (and possibly others
                    # that users write): if there are only top errors, we may
                    # not be able to conscript the last row for our purposes,
                    # so insert a new, empty row.
                    last_row = (normal_row % {'errors': '', 'label': '',
                                              'field': '', 'help_text': '',
                                              'html_class_attr': html_class_attr})
                    output.append(last_row)
                output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
            else:
                # If there aren't any rows in the output, just append the
                # hidden fields.
                output.append(str_hidden)
        return mark_safe('\n'.join(output))
    def as_table(self):
        "Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
        # Row templates interpolate: errors, label, field, help_text,
        # html_class_attr (and field_name) -- see _html_output().
        return self._html_output(
            normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
            error_row='<tr><td colspan="2">%s</td></tr>',
            row_ender='</td></tr>',
            help_text_html='<br /><span class="helptext">%s</span>',
            errors_on_separate_row=False)
    def as_ul(self):
        "Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
        # Errors render inline within each <li>, not on a separate row.
        return self._html_output(
            normal_row='<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
            error_row='<li>%s</li>',
            row_ender='</li>',
            help_text_html=' <span class="helptext">%s</span>',
            errors_on_separate_row=False)
    def as_p(self):
        "Returns this form rendered as HTML <p>s."
        # Unlike as_table/as_ul, errors go on their own row above each <p>.
        return self._html_output(
            normal_row='<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
            error_row='%s',
            row_ender='</p>',
            help_text_html=' <span class="helptext">%s</span>',
            errors_on_separate_row=True)
    def non_field_errors(self):
        """
        Returns an ErrorList of errors that aren't associated with a particular
        field -- i.e., from Form.clean(). Returns an empty ErrorList if there
        are none.
        """
        # The 'nonfield' error_class marks these for CSS styling.
        return self.errors.get(NON_FIELD_ERRORS, self.error_class(error_class='nonfield'))
def _raw_value(self, fieldname):
"""
Returns the raw_value for a particular field name. This is just a
convenient wrapper around widget.value_from_datadict.
"""
field = self.fields[fieldname]
prefix = self.add_prefix(fieldname)
return field.widget.value_from_datadict(self.data, self.files, prefix)
def add_error(self, field, error):
    """
    Update the content of `self._errors`.
    The `field` argument is the name of the field to which the errors
    should be added. If its value is None the errors will be treated as
    NON_FIELD_ERRORS.
    The `error` argument can be a single error, a list of errors, or a
    dictionary that maps field names to lists of errors. What we define as
    an "error" can be either a simple string or an instance of
    ValidationError with its message attribute set and what we define as
    list or dictionary can be an actual `list` or `dict` or an instance
    of ValidationError with its `error_list` or `error_dict` attribute set.
    If `error` is a dictionary, the `field` argument *must* be None and
    errors will be added to the fields that correspond to the keys of the
    dictionary.
    """
    if not isinstance(error, ValidationError):
        # Normalize to ValidationError and let its constructor
        # do the hard work of making sense of the input.
        error = ValidationError(error)
    # Normalize `error` into a {field_name: error_list} mapping.
    if hasattr(error, 'error_dict'):
        if field is not None:
            raise TypeError(
                "The argument `field` must be `None` when the `error` "
                "argument contains errors for multiple fields."
            )
        else:
            error = error.error_dict
    else:
        error = {field or NON_FIELD_ERRORS: error.error_list}
    for field, error_list in error.items():
        if field not in self.errors:
            if field != NON_FIELD_ERRORS and field not in self.fields:
                raise ValueError(
                    "'%s' has no field named '%s'." % (self.__class__.__name__, field))
            # First error recorded for this field: create its ErrorList.
            # The non-field list gets the special 'nonfield' CSS class.
            if field == NON_FIELD_ERRORS:
                self._errors[field] = self.error_class(error_class='nonfield')
            else:
                self._errors[field] = self.error_class()
        self._errors[field].extend(error_list)
        # A field with errors has no valid cleaned value.
        if field in self.cleaned_data:
            del self.cleaned_data[field]
def has_error(self, field, code=None):
    """Returns True if `field` has an error; if `code` is given, only if
    an error with that exact code is present."""
    if code is None:
        return field in self.errors
    if field not in self.errors:
        return False
    return any(error.code == code
               for error in self.errors.as_data()[field])
def full_clean(self):
    """
    Cleans all of self.data and populates self._errors and
    self.cleaned_data.
    """
    self._errors = ErrorDict()
    if not self.is_bound:  # Stop further processing.
        return
    self.cleaned_data = {}
    # If the form is permitted to be empty, and none of the form data has
    # changed from the initial data, short circuit any validation.
    if self.empty_permitted and not self.has_changed():
        return
    # Per-field cleaning (including clean_<name>() hooks), then the
    # form-wide clean(), then the post-clean hook (model validation).
    self._clean_fields()
    self._clean_form()
    self._post_clean()
def _clean_fields(self):
    # Run each field's clean() and any clean_<name>() hook on the form,
    # collecting values into cleaned_data and errors into self._errors.
    for name, field in self.fields.items():
        # value_from_datadict() gets the data from the data dictionaries.
        # Each widget type knows how to retrieve its own data, because some
        # widgets split data over several HTML fields.
        value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
        try:
            if isinstance(field, FileField):
                # FileFields need the initial value to decide whether a
                # new file was actually uploaded.
                initial = self.initial.get(name, field.initial)
                value = field.clean(value, initial)
            else:
                value = field.clean(value)
            self.cleaned_data[name] = value
            # Give the form a chance to post-process via clean_<name>().
            if hasattr(self, 'clean_%s' % name):
                value = getattr(self, 'clean_%s' % name)()
                self.cleaned_data[name] = value
        except ValidationError as e:
            self.add_error(name, e)
def _clean_form(self):
    # Run the form-wide clean(); a ValidationError becomes a non-field
    # error, while a non-None return value replaces cleaned_data.
    try:
        cleaned_data = self.clean()
    except ValidationError as e:
        self.add_error(None, e)
        return
    if cleaned_data is not None:
        self.cleaned_data = cleaned_data
def _post_clean(self):
    """
    An internal hook for performing additional cleaning after form cleaning
    is complete. Used for model validation in model forms.
    """
    # Intentionally a no-op here; subclasses (e.g. ModelForm) override.
    pass
def clean(self):
    """
    Hook for doing any extra form-wide cleaning after Field.clean() has been
    called on every field. Any ValidationError raised by this method will
    not be associated with a particular field; it will have a special-case
    association with the field named '__all__'.
    """
    # Default implementation: pass cleaned_data through unchanged.
    return self.cleaned_data
def has_changed(self):
    """
    Returns True if data differs from initial.
    """
    return len(self.changed_data) > 0
@property
def changed_data(self):
    # Names of fields whose submitted value differs from the initial
    # value; computed lazily and cached on the instance.
    if self._changed_data is None:
        self._changed_data = []
        # XXX: For now we're asking the individual fields whether or not the
        # data has changed. It would probably be more efficient to hash the
        # initial data, store it in a hidden field, and compare a hash of the
        # submitted data, but we'd need a way to easily get the string value
        # for a given field. Right now, that logic is embedded in the render
        # method of each widget.
        for name, field in self.fields.items():
            prefixed_name = self.add_prefix(name)
            data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
            if not field.show_hidden_initial:
                initial_value = self.initial.get(name, field.initial)
                if callable(initial_value):
                    initial_value = initial_value()
            else:
                # The original value was rendered into a hidden input;
                # read it back and coerce it with to_python().
                initial_prefixed_name = self.add_initial_prefix(name)
                hidden_widget = field.hidden_widget()
                try:
                    initial_value = field.to_python(hidden_widget.value_from_datadict(
                        self.data, self.files, initial_prefixed_name))
                except ValidationError:
                    # Always assume data has changed if validation fails.
                    self._changed_data.append(name)
                    continue
            if field.has_changed(initial_value, data_value):
                self._changed_data.append(name)
    return self._changed_data
@property
def media(self):
    """
    Provide a description of all media required to render the widgets on this form
    """
    # Media objects combine via '+', deduplicating assets as they go.
    combined = Media()
    for fld in self.fields.values():
        combined = combined + fld.widget.media
    return combined
def is_multipart(self):
    """
    Returns True if the form needs to be multipart-encoded, i.e. it has
    FileInput. Otherwise, False.
    """
    return any(f.widget.needs_multipart_form
               for f in self.fields.values())
def hidden_fields(self):
    """
    Returns a list of all the BoundField objects that are hidden fields.
    Useful for manual form layout in templates.
    """
    hidden = []
    for bound_field in self:
        if bound_field.is_hidden:
            hidden.append(bound_field)
    return hidden
def visible_fields(self):
    """
    Returns a list of BoundField objects that aren't hidden fields.
    The opposite of the hidden_fields() method.
    """
    visible = []
    for bound_field in self:
        if not bound_field.is_hidden:
            visible.append(bound_field)
    return visible
class Form(six.with_metaclass(DeclarativeFieldsMetaclass, BaseForm)):
    "A collection of Fields, plus their associated data."
    # This is a separate class from BaseForm in order to abstract the way
    # self.fields is specified. This class (Form) is the one that does the
    # fancy metaclass stuff purely for the semantic sugar -- it allows one
    # to define a form using declarative syntax.
    # BaseForm itself has no way of designating self.fields.
@python_2_unicode_compatible
class BoundField(object):
    "A Field plus data"
    def __init__(self, form, field, name):
        # Bind a Field instance to a specific form and field name,
        # precomputing the HTML names used when rendering.
        self.form = form
        self.field = field
        self.name = name
        self.html_name = form.add_prefix(name)
        self.html_initial_name = form.add_initial_prefix(name)
        self.html_initial_id = form.add_initial_prefix(self.auto_id)
        if self.field.label is None:
            self.label = pretty_name(name)
        else:
            self.label = self.field.label
        self.help_text = field.help_text or ''
    def __str__(self):
        """Renders this field as an HTML widget."""
        if self.field.show_hidden_initial:
            # Also render a hidden copy of the initial value so
            # changed-data detection can compare against it.
            return self.as_widget() + self.as_hidden(only_initial=True)
        return self.as_widget()
    def __iter__(self):
        """
        Yields rendered strings that comprise all widgets in this BoundField.
        This really is only useful for RadioSelect widgets, so that you can
        iterate over individual radio buttons in a template.
        """
        id_ = self.field.widget.attrs.get('id') or self.auto_id
        attrs = {'id': id_} if id_ else {}
        for subwidget in self.field.widget.subwidgets(self.html_name, self.value(), attrs):
            yield subwidget
    def __len__(self):
        # Number of subwidgets (see __iter__).
        return len(list(self.__iter__()))
    def __getitem__(self, idx):
        # Prevent unnecessary reevaluation when accessing BoundField's attrs
        # from templates.
        if not isinstance(idx, six.integer_types):
            raise TypeError
        return list(self.__iter__())[idx]
    @property
    def errors(self):
        """
        Returns an ErrorList for this field. Returns an empty ErrorList
        if there are none.
        """
        return self.form.errors.get(self.name, self.form.error_class())
    def as_widget(self, widget=None, attrs=None, only_initial=False):
        """
        Renders the field by rendering the passed widget, adding any HTML
        attributes passed as attrs. If no widget is specified, then the
        field's default widget will be used.
        """
        if not widget:
            widget = self.field.widget
        if self.field.localize:
            widget.is_localized = True
        attrs = attrs or {}
        auto_id = self.auto_id
        # Only auto-assign an id if neither the caller nor the widget
        # supplied one explicitly.
        if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
            if not only_initial:
                attrs['id'] = auto_id
            else:
                attrs['id'] = self.html_initial_id
        if not only_initial:
            name = self.html_name
        else:
            name = self.html_initial_name
        return force_text(widget.render(name, self.value(), attrs=attrs))
    def as_text(self, attrs=None, **kwargs):
        """
        Returns a string of HTML for representing this as an <input type="text">.
        """
        return self.as_widget(TextInput(), attrs, **kwargs)
    def as_textarea(self, attrs=None, **kwargs):
        "Returns a string of HTML for representing this as a <textarea>."
        return self.as_widget(Textarea(), attrs, **kwargs)
    def as_hidden(self, attrs=None, **kwargs):
        """
        Returns a string of HTML for representing this as an <input type="hidden">.
        """
        return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
    @property
    def data(self):
        """
        Returns the data for this BoundField, or None if it wasn't given.
        """
        return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
    def value(self):
        """
        Returns the value for this BoundField, using the initial value if
        the form is not bound or the data otherwise.
        """
        if not self.form.is_bound:
            data = self.form.initial.get(self.name, self.field.initial)
            if callable(data):
                data = data()
                # If this is an auto-generated default date, nix the
                # microseconds for standardized handling. See #22502.
                if (isinstance(data, (datetime.datetime, datetime.time)) and
                        not getattr(self.field.widget, 'supports_microseconds', True)):
                    data = data.replace(microsecond=0)
        else:
            data = self.field.bound_data(
                self.data, self.form.initial.get(self.name, self.field.initial)
            )
        return self.field.prepare_value(data)
    def label_tag(self, contents=None, attrs=None, label_suffix=None):
        """
        Wraps the given contents in a <label>, if the field has an ID attribute.
        contents should be 'mark_safe'd to avoid HTML escaping. If contents
        aren't given, uses the field's HTML-escaped label.
        If attrs are given, they're used as HTML attributes on the <label> tag.
        label_suffix allows overriding the form's label_suffix.
        """
        contents = contents or self.label
        if label_suffix is None:
            # Field-level suffix wins over the form-level one.
            label_suffix = (self.field.label_suffix if self.field.label_suffix is not None
                            else self.form.label_suffix)
        # Only add the suffix if the label does not end in punctuation.
        # Translators: If found as last label character, these punctuation
        # characters will prevent the default label_suffix to be appended to the label
        if label_suffix and contents and contents[-1] not in _(':?.!'):
            contents = format_html('{0}{1}', contents, label_suffix)
        widget = self.field.widget
        id_ = widget.attrs.get('id') or self.auto_id
        if id_:
            id_for_label = widget.id_for_label(id_)
            if id_for_label:
                attrs = dict(attrs or {}, **{'for': id_for_label})
            if self.field.required and hasattr(self.form, 'required_css_class'):
                attrs = attrs or {}
                if 'class' in attrs:
                    attrs['class'] += ' ' + self.form.required_css_class
                else:
                    attrs['class'] = self.form.required_css_class
            attrs = flatatt(attrs) if attrs else ''
            contents = format_html('<label{0}>{1}</label>', attrs, contents)
        else:
            # No id available: emit the (escaped) label text without a tag.
            contents = conditional_escape(contents)
        return mark_safe(contents)
    def css_classes(self, extra_classes=None):
        """
        Returns a string of space-separated CSS classes for this field.
        """
        if hasattr(extra_classes, 'split'):
            extra_classes = extra_classes.split()
        extra_classes = set(extra_classes or [])
        if self.errors and hasattr(self.form, 'error_css_class'):
            extra_classes.add(self.form.error_css_class)
        if self.field.required and hasattr(self.form, 'required_css_class'):
            extra_classes.add(self.form.required_css_class)
        return ' '.join(extra_classes)
    @property
    def is_hidden(self):
        "Returns True if this BoundField's widget is hidden."
        return self.field.widget.is_hidden
    @property
    def auto_id(self):
        """
        Calculates and returns the ID attribute for this BoundField, if the
        associated Form has specified auto_id. Returns an empty string otherwise.
        """
        auto_id = self.form.auto_id
        if auto_id and '%s' in smart_text(auto_id):
            return smart_text(auto_id) % self.html_name
        elif auto_id:
            return self.html_name
        return ''
    @property
    def id_for_label(self):
        """
        Wrapper around the field widget's `id_for_label` method.
        Useful, for example, for focusing on this field regardless of whether
        it has a single widget or a MultiWidget.
        """
        widget = self.field.widget
        id_ = widget.attrs.get('id') or self.auto_id
        return widget.id_for_label(id_)
| |
# -*- coding: utf-8 -*-
import gsxws
from django.forms.models import modelform_factory
from django.forms.models import inlineformset_factory
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import permission_required
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from servo.lib.utils import paginate
from servo.models.order import ServiceOrderItem
from servo.models import (Order, Product, GsxAccount,
PurchaseOrder, PurchaseOrderItem,)
from servo.forms import PurchaseOrderItemEditForm, PurchaseOrderSearchForm
@permission_required("servo.change_purchaseorder")
def list_pos(request):
from datetime import timedelta
from django.utils import timezone
from django.db.models import Sum
now = timezone.now()
data = {'title': _("Purchase Orders")}
initial = {'start_date': now - timedelta(days=30), 'end_date': now}
all_orders = PurchaseOrder.objects.filter(
created_at__range=(initial['start_date'], initial['end_date'])
)
form = PurchaseOrderSearchForm(initial=initial)
if request.method == 'POST':
all_orders = PurchaseOrder.objects.all()
form = PurchaseOrderSearchForm(request.POST, initial=initial)
if form.is_valid():
fdata = form.cleaned_data
reference = fdata.get('reference')
if reference:
all_orders = all_orders.filter(reference__contains=reference)
if fdata.get('state') == 'open':
all_orders = all_orders.filter(submitted_at=None)
if fdata.get('state') == 'submitted':
all_orders = all_orders.exclude(submitted_at=None)
if fdata.get('state') == 'received':
all_orders = all_orders.exclude(has_arrived=True)
s, e = (fdata.get('start_date'), fdata.get('end_date'))
if s and e:
all_orders = all_orders.filter(created_at__range=(s, e))
created_by = fdata.get('created_by')
if created_by:
all_orders = all_orders.filter(created_by=created_by)
page = request.GET.get("page")
orders = paginate(all_orders, page, 50)
data['orders'] = orders
data['form'] = form
data['total'] = all_orders.aggregate(Sum('total'))
return render(request, "purchases/list_pos.html", data)
@permission_required("servo.change_purchaseorder")
def delete_from_po(request, pk, item_id):
# @TODO - decrement amount_ordered?
po = get_object_or_404(PurchaseOrder, pk=pk)
poi = PurchaseOrderItem.objects.get(pk=item_id)
poi.delete()
messages.success(request, _(u'Product %s removed' % poi.product.code))
return redirect(po)
@permission_required("servo.change_purchaseorder")
def add_to_po(request, pk, product_id):
po = get_object_or_404(PurchaseOrder, pk=pk)
product = get_object_or_404(Product, pk=product_id)
po.add_product(product, 1, request.user)
messages.success(request, _(u"Product %s added" % product.code))
return redirect(edit_po, po.pk)
def view_po(request, pk):
    # Read-only detail view of a Purchase Order and its line items.
    # NOTE(review): unlike the other PO views in this module, there is no
    # permission_required decorator here -- confirm that is intentional.
    po = get_object_or_404(PurchaseOrder, pk=pk)
    products = po.purchaseorderitem_set.all()
    title = _('Purchase Order %d' % po.pk)
    # locals() exposes po/products/title (and request, pk) to the template.
    return render(request, "purchases/view_po.html", locals())
@permission_required("servo.change_purchaseorder")
def edit_po(request, pk, item_id=None):
if pk is not None:
po = get_object_or_404(PurchaseOrder, pk=pk)
else:
po = PurchaseOrder(created_by=request.user)
PurchaseOrderForm = modelform_factory(PurchaseOrder, exclude=[])
form = PurchaseOrderForm(instance=po)
ItemFormset = inlineformset_factory(
PurchaseOrder,
PurchaseOrderItem,
extra=0,
form=PurchaseOrderItemEditForm,
exclude=[]
)
formset = ItemFormset(instance=po)
if request.method == "POST":
form = PurchaseOrderForm(request.POST, instance=po)
if form.is_valid():
po = form.save()
formset = ItemFormset(request.POST, instance=po)
if formset.is_valid():
formset.save()
msg = _("Purchase Order %d saved" % po.pk)
if "confirm" in request.POST.keys():
po.submit(request.user)
msg = _("Purchase Order %d submitted") % po.pk
messages.success(request, msg)
return redirect(list_pos)
request.session['current_po'] = po.pk
data = {'order': po, 'form': form}
data['formset'] = formset
data['title'] = _('Purchase Order #%d' % po.pk)
return render(request, "purchases/edit_po.html", data)
@permission_required("servo.change_purchaseorder")
def order_stock(request, po_id):
"""
Submits the PO as a GSX Stocking Order
Using the default GSX account.
"""
po = get_object_or_404(PurchaseOrder, pk=po_id)
if request.method == "POST":
if po.submitted_at:
msg = _(u'Purchase Order %s has already been submitted') % po.pk
messages.error(request, msg)
return list_pos(request)
act = GsxAccount.default(request.user)
stock_order = gsxws.StockingOrder(
shipToCode=act.ship_to,
purchaseOrderNumber=po.id
)
for i in po.purchaseorderitem_set.all():
stock_order.add_part(i.code, i.amount)
try:
result = stock_order.submit()
po.supplier = "Apple"
po.confirmation = result.confirmationNumber
po.submit(request.user)
msg = _("Products ordered with confirmation %s" % po.confirmation)
messages.success(request, msg)
except gsxws.GsxError as e:
messages.error(request, e)
return redirect(list_pos)
data = {'action': request.path}
return render(request, "purchases/order_stock.html", data)
@permission_required('servo.delete_purchaseorder')
def delete_po(request, po_id):
    """Delete a Purchase Order, reporting the outcome via messages."""
    purchase_order = get_object_or_404(PurchaseOrder, pk=po_id)
    # Best-effort delete: surface any failure to the user instead of 500ing.
    try:
        purchase_order.delete()
        messages.success(request, _("Purchase Order %s deleted" % po_id))
    except Exception as e:
        messages.error(request, e)
    return redirect(list_pos)
@permission_required('servo.add_purchaseorder')
def create_po(request, product_id=None, order_id=None):
    """
    Creates a new Purchase Order
    """
    purchase_order = PurchaseOrder(created_by=request.user)
    purchase_order.location = request.user.get_location()
    purchase_order.save()
    if order_id is not None:
        # Link to the originating sales order and seed the PO with
        # every one of its service order items.
        purchase_order.sales_order = get_object_or_404(Order, pk=order_id)
        purchase_order.save()
        for soi in ServiceOrderItem.objects.filter(order_id=order_id):
            purchase_order.add_product(soi, amount=1, user=request.user)
    if product_id is not None:
        product = get_object_or_404(Product, pk=product_id)
        purchase_order.add_product(product, amount=1, user=request.user)
    messages.success(request, _("Purchase Order %d created" % purchase_order.pk))
    return redirect(edit_po, purchase_order.pk)
| |
"""Utility functions for plotting M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import math
from functools import partial
import difflib
import webbrowser
from warnings import warn
import tempfile
import numpy as np
from ..io import show_fiff
from ..utils import verbose, set_config
# Default color cycle used by the M/EEG plotting routines
# (matplotlib single-letter color codes followed by hex specs).
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
          '#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
def _setup_vmin_vmax(data, vmin, vmax, norm=False):
"""Aux function to handle vmin and vmax parameters"""
if vmax is None and vmin is None:
vmax = np.abs(data).max()
if norm:
vmin = 0.
else:
vmin = -vmax
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
if norm:
vmin = 0.
else:
vmin = np.min(data)
if callable(vmax):
vmax = vmax(data)
elif vmax is None:
vmax = np.max(data)
return vmin, vmax
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
    """Adjust subplot parameters to give specified padding.

    Note. For plotting please use this function instead of
    plt.tight_layout.

    Parameters
    ----------
    pad : float
        Padding between the figure edge and the edges of subplots, as a
        fraction of the font-size.
    h_pad : float
        Padding height between edges of adjacent subplots.
        Defaults to `pad_inches`.
    w_pad : float
        Padding width between edges of adjacent subplots.
        Defaults to `pad_inches`.
    fig : instance of Figure
        Figure to apply changes to.
    """
    import matplotlib.pyplot as plt
    if fig is None:
        fig = plt.gcf()
    fig.canvas.draw()
    pad_kwargs = dict(pad=pad, h_pad=h_pad, w_pad=w_pad)
    try:  # see https://github.com/matplotlib/matplotlib/issues/2654
        fig.tight_layout(**pad_kwargs)
    except Exception:
        warn('Matplotlib function \'tight_layout\' is not supported.'
             ' Skipping subplot adjusment.')
        return
    # Keep the layout tight on subsequent draws when supported.
    try:
        fig.set_tight_layout(pad_kwargs)
    except Exception:
        pass
def _check_delayed_ssp(container):
""" Aux function to be used for interactive SSP selection
"""
if container.proj is True or\
all(p['active'] for p in container.info['projs']):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
    """Return a colormap similar to that used by mne_analyze

    Parameters
    ----------
    limits : list (or array) of length 3 or 6
        Bounds for the colormap, which will be mirrored across zero if length
        3, or completely specified (and potentially asymmetric) if length 6.
    format : str
        Type of colormap to return. If 'matplotlib', will return a
        matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
        return an RGBA array of shape (256, 4).

    Returns
    -------
    cmap : instance of matplotlib.pyplot.colormap | array
        A teal->blue->gray->red->yellow colormap.

    Notes
    -----
    This will return a colormap that will display correctly for data
    that are scaled by the plotting function to span [-fmax, fmax].

    Examples
    --------
    The following code will plot a STC using standard MNE limits:
    colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
    brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
    brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
    """
    # Ensure limits is an array
    # (NOTE: the mutable default `limits` is safe -- it is only read.)
    limits = np.asarray(limits, dtype='float')

    if len(limits) != 3 and len(limits) != 6:
        raise ValueError('limits must have 3 or 6 elements')
    if len(limits) == 3 and any(limits < 0.):
        raise ValueError('if 3 elements, limits must all be non-negative')
    if any(np.diff(limits) <= 0):
        raise ValueError('limits must be monotonically increasing')
    if format == 'matplotlib':
        from matplotlib import colors
        # Map the limits onto [0, 1] color-stop positions; a 3-element
        # input is mirrored about zero first.
        if len(limits) == 3:
            limits = (np.concatenate((-np.flipud(limits), limits)) +
                      limits[-1]) / (2 * limits[-1])
        else:
            limits = (limits - np.min(limits)) / np.max(limits -
                                                        np.min(limits))

        cdict = {'red': ((limits[0], 0.0, 0.0),
                         (limits[1], 0.0, 0.0),
                         (limits[2], 0.5, 0.5),
                         (limits[3], 0.5, 0.5),
                         (limits[4], 1.0, 1.0),
                         (limits[5], 1.0, 1.0)),
                 'green': ((limits[0], 1.0, 1.0),
                           (limits[1], 0.0, 0.0),
                           (limits[2], 0.5, 0.5),
                           (limits[3], 0.5, 0.5),
                           (limits[4], 0.0, 0.0),
                           (limits[5], 1.0, 1.0)),
                 'blue': ((limits[0], 1.0, 1.0),
                          (limits[1], 1.0, 1.0),
                          (limits[2], 0.5, 0.5),
                          (limits[3], 0.5, 0.5),
                          (limits[4], 0.0, 0.0),
                          (limits[5], 0.0, 0.0))}
        return colors.LinearSegmentedColormap('mne_analyze', cdict)
    elif format == 'mayavi':
        # Normalize limits to [-1, 1] with an explicit zero stop, then
        # interpolate the 7-stop teal->blue->gray->red->yellow ramp
        # (with alpha fading near zero) onto 256 RGBA entries.
        if len(limits) == 3:
            limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
                limits[-1]
        else:
            limits = np.concatenate((limits[:3], [0], limits[3:]))
            limits /= np.max(np.abs(limits))
        r = np.array([0, 0, 0, 0, 1, 1, 1])
        g = np.array([1, 0, 0, 0, 0, 0, 1])
        b = np.array([1, 1, 1, 0, 0, 0, 0])
        a = np.array([1, 1, 0, 0, 0, 1, 1])
        xp = (np.arange(256) - 128) / 128.0
        colormap = np.r_[[np.interp(xp, limits, 255 * c)
                          for c in [r, g, b, a]]].T
        return colormap
    else:
        raise ValueError('format must be either matplotlib or mayavi')
def _toggle_options(event, params):
    """Toggle options (projectors) dialog"""
    import matplotlib.pyplot as plt
    if len(params['projs']) == 0:
        # Nothing to toggle without projectors.
        return
    if params['fig_proj'] is None:
        _draw_proj_checkbox(event, params, draw_current_state=False)
    else:
        # Dialog already open: close it and drop its widget state.
        plt.close(params['fig_proj'])
        del params['proj_checks']
        params['fig_proj'] = None
def _toggle_proj(event, params):
"""Operation to perform when proj boxes clicked"""
# read options if possible
if 'proj_checks' in params:
bools = [x[0].get_visible() for x in params['proj_checks'].lines]
for bi, (b, p) in enumerate(zip(bools, params['projs'])):
# see if they tried to deactivate an active one
if not b and p['active']:
bools[bi] = True
else:
bools = [True] * len(params['projs'])
compute_proj = False
if 'proj_bools' not in params:
compute_proj = True
elif not np.array_equal(bools, params['proj_bools']):
compute_proj = True
# if projectors changed, update plots
if compute_proj is True:
params['plot_update_proj_callback'](params, bools)
def _get_help_text(params):
    """Aux function for customizing help dialogs text."""
    # `text` collects the key/mouse labels and `text2` the matching
    # descriptions; both are joined into two columns at the end. The
    # insert() index juggling keeps each description aligned with its
    # label as raw/epochs/ica-specific entries are spliced in.
    text, text2 = list(), list()
    text.append(u'\u2190 : \n')
    text.append(u'\u2192 : \n')
    text.append(u'\u2193 : \n')
    text.append(u'\u2191 : \n')
    text.append(u'- : \n')
    text.append(u'+ or = : \n')
    text.append(u'Home : \n')
    text.append(u'End : \n')
    text.append(u'Page down : \n')
    text.append(u'Page up : \n')
    text.append(u'F11 : \n')
    text.append(u'? : \n')
    text.append(u'Esc : \n\n')
    text.append(u'Mouse controls\n')
    text.append(u'click on data :\n')

    text2.append('Navigate left\n')
    text2.append('Navigate right\n')
    text2.append('Scale down\n')
    text2.append('Scale up\n')
    text2.append('Toggle full screen mode\n')
    text2.append('Open help box\n')
    text2.append('Quit\n\n\n')
    if 'raw' in params:
        # Raw browser: +/- change the time window.
        text2.insert(4, 'Reduce the time shown per view\n')
        text2.insert(5, 'Increase the time shown per view\n')
        text.append(u'click elsewhere in the plot :\n')
        if 'ica' in params:
            text.append(u'click component name :\n')
            text2.insert(2, 'Navigate components down\n')
            text2.insert(3, 'Navigate components up\n')
            text2.insert(8, 'Reduce the number of components per view\n')
            text2.insert(9, 'Increase the number of components per view\n')
            text2.append('Mark bad channel\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Show topography for the component\n')
        else:
            text.append(u'click channel name :\n')
            text2.insert(2, 'Navigate channels down\n')
            text2.insert(3, 'Navigate channels up\n')
            text2.insert(8, 'Reduce the number of channels per view\n')
            text2.insert(9, 'Increase the number of channels per view\n')
            text2.append('Mark bad channel\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Mark bad channel\n')
    elif 'epochs' in params:
        # Epochs browser: +/- change the number of epochs shown.
        text.append(u'right click :\n')
        text2.insert(4, 'Reduce the number of epochs per view\n')
        text2.insert(5, 'Increase the number of epochs per view\n')
        if 'ica' in params:
            text.append(u'click component name :\n')
            text2.insert(2, 'Navigate components down\n')
            text2.insert(3, 'Navigate components up\n')
            text2.insert(8, 'Reduce the number of components per view\n')
            text2.insert(9, 'Increase the number of components per view\n')
            text2.append('Mark component for exclusion\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Show topography for the component\n')
        else:
            text.append(u'click channel name :\n')
            text.append(u'right click channel name :\n')
            text2.insert(2, 'Navigate channels down\n')
            text2.insert(3, 'Navigate channels up\n')
            text2.insert(8, 'Reduce the number of channels per view\n')
            text2.insert(9, 'Increase the number of channels per view\n')
            text.insert(10, u'b : \n')
            text2.insert(10, 'Toggle butterfly plot on/off\n')
            text.insert(11, u'h : \n')
            text2.insert(11, 'Show histogram of peak-to-peak values\n')
            text2.append('Mark bad epoch\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Mark bad channel\n')
            text2.append('Plot ERP/ERF image\n')
            text.append(u'middle click :\n')
            text2.append('Show channel name (butterfly plot)\n')
        text.insert(11, u'o : \n')
        text2.insert(11, 'View settings (orig. view only)\n')

    return ''.join(text), ''.join(text2)
def _prepare_trellis(n_cells, max_col):
    """Create a grid of subplots sized for ``n_cells`` panels.

    Lays panels out in rows of at most ``max_col`` columns, hides any
    leftover axes, and returns ``(fig, axes)`` with ``axes`` flattened.
    """
    import matplotlib.pyplot as plt
    if n_cells == 1:
        nrow, ncol = 1, 1
    elif n_cells <= max_col:
        nrow, ncol = 1, n_cells
    else:
        ncol = max_col
        nrow = int(math.ceil(n_cells / float(max_col)))
    fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
    if nrow == ncol == 1:
        axes = [axes]
    else:
        axes = axes.flatten()
    for unused_ax in axes[n_cells:]:  # hide unused axes
        unused_ax.set_visible(False)
    return fig, axes
def _draw_proj_checkbox(event, params, draw_current_state=True):
    """Toggle options (projectors) dialog"""
    from matplotlib import widgets
    projs = params['projs']
    labels = [p['desc'] for p in projs]
    if draw_current_state:
        actives = [p['active'] for p in projs]
    else:
        actives = [True] * len(params['projs'])
    # Size the dialog to fit the longest description and the row count.
    width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
    height = len(projs) / 6.0 + 0.5
    fig_proj = figure_nobar(figsize=(width, height))
    fig_proj.canvas.set_window_title('SSP projection vectors')
    params['fig_proj'] = fig_proj  # necessary for proper toggling
    ax_temp = fig_proj.add_axes((0, 0, 1, 1), frameon=False)
    proj_checks = widgets.CheckButtons(ax_temp, labels=labels,
                                       actives=actives)
    # Show already-applied projectors in red.
    for idx, proj in enumerate(projs):
        if proj['active'] is True:
            for line in proj_checks.lines[idx]:
                line.set_color('r')
    proj_checks.on_clicked(partial(_toggle_proj, params=params))
    params['proj_checks'] = proj_checks
    # Drawing can fail under headless/test backends; ignore quietly.
    try:
        fig_proj.canvas.draw()
        fig_proj.show()
    except Exception:
        pass
def _layout_figure(params):
"""Function for setting figure layout. Shared with raw and epoch plots"""
size = params['fig'].get_size_inches() * params['fig'].dpi
scroll_width = 25
hscroll_dist = 25
vscroll_dist = 10
l_border = 100
r_border = 10
t_border = 35
b_border = 40
# only bother trying to reset layout if it's reasonable to do so
if size[0] < 2 * scroll_width or size[1] < 2 * scroll_width + hscroll_dist:
return
# convert to relative units
scroll_width_x = scroll_width / size[0]
scroll_width_y = scroll_width / size[1]
vscroll_dist /= size[0]
hscroll_dist /= size[1]
l_border /= size[0]
r_border /= size[0]
t_border /= size[1]
b_border /= size[1]
# main axis (traces)
ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
ax_y = hscroll_dist + scroll_width_y + b_border
ax_height = 1.0 - ax_y - t_border
pos = [l_border, ax_y, ax_width, ax_height]
params['ax'].set_position(pos)
if 'ax2' in params:
params['ax2'].set_position(pos)
params['ax'].set_position(pos)
# vscroll (channels)
pos = [ax_width + l_border + vscroll_dist, ax_y,
scroll_width_x, ax_height]
params['ax_vscroll'].set_position(pos)
# hscroll (time)
pos = [l_border, b_border, ax_width, scroll_width_y]
params['ax_hscroll'].set_position(pos)
if 'ax_button' in params:
# options button
pos = [l_border + ax_width + vscroll_dist, b_border,
scroll_width_x, scroll_width_y]
params['ax_button'].set_position(pos)
if 'ax_help_button' in params:
pos = [l_border - vscroll_dist - scroll_width_x * 2, b_border,
scroll_width_x * 2, scroll_width_y]
params['ax_help_button'].set_position(pos)
params['fig'].canvas.draw()
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent='    ',
                 read_limit=np.inf, max_str=30, verbose=None):
    """Compare the contents of two fiff files using diff and show_fiff

    Parameters
    ----------
    fname_1 : str
        First file to compare.
    fname_2 : str
        Second file to compare.
    fname_out : str | None
        Filename to store the resulting diff. If None, a temporary
        file will be created.
    show : bool
        If True, show the resulting diff in a new tab in a web browser.
    indent : str
        How to indent the lines.
    read_limit : int
        Max number of bytes of data to read from a tag. Can be np.inf
        to always read all data (helps test read completion).
    max_str : int
        Max number of characters of string representation to print for
        each tag's data.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    fname_out : str
        The filename used for storing the diff. Could be useful for
        when a temporary file is used.
    """
    # Dump both files to text and build an HTML side-by-side diff.
    file_1 = show_fiff(fname_1, output=list, indent=indent,
                       read_limit=read_limit, max_str=max_str)
    file_2 = show_fiff(fname_2, output=list, indent=indent,
                       read_limit=read_limit, max_str=max_str)
    diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
    # delete=False keeps the temp file alive for the browser to open.
    if fname_out is not None:
        f = open(fname_out, 'w')
    else:
        f = tempfile.NamedTemporaryFile('w', delete=False, suffix='.html')
        fname_out = f.name
    with f as fid:
        fid.write(diff)
    if show is True:
        webbrowser.open_new_tab(fname_out)
    return fname_out
def figure_nobar(*args, **kwargs):
    """Make matplotlib figure with no toolbar.

    All arguments are forwarded to ``plt.figure``. The global
    ``rcParams['toolbar']`` setting is temporarily switched to 'none' and
    restored afterwards, and any key-press callbacks installed by the
    toolbar machinery are disconnected from the new figure.
    """
    from matplotlib import rcParams, pyplot as plt
    old_val = rcParams['toolbar']
    try:
        rcParams['toolbar'] = 'none'
        fig = plt.figure(*args, **kwargs)
        # remove button press catchers (for toolbar)
        cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
        for key in cbs:
            fig.canvas.callbacks.disconnect(key)
    # The previous ``except Exception as ex: raise ex`` was a no-op re-raise
    # that truncated the traceback on Python 2; ``finally`` alone already
    # guarantees the rcParams restore while letting exceptions propagate.
    finally:
        rcParams['toolbar'] = old_val
    return fig
def _helper_raw_resize(event, params):
    """Persist the new browser size and recompute the axes layout."""
    width, height = params['fig'].get_size_inches()
    # store as "w,h" so the next raw browser opens at the same size
    set_config('MNE_BROWSE_RAW_SIZE', '%s,%s' % (width, height))
    _layout_figure(params)
def _plot_raw_onscroll(event, params, len_channels=None):
    """Page the channel viewport up or down on mouse-wheel events."""
    if len_channels is None:
        len_channels = len(params['info']['ch_names'])
    previous_start = params['ch_start']
    n_channels = params['n_channels']
    if event.step < 0:
        # scroll down: advance one full page, but keep a full page visible
        params['ch_start'] = min(previous_start + n_channels,
                                 len_channels - n_channels)
    else:
        # scroll up: go back one page, clamped at the first channel
        params['ch_start'] = max(previous_start - n_channels, 0)
    if previous_start != params['ch_start']:
        _channels_changed(params, len_channels)
def _channels_changed(params, len_channels):
"""Helper function for dealing with the vertical shift of the viewport."""
if params['ch_start'] + params['n_channels'] > len_channels:
params['ch_start'] = len_channels - params['n_channels']
if params['ch_start'] < 0:
params['ch_start'] = 0
params['plot_fun']()
def _plot_raw_time(value, params):
"""Deal with changed time value"""
info = params['info']
max_times = params['n_times'] / float(info['sfreq']) - params['duration']
if value > max_times:
value = params['n_times'] / info['sfreq'] - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
def _plot_raw_onkey(event, params):
    """Interpret key presses.

    Handles navigation (arrow keys), trace scaling ('+'/'='/'-'),
    channel-count changes (pageup/pagedown), window-length changes
    (home/end), help ('?'), full screen (f11) and closing (escape).
    """
    import matplotlib.pyplot as plt
    if event.key == 'escape':
        plt.close(params['fig'])
    elif event.key == 'down':
        # shift viewport one page of channels down
        params['ch_start'] += params['n_channels']
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'up':
        # shift viewport one page of channels up
        params['ch_start'] -= params['n_channels']
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'right':
        # move one window-length forward in time
        value = params['t_start'] + params['duration']
        _plot_raw_time(value, params)
        params['update_fun']()
        params['plot_fun']()
    elif event.key == 'left':
        # move one window-length back in time
        value = params['t_start'] - params['duration']
        _plot_raw_time(value, params)
        params['update_fun']()
        params['plot_fun']()
    elif event.key in ['+', '=']:
        # scale traces up by 10%
        params['scale_factor'] *= 1.1
        params['plot_fun']()
    elif event.key == '-':
        # scale traces down by 10%
        params['scale_factor'] /= 1.1
        params['plot_fun']()
    elif event.key == 'pageup':
        # show one more channel: recompute vertical offsets and selector
        n_channels = params['n_channels'] + 1
        offset = params['ax'].get_ylim()[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['n_channels'] = n_channels
        params['ax'].set_yticks(params['offsets'])
        params['vsel_patch'].set_height(n_channels)
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'pagedown':
        # show one fewer channel (never zero)
        n_channels = params['n_channels'] - 1
        if n_channels == 0:
            return
        offset = params['ax'].get_ylim()[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['n_channels'] = n_channels
        params['ax'].set_yticks(params['offsets'])
        params['vsel_patch'].set_height(n_channels)
        if len(params['lines']) > n_channels:  # remove line from view
            params['lines'][n_channels].set_xdata([])
            params['lines'][n_channels].set_ydata([])
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'home':
        # shrink the time window by one second
        duration = params['duration'] - 1.0
        if duration <= 0:
            return
        params['duration'] = duration
        params['hsel_patch'].set_width(params['duration'])
        params['update_fun']()
        params['plot_fun']()
    elif event.key == 'end':
        # grow the time window by one second (capped at the recording end)
        duration = params['duration'] + 1.0
        if duration > params['raw'].times[-1]:
            duration = params['raw'].times[-1]
        params['duration'] = duration
        params['hsel_patch'].set_width(params['duration'])
        params['update_fun']()
        params['plot_fun']()
    elif event.key == '?':
        _onclick_help(event, params)
    elif event.key == 'f11':
        mng = plt.get_current_fig_manager()
        mng.full_screen_toggle()
def _mouse_click(event, params):
    """Vertical select callback.

    Dispatches left-button clicks: clicks outside all axes may hit a
    channel label, clicks on the scrollbars move the viewport, and clicks
    on the main axes are forwarded to bad-channel picking.
    """
    if event.button != 1:
        return
    if event.inaxes is None:
        # click landed outside all axes; it may still be on a y-axis label
        if params['n_channels'] > 100:
            return
        ax = params['ax']
        ylim = ax.get_ylim()
        # convert pixel position to data coordinates of the main axes
        pos = ax.transData.inverted().transform((event.x, event.y))
        if pos[0] > params['t_start'] or pos[1] < 0 or pos[1] > ylim[0]:
            return
        params['label_click_fun'](pos)
    # vertical scrollbar changed
    if event.inaxes == params['ax_vscroll']:
        # center the channel viewport on the clicked channel
        ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
        if params['ch_start'] != ch_start:
            params['ch_start'] = ch_start
            params['plot_fun']()
    # horizontal scrollbar changed
    elif event.inaxes == params['ax_hscroll']:
        # center the time window on the clicked time
        _plot_raw_time(event.xdata - params['duration'] / 2, params)
        params['update_fun']()
        params['plot_fun']()
    elif event.inaxes == params['ax']:
        params['pick_bads_fun'](event)
def _select_bads(event, params, bads):
    """Helper for selecting bad channels onpick. Returns updated bads list."""
    # trade-off, avoid selecting more than one channel when drifts are present
    # however for clean data don't click on peaks but on flat segments
    def f(x, y):
        # band around the trace: mean combined (via y, i.e. np.add or
        # np.subtract) with two standard deviations
        return y(np.mean(x), x.std() * 2)
    lines = event.inaxes.lines
    for line in lines:
        ydata = line.get_ydata()
        if not isinstance(ydata, list) and not np.isnan(ydata).any():
            ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
            if ymin <= event.ydata <= ymax:
                this_chan = vars(line)['ch_name']
                if this_chan in params['info']['ch_names']:
                    ch_idx = params['ch_start'] + lines.index(line)
                    if this_chan not in bads:
                        # mark as bad: recolor and push behind other traces
                        bads.append(this_chan)
                        color = params['bad_color']
                        line.set_zorder(-1)
                    else:
                        # unmark: restore the line's original color
                        while this_chan in bads:
                            bads.remove(this_chan)
                        color = vars(line)['def_color']
                        line.set_zorder(0)
                    line.set_color(color)
                    params['ax_vscroll'].patches[ch_idx].set_color(color)
                    break
    else:
        # for-else: no trace was hit, so drop a vertical time marker at the
        # clicked x position instead
        x = np.array([event.xdata] * 2)
        params['ax_vertline'].set_data(x, np.array(params['ax'].get_ylim()))
        params['ax_hscroll_vertline'].set_data(x, np.array([0., 1.]))
        params['vertline_t'].set_text('%0.3f' % x[0])
    return bads
def _onclick_help(event, params):
    """Draw a toolbar-less help window listing the keyboard shortcuts."""
    import matplotlib.pyplot as plt
    text, text2 = _get_help_text(params)
    # fixed window size in inches
    width = 6
    height = 5
    fig_help = figure_nobar(figsize=(width, height), dpi=80)
    fig_help.canvas.set_window_title('Help')
    # title row spanning the full width
    ax = plt.subplot2grid((8, 5), (0, 0), colspan=5)
    ax.set_title('Keyboard shortcuts')
    plt.axis('off')
    # left column: shortcut keys, right-aligned and bold
    ax1 = plt.subplot2grid((8, 5), (1, 0), rowspan=7, colspan=2)
    ax1.set_yticklabels(list())
    plt.text(0.99, 1, text, fontname='STIXGeneral', va='top', weight='bold',
             ha='right')
    plt.axis('off')
    # right column: the matching descriptions
    ax2 = plt.subplot2grid((8, 5), (1, 2), rowspan=7, colspan=3)
    ax2.set_yticklabels(list())
    plt.text(0, 1, text2, fontname='STIXGeneral', va='top')
    plt.axis('off')
    tight_layout(fig=fig_help)
    # this should work for non-test cases
    try:
        fig_help.canvas.draw()
        fig_help.show()
    except Exception:
        pass
class ClickableImage(object):
    """
    Display an image so you can click on it and store x/y positions.

    Takes as input an image array (can be any array that works with imshow,
    but will work best with images). Displays the image and lets you
    click on it. Stores the xy coordinates of each click, so now you can
    superimpose something on top of it.

    Upon clicking, the x/y coordinate of the cursor will be stored in
    self.coords, which is a list of (x, y) tuples.

    Parameters
    ----------
    imdata : ndarray
        The image that you wish to click on for 2-d points.
    **kwargs : dict
        Keyword arguments. Passed to ax.imshow.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    def __init__(self, imdata, **kwargs):
        """Display the image for clicking."""
        from matplotlib.pyplot import figure, show
        self.coords = []  # accumulated (x, y) tuples, one per click
        self.imdata = imdata
        self.fig = figure()
        self.ax = self.fig.add_subplot(111)
        # image extent in data coordinates, from the array shape
        self.ymax = self.imdata.shape[0]
        self.xmax = self.imdata.shape[1]
        self.im = self.ax.imshow(imdata, aspect='auto',
                                 extent=(0, self.xmax, 0, self.ymax),
                                 picker=True, **kwargs)
        self.ax.axis('off')
        # picker=True above makes clicks on the image fire pick_event
        self.fig.canvas.mpl_connect('pick_event', self.onclick)
        show()
    def onclick(self, event):
        """Mouse click handler.

        Parameters
        ----------
        event : matplotlib event object
            The matplotlib object that we use to get x/y position.
        """
        mouseevent = event.mouseevent
        self.coords.append((mouseevent.xdata, mouseevent.ydata))
    def plot_clicks(self, **kwargs):
        """Plot the x/y positions stored in self.coords.

        Parameters
        ----------
        **kwargs : dict
            Arguments are passed to imshow in displaying the bg image.
        """
        from matplotlib.pyplot import subplots, show
        f, ax = subplots()
        ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
        xlim, ylim = [ax.get_xlim(), ax.get_ylim()]
        xcoords, ycoords = zip(*self.coords)
        ax.scatter(xcoords, ycoords, c='r')
        # annotate each click with its index (0, 1, 2, ...)
        ann_text = np.arange(len(self.coords)).astype(str)
        for txt, coord in zip(ann_text, self.coords):
            ax.annotate(txt, coord, fontsize=20, color='r')
        # restore limits so the scatter does not rescale the view
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        show()
    def to_layout(self, **kwargs):
        """Turn coordinates into an MNE Layout object.

        Normalizes by the image you used to generate clicks

        Parameters
        ----------
        **kwargs : dict
            Arguments are passed to generate_2d_layout
        """
        from mne.channels.layout import generate_2d_layout
        coords = np.array(self.coords)
        lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
        return lt
def _fake_click(fig, ax, point, xform='ax', button=1):
    """Fake a mouse click at *point*, given in axes or data coordinates."""
    transforms = {'ax': ax.transAxes, 'data': ax.transData}
    if xform not in transforms:
        raise ValueError('unknown transform')
    x, y = transforms[xform].transform_point(point)
    try:
        fig.canvas.button_press_event(x, y, button, False, None)
    except Exception:  # for old MPL
        fig.canvas.button_press_event(x, y, button, False)
def add_background_image(fig, im, set_ratios=None):
    """Add a background image to a plot.

    Draws the image ``im`` behind everything else in ``fig``. This is
    generally meant for topo plots, though it works for any figure.

    Note: This modifies the figure and/or axes in place.

    Parameters
    ----------
    fig : plt.figure
        The figure you wish to add a bg image to.
    im : ndarray
        A numpy array that works with a call to plt.imshow(im). This
        will be plotted as the background of the figure.
    set_ratios : None | str
        Set the aspect ratio of any axes in fig to the value in
        set_ratios. Defaults to None, which does nothing to axes.

    Returns
    -------
    ax_im : instance of the create matplotlib axis object
        corresponding to the image you added.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    if set_ratios is not None:
        for axes in fig.axes:
            axes.set_aspect(set_ratios)
    # a full-figure axes pushed behind everything else holds the image
    background_ax = fig.add_axes([0, 0, 1, 1])
    background_ax.imshow(im, aspect='auto')
    background_ax.set_zorder(-1)
    return background_ax
| |
"""Support for Toon van Eneco devices."""
from functools import partial
import logging
from typing import Any, Dict
from toonapilib import Toon
import voluptuous as vol
from homeassistant.const import CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import config_flow # noqa: F401
from .const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_DISPLAY,
CONF_TENANT,
DATA_TOON,
DATA_TOON_CLIENT,
DATA_TOON_CONFIG,
DATA_TOON_UPDATED,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
# Validation of the user's configuration: toon requires API client
# credentials; the scan interval is optional with a sane default.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_CLIENT_ID): cv.string,
                vol.Required(CONF_CLIENT_SECRET): cv.string,
                vol.Required(
                    CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
                ): vol.All(cv.time_period, cv.positive_timedelta),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Schema for the "update" service: optionally target a single display.
SERVICE_SCHEMA = vol.Schema({vol.Optional(CONF_DISPLAY): cv.string})
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
    """Set up the Toon components."""
    if DOMAIN in config:
        # Store config to be used during entry setup
        hass.data[DATA_TOON_CONFIG] = config[DOMAIN]
    return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigType) -> bool:
    """Set up Toon from a config entry."""
    conf = hass.data.get(DATA_TOON_CONFIG)
    # Constructing the Toon client performs blocking network I/O, so it is
    # run in an executor thread rather than on the event loop.
    toon = await hass.async_add_executor_job(
        partial(
            Toon,
            entry.data[CONF_USERNAME],
            entry.data[CONF_PASSWORD],
            conf[CONF_CLIENT_ID],
            conf[CONF_CLIENT_SECRET],
            tenant_id=entry.data[CONF_TENANT],
            display_common_name=entry.data[CONF_DISPLAY],
        )
    )
    hass.data.setdefault(DATA_TOON_CLIENT, {})[entry.entry_id] = toon
    toon_data = ToonData(hass, entry, toon)
    hass.data.setdefault(DATA_TOON, {})[entry.entry_id] = toon_data
    # Poll the API on a fixed interval; entities are notified via dispatcher.
    async_track_time_interval(hass, toon_data.update, conf[CONF_SCAN_INTERVAL])
    # Register device for the Meter Adapter, since it will have no entities.
    device_registry = await dr.async_get_registry(hass)
    device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        identifiers={(DOMAIN, toon.agreement.id, "meter_adapter")},
        manufacturer="Eneco",
        name="Meter Adapter",
        via_device=(DOMAIN, toon.agreement.id),
    )
    def update(call):
        """Service call to manually update the data."""
        called_display = call.data.get(CONF_DISPLAY, None)
        # Refresh every ToonData, or only the one whose display was named.
        for toon_data in hass.data[DATA_TOON].values():
            if (
                called_display and called_display == toon_data.display_name
            ) or not called_display:
                toon_data.update()
    hass.services.async_register(DOMAIN, "update", update, schema=SERVICE_SCHEMA)
    # Forward the entry setup to every platform this integration provides.
    for component in "binary_sensor", "climate", "sensor":
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
class ToonData:
    """Communication class for interacting with toonapilib."""

    def __init__(self, hass: HomeAssistantType, entry: ConfigType, toon):
        """Initialize the Toon data object."""
        self._hass = hass
        self._toon = toon
        self._entry = entry
        # Snapshot of the current device state; refreshed by update().
        self.agreement = toon.agreement
        self.gas = toon.gas
        self.power = toon.power
        self.solar = toon.solar
        self.temperature = toon.temperature
        self.thermostat = toon.thermostat
        self.thermostat_info = toon.thermostat_info
        self.thermostat_state = toon.thermostat_state

    @property
    def display_name(self):
        """Return the display connected to."""
        return self._entry.data[CONF_DISPLAY]

    def update(self, now=None):
        """Update all Toon data and notify entities."""
        # Ignore the TTL mechanism from the client library.
        # It causes a lot of issues, hence we take control over caching.
        self._toon._clear_cache()  # pylint: disable=protected-access
        # Gather data from client library (single API call)
        self.gas = self._toon.gas
        self.power = self._toon.power
        self.solar = self._toon.solar
        self.temperature = self._toon.temperature
        self.thermostat = self._toon.thermostat
        self.thermostat_info = self._toon.thermostat_info
        self.thermostat_state = self._toon.thermostat_state
        # Notify all entities
        dispatcher_send(self._hass, DATA_TOON_UPDATED, self._entry.data[CONF_DISPLAY])
class ToonEntity(Entity):
    """Defines a base Toon entity."""

    def __init__(self, toon: ToonData, name: str, icon: str) -> None:
        """Initialize the Toon entity."""
        self._name = name
        self._state = None
        self._icon = icon
        self.toon = toon
        # Dispatcher unsubscribe callable; set while subscribed to updates.
        self._unsub_dispatcher = None

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def icon(self) -> str:
        """Return the mdi icon of the entity."""
        return self._icon

    @property
    def should_poll(self) -> bool:
        """Return the polling requirement of the entity."""
        # Updates are pushed via the dispatcher; no polling needed.
        return False

    async def async_added_to_hass(self) -> None:
        """Connect to dispatcher listening for entity data notifications."""
        self._unsub_dispatcher = async_dispatcher_connect(
            self.hass, DATA_TOON_UPDATED, self._schedule_immediate_update
        )

    async def async_will_remove_from_hass(self) -> None:
        """Disconnect from update signal."""
        self._unsub_dispatcher()

    @callback
    def _schedule_immediate_update(self, display_name: str) -> None:
        """Schedule an immediate update of the entity."""
        # Only react to updates for the display this entity belongs to.
        if display_name == self.toon.display_name:
            self.async_schedule_update_ha_state(True)
class ToonDisplayDeviceEntity(ToonEntity):
    """Defines a Toon display device entity."""

    @property
    def device_info(self) -> Dict[str, Any]:
        """Return device information about this thermostat."""
        agreement = self.toon.agreement
        # version strings look like "<value>/<rest>"; split on the last '/'
        hardware = agreement.display_hardware_version
        software = agreement.display_software_version
        return {
            "identifiers": {(DOMAIN, agreement.id)},
            "name": "Toon Display",
            "manufacturer": "Eneco",
            "model": hardware.rpartition("/")[0],
            "sw_version": software.rpartition("/")[-1],
        }
class ToonElectricityMeterDeviceEntity(ToonEntity):
    """Defines a Electricity Meter device entity."""

    @property
    def device_info(self) -> Dict[str, Any]:
        """Return device information about this entity."""
        agreement_id = self.toon.agreement.id
        return {
            "name": "Electricity Meter",
            "identifiers": {(DOMAIN, agreement_id, "electricity")},
            "via_device": (DOMAIN, agreement_id, "meter_adapter"),
        }
class ToonGasMeterDeviceEntity(ToonEntity):
    """Defines a Gas Meter device entity."""

    @property
    def device_info(self) -> Dict[str, Any]:
        """Return device information about this entity."""
        agreement_id = self.toon.agreement.id
        # smart gas meters hang off the electricity meter, dumb ones off
        # the meter adapter
        via_device = (
            "electricity" if self.toon.gas.is_smart else "meter_adapter"
        )
        return {
            "name": "Gas Meter",
            "identifiers": {(DOMAIN, agreement_id, "gas")},
            "via_device": (DOMAIN, agreement_id, via_device),
        }
class ToonSolarDeviceEntity(ToonEntity):
    """Defines a Solar Device device entity."""

    @property
    def device_info(self) -> Dict[str, Any]:
        """Return device information about this entity."""
        agreement_id = self.toon.agreement.id
        return {
            "name": "Solar Panels",
            "identifiers": {(DOMAIN, agreement_id, "solar")},
            "via_device": (DOMAIN, agreement_id, "meter_adapter"),
        }
class ToonBoilerModuleDeviceEntity(ToonEntity):
    """Defines a Boiler Module device entity."""

    @property
    def device_info(self) -> Dict[str, Any]:
        """Return device information about this entity."""
        agreement_id = self.toon.agreement.id
        return {
            "name": "Boiler Module",
            "manufacturer": "Eneco",
            "identifiers": {(DOMAIN, agreement_id, "boiler_module")},
            "via_device": (DOMAIN, agreement_id),
        }
class ToonBoilerDeviceEntity(ToonEntity):
    """Defines a Boiler device entity."""

    @property
    def device_info(self) -> Dict[str, Any]:
        """Return device information about this entity."""
        agreement_id = self.toon.agreement.id
        return {
            "name": "Boiler",
            "identifiers": {(DOMAIN, agreement_id, "boiler")},
            "via_device": (DOMAIN, agreement_id, "boiler_module"),
        }
| |
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
import datashape
import blaze
from blaze.datadescriptor import ddesc_as_py
from blaze.tests.common import MayBePersistentTest
from blaze import (append,
DyND_DDesc, BLZ_DDesc, HDF5_DDesc)
from blaze.py2help import skip, skipIf
import blz
from blaze.optional_packages import tables_is_here
if tables_is_here:
import tables as tb
class TestEphemeral(unittest.TestCase):
    """Tests for in-memory (non-persistent) blaze array construction."""

    def test_create_scalar(self):
        # Each basic scalar kind round-trips through blaze.array.
        a = blaze.array(True)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('bool'))
        self.assertEqual(bool(a), True)
        a = blaze.array(-123456)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('int32'))
        self.assertEqual(int(a), -123456)
        a = blaze.array(-1.25e-10)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('float64'))
        self.assertEqual(float(a), -1.25e-10)
        a = blaze.array(-1.25e-10+2.5j)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('complex[float64]'))
        self.assertEqual(complex(a), -1.25e-10+2.5j)

    def test_create_from_numpy(self):
        a = blaze.array(np.arange(3))
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(ddesc_as_py(a.ddesc), [0, 1, 2])

    def test_create(self):
        # A default array (backed by DyND)
        a = blaze.array([1,2,3])
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertTrue(str(a.dshape) == "3 * int32")
        self.assertEqual(ddesc_as_py(a.ddesc), [1, 2, 3])

    def test_create_dshape(self):
        # A default array (backed by DyND)
        a = blaze.array([1,2,3], 'float64')
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertTrue(str(a.dshape) == "3 * float64")
        self.assertEqual(ddesc_as_py(a.ddesc), [1, 2, 3])

    def test_create_append(self):
        # A default array (backed by DyND, append not supported yet)
        a = blaze.array([])
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertRaises(ValueError, append, a, [1,2,3])

    def test_create_compress(self):
        # A compressed array (backed by BLZ)
        ddesc = BLZ_DDesc(mode='w', bparams=blz.bparams(clevel=5))
        a = blaze.array(np.arange(1,4), ddesc=ddesc)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(ddesc_as_py(a.ddesc), [1, 2, 3])

    def test_create_iter(self):
        # A simple 1D array
        a = blaze.array(i for i in range(10))
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('10 * int32'))
        self.assertEqual(ddesc_as_py(a.ddesc), list(range(10)))
        # A nested iter
        a = blaze.array((i for i in range(x)) for x in range(5))
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('5 * var * int32'))
        self.assertEqual(ddesc_as_py(a.ddesc),
                         [[i for i in range(x)] for x in range(5)])
        # A list of iter
        a = blaze.array([range(3), (1.5*x for x in range(4)), iter([-1, 1])])
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('3 * var * float64'))
        self.assertEqual(ddesc_as_py(a.ddesc),
                         [list(range(3)),
                          [1.5*x for x in range(4)],
                          [-1, 1]])

    def test_create_compress_iter(self):
        # A compressed array (backed by BLZ)
        ddesc = BLZ_DDesc(mode='w', bparams=blz.bparams(clevel=5))
        a = blaze.array((i for i in range(10)), ddesc=ddesc)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(ddesc_as_py(a.ddesc), list(range(10)))

    def test_create_zeros(self):
        # A default array
        a = blaze.zeros('10 * int64')
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(ddesc_as_py(a.ddesc), [0]*10)

    def test_create_compress_zeros(self):
        # A compressed array (backed by BLZ)
        ddesc = BLZ_DDesc(mode='w', bparams=blz.bparams(clevel=5))
        a = blaze.zeros('10 * int64', ddesc=ddesc)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(ddesc_as_py(a.ddesc), [0]*10)

    def test_create_ones(self):
        # A default array
        a = blaze.ones('10 * int64')
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(ddesc_as_py(a.ddesc), [1]*10)

    def test_create_compress_ones(self):
        # A compressed array (backed by BLZ)
        ddesc = BLZ_DDesc(mode='w', bparams=blz.bparams(clevel=5))
        a = blaze.ones('10 * int64', ddesc=ddesc)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(ddesc_as_py(a.ddesc), [1]*10)

    def test_create_record(self):
        # A simple record array
        a = blaze.array([(10, 3.5), (15, 2.25)],
                        dshape="var * {val: int32, flt: float32}")
        self.assertEqual(ddesc_as_py(a.ddesc), [{'val': 10, 'flt': 3.5},
                                                {'val': 15, 'flt': 2.25}])
        # Test field access via attributes
        aval = a.val
        self.assertEqual(ddesc_as_py(aval.ddesc), [10, 15])
        aflt = a.flt
        self.assertEqual(ddesc_as_py(aflt.ddesc), [3.5, 2.25])
class TestBLZPersistent(MayBePersistentTest, unittest.TestCase):
    """Tests for BLZ-backed arrays persisted on disk."""
    # presumably interpreted by MayBePersistentTest to create an on-disk,
    # directory-based fixture (self.rootdir) — confirm in blaze.tests.common
    disk = True
    dir_ = True

    def test_create(self):
        ddesc = BLZ_DDesc(path=self.rootdir, mode='w')
        a = blaze.array([2], 'float64', ddesc=ddesc)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertTrue(a.dshape.shape == (1,))
        self.assertEqual(ddesc_as_py(a.ddesc), [2])

    def test_append(self):
        ddesc = BLZ_DDesc(path=self.rootdir, mode='w')
        a = blaze.zeros('0 * float64', ddesc=ddesc)
        self.assertTrue(isinstance(a, blaze.Array))
        append(a, list(range(10)))
        self.assertEqual(ddesc_as_py(a.ddesc), list(range(10)))

    # Using a 1-dim as the internal dimension
    def test_append2(self):
        ddesc = BLZ_DDesc(path=self.rootdir, mode='w')
        a = blaze.empty('0 * 2 * float64', ddesc=ddesc)
        self.assertTrue(isinstance(a, blaze.Array))
        lvals = [[i,i*2] for i in range(10)]
        append(a, lvals)
        self.assertEqual(ddesc_as_py(a.ddesc), lvals)
class TestHDF5Persistent(MayBePersistentTest, unittest.TestCase):
    """Tests for HDF5-backed arrays persisted in a single file."""
    # presumably interpreted by MayBePersistentTest to create an on-disk,
    # single-file fixture (self.file) — confirm in blaze.tests.common
    disk = True

    @skipIf(not tables_is_here, 'pytables is not installed')
    def test_create(self):
        ddesc = HDF5_DDesc(path=self.file, datapath='/earray', mode='w')
        a = blaze.array([2], 'float64', ddesc=ddesc)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertTrue(a.dshape.shape == (1,))
        self.assertEqual(ddesc_as_py(a.ddesc), [2])

    @skipIf(not tables_is_here, 'pytables is not installed')
    def test_append(self):
        ddesc = HDF5_DDesc(path=self.file, datapath='/earray', mode='a')
        a = blaze.zeros('0 * float64', ddesc=ddesc)
        self.assertTrue(isinstance(a, blaze.Array))
        append(a, list(range(10)))
        self.assertEqual(ddesc_as_py(a.ddesc), list(range(10)))

    # Using a 1-dim as the internal dimension
    @skipIf(not tables_is_here, 'pytables is not installed')
    def test_append2(self):
        ddesc = HDF5_DDesc(path=self.file, datapath='/earray', mode='a')
        a = blaze.empty('0 * 2 * float64', ddesc=ddesc)
        self.assertTrue(isinstance(a, blaze.Array))
        lvals = [[i,i*2] for i in range(10)]
        append(a, lvals)
        self.assertEqual(ddesc_as_py(a.ddesc), lvals)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| |
'''
cytest - A module to compare Curry output.
'''
from __future__ import print_function
import cStringIO
import os
import re
import shutil
import subprocess
import sys
import tokenize
import unittest
# OK to change the next value.
VERBOSE = True
if VERBOSE:
    def log(*args):
        '''Write an informational message to stdout, prefixed with [INFO].'''
        sys.stdout.write('[INFO]: ')
        print(*args)
else:
    # Logging disabled: swallow all messages.
    def log(*args): pass
def top_help(exitcode, stream):
    '''Write the usage message to *stream* and hard-exit with *exitcode*.'''
    stream.write(
        '''
    USAGE: %s [clean|compare|help|validate] filenames...
    clean
        Clean up any changes to the .curry source files caused by 'mkgold'.
    compare
        Compare the results produced by Sprite with the results produced
        the oracle for one or more .curry files.
    help
        Display this help message.
    mkgold
        Generate golden results by running the oracle and appending its result
        to the .curry file.
    validate
        Validate the comparisons of a previous call to 'compare'.  The
        results are stored in files ending with .results.
    ''' % sys.argv[0]
      )
    # os._exit skips interpreter cleanup; the code still reaches the shell.
    os._exit(exitcode)
# Note: warnings.warn does not interoperate well with unittest. Moreover,
# shell directs cannot redirect stdout and stderr to a file while printing a
# copy of stderr to tty. Prepend $$ to identify error messages.
def warn(msg):
    '''Write *msg* to stderr with every line prefixed by "$$ ".

    The $$ markers make error text identifiable when stdout and stderr
    are redirected to the same file.
    '''
    prefixed = msg.replace('\n', '\n$$ ')
    sys.stderr.write('\n$$ ' + prefixed + '\n')
def syscall(progname, args=''):
    '''
    Calls a system function and captures the contents written to stdout, split on
    newlines.  Returns the captured output and the process return code (mapped to
    {0,1}).
    '''
    cmd = '%s %s' % (progname, args)
    log('Running command:', cmd)
    process = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
      )
    (out,err) = process.communicate()
    # Special case for PAKCS.  If there is no value, PAKCSC exits with status 2.
    if progname == 'pakcs' and process.returncode == 2:
        return (out, 0)
    if err:
        # Surface the child's stderr through warn() without failing the call.
        args = (
            sys.argv[0]
          , os.path.basename(progname)
          , err.strip().replace('\n', '\n    ')
          )
        warn('%s: %s generates error output:\n\n    %s' % args)
    return out, 0 if process.returncode == 0 else 1
def extractAnswers((string, rc), do_sort=True, kics_handling=False):
    '''Split captured program output into a list of answer lines.

    Accepts the (stdout, returncode) pair produced by syscall().  Returns
    (answers, rc), with the answers sorted unless do_sort is False.  With
    kics_handling, everything up to and including KiCS2's
    "Evaluating expression: main" marker line is discarded first.
    '''
    lines = string.split('\n')
    if kics_handling:
        # Skip the KiCS2 preamble up to the marker line.
        i = 0
        n = len(lines)
        while i<n and not lines[i].startswith('Evaluating expression: main'):
            i += 1
        lines = lines[i+1:]
    if rc != 0:
        # Discard warning (to stdout).
        lines = filter(
            lambda s: not s.startswith('Evaluation terminated with non-zero status')
          , lines
          )
    # Drop empty lines; what remains are the actual answers.
    answer = [line for line in lines if line]
    return (sorted(answer) if do_sort else answer, rc)
# Matches a recorded answer line appended by mkgold, e.g. "--> value".
match_answer = re.compile('-->\s+(.*)')
# Matches the recorded return-code line, e.g. "--$?-> 0".
match_rc = re.compile('--\$\?->\s+(\d+)')
# Marker separating Curry source from the appended golden results.
banner = 'CORRECT ANSWER BELOW GENERATED BY cytest.py'
match_banner = re.compile(banner)
def getOracleAnswers(filename):
    '''Return (answers, return_code) from the oracle for *filename*.

    If the file has no golden-results banner yet, run the oracle (KiCS2
    when the file starts with "-- KICS2 --", otherwise PAKCS) and append
    its answers to the file.  Otherwise parse the recorded answers back
    out of the file.
    '''
    if not any(map(match_banner.search, open(filename, 'r').readlines())):
        # No golden results yet: pick the oracle and record its output.
        if open(filename, 'r').readline().startswith('-- KICS2 --'):
            oracle,opt_q,opt_h = 'kics2','',True
        else:
            oracle,opt_q,opt_h = 'pakcs','-q',False
        oracle_answer, oracle_rc = extractAnswers(
            syscall(oracle, '%s :l %s :eval main :q' % (opt_q, filename))
          , kics_handling=opt_h
          )
        # Append the golden results after the banner line.
        with open(filename, 'a') as cyfile:
            cyfile.write('\n------ %s using %s ------\n' % (banner, oracle))
            cyfile.write(''.join([ '--> %s\n' % line for line in oracle_answer ]))
            cyfile.write('--$?-> %s\n' % oracle_rc)
    else:
        # Parse previously recorded answers and return code.
        oracle_answer = []
        oracle_rc = None
        for line in open(filename, 'r').readlines():
            match = match_answer.match(line)
            if match:
                oracle_answer.append(match.group(1))
                continue
            match = match_rc.match(line)
            if match:
                if oracle_rc is not None:
                    warn('Multiple return codes!')
                    os._exit(1)
                oracle_rc = int(match.group(1))
        # Assume success.
        if oracle_rc is None:
            oracle_rc = 0
    return oracle_answer, oracle_rc
def getAnswers(filename):
    '''Return (oracle_answer, sprite_answer, oracle_rc, sprite_rc).

    Collects the oracle's (possibly cached) answers and runs the
    pre-built Sprite executable next to the .curry file.
    '''
    log('Processing file:', filename)
    oracle_answer, oracle_rc = getOracleAnswers(filename)
    # Get the Sprite answer.
    head,tail = os.path.splitext(filename)
    if tail != '' and tail != '.curry':
        warn('%s: unexpected file extension: %s' % (sys.argv[0], tail))
    # The executable is expected alongside the source, with a .exe suffix.
    exe = os.path.abspath(head + '.exe')
    # syscall('scc', '-o %s %s' % (exe, filename))
    sprite_answer, sprite_rc = extractAnswers(syscall(exe))
    log('Oracle answer: ', oracle_answer)
    log('Sprite answer:', sprite_answer)
    log('Oracle return code: ', oracle_rc)
    log('Sprite return code:', sprite_rc)
    return oracle_answer, sprite_answer, oracle_rc, sprite_rc
class Tokenizer(object):
    '''
    Tokenizes and compares result strings.

    Implements a loose comparison by:
      - Converting KiCS2-style free variable strings to PACKS-style ones.
      - Ignoring differences in whether the whole expression has parens.
      - Ignoring differences between 'success' and 'Success'.
    '''
    def __init__(self):
        self.symbols = {}  # free-variable name -> small integer id
        # raw strings so the regex escapes are unambiguous
        self.pat_free = re.compile(r'^_x\d+$')
        self.pat_success = re.compile(r'(.*)\bsuccess\b(.*)')
        self.nextid = 0
        self.out = []  # normalized token stream

    def _show(self, i):
        '''Render free-variable id *i* in PAKCS style: _a.._z, then _a26...'''
        if i < 26:
            return '_' + chr(97 + i)
        else:
            return '_a%d' % i

    def finalize(self):
        '''Ignore differences in outermost parenthesization.'''
        # Guard the empty token stream (e.g. a blank answer line), which
        # previously raised IndexError on self.out[0].
        if self.out and self.out[0] == '(' and self.out[-1] == ')':
            self.out = self.out[1:-1]

    def __call__(self, ty, text, rc_beg, rc_end, lineno):
        '''Tokenizer callback compatible with tokenize.tokenize (Python 2).'''
        if text == '':
            return
        if self.pat_free.match(text):
            # Canonicalize each distinct free variable by order of first
            # appearance, so _x7 and _x3 compare equal across runs.
            if text not in self.symbols:
                self.symbols[text] = self.nextid
                self.nextid += 1
            self.out.append(self._show(self.symbols[text]))
        elif self.pat_success.match(text):
            m = self.pat_success.match(text)
            self.out.append(m.group(1) + 'Success' + m.group(2))
        else:
            self.out.append(text)

    def __eq__(self, rhs):
        return self.out == rhs.out

    def __repr__(self):
        return ' '.join(self.out)

    def __str__(self):
        return ' '.join(self.out)
def processAnswer(text):
    '''Tokenize one answer line into a normalized, comparable Tokenizer.'''
    tok = Tokenizer()
    # Python 2 tokenize API: feed lines via readline, collect via callback.
    tokenize.tokenize(cStringIO.StringIO(text).readline, tok)
    tok.finalize()
    return tok
def equalAnswers(tc, oracle, sprite, oracle_rc, sprite_rc):
    '''Assert, on TestCase *tc*, that Sprite's answers match the oracle's.

    Answers are compared pairwise after tokenization (loose comparison);
    return codes must agree exactly.
    '''
    tc.assertEqual(
        len(oracle), len(sprite)
      , msg='len(oracle) != len(sprite)'
      )
    for p,s in zip(oracle, sprite):
        p,s = map(processAnswer, (p,s))
        tc.assertTrue(p == s, msg='Oracle=%s Sprite=%s' % (p, s))
    tc.assertEqual(
        oracle_rc, sprite_rc
      , msg='return codes do not agree: Oracle=%s Sprite=%s' % (oracle_rc, sprite_rc)
      )
def runTestSuite(tc):
suite = unittest.TestLoader().loadTestsFromTestCase(tc)
return unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(suite)
def top_clean():
    '''Strip the golden results appended by 'mkgold' from each named file.

    Copies each file up to (excluding) the banner line, then moves the
    copy back over the original.
    '''
    for filename in sys.argv[2:]:
        filename_copy = filename + '.copy'
        with open(filename_copy, 'w') as cp:
            # open the source in a with-block too, so the handle is closed
            # promptly (the old code leaked it until GC); also drop the
            # stray trailing semicolon after ``break``
            with open(filename, 'r') as src:
                for line in src:
                    if match_banner.search(line):
                        break
                    cp.write(line)
        shutil.move(filename_copy, filename)
def top_mkgold():
    '''Generate golden results for every file named on the command line.'''
    for curry_file in sys.argv[2:]:
        getOracleAnswers(curry_file)
def top_compare():
    '''Compare Sprite output against the oracle for each named file.

    Builds a TestCase with one generated test per file and runs it.
    Exits 0 as long as all tests were run, regardless of pass/fail.
    '''
    class CompareTC(unittest.TestCase): pass
    for filename in sys.argv[2:]:
        base = os.path.splitext(os.path.basename(filename))[0]
        # Bind the current filename as a default argument: a plain closure
        # is late-bound, so every generated test would otherwise compare
        # only the LAST file on the command line.
        def test(self, filename=filename):
            equalAnswers(self, *getAnswers(filename))
        setattr(CompareTC, 'test_' + base, test)
    rv = runTestSuite(CompareTC)
    # The compare step succeeds as long as the tests were run (regardless of
    # whether or not they passed).
    sys.exit(0 if rv.testsRun == len(sys.argv[2:]) else 1)
def top_validate():
    """Check that each input file's last line is exactly 'OK'; exit 0 or 1."""
    class ValidateTC(unittest.TestCase): pass
    for filename in sys.argv[2:]:
        base = os.path.splitext(os.path.basename(filename))[0]
        # Bind filename as a default argument to avoid the late-binding
        # closure bug (all tests would otherwise validate the last file).
        def test(self, filename=filename):
            with open(filename, 'r') as f:  # close the handle (was leaked)
                text = f.readlines()[-1].strip()
            self.assertEqual(text, 'OK')    # assertEquals is a deprecated alias
        setattr(ValidateTC, 'test_' + base, test)
    rv = runTestSuite(ValidateTC)
    sys.exit(0 if rv.wasSuccessful() else 1)
if __name__ == '__main__':
    # Dispatch on the sub-command given as the first CLI argument.
    if len(sys.argv) < 2:
        # No sub-command: show usage instead of crashing with IndexError.
        # (top_help is assumed to exit with the given status, matching the
        # invalid-command branch below -- TODO confirm.)
        top_help(1, sys.stderr)
    command = sys.argv[1]
    if command == 'clean':
        top_clean()
    elif command == 'compare':
        top_compare()
    elif command == 'mkgold':
        top_mkgold()
    elif command == 'help':
        top_help(0, sys.stdout)
    elif command == 'validate':
        top_validate()
    else:
        # Terminate the message with a newline so the usage text that
        # top_help prints does not run into it.
        sys.stderr.write('Invalid command: ' + command + '\n')
        top_help(1, sys.stderr)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plotting macros for XAFS data sets and fits
===========================================
Function Description of what is plotted
---------------- -----------------------------------------------------
plot_mu() mu(E) for XAFS data group in various forms
plot_bkg() mu(E) and background mu0(E) for XAFS data group
plot_chik() chi(k) for XAFS data group
plot_chie() chi(E) for XAFS data group
plot_chir() chi(R) for XAFS data group
plot_chifit() chi(k) and chi(R) for fit to feffit dataset
plot_path_k() chi(k) for a single path of a feffit dataset
plot_path_r() chi(R) for a single path of a feffit dataset
plot_paths_k() chi(k) for model and all paths of a feffit dataset
plot_paths_r() chi(R) for model and all paths of a feffit dataset
---------------- -----------------------------------------------------
NOTE: This is a pure matplotlib implementation of larch.wxlib.xafsplot
/!\ WORK-IN-PROGRESS /!\ => MOVING TO .xafsplotter
"""
import numpy as np
from larch import Group
from larch.math import (index_of, index_nearest, interp)
# global variables
# Default 10-color line cycle (matplotlib "tab10" hex values) shared by the
# plotting helpers below.
LineColors = ('#1f77b4', '#d62728', '#2ca02c', '#ff7f0e', '#9467bd',
              '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf')
# common XAFS plot labels
# LaTeX axis/label strings.  The chi* entries are str.format() templates
# that take the k-weight as the single argument.  Note that 'chirlab' is a
# None placeholder here; the module-level chirlab() function below builds
# the actual chi(R) label.
plotlabels = Group(k       = r'$k \rm\,(\AA^{-1})$',
                   r       = r'$R \rm\,(\AA)$',
                   energy  = r'$E\rm\,(eV)$',
                   mu      = r'$\mu(E)$',
                   norm    = r'normalized $\mu(E)$',
                   flat    = r'flattened $\mu(E)$',
                   deconv  = r'deconvolved $\mu(E)$',
                   dmude   = r'$d\mu(E)/dE$',
                   dnormde = r'$d\mu_{\rm norm}(E)/dE$',
                   chie    = r'$\chi(E)$',
                   chikw   = r'$k^{{{0:g}}}\chi(k) \rm\,(\AA^{{-{0:g}}})$',
                   chir    = r'$\chi(R) \rm\,(\AA^{{-{0:g}}})$',
                   chirmag = r'$|\chi(R)| \rm\,(\AA^{{-{0:g}}})$',
                   chirre  = r'${{\rm Re}}[\chi(R)] \rm\,(\AA^{{-{0:g}}})$',
                   chirim  = r'${{\rm Im}}[\chi(R)] \rm\,(\AA^{{-{0:g}}})$',
                   chirpha = r'${{\rm Phase}}[\chi(R)] \rm\,(\AA^{{-{0:g}}})$',
                   e0color = '#B2B282',
                   chirlab = None)
def _plot(ax=None, x=None, y=None, label=None, title=None,
linewidth=2,
xlabel=None, ylabel=None,
xmin=None, xmax=None,
ymin=None, ymax=None, **kws):
"""simple axis plotter wrapper"""
if ax is None:
return
if (x is None) or (y is None):
return
ax.plot(x, y, label=label, linewidth=linewidth)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if (xmin is not None) and (xmax is not None):
ax.set_xlim(xmin, xmax)
if (xmin is not None) and (xmax is not None):
ax.set_ylim(ymin, ymax)
def chirlab(kweight, show_mag=True, show_real=False, show_imag=False):
    """generate chi(R) label for a kweight

    Arguments
    ----------
    kweight      k-weight to use (required)
    show_mag     bool whether to plot |chi(R)|     [True]
    show_real    bool whether to plot Re[chi(R)]   [False]
    show_imag    bool whether to plot Im[chi(R)]   [False]

    When more than one component is selected, the generic chi(R) label is
    used; the template is formatted with kweight+1 (the extra power comes
    from the Fourier transform).
    """
    selected = [template for flag, template in
                ((show_mag, plotlabels.chirmag),
                 (show_real, plotlabels.chirre),
                 (show_imag, plotlabels.chirim)) if flag]
    template = plotlabels.chir if len(selected) > 1 else selected[0]
    return template.format(kweight+1)
def _get_title(dgroup, title=None):
"""get best title for group"""
if title is not None:
return title
data_group = getattr(dgroup, 'data', None)
for attr in ('title', 'plot_title', 'filename', 'name', '__name__'):
t = getattr(dgroup, attr, None)
if t is not None:
return t
if data_group is not None:
t = getattr(data_group, attr, None)
if t is not None:
return t
return repr(dgroup)
def _get_erange(dgroup, emin=None, emax=None):
"""get absolute emin/emax for data range, allowing using
values relative to e0.
"""
dat_emin, dat_emax = min(dgroup.energy)-100, max(dgroup.energy)+100
e0 = getattr(dgroup, 'e0', 0.0)
if emin is not None:
if not (emin > dat_emin and emin < dat_emax):
if emin+e0 > dat_emin and emin+e0 < dat_emax:
emin += e0
if emax is not None:
if not (emax > dat_emin and emax < dat_emax):
if emax+e0 > dat_emin and emax+e0 < dat_emax:
emax += e0
return emin, emax
def plot_mu(ax, dgroup, show_norm=False, show_deriv=False,
            show_pre=False, show_post=False, show_e0=False, with_deriv=False,
            emin=None, emax=None, label='mu', new=True, delay_draw=False,
            offset=0, title=None):
    """
    plot_mu(ax, dgroup, norm=False, deriv=False, show_pre=False,
            show_post=False, show_e0=False, show_deriv=False,
            emin=None, emax=None, label=None)
    Plot mu(E) for an XAFS data group in various forms
    Arguments
    ----------
    ax          Matplotlib ax
    dgroup      group of XAFS data after pre_edge() results (see Note 1)
    show_norm   bool whether to show normalized data [False]
    show_deriv  bool whether to show derivative of XAFS data [False]
    show_pre    bool whether to show pre-edge curve [False]
    show_post   bool whether to show post-edge curve [False]
    show_e0     bool whether to show E0 [False]
    with_deriv  bool whether to show deriv together with mu [False]
    emin        min energy to show, absolute or relative to E0 [None, start of data]
    emax        max energy to show, absolute or relative to E0 [None, end of data]
    label       string for label [None: 'mu', `dmu/dE', or 'mu norm']
    title       string for plot title [None, may use filename if available]
    new         bool whether to start a new plot [True]
    delay_draw  bool whether to delay draw until more traces are added [False]
    offset      vertical offset to use for y-array [0]
    win         integer plot window to use [1]
    Notes
    -----
     1. The input data group must have the following attributes:
         energy, mu, norm, e0, pre_edge, edge_step
    """
    # Pick the first available absorption array: 'mu', then transmission
    # 'mutrans', then fluorescence 'mufluor'.
    if hasattr(dgroup, 'mu'):
        mu = dgroup.mu
    elif hasattr(dgroup, 'mutrans'):
        mu = dgroup.mutrans
    elif hasattr(dgroup, 'mufluor'):
        mu = dgroup.mufluor
    else:
        raise ValueError("XAFS data group has no array for mu")
    ylabel = plotlabels.mu
    if label is None:
        label = 'mu'
    if show_deriv:
        # numerical derivative d(mu)/dE on the (possibly uneven) energy grid
        mu = np.gradient(mu)/np.gradient(dgroup.energy)
        ylabel = plotlabels.dmude
        # NOTE(review): dlabel is computed in both branches but never used.
        dlabel = '%s (deriv)' % label
    elif show_norm:
        mu = dgroup.norm
        ylabel = "%s (norm)" % ylabel
        dlabel = "%s (norm)" % label
    emin, emax = _get_erange(dgroup, emin, emax)
    title = _get_title(dgroup, title=title)
    opts = dict(ax=ax, show_legend=True, linewidth=2,
                title=title, xmin=emin, xmax=emax,
                delay_draw=True)
    # NOTE(review): these _plot() calls pass (x, y) positionally, which the
    # local _plot(ax=..., x=..., ...) signature maps onto (ax, x).  This
    # still follows the old larch.wxlib plotter API; the module docstring
    # marks this file as work-in-progress.
    _plot(dgroup.energy, mu+offset, xlabel=plotlabels.energy, ylabel=ylabel,
          label=label, zorder=20, new=new, **opts)
    if with_deriv:
        dmu = np.gradient(mu)/np.gradient(dgroup.energy)
        _plot(dgroup.energy, dmu+offset, ylabel=plotlabels.dmude,
              label='%s (deriv)' % label, zorder=18, side='right', **opts)
    if (not show_norm and not show_deriv):
        if show_pre:
            _plot(dgroup.energy, dgroup.pre_edge+offset, label='pre_edge',
                  zorder=18, **opts)
        if show_post:
            _plot(dgroup.energy, dgroup.post_edge+offset, label='post_edge',
                  zorder=18, **opts)
        if show_pre:
            # arrow marking the edge step between pre- and post-edge at e0
            i = index_of(dgroup.energy, dgroup.e0)
            ypre = dgroup.pre_edge[i]
            ypost = dgroup.post_edge[i]
            # NOTE(review): 'win', '_larch' and '_plot_arrow' are not
            # defined in this function or module; this branch would raise
            # NameError as written.
            _plot_arrow(dgroup.e0, ypre, dgroup.e0+offset, ypost,
                        color=plotlabels.e0color, width=0.25,
                        head_width=0, zorder=3, win=win, _larch=_larch)
    if show_e0:
        # NOTE(review): _plot_axvline/win/_larch are also undefined here.
        _plot_axvline(dgroup.e0, zorder=2, size=3,
                      label='E0', color=plotlabels.e0color, win=win,
                      _larch=_larch)
    # NOTE(review): _getDisplay and redraw are likewise undefined in this
    # module -- leftovers from the larch.wxlib implementation.
    disp = _getDisplay(win=win, _larch=_larch)
    if disp is not None:
        disp.panel.conf.draw_legend()
    redraw(win=win, xmin=emin, xmax=emax, _larch=_larch)
def plot_bkg(dgroup, norm=True, emin=None, emax=None, show_e0=False,
             label=None, title=None, new=True, delay_draw=False, offset=0,
             win=1, _larch=None):
    """
    plot_bkg(dgroup, norm=True, emin=None, emax=None, show_e0=False, label=None, new=True, win=1):
    Plot mu(E) and background mu0(E) for XAFS data group
    Arguments
    ----------
    dgroup      group of XAFS data after autobk() results (see Note 1)
    norm        bool whether to show normalized data [True]
    emin        min energy to show, absolute or relative to E0 [None, start of data]
    emax        max energy to show, absolute or relative to E0 [None, end of data]
    show_e0     bool whether to show E0 [False]
    label       string for label [``None``: 'mu']
    title       string for plot title [None, may use filename if available]
    new         bool whether to start a new plot [True]
    delay_draw  bool whether to delay draw until more traces are added [False]
    offset      vertical offset to use for y-array [0]
    win         integer plot window to use [1]
    Notes
    -----
     1. The input data group must have the following attributes:
         energy, mu, bkg, norm, e0, pre_edge, edge_step, filename
    """
    # Pick the absorption array ('mu', falling back to 'mutrans').
    if hasattr(dgroup, 'mu'):
        mu = dgroup.mu
    elif hasattr(dgroup, 'mutrans'):
        mu = dgroup.mutrans
    else:
        raise ValueError("XAFS data group has no array for mu")
    #endif
    bkg = dgroup.bkg
    ylabel = plotlabels.mu
    if label is None:
        label = 'mu'
    #endif
    emin, emax = _get_erange(dgroup, emin, emax)
    if norm:
        # normalize both data and background by the edge step
        mu = dgroup.norm
        bkg = (dgroup.bkg - dgroup.pre_edge) / dgroup.edge_step
        ylabel = "%s (norm)" % ylabel
        label = "%s (norm)" % label
    #endif
    title = _get_title(dgroup, title=title)
    # NOTE(review): these calls use the old larch.wxlib plotter API
    # (win/_larch kwargs, positional x/y); the local _plot() above has a
    # different signature, and _plot_axvline/_getDisplay/redraw are not
    # defined in this module.  Work-in-progress per the module docstring.
    opts = dict(win=win, show_legend=True, linewidth=3,
                delay_draw=True, _larch=_larch)
    _plot(dgroup.energy, mu+offset, xlabel=plotlabels.energy, ylabel=ylabel,
          title=title, label=label, zorder=20, new=new, xmin=emin, xmax=emax,
          **opts)
    _plot(dgroup.energy, bkg+offset, zorder=18, label='bkg', **opts)
    if show_e0:
        _plot_axvline(dgroup.e0, zorder=2, size=3, label='E0',
                      color=plotlabels.e0color, win=win, _larch=_larch)
    disp = _getDisplay(win=win, _larch=_larch)
    if disp is not None:
        disp.panel.conf.draw_legend()
    #endif
    redraw(win=win, xmin=emin, xmax=emax, _larch=_larch)
#enddef
def plot_chie(dgroup, emin=-25, emax=None, label=None, title=None,
              new=True, delay_draw=False, offset=0, win=1, _larch=None):
    """
    plot_chie(dgroup, emin=None, emax=None, label=None, new=True, win=1):
    Plot chi(E) for XAFS data group
    Arguments
    ----------
    dgroup      group of XAFS data after autobk() results (see Note 1)
    emin        min energy to show, absolute or relative to E0 [-25]
    emax        max energy to show, absolute or relative to E0 [None, end of data]
    label       string for label [``None``: 'mu']
    title       string for plot title [None, may use filename if available]
    new         bool whether to start a new plot [True]
    delay_draw  bool whether to delay draw until more traces are added [False]
    offset      vertical offset to use for y-array [0]
    win         integer plot window to use [1]
    Notes
    -----
     1. The input data group must have the following attributes:
         energy, mu, bkg, norm, e0, pre_edge, edge_step, filename
    """
    if hasattr(dgroup, 'mu'):
        mu = dgroup.mu
    elif hasattr(dgroup, 'mutrans'):
        mu = dgroup.mutrans
    else:
        raise ValueError("XAFS data group has no array for mu")
    #endif
    # chi(E) is the raw absorption with the autobk background removed
    chie = mu - dgroup.bkg
    emin, emax = _get_erange(dgroup, emin, emax)
    title = _get_title(dgroup, title=title)
    _plot(dgroup.energy, chie+offset, xlabel=plotlabels.energy,
          ylabel=plotlabels.chie, title=title, label=label, zorder=20,
          new=new, xmin=emin, xmax=emax, win=win, show_legend=True,
          delay_draw=delay_draw, linewidth=3, _larch=_larch)
    # NOTE(review): this guard looks inverted -- a redraw is normally
    # needed when drawing is NOT delayed; confirm against the wxlib
    # original.  'redraw' is also undefined in this module.
    if delay_draw:
        redraw(win=win, xmin=emin, xmax=emax, _larch=_larch)
#enddef
def plot_chik(dgroup, kweight=None, kmax=None, show_window=True,
              scale_window=True, label=None, title=None, new=True,
              delay_draw=False, offset=0, win=1, _larch=None):
    """
    plot_chik(dgroup, kweight=None, kmax=None, show_window=True, label=None,
              new=True, win=1)
    Plot k-weighted chi(k) for XAFS data group
    Arguments
    ----------
    dgroup       group of XAFS data after autobk() results (see Note 1)
    kweight      k-weighting for plot [read from last xftf(), or 0]
    kmax         max k to show [None, end of data]
    show_window  bool whether to also plot k-window [True]
    scale_window bool whether to scale k-window to max |chi(k)| [True]
    label        string for label [``None`` to use 'chi']
    title        string for plot title [None, may use filename if available]
    new          bool whether to start a new plot [True]
    delay_draw   bool whether to delay draw until more traces are added [False]
    offset       vertical offset to use for y-array [0]
    win          integer plot window to use [1]
    Notes
    -----
     1. The input data group must have the following attributes:
         k, chi, kwin, filename
    """
    if kweight is None:
        # default to the k-weight recorded by the last xftf() call, or 0
        kweight = 0
        xft = getattr(dgroup, 'xftf_details', None)
        if xft is not None:
            kweight = xft.call_args.get('kweight', 0)
        #endif
    #endif
    chi = dgroup.chi * dgroup.k ** kweight
    # NOTE(review): win/_larch and redraw follow the old larch.wxlib
    # plotter API; redraw is undefined in this module (work-in-progress).
    opts = dict(win=win, show_legend=True, delay_draw=True, linewidth=3,
                _larch=_larch)
    if label is None:
        label = 'chi'
    #endif
    title = _get_title(dgroup, title=title)
    _plot(dgroup.k, chi+offset, xlabel=plotlabels.k,
          ylabel=plotlabels.chikw.format(kweight), title=title,
          label=label, zorder=20, new=new, xmax=kmax, **opts)
    if show_window and hasattr(dgroup, 'kwin'):
        kwin = dgroup.kwin
        if scale_window:
            # scale the FT window to the maximum of |chi| for visibility
            kwin = kwin*max(abs(chi))
        _plot(dgroup.k, kwin+offset, zorder=12, label='window', **opts)
    #endif
    redraw(win=win, xmax=kmax, _larch=_larch)
#enddef
def plot_chir(dgroup, show_mag=True, show_real=False, show_imag=False,
              rmax=None, label=None, title=None, new=True, delay_draw=False,
              offset=0, win=1, _larch=None):
    """
    plot_chir(dgroup, show_mag=True, show_real=False, show_imag=False,
              rmax=None, label=None, new=True, win=1)
    Plot chi(R) for XAFS data group
    Arguments
    ----------
    dgroup      group of XAFS data after xftf() results (see Note 1)
    show_mag    bool whether to plot |chi(R)| [True]
    show_real   bool whether to plot Re[chi(R)] [False]
    show_imag   bool whether to plot Im[chi(R)] [False]
    label       string for label [``None`` to use 'chir']
    title       string for plot title [None, may use filename if available]
    rmax        max R to show [None, end of data]
    new         bool whether to start a new plot [True]
    delay_draw  bool whether to delay draw until more traces are added [False]
    offset      vertical offset to use for y-array [0]
    win         integer plot window to use [1]
    Notes
    -----
     1. The input data group must have the following attributes:
         r, chir_mag, chir_im, chir_re, kweight, filename
    """
    kweight = dgroup.xftf_details.call_args['kweight']
    title = _get_title(dgroup, title=title)
    opts = dict(win=win, show_legend=True, linewidth=3, title=title,
                zorder=20, xmax=rmax, xlabel=plotlabels.r, new=new,
                delay_draw=True, _larch=_larch)
    # bug fix: 'plotlabels.chirlab' is a None placeholder in the Group
    # defined above, so calling it raised TypeError; use the module-level
    # chirlab() helper instead.
    ylabel = chirlab(kweight, show_mag=show_mag,
                     show_real=show_real, show_imag=show_imag)
    opts['ylabel'] = ylabel
    if label is None:
        label = 'chir'
    #endif
    if show_mag:
        _plot(dgroup.r, dgroup.chir_mag+offset, label='%s (mag)' % label, **opts)
        opts['new'] = False
    #endif
    if show_real:
        _plot(dgroup.r, dgroup.chir_re+offset, label='%s (real)' % label, **opts)
        opts['new'] = False
    #endif
    if show_imag:
        _plot(dgroup.r, dgroup.chir_im+offset, label='%s (imag)' % label, **opts)
    #endif
    redraw(win=win, xmax=rmax, _larch=_larch)
#enddef
def plot_chifit(dataset, kmin=0, kmax=None, kweight=None, rmax=None,
                show_mag=True, show_real=False, show_imag=False,
                title=None, new=True, delay_draw=False, offset=0, win=1,
                _larch=None):
    """
    plot_chifit(dataset, kmin=0, kmax=None, rmax=None,
                show_mag=True, show_real=False, show_imag=False,
                new=True, win=1)
    Plot k-weighted chi(k) and chi(R) for fit to feffit dataset
    Arguments
    ----------
    dataset      feffit dataset, after running feffit()
    kmin         min k to show [0]
    kmax         max k to show [None, end of data]
    kweight      kweight to show [None, taken from dataset]
    rmax         max R to show [None, end of data]
    show_mag     bool whether to plot |chi(R)| [True]
    show_real    bool whether to plot Re[chi(R)] [False]
    show_imag    bool whether to plot Im[chi(R)] [False]
    title        string for plot title [None, may use filename if available]
    new          bool whether to start a new plot [True]
    delay_draw   bool whether to delay draw until more traces are added [False]
    offset       vertical offset to use for y-array [0]
    win          integer plot window to use [1]
    """
    if kweight is None:
        kweight = dataset.transform.kweight
    #endif
    # a multi-kweight fit stores a sequence; plot with the first kweight
    if isinstance(kweight, (list, tuple, np.ndarray)): kweight=kweight[0]
    data_chik = dataset.data.chi * dataset.data.k**kweight
    model_chik = dataset.model.chi * dataset.model.k**kweight
    title = _get_title(dataset, title=title)
    opts = dict(labelfontsize=10, legendfontsize=10, linewidth=3,
                show_legend=True, delay_draw=True, win=win, title=title,
                _larch=_larch)
    # k-weighted chi(k) in first plot window
    _plot(dataset.data.k, data_chik+offset, xmin=kmin, xmax=kmax,
          xlabel=plotlabels.k, ylabel=plotlabels.chikw.format(kweight),
          label='data', new=new, **opts)
    _plot(dataset.model.k, model_chik+offset, label='fit', **opts)
    redraw(win=win, xmin=kmin, xmax=kmax, _larch=_larch)
    # show chi(R) in next plot window
    opts['win'] = win = win+1
    # bug fix: plotlabels.chirlab is a None placeholder; call the
    # module-level chirlab() helper instead.
    ylabel = chirlab(kweight, show_mag=show_mag,
                     show_real=show_real, show_imag=show_imag)
    opts.update(dict(xlabel=plotlabels.r, ylabel=ylabel,
                     xmax=rmax, new=True, show_legend=True))
    if show_mag:
        _plot(dataset.data.r, dataset.data.chir_mag+offset,
              label='|data|', **opts)
        opts['new'] = False
        _plot(dataset.model.r, dataset.model.chir_mag+offset,
              label='|fit|', **opts)
    #endif
    if show_real:
        _plot(dataset.data.r, dataset.data.chir_re+offset, label='Re[data]', **opts)
        opts['new'] = False
        _plot(dataset.model.r, dataset.model.chir_re+offset, label='Re[fit]', **opts)
    #endif
    if show_imag:
        # bug fix: these two calls used the undefined bare name 'plot'
        _plot(dataset.data.r, dataset.data.chir_im+offset, label='Im[data]', **opts)
        opts['new'] = False
        _plot(dataset.model.r, dataset.model.chir_im+offset, label='Im[fit]', **opts)
    #endif
    # bug fix: the R-space window is limited by rmax, not kmax
    redraw(win=win, xmax=rmax, _larch=_larch)
#enddef
def plot_path_k(dataset, ipath=0, kmin=0, kmax=None, offset=0, label=None,
                new=False, delay_draw=False, win=1, _larch=None, **kws):
    """
    plot_path_k(dataset, ipath, kmin=0, kmax=None, offset=0,
                label=None, new=False, win=1, **kws)
    Plot k-weighted chi(k) for a single Path of a feffit dataset
    Arguments
    ----------
    dataset      feffit dataset, after running feffit()
    ipath        index of path, starting count at 0 [0]
    kmin         min k to show [0]
    kmax         max k to show [None, end of data]
    offset       vertical offset to use for plot [0]
    label        path label ['path %d' % ipath]
    new          bool whether to start a new plot [True]
    delay_draw   bool whether to delay draw until more traces are added [False]
    win          integer plot window to use [1]
    kws          additional keyword arguments are passed to plot()
    """
    kweight = dataset.transform.kweight
    path = dataset.pathlist[ipath]
    # labels are 1-based for display even though ipath is 0-based
    if label is None: label = 'path %i' % (1+ipath)
    chi_kw = offset + path.chi * path.k**kweight
    _plot(path.k, chi_kw, label=label, xmin=kmin, xmax=kmax,
          xlabel=plotlabels.k, ylabel=plotlabels.chikw.format(kweight),
          win=win, new=new, delay_draw=delay_draw, _larch=_larch, **kws)
    # NOTE(review): this guard looks inverted -- redraw is normally done
    # when drawing is NOT delayed; 'redraw' is also undefined in this
    # module (work-in-progress, per the module docstring).
    if delay_draw:
        redraw(win=win, xmin=kmin, xmax=kmax, _larch=_larch)
#enddef
def plot_path_r(dataset, ipath, rmax=None, offset=0, label=None,
                show_mag=True, show_real=False, show_imag=True,
                new=False, delay_draw=False, win=1, _larch=None,
                **kws):
    """
    plot_path_r(dataset, ipath, rmax=None, offset=0, label=None,
                show_mag=True, show_real=False, show_imag=True,
                new=False, win=1, **kws)
    Plot chi(R) for a single Path of a feffit dataset
    Arguments
    ----------
    dataset      feffit dataset, after running feffit()
    ipath        index of path, starting count at 0 [0]
    rmax         max R to show [None, end of data]
    offset       vertical offset to use for plot [0]
    label        path label ['path %d' % ipath]
    show_mag     bool whether to plot |chi(R)| [True]
    show_real    bool whether to plot Re[chi(R)] [False]
    show_imag    bool whether to plot Im[chi(R)] [True]
    new          bool whether to start a new plot [False]
    delay_draw   bool whether to delay draw until more traces are added [False]
    win          integer plot window to use [1]
    kws          additional keyword arguments are passed to plot()
    """
    path = dataset.pathlist[ipath]
    if label is None:
        label = 'path %i' % (1+ipath)
    #endif
    kweight = dataset.transform.kweight
    # bug fix: 'plotlabels.chirlab' is a None placeholder, so calling it
    # raised TypeError; use the module-level chirlab() helper instead.
    ylabel = chirlab(kweight, show_mag=show_mag,
                     show_real=show_real, show_imag=show_imag)
    opts = dict(xlabel=plotlabels.r, ylabel=ylabel, xmax=rmax, new=new,
                delay_draw=True, _larch=_larch)
    opts.update(kws)
    # NOTE(review): all selected traces share the same legend label.
    if show_mag:
        _plot(path.r, offset+path.chir_mag, label=label, **opts)
        opts['new'] = False
    #endif
    if show_real:
        _plot(path.r, offset+path.chir_re, label=label, **opts)
        opts['new'] = False
    #endif
    if show_imag:
        _plot(path.r, offset+path.chir_im, label=label, **opts)
        opts['new'] = False
    #endif
    redraw(win=win, xmax=rmax, _larch=_larch)
#enddef
def plot_paths_k(dataset, offset=-1, kmin=0, kmax=None, title=None,
                 new=True, delay_draw=False, win=1, _larch=None, **kws):
    """
    plot_paths_k(dataset, offset=-1, kmin=0, kmax=None, new=True, win=1, **kws):
    Plot k-weighted chi(k) for model and all paths of a feffit dataset
    Arguments
    ----------
    dataset      feffit dataset, after running feffit()
    kmin         min k to show [0]
    kmax         max k to show [None, end of data]
    offset       vertical offset to use for paths for plot [-1]
    new          bool whether to start a new plot [True]
    title        string for plot title [None, may use filename if available]
    win          integer plot window to use [1]
    delay_draw   bool whether to delay draw until more traces are added [False]
    kws          additional keyword arguments are passed to plot()
    """
    # make k-weighted chi(k)
    kweight = dataset.transform.kweight
    model = dataset.model
    model_chi_kw = model.chi * model.k**kweight
    title = _get_title(dataset, title=title)
    # bug fix: this is a k-space plot, so the x-axis label is
    # plotlabels.k (it previously used plotlabels.r).
    _plot(model.k, model_chi_kw, title=title, label='sum', new=new,
          xlabel=plotlabels.k, ylabel=plotlabels.chikw.format(kweight),
          xmin=kmin, xmax=kmax, win=win, delay_draw=True, _larch=_larch,
          **kws)
    # overlay each individual path, stepped down by 'offset' per path
    for ipath in range(len(dataset.pathlist)):
        plot_path_k(dataset, ipath, offset=(ipath+1)*offset,
                    kmin=kmin, kmax=kmax, new=False, delay_draw=True,
                    win=win, _larch=_larch)
    #endfor
    redraw(win=win, xmin=kmin, xmax=kmax, _larch=_larch)
#enddef
def plot_paths_r(dataset, offset=-0.25, rmax=None, show_mag=True,
                 show_real=False, show_imag=False, title=None, new=True,
                 win=1, delay_draw=False, _larch=None, **kws):
    """
    plot_paths_r(dataset, offset=-0.25, rmax=None, show_mag=True, show_real=False,
                 show_imag=False, new=True, win=1, **kws):
    Plot chi(R) for model and all paths of a feffit dataset
    Arguments
    ----------
    dataset      feffit dataset, after running feffit()
    offset       vertical offset to use for paths for plot [-0.25]
    rmax         max R to show [None, end of data]
    show_mag     bool whether to plot |chi(R)| [True]
    show_real    bool whether to plot Re[chi(R)] [False]
    show_imag    bool whether to plot Im[chi(R)] [False]
    title        string for plot title [None, may use filename if available]
    new          bool whether to start a new plot [True]
    delay_draw   bool whether to delay draw until more traces are added [False]
    win          integer plot window to use [1]
    kws          additional keyword arguments are passed to plot()
    """
    kweight = dataset.transform.kweight
    model = dataset.model
    # bug fix: 'plotlabels.chirlab' is a None placeholder, so calling it
    # raised TypeError; use the module-level chirlab() helper instead.
    ylabel = chirlab(kweight, show_mag=show_mag,
                     show_real=show_real, show_imag=show_imag)
    title = _get_title(dataset, title=title)
    # bug fix: include win so the per-path plots land in the same window
    # as the sum (plot_path_r previously fell back to its default win=1).
    opts = dict(xlabel=plotlabels.r, ylabel=ylabel, xmax=rmax, new=new,
                delay_draw=True, title=title, win=win, _larch=_larch)
    opts.update(kws)
    if show_mag:
        _plot(model.r, model.chir_mag, label='|sum|', **opts)
        opts['new'] = False
    #endif
    if show_real:
        _plot(model.r, model.chir_re, label='Re[sum]', **opts)
        opts['new'] = False
    #endif
    if show_imag:
        _plot(model.r, model.chir_im, label='Im[sum]', **opts)
        opts['new'] = False
    #endif
    for ipath in range(len(dataset.pathlist)):
        plot_path_r(dataset, ipath, offset=(ipath+1)*offset,
                    show_mag=show_mag, show_real=show_real,
                    show_imag=show_imag, **opts)
    #endfor
    redraw(win=win, xmax=rmax, _larch=_larch)
#enddef
def extend_plotrange(x, y, xmin=None, xmax=None, extend=0.10):
    """return plot limits to extend a plot range for x, y pairs

    The requested [xmin, xmax] window (defaulting to the full data range,
    padded by +/-5 and clipped to the data) is mapped to index space with
    index_of(); the returned (xlo, xhi, ylo, yhi) limits are widened by
    *extend* times the spanned range on each side.
    """
    xeps = min(np.diff(x)) / 5.
    lo = min(x) if xmin is None else xmin
    hi = max(x) if xmax is None else xmax
    lo = max(min(x), lo - 5)
    hi = min(max(x), hi + 5)
    i0 = index_of(x, lo + xeps)
    i1 = index_of(x, hi + xeps) + 1
    xspan, yspan = x[i0:i1], y[i0:i1]
    span_x = max(xspan) - min(xspan)
    span_y = max(yspan) - min(yspan)
    return (min(xspan) - extend * span_x,
            max(xspan) + extend * span_x,
            min(yspan) - extend * span_y,
            max(yspan) + extend * span_y)
def plot_prepeaks_baseline(dgroup, subtract_baseline=False, show_fitrange=True,
                           show_peakrange=True, win=1, _larch=None, **kws):
    """Plot pre-edge peak baseline fit, as from `pre_edge_baseline` or XAS Viewer

    dgroup must have a 'prepeaks' attribute
    """
    if not hasattr(dgroup, 'prepeaks'):
        raise ValueError('Group needs prepeaks')
    #endif
    ppeak = dgroup.prepeaks
    # padded plot limits around the fit window [emin, emax]
    px0, px1, py0, py1 = extend_plotrange(dgroup.xdat, dgroup.ydat,
                                          xmin=ppeak.emin, xmax=ppeak.emax)
    title = "pre_edge baseline\n %s" % dgroup.filename
    popts = dict(xmin=px0, xmax=px1, ymin=py0, ymax=py1, title=title,
                 xlabel='Energy (eV)', ylabel='mu (normalized)', delay_draw=True,
                 show_legend=True, style='solid', linewidth=3,
                 label='data', new=True,
                 marker='None', markersize=4, win=win, _larch=_larch)
    popts.update(kws)
    ydat = dgroup.ydat
    xdat = dgroup.xdat
    if subtract_baseline:
        # NOTE(review): this branch plots ppeak.baseline itself under the
        # label 'baseline subtracted peaks' -- it looks like it should plot
        # the data *minus* the baseline; confirm against pre_edge_baseline.
        xdat = ppeak.energy
        ydat = ppeak.baseline
        popts['label'] = 'baseline subtracted peaks'
        _plot(xdat, ydat, **popts)
    else:
        _plot(xdat, ydat, **popts)
        popts['new'] = False
        popts['label'] = 'baseline'
        # NOTE(review): _oplot (and _plot_axvline/_plot_marker/redraw
        # below) are not defined in this module -- wxlib leftovers.
        _oplot(ppeak.energy, ppeak.baseline, **popts)
    popts = dict(win=win, _larch=_larch, delay_draw=True,
                 label='_nolegend_')
    if show_fitrange:
        # vertical guides at the fit range and at the peak centroid
        for x in (ppeak.emin, ppeak.emax):
            _plot_axvline(x, color='#DDDDCC', **popts)
        _plot_axvline(ppeak.centroid, color='#EECCCC', **popts)
    if show_peakrange:
        # mark the data points closest to the peak range [elo, ehi]
        for x in (ppeak.elo, ppeak.ehi):
            y = ydat[index_of(xdat, x)]
            _plot_marker(x, y, color='#222255', marker='o', size=8, **popts)
    redraw(win=win, xmin=px0, xmax=px1, ymin=py0, ymax=py1,
           show_legend=True, _larch=_larch)
#enddef
def plot_prepeaks_fit(dgroup, nfit=0, show_init=False, subtract_baseline=False,
                      show_residual=False, win=1, _larch=None):
    """plot pre-edge peak fit, as from XAS Viewer

    dgroup must have a 'peakfit_history' attribute; nfit selects which
    entry of prepeaks.fit_history to show (0 = most recent), show_init
    plots the initial model instead of a fit result, subtract_baseline
    removes the background components from data and fit, and
    show_residual uses a stacked data/fit/residual layout.
    """
    if not hasattr(dgroup, 'prepeaks'):
        raise ValueError('Group needs prepeaks')
    #endif
    if show_init:
        result = dgroup.prepeaks
    else:
        result = getattr(dgroup.prepeaks, 'fit_history', None)
        # NOTE(review): guard looks off by one -- nfit == len(result)
        # would still raise IndexError below ('>=' seems intended).
        if nfit > len(result):
            nfit = 0
        result = result[nfit]
    #endif
    if result is None:
        raise ValueError('Group needs prepeaks.fit_history or init_fit')
    #endif
    opts = result.user_options
    # NOTE(review): xeps is computed but never used in this function.
    xeps = min(np.diff(dgroup.xdat)) / 5.
    # multiply by 1.0 to force copies, so the baseline subtraction below
    # cannot modify the arrays stored on the group/result
    xdat = 1.0*result.energy
    ydat = 1.0*result.norm
    xdat_full = 1.0*dgroup.xdat
    ydat_full = 1.0*dgroup.ydat
    if show_init:
        yfit = 1.0*result.init_fit
        ycomps = None
        ylabel = 'model'
    else:
        yfit = 1.0*result.best_fit
        ycomps = result.ycomps
        ylabel = 'best fit'
    # sum the model's background components into a baseline curve
    baseline = 0.*ydat
    if ycomps is not None:
        for label, ycomp in ycomps.items():
            if label in opts['bkg_components']:
                baseline += ycomp
    plotopts = dict(title='%s:\npre-edge peak' % dgroup.filename,
                    xlabel='Energy (eV)', ylabel=opts['array_desc'],
                    delay_draw=True, show_legend=True, style='solid',
                    linewidth=3, marker='None', markersize=4)
    if subtract_baseline:
        ydat -= baseline
        yfit -= baseline
        ydat_full = 1.0*ydat
        xdat_full = 1.0*xdat
        plotopts['ylabel'] = '%s-baseline' % plotopts['ylabel']
    # padded plot limits for the full data (d*) and the fit window (f*)
    dx0, dx1, dy0, dy1 = extend_plotrange(xdat_full, ydat_full,
                                          xmin=opts['emin'], xmax=opts['emax'])
    fx0, fx1, fy0, fy1 = extend_plotrange(xdat, yfit,
                                          xmin=opts['emin'], xmax=opts['emax'])
    ncolor = 0
    popts = {'win': win, '_larch': _larch}
    plotopts.update(popts)
    # NOTE(review): _fitplot/_plot/_oplot/_plot_axvline/redraw below follow
    # the old larch.wxlib plotter API and are not all defined here.
    if show_residual:
        popts['stacked'] = True
        _fitplot(xdat, ydat, yfit, label='data', label2=ylabel, **plotopts)
    else:
        _plot(xdat_full, ydat_full, new=True, label='data',
              color=LineColors[0], **plotopts)
        _oplot(xdat, yfit, label=ylabel, color=LineColors[1], **plotopts)
        ncolor = 1
    if ycomps is not None:
        ncomps = len(ycomps)
        if not subtract_baseline:
            ncolor += 1
            _oplot(xdat, baseline, label='baseline', delay_draw=True,
                   style='short dashed', marker='None', markersize=5,
                   color=LineColors[ncolor], **popts)
        for icomp, label in enumerate(ycomps):
            ycomp = ycomps[label]
            # background components are already shown via 'baseline'
            if label in opts['bkg_components']:
                continue
            # cycle through the 10 entries of LineColors
            ncolor = (ncolor+1) % 10
            _oplot(xdat, ycomp, label=label, delay_draw=(icomp != ncomps-1),
                   style='short dashed', marker='None', markersize=5,
                   color=LineColors[ncolor], **popts)
    if opts['show_fitrange']:
        for attr in ('emin', 'emax'):
            _plot_axvline(opts[attr], ymin=0, ymax=1,
                          delay_draw=False, color='#DDDDCC',
                          label='_nolegend_', **popts)
    if opts['show_centroid']:
        # prefer the fitted centroid parameter when one is present
        pcen = getattr(dgroup.prepeaks, 'centroid', None)
        if hasattr(result, 'params'):
            pcen = result.params.get('fit_centroid', None)
            if pcen is not None:
                pcen = pcen.value
        if pcen is not None:
            _plot_axvline(pcen, delay_draw=False, ymin=0, ymax=1,
                          color='#EECCCC', label='_nolegend_', **popts)
    redraw(win=win, xmin=dx0, xmax=dx1, ymin=min(dy0, fy0),
           ymax=max(dy1, fy1), show_legend=True, _larch=_larch)
def _pca_ncomps(result, min_weight=0, ncomps=None):
if ncomps is None:
if min_weight > 1.e-12:
ncomps = np.where(result.variances < min_weight)[0][0]
else:
ncomps = np.argmin(result.ind)
return ncomps - 1
def plot_pca_components(result, min_weight=0, ncomps=None, win=1, _larch=None, **kws):
    """Plot components from PCA result

    result must be output of `pca_train`; min_weight/ncomps select how
    many components count as significant (see _pca_ncomps).
    """
    title = "PCA components"
    popts = dict(xmin=result.xmin, xmax=result.xmax, title=title,
                 xlabel=plotlabels.energy, ylabel=plotlabels.norm,
                 delay_draw=True, show_legend=True, style='solid',
                 linewidth=3, new=True, marker='None', markersize=4,
                 win=win, _larch=_larch)
    popts.update(kws)
    ncomps = _pca_ncomps(result, min_weight=min_weight, ncomps=ncomps)
    # NOTE(review): _plot here is called with the wxlib plotter API; _oplot
    # and redraw are not defined in this module (work-in-progress).
    _plot(result.x, result.mean, label='Mean', **popts)
    for i, comp in enumerate(result.components[:ncomps+1]):
        # label shows 1-based component number and its variance fraction
        label = 'Comp# %d (%.4f)' % (i+1, result.variances[i])
        _oplot(result.x, comp, label=label, **popts)
    redraw(win=win, show_legend=True, _larch=_larch)
def plot_pca_weights(result, min_weight=0, ncomps=None, win=1, _larch=None, **kws):
    """Plot component weights from PCA result (aka SCREE plot)

    result must be output of `pca_train`.  Significant components are
    drawn solid, the insignificant tail dashed, and the indicator (IND)
    function is drawn against the right-hand axis.
    """
    max_comps = len(result.components)
    title = "PCA Variances (SCREE) and Indicator Values"
    popts = dict(title=title, xlabel='Component #', zorder=10,
                 xmax=max_comps+1.5, xmin=0.25, ymax=1, ylabel='variance',
                 style='solid', ylog_scale=True, show_legend=True,
                 linewidth=1, new=True, marker='o', win=win, _larch=_larch)
    popts.update(kws)
    ncomps = _pca_ncomps(result, min_weight=min_weight, ncomps=ncomps)
    # significant components: 1-based x positions
    x = 1 + np.arange(ncomps)
    y = result.variances[:ncomps]
    _plot(x, y, label='significant', **popts)
    # overlap the dashed tail by one point so the two curves connect
    xe = 1 + np.arange(ncomps-1, max_comps)
    ye = result.variances[ncomps-1:ncomps+max_comps]
    popts.update(dict(new=False, zorder=5, style='short dashed',
                      color='#B34050', ymin=2.e-3*result.variances[ncomps]))
    _plot(xe, ye, label='not significant', **popts)
    # indicator values on the secondary (right) y-axis
    xi = 1 + np.arange(len(result.ind)-2)
    _plot(xi, result.ind[1:len(xi)+1], zorder=15, y2label='Indicator Value',
          label='IND', style='solid', win=win, show_legend=True,
          linewidth=1, marker='o', side='right', _larch=_larch)
def plot_pca_fit(dgroup, win=1, with_components=False, _larch=None, **kws):
    """Plot data and fit result from pca_fit, which rom PCA result

    result must be output of `pca_fit`; with_components additionally
    overlays the PCA mean and each weighted component.
    """
    title = "PCA fit: %s" % (dgroup.filename)
    result = dgroup.pca_result
    model = result.pca_model
    popts = dict(xmin=model.xmin, xmax=model.xmax, title=title,
                 xlabel=plotlabels.energy, ylabel=plotlabels.norm,
                 delay_draw=True, show_legend=True, style='solid',
                 linewidth=3, new=True, marker='None', markersize=4,
                 stacked=True, win=win, _larch=_larch)
    popts.update(kws)
    # NOTE(review): _fitplot, _getDisplay and redraw are not defined in
    # this module -- larch.wxlib leftovers (work-in-progress).
    _fitplot(result.x, result.ydat, result.yfit,
             label='data', label2='PCA fit', **popts)
    disp = _getDisplay(win=win, stacked=True, _larch=_larch)
    if with_components and disp is not None:
        disp.panel.oplot(result.x, model.mean, label='mean')
        for n in range(len(result.weights)):
            # each component scaled by its fitted weight
            cval = model.components[n]*result.weights[n]
            disp.panel.oplot(result.x, cval, label='Comp #%d' % (n+1))
    redraw(win=win, show_legend=True, stacked=True, _larch=_larch)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-msg=C0103
################################################################################
# Copyright (c) 2006-2017 Franz Inc.
# All rights reserved. This program and the accompanying materials are
# made available under the terms of the MIT License which accompanies
# this distribution, and is available at http://opensource.org/licenses/MIT
################################################################################
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from builtins import map
from builtins import object
from future import standard_library
standard_library.install_aliases()
import os
from ..exceptions import ServerException
from ..repository.repository import Repository, RepositoryConnection
from ...miniclient import repository as miniserver
import re, urllib.request, urllib.parse, urllib.error
from . import spec
from past.builtins import basestring
# Name of the read-only access option.
READ_ONLY = 'READ_ONLY'
# Maps legal option names to the expected type of their values.
LEGAL_OPTION_TYPES = {READ_ONLY: bool,}
# Alias accepted by AllegroGraphServer.openCatalog() for the root catalog.
ROOT_CATALOG_NAME = 'root'
class AllegroGraphServer(object):
    """
    The AllegroGraphServer object represents a remote AllegroGraph server on
    the network. It is used to inventory and access the catalogs of that
    server.
    """
    def __init__(self, host=None, port=None, user=None, password=None,
                 cainfo=None, sslcert=None, verifyhost=None, verifypeer=None,
                 protocol=None,
                 proxy=os.environ.get('AGRAPH_PROXY'),
                 **options):
        """
        Define the connection to the AllegroGraph HTTP server.
        Pass either ``user`` and ``password`` for Basic Authentication or
        ``cainfo``, ``sslcert`` values for client X.509 certificate
        authentication.
        :param host: Address of the AllegroGraph server to connect to.
                     This can also include protocol and port
                     (e.g. http://localhost:10035). Can also be specified in
                     the ``AGRAPH_HOST`` environment variable.
                     The default value is ``'127.0.0.1'``.
        :type host: string
        :param port: Port on which the server is listening.
                     The default is 10035 if ``protocol`` is ``'http'``
                     and 10036 if it is ``'https'``. Can also be specified in
                     the ``AGRAPH_PORT`` environment variable.
                     If passed explicitly this parameter overrides any value
                     that might have been specified as a part of the ``host`` string.
        :type port: int
        :param protocol: Connection protocol, either ``'http'`` or ``'https'``.
                         The default is ``'http'`` if no SSL parameters are set
                         and ``'https'`` otherwise.
                         If passed explicitly this parameter overrides any value
                         that might have been specified as a part of the ``host`` string.
        :type protocol: string
        :param user: Username (when using Basic authentication). Can also be specified in
                     the ``AGRAPH_USER`` environment variable.
        :type user: string
        :param password: Password (when using Basic authentication). Can also be specified in
                         the ``AGRAPH_PASSWORD`` environment variable.
        :type password: string
        :param cainfo: Path to a file or directory containing CA certificates that
                       will be used to validate the server's certificate.
        :type cainfo: string
        :param sslcert: Client certificate path (when using SSL authentication).
        :type sslcert: string
        :param verifyhost: If set to ``0`` it will not be an error if the server's
                           SSL certificate does not match the server's address.
                           The default value is ``2``, meaning that the host name will
                           be validated against the certificate.
                           .. seealso:: https://curl.haxx.se/libcurl/c/CURLOPT_SSL_VERIFYHOST.html
        :type verifyhost: int
        :param verifypeer: If set to ``1`` (the default) the validity of the server's
                           SSL certificate will be checked. Set to ``0`` to disable
                           the validation.
                           .. seealso:: https://curl.haxx.se/libcurl/c/CURLOPT_SSL_VERIFYPEER.html
        :type verifypeer: int
        :param proxy: Proxy specification string. The format is SCHEME://HOST:PORT.
                      Supported schemes are 'http', 'socks4' and 'socks5'.
                      Note that for SOCKS proxies DNS requests are performed by the
                      proxy server.
                      The default value is taken from the AGRAPH_PROXY environment
                      variable.
        :type proxy: string
        :param options: Ignored.
        """
        # Not sure why we accept these, but don't want to change the API at this point.
        del options
        host = host or os.environ.get('AGRAPH_HOST', '127.0.0.1')
        # Check if other arguments were passed as a part of host
        match = re.match(r'^(?:(?P<protocol>https?)://)?'
                         r'(?P<host>[^:]*)(?::(?P<port>[0-9]*))?(?P<tail>.*)$',
                         host)
        if match:
            # Explicit arguments win over components parsed out of ``host``.
            if protocol is None:
                protocol = match.group('protocol')
            if port is None and match.group('port') is not None:
                port = int(match.group('port'))
            host = match.group('host')
            tail = match.group('tail')
        else:
            tail = ''
        # Any SSL-related argument implies an https default.
        has_https_params = cainfo or sslcert or verifyhost is not None or verifypeer is not None
        if protocol is None:
            protocol = 'https' if has_https_params else 'http'
        if port is None:
            if 'AGRAPH_PORT' in os.environ:
                port = int(os.environ.get('AGRAPH_PORT'))
            else:
                port = 10035 if protocol == 'http' else 10036
        uri = '{protocol}://{host}:{port}{tail}'.format(protocol=protocol, host=host, port=port, tail=tail)
        user = user or os.environ.get('AGRAPH_USER')
        password = password or os.environ.get('AGRAPH_PASSWORD')
        self._client = miniserver.Client(uri, user, password, cainfo, sslcert, verifyhost, verifypeer,
                                         proxy=proxy)

    @property
    def url(self):
        """Return the server's URL."""
        return self._client.url

    @property
    def version(self):
        """Return the server's version as a string."""
        return self._client.getVersion()

    @property
    def versionTuple(self):
        """
        Return the version number as a tuple of integers.
        All non-digit characters from the version string are ignored
        and act only as separators. All trailing zeros are skipped.
        This is compatible with the way in which the Java client
        parses and compares AG version strings.
        """
        components = [int(c) for c in re.findall(r'\d+', self.version)]
        while components and components[-1] == 0:
            components.pop()
        return tuple(components)

    def listCatalogs(self):
        """
        Get the list of catalogs on this server.
        A value of ``None`` will be included in this list to
        represent the root catalog.
        :return: A list of catalog names.
        :rtype: list[string]
        """
        catalogs = self._client.listCatalogs()
        return catalogs

    def openCatalog(self, name=None):
        """
        Open a catalog by name. Pass None to open the root catalog.
        :param name: One of the catalog names from :meth:`listCatalogs`
                     or ``None`` to open the root catalog.
        :return: A catalog object.
        :rtype: Catalog
        """
        # Allow for 'root', None and '' (or anything else that evaluates to
        # False) to mean the root catalog.
        if not name or name == ROOT_CATALOG_NAME:
            name = None
        cats = self.listCatalogs()
        if name not in cats:
            raise ServerException("There is no catalog named '%s' (found %s)"
                                  % (name, cats))
        return Catalog(name, self, self._client)

    def getInitfile(self):
        """
        Retrieve the contents of the server initialization file.
        The initialization file is a collection of Common Lisp code
        that is executed in every back-end as it is created.
        :return: Init file content.
        :rtype: string
        """
        return self._client.getInitfile()

    def setInitfile(self, content=None, restart=True):
        """
        Replace the current initialization file contents with the
        ``content`` string or remove if None.
        :param content: New init file content.
        :type content: string
        :param restart: specifies whether any running shared back-ends should
            be shut down, so that subsequent requests will be handled by
            back-ends that include the new code. Default is ``True``.
        """
        return self._client.setInitfile(content, restart)

    def openSession(self, spec, autocommit=False, lifetime=None, loadinitfile=False):
        """
        Open a session on a federated, reasoning, or filtered store.
        Use the helper functions in the :mod:`franz.openrdf.sail.spec` module
        to create the spec string.
        :param spec: Session :mod:`specification string <franz.openrdf.sail.spec>`.
        :type spec: string
        :param autocommit: Autocommit mode (default: ``False`` = start a transaction).
        :type autocommit: bool
        :param lifetime: The number of seconds a session can be idle before being terminated.
                         The default value is 300 (5 minutes). The maximum acceptable value
                         is 21600 (6 hours).
        :param loadinitfile: If ``True`` the init file will be loaded into the new session.
                             The default is False.
        :type loadinitfile: bool
        :return: A connection to the new session.
        :rtype: RepositoryConnection
        """
        minirep = self._client.openSession(spec, autocommit=autocommit, lifetime=lifetime, loadinitfile=loadinitfile)
        return RepositoryConnection(Repository(None, None, minirep), is_session=True)

    def listScripts(self):
        """
        Return the list of Sitescripts currently on the server.
        When a user creates a session, they can choose to load one or more
        of these scripts into the session.
        :return: A list of script names.
        :rtype: list[string]
        """
        return self._client.listScripts()

    def addScript(self, module, code):
        """
        Create or replace a sitescript.
        :param module: Script name.
        :type module: string
        :param code: Script content.
        :type code: string
        """
        return self._client.addScript(module, code)

    def deleteScript(self, module):
        """
        Delete a sitescript.
        :param module: Script name.
        :type module: string
        """
        return self._client.deleteScript(module)

    def getScript(self, module):
        """
        Get the body of a sitescript.
        :param module: Script name.
        :type module: string
        :return: Script content.
        :rtype: string
        """
        return self._client.getScript(module)

    def getUserData(self, key):
        """
        Retrieve user data value with given key.
        Users can store arbitrary strings on the server
        using :meth:`setUserData`.
        :param key: Value identifier.
        :type key: str
        :return: Stored string or None if there is no data under specified key.
        :rtype: str|None
        """
        return self._client.getUserData(key)

    def setUserData(self, key, data):
        """
        Set user data with given key.
        This can be used to store arbitrary strings on the server.
        :param key: Value identifier.
        :type key: str
        :param data: Value to be stored.
        :type data: str
        """
        self._client.setUserData(key, data)

    def deleteUserData(self, key):
        """
        Remove user data from the server.
        This removes a value set with :meth:`setUserData`
        :param key: Value identifier.
        :type key: str
        """
        self._client.deleteUserData(key)

    def listUserData(self):
        """
        Get all used keys from the user data store on the server.
        :return: A list of key names.
        :rtype: list[str]
        """
        return self._client.listUserData()

    def openFederated(self, repositories, autocommit=False, lifetime=None, loadinitfile=False):
        """
        Open a session that federates several repositories. The
        repositories argument must be an array containing store
        designators, which can be Repository or RepositoryConnection
        objects, strings (naming a store in the root catalog, or the
        URL of a store), or (storename, catalogname) tuples.
        :param repositories: List of repositories to federate.
        :type repositories: list[string|(string, string)|Repository|RepositoryConnection]
        :param autocommit: Autocommit mode (default: ``False`` = start a transaction).
        :type autocommit: bool
        :param lifetime: The number of seconds a session can be idle before being terminated.
                         The default value is 300 (5 minutes). The maximum acceptable value
                         is 21600 (6 hours).
        :param loadinitfile: If ``True`` the init file will be loaded into the new session.
                             The default is False.
        :type loadinitfile: bool
        :return: A connection to the new session.
        :rtype: RepositoryConnection
        """
        def asRepoString(x):
            # Normalize any accepted designator into a session spec string.
            if isinstance(x, basestring): return spec.local(x)
            elif isinstance(x, tuple): return spec.local(x[0], x[1])
            elif isinstance(x, Repository): return x.getSpec()
            elif isinstance(x, RepositoryConnection):
                if x.repository.database_name is None:
                    raise ServerException(str(x) + " is not a RepositoryConnection created by Repository.getConnection() and therefore cannot be federated")
                return x.getSpec()
            else: raise TypeError(str(x) + " is not a valid repository specification.")
        return self.openSession(spec.federate(*list(map(asRepoString, repositories))), autocommit, lifetime, loadinitfile)

    def listUsers(self):
        """
        Returns a list of names of all the users that have been defined.
        """
        return self._client.listUsers()

    def addUser(self, name, password=None):
        """
        Create a new user. Expects a password parameter, which specifies the
        user's password (can be left off when creating the anonymous user).
        """
        assert password is not None or name == 'anonymous'
        self._client.addUser(name, password)

    def deleteUser(self, name):
        """
        Delete a user.
        """
        self._client.deleteUser(name)

    def changeUserPassword(self, name, password):
        """
        Change the password for the given user.
        """
        self._client.changeUserPassword(name, password)

    def listUserAccess(self, name):
        """
        Retrieve the read/write access for a user. This returns a result set,
        each element of which has a read, write, catalog, and repository
        component. The first two are booleans, the latter two strings. For
        permissions granted globally, catalog and repository will have a
        value of "*", for those granted per-catalog, only repository will
        be "*". catalog normally contains the catalog name, but for the root
        catalog "/" is used.
        """
        return self._client.listUserAccess(name)

    def addUserAccess(self, name, read, write, catalog='*', repository='*'):
        """
        Grant read/write access to a user.
        :param read: Whether to grant read access. Defaults to ``False``.
        :type read: bool
        :param write: Whether to grant write access. Defaults to ``False``.
        :type write: bool
        :param catalog: Which catalog to grant the access on. Leave off or pass ``"*"`` to grant
                        access on all catalogs. Use ``"/"`` for the root catalog.
        :type catalog: string
        :param repository: Specifies the repository that access is granted on. Passing ``"*"``,
                           or leaving the parameter off, means all repositories in the
                           given catalog.
        :type repository: string
        """
        self._client.addUserAccess(name, read, write, catalog, repository)

    def deleteUserAccess(self, name, read, write, catalog='*', repository='*'):
        """
        Takes the same parameters as PUT on this URL, but revokes the access instead of granting it.
        """
        self._client.deleteUserAccess(name, read, write, catalog, repository)

    def listUserEffectiveAccess(self, name):
        """
        Like listUserAccess, but also includes the access granted to roles that this user has.
        """
        return self._client.listUserEffectiveAccess(name)

    def listUserPermissions(self, name):
        """
        List the permission flags that have been assigned to a user (any of
        super, eval, session, replication).
        """
        return self._client.listUserPermissions(name)

    def listUserEffectivePermissions(self, name):
        """
        Retrieve the permission flags assigned to the user, or any of its roles.
        """
        return self._client.listUserEffectivePermissions(name)

    def addUserPermission(self, name, _type):
        """
        Assigns the given permission to this user. type should be super, eval,
        replication, or session.
        """
        self._client.addUserPermission(name, _type)

    def deleteUserPermission(self, name, _type):
        """
        Revokes the given permission for this user.
        """
        self._client.deleteUserPermission(name, _type)

    def listRoles(self):
        """
        Returns the names of all defined roles.
        """
        return self._client.listRoles()

    def addRole(self, role):
        """
        Creates a new role.
        """
        self._client.addRole(role)

    def deleteRole(self, role):
        """
        Deletes a role.
        """
        self._client.deleteRole(role)

    def listRolePermissions(self, role):
        """
        Lists the permission flags granted to a role.
        """
        return self._client.listRolePermissions(role)

    def addRolePermission(self, role, _type):
        """
        Grants a role a certain permission. type should be super, eval, or session.
        """
        self._client.addRolePermission(role, _type)

    def deleteRolePermission(self, role, _type):
        """
        Revokes a permission for a role.
        """
        self._client.deleteRolePermission(role, _type)

    def listRoleAccess(self, role):
        """
        Query the access granted to a role. Returns a result in the same
        format as the equivalent service for users.
        """
        return self._client.listRoleAccess(role)

    def addRoleAccess(self, role, read, write, catalog='*', repository='*'):
        """
        Grant read/write access to a role. See here for the parameters
        that are expected.
        """
        return self._client.addRoleAccess(role, read, write, catalog, repository)

    def deleteRoleAccess(self, role, read, write, catalog='*', repository='*'):
        """
        Revoke read/write access for a role. Accepts the same parameters as above.
        """
        self._client.deleteRoleAccess(role, read, write, catalog, repository)

    def listUserRoles(self, name):
        """
        Retrieves a list of role names for this user name.
        """
        return self._client.listUserRoles(name)

    def addUserRole(self, name, role):
        """
        Add a role to a user.
        """
        self._client.addUserRole(name, role)

    def deleteUserRole(self, name, role):
        """
        Remove a role from a user.
        """
        self._client.deleteUserRole(name, role)

    def listUserSecurityFilters(self, name, _type):
        """
        List security filters for a user.
        _type is one of "allow", "disallow"
        """
        return self._client.listUserSecurityFilters(name, _type)

    def addUserSecurityFilter(self, name, _type, s=None, p=None, o=None, g=None):
        """
        Add a security filter for the user.
        name - user name
        _type - one of 'allow' or 'disallow'
        s - optional subject
        p - optional predicate
        o - optional object
        g - optional graph
        """
        self._client.addUserSecurityFilter(name, _type, s, p, o, g)

    def deleteUserSecurityFilter(self, name, _type, s=None, p=None, o=None, g=None):
        """
        Delete a security filter for the user.
        name - user name
        _type - one of 'allow' or 'disallow'
        s - optional subject
        p - optional predicate
        o - optional object
        g - optional graph
        """
        self._client.deleteUserSecurityFilter(name, _type, s, p, o, g)

    def listRoleSecurityFilters(self, role, _type):
        """
        List security filters for a role.
        _type is one of "allow", "disallow"
        """
        return self._client.listRoleSecurityFilters(role, _type)

    def addRoleSecurityFilter(self, role, _type, s=None, p=None, o=None, g=None):
        """
        Add a security filter for the role.
        role - role name
        _type - one of 'allow' or 'disallow'
        s - optional subject
        p - optional predicate
        o - optional object
        g - optional graph
        """
        self._client.addRoleSecurityFilter(role, _type, s, p, o, g)

    def deleteRoleSecurityFilter(self, role, _type, s=None, p=None, o=None, g=None):
        """
        Delete a security filter for the role.
        role - role name
        _type - one of 'allow' or 'disallow'
        s - optional subject
        p - optional predicate
        o - optional object
        g - optional graph
        """
        self._client.deleteRoleSecurityFilter(role, _type, s, p, o, g)
class Catalog(object):
    """
    Container of multiple repositories (triple stores).
    """
    def __init__(self, name, server, client):
        self.server = server
        self.mini_catalog = client.openCatalogByName(name)
        self._name = name

    def getName(self):
        """Return the catalog name."""
        return self._name

    name = property(getName)

    def listRepositories(self):
        """
        Return a list of names of repositories (triple stores) managed by
        this catalog.
        """
        return self.mini_catalog.listRepositories()

    def deleteRepository(self, name):
        """
        Delete a repository.
        :param name: Repository name.
        :type name: string
        """
        return self.mini_catalog.deleteRepository(name)

    def getRepository(self, name, access_verb):
        """
        Creates or opens a repository.
        :param name: Repository name.
        :type name: string
        :param access_verb: Determines mode of operation. Possible values are:
            - **Repository.RENEW** clears the contents of an existing
              repository before opening. If the indicated repository does not
              exist, it creates one.
            - **Repository.OPEN** opens an existing repository, or throws an
              exception if the repository is not found.
            - **Repository.ACCESS** opens an existing repository, or creates a
              new one if the repository is not found.
            - **Repository.CREATE** creates a new repository, or throws an
              exception if one by that name already exists.
        :return: A repository object.
        :rtype: Repository
        """
        access_verb = access_verb.upper()
        # Repository names appear in URLs, so they must be escaped.
        name = urllib.parse.quote_plus(name)
        exists = name in self.listRepositories()
        if access_verb == Repository.RENEW:
            if exists:
                self.deleteRepository(name)
            return self.createRepository(name)
        if access_verb == Repository.CREATE:
            if exists:
                raise ServerException(
                    "Can't create triple store named '%s' because a store with that name already exists."
                    % name)
            return self.createRepository(name)
        if access_verb == Repository.OPEN:
            if not exists:
                raise ServerException(
                    "Can't open a triple store named '%s' because there is none." % name)
            return Repository(self, name, self.mini_catalog.getRepository(name))
        if access_verb == Repository.ACCESS:
            if not exists:
                return self.createRepository(name)
            return Repository(self, name, self.mini_catalog.getRepository(name))
        # NOTE: an unrecognized access_verb falls through and returns None,
        # preserving the historical behavior of this method.

    def createRepository(self, name, indices=None):
        """
        Creates a new repository with the given name.
        :param name: Repository name.
        :type name: string
        :param indices: If provided, creates a store with the given indices.
                        (e.g. ``["spogi", "gspoi", ...]``)
        :type indices: list[string]
        :return: A repository object.
        :rtype: Repository
        """
        return Repository(self, name, self.mini_catalog.createRepository(name, indices=indices))
| |
"""
Adaptive numerical evaluation of SymPy expressions, using mpmath
for mathematical functions.
"""
import sympy.mpmath.libmp as libmp
from sympy.mpmath import make_mpc, make_mpf, mp, mpc, mpf, nsum, quadts, quadosc
from sympy.mpmath import inf as mpmath_inf
from sympy.mpmath.libmp import (bitcount, from_int, from_man_exp, \
from_rational, fhalf, fnone, fone, fzero, mpf_abs, mpf_add, mpf_atan, \
mpf_atan2, mpf_cmp, mpf_cos, mpf_e, mpf_exp, mpf_log, mpf_lt, mpf_mul, \
mpf_neg, mpf_pi, mpf_pow, mpf_pow_int, mpf_shift, mpf_sin, mpf_sqrt, \
normalize, round_nearest, to_int, to_str)
from sympy.mpmath.libmp.backend import MPZ
from sympy.mpmath.libmp.libmpf import dps_to_prec
from sympy.mpmath.libmp.gammazeta import mpf_bernoulli
import math
from sympify import sympify
from core import C
from singleton import S
from containers import Tuple
# log2(10) (~3.32): the number of bits per decimal digit.
LG10 = math.log(10,2)
# Used in a few places as placeholder values to denote exponents and
# precision levels, e.g. of exact numbers. Must be careful to avoid
# passing these to mpmath functions or returning them in final results.
INF = float(mpmath_inf)
MINUS_INF = float(-mpmath_inf)
# ~= 100 digits. Real men set this to INF.
DEFAULT_MAXPREC = 333
class PrecisionExhausted(ArithmeticError):
    """Raised when a result cannot be computed to the requested accuracy
    within the allowed working precision (see ``check_target``)."""
    pass
#----------------------------------------------------------------------------#
# #
# Helper functions for arithmetic and complex parts #
# #
#----------------------------------------------------------------------------#
"""
An mpf value tuple is a tuple of integers (sign, man, exp, bc)
representing a floating-point number: (-1)**sign*man*2**exp where
bc should correspond to the number of bits used to represent the
mantissa (man) in binary notation, e.g. (0,5,1,3) represents 10::
>>> from sympy.core.evalf import bitcount
>>> n=(-1)**0 * 5 * 2**1; n, bitcount(5)
(10, 3)
A temporary result is a tuple (re, im, re_acc, im_acc) where
re and im are nonzero mpf value tuples representing approximate
numbers, or None to denote exact zeros.
re_acc, im_acc are integers denoting log2(e) where e is the estimated
relative accuracy of the respective complex part, but may be anything
if the corresponding complex part is None.
"""
def fastlog(x):
    """Fast approximation of log2(x) for an mpf value tuple x.

    Computed as exponent plus mantissa bit width, which yields
    ceil(log2(abs(x))) and overestimates by 1 when x is an exact power
    of two (i.e. when the odd mantissa is exactly 1). The exact value
    would be "x[2] + (x[3] if x[1] != 1 else 0)", but since the result
    is only used as an estimate of the number of bits in x, that extra
    test is not worth the cost.

    Since mpf tuples always have an odd mantissa, no check is done
    to see if the mantissa is a multiple of 2 (in which case the
    result would be too large by 1).

    A zero value (falsy x, or fzero) maps to MINUS_INF.

    Example::

        >>> from sympy import log
        >>> from sympy.core.evalf import fastlog, bitcount
        >>> n=(-1)**0*5*2**1; n, (log(n)/log(2)).evalf(), fastlog((0,5,1,bitcount(5)))
        (10, 3.32192809488736, 4)
    """
    if x and x != fzero:
        return x[2] + x[3]
    return MINUS_INF
def complex_accuracy(result):
    """
    Return the relative accuracy of a complex number given the accuracies
    of its real and imaginary parts. Relative accuracy is defined in the
    complex norm sense as ||z|+|error|| / |z| where the error equals
    (real absolute error) + (imag absolute error)*i.

    The (logarithmic) error is approximated by using the max norm in
    place of the complex norm; in the worst case (equal re and im) this
    is off by a factor sqrt(2), i.e. log2(sqrt(2)) = 0.5 bit.
    """
    re, im, re_acc, im_acc = result
    # Parts equal to None denote exact zeros.
    if not re and not im:
        return INF
    if not im:
        return re_acc
    if not re:
        return im_acc
    size_re = fastlog(re)
    size_im = fastlog(im)
    abs_err = max(size_re - re_acc, size_im - im_acc)
    rel_err = abs_err - max(size_re, size_im)
    return -rel_err
def get_abs(expr, prec, options):
    """Evaluate abs(expr) as a (re, im, re_acc, im_acc) tuple."""
    # Two guard bits for the evaluation of the argument.
    re, im, re_acc, im_acc = evalf(expr, prec + 2, options)
    if not re:
        # Purely imaginary: swap so the single nonzero part sits in re.
        re, re_acc, im, im_acc = im, im_acc, re, re_acc
    if not im:
        # Real (or purely imaginary after the swap): plain absolute value.
        return mpf_abs(re), None, re_acc, None
    return libmp.mpc_abs((re, im), prec), None, re_acc, None
def get_complex_part(expr, no, prec, options):
    """no = 0 for real part, no = 1 for imaginary part"""
    workprec = prec
    attempt = 0
    while True:
        # res is (re, im, re_acc, im_acc); the [no::2] slice picks the
        # requested part together with its accuracy.
        value, accuracy = evalf(expr, workprec, options)[no::2]
        if not value or accuracy >= prec:
            return value, None, accuracy, None
        # Not accurate enough yet: retry with exponentially growing precision.
        workprec += max(30, 2 ** attempt)
        attempt += 1
def evalf_abs(expr, prec, options):
    # Absolute value of a single-argument expression: delegate to get_abs.
    return get_abs(expr.args[0], prec, options)
def evalf_re(expr, prec, options):
    # Real part: no=0 selects the real component in get_complex_part.
    return get_complex_part(expr.args[0], 0, prec, options)
def evalf_im(expr, prec, options):
    # Imaginary part: no=1 selects the imaginary component in get_complex_part.
    return get_complex_part(expr.args[0], 1, prec, options)
def finalize_complex(re, im, prec):
    """Build a (re, im, re_acc, im_acc) result for a complex value whose
    parts are assumed accurate to full precision; the larger part gets
    ``prec`` accurate bits and the smaller part is charged for the size
    difference between the two."""
    assert re and im
    if re == fzero and im == fzero:
        raise ValueError("got complex zero with unknown accuracy")
    size_re = fastlog(re)
    size_im = fastlog(im)
    # Convert fzeros to scaled zeros
    if re == fzero:
        # a tiny placeholder magnitude prec bits below the other part
        re = mpf_shift(fone, size_im-prec)
        size_re = fastlog(re)
    elif im == fzero:
        im = mpf_shift(fone, size_re-prec)
        size_im = fastlog(im)
    # The dominant part is accurate to prec bits; the other loses as many
    # bits as it is smaller than the dominant one.
    if size_re > size_im:
        re_acc = prec
        im_acc = prec + min(-(size_re - size_im), 0)
    else:
        im_acc = prec
        re_acc = prec + min(-(size_im - size_re), 0)
    return re, im, re_acc, im_acc
def chop_parts(value, prec):
    """
    Chop off tiny real or complex parts.

    ``value`` is a (re, im, re_acc, im_acc) tuple; a chopped part is
    replaced by None (exact zero) together with its accuracy.
    """
    re, im, re_acc, im_acc = value
    # Method 1: chop based on absolute value
    # drop a part whose magnitude is below about 2**(4 - prec)
    if re and (fastlog(re) < -prec+4):
        re, re_acc = None, None
    if im and (fastlog(im) < -prec+4):
        im, im_acc = None, None
    # Method 2: chop if inaccurate and relatively small
    if re and im:
        # delta is the size difference (in bits) between the two parts
        delta = fastlog(re) - fastlog(im)
        if re_acc < 2 and (delta - re_acc <= -prec+4):
            re, re_acc = None, None
        if im_acc < 2 and (delta - im_acc >= prec-4):
            im, im_acc = None, None
    return re, im, re_acc, im_acc
def check_target(expr, result, prec):
    """Raise PrecisionExhausted unless ``result`` carries at least ``prec``
    accurate bits."""
    if complex_accuracy(result) < prec:
        raise PrecisionExhausted("Failed to distinguish the expression: \n\n%s\n\n"
            "from zero. Try simplifying the input, using chop=True, or providing "
            "a higher maxn for evalf" % (expr))
def get_integer_part(expr, no, options, return_ints=False):
    """
    With no = 1, computes ceiling(expr)
    With no = -1, computes floor(expr)
    Note: this function either gives the exact result or signals failure.

    If return_ints is True, the real and imaginary integer parts are
    returned as plain Python ints instead of mpf value tuples.
    """
    # The expression is likely less than 2^30 or so
    assumed_size = 30
    ire, iim, ire_acc, iim_acc = evalf(expr, assumed_size, options)
    # We now know the size, so we can calculate how much extra precision
    # (if any) is needed to get within the nearest integer
    if ire and iim:
        gap = max(fastlog(ire)-ire_acc, fastlog(iim)-iim_acc)
    elif ire:
        gap = fastlog(ire)-ire_acc
    elif iim:
        gap = fastlog(iim)-iim_acc
    else:
        # ... or maybe the expression was exactly zero
        return None, None, None, None
    margin = 10
    if gap >= -margin:
        # Re-evaluate with enough precision to pin down the integer part.
        ire, iim, ire_acc, iim_acc = evalf(expr, margin+assumed_size+gap, options)
    # We can now easily find the nearest integer, but to find floor/ceil, we
    # must also calculate whether the difference to the nearest integer is
    # positive or negative (which may fail if very close)
    def calc_part(expr, nexpr):
        nint = int(to_int(nexpr, round_nearest))
        # expr - nint is the residual; its sign decides whether the nearest
        # integer must be adjusted to obtain the floor/ceiling.
        expr = C.Add(expr, -nint, evaluate=False)
        x, _, x_acc, _ = evalf(expr, 10, options)
        # Signal failure (PrecisionExhausted) if the residual cannot be
        # distinguished from zero.
        check_target(expr, (x, None, x_acc, None), 3)
        nint += int(no*(mpf_cmp(x or fzero, fzero) == no))
        nint = from_int(nint)
        return nint, fastlog(nint) + 10
    re, im, re_acc, im_acc = None, None, None, None
    if ire:
        re, re_acc = calc_part(C.re(expr, evaluate=False), ire)
    if iim:
        im, im_acc = calc_part(C.im(expr, evaluate=False), iim)
    if return_ints:
        return int(to_int(re or fzero)), int(to_int(im or fzero))
    return re, im, re_acc, im_acc
def evalf_ceiling(expr, prec, options):
    # ceiling(x): no=1 in get_integer_part
    return get_integer_part(expr.args[0], 1, options)
def evalf_floor(expr, prec, options):
    # floor(x): no=-1 in get_integer_part
    return get_integer_part(expr.args[0], -1, options)
#----------------------------------------------------------------------------#
# #
# Arithmetic operations #
# #
#----------------------------------------------------------------------------#
def add_terms(terms, prec, target_prec):
    """
    Helper for evalf_add. Adds a list of (mpfval, accuracy) terms.

    Returns a (sum mpfval, accuracy) pair; (None, None) if every term
    was zero, or a "scaled zero" placeholder paired with accuracy -1
    when the terms cancel.
    """
    if len(terms) == 1:
        if not terms[0]:
            # XXX: this is supposed to represent a scaled zero
            return mpf_shift(fone, target_prec), -1
        return terms[0]
    max_extra_prec = 2*prec
    # Accumulate an exact integer mantissa (sum_man) against a shared
    # exponent (sum_exp), while tracking the largest absolute error
    # (in bits) contributed by any term.
    sum_man, sum_exp, absolute_error = 0, 0, MINUS_INF
    for x, accuracy in terms:
        if not x:
            continue
        sign, man, exp, bc = x
        if sign:
            man = -man
        absolute_error = max(absolute_error, bc+exp-accuracy)
        delta = exp - sum_exp
        if exp >= sum_exp:
            # x much larger than existing sum?
            # first: quick test
            if (delta > max_extra_prec) and \
                ((not sum_man) or delta-bitcount(abs(sum_man)) > max_extra_prec):
                # The existing sum is negligible next to x: replace it.
                sum_man = man
                sum_exp = exp
            else:
                # Align x to the sum's exponent and add exactly.
                sum_man += (man << delta)
        else:
            delta = -delta
            # x much smaller than existing sum?
            if delta-bc > max_extra_prec:
                if not sum_man:
                    sum_man, sum_exp = man, exp
            else:
                # Align the sum down to x's exponent and add exactly.
                sum_man = (sum_man << delta) + man
                sum_exp = exp
    if absolute_error == MINUS_INF:
        # Every term was zero.
        return None, None
    if not sum_man:
        # XXX: this is supposed to represent a scaled zero
        return mpf_shift(fone, absolute_error), -1
    if sum_man < 0:
        sum_sign = 1
        sum_man = -sum_man
    else:
        sum_sign = 0
    sum_bc = bitcount(sum_man)
    sum_accuracy = sum_exp + sum_bc - absolute_error
    r = normalize(sum_sign, sum_man, sum_exp, sum_bc, target_prec,
        round_nearest), sum_accuracy
    #print "returning", to_str(r[0],50), r[1]
    return r
def evalf_add(v, prec, options):
    """Evaluate an Add node by summing the evalf'd terms, retrying at
    increasing precision until target accuracy is reached or the maxprec
    budget is exhausted."""
    args = v.args
    target_prec = prec
    i = 0
    # Temporarily cap maxprec for the sub-evaluations; restored below.
    oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
    options['maxprec'] = min(oldmaxprec, 2*prec)
    try:
        while 1:
            terms = [evalf(arg, prec+10, options) for arg in args]
            # Sum real and imaginary parts separately, skipping exact zeros.
            re, re_acc = add_terms([(a[0],a[2]) for a in terms if a[0]], prec, target_prec)
            im, im_acc = add_terms([(a[1],a[3]) for a in terms if a[1]], prec, target_prec)
            accuracy = complex_accuracy((re, im, re_acc, im_acc))
            if accuracy >= target_prec:
                if options.get('verbose'):
                    print "ADD: wanted", target_prec, "accurate bits, got", re_acc, im_acc
                return re, im, re_acc, im_acc
            else:
                diff = target_prec - accuracy
                # Give up (returning the best result so far) once the extra
                # working precision exceeds the allowed budget.
                if (prec-target_prec) > options.get('maxprec', DEFAULT_MAXPREC):
                    return re, im, re_acc, im_acc
                prec = prec + max(10+2**i, diff)
                options['maxprec'] = min(oldmaxprec, 2*prec)
                if options.get('verbose'):
                    print "ADD: restarting with prec", prec
                i += 1
    finally:
        options['maxprec'] = oldmaxprec
def evalf_mul(v, prec, options):
    """Evaluate a Mul node: multiply pure real/imaginary factors exactly,
    then fold in any genuinely complex factors."""
    args = v.args
    # With guard digits, multiplication in the real case does not destroy
    # accuracy. This is also true in the complex case when considering the
    # total accuracy; however accuracy for the real or imaginary parts
    # separately may be lower.
    acc = prec
    target_prec = prec
    # XXX: big overestimate
    prec = prec + len(args) + 5
    direction = 0
    # Empty product is 1
    man, exp, bc = MPZ(1), 0, 1
    direction = 0
    complex_factors = []
    # First, we multiply all pure real or pure imaginary numbers.
    # direction tells us that the result should be multiplied by
    # i**direction
    for arg in args:
        re, im, re_acc, im_acc = evalf(arg, prec, options)
        if re and im:
            # Truly complex factor: handle in the second pass below.
            complex_factors.append((re, im, re_acc, im_acc))
            continue
        elif re:
            (s, m, e, b), w_acc = re, re_acc
        elif im:
            (s, m, e, b), w_acc = im, im_acc
            direction += 1
        else:
            # A zero factor makes the whole product exactly zero.
            return None, None, None, None
        direction += 2*s
        # Multiply mantissas/exponents exactly as integers.
        man *= m
        exp += e
        bc += b
        if bc > 3*prec:
            # Keep the exact mantissa from growing without bound.
            man >>= prec
            exp += prec
        acc = min(acc, w_acc)
    sign = (direction & 2) >> 1
    v = normalize(sign, man, exp, bitcount(man), prec, round_nearest)
    if complex_factors:
        # make existing real scalar look like an imaginary and
        # multiply by the remaining complex numbers
        re, im = v, (0, MPZ(0), 0, 0)
        for wre, wim, wre_acc, wim_acc in complex_factors:
            # acc is the overall accuracy of the product; we aren't
            # computing exact accuracies of the product.
            acc = min(acc,
                complex_accuracy((wre, wim, wre_acc, wim_acc)))
            # Standard complex product: (re+im*i)*(wre+wim*i)
            A = mpf_mul(re, wre, prec)
            B = mpf_mul(mpf_neg(im), wim, prec)
            C = mpf_mul(re, wim, prec)
            D = mpf_mul(im, wre, prec)
            re, xre_acc = add_terms([(A, acc), (B, acc)], prec, target_prec)
            im, xim_acc = add_terms([(C, acc), (D, acc)], prec, target_prec)
        if options.get('verbose'):
            print "MUL: wanted", target_prec, "accurate bits, got", acc
        # multiply by i
        if direction & 1:
            return mpf_neg(im), re, acc, acc
        else:
            return re, im, acc, acc
    else:
        # multiply by i
        if direction & 1:
            return None, v, None, acc
        else:
            return v, None, acc, None
def evalf_pow(v, prec, options):
    """Numerically evaluate a Pow node.

    Integer powers, square roots and exp are special-cased; everything
    else goes through the general mpmath complex power routines.
    Returns (re, im, re_acc, im_acc).
    """
    target_prec = prec
    base, exp = v.args
    # We handle x**n separately. This has two purposes: 1) it is much
    # faster, because we avoid calling evalf on the exponent, and 2) it
    # allows better handling of real/imaginary parts that are exactly zero
    if exp.is_Integer:
        p = exp.p
        # Exact
        if not p:
            return fone, None, prec, None
        # Exponentiation by p magnifies relative error by |p|, so the
        # base must be evaluated with increased precision if p is large
        prec += int(math.log(abs(p),2))
        re, im, re_acc, im_acc = evalf(base, prec+5, options)
        # Real to integer power
        if re and not im:
            return mpf_pow_int(re, p, target_prec), None, target_prec, None
        # (x*I)**n = I**n * x**n
        if im and not re:
            z = mpf_pow_int(im, p, target_prec)
            case = p % 4
            if case == 0: return z, None, target_prec, None
            if case == 1: return None, z, None, target_prec
            if case == 2: return mpf_neg(z), None, target_prec, None
            if case == 3: return None, mpf_neg(z), None, target_prec
        # Zero raised to an integer power
        if not re:
            return None, None, None, None
        # General complex number to arbitrary integer power
        re, im = libmp.mpc_pow_int((re, im), p, prec)
        # Assumes full accuracy in input
        return finalize_complex(re, im, target_prec)
    # Pure square root
    if exp is S.Half:
        # NOTE(review): the last slot is unpacked as yim_acc (unused);
        # the name looks like a typo for xim_acc but has no effect.
        xre, xim, xre_acc, yim_acc = evalf(base, prec+5, options)
        # General complex square root
        if xim:
            re, im = libmp.mpc_sqrt((xre or fzero, xim), prec)
            return finalize_complex(re, im, prec)
        if not xre:
            return None, None, None, None
        # Square root of a negative real number
        if mpf_lt(xre, fzero):
            return None, mpf_sqrt(mpf_neg(xre), prec), None, prec
        # Positive square root
        return mpf_sqrt(xre, prec), None, prec, None
    # We first evaluate the exponent to find its magnitude
    # This determines the working precision that must be used
    prec += 10
    yre, yim, yre_acc, yim_acc = evalf(exp, prec, options)
    # Special cases: x**0
    if not (yre or yim):
        return fone, None, prec, None
    ysize = fastlog(yre)
    # Restart if too big
    # XXX: prec + ysize might exceed maxprec
    if ysize > 5:
        prec += ysize
        yre, yim, yre_acc, yim_acc = evalf(exp, prec, options)
    # Pure exponential function; no need to evalf the base
    if base is S.Exp1:
        if yim:
            re, im = libmp.mpc_exp((yre or fzero, yim), prec)
            return finalize_complex(re, im, target_prec)
        return mpf_exp(yre, target_prec), None, target_prec, None
    xre, xim, xre_acc, yim_acc = evalf(base, prec+5, options)
    # 0**y
    if not (xre or xim):
        return None, None, None, None
    # (real ** complex) or (complex ** complex)
    if yim:
        re, im = libmp.mpc_pow((xre or fzero, xim or fzero), (yre or fzero, yim),
            target_prec)
        return finalize_complex(re, im, target_prec)
    # complex ** real
    if xim:
        re, im = libmp.mpc_pow_mpf((xre or fzero, xim), yre, target_prec)
        return finalize_complex(re, im, target_prec)
    # negative ** real
    elif mpf_lt(xre, fzero):
        re, im = libmp.mpc_pow_mpf((xre, fzero), yre, target_prec)
        return finalize_complex(re, im, target_prec)
    # positive ** real
    else:
        return mpf_pow(xre, yre, target_prec), None, target_prec, None
#----------------------------------------------------------------------------#
# #
# Special functions #
# #
#----------------------------------------------------------------------------#
def evalf_trig(v, prec, options):
    """
    This function handles sin and cos of real arguments.

    TODO: should also handle tan and complex arguments.
    """
    if v.func is C.cos:
        func = mpf_cos
    elif v.func is C.sin:
        func = mpf_sin
    else:
        raise NotImplementedError
    arg = v.args[0]
    # 20 extra bits is possibly overkill. It does make the need
    # to restart very unlikely
    xprec = prec + 20
    re, im, re_acc, im_acc = evalf(arg, xprec, options)
    if im:
        # Complex arguments are not supported yet.
        raise NotImplementedError
    if not re:
        # Argument is exactly zero: cos(0) = 1, sin(0) = 0.
        if v.func is C.cos:
            return fone, None, prec, None
        elif v.func is C.sin:
            return None, None, None, None
        else:
            raise NotImplementedError
    # For trigonometric functions, we are interested in the
    # fixed-point (absolute) accuracy of the argument.
    xsize = fastlog(re)
    # Magnitude <= 1.0. OK to compute directly, because there is no
    # danger of hitting the first root of cos (with sin, magnitude
    # <= 2.0 would actually be ok)
    if xsize < 1:
        return func(re, prec, round_nearest), None, prec, None
    # Very large
    if xsize >= 10:
        xprec = prec + xsize
        re, im, re_acc, im_acc = evalf(arg, xprec, options)
    # Need to repeat in case the argument is very close to a
    # multiple of pi (or pi/2), hitting close to a root
    while 1:
        y = func(re, prec, round_nearest)
        ysize = fastlog(y)
        gap = -ysize
        accuracy = (xprec - xsize) - gap
        if accuracy < prec:
            if options.get('verbose'):
                print "SIN/COS", accuracy, "wanted", prec, "gap", gap
                print to_str(y,10)
            if xprec > options.get('maxprec', DEFAULT_MAXPREC):
                # Precision budget exhausted: return the best we have.
                return y, None, accuracy, None
            xprec += gap
            re, im, re_acc, im_acc = evalf(arg, xprec, options)
            continue
        else:
            return y, None, prec, None
def evalf_log(expr, prec, options):
    """Numerically evaluate log(arg) for a real or complex argument."""
    arg = expr.args[0]
    workprec = prec+10
    xre, xim, xacc, _ = evalf(arg, workprec, options)
    if xim:
        # Complex argument: log(z) = log(|z|) + atan2(im, re)*i
        # XXX: use get_abs etc instead
        re = evalf_log(C.log(C.Abs(arg, evaluate=False), evaluate=False), prec, options)
        im = mpf_atan2(xim, xre or fzero, prec)
        return re[0], im, re[2], prec
    imaginary_term = (mpf_cmp(xre, fzero) < 0)
    re = mpf_log(mpf_abs(xre), prec, round_nearest)
    size = fastlog(re)
    if prec - size > workprec:
        # We actually need to compute 1+x accurately, not x
        # (argument very close to 1, so log suffers cancellation)
        arg = C.Add(S.NegativeOne,arg,evaluate=False)
        xre, xim, xre_acc, xim_acc = evalf_add(arg, prec, options)
        prec2 = workprec - fastlog(xre)
        re = mpf_log(mpf_add(xre, fone, prec2), prec, round_nearest)
    re_acc = prec
    if imaginary_term:
        # Negative real argument: log(x) = log(|x|) + pi*i
        return re, mpf_pi(prec), re_acc, prec
    else:
        return re, None, re_acc, None
def evalf_atan(v, prec, options):
    """Numerically evaluate atan of a real argument.

    Complex arguments are not supported and raise NotImplementedError.
    """
    x = v.args[0]
    real, imag, _racc, _iacc = evalf(x, prec + 5, options)
    if imag:
        raise NotImplementedError
    return mpf_atan(real, prec, round_nearest), None, prec, None
def evalf_piecewise(expr, prec, options):
    """Numerically evaluate a Piecewise expression.

    Applies any pending substitutions first; if the result is a sympy
    expression it is re-dispatched through evalf, and bare Python
    floats/ints are wrapped in the corresponding sympy numbers.

    Raises NotImplementedError when undetermined symbols remain so the
    caller can fall back to ordinary evalf.
    """
    if 'subs' in options:
        expr = expr.subs(options['subs'])
        # Consume the substitutions so nested evalf calls don't redo them.
        del options['subs']
    if hasattr(expr, 'func'):
        return evalf(expr, prec, options)
    if type(expr) == float:
        return evalf(C.Float(expr), prec, options)
    if type(expr) == int:
        return evalf(C.Integer(expr), prec, options)
    # BUG FIX: previous code attempted expr.subs({p: 10**-12, n: 10**-9, ...})
    # with SI-prefix symbols (p, n, u, m, K, M, G, P) that are undefined in
    # this module, so reaching this point raised NameError instead of the
    # intended NotImplementedError. The broken substitution is removed.
    # We still have undefined symbols
    raise NotImplementedError
def evalf_bernoulli(expr, prec, options):
    """Numerically evaluate bernoulli(n) for an integer index."""
    index = expr.args[0]
    if not index.is_Integer:
        raise ValueError("Bernoulli number index must be an integer")
    value = mpf_bernoulli(int(index), prec, round_nearest)
    if value == fzero:
        # An exact zero is reported as None in all four slots.
        return None, None, None, None
    return value, None, prec, None
#----------------------------------------------------------------------------#
# #
# High-level operations #
# #
#----------------------------------------------------------------------------#
def as_mpmath(x, prec, options):
    """Convert a sympy object to an mpmath mpf/mpc at the given precision."""
    expr = sympify(x)
    # Special values that need no numerical evaluation.
    if isinstance(expr, C.Zero):
        return mpf(0)
    if isinstance(expr, C.Infinity):
        return mpf('inf')
    if isinstance(expr, C.NegativeInfinity):
        return mpf('-inf')
    # XXX
    real, imag, _, _ = evalf(expr, prec, options)
    if imag:
        return mpc(real or fzero, imag)
    return mpf(real)
def do_integral(expr, prec, options):
    """Numerically evaluate a definite Integral via mpmath quadrature.

    Uses tanh-sinh quadrature by default, or quadosc when
    options['quad'] == 'osc' and the integrand matches sin(A*x+B)*f(x)
    or cos(A*x+B)*f(x).  Returns (re, im, re_acc, im_acc).
    """
    func = expr.args[0]
    x, xlow, xhigh = expr.args[1]
    orig = mp.prec
    oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
    # Cap the working precision while integrating; restored in finally.
    options['maxprec'] = min(oldmaxprec, 2*prec)
    try:
        mp.prec = prec+5
        xlow = as_mpmath(xlow, prec+15, options)
        xhigh = as_mpmath(xhigh, prec+15, options)
        # Integration is like summation, and we can phone home from
        # the integrand function to update accuracy summation style
        # Note that this accuracy is inaccurate, since it fails
        # to account for the variable quadrature weights,
        # but it is better than nothing
        have_part = [False, False]
        max_real_term = [MINUS_INF]
        max_imag_term = [MINUS_INF]
        def f(t):
            # Integrand callback: records which parts occur and their size
            # in the enclosing lists while returning an mpmath number.
            re, im, re_acc, im_acc = evalf(func, mp.prec, {'subs':{x:t}})
            have_part[0] = re or have_part[0]
            have_part[1] = im or have_part[1]
            max_real_term[0] = max(max_real_term[0], fastlog(re))
            max_imag_term[0] = max(max_imag_term[0], fastlog(im))
            if im:
                return mpc(re or fzero, im)
            return mpf(re or fzero)
        if options.get('quad') == 'osc':
            # Oscillatory quadrature: extract the period from the
            # sin/cos factor of the integrand.
            A = C.Wild('A', exclude=[x])
            B = C.Wild('B', exclude=[x])
            D = C.Wild('D')
            m = func.match(C.cos(A*x+B)*D)
            if not m:
                m = func.match(C.sin(A*x+B)*D)
            if not m:
                raise ValueError("An integrand of the form sin(A*x+B)*f(x) "
                    "or cos(A*x+B)*f(x) is required for oscillatory quadrature")
            period = as_mpmath(2*S.Pi/m[A], prec+15, options)
            result = quadosc(f, [xlow, xhigh], period=period)
            # XXX: quadosc does not do error detection yet
            quadrature_error = MINUS_INF
        else:
            result, quadrature_error = quadts(f, [xlow, xhigh], error=1)
            quadrature_error = fastlog(quadrature_error._mpf_)
    finally:
        options['maxprec'] = oldmaxprec
        mp.prec = orig
    if have_part[0]:
        re = result.real._mpf_
        if re == fzero:
            # Zero result: accuracy bounded by term size / quadrature error.
            re = mpf_shift(fone, min(-prec,-max_real_term[0],-quadrature_error))
            re_acc = -1
        else:
            re_acc = -max(max_real_term[0]-fastlog(re)-prec, quadrature_error)
    else:
        re, re_acc = None, None
    if have_part[1]:
        im = result.imag._mpf_
        if im == fzero:
            im = mpf_shift(fone, min(-prec,-max_imag_term[0],-quadrature_error))
            im_acc = -1
        else:
            im_acc = -max(max_imag_term[0]-fastlog(im)-prec, quadrature_error)
    else:
        im, im_acc = None, None
    result = re, im, re_acc, im_acc
    return result
def evalf_integral(expr, prec, options):
    """Evaluate an Integral, retrying at higher working precision until
    the requested accuracy (or the maxprec ceiling) is reached."""
    prec_cap = options.get('maxprec', INF)
    workprec = prec
    attempt = 0
    while True:
        result = do_integral(expr, workprec, options)
        achieved = complex_accuracy(result)
        if achieved >= prec or workprec >= prec_cap:
            return result
        # Raise working precision by at least 2**attempt bits per retry.
        workprec += prec - max(-2**attempt, achieved)
        attempt += 1
def check_convergence(numer, denom, n):
    """
    Returns (h, g, p) where
    -- h is:
        > 0 for convergence of rate 1/factorial(n)**h
        < 0 for divergence of rate factorial(n)**(-h)
        = 0 for geometric or polynomial convergence or divergence

    -- abs(g) is:
        > 1 for geometric convergence of rate 1/h**n
        < 1 for geometric divergence of rate h**n
        = 1 for polynomial convergence or divergence
        (g < 0 indicates an alternating series)

    -- p is:
        > 1 for polynomial convergence of rate 1/n**h
        <= 1 for polynomial divergence of rate n**(-h)
    """
    num_poly = C.Poly(numer, n)
    den_poly = C.Poly(denom, n)
    rate = den_poly.degree() - num_poly.degree()
    if rate:
        # Factorial-rate behaviour dominates; g and p are irrelevant.
        return rate, None, None
    constant = den_poly.LC() / num_poly.LC()
    if abs(constant) != 1:
        # Geometric behaviour governed by the leading-coefficient ratio.
        return rate, constant, None
    if num_poly.degree() == den_poly.degree() == 0:
        return rate, constant, 0
    # Equal degrees and |ratio| == 1: compare subleading coefficients
    # to determine the polynomial rate.
    pc = num_poly.all_coeffs()[1]
    qc = den_poly.all_coeffs()[1]
    return rate, constant, (qc - pc) / den_poly.LC()
def hypsum(expr, n, start, prec):
    """
    Sum a rapidly convergent infinite hypergeometric series with
    given general term, e.g. e = hypsum(1/factorial(n), n). The
    quotient between successive terms must be a quotient of integer
    polynomials.
    """
    from sympy import hypersimp, lambdify
    if start:
        expr = expr.subs(n, n+start)
    hs = hypersimp(expr, n)
    if hs is None:
        raise NotImplementedError("a hypergeometric series is required")
    num, den = hs.as_numer_denom()
    # func1/func2 evaluate the numerator/denominator of the term ratio.
    func1 = lambdify(n, num)
    func2 = lambdify(n, den)
    h, g, p = check_convergence(num, den, n)
    if h < 0:
        raise ValueError("Sum diverges like (n!)^%i" % (-h))
    # Direct summation if geometric or faster
    if h > 0 or (h == 0 and abs(g) > 1):
        # Fixed-point summation: the first term is scaled by 2**prec.
        term = expr.subs(n, 0)
        term = (MPZ(term.p) << prec) // term.q
        s = term
        k = 1
        while abs(term) > 5:
            # Advance the term via the ratio of successive terms.
            term *= MPZ(func1(k-1))
            term //= MPZ(func2(k-1))
            s += term
            k += 1
        return from_man_exp(s, -prec)
    else:
        alt = g < 0
        if abs(g) < 1:
            raise ValueError("Sum diverges like (%i)^n" % abs(1/g))
        if p < 1 or (p == 1 and not alt):
            raise ValueError("Sum diverges like n^%i" % (-p))
        # We have polynomial convergence: use Richardson extrapolation
        # Need to use at least quad precision because a lot of cancellation
        # might occur in the extrapolation process
        prec2 = 4*prec
        term = expr.subs(n, 0)
        term = (MPZ(term.p) << prec2) // term.q
        def summand(k, _term=[term]):
            # Stateful term generator for nsum; _term carries the running
            # fixed-point value between calls.
            if k:
                k = int(k)
                _term[0] *= MPZ(func1(k-1))
                _term[0] //= MPZ(func2(k-1))
            return make_mpf(from_man_exp(_term[0], -prec2))
        orig = mp.prec
        try:
            mp.prec = prec
            v = nsum(summand, [0, mpmath_inf], method='richardson')
        finally:
            mp.prec = orig
        return v._mpf_
def evalf_sum(expr, prec, options):
    """Numerically evaluate a Sum over a single (var, a, oo) range.

    Tries fast hypergeometric summation first; on NotImplementedError
    falls back to Euler-Maclaurin summation for general series.
    """
    func = expr.function
    limits = expr.limits
    if len(limits) != 1 or not isinstance(limits[0], Tuple) or \
        len(limits[0]) != 3:
        raise NotImplementedError
    prec2 = prec+10
    try:
        n, a, b = limits[0]
        if b != S.Infinity or a != int(a):
            raise NotImplementedError
        # Use fast hypergeometric summation if possible
        v = hypsum(func, n, int(a), prec2)
        delta = prec - fastlog(v)
        if fastlog(v) < -10:
            # Tiny result: re-sum with enough precision for significance.
            v = hypsum(func, n, int(a), delta)
        return v, None, min(prec, delta), None
    except NotImplementedError:
        # Euler-Maclaurin summation for general series
        eps = C.Float(2.0)**(-prec)
        for i in range(1, 5):
            # NOTE: n is rebound here, shadowing the summation variable.
            m = n = 2**i * prec
            s, err = expr.euler_maclaurin(m=m, n=n, eps=eps, \
                eval_integral=False)
            err = err.evalf()
            if err <= eps:
                break
        err = fastlog(evalf(abs(err), 20, options)[0])
        re, im, re_acc, im_acc = evalf(s, prec2, options)
        if re_acc is None:
            re_acc = -err
        if im_acc is None:
            im_acc = -err
        return re, im, re_acc, im_acc
#----------------------------------------------------------------------------#
# #
# Symbolic interface #
# #
#----------------------------------------------------------------------------#
def evalf_symbol(x, prec, options):
    """Evaluate a symbol by looking up its value in options['subs'].

    Non-mpf substitution values go through evalf and are memoized per
    symbol name in options['_cache'] along with the precision used.
    """
    val = options['subs'][x]
    if isinstance(val, mpf):
        if not val:
            return None, None, None, None
        return val._mpf_, None, prec, None
    cache = options.setdefault('_cache', {})
    cached, cached_prec = cache.get(x.name, (None, MINUS_INF))
    if cached_prec >= prec:
        # Previously computed at sufficient precision; reuse it.
        return cached
    result = evalf(sympify(val), prec, options)
    cache[x.name] = (result, prec)
    return result
# Dispatch table mapping sympy classes to evalf handlers; populated
# lazily by _create_evalf_table on the first call to EvalfMixin.evalf.
evalf_table = None
def _create_evalf_table():
    """Build the class -> handler dispatch table used by evalf()."""
    global evalf_table
    evalf_table = {
        C.Symbol : evalf_symbol,
        C.Dummy : evalf_symbol,
        # Atomic numbers are converted to mpf tuples directly.
        C.Float : lambda x, prec, options: (x._mpf_, None, prec, None),
        C.Rational : lambda x, prec, options: (from_rational(x.p, x.q, prec), None, prec, None),
        C.Integer : lambda x, prec, options: (from_int(x.p, prec), None, prec, None),
        C.Zero : lambda x, prec, options: (None, None, prec, None),
        C.One : lambda x, prec, options: (fone, None, prec, None),
        C.Half : lambda x, prec, options: (fhalf, None, prec, None),
        C.Pi : lambda x, prec, options: (mpf_pi(prec), None, prec, None),
        C.Exp1 : lambda x, prec, options: (mpf_e(prec), None, prec, None),
        C.ImaginaryUnit : lambda x, prec, options: (None, fone, None, prec),
        C.NegativeOne : lambda x, prec, options: (fnone, None, prec, None),
        # exp(x) is rewritten as E**x and handled by evalf_pow.
        C.exp : lambda x, prec, options: evalf_pow(C.Pow(S.Exp1, x.args[0],
            evaluate=False), prec, options),
        C.cos : evalf_trig,
        C.sin : evalf_trig,
        C.Add : evalf_add,
        C.Mul : evalf_mul,
        C.Pow : evalf_pow,
        C.log : evalf_log,
        C.atan : evalf_atan,
        C.Abs : evalf_abs,
        C.re : evalf_re,
        C.im : evalf_im,
        C.floor : evalf_floor,
        C.ceiling : evalf_ceiling,
        C.Integral : evalf_integral,
        C.Sum : evalf_sum,
        C.Piecewise : evalf_piecewise,
        C.bernoulli : evalf_bernoulli,
    }
def evalf(x, prec, options):
    """Core evaluator: dispatch x to its registered handler.

    Returns (re, im, re_acc, im_acc).  Falls back to x._eval_evalf when
    no handler is registered; honors the 'verbose', 'chop' and 'strict'
    options.  Raises NotImplementedError when x cannot be evaluated.
    """
    try:
        rf = evalf_table[x.func]
        r = rf(x, prec, options)
    except KeyError:
        #r = finalize_complex(x._eval_evalf(prec)._mpf_, fzero, prec)
        try:
            # Fall back to ordinary evalf if possible
            if 'subs' in options:
                x = x.subs(options['subs'])
            r = x._eval_evalf(prec)._mpf_, None, prec, None
        except AttributeError:
            raise NotImplementedError
    if options.get("verbose"):
        print "### input", x
        print "### output", to_str(r[0] or fzero, 50)
        print "### raw", r#r[0], r[2]
        print
    if options.get("chop"):
        r = chop_parts(r, prec)
    if options.get("strict"):
        check_target(x, r, prec)
    return r
class EvalfMixin(object):
    """Mixin class adding evalf capability."""

    __slots__ = []

    # BUG FIX: the default was n=13, contradicting the documented
    # "(default=15)" below; restored to 15 digits.
    def evalf(self, n=15, subs=None, maxn=100, chop=False, strict=False, quad=None, verbose=False):
        """
        Evaluate the given formula to an accuracy of n digits. (default=15)

        Optional keyword arguments:

            subs=<dict>
                Substitute numerical values for symbols, e.g.
                subs={x:3, y:1+pi}.

            maxn=<integer>
                Allow a maximum temporary working precision of maxn digits
                (default=100)

            chop=<bool>
                Replace tiny real or imaginary parts in subresults
                by exact zeros (default=False)

            strict=<bool>
                Raise PrecisionExhausted if any subresult fails to evaluate
                to full accuracy, given the available maxprec
                (default=False)

            quad=<str>
                Choose algorithm for numerical quadrature. By default,
                tanh-sinh quadrature is used. For oscillatory
                integrals on an infinite interval, try quad='osc'.

            verbose=<bool>
                Print debug information (default=False)
        """
        if not evalf_table:
            _create_evalf_table()
        prec = dps_to_prec(n)
        # maxprec is a bit budget derived from maxn decimal digits.
        options = {'maxprec': max(prec,int(maxn*LG10)), 'chop': chop,
               'strict': strict, 'verbose': verbose}
        if subs is not None:
            options['subs'] = subs
        if quad is not None:
            options['quad'] = quad
        try:
            # A few guard bits for the top-level evaluation.
            result = evalf(self, prec+4, options)
        except NotImplementedError:
            # Fall back to the ordinary evalf
            v = self._eval_evalf(prec)
            if v is None:
                return self
            try:
                # If the result is numerical, normalize it
                result = evalf(v, prec, options)
            except:
                # Probably contains symbols or unknown functions
                return v
        re, im, re_acc, im_acc = result
        if re:
            # Truncate to the achieved accuracy (at least 1 bit).
            p = max(min(prec, re_acc), 1)
            #re = mpf_pos(re, p, round_nearest)
            re = C.Float._new(re, p)
        else:
            re = S.Zero
        if im:
            p = max(min(prec, im_acc), 1)
            #im = mpf_pos(im, p, round_nearest)
            im = C.Float._new(im, p)
            return re + im*S.ImaginaryUnit
        else:
            return re

    n = evalf

    def _evalf(self, prec):
        """Helper for evalf. Does the same thing but takes binary precision"""
        r = self._eval_evalf(prec)
        if r is None:
            r = self
        return r

    def _eval_evalf(self, prec):
        # Subclasses override this; the base implementation returns None.
        return

    def _to_mpmath(self, prec, allow_ints=True):
        """Convert self to an mpmath number; ints pass through if allowed.

        Raises ValueError when the object cannot be converted.
        """
        # mpmath functions accept ints as input
        errmsg = "cannot convert to mpmath number"
        if allow_ints and self.is_Integer:
            return self.p
        try:
            re, im, _, _ = evalf(self, prec, {})
            if im:
                if not re:
                    re = fzero
                return make_mpc((re, im))
            else:
                return make_mpf(re)
        except NotImplementedError:
            v = self._eval_evalf(prec)
            if v is None:
                raise ValueError(errmsg)
            if v.is_Float:
                return make_mpf(v._mpf_)
            # Number + Number*I is also fine
            re, im = v.as_real_imag()
            if allow_ints and re.is_Integer:
                re = from_int(re.p)
            elif re.is_Float:
                re = re._mpf_
            else:
                raise ValueError(errmsg)
            if allow_ints and im.is_Integer:
                im = from_int(im.p)
            elif im.is_Float:
                im = im._mpf_
            else:
                raise ValueError(errmsg)
            return make_mpc((re, im))
def N(x, n=15, **options):
    """
    Calls x.evalf(n, \*\*options).

    Both .evalf() and N() are equivalent, use the one that you like better.
    See also the docstring of .evalf() for information on the options.

    Example:

    >>> from sympy import Sum, Symbol, oo, N
    >>> from sympy.abc import k
    >>> Sum(1/k**k, (k, 1, oo))
    Sum(k**(-k), (k, 1, oo))
    >>> N(Sum(1/k**k, (k, 1, oo)), 4)
    1.291

    """
    expr = sympify(x)
    return expr.evalf(n, **options)
| |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print(json.dumps("\"foo\bar"))
"\"foo\bar"
>>> print(json.dumps(u'\u1234'))
"\u1234"
>>> print(json.dumps('\\'))
"\\"
>>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
{"a": 0, "b": 0, "c": 0}
>>> from simplejson.compat import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> obj = [1,2,3,{'4': 5, '6': 7}]
>>> json.dumps(obj, separators=(',',':'), sort_keys=True)
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' '))
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from simplejson.compat import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 3 (char 2)
"""
from __future__ import absolute_import
__version__ = '3.6.3'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict', 'simple_first',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from .scanner import JSONDecodeError
from .decoder import JSONDecoder
from .encoder import JSONEncoder, JSONEncoderForHTML
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
from . import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from ._speedups import make_encoder
return make_encoder
except ImportError:
return None
# Module-level encoder reused by dump()/dumps() when they are called with
# all-default settings (the "cached encoder" fast path).
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
    use_decimal=True,
    namedtuple_as_object=True,
    tuple_as_array=True,
    bigint_as_string=False,
    item_sort_key=None,
    for_json=False,
    ignore_nan=False,
    int_as_string_bitcount=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=True,
        namedtuple_as_object=True, tuple_as_array=True,
        bigint_as_string=False, sort_keys=False, item_sort_key=None,
        for_json=False, ignore_nan=False, int_as_string_bitcount=None, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If *skipkeys* is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If *ensure_ascii* is false, then the some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If *check_circular* is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If *allow_nan* is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the original JSON specification, instead of using
    the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). See
    *ignore_nan* for ECMA-262 compliant behavior.

    If *indent* is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If specified, *separators* should be an
    ``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
    if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
    compact JSON representation, you should specify ``(',', ':')`` to eliminate
    whitespace.

    *encoding* is the character encoding for str instances, default is UTF-8.

    *default(obj)* is a function that should return a serializable version
    of obj or raise ``TypeError``. The default simply raises ``TypeError``.

    If *use_decimal* is true (default: ``True``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    If *namedtuple_as_object* is true (default: ``True``),
    :class:`tuple` subclasses with ``_asdict()`` methods will be encoded
    as JSON objects.

    If *tuple_as_array* is true (default: ``True``),
    :class:`tuple` (and subclasses) will be encoded as JSON arrays.

    If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher
    or lower than -2**53 will be encoded as strings. This is to avoid the
    rounding that happens in Javascript otherwise. Note that this is still a
    lossy operation that will not round-trip correctly and should be used
    sparingly.

    If *int_as_string_bitcount* is a positive number (n), then int of size
    greater than or equal to 2**n or lower than or equal to -2**n will be
    encoded as strings.

    If specified, *item_sort_key* is a callable used to sort the items in
    each dictionary. This is useful if you want to sort items other than
    in alphabetical order by key. This option takes precedence over
    *sort_keys*.

    If *sort_keys* is true (default: ``False``), the output of dictionaries
    will be sorted by item.

    If *for_json* is true (default: ``False``), objects with a ``for_json()``
    method will use the return value of that method for encoding as JSON
    instead of the object.

    If *ignore_nan* is true (default: ``False``), then out of range
    :class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
    ``null`` in compliance with the ECMA-262 specification. If true, this will
    override *allow_nan*.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg. NOTE: You should use *default* or *for_json* instead
    of subclassing whenever possible.

    """
    # cached encoder
    # BUG FIX: the fast path must also be skipped when *sort_keys* is set
    # (matching dumps() below); otherwise the cached, unsorted encoder
    # silently ignored sort_keys=True.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and use_decimal
        and namedtuple_as_object and tuple_as_array
        and not bigint_as_string and int_as_string_bitcount is None
        and not sort_keys and not item_sort_key and not for_json
        and not ignore_nan and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal,
            namedtuple_as_object=namedtuple_as_object,
            tuple_as_array=tuple_as_array,
            bigint_as_string=bigint_as_string,
            sort_keys=sort_keys,
            item_sort_key=item_sort_key,
            for_json=for_json,
            ignore_nan=ignore_nan,
            int_as_string_bitcount=int_as_string_bitcount,
            **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=True,
        namedtuple_as_object=True, tuple_as_array=True,
        bigint_as_string=False, sort_keys=False, item_sort_key=None,
        for_json=False, ignore_nan=False, int_as_string_bitcount=None, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is false then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If specified, ``separators`` should be an
    ``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
    if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
    compact JSON representation, you should specify ``(',', ':')`` to eliminate
    whitespace.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``True``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    If *namedtuple_as_object* is true (default: ``True``),
    :class:`tuple` subclasses with ``_asdict()`` methods will be encoded
    as JSON objects.

    If *tuple_as_array* is true (default: ``True``),
    :class:`tuple` (and subclasses) will be encoded as JSON arrays.

    If *bigint_as_string* is true (not the default), ints 2**53 and higher
    or lower than -2**53 will be encoded as strings. This is to avoid the
    rounding that happens in Javascript otherwise.

    If *int_as_string_bitcount* is a positive number (n), then int of size
    greater than or equal to 2**n or lower than or equal to -2**n will be
    encoded as strings.

    If specified, *item_sort_key* is a callable used to sort the items in
    each dictionary. This is useful if you want to sort items other than
    in alphabetical order by key. This option takes precedence over
    *sort_keys*.

    If *sort_keys* is true (default: ``False``), the output of dictionaries
    will be sorted by item.

    If *for_json* is true (default: ``False``), objects with a ``for_json()``
    method will use the return value of that method for encoding as JSON
    instead of the object.

    If *ignore_nan* is true (default: ``False``), then out of range
    :class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
    ``null`` in compliance with the ECMA-262 specification. If true, this will
    override *allow_nan*.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg. NOTE: You should use *default* instead of subclassing
    whenever possible.

    """
    # cached encoder
    # Only usable when every option has its default value: the module-level
    # _default_encoder was constructed with exactly these settings.
    if (
        not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and use_decimal
        and namedtuple_as_object and tuple_as_array
        and not bigint_as_string and int_as_string_bitcount is None
        and not sort_keys and not item_sort_key and not for_json
        and not ignore_nan and not kw
    ):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal,
        namedtuple_as_object=namedtuple_as_object,
        tuple_as_array=tuple_as_array,
        bigint_as_string=bigint_as_string,
        sort_keys=sort_keys,
        item_sort_key=item_sort_key,
        for_json=for_json,
        ignore_nan=ignore_nan,
        int_as_string_bitcount=int_as_string_bitcount,
        **kw).encode(obj)
# Module-level decoder reused by load()/loads() when they are called with
# all-default settings.
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
                               object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
         parse_int=None, parse_constant=None, object_pairs_hook=None,
         use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
         **kw):
    """Read a JSON document from the ``.read()``-supporting file-like
    object ``fp`` and return the corresponding Python object.

    This is a thin wrapper that reads the whole stream and delegates to
    :func:`loads`; see that function for the meaning of *encoding*,
    *cls*, *object_hook*, *object_pairs_hook*, *parse_float*,
    *parse_int*, *parse_constant* and *use_decimal*.

    NOTE(review): *namedtuple_as_object* and *tuple_as_array* are
    accepted here but not forwarded to the decoder - presumably kept for
    signature parity with ``dump``; confirm intent.
    """
    document = fp.read()
    return loads(document,
                 encoding=encoding, cls=cls, object_hook=object_hook,
                 parse_float=parse_float, parse_int=parse_int,
                 parse_constant=parse_constant,
                 object_pairs_hook=object_pairs_hook,
                 use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
          parse_int=None, parse_constant=None, object_pairs_hook=None,
          use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` JSON document) to a
    Python object.

    *encoding* selects how ``str`` input is interpreted (``'utf-8'`` by
    default; must be an ASCII superset).  *object_hook* /
    *object_pairs_hook* post-process each decoded JSON object
    (*object_pairs_hook* wins when both are given).  *parse_float*,
    *parse_int* and *parse_constant* override the default conversions
    for floats, ints and the ``Infinity``/``-Infinity``/``NaN``
    literals.  ``use_decimal=True`` is shorthand for
    ``parse_float=decimal.Decimal``.  A custom decoder class may be
    supplied via *cls* (prefer the hooks over subclassing).
    """
    # Fast path: with no customization at all, reuse the cached decoder.
    use_cached_decoder = (
        cls is None and encoding is None and object_hook is None and
        parse_int is None and parse_float is None and
        parse_constant is None and object_pairs_hook is None and
        not use_decimal and not kw)
    if use_cached_decoder:
        return _default_decoder.decode(s)
    decoder_cls = JSONDecoder if cls is None else cls
    # Fold the explicitly-supplied hooks into the decoder kwargs.
    overrides = (
        ('object_hook', object_hook),
        ('object_pairs_hook', object_pairs_hook),
        ('parse_float', parse_float),
        ('parse_int', parse_int),
        ('parse_constant', parse_constant),
    )
    for key, value in overrides:
        if value is not None:
            kw[key] = value
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return decoder_cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch the package between C-accelerated and pure-Python internals.

    Swaps the scanner/encoder hooks on the decoder, encoder and scanner
    submodules, then rebuilds the module-level default encoder/decoder so
    the fast paths in loads()/dumps() pick up the new implementations.
    """
    from . import decoder as dec
    from . import encoder as enc
    from . import scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer the C implementations, falling back to pure Python when
        # the extension module is unavailable.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
                                       enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    # Recreate the cached default decoder/encoder so they bind the newly
    # selected scanner/encoder implementations.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
def simple_first(kv):
    """Helper for item_sort_key: order simple values before containers.

    Given a ``(key, value)`` pair, returns a sort key that places
    non-container values first, then orders by the original key.
    """
    key, value = kv
    value_is_container = isinstance(value, (list, dict, tuple))
    return (value_is_container, key)
| |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Miscellaneous helper methods for Jupyter Notebooks."""
import builtins
import difflib
import os
import re
import subprocess # nosec
import sys
import uuid
import warnings
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import pkg_resources
from deprecated.sphinx import deprecated
from IPython import get_ipython
from IPython.core.display import HTML, display
from tqdm import tqdm, tqdm_notebook
from .._version import VERSION
__version__ = VERSION
__author__ = "Ian Hellen"
def export(func: Callable):
    """Add the decorated object's name to its module's ``__all__``.

    Creates ``__all__`` on the module when it does not exist yet, and
    returns *func* unchanged so this can be used as a decorator.
    """
    module = sys.modules[func.__module__]
    if hasattr(module, "__all__"):
        module.__all__.append(func.__name__)
    else:
        module.__all__ = [func.__name__]
    return func
@export
def string_empty(string: str) -> bool:
    """Return True when *string* is None, empty, or only whitespace."""
    if string is None:
        return True
    return not string.strip()
@export
def is_not_empty(test_object: Any) -> bool:
    """Return True if *test_object* is truthy and, for strings, not blank."""
    if not test_object:
        return False
    if isinstance(test_object, str):
        # A truthy string still counts as empty when it is all whitespace.
        return bool(test_object.strip())
    return True
# Toggle Code Cell Contents
# HTML fragment rendered by toggle_code(): a button wired to the
# code_toggle() JS helper defined in _TOGGLE_CODE_PREPARE_STR.
_TOGGLE_CODE_STR = """
<form action="javascript:code_toggle()">
<input type="submit" id="toggleButton" value="Show/Hide Code">
</form>
"""
# Script injected by enable_toggle_code(): defines code_toggle(), which
# shows/hides the input area of the currently selected cell.
# NOTE(review): relies on jQuery and classic-Notebook DOM classes -
# presumably does not work in JupyterLab; confirm.
_TOGGLE_CODE_PREPARE_STR = """
<script>
function code_toggle() {
if ($('div.cell.code_cell.rendered.selected div.input').css('display')!='none'){
$('div.cell.code_cell.rendered.selected div.input').hide();
} else {
$('div.cell.code_cell.rendered.selected div.input').show();
}
}
</script>
"""
@export
def enable_toggle_code():
    """Load JS Function to enable code toggle button.

    Injects the code_toggle() helper script into the notebook page;
    call once before using toggle_code().
    """
    display(HTML(_TOGGLE_CODE_PREPARE_STR))
@export
def toggle_code():
    """Display a toggle button to hide/reveal code cell.

    Requires enable_toggle_code() to have been called first so the
    code_toggle() JS function exists on the page.
    """
    display(HTML(_TOGGLE_CODE_STR))
# String escapes
@export
def escape_windows_path(str_path: str) -> str:
    """Return *str_path* with every backslash doubled.

    Empty, None, or whitespace-only input is returned unchanged.
    """
    if not str_path or not str_path.strip():
        return str_path
    return str_path.replace("\\", "\\\\")
@export
def unescape_windows_path(str_path: str) -> str:
    """Collapse doubled backslashes in *str_path* back to single ones.

    Empty, None, or whitespace-only input is returned unchanged.
    """
    if not str_path or not str_path.strip():
        return str_path
    return str_path.replace("\\\\", "\\")
@deprecated(reason="Inline Javascript no longer supported", version="0.3.2")
@export
def get_nb_query_param(nb_url_search: str, param: str) -> Optional[str]:
    """
    Get a url query parameter from the search string.

    Parameters
    ----------
    nb_url_search: str
        The URL search string
    param: str
        The parameter name to search for

    Returns
    -------
    Optional[str]
        value of the query string parameter or None if not found.

    """
    # Fix: the previous pattern's character class was [\\?&], which also
    # matched a literal backslash before the parameter name; and `param`
    # was interpolated unescaped, so regex metacharacters in it could
    # corrupt the pattern. [?&] anchors the name at the start of the
    # query string or after a separator.
    qs_regex = r"[?&]{param}=(?P<val>[^&#]*)".format(param=re.escape(param))
    query_string_match = re.search(qs_regex, nb_url_search)
    if query_string_match:
        return query_string_match["val"]
    return None
@deprecated(reason="Inline Javascript no longer supported", version="0.3.2")
@export
def get_nb_query_params(nb_url_search: str) -> dict:
    """
    Get the url query parameters from the search string.

    Parameters
    ----------
    nb_url_search : str
        The URL search string

    Returns
    -------
    dict
        dictionary of the query string parameters.

    """
    params = {}
    qs_match = re.search(r"\?(?P<qs>[^#]+)#?", nb_url_search)
    if not qs_match:
        return params
    for element in qs_match["qs"].split("&"):
        if "=" not in element:
            continue
        parts = element.split("=")
        params[parts[0]] = parts[1]
    return params
@deprecated(reason="Inline Javascript no longer supported", version="0.3.2")
@export
def get_notebook_query_string():
    """Execute javascript to publish notebook query string as python variable."""
    # Fix: the HTML object was created and discarded - inside a function it
    # is never rendered unless passed to display(), so the JS never ran.
    display(
        HTML(
            """
    <script type="text/javascript">
        IPython.notebook.kernel.execute(
            "nb_query_string='".concat(window.location.search).concat("'"));
    </script>
    """
        )
    )
@export
def check_py_version(min_ver: Tuple = (3, 6)):
    """
    Check that the current python version is not less than `min_ver`.

    Parameters
    ----------
    min_ver : Tuple, optional
        Minimum required version, by default (3,6).
        A float (3.6) or string ("3.6") is also accepted.

    Raises
    ------
    SystemExit
        If the running interpreter is older than `min_ver`.

    """
    if isinstance(min_ver, (float, str)):
        min_ver_list = str(min_ver).split(".")
        min_ver = (int(min_ver_list[0]), int(min_ver_list[1]))
    if sys.version_info < min_ver:
        # Fix: the guidance previously hard-coded "Python 3.6" even when a
        # different minimum version was requested.
        print(
            "Check the Kernel->Change Kernel menu and ensure that "
            f"Python {min_ver[0]}.{min_ver[1]}"
        )
        print("or later is selected as the active kernel.")
        raise SystemExit(
            f"Python {min_ver[0]}.{min_ver[1]} or later is required.\n"
        )
@export
def resolve_pkg_path(part_path: str):
    """
    Resolve a path relative to the package.

    Parameters
    ----------
    part_path : str
        Absolute or relative path to resolve.

    Returns
    -------
    Optional[str]
        The resolved path, or None when no match (or more than one
        ambiguous match) was found under the package root.

    """
    if Path(part_path).is_absolute():
        return part_path
    # First try the path directly under the package root.
    resolved_path = str(Path(__file__).resolve().parent.parent.joinpath(part_path))
    if Path(resolved_path).exists():
        return resolved_path
    # Otherwise search recursively anywhere below the package root.
    searched_paths = list(
        Path(__file__).resolve().parent.parent.glob(str(Path("**").joinpath(part_path)))
    )
    if not searched_paths or len(searched_paths) > 1:
        # Fix: previous message read "No path or ambiguous match for X not
        # found", which was self-contradictory.
        warnings.warn(f"No path or ambiguous match for {part_path}")
        return None
    return str(searched_paths[0])
# pylint: disable=not-an-iterable, too-many-branches
@export  # noqa: MC0001
def check_and_install_missing_packages(
    required_packages: List[str],
    force_notebook: bool = False,
    user: bool = True,
    upgrade: bool = False,
) -> bool:
    """
    Check and install missing packages from provided list of packages.

    Parameters
    ----------
    required_packages : List[str]
        List of packages to check and install in a current environment
        Note you can add package version constraints by appending them to
        the package name, e.g. `pandas>=1.01`
    force_notebook : bool, optional
        Boolean value to force notebook version of progress bar,
        by default False (autodetect)
    user : bool, optional
        Boolean value to toggle user flag while installing pip packages,
        by default True
    upgrade: bool, option
        If true supply `--upgrade` flag to pip to install the latest
        version (applies to all package in `required_packages`)

    Returns
    -------
    bool :
        True if successful, else False

    """
    missing_packages = []
    # Check package requirements against installed set
    for req in required_packages:
        pkg_req = pkg_resources.Requirement.parse(req)
        try:
            found_pkg = pkg_resources.working_set.find(pkg_req)
        except pkg_resources.VersionConflict:
            # Installed version does not satisfy the constraint - treat it
            # as missing so it gets (re)installed below.
            found_pkg = None
        if found_pkg is None:
            missing_packages.append(req)
    if not missing_packages:
        print("All packages are already installed")
        return True
    print("Missing packages to be installed: ", *missing_packages, sep=" ")
    if is_ipython() or force_notebook:
        pkgbar = tqdm_notebook(missing_packages, desc="Installing...", unit="bytes")
    else:
        pkgbar = tqdm(missing_packages, desc="Installing...", unit="bytes")
    pkg_command = ["pip", "install"]
    if user:
        pkg_command.append("--user")
    if upgrade:
        pkg_command.append("--upgrade")
    pkg_success = True
    for package in pkgbar:
        try:
            subprocess.run(  # nosec
                pkg_command + [package],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
        except subprocess.CalledProcessError as proc_err:
            print(f"An Error has occurred while installing {package}.")
            print(f"Output: {str(proc_err.stdout)}")
            print(f"Errs: {str(proc_err.stderr)}")
            pkg_success = False
        else:
            # Fix: previously this success message was printed even when the
            # pip invocation failed; report it only on actual success.
            print(f"{package} installed.")
    return pkg_success
# pylint: enable=not-an-iterable, too-many-branches
# pylint: disable=invalid-name
@export
def md(string: str, styles: Union[str, Iterable[str]] = None):
    """
    Display a string as HTML with optional CSS styles.

    Parameters
    ----------
    string : str
        The string to display
    styles : Union[str, Iterable[str]], optional
        A style mnemonic or collection of styles. Multiple styles may be
        given as a list of strings or a comma-separated string,
        by default None

    """
    css = ""
    # A comma-separated string is normalized to a list and handled below.
    if isinstance(styles, str) and "," in styles:
        styles = [part.strip() for part in styles.split(",")]
    if isinstance(styles, list):
        css = ";".join(_F_STYLES.get(part, "") for part in styles)
    elif isinstance(styles, str):
        css = _F_STYLES.get(styles, "")
    display(HTML(f"<p style='{css}'>{string}</p>"))
# pylint: enable=invalid-name
@export
def md_warn(string: str):
    """
    Display a string as a warning - bold orange text prefixed by "Warning".

    Parameters
    ----------
    string : str
        The warning message.

    """
    md(f"Warning: {string}", "bold, orange, large")
@export
def md_error(string: str):
    """
    Display a string as an error - bold red text prefixed by "Error".

    Parameters
    ----------
    string : str
        The error message.

    """
    # Fix: the docstring promised red text but the style passed "orange"
    # (copied from md_warn); use the "red" mnemonic from _F_STYLES.
    md(f"Error: {string}", "bold, red, large")
# Styles available to use in the above Markdown tools.
# Maps a style mnemonic (accepted by md()'s `styles` argument) to the CSS
# declaration that implements it; unknown mnemonics resolve to "".
_F_STYLES = {
    "bold": "font-weight: bold",
    "italic": "font-style: italic",
    "red": "color: red",
    "green": "color: green",
    "blue": "color: blue",
    "large": "font-size: 130%",
    "heading": "font-size: 200%",
}
@export
def is_ipython() -> bool:
    """
    Return True if running in IPython environment.

    Returns
    -------
    bool
        True if running in IPython environment,
        otherwise False

    """
    # get_ipython() returns the active interactive shell instance, or None
    # when not running under IPython; bool() collapses that to a flag.
    return bool(get_ipython())
def check_kwarg(arg_name: str, legal_args: List[str]):
    """
    Check a single argument name against a list of allowed names.

    Parameters
    ----------
    arg_name : str
        Argument to check
    legal_args : List[str]
        List of possible arguments.

    Raises
    ------
    NameError
        If the argument is not legal. When `arg_name` closely matches
        one or more entries of `legal_args`, those candidates are
        included in the exception.

    """
    if arg_name in legal_args:
        return
    close = difflib.get_close_matches(arg_name, legal_args)
    message = f"{arg_name} is not a recognized argument. "
    if len(close) == 1:
        message += f"Closest match is '{close[0]}'"
    elif close:
        quoted = [f"'{match}'" for match in close]
        message += f"Closest matches are {', '.join(quoted)}"
    else:
        message += f"Valid arguments are {', '.join(legal_args)}"
    raise NameError(arg_name, message)
def check_kwargs(supplied_args: Dict[str, Any], legal_args: List[str]):
    """
    Check all kwargs names against a list.

    Parameters
    ----------
    supplied_args : Dict[str, Any]
        Arguments to check
    legal_args : List[str]
        List of possible arguments.

    Raises
    ------
    NameError
        If any of the arguments are not legal. When an arg closely
        matches one or more `legal_args`, those are returned in the
        exception.

    """
    errors = []
    for arg_name in supplied_args:
        try:
            check_kwarg(arg_name, legal_args)
        except NameError as name_err:
            errors.append(name_err)
    if errors:
        raise NameError(errors)
# Environment variable used as a flag to indicate unit-test mode.
_U_TEST_ENV = "MP_UNIT_TEST"


def unit_testing() -> bool:
    """
    Return True if in unit testing.

    Returns
    -------
    bool
        True if in unit testing

    """
    return os.environ.get(_U_TEST_ENV) is not None
# pylint: disable=invalid-name
def set_unit_testing(on: bool = True):
    """
    Set flag env var to indicate that code is being unit-tested.

    Parameters
    ----------
    on : bool, optional
        Turn unit testing flag on or off, by default True

    """
    if not on:
        os.environ.pop(_U_TEST_ENV, None)
    else:
        os.environ[_U_TEST_ENV] = "True"


# pylint: enable=invalid-name
def is_valid_uuid(uuid_str: Any) -> bool:
    """
    Return true if `uuid_str` is a valid GUID/UUID.

    Parameters
    ----------
    uuid_str : Any
        String (or stringifiable object) to test

    Returns
    -------
    bool
        True if valid GUID/UUID.

    """
    if not uuid_str:
        return False
    try:
        # Fix: uuid.UUID raised an uncaught AttributeError for truthy
        # non-string input (e.g. an int or a uuid.UUID instance);
        # converting to str first makes any such input safely testable.
        uuid.UUID(str(uuid_str))
    except (ValueError, TypeError, AttributeError):
        return False
    return True
def valid_pyname(identifier: str) -> str:
    """
    Return legal Python identifier, which doesn't collide with builtins.

    Parameters
    ----------
    identifier : str
        The input identifier

    Returns
    -------
    str
        The cleaned identifier

    """
    builtin_names = set(dir(builtins))
    if identifier in builtin_names:
        identifier = f"{identifier}_bi"
    identifier = re.sub("[^a-zA-Z0-9_]", "_", identifier)
    if not identifier:
        # Fix: an empty input previously raised IndexError on the [0] below.
        return "_"
    if identifier[0].isdigit():
        identifier = f"n_{identifier}"
    return identifier
| |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Base and helper classes for Google RESTful APIs."""
__all__ = ['add_sync_methods']
import random
import time
from cloudstorage import api_utils
try:
  from google.appengine.api import app_identity
  from google.appengine.ext import ndb
except ImportError:
  # NOTE(review): both branches import the identical modules; the fallback
  # presumably once pointed at a bundled/alternate package path - confirm
  # intent before simplifying.
  from google.appengine.api import app_identity
  from google.appengine.ext import ndb
def _make_sync_method(name):
"""Helper to synthesize a synchronous method from an async method name.
Used by the @add_sync_methods class decorator below.
Args:
name: The name of the synchronous method.
Returns:
A method (with first argument 'self') that retrieves and calls
self.<name>, passing its own arguments, expects it to return a
Future, and then waits for and returns that Future's result.
"""
def sync_wrapper(self, *args, **kwds):
method = getattr(self, name)
future = method(*args, **kwds)
return future.get_result()
return sync_wrapper
def add_sync_methods(cls):
  """Class decorator to add synchronous methods corresponding to async methods.

  This modifies the class in place, adding additional methods to it.
  If a synchronous method of a given name already exists it is not
  replaced.

  Args:
    cls: A class.

  Returns:
    The same class, modified in place.
  """
  # Fix: snapshot the names first. setattr() below mutates cls.__dict__,
  # and mutating a dict while iterating its live keys() view raises
  # RuntimeError on Python 3 (Python 2's keys() returned a list copy).
  for name in list(cls.__dict__.keys()):
    if name.endswith('_async'):
      sync_name = name[:-6]
      if not hasattr(cls, sync_name):
        setattr(cls, sync_name, _make_sync_method(name))
  return cls
class _AE_TokenStorage_(ndb.Model):
  """Entity to store app_identity tokens in memcache."""

  # The access-token string itself.
  token = ndb.StringProperty()
  # Token expiration time, seconds since the epoch.
  expires = ndb.FloatProperty()
@ndb.tasklet
def _make_token_async(scopes, service_account_id):
  """Get a fresh authentication token.

  Args:
    scopes: A list of scopes.
    service_account_id: Internal-use only.

  Raises:
    An ndb.Return with a tuple (token, expiration_time) where
    expiration_time is seconds since the epoch. (Tasklets deliver their
    result by raising ndb.Return.)
  """
  rpc = app_identity.create_rpc()
  app_identity.make_get_access_token_call(rpc, scopes, service_account_id)
  # Yielding the RPC suspends this tasklet until the call completes.
  token, expires_at = yield rpc
  raise ndb.Return((token, expires_at))
class _RestApi(object):
  """Base class for REST-based API wrapper classes.

  This class manages authentication tokens and request retries.  All
  APIs are available as synchronous and async methods; synchronous
  methods are synthesized from async ones by the add_sync_methods()
  function in this module.

  WARNING: Do NOT directly use this api. It's an implementation detail
  and is subject to change at any release.
  """

  def __init__(self, scopes, service_account_id=None, token_maker=None,
               retry_params=None):
    """Constructor.

    Args:
      scopes: A scope or a list of scopes.
      service_account_id: Internal use only.
      token_maker: An asynchronous function of the form
        (scopes, service_account_id) -> (token, expires).
      retry_params: An instance of api_utils.RetryParams. If None, the
        default for current thread will be used.
    """
    if isinstance(scopes, basestring):
      scopes = [scopes]
    self.scopes = scopes
    self.service_account_id = service_account_id
    self.make_token_async = token_maker or _make_token_async
    if not retry_params:
      retry_params = api_utils._get_default_retry_params()
    self.retry_params = retry_params
    self.user_agent = {'User-Agent': retry_params._user_agent}
    # Random per-instance headroom (seconds) so a fleet of instances does
    # not all refresh tokens (GET_ACCESS_TOKEN calls) at the same moment.
    self.expiration_headroom = random.randint(60, 240)

  def __getstate__(self):
    """Store state as part of serialization/pickling."""
    # A custom token maker is only pickled when it differs from the module
    # default; None here means "use _make_token_async on restore".
    return {'scopes': self.scopes,
            'id': self.service_account_id,
            'a_maker': (None if self.make_token_async == _make_token_async
                        else self.make_token_async),
            'retry_params': self.retry_params,
            'expiration_headroom': self.expiration_headroom}

  def __setstate__(self, state):
    """Restore state as part of deserialization/unpickling."""
    self.__init__(state['scopes'],
                  service_account_id=state['id'],
                  token_maker=state['a_maker'],
                  retry_params=state['retry_params'])
    # __init__ picked a fresh random headroom; restore the pickled value.
    self.expiration_headroom = state['expiration_headroom']

  @ndb.tasklet
  def do_request_async(self, url, method='GET', headers=None, payload=None,
                       deadline=None, callback=None):
    """Issue one HTTP request.

    It performs async retries using tasklets.

    Args:
      url: the url to fetch.
      method: the method in which to fetch.
      headers: the http headers.
      payload: the data to submit in the fetch.
      deadline: the deadline in which to make the call.
      callback: the call to make once completed.

    Yields:
      The async fetch of the url.
    """
    # Delegate retry policy to the shared wrapper; it re-invokes
    # urlfetch_async on retriable failures.
    retry_wrapper = api_utils._RetryWrapper(
        self.retry_params,
        retriable_exceptions=api_utils._RETRIABLE_EXCEPTIONS,
        should_retry=api_utils._should_retry)
    resp = yield retry_wrapper.run(
        self.urlfetch_async,
        url=url,
        method=method,
        headers=headers,
        payload=payload,
        deadline=deadline,
        callback=callback,
        follow_redirects=False)
    raise ndb.Return((resp.status_code, resp.headers, resp.content))

  @ndb.tasklet
  def get_token_async(self, refresh=False):
    """Get an authentication token.

    The token is cached in memcache, keyed by the scopes argument.
    Uses a random token expiration headroom value generated in the constructor
    to eliminate a burst of GET_ACCESS_TOKEN API requests.

    Args:
      refresh: If True, ignore a cached token; default False.

    Yields:
      An authentication token. This token is guaranteed to be non-expired.
    """
    # Cache key combines the service account and the scope list.
    key = '%s,%s' % (self.service_account_id, ','.join(self.scopes))
    ts = yield _AE_TokenStorage_.get_by_id_async(
        key, use_cache=True, use_memcache=True,
        use_datastore=self.retry_params.save_access_token)
    if refresh or ts is None or ts.expires < (
        time.time() + self.expiration_headroom):
      # Cached token missing or expiring within the headroom window:
      # mint a fresh one and re-cache it for its remaining lifetime.
      token, expires_at = yield self.make_token_async(
          self.scopes, self.service_account_id)
      timeout = int(expires_at - time.time())
      ts = _AE_TokenStorage_(id=key, token=token, expires=expires_at)
      if timeout > 0:
        yield ts.put_async(memcache_timeout=timeout,
                           use_datastore=self.retry_params.save_access_token,
                           use_cache=True, use_memcache=True)
    raise ndb.Return(ts.token)

  @ndb.tasklet
  def urlfetch_async(self, url, method='GET', headers=None,
                     payload=None, deadline=None, callback=None,
                     follow_redirects=False):
    """Make an async urlfetch() call.

    This is an async wrapper around urlfetch(). It adds an authentication
    header.

    Args:
      url: the url to fetch.
      method: the method in which to fetch.
      headers: the http headers.
      payload: the data to submit in the fetch.
      deadline: the deadline in which to make the call.
      callback: the call to make once completed.
      follow_redirects: whether or not to follow redirects.

    Yields:
      This returns a Future despite not being decorated with @ndb.tasklet!
    """
    # Copy caller headers so we never mutate the caller's dict.
    headers = {} if headers is None else dict(headers)
    headers.update(self.user_agent)
    self.token = yield self.get_token_async()
    if self.token:
      headers['authorization'] = 'OAuth ' + self.token
    deadline = deadline or self.retry_params.urlfetch_timeout
    ctx = ndb.get_context()
    resp = yield ctx.urlfetch(
        url, payload=payload, method=method,
        headers=headers, follow_redirects=follow_redirects,
        deadline=deadline, callback=callback)
    raise ndb.Return(resp)


# Synthesize blocking counterparts (do_request, get_token, urlfetch) for
# every *_async method defined above.
_RestApi = add_sync_methods(_RestApi)
| |
from __future__ import division
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from libtbx.utils import Sorry
import scipy.optimize as op
import collect_ncs_files
import seaborn as sns
import pandas as pd
import numpy as np
import sys
import os
class Explore_data(object):
    def __init__(self):
        # DataFrame populated by get_data_frame(); None until loaded.
        self.df = None
        # Column headers and name -> column-position map from the collection module.
        headers, table_pos_map = collect_ncs_files.table_headers()
        self.headers = headers
        self.table_pos_map = table_pos_map
        self.refine_test_names = collect_ncs_files.get_refine_test_names()
        # Del: just for testing
        # NOTE(review): the assignment below overrides the list fetched above
        # with a hard-coded subset - leftover test scaffolding per the comment
        # above; confirm before removing.
        self.refine_test_names = [
            'no ncs','cartesian ncs restraints','torsion ncs restraints']
        # experiment and plot types
        self.plot_types = ['r-free final','final clashscore']
        self.plot_deltas = ['r-work final','r-free final']
def get_data_frame(self):
""" Create pandas data frame """
c = collect_ncs_files.ncs_paper_data_collection()
fn = os.path.join(c.ncs_dir,'ncs_paper_data.csv')
if not os.path.isfile(fn):
raise Sorry('Make sure ncs_paper_data.csv is in %s'%c.ncs_dir)
df = pd.pandas.DataFrame.from_csv(fn,index_col=False)
self.df = df
return df
    def plot_clashscore(self):
        # TODO: not yet implemented - intended to plot clashscore results
        # (see plot_final_values, which handles 'final clashscore').
        pass
    def plot_delta_r_values(self,vals):
        """
        Scatter-plot per-structure (vals[0] - vals[1]) differences for the
        refinement tests against the 'no ncs' baseline.

        Args:
          vals (list): list of length 2, with the columns to be compared
        """
        print 'Plotting difference scatter plot for: ({} , {})'.format(*vals)
        df = self.df
        rfree_list = ['{} : {}'.format(vals[1],x) for x in self.refine_test_names]
        rwork_list = ['{} : {}'.format(vals[0],x) for x in self.refine_test_names]
        # look at r-free final
        rfreefinal = df[rfree_list]
        # look at r-work final
        rworkfinal = df[rwork_list]
        # look at the difference
        x = rworkfinal.values
        y = rfreefinal.values
        # get values for r_work - r_free
        rdiff = pd.concat(
            [df['pdb id'],pd.DataFrame(x-y,columns=self.refine_test_names)],axis=1)
        # relative values
        # NOTE(review): `c` and this `i` are computed but never used below.
        c = 'r-free final : no ncs'
        i = rfreefinal.columns.get_loc('r-free final : no ncs')
        # remove all PDBs without refinement results
        rdiff = rdiff.dropna(axis=0)
        print rdiff.head(5)
        print 'Number of files with all results:',len(rdiff)
        # Scatter Plot r-work , r-free difference
        # rdiff columns are: pdb id, then one delta column per test name.
        values = (rdiff.values).transpose()
        # values[1] is the 'no ncs' delta, used as the common y-axis.
        y = values[1]
        plot_types = ['.b','.y','.k']
        opacity = [1,0.5,0.5]
        # NOTE(review): rdiff has len(refine_test_names)+1 columns but this
        # loop stops at len(refine_test_names)-1, so the last test's deltas
        # (values[3]) are never plotted - confirm whether intended.
        for i in range(1,len(self.refine_test_names)):
            print self.refine_test_names[i],i,plot_types[i],opacity[i]
            x = values[i]
            plt.plot(x,y,plot_types[i],alpha=opacity[i])
        # Draw the y = x reference line across the data range.
        min_x = min(y)
        max_x = max(y)
        print min_x,max_x
        plt.plot([min_x,max_x],[min_x,max_x],'-b')
        plt.xlim([min_x,max_x])
        plt.ylim([min_x,max_x])
        plt.show()
        # plot
        # sns.tsplot(df, err_style="unit_points", color="mediumpurple")
        # df.plot(kind='',figsize=(15,5))
        # add text to plot
        # plt.annotate()
def plot_final_values(self,var_name):
"""
Args:
var_name (str): the name of the variable we want to make a plot for
"""
print '-'*50
print 'Making plot for:',var_name
plt.close("all")
rfree_list = ['{} : {}'.format(var_name,x) for x in self.refine_test_names]
rfreefinal = self.df[rfree_list]
rfinal = rfreefinal.dropna(axis=0)
print 'Total number of files (including with no results:',len(rfreefinal)
print 'Total number of files:',len(rfinal)
# rfinal = rfinal.sort(['r-free final : no ncs'], ascending=True)
rfinal = rfinal[rfinal > 0].dropna()
print 'Number of plotted files: ',len(rfinal)
print rfinal.head(5)
f = plt.figure(figsize=(10,6))
# Plot r-free differene between methods
x = range(len(rfinal))
# sort values in by the 'no ncs' order
values = (rfinal.values).transpose()
values = values[:,np.argsort(values[0,:])]
plot_types = ['.-b','.y','.k']
opacity = [1,0.5,0.5]
assert rfree_list[0].split(' : ')[1] == 'no ncs'
for refine_t in rfree_list:
i = rfreefinal.columns.get_loc(refine_t)
y = values[i]
plt.plot(x,y,plot_types[i],alpha=opacity[i])
# add text to plot
txt1 = 'Number of structures: {}'.format(len(rfinal))
plt.annotate(txt1,xy=(0.7*max(x),min(y)))
if var_name == 'final clashscore' : y_lable = 'Clashscore'
if var_name == 'r-free final' : y_lable = 'R-Free Final'
plt.xlabel('PDB structures')
plt.ylabel(y_lable)
plt.legend(self.refine_test_names,fontsize=14,loc=2)
fig_name = y_lable + '.png'
c = collect_ncs_files.ncs_paper_data_collection()
plt.savefig(os.path.join(c.figures_dir,fig_name),ext="png",dpi=600)
plt.show()
def save_pdb_ids_with_ncs_issues(self,df):
"""
When running refinement with the default value for
refinement.ncs.excessive_distance_limit
Many files had issues with Excessive distances to NCS averages,
and refinement did not complete.
We changed the refinement option to:
Excessive distances to NCS averages=None
But saved the PDB IDs with issues in:
files_with_excessive_distance_limit_issues.txt
"""
g = df['r-free final : no ncs'] > 0
df2 = df[[
'pdb id',
'r-free final : cartesian ncs restraints',
'r-free final : torsion ncs restraints']][g]
g2 = (df2['r-free final : cartesian ncs restraints'] == 0)
g2 |= (df2['r-free final : torsion ncs restraints'] == 0)
pdb_ids = df2['pdb id'][g2].values
# Save file list
c = collect_ncs_files.ncs_paper_data_collection()
pdb_ids = '\n'.join(pdb_ids)
fn = os.path.join(c.ncs_dir,'files_with_excessive_distance_limit_issues.txt')
open(fn,'w').write(pdb_ids)
    def find_outliers(self,df,var_name,test_name):
        """
        find cases where the value of 'var_name : test_name' is larger than
        'var_name : no ncs'

        Args:
          df : data frame
          var_name (str): the name of the variable we want to make a plot for
          test_name (str): refinement test name
        """
        # create the names of the columns to use
        tests = ['no ncs',test_name]
        col_list = ['{} : {}'.format(var_name,x) for x in tests]
        col_list.insert(0,'pdb id')
        print col_list
        # Get those columns
        f = self.get_clean_data_frame(df)
        f = f[col_list]
        f = f.dropna()
        # col_list[1] is the 'no ncs' baseline, col_list[2] the tested method;
        # keep rows where the tested method did worse (larger value).
        g = f[col_list[1]]<f[col_list[2]]
        f = f[g]
        f['delta'] = f[col_list[2]] - f[col_list[1]]
        # NOTE(review): DataFrame.sort was removed in later pandas
        # (sort_values is the replacement) - confirm the pinned version.
        f = f.sort(['delta'],ascending=False)
        print 'number of files with worse results:',sum(g)
        print f.head(6)
        print f.describe()
        print 'done'
def get_clean_data_frame(self, df):
  """
  Return only the rows of df where every final r-free column is positive.

  Rows with NA or zero in any of the three final r-free columns are
  excluded (NA compares as False against > 0), which removes refinements
  that did not complete.

  Args:
    df: data frame with the refinement results

  Returns:
    A filtered view of df.
  """
  required_columns = [
    'r-free final : no ncs',
    'r-free final : cartesian ncs restraints',
    'r-free final : torsion ncs restraints',
  ]
  keep = df[required_columns[0]] > 0
  for column in required_columns[1:]:
    keep = keep & (df[column] > 0)
  return df[keep]
def find_when_to_use_ncs(self,df,var_name,test_name):
"""
Learn from data what are the conditions where NCS refinement will be most
useful, by producing two scatter-plot grids (one colored by r-free
results, one by clashscore results) from a sampled subset of df.

Args:
df : data frame
var_name (str): the name of the variable we want to make a plot for
test_name (str): refinement test name
Returns:
None (figures are saved to disk by scatterplot_matrix)
"""
# get the results:
# y = 1 : NCS improved results (smaller value)
# y = 0 : NCS did not improve
# NOTE(review): tests and col_list are only referenced by the
# commented-out block near the end of this method - they are dead
# code on the active path.
tests = ['no ncs',test_name]
col_list = ['{} : {}'.format(var_name,x) for x in tests]
col_list.insert(0,'pdb id')
# drop non numerical columns
# todo: check why the following have missing values (look at mtz files)
'''
3asn
'''
# Columns dropped from the data frame before plotting.
drop_list = ['pdb id','year','r-free header',
'r-work header','master only',
'experiment',
'r-free init : torsion ncs restraints',
'r-free init : cartesian ncs restraints']
# Per-test column prefixes removed for every refinement test name.
remove_n = [
'refinement time','r-work final',
'cbeta deviations','cbeta final','rotamer outliers','rama outliers',
'rotamer final','r-work init']
# replace column names with shorter names
new_names_1=[
'Copies','Groups','Res.','Comp.','Solv.',
'ASU Atom','p/d ncs','p/d asu',
'R-Free init', 'R-Free final',
'R final Cartesian',
'R final Torsion']
old_names_1=[
'n copies', 'n groups', 'resolution', 'completeness', 'solvent fraction',
'atoms in asu', 'p-to-d ratio ncs', 'p-to-d ratio asu',
'r-free init : no ncs', 'r-free final : no ncs',
'r-free final : cartesian ncs restraints',
'r-free final : torsion ncs restraints']
remove_n_1 = remove_n + ['final clashscore','all-atom clashscore']
# number of rows sampled for the plots
n = 100
# First grid: r-free columns, colored by data/parameter ratio.
sampled_f = self.get_partial_df(
df,
drop_list=drop_list,
new_names=new_names_1,
old_names=old_names_1,
remove_n=remove_n_1,
n=n)
scatterplot_matrix(sampled_f,fig_name='Grid_R_free.png')
# NOTE(review): test_df is built but unused on the active path (only
# the commented-out scatterplot_matrix(test_df) call references it).
test_df = pd.DataFrame(np.random.randn(1000,4),columns=['a','b','c','d'])
# scatterplot_matrix(test_df)
# plot 2
new_names_2=[
'Copies','Groups','Res.','Comp.','Solv.',
'ASU Atom','p/d ncs','p/d asu',
'R-Free final',
'clashsocre',
'C.S. Cartesian',
'C.S. Torsion']
old_names_2=[
'n copies', 'n groups', 'resolution', 'completeness', 'solvent fraction',
'atoms in asu', 'p-to-d ratio ncs', 'p-to-d ratio asu',
'r-free final : no ncs',
'final clashscore : no ncs',
'final clashscore : cartesian ncs restraints',
'final clashscore : torsion ncs restraints']
# Second grid drops the r-free test columns and keeps clashscores.
drop_list_2 = drop_list + ['r-free final : cartesian ncs restraints']
drop_list_2.append('r-free final : torsion ncs restraints')
drop_list_2.append('r-free init : no ncs')
remove_n_2 = remove_n + ['all-atom clashscore']
sampled_f = self.get_partial_df(
df,
drop_list=drop_list_2,
new_names=new_names_2,
old_names=old_names_2,
remove_n=remove_n_2,
n=n)
scatterplot_matrix(sampled_f,fig_name='Grid_clashscore.png')
# g = f[col_list[1]]>f[col_list[2]]
# # remove the answer column from data
# f.drop(col_list[2],axis=1,inplace=True)
# y = np.array(g*1)
# y = y.reshape((len(g),1))
print 'Done'
def get_partial_df(self,df,drop_list,new_names,old_names,remove_n,n):
"""
Clean df, drop unwanted columns, sample n rows and rename the
remaining columns.

Args:
df : data frame
drop_list (list of str): columns to drop outright
new_names (list of str): replacement (short) column names
old_names (list of str): expected column names after dropping
remove_n (list of str): column prefixes dropped for every test name
n (int): number of rows to sample

Returns:
The sampled, renamed data frame.
"""
# work on a copy so the caller's drop_list is not mutated
drop_list = list(drop_list)
refine_n = collect_ncs_files.get_refine_test_names()
f = self.get_clean_data_frame(df)
f = f.dropna(axis=1)
# drop every '<prefix> : <test name>' column for each refinement test
for tst in refine_n:
remove_list = ['{} : {}'.format(x,tst) for x in remove_n]
drop_list.extend(remove_list)
for col in drop_list:
if col in f.columns:
f.drop(col,axis=1,inplace=True)
# NOTE(review): np.random.choice samples with replacement by default -
# TODO confirm duplicated rows are intended here.
rows = np.random.choice(f.index.values, n)
# NOTE(review): DataFrame.ix is deprecated in modern pandas (use .loc)
f = f.ix[rows]
# the remaining columns must line up exactly with old_names before
# they are renamed to new_names
assert len(new_names) == len(old_names)
assert list(f.columns) == old_names
f.columns = new_names
return f
def scatterplot_matrix(df,fig_name=''):
"""
Draw a seaborn pair-grid scatter plot matrix of df, colored by the
'p/d ncs' column, and optionally save it as a PNG.

Args:
df : data frame (must contain a 'p/d ncs' column used as hue)
fig_name (str): when non-empty, the figure is saved under this name
in the collection's figures directory
"""
g = sns.PairGrid(
df,
hue="p/d ncs",
size=1.2,
# aspect=1.6,
dropna=True)
# fig.square_grid = True
# histograms on the diagonal, scatter plots off the diagonal
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter)
# reduce the number of values in the legend
# NOTE(review): g._legend_data is a private seaborn attribute - this
# can break across seaborn versions.
legend_keys = sorted(g._legend_data)
# keep roughly every 10th legend entry, walking from the last key down
i = len(legend_keys) - 1
d = i//10 + 1
keys = []
while i>=0:
keys.append(legend_keys[i])
i -= d
legend = {k:g._legend_data[k] for k in keys}
g.add_legend(title='Data/Param NCS',legend_data=legend)
# anchor both axes at zero
g.set(ylim=(0, None))
g.set(xlim=(0, None))
# set the space between subplot
g.fig.subplots_adjust(wspace=0.02,hspace=0.02)
# set the number of ticks in each subplot
allticks = g.fig.get_axes()
for ticks in allticks:
# reduce the number of ticks
# keep only the first, middle and next-to-last tick on each axis
tx = ticks.get_xticks()
ty = ticks.get_yticks()
if len(tx) > 3:
mx = len(tx)//2
ticks.set_xticks([tx[0],tx[mx],tx[-2]])
if len(ty) > 3:
my = len(ty)//2
ticks.set_yticks([ty[0],ty[my],ty[-2]])
# fig.map(plt.scatter)
if fig_name:
c = collect_ncs_files.ncs_paper_data_collection()
plt.savefig(os.path.join(c.figures_dir,fig_name),ext="png",dpi=300)
# plt.show()
def learn_relations_param_to_good_ncs_effect(data):
"""
Use logistic regression to evaluate when will the NCS refinement results
will not be as good as those without NCS

Args:
data : training data m-by-n matrix, where the last
column is the 1: < no-ncs, 0: > no-ncs

Returns:
A prediction function built from the optimized parameters
(see predict_func).
"""
# set y as the last column, the "answers"
y = data[:,-1]
# set x as the data
X = data[:,:-1]
# Normalize X parameters
m,n = X.shape
y = y.reshape((m,1))
# add a column with the value 1 at the start of the parameters
# (the bias/intercept term)
X = np.append(np.ones([m,1]),X,1)
# Initialize fitting parameters
initial_theta = np.zeros([n + 1, 1])
# Compute and display initial cost and gradient
# NOTE(review): grad is computed but never used below.
cost = cost_function(initial_theta, X, y)
grad = gradient(initial_theta, X, y)
print 'Initial cost:',cost
# Call minimizer
# NOTE(review): op.fmin_bfgs expects the objective to return a scalar
# (the gradient is supplied separately via fprime) - confirm
# cost_function's return value matches that contract.
options = {'full_output': True, 'maxiter': 400}
myargs = (X, y)
out = op.fmin_bfgs(
cost_function,
initial_theta,
args=myargs,
fprime=gradient,
**options)
# collect results (full_output=True returns the optimization details)
optimal_theta,cost,grad_at_min,inv_hessian,fun_calls,grad_calls,flags = out
print 'Cost after minimization:',cost
print 'Parameters:',optimal_theta
# return predictor with optimal theta
return predict_func(optimal_theta)
def cost_function(theta, X, y):
  """
  Compute the cost for logistic regression.

  Args:
    theta (array) : n-by-1 array of current parameters values
    X (2D array): training data (first column is the bias term)
    y (array): training answers (0/1), m-by-1

  Returns:
    cost (float): the scalar logistic-regression cost of theta on (X, y)
  """
  # Initialize some useful values
  m = len(y)  # number of training examples
  # logistic hypothesis h = sigmoid(X.dot(theta)), written inline so this
  # function is self-contained
  h = 1.0 / (1.0 + np.exp(-X.dot(theta)))
  cost = -(y.transpose().dot(np.log(h)) + (1 - y).transpose().dot(np.log(1 - h))) / m
  # Return ONLY the scalar cost: this function is passed as the objective
  # to scipy.optimize.fmin_bfgs (with the gradient supplied separately via
  # fprime=gradient), and fmin_bfgs requires a scalar return value.  The
  # previous (cost, grad) tuple return contradicted the docstring and
  # broke the minimizer.
  return cost[0, 0]
def gradient(theta, X, y):
  """
  Return the gradient of the logistic-regression cost.

  Args:
    theta (array): current parameter values
    X (2D array): training data, m-by-n
    y (array): training answers, m values

  Returns:
    grad (array): n-by-1 gradient of the cost with respect to theta
  """
  n_samples, _ = X.shape
  # model predictions, forced into column-vector shape
  predictions = sigmoid(X.dot(theta)).reshape((n_samples, 1))
  targets = y.reshape((n_samples, 1))
  return X.transpose().dot(predictions - targets) / n_samples
def sigmoid(x):
  """ Return the element-wise logistic function 1/(1+exp(-x)) of array x """
  negative_exponential = np.exp(-x)
  return 1 / (1 + negative_exponential)
def predict_func(theta):
  """
  Build a prediction closure for fitted logistic-regression parameters.

  Args:
    theta (array): optimized parameter values

  Returns:
    A function mapping a data matrix x to model probabilities rounded
    to two decimals.
  """
  def predict(x):
    # logistic model probability for each row of x
    probability = sigmoid(x.dot(theta))
    return np.round(probability, 2)
  return predict
def run():
"""
Entry point: load the collected refinement results and run the
currently-enabled analyses (other analyses are kept commented out).
"""
explore = Explore_data()
df = explore.get_data_frame()
#
explore.find_when_to_use_ncs(df,'r-free final','cartesian ncs restraints')
# explore.plot_delta_r_values(['r-work final','r-free final'])
# explore.find_outliers(df,'r-free final','cartesian ncs restraints')
print '+'*50
explore.find_outliers(df,'final clashscore','cartesian ncs restraints')
print '+'*50
# #
# explore.plot_final_values('r-free final')
# explore.plot_final_values('final clashscore')
# explore.save_pdb_ids_with_ncs_issues(df)
print 'Done'
# run the analysis when executed as a script
if __name__ == '__main__':
run()
| |
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class ChromaLevelTest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'y_level_default_or_custom': 'DefaultOrCustomType',
        'y_level_lower': 'int',
        'y_level_upper': 'int',
        'y_level_max_outside_range': 'float',
        'y_level_tolerance_low': 'float',
        'y_level_tolerance_high': 'float',
        'u_vlevel_default_or_custom': 'DefaultOrCustomType',
        'u_vlevel_lower': 'int',
        'u_vlevel_upper': 'int',
        'u_vlevel_max_outside_range': 'float',
        'low_pass_filter': 'LowPassFilterType',
        'reject_on_error': 'bool',
        'do_correction': 'bool',
        'checked': 'bool'
    }

    attribute_map = {
        'y_level_default_or_custom': 'y_level_default_or_custom',
        'y_level_lower': 'y_level_lower',
        'y_level_upper': 'y_level_upper',
        'y_level_max_outside_range': 'y_level_max_outside_range',
        'y_level_tolerance_low': 'y_level_tolerance_low',
        'y_level_tolerance_high': 'y_level_tolerance_high',
        'u_vlevel_default_or_custom': 'u_vlevel_default_or_custom',
        'u_vlevel_lower': 'u_vlevel_lower',
        'u_vlevel_upper': 'u_vlevel_upper',
        'u_vlevel_max_outside_range': 'u_vlevel_max_outside_range',
        'low_pass_filter': 'low_pass_filter',
        'reject_on_error': 'reject_on_error',
        'do_correction': 'do_correction',
        'checked': 'checked'
    }

    def __init__(self, y_level_default_or_custom=None, y_level_lower=None, y_level_upper=None, y_level_max_outside_range=None, y_level_tolerance_low=None, y_level_tolerance_high=None, u_vlevel_default_or_custom=None, u_vlevel_lower=None, u_vlevel_upper=None, u_vlevel_max_outside_range=None, low_pass_filter=None, reject_on_error=None, do_correction=None, checked=None, local_vars_configuration=None):  # noqa: E501
        """ChromaLevelTest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Every backing field starts out unset.
        for _attr in self.openapi_types:
            setattr(self, '_' + _attr, None)
        self.discriminator = None

        # Route each supplied constructor argument through its property
        # setter; arguments left at None are skipped entirely.
        _supplied = {
            'y_level_default_or_custom': y_level_default_or_custom,
            'y_level_lower': y_level_lower,
            'y_level_upper': y_level_upper,
            'y_level_max_outside_range': y_level_max_outside_range,
            'y_level_tolerance_low': y_level_tolerance_low,
            'y_level_tolerance_high': y_level_tolerance_high,
            'u_vlevel_default_or_custom': u_vlevel_default_or_custom,
            'u_vlevel_lower': u_vlevel_lower,
            'u_vlevel_upper': u_vlevel_upper,
            'u_vlevel_max_outside_range': u_vlevel_max_outside_range,
            'low_pass_filter': low_pass_filter,
            'reject_on_error': reject_on_error,
            'do_correction': do_correction,
            'checked': checked,
        }
        for _attr, _value in _supplied.items():
            if _value is not None:
                setattr(self, _attr, _value)

    def _plain_property(attr):
        """Build a pass-through get/set property backed by '_<attr>'.

        All fourteen model attributes use identical getter/setter pairs,
        so they are generated from this single factory instead of being
        written out by hand.
        """
        def _getter(self):
            return getattr(self, '_' + attr)

        def _setter(self, value):
            setattr(self, '_' + attr, value)

        return property(_getter, _setter)

    y_level_default_or_custom = _plain_property('y_level_default_or_custom')
    y_level_lower = _plain_property('y_level_lower')
    y_level_upper = _plain_property('y_level_upper')
    y_level_max_outside_range = _plain_property('y_level_max_outside_range')
    y_level_tolerance_low = _plain_property('y_level_tolerance_low')
    y_level_tolerance_high = _plain_property('y_level_tolerance_high')
    u_vlevel_default_or_custom = _plain_property('u_vlevel_default_or_custom')
    u_vlevel_lower = _plain_property('u_vlevel_lower')
    u_vlevel_upper = _plain_property('u_vlevel_upper')
    u_vlevel_max_outside_range = _plain_property('u_vlevel_max_outside_range')
    low_pass_filter = _plain_property('low_pass_filter')
    reject_on_error = _plain_property('reject_on_error')
    do_correction = _plain_property('do_correction')
    checked = _plain_property('checked')

    # the factory is not part of the model's public surface
    del _plain_property

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # convert model items inside lists, leave plain items alone
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # convert model values inside dicts, leave plain values alone
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, ChromaLevelTest):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if isinstance(other, ChromaLevelTest):
            return self.to_dict() != other.to_dict()
        return True
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/MedicinalProductAuthorization) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class MedicinalProductAuthorization(domainresource.DomainResource):
    """ The regulatory authorization of a medicinal product.
    """

    resource_type = "MedicinalProductAuthorization"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # All elements start out unset; the superclass constructor fills
        # them in from `jsondict` when one is supplied.  JSON types:
        #   country                       list of `CodeableConcept` dicts -
        #                                 countries granting the authorization
        #   dataExclusivityPeriod         `Period` dict - time before generic
        #                                 applications can be submitted
        #   dateOfFirstAuthorization      `FHIRDate` str - first authorization
        #                                 by a Medicines Regulatory Agency
        #   holder                        `FHIRReference` dict - marketing
        #                                 authorization holder
        #   identifier                    list of `Identifier` dicts -
        #                                 regulator-assigned business ids
        #   internationalBirthDate        `FHIRDate` str - first marketing
        #                                 authorization in any country
        #   jurisdiction                  list of `CodeableConcept` dicts
        #   jurisdictionalAuthorization   list of jurisdictional authorizations
        #   legalBasis                    `CodeableConcept` dict
        #   procedure                     `MedicinalProductAuthorizationProcedure`
        #   regulator                     `FHIRReference` dict
        #   restoreDate                   `FHIRDate` str - anticipated
        #                                 restoration of a suspended marketing
        #   status                        `CodeableConcept` dict
        #   statusDate                    `FHIRDate` str - when the status
        #                                 became applicable
        #   subject                       `FHIRReference` dict - the product
        #                                 being authorized
        #   validityPeriod                `Period` dict - ISO 8601 period of
        #                                 the current status
        for _field in (
                "country", "dataExclusivityPeriod", "dateOfFirstAuthorization",
                "holder", "identifier", "internationalBirthDate",
                "jurisdiction", "jurisdictionalAuthorization", "legalBasis",
                "procedure", "regulator", "restoreDate", "status",
                "statusDate", "subject", "validityPeriod"):
            setattr(self, _field, None)

        super(MedicinalProductAuthorization, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(MedicinalProductAuthorization, self).elementProperties()
        # (name, json name, type, is_list, of_many, not_optional)
        js += [
            ("country", "country", codeableconcept.CodeableConcept, True, None, False),
            ("dataExclusivityPeriod", "dataExclusivityPeriod", period.Period, False, None, False),
            ("dateOfFirstAuthorization", "dateOfFirstAuthorization", fhirdate.FHIRDate, False, None, False),
            ("holder", "holder", fhirreference.FHIRReference, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("internationalBirthDate", "internationalBirthDate", fhirdate.FHIRDate, False, None, False),
            ("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
            ("jurisdictionalAuthorization", "jurisdictionalAuthorization", MedicinalProductAuthorizationJurisdictionalAuthorization, True, None, False),
            ("legalBasis", "legalBasis", codeableconcept.CodeableConcept, False, None, False),
            ("procedure", "procedure", MedicinalProductAuthorizationProcedure, False, None, False),
            ("regulator", "regulator", fhirreference.FHIRReference, False, None, False),
            ("restoreDate", "restoreDate", fhirdate.FHIRDate, False, None, False),
            ("status", "status", codeableconcept.CodeableConcept, False, None, False),
            ("statusDate", "statusDate", fhirdate.FHIRDate, False, None, False),
            ("subject", "subject", fhirreference.FHIRReference, False, None, False),
            ("validityPeriod", "validityPeriod", period.Period, False, None, False),
        ]
        return js
from . import backboneelement
class MedicinalProductAuthorizationJurisdictionalAuthorization(backboneelement.BackboneElement):
    """ Authorization in areas within a country.
    """

    resource_type = "MedicinalProductAuthorizationJurisdictionalAuthorization"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # All elements start out unset; the superclass constructor fills
        # them in from `jsondict` when one is supplied.  JSON types:
        #   country              `CodeableConcept` dict - country of
        #                        authorization
        #   identifier           list of `Identifier` dicts - the assigned
        #                        marketing authorization number
        #   jurisdiction         list of `CodeableConcept` dicts
        #   legalStatusOfSupply  `CodeableConcept` dict
        #   validityPeriod       `Period` dict - start and expected end date
        for _field in (
                "country", "identifier", "jurisdiction",
                "legalStatusOfSupply", "validityPeriod"):
            setattr(self, _field, None)

        super(MedicinalProductAuthorizationJurisdictionalAuthorization, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(MedicinalProductAuthorizationJurisdictionalAuthorization, self).elementProperties()
        # (name, json name, type, is_list, of_many, not_optional)
        js += [
            ("country", "country", codeableconcept.CodeableConcept, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
            ("legalStatusOfSupply", "legalStatusOfSupply", codeableconcept.CodeableConcept, False, None, False),
            ("validityPeriod", "validityPeriod", period.Period, False, None, False),
        ]
        return js
class MedicinalProductAuthorizationProcedure(backboneelement.BackboneElement):
    """ The regulatory procedure for granting or amending a marketing authorization.
    """

    resource_type = "MedicinalProductAuthorizationProcedure"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # All elements start out unset; the superclass constructor fills
        # them in from `jsondict` when one is supplied.  JSON types:
        #   application   list of `MedicinalProductAuthorizationProcedure`
        #                 items - applications submitted for the authorization
        #   dateDateTime  `FHIRDate` str - date of procedure (one of two
        #                 mutually exclusive 'date' representations)
        #   datePeriod    `Period` dict - date of procedure (the other
        #                 'date' representation)
        #   identifier    `Identifier` dict
        #   type          `CodeableConcept` dict - type of procedure (required)
        for _field in (
                "application", "dateDateTime", "datePeriod",
                "identifier", "type"):
            setattr(self, _field, None)

        super(MedicinalProductAuthorizationProcedure, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(MedicinalProductAuthorizationProcedure, self).elementProperties()
        # (name, json name, type, is_list, of_many, not_optional);
        # dateDateTime/datePeriod share the "date" of_many group
        js += [
            ("application", "application", MedicinalProductAuthorizationProcedure, True, None, False),
            ("dateDateTime", "dateDateTime", fhirdate.FHIRDate, False, "date", False),
            ("datePeriod", "datePeriod", period.Period, False, "date", False),
            ("identifier", "identifier", identifier.Identifier, False, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, True),
        ]
        return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
| |
"""CAP Collector tests base classes."""
__author__ = "arcadiy@google.com (Arkadii Yakovets)"
import os
import re
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.auth.models import User
from django.test import Client
from django.test import LiveServerTestCase
from django.test import TestCase
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
# Pre-compiled pattern for a lower-case hexadecimal UUID in the canonical
# 8-4-4-4-12 grouping.  Not anchored, so it matches a UUID appearing
# anywhere in the searched string.
UUID_RE = re.compile(
r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}")
class TestBase(TestCase):
  """Base class for other tests.

  Exposes the fixture test user's credentials as class constants and
  loads the corresponding User object before each test.
  """

  TEST_USER_EMAIL = "mr.web@driver.com"
  TEST_USER_LOGIN = "web_driver"
  TEST_USER_PASSWORD = "test_password"

  def setUp(self):
    # Look up the fixture-provided test user for this test run.
    login = self.TEST_USER_LOGIN
    self.test_user = User.objects.get(username=login)
class CAPCollectorLiveServer(TestBase, LiveServerTestCase):
  """Base class for live server tests.

  Collects the XPath/name locators for the alert composition wizard and
  exposes element accessors plus high-level helpers (tab navigation, field
  setters/getters, login) that the concrete Selenium test cases build on.
  """

  fixtures = ["test_alerts.json", "test_auth.json", "test_templates.json"]

  # Alerts tab / wizard navigation buttons.
  LATEST_ALERT_XPATH = "//*[@id='current_alerts_span']/a"
  UPDATE_ALERT_BUTTON_XPATH = "//*[@id='update_button']"
  CANCEL_ALERT_BUTTON_XPATH = "//*[@id='cancel_button']"
  ISSUE_NEW_ALERT_BUTTON_XPATH = "//*[@id='current-next-button']/span"
  ADD_ALERT_DETAILS_BUTTON_XPATH = "//*[@id='alert-next-button']/span"
  TARGET_AREA_BUTTON_XPATH = "//*[@id='info-next-button']/span"
  RELEASE_BUTTON_XPATH = "//*[@id='area-next-button']/span"
  RELEASE_ALERT_BUTTON_XPATH = "//*[@id='release']/div[2]/a/span"

  MESSAGE_TEMPLATE_ELEMENT = "//*[@id='select-message-template']"
  MESSAGE_TEMPLATE_ITEMS_XPATH = "//*[@id='select-message-template']/option[%s]"

  # For each <select>, option XPaths are keyed by the lowercase value name.
  # The "index + 2" skips the select's leading placeholder option.
  CATEGORY_SELECT_ELEMENT = "//*[@id='select-categories']"
  CATEGORY_KEYS = ("geo", "met", "safety", "security", "rescue", "fire",
                   "health", "env", "transport", "infra", "cbrne", "other")
  CATEGORY_XPATHS = {
      key: "//*[@id='select-categories']/option[%s]" % (index + 2)
      for index, key in enumerate(CATEGORY_KEYS)}
  RESPONSE_TYPE_SELECT_ELEMENT = "//*[@id='select-responseTypes']"
  RESPONSE_TYPE_KEYS = ("shelter", "evacuate", "prepare", "execute", "avoid",
                        "monitor", "assess", "allclear", "none")
  RESPONSE_TYPE_XPATHS = {
      key: "//*[@id='select-responseTypes']/option[%s]" % (index + 2)
      for index, key in enumerate(RESPONSE_TYPE_KEYS)}
  URGENCY_SELECT_ELEMENT = "//*[@id='select-urgency']"
  URGENCY_KEYS = ("immediate", "expected", "future", "past", "unknown")
  URGENCY_XPATHS = {
      key: "//*[@id='select-urgency']/option[%s]" % (index + 2)
      for index, key in enumerate(URGENCY_KEYS)}
  SEVERITY_SELECT_ELEMENT = "//*[@id='select-severity']"
  SEVERITY_KEYS = ("extreme", "severe", "moderate", "minor", "unknown")
  SEVERITY_XPATHS = {
      key: "//*[@id='select-severity']/option[%s]" % (index + 2)
      for index, key in enumerate(SEVERITY_KEYS)}
  CERTAINTY_SELECT_ELEMENT = "//*[@id='select-certainty']"
  CERTAINTY_KEYS = ("observed", "likely", "possible", "unlikely", "unknown")
  CERTAINTY_XPATHS = {
      key: "//*[@id='select-certainty']/option[%s]" % (index + 2)
      for index, key in enumerate(CERTAINTY_KEYS)}
  EXPIRATION_SELECT_ELEMENT = "//*[@id='select-expires-min']"
  EXPIRATION_XPATHS = {
      120: "//*[@id='select-expires-min']/option[6]",
      "Other": "//*[@id='select-expires-min']/option[11]",
  }
  EXPIRATION_OTHER_TEXT_INPUT_ELEMENT_NAME = "text-expires"

  # Message tab.
  ALERT_LANGUAGE_ELEMENT = ("//*[@id='info']/div[2]/div[1]/div/div/span/"
                            "span[1]/span")
  ALERT_LANGUAGE_XPATHS = {
      language_tuple[0]: "//*[@id='select-language']/option[%s]" % (index + 1)
      for index, language_tuple in enumerate(settings.LANGUAGES)
  }
  ALERT_SENDER_ELEMENT_NAME = "text-senderName"
  HEADLINE_ELEMENT_NAME = "text-headline"
  EVENT_ELEMENT_NAME = "text-event"
  DESCRIPTION_ELEMENT_NAME = "textarea-description"
  INSTRUCTION_ELEMENT_NAME = "textarea-instruction"
  CONTACT_ELEMENT_NAME = "text-contact"
  WEB_ELEMENT_NAME = "text-web"

  # Area tab.
  AREA_TEMPLATE_ELEMENT = "//*[@id='select-area-template']"
  AREA_TEMPLATE_SEARCH_XPATH = (
      "//input[contains(@class, 'select2-search__field')]")
  AREA_TEMPLATE_ITEMS_XPATH = (
      "//ul[contains(@class, 'select2-results__options')]/"
      "li[substring(@id, string-length(@id) - 1, 2) = '-%s']")
  AREA_ELEMENT_NAME = "textarea-areaDesc"
  AREA_GEOCODE_ADD_BUTTON_XPATH = "//*[@id='geocode_div']/a"
  AREA_GEOCODE_NAME_XPATH = "//*[@id='geocode_div']/div/div[1]/div[2]/input"
  AREA_GEOCODE_VALUE_XPATH = "//*[@id='geocode_div']/div/div[2]/div[2]/input"

  # Release tab.
  USERNAME_ELEMENT_XPATH = "//*[@id='text-uid']"
  PASSWORD_ELEMENT_XPATH = "//*[@id='text-pwd']"
  UUID_ELEMENT_XPATH = "//*[@id='response_uuid']"
  AUTH_USERNAME_ELEMENT_NAME = "username"
  AUTH_PASSWORD_ELEMENT_NAME = "password"
  AUTH_BUTTON_XPATH = "/html/body/form/div/input[3]"
  USER_LANGUAGE_ELEMENT = "//*[@id='info']/div[1]/div/div/div"
  USER_LANGUAGE_XPATHS = {
      language_tuple[0]: "//*[@id='ui-language']/option[%s]" % (index + 1)
      for index, language_tuple in enumerate(settings.LANGUAGES)
  }

  # Validation placeholders shown when a tab is incomplete/invalid.
  ALERT_TAB_REQUIRED_PLACEHOLDER_XPATH = "//*[@id='alert']/div[2]/span"
  MESSAGE_TAB_REQUIRED_PLACEHOLDER_XPATH = "//*[@id='info']/div[2]/div[11]"
  AREA_TAB_REQUIRED_PLACEHOLDER_XPATH = "//*[@id='area']/div[2]/div[8]"
  MESSAGE_TAB_INVALID_PLACEHOLDER_XPATH = (
      "//*[@id='info']/div[2]/div[12]")
  AREA_TAB_INVALID_PLACEHOLDER_XPATH = "//*[@id='area']/div[2]/div[9]"
  AREA_TAB_REQUIRED_COMBINED_PLACEHOLDER_XPATH = (
      "//*[@id='area']/div[2]/div[10]")
  AREA_TAB_MAP_VISIBLE_ELEMENT_ID = "OpenLayers_Control_MaximizeDiv_innerImage"

  @classmethod
  def setUpClass(cls):
    """Start one shared HTTP client and browser for the whole class."""
    cls.client = Client()
    cls.webdriver = WebDriver()
    super(CAPCollectorLiveServer, cls).setUpClass()

  @classmethod
  def tearDownClass(cls):
    """Shut the shared browser down after all tests in the class ran."""
    cls.webdriver.quit()
    super(CAPCollectorLiveServer, cls).tearDownClass()

  def WaitUntilVisible(self, xpath, by=By.XPATH, timeout=5):
    """Block until the element located by (by, xpath) is visible.

    Args:
      xpath: Locator value (an XPath unless `by` says otherwise).
      by: Selenium locator strategy; defaults to By.XPATH.
      timeout: Seconds to wait before raising TimeoutException.

    Returns:
      The located WebElement.
    """
    return WebDriverWait(self.webdriver, timeout).until(
        ec.visibility_of_element_located((by, xpath)))

  def Clear(self, element):
    """Clear an input element's current value."""
    return element.clear()

  def WaitUntilMapVisible(self):
    """Wait for the OpenLayers map control, i.e. the area tab is ready."""
    self.WaitUntilVisible(self.AREA_TAB_MAP_VISIBLE_ELEMENT_ID, by=By.ID)

  # Element accessors. Those using WaitUntilVisible tolerate asynchronous
  # rendering; the rest expect the element to already be present.
  @property
  def latest_alert_link(self):
    return self.WaitUntilVisible(self.LATEST_ALERT_XPATH)

  @property
  def update_alert_button(self):
    return self.WaitUntilVisible(self.UPDATE_ALERT_BUTTON_XPATH)

  @property
  def cancel_alert_button(self):
    return self.WaitUntilVisible(self.CANCEL_ALERT_BUTTON_XPATH)

  @property
  def issue_new_alert_button(self):
    return self.WaitUntilVisible(self.ISSUE_NEW_ALERT_BUTTON_XPATH)

  @property
  def add_alert_details_button(self):
    return self.WaitUntilVisible(self.ADD_ALERT_DETAILS_BUTTON_XPATH)

  @property
  def target_area_button(self):
    return self.WaitUntilVisible(self.TARGET_AREA_BUTTON_XPATH)

  @property
  def release_button(self):
    return self.WaitUntilVisible(self.RELEASE_BUTTON_XPATH)

  @property
  def release_alert_button(self):
    return self.WaitUntilVisible(self.RELEASE_ALERT_BUTTON_XPATH)

  @property
  def alert_tab_required_placeholder(self):
    return self.webdriver.find_element_by_xpath(
        self.ALERT_TAB_REQUIRED_PLACEHOLDER_XPATH)

  @property
  def message_tab_required_placeholder(self):
    return self.webdriver.find_element_by_xpath(
        self.MESSAGE_TAB_REQUIRED_PLACEHOLDER_XPATH)

  @property
  def message_tab_invalid_placeholder(self):
    return self.webdriver.find_element_by_xpath(
        self.MESSAGE_TAB_INVALID_PLACEHOLDER_XPATH)

  @property
  def area_tab_required_placeholder(self):
    return self.webdriver.find_element_by_xpath(
        self.AREA_TAB_REQUIRED_PLACEHOLDER_XPATH)

  @property
  def area_tab_invalid_placeholder(self):
    return self.webdriver.find_element_by_xpath(
        self.AREA_TAB_INVALID_PLACEHOLDER_XPATH)

  @property
  def area_tab_required_combined_placeholder(self):
    return self.webdriver.find_element_by_xpath(
        self.AREA_TAB_REQUIRED_COMBINED_PLACEHOLDER_XPATH)

  @property
  def area_geocode_add_button(self):
    return self.webdriver.find_element_by_xpath(
        self.AREA_GEOCODE_ADD_BUTTON_XPATH)

  @property
  def area_geocode_name_element(self):
    return self.webdriver.find_element_by_xpath(self.AREA_GEOCODE_NAME_XPATH)

  @property
  def area_geocode_value_element(self):
    return self.webdriver.find_element_by_xpath(self.AREA_GEOCODE_VALUE_XPATH)

  @property
  def message_template_select(self):
    return self.WaitUntilVisible(self.MESSAGE_TEMPLATE_ELEMENT)

  @property
  def category_select(self):
    # Bug fix: previously called self.find_element_by_xpath(...), which does
    # not exist on the test case and raised AttributeError; go through the
    # webdriver like every other accessor.
    return self.webdriver.find_element_by_xpath(self.CATEGORY_SELECT_ELEMENT)

  @property
  def response_type_select(self):
    return self.webdriver.find_element_by_xpath(
        self.RESPONSE_TYPE_SELECT_ELEMENT)

  @property
  def urgency_select(self):
    return self.webdriver.find_element_by_xpath(self.URGENCY_SELECT_ELEMENT)

  @property
  def severity_select(self):
    return self.webdriver.find_element_by_xpath(self.SEVERITY_SELECT_ELEMENT)

  @property
  def certainty_select(self):
    return self.webdriver.find_element_by_xpath(self.CERTAINTY_SELECT_ELEMENT)

  @property
  def expiration_select(self):
    return self.webdriver.find_element_by_xpath(self.EXPIRATION_SELECT_ELEMENT)

  @property
  def language_select(self):
    return self.WaitUntilVisible(self.ALERT_LANGUAGE_ELEMENT)

  @property
  def text_expire_element(self):
    return self.WaitUntilVisible(self.EXPIRATION_OTHER_TEXT_INPUT_ELEMENT_NAME,
                                 by=By.NAME)

  @property
  def sender_element(self):
    return self.WaitUntilVisible(self.ALERT_SENDER_ELEMENT_NAME, by=By.NAME)

  @property
  def headline_element(self):
    return self.WaitUntilVisible(self.HEADLINE_ELEMENT_NAME, by=By.NAME)

  @property
  def event_element(self):
    return self.WaitUntilVisible(self.EVENT_ELEMENT_NAME, by=By.NAME)

  @property
  def description_element(self):
    return self.webdriver.find_element_by_name(self.DESCRIPTION_ELEMENT_NAME)

  @property
  def instruction_element(self):
    return self.webdriver.find_element_by_name(self.INSTRUCTION_ELEMENT_NAME)

  @property
  def contact_element(self):
    return self.webdriver.find_element_by_name(self.CONTACT_ELEMENT_NAME)

  @property
  def web_element(self):
    return self.webdriver.find_element_by_name(self.WEB_ELEMENT_NAME)

  @property
  def area_template_select(self):
    return self.WaitUntilVisible(self.AREA_TEMPLATE_ELEMENT)

  @property
  def area_element(self):
    return self.WaitUntilVisible(self.AREA_ELEMENT_NAME, by=By.NAME)

  @property
  def username_element(self):
    return self.WaitUntilVisible(self.USERNAME_ELEMENT_XPATH)

  @property
  def password_element(self):
    return self.webdriver.find_element_by_xpath(self.PASSWORD_ELEMENT_XPATH)

  @property
  def uuid_element(self):
    return self.WaitUntilVisible(self.UUID_ELEMENT_XPATH)

  @property
  def auth_username_element(self):
    return self.webdriver.find_element_by_name(self.AUTH_USERNAME_ELEMENT_NAME)

  @property
  def auth_password_element(self):
    return self.webdriver.find_element_by_name(self.AUTH_PASSWORD_ELEMENT_NAME)

  @property
  def user_language_select(self):
    return self.WaitUntilVisible(self.USER_LANGUAGE_ELEMENT)

  # Wizard navigation helpers.
  def GoToAlertsTab(self):
    self.webdriver.get(self.live_server_url)

  def GoToAlertTab(self):
    self.issue_new_alert_button.click()

  def GoToMessageTab(self):
    self.add_alert_details_button.click()

  def GoToAreaTab(self):
    self.target_area_button.click()

  def GoToReleaseTab(self):
    self.release_button.click()

  def OpenLatestAlert(self):
    self.latest_alert_link.click()

  def ClickUpdateAlertButton(self):
    self.update_alert_button.click()

  def ClickCancelAlertButton(self):
    self.cancel_alert_button.click()

  def ReleaseAlert(self):
    self.release_alert_button.click()

  # Field setters/getters used by the test cases.
  def SetMessageTemplate(self, template_item_number):
    message_template_xpath = (self.MESSAGE_TEMPLATE_ITEMS_XPATH %
                              (template_item_number + 1))
    menu_item = self.WaitUntilVisible(message_template_xpath)
    menu_item.click()

  def GetMessageTemplate(self):
    return self.message_template_select.get_attribute("value")

  def SetCategory(self, category):
    category_xpath = self.CATEGORY_XPATHS.get(category.lower())
    menu_item = self.WaitUntilVisible(category_xpath)
    menu_item.click()

  def GetCategory(self):
    return self.category_select.get_attribute("value")

  def SetResponseType(self, response_type):
    response_type_xpath = self.RESPONSE_TYPE_XPATHS.get(response_type.lower())
    menu_item = self.WaitUntilVisible(response_type_xpath)
    menu_item.click()

  def GetResponseType(self):
    return self.response_type_select.get_attribute("value")

  def SetUrgency(self, urgency):
    urgency_xpath = self.URGENCY_XPATHS.get(urgency.lower())
    self.webdriver.find_element_by_xpath(urgency_xpath).click()

  def GetUrgency(self):
    return self.urgency_select.get_attribute("value")

  def SetSeverity(self, severity):
    severity_xpath = self.SEVERITY_XPATHS.get(severity.lower())
    self.webdriver.find_element_by_xpath(severity_xpath).click()

  def GetExpiration(self):
    return self.expiration_select.get_attribute("value")

  def SetExpiration(self, expiration):
    expiration_xpath = self.EXPIRATION_XPATHS.get(expiration)
    if expiration_xpath:
      self.webdriver.find_element_by_xpath(expiration_xpath).click()

  def SetOtherTextExpireMinutes(self, expire_minutes):
    self.text_expire_element.send_keys(expire_minutes)

  def GetSeverity(self):
    return self.severity_select.get_attribute("value")

  def SetCertainty(self, certainty):
    certainty_xpath = self.CERTAINTY_XPATHS.get(certainty.lower())
    if certainty_xpath:
      self.webdriver.find_element_by_xpath(certainty_xpath).click()

  def GetCertainty(self):
    return self.certainty_select.get_attribute("value")

  def SetAreaTemplate(self, template_item_number):
    self.WaitUntilMapVisible()
    search = self.WaitUntilVisible(self.AREA_TEMPLATE_SEARCH_XPATH)
    search.click()
    menu_item = self.WaitUntilVisible(
        self.AREA_TEMPLATE_ITEMS_XPATH % template_item_number)
    menu_item.click()

  def GetAreaTemplate(self):
    return self.area_template_select.get_attribute("value")

  def GetUuid(self):
    return self.uuid_element.text

  def GetLanguage(self):
    return self.language_select.text

  def SetLanguage(self, language):
    language_xpath = self.ALERT_LANGUAGE_XPATHS.get(language.lower())
    self.language_select.click()
    menu_item = self.WaitUntilVisible(language_xpath)
    menu_item.click()

  def SetAlertSenderName(self, sender_name):
    self.sender_element.send_keys(sender_name)

  def SetHeadline(self, head_line):
    self.Clear(self.headline_element)
    self.headline_element.send_keys(head_line)

  def ClearEvent(self):
    self.Clear(self.event_element)

  def SetEvent(self, event):
    self.event_element.send_keys(event)

  def ClearDescription(self):
    self.Clear(self.description_element)

  def SetDescription(self, description):
    self.description_element.send_keys(description)

  def ClearInstruction(self):
    self.Clear(self.instruction_element)

  def SetInstruction(self, instruction):
    self.instruction_element.send_keys(instruction)

  def SetContact(self, contact):
    self.contact_element.send_keys(contact)

  def SetWeb(self, url):
    self.web_element.send_keys(url)

  def ClearArea(self):
    self.Clear(self.area_element)

  def SetArea(self, area):
    self.WaitUntilMapVisible()
    self.Clear(self.area_element)
    self.area_element.send_keys(area)

  def SetGeocode(self, name, value):
    """Sets geocode. Supports only one geocode item."""
    self.WaitUntilMapVisible()
    self.area_geocode_add_button.click()
    self.area_geocode_name_element.send_keys(name)
    self.area_geocode_value_element.send_keys(value)

  def SetUsername(self, username):
    self.username_element.send_keys(username)

  def SetPassword(self, password):
    self.password_element.send_keys(password)

  def Login(self):
    """Authenticate through the login form using the fixture credentials."""
    self.auth_username_element.send_keys(self.TEST_USER_LOGIN)
    self.auth_password_element.send_keys(self.TEST_USER_PASSWORD)
    self.webdriver.find_element_by_xpath(self.AUTH_BUTTON_XPATH).click()

  def SetUserLanguage(self, language):
    # NOTE(review): this uses ALERT_LANGUAGE_XPATHS while USER_LANGUAGE_XPATHS
    # is defined but never used -- looks suspicious, but behavior is kept
    # as-is; confirm which select the UI actually toggles here.
    self.user_language_select.click()
    language_xpath = self.ALERT_LANGUAGE_XPATHS.get(language)
    self.webdriver.find_element_by_xpath(language_xpath).click()
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Note that since July 2004, all patents on the LZW compression algorithm have
expired. Therefore the GIF format may now be used freely.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if its the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
Many thanks to Alex Robinson for implementing the concept of subrectangles,
which (depending on image content) can give a very significant reduction in
file size.
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Useful links
-------------
* http://tronche.com/computer-graphics/gif/
* http://en.wikipedia.org/wiki/Graphics_Interchange_Format
* http://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
import os, time
try:
import PIL
from PIL import Image
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
def get_cKDTree():
    """Lazily import scipy's cKDTree.

    Returns the cKDTree class when scipy is installed, otherwise None.
    """
    try:
        from scipy.spatial import cKDTree
    except ImportError:
        return None
    return cKDTree
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image.Image):
# We assume PIL images are allright
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
def intToBin(i):
    """Serialize a non-negative integer (< 65536) as two bytes,
    little endian, returned as a 2-character string."""
    high, low = divmod(i, 256)
    return chr(low) + chr(high)
class GifWriter:
    """ GifWriter()

    Class that contains methods for helping write the animated GIF file.

    All byte sequences are built as (latin-1 style) strings, matching the
    Python-2 heritage of this module.
    """

    def getheaderAnim(self, im):
        """ getheaderAnim(im)

        Get animation header. To replace PILs getheader()[0]
        """
        bb = "GIF89a"
        bb += intToBin(im.size[0])
        bb += intToBin(im.size[1])
        # Packed field 0x87 plus two zero bytes; per the GIF89a layout these
        # follow the logical screen size in the header.
        bb += "\x87\x00\x00"
        return bb

    def getImageDescriptor(self, im, xy=None):
        """ getImageDescriptor(im, xy=None)

        Used for the local color table properties per image.
        Otherwise global color table applies to all frames irrespective of
        whether additional colors comes in play that require a redefined
        palette. Still a maximum of 256 color per frame, obviously.

        Written by Ant1 on 2010-08-22
        Modified by Alex Robinson in January 2011 to implement subrectangles.
        """
        # Default: use full image and place at upper left
        if xy is None:
            xy = (0,0)
        # Image separator,
        bb = '\x2C'
        # Image position and size
        bb += intToBin( xy[0] ) # Left position
        bb += intToBin( xy[1] ) # Top position
        bb += intToBin( im.size[0] ) # image width
        bb += intToBin( im.size[1] ) # image height
        # packed field: local color table flag1, interlace0, sorted table0,
        # reserved00, lct size111=7=2^(7+1)=256.
        bb += '\x87'
        # LZW minimum size code now comes later, beginning of [image data] blocks
        return bb

    def getAppExt(self, loops=float('inf')):
        """ getAppExt(loops=float('inf'))

        Application extension. This part specifies the amount of loops.
        If loops is 0 or inf, it goes on infinitely.
        """
        # The NETSCAPE block encodes "infinite" as the max 16-bit value.
        if loops==0 or loops==float('inf'):
            loops = 2**16-1
            #bb = "" # application extension should not be used
                    # (the extension interprets zero loops
                    # to mean an infinite number of loops)
                    # Mmm, does not seem to work
        if True:
            bb = "\x21\xFF\x0B"  # application extension
            bb += "NETSCAPE2.0"
            bb += "\x03\x01"
            bb += intToBin(loops)
            bb += '\x00'  # end
        return bb

    def getGraphicsControlExt(self, duration=0.1, dispose=2):
        """ getGraphicsControlExt(duration=0.1, dispose=2)

        Graphics Control Extension. A sort of header at the start of
        each image. Specifies duration and transparency.

        Dispose
        -------
          * 0 - No disposal specified.
          * 1 - Do not dispose. The graphic is to be left in place.
          * 2 - Restore to background color. The area used by the graphic
            must be restored to the background color.
          * 3 - Restore to previous. The decoder is required to restore the
            area overwritten by the graphic with what was there prior to
            rendering the graphic.
          * 4-7 -To be defined.
        """
        bb = '\x21\xF9\x04'
        bb += chr((dispose & 3) << 2)  # low bit 1 == transparency,
        # 2nd bit 1 == user input , next 3 bits, the low two of which are used,
        # are dispose.
        bb += intToBin( int(duration*100) ) # in 100th of seconds
        bb += '\x00'  # no transparent color
        bb += '\x00'  # end
        return bb

    def handleSubRectangles(self, images, subRectangles):
        """ handleSubRectangles(images)

        Handle the sub-rectangle stuff. If the rectangles are given by the
        user, the values are checked. Otherwise the subrectangles are
        calculated automatically.

        Returns (images, xy) where xy is a list of top-left positions,
        one per image.
        """
        if isinstance(subRectangles, (tuple,list)):
            # xy given directly
            # Check xy
            xy = subRectangles
            if xy is None:
                xy = (0,0)
            if hasattr(xy, '__len__'):
                if len(xy) == len(images):
                    xy = [xxyy for xxyy in xy]
                else:
                    raise ValueError("len(xy) doesn't match amount of images.")
            else:
                xy = [xy for im in images]
            xy[0] = (0,0)
        else:
            # Calculate xy using some basic image processing
            # Check Numpy
            if np is None:
                raise RuntimeError("Need Numpy to use auto-subRectangles.")
            # First make numpy arrays if required
            for i in range(len(images)):
                im = images[i]
                if isinstance(im, Image.Image):
                    tmp = im.convert() # Make without palette
                    a = np.asarray(tmp)
                    if len(a.shape)==0:
                        raise MemoryError("Too little memory to convert PIL image to array")
                    images[i] = a
            # Determine the sub rectangles
            images, xy = self.getSubRectangles(images)
        # Done
        return images, xy

    def getSubRectangles(self, ims):
        """ getSubRectangles(ims)

        Calculate the minimal rectangles that need updating each frame.
        Returns a two-element tuple containing the cropped images and a
        list of x-y positions.

        Calculating the subrectangles takes extra time, obviously. However,
        if the image sizes were reduced, the actual writing of the GIF
        goes faster. In some cases applying this method produces a GIF faster.
        """
        # Check image count
        if len(ims) < 2:
            return ims, [(0,0) for i in ims]
        # We need numpy
        if np is None:
            raise RuntimeError("Need Numpy to calculate sub-rectangles. ")
        # Prepare
        ims2 = [ims[0]]
        xy = [(0,0)]
        t0 = time.time()
        # Iterate over images
        prev = ims[0]
        for im in ims[1:]:
            # Get difference, sum over colors
            diff = np.abs(im-prev)
            if diff.ndim==3:
                diff = diff.sum(2)
            # Get begin and end for both dimensions
            X = np.argwhere(diff.sum(0))
            Y = np.argwhere(diff.sum(1))
            # Get rect coordinates
            if X.size and Y.size:
                x0, x1 = X[0], X[-1]+1
                y0, y1 = Y[0], Y[-1]+1
            else: # No change ... make it minimal
                x0, x1 = 0, 2
                y0, y1 = 0, 2
            # Cut out and store
            im2 = im[y0:y1,x0:x1]
            prev = im
            ims2.append(im2)
            xy.append((x0,y0))
        # Done
        #print '%1.2f seconds to determine subrectangles of %i images' % (time.time()-t0, len(ims2))
        return ims2, xy

    def convertImagesToPIL(self, images, dither, nq=0):
        """ convertImagesToPIL(images, nq=0)

        Convert images to Paletted PIL images, which can then be
        written to a single animated GIF.
        """
        # Convert to PIL images
        images2 = []
        for im in images:
            if isinstance(im, Image.Image):
                images2.append(im)
            elif np and isinstance(im, np.ndarray):
                if im.ndim==3 and im.shape[2]==3:
                    im = Image.fromarray(im,'RGB')
                elif im.ndim==3 and im.shape[2]==4:
                    # Drop the alpha channel; GIF has no alpha support here.
                    im = Image.fromarray(im[:,:,:3],'RGB')
                elif im.ndim==2:
                    im = Image.fromarray(im,'L')
                images2.append(im)
        # Convert to paletted PIL images
        images, images2 = images2, []
        if nq >= 1:
            # NeuQuant algorithm
            for im in images:
                im = im.convert("RGBA") # NQ assumes RGBA
                nqInstance = NeuQuant(im, int(nq)) # Learn colors from image
                if dither:
                    im = im.convert("RGB").quantize(palette=nqInstance.paletteImage())
                else:
                    im = nqInstance.quantize(im)  # Use to quantize the image itself
                images2.append(im)
        else:
            # Adaptive PIL algorithm
            AD = Image.ADAPTIVE
            for im in images:
                im = im.convert('P', palette=AD, dither=dither)
                images2.append(im)
        # Done
        return images2

    def writeGifToFile(self, fp, images, durations, loops, xys, disposes):
        """ writeGifToFile(fp, images, durations, loops, xys, disposes)

        Given a set of images writes the bytes to the specified stream.
        Returns the number of frames written.
        """
        # Obtain palette for all images and count each occurrence
        palettes, occur = [], []
        for im in images:
            palettes.append( getheader(im)[1] )
        for palette in palettes:
            occur.append( palettes.count( palette ) )
        # Select most-used palette as the global one (or first in case no max)
        globalPalette = palettes[ occur.index(max(occur)) ]
        # Init
        frames = 0
        firstFrame = True
        for im, palette in zip(images, palettes):
            if firstFrame:
                # Write header
                # Gather info
                header = self.getheaderAnim(im)
                appext = self.getAppExt(loops)
                # Write
                fp.write(header)
                fp.write(globalPalette)
                fp.write(appext)
                # Next frame is not the first
                firstFrame = False
            if True:
                # Write palette and image data
                # Gather info
                data = getdata(im)
                imdes, data = data[0], data[1:]
                graphext = self.getGraphicsControlExt(durations[frames],
                                                      disposes[frames])
                # Make image descriptor suitable for using 256 local color palette
                lid = self.getImageDescriptor(im, xys[frames])
                # Write local header
                # A frame only gets its own color table when its palette
                # differs from the global one (or disposal is non-default).
                if (palette != globalPalette) or (disposes[frames] != 2):
                    # Use local color palette
                    fp.write(graphext)
                    fp.write(lid) # write suitable image descriptor
                    fp.write(palette) # write local color table
                    fp.write('\x08') # LZW minimum size code
                else:
                    # Use global color palette
                    fp.write(graphext)
                    fp.write(imdes) # write suitable image descriptor
                # Write image data
                for d in data:
                    fp.write(d)
            # Prepare for next round
            frames = frames + 1
        fp.write(";")  # end gif
        return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False,
             nq=0, subRectangles=True, dispose=None):
    """ writeGif(filename, images, duration=0.1, repeat=True, dither=False,
                    nq=0, subRectangles=True, dispose=None)

    Write an animated gif from the specified images.

    Parameters
    ----------
    filename : string
        The name of the file to write the image to.
    images : list
        Should be a list consisting of PIL images or numpy arrays.
        The latter should be between 0 and 255 for integer types, and
        between 0 and 1 for float types.
    duration : scalar or list of scalars
        The duration for all frames, or (if a list) for each frame.
    repeat : bool or integer
        The amount of loops. If True, loops infinitely.
    dither : bool
        Whether to apply dithering
    nq : integer
        If nonzero, applies the NeuQuant quantization algorithm to create
        the color palette. This algorithm is superior, but slower than
        the standard PIL algorithm. The value of nq is the quality
        parameter. 1 represents the best quality. 10 is in general a
        good tradeoff between quality and speed. When using this option,
        better results are usually obtained when subRectangles is False.
    subRectangles : False, True, or a list of 2-element tuples
        Whether to use sub-rectangles. If True, the minimal rectangle that
        is required to update each frame is automatically detected. This
        can give significant reductions in file size, particularly if only
        a part of the image changes. One can also give a list of x-y
        coordinates if you want to do the cropping yourself. The default
        is True.
    dispose : int
        How to dispose each frame. 1 means that each frame is to be left
        in place. 2 means the background color should be restored after
        each frame. 3 means the decoder should restore the previous frame.
        If subRectangles==False, the default is 2, otherwise it is 1.

    Raises
    ------
    RuntimeError if PIL is not available, ValueError if the per-frame
    duration/dispose lists do not match the number of images.
    """
    # Check PIL
    if PIL is None:
        raise RuntimeError("Need PIL to write animated gif files.")
    # Check images
    images = checkImages(images)
    # Instantiate writer object
    gifWriter = GifWriter()
    # Check loops: map the user-friendly `repeat` onto a loop count
    # (zero is the GIF convention for "loop forever").
    if repeat is False:
        loops = 1
    elif repeat is True:
        loops = 0  # zero means infinite
    else:
        loops = int(repeat)
    # Check duration: normalize to one value per frame.
    if hasattr(duration, '__len__'):
        if len(duration) == len(images):
            duration = [d for d in duration]
        else:
            raise ValueError("len(duration) doesn't match amount of images.")
    else:
        duration = [duration for im in images]
    # Check subrectangles
    if subRectangles:
        images, xy = gifWriter.handleSubRectangles(images, subRectangles)
        defaultDispose = 1  # Leave image in place
    else:
        # Normal mode
        xy = [(0, 0) for im in images]
        defaultDispose = 2  # Restore to background color.
    # Check dispose
    if dispose is None:
        dispose = defaultDispose
    if hasattr(dispose, '__len__'):
        if len(dispose) != len(images):
            # Bug fix: this error previously (and misleadingly) said
            # "len(xy)" although it is the dispose list being validated.
            raise ValueError("len(dispose) doesn't match amount of images.")
    else:
        dispose = [dispose for im in images]
    # Make images in a format that we can write easy
    images = gifWriter.convertImagesToPIL(images, dither, nq)
    # Write; `with` guarantees the file is closed even if writing fails.
    with open(filename, 'wb') as fp:
        gifWriter.writeGifToFile(fp, images, duration, loops, xy, dispose)
def readGif(filename, asNumpy=True):
    """ readGif(filename, asNumpy=True)

    Read images from an animated GIF file. Returns a list of numpy
    arrays, or, if asNumpy is false, a list of PIL images.

    Raises RuntimeError when PIL or numpy is missing, IOError when the
    file does not exist.
    """
    # Both PIL (to decode the GIF) and numpy (for the array form) are
    # required, even when asNumpy is false.
    if PIL is None:
        raise RuntimeError("Need PIL to read animated gif files.")
    if np is None:
        raise RuntimeError("Need Numpy to read animated gif files.")
    if not os.path.isfile(filename):
        raise IOError('File not found: ' + str(filename))

    gif = PIL.Image.open(filename)
    gif.seek(0)

    # Walk the frames until PIL signals the end with EOFError.
    frames = []
    try:
        while True:
            converted = gif.convert()  # drop the palette
            arr = np.asarray(converted)
            if len(arr.shape) == 0:
                raise MemoryError("Too little memory to convert PIL image to array")
            frames.append(arr)
            gif.seek(gif.tell() + 1)
    except EOFError:
        pass

    if not asNumpy:
        frames = [PIL.Image.fromarray(arr) for arr in frames]
    return frames
class NeuQuant:
    """ NeuQuant(image, samplefac=10, colors=256)
    samplefac should be an integer number of 1 or higher, 1
    being the highest quality, but the slowest performance.
    With a value of 10, one tenth of all pixels are used during
    training. This value seems a nice tradeoff between speed
    and quality.
    colors is the amount of colors to reduce the image to. This
    should best be a power of two.
    See also:
    http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
    License of the NeuQuant Neural-Net Quantization Algorithm
    ---------------------------------------------------------
    Copyright (c) 1994 Anthony Dekker
    Ported to python by Marius van Voorden in 2010
    NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
    See "Kohonen neural networks for optimal colour quantization"
    in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
    for a discussion of the algorithm.
    See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
    Any party obtaining a copy of these files from the author, directly or
    indirectly, is granted, free of charge, a full and unrestricted irrevocable,
    world-wide, paid up, royalty-free, nonexclusive right and license to deal
    in this software and documentation files (the "Software"), including without
    limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons who receive
    copies from any such party to do so, with the only requirement being
    that this copyright notice remain intact.
    """
    # Class-level placeholders; the real values are assigned per instance
    # in setconstants().
    NCYCLES = None # Number of learning cycles
    NETSIZE = None # Number of colours used
    SPECIALS = None # Number of reserved colours used
    BGCOLOR = None # Reserved background colour
    CUTNETSIZE = None
    MAXNETPOS = None
    INITRAD = None # For 256 colours, radius starts at 32
    RADIUSBIASSHIFT = None
    RADIUSBIAS = None
    INITBIASRADIUS = None
    RADIUSDEC = None # Factor of 1/30 each cycle
    ALPHABIASSHIFT = None
    INITALPHA = None # biased by 10 bits
    GAMMA = None
    BETA = None
    BETAGAMMA = None
    network = None # The network itself
    colormap = None # The network itself
    netindex = None # For network lookup - really 256
    bias = None # Bias and freq arrays for learning
    freq = None
    pimage = None
    # Four primes near 500 - assume no image has a length so large
    # that it is divisible by all four primes
    PRIME1 = 499
    PRIME2 = 491
    PRIME3 = 487
    PRIME4 = 503
    MAXPRIME = PRIME4
    pixels = None
    samplefac = None
    a_s = None
    def setconstants(self, samplefac, colors):
        """Initialise the algorithm constants and allocate working arrays
        for a network of `colors` neurons sampled at 1/`samplefac`."""
        self.NCYCLES = 100 # Number of learning cycles
        self.NETSIZE = colors # Number of colours used
        self.SPECIALS = 3 # Number of reserved colours used
        self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
        self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
        self.MAXNETPOS = self.NETSIZE - 1
        self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
        self.RADIUSBIASSHIFT = 6
        self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
        self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
        self.RADIUSDEC = 30 # Factor of 1/30 each cycle
        self.ALPHABIASSHIFT = 10 # Alpha starts at 1
        self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
        self.GAMMA = 1024.0
        self.BETA = 1.0/1024.0
        self.BETAGAMMA = self.BETA * self.GAMMA
        self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
        self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The network itself
        self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
        self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
        self.freq = np.empty(self.NETSIZE, dtype='float64')
        self.pixels = None
        self.samplefac = samplefac
        self.a_s = {}
    def __init__(self, image, samplefac=10, colors=256):
        """Train the network on `image` (a PIL RGBA image) immediately:
        set up constants, learn, then build the colormap and index."""
        # Check Numpy
        if np is None:
            raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
        # Check image: must have at least MAXPRIME pixels so the prime
        # stepping below can sample it, and must be RGBA.
        if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
            raise IOError("Image is too small")
        if image.mode != "RGBA":
            raise IOError("Image mode should be RGBA.")
        # Initialize; pixels are read as packed 32-bit RGBA words.
        self.setconstants(samplefac, colors)
        self.pixels = np.fromstring(image.tostring(), np.uint32)
        self.setUpArrays()
        self.learn()
        self.fix()
        self.inxbuild()
    def writeColourMap(self, rgb, outstream):
        """Write the colour map to `outstream`, channel order controlled
        by `rgb`; returns the number of colours written."""
        for i in range(self.NETSIZE):
            bb = self.colormap[i,0];
            gg = self.colormap[i,1];
            rr = self.colormap[i,2];
            outstream.write(rr if rgb else bb)
            outstream.write(gg)
            outstream.write(bb if rgb else rr)
        return self.NETSIZE
    def setUpArrays(self):
        """Seed the special (reserved) neurons and spread the rest of the
        network linearly over the grey axis."""
        self.network[0,0] = 0.0 # Black
        self.network[0,1] = 0.0
        self.network[0,2] = 0.0
        self.network[1,0] = 255.0 # White
        self.network[1,1] = 255.0
        self.network[1,2] = 255.0
        # RESERVED self.BGCOLOR # Background
        for i in range(self.SPECIALS):
            self.freq[i] = 1.0 / self.NETSIZE
            self.bias[i] = 0.0
        for i in range(self.SPECIALS, self.NETSIZE):
            p = self.network[i]
            p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
            self.freq[i] = 1.0 / self.NETSIZE
            self.bias[i] = 0.0
    # Omitted: setPixels
    def altersingle(self, alpha, i, b, g, r):
        """Move neuron i towards biased (b,g,r) by factor alpha"""
        n = self.network[i] # Alter hit neuron
        n[0] -= (alpha*(n[0] - b))
        n[1] -= (alpha*(n[1] - g))
        n[2] -= (alpha*(n[2] - r))
    def geta(self, alpha, rad):
        """Return (and memoize in self.a_s) the radially decaying alpha
        profile for a neighbourhood of radius `rad`; centre entry is 0."""
        try:
            return self.a_s[(alpha, rad)]
        except KeyError:
            # NOTE: Python 2 semantics — integer division for mid and
            # list concatenation of two range() results.
            length = rad*2-1
            mid = length/2
            q = np.array(range(mid-1,-1,-1)+range(-1,mid))
            a = alpha*(rad*rad - q*q)/(rad*rad)
            a[mid] = 0
            self.a_s[(alpha, rad)] = a
            return a
    def alterneigh(self, alpha, rad, i, b, g, r):
        """Move the neighbours of neuron i towards (b,g,r), with the
        per-neuron strength taken from geta(); clipped at the SPECIALS
        boundary and at NETSIZE."""
        if i-rad >= self.SPECIALS-1:
            lo = i-rad
            start = 0
        else:
            lo = self.SPECIALS-1
            start = (self.SPECIALS-1 - (i-rad))
        if i+rad <= self.NETSIZE:
            hi = i+rad
            end = rad*2-1
        else:
            hi = self.NETSIZE
            end = (self.NETSIZE - (i+rad))
        a = self.geta(alpha, rad)[start:end]
        # In-place update of the whole neighbourhood slice at once.
        p = self.network[lo+1:hi]
        p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
    # Pre-vectorisation version of contest(), kept for reference.
    #def contest(self, b, g, r):
    #    """ Search for biased BGR values
    #            Finds closest neuron (min dist) and updates self.freq
    #            finds best neuron (min dist-self.bias) and returns position
    #            for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
    #            self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
    #
    #    i, j = self.SPECIALS, self.NETSIZE
    #    dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
    #    bestpos = i + np.argmin(dists)
    #    biasdists = dists - self.bias[i:j]
    #    bestbiaspos = i + np.argmin(biasdists)
    #    self.freq[i:j] -= self.BETA * self.freq[i:j]
    #    self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
    #    self.freq[bestpos] += self.BETA
    #    self.bias[bestpos] -= self.BETAGAMMA
    #    return bestbiaspos
    def contest(self, b, g, r):
        """ Search for biased BGR values
        Finds closest neuron (min dist) and updates self.freq
        finds best neuron (min dist-self.bias) and returns position
        for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
        self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
        i, j = self.SPECIALS, self.NETSIZE
        # Manhattan distance from every learnable neuron to the sample.
        dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
        bestpos = i + np.argmin(dists)
        biasdists = dists - self.bias[i:j]
        bestbiaspos = i + np.argmin(biasdists)
        # Decay all frequencies, then boost the winner.
        self.freq[i:j] *= (1-self.BETA)
        self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
        self.freq[bestpos] += self.BETA
        self.bias[bestpos] -= self.BETAGAMMA
        return bestbiaspos
    def specialFind(self, b, g, r):
        """Return the index of the reserved neuron exactly matching
        (b,g,r), or -1 when none matches."""
        for i in range(self.SPECIALS):
            n = self.network[i]
            if n[0] == b and n[1] == g and n[2] == r:
                return i
        return -1
    def learn(self):
        """Main Kohonen learning loop: sample pixels with a prime stride
        and pull the winning neuron (and its neighbourhood) towards each
        sample while alpha and the radius decay. Uses Python 2 print
        statements for progress output."""
        biasRadius = self.INITBIASRADIUS
        alphadec = 30 + ((self.samplefac-1)/3)
        lengthcount = self.pixels.size
        samplepixels = lengthcount / self.samplefac
        delta = samplepixels / self.NCYCLES
        alpha = self.INITALPHA
        i = 0;
        rad = biasRadius >> self.RADIUSBIASSHIFT
        if rad <= 1:
            rad = 0
        print "Beginning 1D learning: samplepixels =",samplepixels," rad =", rad
        step = 0
        pos = 0
        # Pick a stride that is coprime with the pixel count so every
        # pixel can eventually be visited.
        if lengthcount%NeuQuant.PRIME1 != 0:
            step = NeuQuant.PRIME1
        elif lengthcount%NeuQuant.PRIME2 != 0:
            step = NeuQuant.PRIME2
        elif lengthcount%NeuQuant.PRIME3 != 0:
            step = NeuQuant.PRIME3
        else:
            step = NeuQuant.PRIME4
        i = 0
        printed_string = ''
        while i < samplepixels:
            if i%100 == 99:
                tmp = '\b'*len(printed_string)
                printed_string = str((i+1)*100/samplepixels)+"%\n"
                print tmp + printed_string,
            # Unpack the packed 32-bit pixel into channel bytes.
            p = self.pixels[pos]
            r = (p >> 16) & 0xff
            g = (p >> 8) & 0xff
            b = (p ) & 0xff
            if i == 0: # Remember background colour
                self.network[self.BGCOLOR] = [b, g, r]
            j = self.specialFind(b, g, r)
            if j < 0:
                j = self.contest(b, g, r)
            if j >= self.SPECIALS: # Don't learn for specials
                a = (1.0 * alpha) / self.INITALPHA
                self.altersingle(a, j, b, g, r)
                if rad > 0:
                    self.alterneigh(a, rad, j, b, g, r)
            pos = (pos+step)%lengthcount
            i += 1
            # Every `delta` samples, decay alpha and shrink the radius.
            if i%delta == 0:
                alpha -= alpha / alphadec
                biasRadius -= biasRadius / self.RADIUSDEC
                rad = biasRadius >> self.RADIUSBIASSHIFT
                if rad <= 1:
                    rad = 0
        print "Finished 1D learning: final alpha =",(1.0*alpha)/self.INITALPHA,"!"
    def fix(self):
        """Round the trained float network into the int32 colormap,
        clamping channels to 0..255; column 3 stores the original index."""
        for i in range(self.NETSIZE):
            for j in range(3):
                x = int(0.5 + self.network[i,j])
                x = max(0, x)
                x = min(255, x)
                self.colormap[i,j] = x
            self.colormap[i,3] = i
    def inxbuild(self):
        """Selection-sort the colormap on the green channel and build
        netindex, mapping each green value 0..255 to a start position."""
        previouscol = 0
        startpos = 0
        for i in range(self.NETSIZE):
            p = self.colormap[i]
            q = None
            smallpos = i
            smallval = p[1] # Index on g
            # Find smallest in i..self.NETSIZE-1
            for j in range(i+1, self.NETSIZE):
                q = self.colormap[j]
                if q[1] < smallval: # Index on g
                    smallpos = j
                    smallval = q[1] # Index on g
            q = self.colormap[smallpos]
            # Swap p (i) and q (smallpos) entries
            if i != smallpos:
                p[:],q[:] = q, p.copy()
            # smallval entry is now in position i
            if smallval != previouscol:
                self.netindex[previouscol] = (startpos+i) >> 1
                for j in range(previouscol+1, smallval):
                    self.netindex[j] = i
                previouscol = smallval
                startpos = i
        self.netindex[previouscol] = (startpos+self.MAXNETPOS) >> 1
        for j in range(previouscol+1, 256): # Really 256
            self.netindex[j] = self.MAXNETPOS
    def paletteImage(self):
        """ PIL weird interface for making a paletted image: create an image which
        already has the palette, and use that in Image.quantize. This function
        returns this palette image. """
        if self.pimage is None:
            palette = []
            for i in range(self.NETSIZE):
                palette.extend(self.colormap[i][:3])
            palette.extend([0]*(256-self.NETSIZE)*3)
            # a palette image to use for quant
            self.pimage = Image.new("P", (1, 1), 0)
            self.pimage.putpalette(palette)
        return self.pimage
    def quantize(self, image):
        """ Use a kdtree to quickly find the closest palette colors for the pixels """
        if get_cKDTree():
            return self.quantize_with_scipy(image)
        else:
            print 'Scipy not available, falling back to slower version.'
            return self.quantize_without_scipy(image)
    def quantize_with_scipy(self, image):
        """Quantize using a scipy cKDTree nearest-neighbour lookup over
        the colormap; fast path of quantize()."""
        w,h = image.size
        px = np.asarray(image).copy()
        px2 = px[:,:,:3].reshape((w*h,3))
        cKDTree = get_cKDTree()
        kdtree = cKDTree(self.colormap[:,:3],leafsize=10)
        result = kdtree.query(px2)
        colorindex = result[1]
        print "Distance:", (result[0].sum()/(w*h))
        px2[:] = self.colormap[colorindex,:3]
        return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
    def quantize_without_scipy(self, image):
        """" This function can be used if no scipy is availabe.
        It's 7 times slower though.
        """
        w,h = image.size
        px = np.asarray(image).copy()
        # Memoize per-colour lookups, since photos repeat colours often.
        memo = {}
        for j in range(w):
            for i in range(h):
                key = (px[i,j,0],px[i,j,1],px[i,j,2])
                try:
                    val = memo[key]
                except KeyError:
                    val = self.convert(key)
                    memo[key] = val
                px[i,j,0],px[i,j,1],px[i,j,2] = val
        return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
    # Python 2 tuple-parameter unpacking in the signature.
    def convert(self, (r, g, b)):
        """Return the colormap RGB triple closest to (r, g, b)."""
        i = self.inxsearch(r, g, b)
        return self.colormap[i,:3]
    def inxsearch(self, r, g, b):
        """Search for BGR values 0..255 and return colour index"""
        dists = (self.colormap[:,:3] - np.array([r,g,b]))
        a= np.argmin((dists*dists).sum(1))
        return a
if __name__ == '__main__':
    # Smoke test: build one grayscale frame with a few bright bars, then
    # write five progressively dimmer copies of it as an animated GIF.
    im = np.zeros((200,200), dtype=np.uint8)
    im[10:30,:] = 100
    im[:,80:120] = 255
    im[-50:-40,:] = 50
    images = [im*1.0, im*0.8, im*0.6, im*0.4, im*0]
    writeGif('lala3.gif',images, duration=0.5, dither=0)
| |
import calendar
import datetime
import re
import sys
import urllib
import urlparse
from email.Utils import formatdate
from airy.utils.encoding import smart_str, force_unicode
from airy.utils.functional import allow_lazy
# Matches one entity tag from an If-Match/If-None-Match header: optional
# weak prefix "W/" followed by a quoted string; group 1 captures the tag
# content with its backslash escapes still in place.
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
# Lower-case three-letter month abbreviations, indexed 0-11.
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
# Building blocks for the three HTTP date formats of RFC 2616 sec. 3.3.1.
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned string
    can safely be used as part of an argument to a subsequent iri_to_uri() call
    without double-quoting occurring.
    """
    return force_unicode(urllib.quote(smart_str(url), smart_str(safe)))
# Make urlquote usable lazily (evaluation deferred until the unicode
# result is actually needed).
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe)))
# Make urlquote_plus usable lazily, mirroring urlquote.
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings
    and then encoded as per normal.
    """
    if hasattr(query, 'items'):
        query = query.items()
    pairs = []
    for k, v in query:
        if isinstance(v, (list, tuple)):
            # Encode each member of a sequence value. The previous
            # "and ... or ..." idiom broke for empty sequences: [] is
            # falsy, so the value fell through to smart_str(v) and was
            # stringified as "[]" instead of staying an empty sequence.
            v = [smart_str(i) for i in v]
        else:
            v = smart_str(v)
        pairs.append((smart_str(k), v))
    return urllib.urlencode(pairs, doseq)
def cookie_date(epoch_seconds=None):
    """
    Formats the time to ensure compatibility with Netscape's cookie standard.
    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.
    Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    raw = formatdate(epoch_seconds)
    # Re-assemble the RFC 2822 date with the dashes Netscape expects
    # between day, month and year, and a literal 'GMT' zone.
    weekday_day = raw[:7]
    month = raw[8:11]
    year_and_time = raw[12:25]
    return '%s-%s-%s GMT' % (weekday_day, month, year_and_time)
def http_date(epoch_seconds=None):
    """
    Formats the time to match the RFC1123 date format as specified by HTTP
    RFC2616 section 3.3.1.
    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.
    Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    # formatdate() already yields 'Wdy, DD Mon YYYY HH:MM:SS -0000';
    # keep the first 25 characters and replace the numeric zone with GMT.
    return '%s GMT' % formatdate(epoch_seconds)[:25]
def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC2616 section 3.3.1.
    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.
    Returns a floating point number expressed in seconds since the epoch, in
    UTC.
    """
    # email.Utils.parsedate only understands RFC1123 dates, but RFC2616
    # makes RFC850 and asctime() support mandatory too - so try each of
    # our own patterns in turn.
    m = None
    for pattern in (RFC1123_DATE, RFC850_DATE, ASCTIME_DATE):
        m = pattern.match(date)
        if m is not None:
            break
    if m is None:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # Two-digit years: 70-99 -> 19xx, 00-69 -> 20xx.
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        minute = int(m.group('min'))
        second = int(m.group('sec'))
        parsed = datetime.datetime(year, month, day, hour, minute, second)
        return calendar.timegm(parsed.utctimetuple())
    except Exception:
        raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
    """
    Same as parse_http_date, but returns None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        # Invalid input: swallow the error and signal failure with None.
        return None
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value will fit into an int.
    # NOTE: sys.maxint exists on Python 2 only.
    if value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value
def int_to_base36(i):
    """
    Converts a non-negative integer to a base36 string.
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    factor = 0
    # Find the largest power of 36 that does not exceed i.
    while True:
        factor += 1
        if i < 36 ** factor:
            factor -= 1
            break
    base36 = []
    # Peel off one base36 digit per power, most significant first.
    # Floor division (//) is identical to / for ints on Python 2 but keeps
    # the index an integer under `from __future__ import division` / py3.
    while factor >= 0:
        j = 36 ** factor
        base36.append(digits[i // j])
        i = i % j
        factor -= 1
    return ''.join(base36)
def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    # Undo backslash escaping inside each captured tag.
    # NOTE: the 'string_escape' codec is Python 2-only.
    etags = [e.decode('string_escape') for e in etags]
    return etags
def quote_etag(etag):
    """
    Wraps a string in double quotes, escaping contents as necessary.
    """
    # Escape backslashes first so the quote-escaping pass doesn't
    # double-process its own output.
    escaped = etag.replace('\\', '\\\\')
    escaped = escaped.replace('"', '\\"')
    return '"%s"' % escaped
if sys.version_info >= (2, 6):
    def same_origin(url1, url2):
        """
        Checks if two URLs are 'same-origin'
        """
        # The scheme/hostname/port attribute accessors on the urlparse
        # result exist from Python 2.6 onwards.
        p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
        return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
else:
    # Python 2.4, 2.5 compatibility. This actually works for Python 2.6 and
    # above, but the above definition is much more obviously correct and so is
    # preferred going forward.
    def same_origin(url1, url2):
        """
        Checks if two URLs are 'same-origin'
        """
        # Compare the (scheme, netloc) prefix of the 6-tuple results.
        p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
        return p1[0:2] == p2[0:2]
| |
from __future__ import print_function
import os
import logging
from . import shutitfile
def setup_docker_pattern(shutit,
                         skel_path,
                         skel_delivery,
                         skel_domain,
                         skel_module_name,
                         skel_shutitfiles,
                         skel_domain_hash,
                         skel_depends):
 """Generate a Docker-based ShutIt skeleton project under skel_path:
 build/run/test wrapper scripts, push/build config files, a Dockerfile,
 and one ShutIt module per supplied ShutItFile (or a single empty module
 when no ShutItFiles are given). Writes files to disk; returns None.
 """
 # Set up shutitfile cfg
 shutit.shutitfile['base_image'] = shutit.cfg['skeleton']['base_image']
 shutit.shutitfile['cmd'] = """/bin/sh -c 'sleep infinity'"""
 shutit.shutitfile['expose'] = []
 shutit.shutitfile['env'] = []
 shutit.shutitfile['volume'] = []
 shutit.shutitfile['onbuild'] = []
 shutit.shutitfile['script'] = []
 # arguments
 shutit.cfg['skeleton']['volumes_arg'] = ''
 for varg in shutit.shutitfile['volume']:
  shutit.cfg['skeleton']['volumes_arg'] += ' -v ' + varg + ':' + varg
 shutit.cfg['skeleton']['ports_arg'] = ''
 # NOTE(review): if 'expose' were a plain string this branch iterates its
 # characters; presumably it should split() first. Harmless in practice,
 # since 'expose' is always set to [] above.
 if isinstance(shutit.shutitfile['expose'], str):
  for parg in shutit.shutitfile['expose']:
   shutit.cfg['skeleton']['ports_arg'] += ' -p ' + parg + ':' + parg
 else:
  for parg in shutit.shutitfile['expose']:
   for port in parg.split():
    shutit.cfg['skeleton']['ports_arg'] += ' -p ' + port + ':' + port
 shutit.cfg['skeleton']['env_arg'] = ''
 for earg in shutit.shutitfile['env']:
  shutit.cfg['skeleton']['env_arg'] += ' -e ' + earg.split()[0] + ':' + earg.split()[1]
 os.system('mkdir -p ' + skel_path)
 # build.sh: locate a shutit binary and run a build with this skeleton.
 build_bin_filename = skel_path + '/build.sh'
 build_bin_file = open(build_bin_filename,'w+')
 build_bin_file.write('''#!/bin/bash
[[ -z "$SHUTIT" ]] && SHUTIT="$1/shutit"
[[ ! -a "$SHUTIT" ]] || [[ -z "$SHUTIT" ]] && SHUTIT="$(which shutit)"
if [[ ! -a "$SHUTIT" ]]
then
 echo "Must have shutit on path, eg export PATH=$PATH:/path/to/shutit_dir"
 exit 1
fi
$SHUTIT build -d ''' + skel_delivery + ''' "$@"
if [[ $? != 0 ]]
then
 exit 1
fi''')
 build_bin_file.close()
 os.chmod(build_bin_filename,0o755)
 # run.sh: example docker run wrapper for the built image.
 run_bin_filename = skel_path + '/run.sh'
 run_bin_file = open(run_bin_filename,'w+')
 # TODO: sort out entrypoint properly
 entrypoint = ''
 # FIX: the docker run line previously interpolated ports_arg twice and
 # never used the computed volumes_arg; the first slot now carries the
 # '-v' volume mounts.
 run_bin_file.write('''#!/bin/bash
# Example for running
DOCKER=${DOCKER:-docker}
IMAGE_NAME=%s
CONTAINER_NAME=$IMAGE_NAME
DOCKER_ARGS=''
while getopts "i:c:a:" opt
do
 case "$opt" in
 i)
  IMAGE_NAME=$OPTARG
  ;;
 c)
  CONTAINER_NAME=$OPTARG
  ;;
 a)
  DOCKER_ARGS=$OPTARG
  ;;
 esac
done
${DOCKER} run -d --name ${CONTAINER_NAME} ''' + skel_module_name + ''' ''' + shutit.cfg['skeleton']['volumes_arg'] + ''' ''' + shutit.cfg['skeleton']['ports_arg'] + ''' ''' + shutit.cfg['skeleton']['env_arg'] + ''' ${DOCKER_ARGS} ${IMAGE_NAME} ''' + entrypoint + ''' ''' + shutit.shutitfile['cmd'])
 run_bin_file.close()
 os.chmod(run_bin_filename,0o755)
 # test.sh: thin wrapper that re-runs build.sh from the skeleton dir.
 test_bin_filename = skel_path + '/test.sh'
 test_bin_file = open(test_bin_filename,'w+')
 test_bin_file.write('''#!/bin/bash
# Test the building of this module
if [ $0 != test.sh ] && [ $0 != ./test.sh ]
then
 echo
 echo "Called as: $0"
 echo "Must be run as test.sh or ./test.sh"
 exit
fi
./build.sh "$@"''')
 test_bin_file.close()
 os.chmod(test_bin_filename,0o755)
 # push.cnf: registry/push configuration template (read-only).
 os.system('mkdir -p ' + skel_path + '/configs')
 push_cnf_filename = skel_path + '/configs/push.cnf'
 push_cnf_file = open(push_cnf_filename,'w+')
 push_cnf_file.write('''###############################################################################
# PLEASE NOTE: This file should be changed only by the maintainer.
# PLEASE NOTE: IF YOU WANT TO CHANGE THE CONFIG, PASS IN
# --config configfilename
# OR ADD DETAILS TO YOUR
# ~/.shutit/config
# FILE
###############################################################################
[target]
rm:false
[repository]
# COPY THESE TO YOUR ~/.shutit/config FILE AND FILL OUT ITEMS IN CAPS
#user:YOUR_USERNAME
## Fill these out in server- and username-specific config (also in this directory)
#password:YOUR_REGISTRY_PASSWORD_OR_BLANK
## Fill these out in server- and username-specific config (also in this directory)
#email:YOUR_REGISTRY_EMAIL_OR_BLANK
#tag:no
#push:yes
#save:no
#export:no
##server:REMOVE_ME_FOR_DOCKER_INDEX
## tag suffix, defaults to "latest", eg registry/username/repository:latest.
## empty is also "latest"
#tag_name:latest
#suffix_date:no
#suffix_format:%s''')
 push_cnf_file.close()
 os.chmod(push_cnf_filename,0o400)
 # Dockerfile: example image that installs shutit and builds the project.
 dockerfile_filename = skel_path + '/Dockerfile'
 dockerfile_file = open(dockerfile_filename,'w+')
 dockerfile_file.write('''FROM ''' + shutit.shutitfile['base_image'] + '''
RUN apt-get update
RUN apt-get install -y -qq git python-pip python-dev
RUN pip install shutit
WORKDIR /opt
# Change the next two lines to build your ShutIt module.
RUN git clone https://github.com/yourname/yourshutitproject.git
WORKDIR /opt/yourshutitproject
RUN shutit build --delivery dockerfile
CMD ["/bin/bash"]''')
 dockerfile_file.close()
 # User message
 shutit.log('''# Run:
cd ''' + skel_path + ''' && ./build.sh
# to build.
# And then:
./run.sh
# to run.''',transient=True)
 # Generate one module per ShutItFile, or a single empty module skeleton.
 if skel_shutitfiles:
  shutit.log('Processing ShutItFiles: ' + str(skel_shutitfiles),level=logging.DEBUG)
  _total = len(skel_shutitfiles)
  _count = 0
  for skel_shutitfile in skel_shutitfiles:
   _count += 1
   shutit.log('Processing ShutItFile: ' + str(skel_shutitfile),level=logging.INFO)
   module_modifier = '_' + str(_count)
   new_module_filename = skel_path + '/' + os.path.join(skel_module_name + module_modifier + '.py')
   shutit.cfg['skeleton']['module_modifier'] = module_modifier
   (sections, skel_module_id, skel_module_name, _, _) = shutitfile.shutitfile_to_shutit_module(shutit, skel_shutitfile,skel_path,skel_domain,skel_module_name,skel_domain_hash,skel_delivery,skel_depends,_count,_total,module_modifier)
   shutit.cfg['skeleton']['header_section'] = sections['header_section']
   shutit.cfg['skeleton']['config_section'] = sections['config_section']
   shutit.cfg['skeleton']['build_section'] = sections['build_section']
   shutit.cfg['skeleton']['finalize_section'] = sections['finalize_section']
   shutit.cfg['skeleton']['test_section'] = sections['test_section']
   shutit.cfg['skeleton']['isinstalled_section'] = sections['isinstalled_section']
   shutit.cfg['skeleton']['start_section'] = sections['start_section']
   shutit.cfg['skeleton']['stop_section'] = sections['stop_section']
   shutit.cfg['skeleton']['final_section'] = sections['final_section']
   module_file = open(new_module_filename,'w+')
   module_file.write(shutit.cfg['skeleton']['header_section'] + '''
	def build(self, shutit):
''' + shutit.cfg['skeleton']['build_section'] + '''
		return True
	def get_config(self, shutit):
''' + shutit.cfg['skeleton']['config_section'] + '''
		return True
	def test(self, shutit):
''' + shutit.cfg['skeleton']['test_section'] + '''
		return True
	def finalize(self, shutit):
''' + shutit.cfg['skeleton']['finalize_section'] + '''
		return True
	def is_installed(self, shutit):
''' + shutit.cfg['skeleton']['isinstalled_section'] + '''
		return False
	def start(self, shutit):
''' + shutit.cfg['skeleton']['start_section'] + '''
		return True
	def stop(self, shutit):
''' + shutit.cfg['skeleton']['stop_section'] + '''
		return True
''' + shutit.cfg['skeleton']['final_section'])
   module_file.close()
   # Set up build.cnf
   build_cnf_filename = skel_path + '/configs/build.cnf'
   if _count == 1:
    build_cnf_file = open(build_cnf_filename,'w+')
    build_cnf_file.write('''###############################################################################
# PLEASE NOTE: This file should be changed only by the maintainer.
# PLEASE NOTE: This file is only sourced if the "shutit build" command is run
# and this file is in the relative path: configs/build.cnf
# This is to ensure it is only sourced if _this_ module is the
# target.
###############################################################################
# When this module is the one being built, which modules should be built along with it by default?
# This feeds into automated testing of each module.
[''' + skel_module_id + ''']
shutit.core.module.build:yes
# Allowed images as a regexp, eg ["ubuntu:12.*"], or [".*"], or ["centos"].
# It's recommended this is locked down as far as possible.
shutit.core.module.allowed_images:["''' + shutit.shutitfile['base_image'] + '''"]
# Aspects of build process
[build]
base_image:''' + shutit.shutitfile['base_image'] + '''
# Volume arguments wanted as part of the build
[target]
volumes:
[repository]
name:''' + skel_module_name)
    build_cnf_file.close()
   else:
    # Subsequent modules are appended to the existing build.cnf.
    build_cnf_file = open(build_cnf_filename,'a')
    build_cnf_file.write('''
[''' + skel_domain + '''.''' + skel_module_name + module_modifier + ''']
shutit.core.module.build:yes''')
    build_cnf_file.close()
   os.chmod(build_cnf_filename,0o400)
 else:
  # No ShutItFiles: emit a single empty module skeleton.
  shutit.cfg['skeleton']['header_section'] = 'from shutit_module import ShutItModule\n\nclass ' + skel_module_name + '(ShutItModule):\n'
  shutit.cfg['skeleton']['config_section'] = ''
  shutit.cfg['skeleton']['build_section'] = ''
  shutit.cfg['skeleton']['finalize_section'] = ''
  shutit.cfg['skeleton']['test_section'] = ''
  shutit.cfg['skeleton']['isinstalled_section'] = ''
  shutit.cfg['skeleton']['start_section'] = ''
  shutit.cfg['skeleton']['stop_section'] = ''
  shutit.cfg['skeleton']['final_section'] = """def module():
	return """ + skel_module_name + """(
		'""" + skel_domain + '''.''' + skel_module_name + """', """ + skel_domain_hash + """.0001,
		description='',
		maintainer='',
		delivery_methods=['""" + skel_delivery + """'],
		depends=['""" + skel_depends + """']
	)"""
  new_module_filename = skel_path + '/' + os.path.join(skel_module_name) + '.py'
  module_file = open(new_module_filename,'w+')
  module_file.write(shutit.cfg['skeleton']['header_section'] + '''
	def build(self, shutit):
''' + shutit.cfg['skeleton']['build_section'] + '''
		return True
	def get_config(self, shutit):
''' + shutit.cfg['skeleton']['config_section'] + '''
		return True
	def test(self, shutit):
''' + shutit.cfg['skeleton']['test_section'] + '''
		return True
	def finalize(self, shutit):
''' + shutit.cfg['skeleton']['finalize_section'] + '''
		return True
	def is_installed(self, shutit):
''' + shutit.cfg['skeleton']['isinstalled_section'] + '''
		return False
	def start(self, shutit):
''' + shutit.cfg['skeleton']['start_section'] + '''
		return True
	def stop(self, shutit):
''' + shutit.cfg['skeleton']['stop_section'] + '''
		return True
''' + shutit.cfg['skeleton']['final_section'])
  module_file.close()
  build_cnf_filename = skel_path + '/configs/build.cnf'
  build_cnf_file = open(build_cnf_filename,'w+')
  build_cnf_file.write('''###############################################################################
# PLEASE NOTE: This file should be changed only by the maintainer.
# PLEASE NOTE: This file is only sourced if the "shutit build" command is run
# and this file is in the relative path: configs/build.cnf
# This is to ensure it is only sourced if _this_ module is the
# target.
###############################################################################
# When this module is the one being built, which modules should be built along with it by default?
# This feeds into automated testing of each module.
[''' + skel_domain + '''.''' + skel_module_name + ''']
shutit.core.module.build:yes
# Allowed images as a regexp, eg ["ubuntu:12.*"], or [".*"], or ["centos"].
# It's recommended this is locked down as far as possible.
shutit.core.module.allowed_images:["''' + shutit.shutitfile['base_image'] + '''"]
# Aspects of build process
[build]
base_image:''' + shutit.shutitfile['base_image'] + '''
# Volume arguments wanted as part of the build
[target]
volumes:
[repository]
name:''' + skel_module_name)
  build_cnf_file.close()
  os.chmod(build_cnf_filename,0o400)
| |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# Module caffe2.python.examples.resnet50_trainer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import numpy as np
import time
import os
from caffe2.python import core, workspace, experiment_util, data_parallel_model
from caffe2.python import data_parallel_model_utils, dyndep, optimizer
from caffe2.python import timeout_guard, model_helper, brew
from caffe2.proto import caffe2_pb2
import caffe2.python.models.resnet as resnet
from caffe2.python.modeling.initializers import Initializer, pFP16Initializer
import caffe2.python.predictor.predictor_exporter as pred_exp
import caffe2.python.predictor.predictor_py_utils as pred_utils
from caffe2.python.predictor_constants import predictor_constants as predictor_constants
'''
Parallelized multi-GPU distributed trainer for Resnet 50. Can be used to train
on imagenet data, for example.
Run the trainer in single-machine multi-gpu mode by setting num_shards = 1.
To run the trainer in multi-machine multi-gpu mode with M machines,
run the same program on all machines, specifying num_shards = M, and
shard_id = a unique integer in the set [0, M-1].
For rendezvous (the trainer processes have to know about each other),
you can either use a directory path that is visible to all processes
(e.g. NFS directory), or use a Redis instance. Use the former by
passing the `file_store_path` argument. Use the latter by passing the
`redis_host` and `redis_port` arguments.
'''
# Configure root logging and a dedicated, verbose logger for this trainer.
logging.basicConfig()
log = logging.getLogger("resnet50_trainer")
log.setLevel(logging.DEBUG)
# Load the distributed store-handler operator libraries used for
# multi-machine rendezvous (shared file path or Redis).
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:file_store_handler_ops')
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:redis_store_handler_ops')
def AddImageInput(model, reader, batch_size, img_size, dtype, is_test):
    """Attach an ImageInput op reading (image, label) pairs from `reader`.

    Applies the standard transformations (random crop to `img_size`,
    mirroring, mean/std normalization) and emits "data" and "label" blobs.
    """
    # GPU-side image transforms only make sense when this model runs on a
    # GPU device (device type 1).
    on_gpu = model._device_type == 1
    data, label = brew.image_input(
        model,
        reader,
        ["data", "label"],
        batch_size=batch_size,
        output_type=dtype,
        use_gpu_transform=on_gpu,
        use_caffe_datum=True,
        mean=128.,
        std=128.,
        scale=256,
        crop=img_size,
        mirror=1,
        is_test=is_test,
    )
    # The input data is a constant as far as backprop is concerned.
    data = model.StopGradient(data, data)
def AddNullInput(model, reader, batch_size, img_size, dtype):
    """Emulate image input with synthetic data.

    Fills a gaussian "data" blob of shape (batch_size, 3, img_size,
    img_size) and a constant int32 "label" blob, which is useful for
    measuring compute throughput without a real dataset. `reader` is unused.
    """
    wants_fp16 = (dtype == "float16")
    # In fp16 mode the gaussian fill is produced as float32 under a
    # suffixed name and then converted to half precision as "data".
    suffix = "_fp16" if wants_fp16 else ""
    model.param_init_net.GaussianFill(
        [],
        ["data" + suffix],
        shape=[batch_size, 3, img_size, img_size],
    )
    if wants_fp16:
        model.param_init_net.FloatToHalf("data" + suffix, "data")
    # Hardcode every label to a single value.
    model.param_init_net.ConstantFill(
        [],
        ["label"],
        shape=[batch_size],
        value=1,
        dtype=core.DataType.INT32,
    )
def SaveModel(args, train_model, epoch):
    """Save `train_model` as a minidb predictor snapshot for `epoch`.

    The file is written to
    "<args.file_store_path>/<args.save_model_name>_<epoch>.mdl".
    """
    # Blob prefix of the first device (e.g. "gpu_0"); the exported predictor
    # references that device's blobs. Bug fix: the format string was
    # "[]_{}", which dropped the device prefix and produced a literal "[]"
    # (the sibling code in RunEpoch uses "{}_{}").
    prefix = "{}_{}".format(train_model._device_prefix, train_model._devices[0])
    predictor_export_meta = pred_exp.PredictorExportMeta(
        predict_net=train_model.net.Proto(),
        parameters=data_parallel_model.GetCheckpointParams(train_model),
        inputs=[prefix + "/data"],
        outputs=[prefix + "/softmax"],
        shapes={
            prefix + "/softmax": (1, args.num_labels),
            prefix + "/data": (args.num_channels, args.image_size, args.image_size)
        }
    )
    # save the train_model for the current epoch
    model_path = "%s/%s_%d.mdl" % (
        args.file_store_path,
        args.save_model_name,
        epoch,
    )
    # set db_type to be "minidb" instead of "log_file_db", which breaks
    # the serialization in save_to_db. Need to switch back to log_file_db
    # after migration
    pred_exp.save_to_db(
        db_type="minidb",
        db_destination=model_path,
        predictor_export_meta=predictor_export_meta,
    )
def LoadModel(path, model):
    '''
    Load a pretrained model from the minidb file at `path`.

    Executes the serialized global/predict init nets on GPU to restore all
    parameter blobs into the current workspace, then moves the iteration
    counter blob back to a CPU device context.

    NOTE(review): the `model` argument is unused; restoration happens
    entirely through the global workspace.
    '''
    log.info("Loading path: {}".format(path))
    meta_net_def = pred_exp.load_from_db(path, 'minidb')
    # Rebuild the stored init nets from the exported meta definition.
    init_net = core.Net(pred_utils.GetNet(
        meta_net_def, predictor_constants.GLOBAL_INIT_NET_TYPE))
    predict_init_net = core.Net(pred_utils.GetNet(
        meta_net_def, predictor_constants.PREDICT_INIT_NET_TYPE))
    predict_init_net.RunAllOnGPU()
    init_net.RunAllOnGPU()
    # Running the init nets materializes the saved parameters.
    assert workspace.RunNetOnce(predict_init_net)
    assert workspace.RunNetOnce(init_net)
    # Hack: fix iteration counter which is in CUDA context after load model
    itercnt = workspace.FetchBlob("optimizer_iteration")
    workspace.FeedBlob(
        "optimizer_iteration",
        itercnt,
        device_option=core.DeviceOption(caffe2_pb2.CPU, 0)
    )
def RunEpoch(
    args,
    epoch,
    train_model,
    test_model,
    total_batch_size,
    num_shards,
    expname,
    explog,
):
    '''
    Run one epoch of the trainer.

    Executes epoch_size / (total_batch_size * num_shards) iterations of
    `train_model`, logging throughput and the loss/accuracy blobs of the
    first device each iteration. If `test_model` is given, runs 100 test
    iterations afterwards and averages accuracy over all devices. Results
    are appended to `explog`. Returns the next epoch number (epoch + 1).

    TODO: add checkpointing here.
    '''
    # TODO: add loading from checkpoint
    log.info("Starting epoch {}/{}".format(epoch, args.num_epochs))
    epoch_iters = int(args.epoch_size / total_batch_size / num_shards)
    for i in range(epoch_iters):
        # This timeout is required (temporarily) since CUDA-NCCL
        # operators might deadlock when synchronizing between GPUs.
        timeout = 600.0 if i == 0 else 60.0
        with timeout_guard.CompleteInTimeOrDie(timeout):
            t1 = time.time()
            workspace.RunNet(train_model.net.Proto().name)
            t2 = time.time()
            dt = t2 - t1
        fmt = "Finished iteration {}/{} of epoch {} ({:.2f} images/sec)"
        log.info(fmt.format(i + 1, epoch_iters, epoch, total_batch_size / dt))
        # Stats are read from the first device's blobs only.
        prefix = "{}_{}".format(
            train_model._device_prefix,
            train_model._devices[0])
        accuracy = workspace.FetchBlob(prefix + '/accuracy')
        loss = workspace.FetchBlob(prefix + '/loss')
        train_fmt = "Training loss: {}, accuracy: {}"
        log.info(train_fmt.format(loss, accuracy))
    # Cumulative image count after this epoch's iterations -- presumably what
    # experiment_util expects as input_count; confirm against its API.
    num_images = epoch * epoch_iters * total_batch_size
    prefix = "{}_{}".format(train_model._device_prefix, train_model._devices[0])
    accuracy = workspace.FetchBlob(prefix + '/accuracy')
    loss = workspace.FetchBlob(prefix + '/loss')
    learning_rate = workspace.FetchBlob(
        data_parallel_model.GetLearningRateBlobNames(train_model)[0]
    )
    test_accuracy = 0
    if (test_model is not None):
        # Run 100 iters of testing
        ntests = 0
        for _ in range(0, 100):
            workspace.RunNet(test_model.net.Proto().name)
            for g in test_model._devices:
                # NOTE(review): np.asscalar is deprecated in newer NumPy;
                # ndarray.item() is the modern equivalent.
                test_accuracy += np.asscalar(workspace.FetchBlob(
                    "{}_{}".format(test_model._device_prefix, g) + '/accuracy'
                ))
                ntests += 1
        test_accuracy /= ntests
    else:
        # Sentinel meaning "no test set configured".
        test_accuracy = (-1)
    explog.log(
        input_count=num_images,
        # `i` is the loop variable leaked from the training loop above
        # (epoch_iters - 1); this raises NameError if epoch_iters == 0.
        batch_count=(i + epoch * epoch_iters),
        additional_values={
            'accuracy': accuracy,
            'loss': loss,
            'learning_rate': learning_rate,
            'epoch': epoch,
            'test_accuracy': test_accuracy,
        }
    )
    assert loss < 40, "Exploded gradients :("
    # TODO: add checkpointing
    return epoch + 1
def Train(args):
    """Build, parallelize and run ResNet-50 training according to `args`.

    Handles device selection, optional multi-machine rendezvous (MPI, Redis
    or a shared filesystem), model/optimizer construction, an optional test
    net, resuming from a saved checkpoint, and the epoch loop with per-epoch
    model saving.
    """
    # Either use specified device list or generate one
    if args.gpus is not None:
        gpus = [int(x) for x in args.gpus.split(',')]
        num_gpus = len(gpus)
    else:
        gpus = list(range(args.num_gpus))
        num_gpus = args.num_gpus
    log.info("Running on GPUs: {}".format(gpus))
    # Verify valid batch size
    total_batch_size = args.batch_size
    batch_per_device = total_batch_size // num_gpus
    assert \
        total_batch_size % num_gpus == 0, \
        "Number of GPUs must divide batch size"
    # Round down epoch size to closest multiple of batch size across machines
    global_batch_size = total_batch_size * args.num_shards
    epoch_iters = int(args.epoch_size / global_batch_size)
    args.epoch_size = epoch_iters * global_batch_size
    log.info("Using epoch size: {}".format(args.epoch_size))
    # Create ModelHelper object
    train_arg_scope = {
        'order': 'NCHW',
        'use_cudnn': True,
        'cudnn_exhaustive_search': True,
        'ws_nbytes_limit': (args.cudnn_workspace_limit_mb * 1024 * 1024),
    }
    train_model = model_helper.ModelHelper(
        name="resnet50", arg_scope=train_arg_scope
    )
    num_shards = args.num_shards
    shard_id = args.shard_id
    # Expect interfaces to be comma separated.
    # Use of multiple network interfaces is not yet complete,
    # so simply use the first one in the list.
    interfaces = args.distributed_interfaces.split(",")
    # Rendezvous using MPI when run with mpirun
    if os.getenv("OMPI_COMM_WORLD_SIZE") is not None:
        num_shards = int(os.getenv("OMPI_COMM_WORLD_SIZE", 1))
        shard_id = int(os.getenv("OMPI_COMM_WORLD_RANK", 0))
        # NOTE(review): if mpirun launches a single rank (num_shards == 1),
        # `rendezvous` is never assigned in this branch and the Parallelize
        # call below raises NameError -- confirm MPI launches always use
        # more than one rank.
        if num_shards > 1:
            rendezvous = dict(
                kv_handler=None,
                num_shards=num_shards,
                shard_id=shard_id,
                engine="GLOO",
                transport=args.distributed_transport,
                interface=interfaces[0],
                mpi_rendezvous=True,
                exit_nets=None)
    elif num_shards > 1:
        # Create rendezvous for distributed computation
        store_handler = "store_handler"
        if args.redis_host is not None:
            # Use Redis for rendezvous if Redis host is specified
            workspace.RunOperatorOnce(
                core.CreateOperator(
                    "RedisStoreHandlerCreate", [], [store_handler],
                    host=args.redis_host,
                    port=args.redis_port,
                    prefix=args.run_id,
                )
            )
        else:
            # Use filesystem for rendezvous otherwise
            workspace.RunOperatorOnce(
                core.CreateOperator(
                    "FileStoreHandlerCreate", [], [store_handler],
                    path=args.file_store_path,
                    prefix=args.run_id,
                )
            )
        rendezvous = dict(
            kv_handler=store_handler,
            shard_id=shard_id,
            num_shards=num_shards,
            engine="GLOO",
            transport=args.distributed_transport,
            interface=interfaces[0],
            exit_nets=None)
    else:
        # Single-machine, single-shard run: no rendezvous needed.
        rendezvous = None
    # Model building functions
    def create_resnet50_model_ops(model, loss_scale):
        # fp16 weights require the fp16-aware initializer.
        initializer = (pFP16Initializer if args.dtype == 'float16'
                       else Initializer)
        with brew.arg_scope([brew.conv, brew.fc],
                            WeightInitializer=initializer,
                            BiasInitializer=initializer,
                            enable_tensor_core=args.enable_tensor_core,
                            float16_compute=args.float16_compute):
            pred = resnet.create_resnet50(
                model,
                "data",
                num_input_channels=args.num_channels,
                num_labels=args.num_labels,
                no_bias=True,
                no_loss=True,
            )
        if args.dtype == 'float16':
            # Loss is computed in fp32.
            pred = model.net.HalfToFloat(pred, pred + '_fp32')
        softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
                                              ['softmax', 'loss'])
        loss = model.Scale(loss, scale=loss_scale)
        brew.accuracy(model, [softmax, "label"], "accuracy")
        return [loss]
    def add_optimizer(model):
        # Step LR schedule: multiply LR by gamma (0.1) every ~30 epochs.
        stepsz = int(30 * args.epoch_size / total_batch_size / num_shards)
        if args.float16_compute:
            # TODO: merge with multi-precision optimizer
            opt = optimizer.build_fp16_sgd(
                model,
                args.base_learning_rate,
                momentum=0.9,
                nesterov=1,
                weight_decay=args.weight_decay,   # weight decay included
                policy="step",
                stepsize=stepsz,
                gamma=0.1
            )
        else:
            optimizer.add_weight_decay(model, args.weight_decay)
            opt = optimizer.build_multi_precision_sgd(
                model,
                args.base_learning_rate,
                momentum=0.9,
                nesterov=1,
                policy="step",
                stepsize=stepsz,
                gamma=0.1
            )
        return opt
    # Define add_image_input function.
    # Depends on the "train_data" argument.
    # Note that the reader will be shared between all GPUs.
    if args.train_data == "null":
        def add_image_input(model):
            AddNullInput(
                model,
                None,
                batch_size=batch_per_device,
                img_size=args.image_size,
                dtype=args.dtype,
            )
    else:
        reader = train_model.CreateDB(
            "reader",
            db=args.train_data,
            db_type=args.db_type,
            num_shards=num_shards,
            shard_id=shard_id,
        )
        def add_image_input(model):
            AddImageInput(
                model,
                reader,
                batch_size=batch_per_device,
                img_size=args.image_size,
                dtype=args.dtype,
                is_test=False,
            )
    def add_post_sync_ops(model):
        """Add ops applied after initial parameter sync."""
        for param_info in model.GetOptimizationParamInfo(model.GetParams()):
            if param_info.blob_copy is not None:
                model.param_init_net.HalfToFloat(
                    param_info.blob,
                    param_info.blob_copy[core.DataType.FLOAT]
                )
    # Create parallelized model
    data_parallel_model.Parallelize(
        train_model,
        input_builder_fun=add_image_input,
        forward_pass_builder_fun=create_resnet50_model_ops,
        optimizer_builder_fun=add_optimizer,
        post_sync_builder_fun=add_post_sync_ops,
        devices=gpus,
        rendezvous=rendezvous,
        optimize_gradient_memory=False,
        cpu_device=args.use_cpu,
        shared_model=args.use_cpu,
    )
    if args.model_parallel:
        # Shift half of the activations to another GPU
        assert workspace.NumCudaDevices() >= 2 * args.num_gpus
        activations = data_parallel_model_utils.GetActivationBlobs(train_model)
        data_parallel_model_utils.ShiftActivationDevices(
            train_model,
            activations=activations[len(activations) // 2:],
            shifts={g: args.num_gpus + g for g in range(args.num_gpus)},
        )
    data_parallel_model.OptimizeGradientMemory(train_model, {}, set(), False)
    workspace.RunNetOnce(train_model.param_init_net)
    workspace.CreateNet(train_model.net)
    # Add test model, if specified
    test_model = None
    if (args.test_data is not None):
        log.info("----- Create test net ----")
        test_arg_scope = {
            'order': "NCHW",
            'use_cudnn': True,
            'cudnn_exhaustive_search': True,
        }
        test_model = model_helper.ModelHelper(
            name="resnet50_test", arg_scope=test_arg_scope, init_params=False
        )
        test_reader = test_model.CreateDB(
            "test_reader",
            db=args.test_data,
            db_type=args.db_type,
        )
        def test_input_fn(model):
            AddImageInput(
                model,
                test_reader,
                batch_size=batch_per_device,
                img_size=args.image_size,
                dtype=args.dtype,
                is_test=True,
            )
        data_parallel_model.Parallelize(
            test_model,
            input_builder_fun=test_input_fn,
            forward_pass_builder_fun=create_resnet50_model_ops,
            post_sync_builder_fun=add_post_sync_ops,
            param_update_builder_fun=None,
            devices=gpus,
            cpu_device=args.use_cpu,
        )
        workspace.RunNetOnce(test_model.param_init_net)
        workspace.CreateNet(test_model.net)
    epoch = 0
    # load the pre-trained model and reset epoch
    if args.load_model_path is not None:
        LoadModel(args.load_model_path, train_model)
        # Sync the model params
        data_parallel_model.FinalizeAfterCheckpoint(train_model)
        # reset epoch. load_model_path should end with *_X.mdl,
        # where X is the epoch number
        last_str = args.load_model_path.split('_')[-1]
        if last_str.endswith('.mdl'):
            epoch = int(last_str[:-4])
            log.info("Reset epoch to {}".format(epoch))
        else:
            log.warning("The format of load_model_path doesn't match!")
    expname = "resnet50_gpu%d_b%d_L%d_lr%.2f_v2" % (
        args.num_gpus,
        total_batch_size,
        args.num_labels,
        args.base_learning_rate,
    )
    explog = experiment_util.ModelTrainerLog(expname, args)
    # Run the training one epoch at a time
    while epoch < args.num_epochs:
        epoch = RunEpoch(
            args,
            epoch,
            train_model,
            test_model,
            total_batch_size,
            num_shards,
            expname,
            explog
        )
        # Save the model for each epoch
        SaveModel(args, train_model, epoch)
        model_path = "%s/%s_" % (
            args.file_store_path,
            args.save_model_name
        )
        # remove the saved model from the previous epoch if it exists
        if os.path.isfile(model_path + str(epoch - 1) + ".mdl"):
            os.remove(model_path + str(epoch - 1) + ".mdl")
def main():
    """Parse command-line arguments and launch training."""
    # TODO: use argv
    def str2bool(v):
        # argparse's `type=bool` is a classic footgun: bool("False") is True
        # because any non-empty string is truthy. Accept explicit textual
        # spellings instead, and reject anything else loudly.
        if isinstance(v, bool):
            return v
        if v.lower() in ("yes", "true", "t", "1"):
            return True
        if v.lower() in ("no", "false", "f", "0"):
            return False
        raise argparse.ArgumentTypeError("Boolean value expected, got %r" % v)
    parser = argparse.ArgumentParser(
        description="Caffe2: Resnet-50 training"
    )
    parser.add_argument("--train_data", type=str, default=None, required=True,
                        help="Path to training data (or 'null' to simulate)")
    parser.add_argument("--test_data", type=str, default=None,
                        help="Path to test data")
    parser.add_argument("--db_type", type=str, default="lmdb",
                        help="Database type (such as lmdb or leveldb)")
    parser.add_argument("--gpus", type=str,
                        help="Comma separated list of GPU devices to use")
    parser.add_argument("--num_gpus", type=int, default=1,
                        help="Number of GPU devices (instead of --gpus)")
    # Bug fix: was type=bool, under which "--model_parallel False" parsed
    # as True (any non-empty string is truthy).
    parser.add_argument("--model_parallel", type=str2bool, default=False,
                        help="Split model over 2 x num_gpus")
    parser.add_argument("--num_channels", type=int, default=3,
                        help="Number of color channels")
    parser.add_argument("--image_size", type=int, default=227,
                        help="Input image size (to crop to)")
    parser.add_argument("--num_labels", type=int, default=1000,
                        help="Number of labels")
    parser.add_argument("--batch_size", type=int, default=32,
                        help="Batch size, total over all GPUs")
    parser.add_argument("--epoch_size", type=int, default=1500000,
                        help="Number of images/epoch, total over all machines")
    parser.add_argument("--num_epochs", type=int, default=1000,
                        help="Num epochs.")
    parser.add_argument("--base_learning_rate", type=float, default=0.1,
                        help="Initial learning rate.")
    parser.add_argument("--weight_decay", type=float, default=1e-4,
                        help="Weight decay (L2 regularization)")
    parser.add_argument("--cudnn_workspace_limit_mb", type=int, default=64,
                        help="CuDNN workspace limit in MBs")
    parser.add_argument("--num_shards", type=int, default=1,
                        help="Number of machines in distributed run")
    parser.add_argument("--shard_id", type=int, default=0,
                        help="Shard id.")
    parser.add_argument("--run_id", type=str,
                        help="Unique run identifier (e.g. uuid)")
    parser.add_argument("--redis_host", type=str,
                        help="Host of Redis server (for rendezvous)")
    parser.add_argument("--redis_port", type=int, default=6379,
                        help="Port of Redis server (for rendezvous)")
    parser.add_argument("--file_store_path", type=str, default="/tmp",
                        help="Path to directory to use for rendezvous")
    parser.add_argument("--save_model_name", type=str, default="resnet50_model",
                        help="Save the trained model to a given name")
    parser.add_argument("--load_model_path", type=str, default=None,
                        help="Load previously saved model to continue training")
    # Bug fix: was type=bool (same truthiness pitfall as --model_parallel).
    parser.add_argument("--use_cpu", type=str2bool, default=False,
                        help="Use CPU instead of GPU")
    parser.add_argument('--dtype', default='float',
                        choices=['float', 'float16'],
                        help='Data type used for training')
    parser.add_argument('--float16_compute', action='store_true',
                        help="Use float 16 compute, if available")
    parser.add_argument('--enable-tensor-core', action='store_true',
                        help='Enable Tensor Core math for Conv and FC ops')
    parser.add_argument("--distributed_transport", type=str, default="tcp",
                        help="Transport to use for distributed run [tcp|ibverbs]")
    parser.add_argument("--distributed_interfaces", type=str, default="",
                        help="Network interfaces to use for distributed run")
    args = parser.parse_args()
    Train(args)
# Script entry point: initialize the global Caffe2 workspace, then train.
if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
    main()
| |
import logging
import os
import numpy as np
import ray.ray_constants as ray_constants
logger = logging.getLogger(__name__)
class RayParams:
    """A class used to store the parameters used by Ray.

    Attributes:
        redis_address (str): The address of the Redis server to connect to. If
            this address is not provided, then this command will start Redis, a
            raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits.
        redis_port (int): The port that the primary Redis shard should listen
            to. If None, then it will fall back to
            ray.ray_constants.DEFAULT_PORT, or a random port if the default is
            not available.
        redis_shard_ports: A list of the ports to use for the non-primary Redis
            shards. If None, then it will fall back to the ports right after
            redis_port, or random ports if those are not available.
        num_cpus (int): Number of CPUs to configure the raylet with.
        num_gpus (int): Number of GPUs to configure the raylet with.
        resources: A dictionary mapping the name of a resource to the quantity
            of that resource available.
        memory: Total available memory for workers requesting memory.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        redis_max_memory: The max amount of memory (in bytes) to allow redis
            to use, or None for no limit. Once the limit is exceeded, redis
            will start LRU eviction of entries. This only applies to the
            sharded redis tables (task and object tables).
        object_manager_port (int): The port to use for the object manager.
        node_manager_port: The port to use for the node manager.
        gcs_server_port: The port to use for the GCS server.
        node_ip_address (str): The IP address of the node that we are on.
        raylet_ip_address (str): The IP address of the raylet that this node
            connects to.
        min_worker_port (int): The lowest port number that workers will bind
            on. If not set or set to 0, random ports will be chosen.
        max_worker_port (int): The highest port number that workers will bind
            on. If set, min_worker_port must also be set.
        worker_port_list (str): An explicit list of ports to be used for
            workers (comma-separated). Overrides min_worker_port and
            max_worker_port.
        ray_client_server_port (int): The port number the ray client server
            will bind on. If not set, the ray client server will not
            be started.
        object_ref_seed (int): Used to seed the deterministic generation of
            object refs. The same value can be used across multiple runs of the
            same job in order to generate the object refs in a consistent
            manner. However, the same ID should not be used for different jobs.
        redirect_output (bool): True if stdout and stderr for non-worker
            processes should be redirected to files and false otherwise.
        external_addresses (str): The address of external Redis server to
            connect to, in format of "ip1:port1,ip2:port2,...". If this
            address is provided, then ray won't start Redis instances in the
            head node but use external Redis server(s) instead.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        redis_max_clients: If provided, attempt to configure Redis with this
            maxclients number.
        redis_password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        worker_path (str): The path of the source code that will be run by the
            worker.
        setup_worker_path (str): The path of the Python file that will set up
            the environment for the worker process.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        include_dashboard: Boolean flag indicating whether to start the web
            UI, which displays the status of the Ray cluster. If this value is
            None, then the UI will be started if the relevant dependencies are
            present.
        dashboard_host: The host to bind the web UI server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        dashboard_port: The port to bind the dashboard server to.
            Defaults to 8265.
        dashboard_agent_listen_port: The port for dashboard agents to listen on
            for HTTP requests.
        logging_level: Logging level, default will be logging.INFO.
        logging_format: Logging format, default contains a timestamp,
            filename, line number, and message. See ray_constants.py.
        plasma_store_socket_name (str): If provided, it will specify the socket
            name used by the plasma store.
        raylet_socket_name (str): If provided, it will specify the socket path
            used by the raylet process.
        temp_dir (str): If provided, it will specify the root temporary
            directory for the Ray process.
        runtime_env_dir_name (str): If provided, specifies the directory that
            will be created in the session dir to hold runtime_env files.
        include_log_monitor (bool): If True, then start a log monitor to
            monitor the log files for all processes on this node and push their
            contents to Redis.
        autoscaling_config: path to autoscaling config file.
        metrics_agent_port(int): The port to bind metrics agent.
        metrics_export_port(int): The port at which metrics are exposed
            through a Prometheus endpoint.
        no_monitor(bool): If True, the ray autoscaler monitor for this cluster
            will not be started.
        _system_config (dict): Configuration for overriding RayConfig
            defaults. Used to set system configuration and for experimental Ray
            core feature flags.
        enable_object_reconstruction (bool): Enable plasma reconstruction on
            failure.
        start_initial_python_workers_for_first_job (bool): If true, start
            initial Python workers for the first job on the node.
        ray_debugger_external (bool): If true, make the Ray debugger for a
            worker available externally to the node it is running on. This will
            bind on 0.0.0.0 instead of localhost.
        env_vars (dict): Override environment variables for the raylet.
    """

    def __init__(
        self,
        redis_address=None,
        gcs_address=None,
        num_cpus=None,
        num_gpus=None,
        resources=None,
        memory=None,
        object_store_memory=None,
        redis_max_memory=None,
        redis_port=None,
        redis_shard_ports=None,
        object_manager_port=None,
        node_manager_port=0,
        gcs_server_port=None,
        node_ip_address=None,
        raylet_ip_address=None,
        min_worker_port=None,
        max_worker_port=None,
        worker_port_list=None,
        ray_client_server_port=None,
        object_ref_seed=None,
        driver_mode=None,
        redirect_output=None,
        external_addresses=None,
        num_redis_shards=None,
        redis_max_clients=None,
        redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
        plasma_directory=None,
        worker_path=None,
        setup_worker_path=None,
        huge_pages=False,
        include_dashboard=None,
        dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
        dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
        dashboard_agent_listen_port=0,
        logging_level=logging.INFO,
        logging_format=ray_constants.LOGGER_FORMAT,
        plasma_store_socket_name=None,
        raylet_socket_name=None,
        temp_dir=None,
        runtime_env_dir_name=None,
        include_log_monitor=None,
        autoscaling_config=None,
        start_initial_python_workers_for_first_job=False,
        ray_debugger_external=False,
        _system_config=None,
        enable_object_reconstruction=False,
        metrics_agent_port=None,
        metrics_export_port=None,
        tracing_startup_hook=None,
        no_monitor=False,
        env_vars=None,
    ):
        self.redis_address = redis_address
        self.gcs_address = gcs_address
        self.num_cpus = num_cpus
        self.num_gpus = num_gpus
        self.memory = memory
        self.object_store_memory = object_store_memory
        self.resources = resources
        self.redis_max_memory = redis_max_memory
        self.redis_port = redis_port
        self.redis_shard_ports = redis_shard_ports
        self.object_manager_port = object_manager_port
        self.node_manager_port = node_manager_port
        self.gcs_server_port = gcs_server_port
        self.node_ip_address = node_ip_address
        self.raylet_ip_address = raylet_ip_address
        self.min_worker_port = min_worker_port
        self.max_worker_port = max_worker_port
        self.worker_port_list = worker_port_list
        self.ray_client_server_port = ray_client_server_port
        self.driver_mode = driver_mode
        self.redirect_output = redirect_output
        self.external_addresses = external_addresses
        self.num_redis_shards = num_redis_shards
        self.redis_max_clients = redis_max_clients
        self.redis_password = redis_password
        self.plasma_directory = plasma_directory
        self.worker_path = worker_path
        self.setup_worker_path = setup_worker_path
        self.huge_pages = huge_pages
        self.include_dashboard = include_dashboard
        self.dashboard_host = dashboard_host
        self.dashboard_port = dashboard_port
        self.dashboard_agent_listen_port = dashboard_agent_listen_port
        self.plasma_store_socket_name = plasma_store_socket_name
        self.raylet_socket_name = raylet_socket_name
        self.temp_dir = temp_dir
        self.runtime_env_dir_name = (
            runtime_env_dir_name or ray_constants.DEFAULT_RUNTIME_ENV_DIR_NAME
        )
        self.include_log_monitor = include_log_monitor
        self.autoscaling_config = autoscaling_config
        self.metrics_agent_port = metrics_agent_port
        self.metrics_export_port = metrics_export_port
        self.tracing_startup_hook = tracing_startup_hook
        self.no_monitor = no_monitor
        self.object_ref_seed = object_ref_seed
        self.start_initial_python_workers_for_first_job = (
            start_initial_python_workers_for_first_job
        )
        self.ray_debugger_external = ray_debugger_external
        self.env_vars = env_vars
        self._system_config = _system_config or {}
        self._enable_object_reconstruction = enable_object_reconstruction
        self._check_usage()

        # Set the internal config options for object reconstruction.
        # (Cleanup: removed a stray debug print() and a dead `is None` check
        # -- the `or {}` above guarantees _system_config is a dict here.)
        if enable_object_reconstruction:
            # Turn off object pinning.
            self._system_config["lineage_pinning_enabled"] = True

    def update(self, **kwargs):
        """Update the settings according to the keyword arguments.

        Args:
            kwargs: The keyword arguments to set corresponding fields.

        Raises:
            ValueError: if a keyword does not match an existing field.
        """
        for arg in kwargs:
            if hasattr(self, arg):
                setattr(self, arg, kwargs[arg])
            else:
                raise ValueError(f"Invalid RayParams parameter in update: {arg}")
        self._check_usage()

    def update_if_absent(self, **kwargs):
        """Update the settings when the target fields are None.

        Args:
            kwargs: The keyword arguments to set corresponding fields.

        Raises:
            ValueError: if a keyword does not match an existing field.
        """
        for arg in kwargs:
            if hasattr(self, arg):
                if getattr(self, arg) is None:
                    setattr(self, arg, kwargs[arg])
            else:
                raise ValueError(
                    "Invalid RayParams parameter in" " update_if_absent: %s" % arg
                )
        self._check_usage()

    def update_pre_selected_port(self):
        """Collect all pre-selected ports and record them.

        Builds the component -> ports mapping from the currently configured
        port fields and stores the flattened set in ``self.reserved_ports``.
        (Doc fix: this method returns None; it does not return the mapping.)

        Raises:
            ValueError: if the same port is pre-selected for more than one
                component.
        """

        def wrap_port(port):
            # 0 port means select a random port for the grpc server.
            if port is None or port == 0:
                return []
            else:
                return [port]

        # Create a dictionary of the component -> port mapping.
        pre_selected_ports = {
            "gcs": wrap_port(self.redis_port),
            "object_manager": wrap_port(self.object_manager_port),
            "node_manager": wrap_port(self.node_manager_port),
            "gcs_server": wrap_port(self.gcs_server_port),
            "client_server": wrap_port(self.ray_client_server_port),
            "dashboard": wrap_port(self.dashboard_port),
            "dashboard_agent_grpc": wrap_port(self.metrics_agent_port),
            "dashboard_agent_http": wrap_port(self.dashboard_agent_listen_port),
            "metrics_export": wrap_port(self.metrics_export_port),
        }
        redis_shard_ports = self.redis_shard_ports
        if redis_shard_ports is None:
            redis_shard_ports = []
        pre_selected_ports["redis_shards"] = redis_shard_ports
        if self.worker_port_list is None:
            if self.min_worker_port is not None and self.max_worker_port is not None:
                pre_selected_ports["worker_ports"] = list(
                    range(self.min_worker_port, self.max_worker_port + 1)
                )
            else:
                # The dict is not updated when it requires random ports.
                pre_selected_ports["worker_ports"] = []
        else:
            pre_selected_ports["worker_ports"] = [
                int(port) for port in self.worker_port_list.split(",")
            ]
        # Update the pre selected port set, rejecting duplicates across
        # components.
        self.reserved_ports = set()
        for comp, port_list in pre_selected_ports.items():
            for port in port_list:
                if port in self.reserved_ports:
                    raise ValueError(
                        f"Ray component {comp} is trying to use "
                        f"a port number {port} that is used by "
                        "other components.\n"
                        f"Port information: "
                        f"{self._format_ports(pre_selected_ports)}\n"
                        "If you allocate ports, "
                        "please make sure the same port is not used by "
                        "multiple components."
                    )
                self.reserved_ports.add(port)

    def _check_usage(self):
        """Validate the current field values, raising ValueError on misuse."""
        if self.worker_port_list is not None:
            for port_str in self.worker_port_list.split(","):
                try:
                    port = int(port_str)
                except ValueError as e:
                    raise ValueError(
                        "worker_port_list must be a comma-separated "
                        + "list of integers: {}".format(e)
                    ) from None
                if port < 1024 or port > 65535:
                    raise ValueError(
                        "Ports in worker_port_list must be "
                        "between 1024 and 65535. Got: {}".format(port)
                    )
        # Used primarily for testing.
        # NOTE: any non-empty env value (even "0") enables this, since the
        # raw string is tested for truthiness.
        if os.environ.get("RAY_USE_RANDOM_PORTS", False):
            if self.min_worker_port is None and self.max_worker_port is None:
                self.min_worker_port = 0
                self.max_worker_port = 0
        if self.min_worker_port is not None:
            if self.min_worker_port != 0 and (
                self.min_worker_port < 1024 or self.min_worker_port > 65535
            ):
                raise ValueError(
                    "min_worker_port must be 0 or an integer " "between 1024 and 65535."
                )
        if self.max_worker_port is not None:
            if self.min_worker_port is None:
                raise ValueError(
                    "If max_worker_port is set, min_worker_port " "must also be set."
                )
            elif self.max_worker_port != 0:
                if self.max_worker_port < 1024 or self.max_worker_port > 65535:
                    raise ValueError(
                        "max_worker_port must be 0 or an integer between "
                        "1024 and 65535."
                    )
                elif self.max_worker_port <= self.min_worker_port:
                    raise ValueError(
                        "max_worker_port must be higher than " "min_worker_port."
                    )
        if self.ray_client_server_port is not None:
            if (
                self.ray_client_server_port < 1024
                or self.ray_client_server_port > 65535
            ):
                raise ValueError(
                    "ray_client_server_port must be an integer "
                    "between 1024 and 65535."
                )
        if self.resources is not None:
            assert "CPU" not in self.resources, (
                "'CPU' should not be included in the resource dictionary. Use "
                "num_cpus instead."
            )
            assert "GPU" not in self.resources, (
                "'GPU' should not be included in the resource dictionary. Use "
                "num_gpus instead."
            )
        if self.redirect_output is not None:
            raise DeprecationWarning("The redirect_output argument is deprecated.")
        # Parse the numpy version.
        numpy_version = np.__version__.split(".")
        numpy_major, numpy_minor = int(numpy_version[0]), int(numpy_version[1])
        if numpy_major <= 1 and numpy_minor < 16:
            logger.warning(
                "Using ray with numpy < 1.16.0 will result in slow "
                "serialization. Upgrade numpy if using with ray."
            )

    def _format_ports(self, pre_selected_ports):
        """Format the pre selected ports information to be more
        human readable.
        """
        ports = pre_selected_ports.copy()
        for comp, port_list in ports.items():
            if len(port_list) == 1:
                ports[comp] = port_list[0]
            elif len(port_list) == 0:
                # Nothing is selected, meaning it will be randomly selected.
                ports[comp] = "random"
            elif comp == "worker_ports":
                min_port = port_list[0]
                max_port = port_list[-1]
                # Long ranges are summarized instead of listed in full.
                if len(port_list) < 50:
                    port_range_str = str(port_list)
                else:
                    port_range_str = f"from {min_port} to {max_port}"
                ports[comp] = f"{len(port_list)} ports {port_range_str}"
        return ports
| |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from __future__ import with_statement
import re, datetime
import math
import pytz
import codecs
import gsmcodecs
import threading
# Per-encoding character limits:
# (max chars in a single PDU, max chars per segment of a concatenated SM).
MSG_LIMITS = {
    # 'encoding', (max_normal, max_csm)
    'gsm': (160,152),
    'ucs2': (70,67)
}
# Upper bound on the number of segments in one concatenated message.
MAX_CSM_SEGMENTS = 255
# used to track csm reference numbers per receiver
__csm_refs = {}
# Serializes access to __csm_refs across threads.
__ref_lock = threading.Lock()
def get_outbound_pdus(text, recipient):
    """
    Returns a list of PDUs to send the provided
    text to the given recipient.

    If everything fits in one message, the list
    will have just one PDU. Otherwise it will be a list of
    Concatenated SM PDUs.

    If the message goes beyond the max length for a CSM
    (it's gotta be _REALLY BIG_), this will raise a ValueError.
    """
    # First figure out the encoding: try the 7-bit 'gsm' codec (which may
    # expand some chars, so we measure the encoded length), falling back
    # to UCS2 for anything the codec rejects.
    encoding = 'ucs2'
    try:
        encoded_text = text.encode('gsm')
        encoding = 'gsm'
    except Exception:  # narrowed from a bare except (keeps Ctrl-C working)
        encoded_text = text
    csm_max = MSG_LIMITS[encoding][1]
    if len(encoded_text) > (MAX_CSM_SEGMENTS * csm_max):
        raise ValueError('Message text too long')
    # see if we are under the single PDU limit
    if len(encoded_text) <= MSG_LIMITS[encoding][0]:
        return [OutboundGsmPdu(text, recipient)]
    # ok, we are a CSM, so lets figure out the parts.
    # Get our per-recipient reference number (mod 256).
    with __ref_lock:
        if recipient not in __csm_refs:
            __csm_refs[recipient] = 0
        csm_ref = __csm_refs[recipient] % 256
        __csm_refs[recipient] += 1
    # make the PDUs
    # Bug fix: the segment count was previously computed with the single-PDU
    # limit (MSG_LIMITS[encoding][0], i.e. 160/70) while the slicing below
    # uses the CSM per-segment limit (csm_max, i.e. 152/67), which silently
    # dropped the tail of sufficiently long messages.
    num = int(math.ceil(len(encoded_text) / float(csm_max)))
    pdus = []
    for seq in range(num):
        i = seq * csm_max
        seg_txt = encoded_text[i:i + csm_max]
        if encoding == 'gsm':
            # a little silly to encode, decode, then have PDU
            # re-encode but keeps PDU API clean
            seg_txt = seg_txt.decode('gsm')
        pdus.append(
            OutboundGsmPdu(
                seg_txt,
                recipient,
                csm_ref=csm_ref,
                csm_seq=seq + 1,
                csm_total=num
            )
        )
    return pdus
class SmsParseException(Exception):
    """Raised when an inbound PDU string cannot be parsed."""
    pass
class SmsEncodeException(Exception):
    """Raised when message text cannot be encoded into an outbound PDU."""
    pass
class GsmPdu(object):
    """
    Base holder for the fields shared by inbound and outbound PDUs.
    Subclasses (OutboundGsmPdu / ReceivedGsmPdu) populate the fields.
    """

    def __init__(self):
        # concatenated-SM bookkeeping (None/False unless part of a CSM)
        self.is_csm = False
        self.csm_ref = None
        self.csm_seq = None
        self.csm_total = None
        # message payload / addressing
        self.address = None
        self.text = None
        self.pdu_string = None
        self.sent_ts = None

    def dump(self):
        """Return a useful multiline rep of self"""
        header = '\n'.join(['Addressee: %s' % self.address,
                            'Length: %s' % len(self.text),
                            'Sent %s' % self.sent_ts])
        if self.is_csm:
            csm_info = '\nCSM: %d of %d for Ref# %d' % (self.csm_seq,
                                                        self.csm_total,
                                                        self.csm_ref)
        else:
            csm_info = ''
        return '%s%s\nMessage: \n%s\nPDU: %s' % (header, csm_info,
                                                 self.text, self.pdu_string)
class OutboundGsmPdu(GsmPdu):
    """
    Formatted outbound PDU. Basically just
    a struct.

    Don't instantiate directly! Use 'get_outbound_pdus()'
    which will return a list of PDUs needed to
    send the message
    """

    def __init__(self, text, recipient, csm_ref=None, csm_seq=None, csm_total=None):
        GsmPdu.__init__(self)

        self.address = recipient
        self.text = text
        self.gsm_text = None # if we are gsm, put the gsm encoded str here
        # presence of a CSM reference marks this as a concatenated segment
        self.is_csm = csm_ref is not None
        self.csm_ref = ( None if csm_ref is None else int(csm_ref) )
        self.csm_seq = ( None if csm_seq is None else int(csm_seq) )
        self.csm_total = ( None if csm_total is None else int(csm_total) )

        try:
            # following does two things:
            # 1. Raises exception if text cannot be encoded GSM
            # 2. measures the number of chars after encoding
            #    since GSM is partially multi-byte, a string
            #    in GSM can be longer than the obvious num of chars
            #    e.g. 'hello' is 5 but 'hello^' is _7_
            self.gsm_text=self.text.encode('gsm')
            num_chars=len(self.gsm_text)
        except:
            # NOTE(review): bare except -- presumably meant to catch the
            # codec's LookupError/UnicodeError; confirm before narrowing.
            num_chars=len(self.text)

        # pick the per-message limit: CSM segments are smaller than a
        # single SMS because the user-data header takes up room
        # (NOTE: 'max' shadows the builtin here)
        if self.is_csm:
            max = MSG_LIMITS[self.encoding][1]
        else:
            max = MSG_LIMITS[self.encoding][0]

        if num_chars>max:
            raise SmsEncodeException('Text length too great')

    @property
    def encoding(self):
        # 'gsm' iff the text survived GSM encoding in __init__
        return ( 'gsm' if self.is_gsm else 'ucs2' )

    @property
    def is_gsm(self):
        return self.gsm_text is not None

    @property
    def is_ucs2(self):
        return not self.is_gsm

    def __get_pdu_string(self):
        """Assemble and return the full outbound PDU as an uppercase hex string."""
        # now put the PDU string together
        # first octet is SMSC info, 00 means get from stored on SIM
        pdu=['00']

        # Next is 'SMS-SUBMIT First Octet' -- '11' means submit w/validity.
        # '51' means Concatendated SM w/validity
        pdu.append('51' if self.is_csm else '11')

        # Next is 'message' reference. '00' means phone can set this
        pdu.append('00')

        # now recipient number, first type
        # (NOTE: 'type' shadows the builtin)
        if self.address[0]=='+':
            num = self.address[1:]
            type = '91' # international
        else:
            num = self.address
            type = 'A8' # national number

        # length (in digits, before semi-octet swapping)
        num_len = len(num)

        # twiddle it (semi-octet swap; odd lengths padded with 'F')
        num = _twiddle(num, False)

        pdu.append('%02X' % num_len) # length
        pdu.append(type)
        pdu.append(num)

        # now protocol ID
        pdu.append('00')

        # data coding scheme: 00 = GSM 7-bit default alphabet, 08 = UCS2
        pdu.append('00' if self.is_gsm else '08')

        # validity period, just default to 4 days
        pdu.append('AA')

        # Now the fun! Make the user data (the text message)
        # Complications:
        # 1. If we are a CSM, need the CSM header
        # 2. If we are a CSM and GSM, need to pad the data
        padding = 0
        udh=''
        if self.is_csm:
            # data header always starts the same:
            # length: 5 octets '05'
            # type: CSM '00'
            # length of CSM info, 3 octets '03'
            udh='050003%02X%02X%02X' % (self.csm_ref, self.csm_total, self.csm_seq)
            if self.is_gsm:
                # padding is number of bits to pad-out beyond
                # the header to make everything land on a '7-bit'
                # boundary rather than 8-bit.
                # Can calculate as 7 - (UDH*8 % 7), but the UDH
                # is always 48, so padding is always 1
                padding = 1

        # now encode contents
        encoded_sm = (
            _pack_septets(self.gsm_text, padding=padding)
            if self.is_gsm
            else self.text.encode('utf_16_be')
            )
        # NOTE: str.encode('hex') is Python 2 only
        encoded_sm = encoded_sm.encode('hex').upper()

        # and get the data length which is in septets
        # if GSM, and octets otherwise
        if self.is_gsm:
            # just take length of encoded gsm text
            # as each char becomes a septet when encoded
            udl = len(self.gsm_text)
            if len(udh)>0:
                udl+=7 # header is always 7 septets (inc. padding)
        else:
            # in this case just the byte length of content + header
            udl = (len(encoded_sm)+len(udh))/2

        # now add it all to the pdu
        pdu.append('%02X' % udl)
        pdu.append(udh)
        pdu.append(encoded_sm)

        return ''.join(pdu)

    def __set_pdu_string(self, val):
        # the outbound PDU string is always derived from the fields; the
        # setter is a deliberate no-op so GsmPdu.__init__'s assignment of
        # self.pdu_string does nothing here
        pass

    # read derives the string; writes are ignored (see above)
    pdu_string=property(__get_pdu_string, __set_pdu_string)
class ReceivedGsmPdu(GsmPdu):
    """
    A nice little class to parse a PDU and give you useful
    properties.

    Maybe one day it will let you set text and sender info and
    ask it to write itself out as a PDU!
    """

    def __init__(self, pdu_str):
        GsmPdu.__init__(self)

        # here are the properties that are set below in the
        # ugly parse code.
        self.tp_mms = False # more messages to send
        self.tp_sri = False # status report indication
        self.address = None # phone number of sender as string
        self.sent_ts = None # Datetime of when SMSC stamped the message, roughly when sent
        self.text = None # string of message contents
        self.pdu_string = pdu_str.upper() # original data as a string
        self.is_csm = False # is this one of a sequence of concatenated messages?
        self.csm_ref = 0 # reference number
        self.csm_seq = 0 # this chunk's sequence num, 1-based
        self.csm_total = 0 # number of chunks total
        self.encoding = None # either 'gsm' or 'ucs2'

        self.__parse_pdu()

    """
    This is truly hideous, just don't look below this line!
    It's times like this that I miss closed-compiled source...
    """

    def __parse_pdu(self):
        # Consume the hex string front-to-back, field by field, per the
        # SMS-DELIVER TPDU layout; each helper returns (value, remainder).
        pdu=self.pdu_string # make copy

        # grab smsc header, and throw away
        # length is held in first octet
        smsc_len,pdu=_consume_one_int(pdu)
        # consume smsc header
        c,pdu=_consume(pdu, smsc_len)

        # grab the deliver octet; the low two bits are the message type
        # indicator -- 0 means SMS-DELIVER
        deliver_attrs,pdu=_consume_one_int(pdu)

        if deliver_attrs & 0x03 != 0:
            raise SmsParseException("Not a SMS-DELIVER, we ignore")

        self.tp_mms=deliver_attrs & 0x04 # more messages to send
        self.tp_sri=deliver_attrs & 0x20 # Status report indication
        tp_udhi=deliver_attrs & 0x40 # There is a user data header in the user data portion

        # get the sender number.
        # First the length which is given in 'nibbles' (half octets)
        # so divide by 2 and round up for odd
        sender_dec_len,pdu=_consume_one_int(pdu)
        sender_len=int(math.ceil(sender_dec_len/2.0))

        # next is sender id type
        sender_type,pdu=_consume(pdu,1)

        # now the number itself, (unparsed)
        num,pdu=_consume(pdu,sender_len)

        # now parse the number
        self.address=_parse_phone_num(sender_type,num)

        # now the protocol id
        # we only understand SMS (0)
        tp_pid,pdu=_consume_one_int(pdu)
        if tp_pid >= 32:
            # can't deal
            print "TP PID: %s" % tp_pid
            raise SmsParseException("Not SMS protocol, bailing")

        # get and interpret DCS (char encoding info)
        self.encoding,pdu=_consume(pdu,1,_read_dcs)
        if self.encoding not in ['gsm','ucs2']:
            raise SmsParseException("Don't understand short message encoding")

        # get and interpret timestamp (7 swapped-BCD octets)
        self.sent_ts,pdu=_consume(pdu,7,_read_ts)

        # ok, how long is ud?
        # note, if encoding is GSM this is num 7-bit septets
        # if ucs2, it's num bytes
        udl,pdu=_consume_one_int(pdu)

        # Now to deal with the User Data header!
        if tp_udhi:
            # yup, we got one, probably part of a 'concatenated short message',
            # what happens when you type too much text and your phone sends
            # multiple SMSs
            #
            # in fact this is the _only_ case we care about

            # get the header length
            udhl,pdu=_consume_decimal(pdu)

            # now loop through consuming the header
            # and looking to see if we are a csm
            i=0
            while i<udhl:
                # get info about the element: type, length, then data
                ie_type,pdu=_consume_one_int(pdu)
                ie_l,pdu=_consume_decimal(pdu)
                ie_d,pdu=_consume(pdu,ie_l)
                i+=(ie_l+2) # move index up for all bytes read
                if ie_type == 0x00:
                    # got csm info!
                    self.is_csm=True
                    (ref,self.csm_total,self.csm_seq),r=_consume_bytes(ie_d,3)
                    self.csm_ref=ref % 256 # the definition is 'modulo 256'

        # ok, done with header
        # now see if we are gsm, in which case we need to unpack bits
        if self.encoding=='gsm':
            # if we had a data header, we need to figure out padding
            if tp_udhi:
                # num septets * 7 bits minus
                # 8 * header length (+1 for length indicator octet)
                # mod'd by 7 to get the number of leftover padding bits
                padding=((7*udl) - (8*(udhl+1))) % 7
            else:
                padding=0
            # now decode
            try:
                self.text=_unpack_septets(pdu, padding).decode('gsm')
            except Exception, ex:
                # we have bogus data! But don't die
                # as we are used deeply embedded
                raise SmsParseException('GSM encoded data is invalid')
        else:
            # we are just good old UCS2
            # problem is, we don't necessarily know the byte order
            # some phones include it, some--including some
            # popular Nokia's _don't_, in which case it
            # seems they use big-endian...
            bom=pdu[0:4]
            decoded_text = ''
            if bom==codecs.BOM_UTF16_LE.encode('hex'):
                decoded_text=pdu[4:].decode('hex').decode('utf_16_le')
            else:
                decoded_text=pdu.decode('hex').decode('utf_16_be')
            self.text=decoded_text

        # some phones add a leading <cr> so strip it
        self.text=self.text.strip()
#
# And all the ugly helper functions
#
def _read_dcs(dcs):
# make an int for masking
dcs=int(dcs,16)
# for an SMS, as opposed to a 'voice mail waiting'
# indicator, first 4-bits must be zero
if dcs & 0xf0 != 0:
# not an SMS!
return None
dcs &= 0x0c # mask off everything but bits 3&2
if dcs==0:
return 'gsm'
elif dcs==8:
return 'ucs2'
# not a type we know about, but should never get here
return None
def _B(slot):
"""Convert slot to Byte boundary"""
return slot*2
def _consume(seq, num, func=None):
    """
    Consume 'num' BYTES (2 hex chars each) from the front of seq.

    Returns (consumed, remainder); if func is given it is applied to
    the consumed portion and its result is placed in tuple[0].
    """
    split_at = _B(num)
    taken = seq[:split_at]
    rest = seq[split_at:]
    if func is not None:
        taken = func(taken)
    return (taken, rest)
def _consume_decimal(seq):
"""read 2 chars as a decimal"""
return (int(seq[0:2],10),seq[2:])
def _consume_one_int(seq):
    """
    Consume one byte (2 hex chars) from the front of seq.
    Returns (int_value, remainder_of_seq).
    """
    values, remainder = _consume_bytes(seq, 1)
    return (values[0], remainder)
def _consume_bytes(seq,num=1):
"""
consumes bytes for num ints (e.g. 2-chars per byte)
coverts to int, returns tuple of ([byte...], remainder)
"""
bytes=[]
for i in range(0,_B(num),2):
bytes.append(int(seq[i:i+2],16))
return (bytes,seq[_B(num):])
def _twiddle(seq, decode=True):
seq=seq.upper() # just in case
result=list()
for i in range(0,len(seq)-1,2):
result.extend((seq[i+1],seq[i]))
if len(result)<len(seq) and not decode:
# encoding odd length
result.extend(('F',seq[-1]))
elif decode and result[-1:][0]=='F':
# strip trailing 'F'
result.pop()
return ''.join(result)
def _parse_phone_num(num_type, seq):
    """
    Decode a sender-address field given its type octet (hex string)
    and the raw (still-swapped) number payload.
    """
    if num_type[0] == 'D':
        # alphanumeric sender: payload is GSM 7-bit packed
        return _unpack_septets(seq).decode('gsm')

    # sender number is encoded in DECIMAL with each octet swapped, and
    # padded to even length with F
    # so 1 415 555 1212 is: 41 51 55 15 12 f2
    digits = _twiddle(seq)
    prefix = '+' if num_type[0] == '9' else ''
    return '%s%s' % (prefix, digits)
def _chop(seq, how_much):
    """Chop the given number of octets (2 hex chars each) off the front of seq."""
    # inlined _B: one octet == two chars
    return seq[2 * how_much:]
# matches a twiddled SMSC timestamp: YY MM DD hh mm ss TZ, 2 chars per field
TS_MATCHER=re.compile(r'^(..)(..)(..)(..)(..)(..)(..)$')
# sign bit of the raw (pre-swap) timezone semi-octet
# NOTE(review): defined but unused -- _read_ts compares the swapped value
# against 0x80 instead
TZ_SIGN_MASK=0x08
def _read_ts(seq):
ts=_twiddle(seq)
m = TS_MATCHER.match(ts)
if m is None:
print "TS not valid: %s" % ts
return datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
yr,mo,dy,hr,mi,se=[int(g) for g in m.groups()[:-1]]
# handle time-zone separately to deal with
# the MSB bit for negative
tz = int(m.groups()[-1],16)
neg = False
if tz>0x80:
neg = True
tz-=0x80
# now convert BACK to dec rep,
# I know, ridiculous, but that's
# the format...
tz = int('%02X' % tz)
tz_offset = tz/4
if neg:
tz_offset = -tz_offset
tz_delta = datetime.timedelta(hours=tz_offset)
# year is 2 digit! Yeah! Y2K problem again!!
if yr<90:
yr+=2000
else:
yr+=1900
# python sucks with timezones,
# so create UTC not using this offset
dt = None
try:
# parse TS and adjust for TZ to get into UTC
dt = datetime.datetime(yr,mo,dy,hr,mi,se, tzinfo=pytz.utc) - tz_delta
except ValueError, ex:
# Timestamp was bogus, set it to UTC now
dt = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
return dt
def _to_binary(n):
s = ""
for i in range(8):
s = ("%1d" % (n & 1)) + s
n >>= 1
return s
def _unpack_septets(seq,padding=0):
    """
    Unpack GSM 7-bit packed user data (hex string) into a string of
    7-bit character values; 'padding' bits are discarded first.

    this function taken from:
    http://offog.org/darcs/misccode/desms.py
    Thank you Adam Sampson <ats@offog.org>!
    """
    # Unpack 7-bit characters
    # NOTE: len(seq)/2 relies on Python 2 integer division
    msgbytes,r = _consume_bytes(seq,len(seq)/2)
    msgbytes.reverse()
    # build one long bit string (last byte first), trim the padding,
    # then peel 7 bits at a time off the low end
    asbinary = ''.join(map(_to_binary, msgbytes))
    if padding != 0:
        asbinary = asbinary[:-padding]
    chars = []
    while len(asbinary) >= 7:
        chars.append(int(asbinary[-7:], 2))
        asbinary = asbinary[:-7]
    return "".join(map(chr, chars))
def _pack_septets(str, padding=0):
bytes=[ord(c) for c in str]
bytes.reverse()
asbinary = ''.join([_to_binary(b)[1:] for b in bytes])
# add padding
for i in range(padding):
asbinary+='0'
# zero extend last octet if needed
extra = len(asbinary) % 8
if extra>0:
for i in range(8-extra):
asbinary='0'+asbinary
# convert back to bytes
bytes=[]
for i in range(0,len(asbinary),8):
bytes.append(int(asbinary[i:i+8],2))
bytes.reverse()
return ''.join([chr(b) for b in bytes])
if __name__ == "__main__":
# poor man's unit tests
pdus = [
"0791227167830001040C912271479288270600902132210403001D31D90CE40E87E9F4FAF9CD06B9C3E6F75B5EA6BFE7F4B01B0402"
"07912180958729F6040B814151733717F500009011709055902B0148",
"07912180958729F6400B814151733717F500009070208044148AA0050003160201986FF719C47EBBCF20F6DB7D06B1DFEE3388FD769F41ECB7FB0C62BFDD6710FBED3E83D8ECB73B0D62BFDD67109BFD76A741613719C47EBBCF20F6DB7D06BCF61BC466BF41ECF719C47EBBCF20F6D",
"07912180958729F6440B814151733717F500009070207095828AA00500030E0201986FF719C47EBBCF20F6DB7D06B1DFEE3388FD769F41ECB7FB0C62BFDD6710FBED3E83D8ECB7",
"07912180958729F6040B814151733717F500009070103281418A09D93728FFDE940303",
"07912180958729F6040B814151733717F500009070102230438A02D937",
"0791227167830001040C912271271640910008906012024514001C002E004020AC00A300680065006C006C006F002000E900EC006B00F0",
"07917283010010F5040BC87238880900F10000993092516195800AE8329BFD4697D9EC37",
"0791448720900253040C914497035290960000500151614414400DD4F29C9E769F41E17338ED06",
"0791448720003023440C91449703529096000050015132532240A00500037A020190E9339A9D3EA3E920FA1B1466B341E472193E079DD3EE73D85DA7EB41E7B41C1407C1CBF43228CC26E3416137390F3AABCFEAB3FAAC3EABCFEAB3FAAC3EABCFEAB3FAAC3EABCFEAB3FADC3EB7CFED73FBDC3EBF5D4416D9457411596457137D87B7E16438194E86BBCF6D16D9055D429548A28BE822BA882E6370196C2A8950E291E822BA88",
"0791448720003023440C91449703529096000050015132537240310500037A02025C4417D1D52422894EE5B17824BA8EC423F1483C129BC725315464118FCDE011247C4A8B44",
"07914477790706520414D06176198F0EE361F2321900005001610013334014C324350B9287D12079180D92A3416134480E",
"0791448720003023440C91449703529096000050016121855140A005000301060190F5F31C447F83C8E5327CEE0221EBE73988FE0691CB65F8DC05028190F5F31C447F83C8E5327CEE028140C8FA790EA2BF41E472193E7781402064FD3C07D1DF2072B90C9FBB402010B27E9E83E86F10B95C86CF5D2064FD3C07D1DF2072B90C9FBB40C8FA790EA2BF41E472193E7781402064FD3C07D1DF2072B90C9FBB402010B27E9E83E8",
"0791448720003023440C91449703529096000050016121850240A0050003010602DE2072B90C9FBB402010B27E9E83E86F10B95C86CF5D201008593FCF41F437885C2EC3E72E100884AC9FE720FA1B442E97E1731708593FCF41F437885C2EC3E72E100884AC9FE720FA1B442E97E17317080442D6CF7310FD0D2297CBF0B90B040221EBE73988FE0691CB65F8DC05028190F5F31C447F83C8E5327CEE028140C8FA790EA2BF41",
"0791448720003023440C91449703529096000050016121854240A0050003010603C8E5327CEE0221EBE73988FE0691CB65F8DC05028190F5F31C447F83C8E5327CEE028140C8FA790EA2BF41E472193E7781402064FD3C07D1DF2072B90C9FBB402010B27E9E83E86F10B95C86CF5D201008593FCF41F437885C2EC3E72E10B27E9E83E86F10B95C86CF5D201008593FCF41F437885C2EC3E72E100884AC9FE720FA1B442E97E1",
"0791448720003023400C91449703529096000050016121853340A005000301060540C8FA790EA2BF41E472193E7781402064FD3C07D1DF2072B90C9FBB402010B27E9E83E86F10B95C86CF5D201008593FCF41F437885C2EC3E72E100884AC9FE720FA1B442E97E17317080442D6CF7310FD0D2297CBF0B90B84AC9FE720FA1B442E97E17317080442D6CF7310FD0D2297CBF0B90B040221EBE73988FE0691CB65F8DC05028190",
"0791448720003023440C914497035290960000500161218563402A050003010606EAE73988FE0691CB65F8DC05028190F5F31C447F83C8E5327CEE0281402010",
]
"""
print
print '\n'.join([
p.dump() for p in get_outbound_pdus(
u'\u5c71hellohello hellohello hellohello hellohello hellohello hellohello hellohello hellohello hellohello hellohello hellohello hellohello hellohello hellohello hellohello hellohello',
'+14153773715'
)
])
"""
for p in pdus:
print '\n-------- Received ----------\nPDU: %s\n' % p
rp = ReceivedGsmPdu(p)
print rp.dump()
op = get_outbound_pdus(rp.text, rp.address)[0]
print '\nOut ------> \n'
print op.dump()
print '-----------------------------'
| |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import pytz
from sqlalchemy import DDL, orm
from sqlalchemy.dialects.postgresql import ARRAY, JSONB, array
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import column_property
from sqlalchemy.sql import exists, func, literal, select
from indico.core import signals
from indico.core.config import config
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy.attachments import AttachedItemsMixin
from indico.core.db.sqlalchemy.descriptions import DescriptionMixin, RenderMode
from indico.core.db.sqlalchemy.protection import ProtectionManagersMixin, ProtectionMode
from indico.core.db.sqlalchemy.searchable_titles import SearchableTitleMixin
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.util.date_time import get_display_tz
from indico.util.decorators import strict_classproperty
from indico.util.i18n import _
from indico.util.locators import locator_property
from indico.util.string import MarkdownText, format_repr, return_ascii, text_to_repr
from indico.util.struct.enum import RichIntEnum
from indico.web.flask.util import url_for
def _get_next_position(context):
    """Return the next position for a new category inside its parent."""
    parent_id = context.current_parameters['parent_id']
    query = db.session.query(db.func.max(Category.position)).filter_by(parent_id=parent_id)
    current_max = query.one()[0]
    return (current_max or 0) + 1
def _get_default_event_themes():
    """Return the default event themes for meetings and lectures."""
    from indico.modules.events.layout import theme_settings
    return {event_type: theme_settings.defaults[event_type]
            for event_type in ('meeting', 'lecture')}
class EventMessageMode(RichIntEnum):
    """Display mode of a category's event message (disabled or styled banner)."""
    __titles__ = [_('None'), _('Info'), _('Warning'), _('Danger')]
    disabled = 0
    info = 1
    warning = 2
    danger = 3
class Category(SearchableTitleMixin, DescriptionMixin, ProtectionManagersMixin, AttachedItemsMixin, db.Model):
    """An Indico category"""

    __tablename__ = 'categories'
    disallowed_protection_modes = frozenset()
    inheriting_have_acl = True
    possible_render_modes = {RenderMode.markdown}
    default_render_mode = RenderMode.markdown
    allow_no_access_contact = True
    # FK column used by AttachedItemsMixin to link attachment folders
    ATTACHMENT_FOLDER_ID_COLUMN = 'category_id'

    @strict_classproperty
    @classmethod
    def __auto_table_args(cls):
        # DB-level invariants: icon/logo blob and its metadata are set or
        # unset together; only the root (id 0) has no parent; the root can
        # neither be deleted nor inherit protection; visibility is NULL
        # (unlimited) or a positive depth.
        return (db.CheckConstraint("(icon IS NULL) = (icon_metadata::text = 'null')", 'valid_icon'),
                db.CheckConstraint("(logo IS NULL) = (logo_metadata::text = 'null')", 'valid_logo'),
                db.CheckConstraint("(parent_id IS NULL) = (id = 0)", 'valid_parent'),
                db.CheckConstraint("(id != 0) OR NOT is_deleted", 'root_not_deleted'),
                db.CheckConstraint("(id != 0) OR (protection_mode != {})".format(ProtectionMode.inheriting),
                                   'root_not_inheriting'),
                db.CheckConstraint('visibility IS NULL OR visibility > 0', 'valid_visibility'),
                {'schema': 'categories'})

    @declared_attr
    def __table_args__(cls):
        return auto_table_args(cls)

    id = db.Column(
        db.Integer,
        primary_key=True
    )
    parent_id = db.Column(
        db.Integer,
        db.ForeignKey('categories.categories.id'),
        index=True,
        nullable=True
    )
    is_deleted = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    # position among siblings; default appends after the current maximum
    position = db.Column(
        db.Integer,
        nullable=False,
        default=_get_next_position
    )
    # NULL means visible everywhere; otherwise a positive depth
    visibility = db.Column(
        db.Integer,
        nullable=True,
        default=None
    )
    icon_metadata = db.Column(
        JSONB,
        nullable=False,
        default=lambda: None
    )
    icon = db.deferred(db.Column(
        db.LargeBinary,
        nullable=True
    ))
    logo_metadata = db.Column(
        JSONB,
        nullable=False,
        default=lambda: None
    )
    logo = db.deferred(db.Column(
        db.LargeBinary,
        nullable=True
    ))
    timezone = db.Column(
        db.String,
        nullable=False,
        default=lambda: config.DEFAULT_TIMEZONE
    )
    default_event_themes = db.Column(
        JSONB,
        nullable=False,
        default=_get_default_event_themes
    )
    event_creation_restricted = db.Column(
        db.Boolean,
        nullable=False,
        default=True
    )
    event_creation_notification_emails = db.Column(
        ARRAY(db.String),
        nullable=False,
        default=[]
    )
    event_message_mode = db.Column(
        PyIntEnum(EventMessageMode),
        nullable=False,
        default=EventMessageMode.disabled
    )
    # raw markdown text; exposed via the `event_message` hybrid below
    _event_message = db.Column(
        'event_message',
        db.Text,
        nullable=False,
        default=''
    )
    suggestions_disabled = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    notify_managers = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    default_ticket_template_id = db.Column(
        db.ForeignKey('indico.designer_templates.id'),
        nullable=True,
        index=True
    )

    # non-deleted children, ordered by position; also provides `parent`
    children = db.relationship(
        'Category',
        order_by='Category.position',
        primaryjoin=(id == db.remote(parent_id)) & ~db.remote(is_deleted),
        lazy=True,
        backref=db.backref(
            'parent',
            primaryjoin=(db.remote(id) == parent_id),
            lazy=True
        )
    )
    acl_entries = db.relationship(
        'CategoryPrincipal',
        backref='category',
        cascade='all, delete-orphan',
        collection_class=set
    )
    default_ticket_template = db.relationship(
        'DesignerTemplate',
        lazy=True,
        foreign_keys=default_ticket_template_id,
        backref='default_ticket_template_of'
    )

    # column properties:
    # - deep_events_count

    # relationship backrefs:
    # - attachment_folders (AttachmentFolder.category)
    # - designer_templates (DesignerTemplate.category)
    # - events (Event.category)
    # - favorite_of (User.favorite_categories)
    # - legacy_mapping (LegacyCategoryMapping.category)
    # - parent (Category.children)
    # - settings (CategorySetting.category)
    # - suggestions (SuggestedCategory.category)

    @hybrid_property
    def event_message(self):
        # wrap the raw stored text for markdown rendering
        return MarkdownText(self._event_message)

    @event_message.setter
    def event_message(self, value):
        self._event_message = value

    @event_message.expression
    def event_message(cls):
        # in SQL expressions use the raw column
        return cls._event_message

    @return_ascii
    def __repr__(self):
        return format_repr(self, 'id', is_deleted=False, _text=text_to_repr(self.title, max_length=75))

    @property
    def protection_parent(self):
        """The object protection settings are inherited from (None for the root)."""
        return self.parent if not self.is_root else None

    @locator_property
    def locator(self):
        return {'category_id': self.id}

    @classmethod
    def get_root(cls):
        """Get the root category"""
        return cls.query.filter(cls.is_root).one()

    @property
    def url(self):
        """The URL of the category display page."""
        return url_for('categories.display', self)

    @property
    def has_only_events(self):
        """Whether the category contains events but no subcategories."""
        return self.has_events and not self.children

    @hybrid_property
    def is_root(self):
        """Whether this is the (parentless) root category."""
        return self.parent_id is None

    @is_root.expression
    def is_root(cls):
        return cls.parent_id.is_(None)

    @property
    def is_empty(self):
        """Whether the category contains neither subcategories nor events."""
        return not self.deep_children_count and not self.deep_events_count

    @property
    def has_icon(self):
        return self.icon_metadata is not None

    @property
    def has_effective_icon(self):
        """Whether the category has an icon, possibly inherited from a parent."""
        return self.effective_icon_data['metadata'] is not None

    @property
    def has_logo(self):
        return self.logo_metadata is not None

    @property
    def tzinfo(self):
        """The pytz tzinfo for the category's configured timezone."""
        return pytz.timezone(self.timezone)

    @property
    def display_tzinfo(self):
        """The tzinfo of the category or the one specified by the user"""
        return get_display_tz(self, as_timezone=True)

    def can_create_events(self, user):
        """Check whether the user can create events in the category."""
        # if creation is not restricted anyone who can access the category
        # can also create events in it, otherwise only people with the
        # creation role can
        return user and ((not self.event_creation_restricted and self.can_access(user)) or
                         self.can_manage(user, permission='create'))

    def move(self, target):
        """Move the category into another category."""
        assert not self.is_root
        old_parent = self.parent
        # append at the end of the target's children
        self.position = (max(x.position for x in target.children) + 1) if target.children else 1
        self.parent = target
        db.session.flush()
        signals.category.moved.send(self, old_parent=old_parent)

    @classmethod
    def get_tree_cte(cls, col='id'):
        """Create a CTE for the category tree.

        The CTE contains the following columns:

        - ``id`` -- the category id
        - ``path`` -- an array containing the path from the root to
                      the category itself
        - ``is_deleted`` -- whether the category is deleted

        :param col: The name of the column to use in the path or a
                    callable receiving the category alias that must
                    return the expression used for the 'path'
                    retrieved by the CTE.
        """
        cat_alias = db.aliased(cls)
        if callable(col):
            path_column = col(cat_alias)
        else:
            path_column = getattr(cat_alias, col)
        cte_query = (select([cat_alias.id, array([path_column]).label('path'), cat_alias.is_deleted])
                     .where(cat_alias.parent_id.is_(None))
                     .cte(recursive=True))
        rec_query = (select([cat_alias.id,
                             cte_query.c.path.op('||')(path_column),
                             cte_query.c.is_deleted | cat_alias.is_deleted])
                     .where(cat_alias.parent_id == cte_query.c.id))
        return cte_query.union_all(rec_query)

    @classmethod
    def get_protection_cte(cls):
        # CTE resolving the effective protection mode of every category:
        # inheriting categories take the mode resolved for their parent.
        cat_alias = db.aliased(cls)
        cte_query = (select([cat_alias.id, cat_alias.protection_mode])
                     .where(cat_alias.parent_id.is_(None))
                     .cte(recursive=True))
        rec_query = (select([cat_alias.id,
                             db.case({ProtectionMode.inheriting.value: cte_query.c.protection_mode},
                                     else_=cat_alias.protection_mode, value=cat_alias.protection_mode)])
                     .where(cat_alias.parent_id == cte_query.c.id))
        return cte_query.union_all(rec_query)

    def get_protection_parent_cte(self):
        # CTE walking down from this category; for each descendant it
        # carries the id of the category protection is effectively
        # inherited from (this category for inheriting chains).
        cte_query = (select([Category.id, db.cast(literal(None), db.Integer).label('protection_parent')])
                     .where(Category.id == self.id)
                     .cte(recursive=True))
        rec_query = (select([Category.id,
                             db.case({ProtectionMode.inheriting.value: func.coalesce(cte_query.c.protection_parent,
                                                                                     self.id)},
                                     else_=Category.id, value=Category.protection_mode)])
                     .where(Category.parent_id == cte_query.c.id))
        return cte_query.union_all(rec_query)

    @classmethod
    def get_icon_data_cte(cls):
        # CTE resolving the effective icon of every category: a category
        # whose icon_metadata is JSON 'null' inherits the parent's icon
        # metadata and source id.
        cat_alias = db.aliased(cls)
        cte_query = (select([cat_alias.id, cat_alias.id.label('source_id'), cat_alias.icon_metadata])
                     .where(cat_alias.parent_id.is_(None))
                     .cte(recursive=True))
        rec_query = (select([cat_alias.id,
                             db.case({'null': cte_query.c.source_id}, else_=cat_alias.id,
                                     value=db.func.jsonb_typeof(cat_alias.icon_metadata)),
                             db.case({'null': cte_query.c.icon_metadata}, else_=cat_alias.icon_metadata,
                                     value=db.func.jsonb_typeof(cat_alias.icon_metadata))])
                     .where(cat_alias.parent_id == cte_query.c.id))
        return cte_query.union_all(rec_query)

    @property
    def deep_children_query(self):
        """Get a query object for all subcategories.

        This includes subcategories at any level of nesting.
        """
        cte = Category.get_tree_cte()
        return (Category.query
                .join(cte, Category.id == cte.c.id)
                .filter(cte.c.path.contains([self.id]),
                        cte.c.id != self.id,
                        ~cte.c.is_deleted))

    @staticmethod
    def _get_chain_query(start_criterion):
        # Query returning the chain from the root down to the category
        # matching `start_criterion` (level 0), ordered root-first.
        cte_query = (select([Category.id, Category.parent_id, literal(0).label('level')])
                     .where(start_criterion)
                     .cte('category_chain', recursive=True))
        parent_query = (select([Category.id, Category.parent_id, cte_query.c.level + 1])
                        .where(Category.id == cte_query.c.parent_id))
        cte_query = cte_query.union_all(parent_query)
        return Category.query.join(cte_query, Category.id == cte_query.c.id).order_by(cte_query.c.level.desc())

    @property
    def chain_query(self):
        """Get a query object for the category chain.

        The query retrieves the root category first and then all the
        intermediate categories up to (and including) this category.
        """
        return self._get_chain_query(Category.id == self.id)

    @property
    def parent_chain_query(self):
        """Get a query object for the category's parent chain.

        The query retrieves the root category first and then all the
        intermediate categories up to (excluding) this category.
        """
        return self._get_chain_query(Category.id == self.parent_id)

    def nth_parent(self, n_categs, fail_on_overflow=True):
        """Return the nth parent of the category.

        :param n_categs: the number of categories to go up
        :param fail_on_overflow: whether to fail if we try to go above the root category
        :return: `Category` object or None (only if ``fail_on_overflow`` is not set)
        """
        if n_categs == 0:
            return self
        chain = self.parent_chain_query.all()
        assert n_categs >= 0
        if n_categs > len(chain):
            if fail_on_overflow:
                raise IndexError("Root category has no parent!")
            else:
                return None
        # chain is root-first, so index from the end to go upwards
        return chain[::-1][n_categs - 1]

    def is_descendant_of(self, categ):
        """Whether this category is (transitively) inside `categ`."""
        return categ != self and self.parent_chain_query.filter(Category.id == categ.id).has_rows()

    @property
    def visibility_horizon_query(self):
        """Get a query object that returns the highest category this one is visible from."""
        # `n` carries the remaining visibility budget while walking up;
        # NULL means unlimited.
        cte_query = (select([Category.id, Category.parent_id,
                             db.case([(Category.visibility.is_(None), None)],
                                     else_=(Category.visibility - 1)).label('n'),
                             literal(0).label('level')])
                     .where(Category.id == self.id)
                     .cte('visibility_horizon', recursive=True))
        parent_query = (select([Category.id, Category.parent_id,
                                db.case([(Category.visibility.is_(None) & cte_query.c.n.is_(None), None)],
                                        else_=db.func.least(Category.visibility, cte_query.c.n) - 1),
                                cte_query.c.level + 1])
                        .where(db.and_(Category.id == cte_query.c.parent_id,
                                       (cte_query.c.n > 0) | cte_query.c.n.is_(None))))
        cte_query = cte_query.union_all(parent_query)
        return db.session.query(cte_query.c.id, cte_query.c.n).order_by(cte_query.c.level.desc()).limit(1)

    @property
    def own_visibility_horizon(self):
        """Get the highest category this one would like to be visible from (configured visibility)."""
        if self.visibility is None:
            return Category.get_root()
        else:
            return self.nth_parent(self.visibility - 1)

    @property
    def real_visibility_horizon(self):
        """Get the highest category this one is actually visible from (as limited by categories above)."""
        horizon_id, final_visibility = self.visibility_horizon_query.one()
        if final_visibility is not None and final_visibility < 0:
            return None  # Category is invisible
        return Category.get(horizon_id)

    @staticmethod
    def get_visible_categories_cte(category_id):
        """
        Get a sqlalchemy select for the visible categories within
        the given category, including the category itself.
        """
        cte_query = (select([Category.id, literal(0).label('level')])
                     .where((Category.id == category_id) & (Category.visibility.is_(None) | (Category.visibility > 0)))
                     .cte(recursive=True))
        parent_query = (select([Category.id, cte_query.c.level + 1])
                        .where(db.and_(Category.parent_id == cte_query.c.id,
                                       db.or_(Category.visibility.is_(None),
                                              Category.visibility > cte_query.c.level + 1))))
        return cte_query.union_all(parent_query)

    @property
    def visible_categories_query(self):
        """
        Get a query object for the visible categories within
        this category, including the category itself.
        """
        cte_query = Category.get_visible_categories_cte(self.id)
        return Category.query.join(cte_query, Category.id == cte_query.c.id)

    @property
    def icon_url(self):
        """Get the HTTP URL of the icon."""
        return url_for('categories.display_icon', self, slug=self.icon_metadata['hash'])

    @property
    def effective_icon_url(self):
        """Get the HTTP URL of the icon (possibly inherited)."""
        data = self.effective_icon_data
        return url_for('categories.display_icon', category_id=data['source_id'], slug=data['metadata']['hash'])

    @property
    def logo_url(self):
        """Get the HTTP URL of the logo."""
        return url_for('categories.display_logo', self, slug=self.logo_metadata['hash'])
# presumably provided by ProtectionManagersMixin; wires up the
# protection-related event handlers for this model (TODO confirm)
Category.register_protection_events()
@listens_for(orm.mapper, 'after_configured', once=True)
def _mappers_configured():
    """Attach subquery-based column properties to ``Category``.

    Runs exactly once, after all mappers are configured, because the
    properties reference other mapped classes (``Event``) as well as
    ``Category`` itself.
    """
    # We create some column properties here since even with `declared_attr`
    # the code runs at import time, making it impossible/risky to import other
    # modules or reference the object itself in there.
    # The advantage of those column properties is that they behave like regular
    # (read-only) columns even though they are generated by subqueries. This
    # allows them to be loaded together with the rest of the data, avoiding
    # extra queries. To load them automatically you need to undefer them using
    # the `undefer` query option, e.g. `.options(undefer('chain_titles'))`.
    from indico.modules.events import Event

    # Category.effective_protection_mode -- the effective protection mode
    # (public/protected) of the category, even if it's inheriting it from its
    # parent category
    cte = Category.get_protection_cte()
    query = select([cte.c.protection_mode]).where(cte.c.id == Category.id).correlate_except(cte)
    Category.effective_protection_mode = column_property(query, deferred=True, expire_on_flush=False)

    # Category.effective_icon_data -- the effective icon data of the category,
    # either set on the category itself or inherited from it
    cte = Category.get_icon_data_cte()
    query = (select([db.func.json_build_object('source_id', cte.c.source_id,
                                               'metadata', cte.c.icon_metadata)])
             .where(cte.c.id == Category.id)
             .correlate_except(cte))
    Category.effective_icon_data = column_property(query, deferred=True)

    # Category.event_count -- the number of events in the category itself,
    # excluding deleted events
    query = (select([db.func.count(Event.id)])
             .where((Event.category_id == Category.id) & ~Event.is_deleted)
             .correlate_except(Event))
    Category.event_count = column_property(query, deferred=True)

    # Category.has_events -- whether the category itself contains any
    # events, excluding deleted events
    query = (exists([1])
             .where((Event.category_id == Category.id) & ~Event.is_deleted)
             .correlate_except(Event))
    Category.has_events = column_property(query, deferred=True)

    # Category.chain_titles -- a list of the titles in the parent chain,
    # starting with the root category down to the current category.
    cte = Category.get_tree_cte('title')
    query = select([cte.c.path]).where(cte.c.id == Category.id).correlate_except(cte)
    Category.chain_titles = column_property(query, deferred=True)

    # Category.chain -- a list of the ids and titles in the parent
    # chain, starting with the root category down to the current
    # category. Each chain entry is a dict containing 'id' and `title`.
    cte = Category.get_tree_cte(lambda cat: db.func.json_build_object('id', cat.id, 'title', cat.title))
    query = select([cte.c.path]).where(cte.c.id == Category.id).correlate_except(cte)
    Category.chain = column_property(query, deferred=True)

    # Category.deep_events_count -- the number of events in the category
    # or any child category (excluding deleted events)
    cte = Category.get_tree_cte()
    crit = db.and_(cte.c.id == Event.category_id,
                   cte.c.path.contains(array([Category.id])),
                   ~cte.c.is_deleted,
                   ~Event.is_deleted)
    query = select([db.func.count()]).where(crit).correlate_except(Event)
    Category.deep_events_count = column_property(query, deferred=True)

    # Category.deep_children_count -- the number of subcategories in the
    # category or any child category (excluding deleted ones)
    cte = Category.get_tree_cte()
    crit = db.and_(cte.c.path.contains(array([Category.id])),
                   cte.c.id != Category.id, ~cte.c.is_deleted)
    query = select([db.func.count()]).where(crit).correlate_except(cte)
    Category.deep_children_count = column_property(query, deferred=True)
@listens_for(Category.__table__, 'after_create')
def _add_deletion_consistency_trigger(target, conn, **kw):
    """Install the DB trigger keeping `is_deleted` consistent in the tree."""
    ddl_stmt = """
        CREATE CONSTRAINT TRIGGER consistent_deleted
        AFTER INSERT OR UPDATE OF parent_id, is_deleted
        ON {table}
        DEFERRABLE INITIALLY DEFERRED
        FOR EACH ROW
        EXECUTE PROCEDURE categories.check_consistency_deleted();
    """.format(table=target.fullname)
    DDL(ddl_stmt).execute(conn)
@listens_for(Category.__table__, 'after_create')
def _add_cycle_check_trigger(target, conn, **kw):
    """Install the DB trigger preventing cycles in the category tree."""
    ddl_stmt = """
        CREATE CONSTRAINT TRIGGER no_cycles
        AFTER INSERT OR UPDATE OF parent_id
        ON {table}
        NOT DEFERRABLE
        FOR EACH ROW
        EXECUTE PROCEDURE categories.check_cycles();
    """.format(table=target.fullname)
    DDL(ddl_stmt).execute(conn)
| |
import redash.models
from redash.models import db
from redash.utils import gen_query_hash, utcnow
from redash.utils.configuration import ConfigurationContainer
from redash.permissions import ACCESS_TYPE_MODIFY
class ModelFactory(object):
    """Create instances of *model* with default attributes.

    Defaults may be plain values or zero-argument callables; callables are
    invoked at creation time so every instance gets a fresh value.
    """

    def __init__(self, model, **kwargs):
        self.model = model
        self.kwargs = kwargs

    def _get_kwargs(self, override_kwargs):
        # Overrides win over the factory defaults.
        merged = self.kwargs.copy()
        merged.update(override_kwargs)
        # Resolve lazy (callable) defaults into concrete values.
        for key in list(merged):
            value = merged[key]
            if callable(value):
                merged[key] = value()
        return merged

    def create(self, **override_kwargs):
        """Instantiate the model, persist it and return it."""
        instance = self.model(**self._get_kwargs(override_kwargs))
        db.session.add(instance)
        db.session.commit()
        return instance
class Sequence(object):
    """Callable that formats *string* with an increasing counter (1, 2, ...)."""

    def __init__(self, string):
        self.sequence = 0
        self.string = string

    def __call__(self):
        # Bump first so the initial call yields 1, not 0.
        self.sequence = self.sequence + 1
        return self.string.format(self.sequence)
# Module-level factories with sensible defaults for each redash model.
# `Sequence` defaults keep unique-constrained fields (emails, slugs) distinct
# across repeated creations; callable defaults are re-evaluated per instance.
user_factory = ModelFactory(redash.models.User,
                            name='John Doe', email=Sequence('test{}@example.com'),
                            group_ids=[2],
                            org_id=1)

org_factory = ModelFactory(redash.models.Organization,
                           name=Sequence("Org {}"),
                           slug=Sequence("org{}.example.com"),
                           settings={})

data_source_factory = ModelFactory(redash.models.DataSource,
                                   name=Sequence('Test {}'),
                                   type='pg',
                                   # If we don't use lambda here it will reuse the same options between tests:
                                   options=lambda: ConfigurationContainer.from_json('{"dbname": "test"}'),
                                   org_id=1)

dashboard_factory = ModelFactory(redash.models.Dashboard,
                                 name='test',
                                 user=user_factory.create,
                                 layout='[]',
                                 is_draft=False,
                                 # NOTE(review): other factories use org_id=1; passing `org=1`
                                 # gives an int to a relationship attribute -- confirm intended.
                                 org=1)

api_key_factory = ModelFactory(redash.models.ApiKey,
                               object=dashboard_factory.create)

query_factory = ModelFactory(redash.models.Query,
                             name='Query',
                             description='',
                             query_text='SELECT 1',
                             user=user_factory.create,
                             is_archived=False,
                             is_draft=False,
                             schedule=None,
                             data_source=data_source_factory.create,
                             org_id=1)

query_with_params_factory = ModelFactory(redash.models.Query,
                                         name='New Query with Params',
                                         description='',
                                         query_text='SELECT {{param1}}',
                                         user=user_factory.create,
                                         is_archived=False,
                                         is_draft=False,
                                         schedule=None,
                                         data_source=data_source_factory.create,
                                         org_id=1)

access_permission_factory = ModelFactory(redash.models.AccessPermission,
                                         object_id=query_factory.create,
                                         object_type=redash.models.Query.__name__,
                                         access_type=ACCESS_TYPE_MODIFY,
                                         grantor=user_factory.create,
                                         grantee=user_factory.create)

alert_factory = ModelFactory(redash.models.Alert,
                             name=Sequence('Alert {}'),
                             query_rel=query_factory.create,
                             user=user_factory.create,
                             options={})

query_result_factory = ModelFactory(redash.models.QueryResult,
                                    data='{"columns":{}, "rows":[]}',
                                    runtime=1,
                                    retrieved_at=utcnow,
                                    query_text="SELECT 1",
                                    query_hash=gen_query_hash('SELECT 1'),
                                    data_source=data_source_factory.create,
                                    org_id=1)

visualization_factory = ModelFactory(redash.models.Visualization,
                                     type='CHART',
                                     query_rel=query_factory.create,
                                     name='Chart',
                                     description='',
                                     options='{}')

widget_factory = ModelFactory(redash.models.Widget,
                              type='chart',
                              width=1,
                              options='{}',
                              dashboard=dashboard_factory.create,
                              visualization=visualization_factory.create)

destination_factory = ModelFactory(redash.models.NotificationDestination,
                                   org_id=1,
                                   user=user_factory.create,
                                   name=Sequence('Destination {}'),
                                   type='slack',
                                   options=ConfigurationContainer.from_json('{"url": "https://www.slack.com"}'))

alert_subscription_factory = ModelFactory(redash.models.AlertSubscription,
                                          user=user_factory.create,
                                          destination=destination_factory.create,
                                          alert=alert_factory.create)

query_snippet_factory = ModelFactory(redash.models.QuerySnippet,
                                     trigger=Sequence('trigger {}'),
                                     description='description',
                                     snippet='snippet')
class Factory(object):
    """Convenience wrapper bundling the module-level factories.

    Lazily creates a default user and data source tied to the default
    organization returned by ``init_db()``.
    """

    def __init__(self):
        self.org, self.admin_group, self.default_group = redash.models.init_db()
        self._data_source = None
        self._user = None

    @property
    def user(self):
        # Lazily-created default user, cached on the instance.
        if self._user is None:
            self._user = self.create_user()
            # Test setup creates users, they need to be in the db by the time
            # the handler's db transaction starts.
            db.session.commit()
        return self._user

    @property
    def data_source(self):
        # Lazily-created default data source, linked to the default group.
        if self._data_source is None:
            self._data_source = data_source_factory.create(org=self.org)
            db.session.add(redash.models.DataSourceGroup(
                group=self.default_group,
                data_source=self._data_source))
        return self._data_source

    def _init_org(self):
        # NOTE(review): `self._org` is never initialized (__init__ sets
        # `self.org`), so calling this would raise AttributeError.  Looks
        # like leftover/dead code -- confirm before relying on it.
        if self._org is None:
            self._org, self._admin_group, self._default_group = redash.models.init_db()
            self.org.domain = 'org0.example.org'

    def create_org(self, **kwargs):
        """Create an org plus its builtin 'default' and 'admin' groups."""
        org = org_factory.create(**kwargs)
        self.create_group(org=org, type=redash.models.Group.BUILTIN_GROUP, name="default")
        self.create_group(org=org, type=redash.models.Group.BUILTIN_GROUP, name="admin",
                          permissions=["admin"])
        return org

    def create_user(self, **kwargs):
        """Create a user in the default group of its org."""
        args = {
            'org': self.org,
            'group_ids': [self.default_group.id]
        }
        if 'org' in kwargs:
            # A custom org implies that org's default group, not ours.
            args['group_ids'] = [kwargs['org'].default_group.id]
        args.update(kwargs)
        return user_factory.create(**args)

    def create_admin(self, **kwargs):
        """Create a user belonging to the admin group as well."""
        args = {
            'org': self.org,
            'group_ids': [self.admin_group.id, self.default_group.id]
        }
        if 'org' in kwargs:
            args['group_ids'] = [kwargs['org'].default_group.id, kwargs['org'].admin_group.id]
        args.update(kwargs)
        return user_factory.create(**args)

    def create_group(self, **kwargs):
        # NOTE(review): unlike the other helpers this does not persist the
        # group (no factory/session.add) -- callers must commit it themselves.
        args = {
            'name': 'Group',
            'org': self.org
        }
        args.update(kwargs)
        g = redash.models.Group(**args)
        return g

    def create_alert(self, **kwargs):
        """Create an alert owned by the default user and a fresh query."""
        args = {
            'user': self.user,
            'query_rel': self.create_query()
        }
        args.update(**kwargs)
        return alert_factory.create(**args)

    def create_alert_subscription(self, **kwargs):
        """Subscribe the default user to a freshly created alert."""
        args = {
            'user': self.user,
            'alert': self.create_alert()
        }
        args.update(**kwargs)
        return alert_subscription_factory.create(**args)

    def create_data_source(self, **kwargs):
        """Create a data source, optionally linked to a group."""
        group = None
        if 'group' in kwargs:
            group = kwargs.pop('group')
        args = {
            'org': self.org
        }
        args.update(kwargs)

        # When only a group is given, inherit the org from that group.
        if group and 'org' not in kwargs:
            args['org'] = group.org

        view_only = args.pop('view_only', False)
        data_source = data_source_factory.create(**args)

        if group:
            db.session.add(redash.models.DataSourceGroup(
                group=group,
                data_source=data_source,
                view_only=view_only))

        return data_source

    def create_dashboard(self, **kwargs):
        """Create a dashboard owned by the default user/org."""
        args = {
            'user': self.user,
            'org': self.org
        }
        args.update(kwargs)
        return dashboard_factory.create(**args)

    def create_query(self, **kwargs):
        """Create a query against the default data source."""
        args = {
            'user': self.user,
            'data_source': self.data_source,
            'org': self.org
        }
        args.update(kwargs)
        return query_factory.create(**args)

    def create_query_with_params(self, **kwargs):
        """Create a parameterized query against the default data source."""
        args = {
            'user': self.user,
            'data_source': self.data_source,
            'org': self.org
        }
        args.update(kwargs)
        return query_with_params_factory.create(**args)

    def create_access_permission(self, **kwargs):
        """Create an access permission granted by the default user."""
        args = {
            'grantor': self.user
        }
        args.update(kwargs)
        return access_permission_factory.create(**args)

    def create_query_result(self, **kwargs):
        """Create a query result, inheriting the org from its data source."""
        args = {
            'data_source': self.data_source,
        }
        args.update(kwargs)

        if 'data_source' in args and 'org' not in args:
            args['org'] = args['data_source'].org

        return query_result_factory.create(**args)

    def create_visualization(self, **kwargs):
        """Create a visualization backed by a fresh query."""
        args = {
            'query_rel': self.create_query()
        }
        args.update(kwargs)
        return visualization_factory.create(**args)

    def create_visualization_with_params(self, **kwargs):
        """Create a visualization backed by a fresh parameterized query."""
        args = {
            'query_rel': self.create_query_with_params()
        }
        args.update(kwargs)
        return visualization_factory.create(**args)

    def create_widget(self, **kwargs):
        """Create a widget on a fresh dashboard with a fresh visualization."""
        args = {
            'dashboard': self.create_dashboard(),
            'visualization': self.create_visualization()
        }
        args.update(kwargs)
        return widget_factory.create(**args)

    def create_api_key(self, **kwargs):
        """Create an API key scoped to the default org."""
        args = {
            'org': self.org
        }
        args.update(kwargs)
        return api_key_factory.create(**args)

    def create_destination(self, **kwargs):
        """Create a notification destination in the default org."""
        args = {
            'org': self.org
        }
        args.update(kwargs)
        return destination_factory.create(**args)

    def create_query_snippet(self, **kwargs):
        """Create a query snippet owned by the default user/org."""
        args = {
            'user': self.user,
            'org': self.org
        }
        args.update(kwargs)
        return query_snippet_factory.create(**args)
| |
# -*- coding: utf-8 -*-
"""
pygments.lexers.erlang
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Erlang.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \
include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer',
           'ElixirLexer']

# Matches one line including its trailing newline; the shell-session lexers
# use it to process their input line by line.
line_re = re.compile('.*?\n')
class ErlangLexer(RegexLexer):
    """
    For the Erlang functional programming language.

    Blame Jeremy Thurgood (http://jerith.za.net/).

    .. versionadded:: 0.9
    """

    name = 'Erlang'
    aliases = ['erlang']
    filenames = ['*.erl', '*.hrl', '*.es', '*.escript']
    mimetypes = ['text/x-erlang']

    keywords = (
        'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
        'let', 'of', 'query', 'receive', 'try', 'when',
    )

    builtins = (  # See erlang(3) man page
        'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
        'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
        'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
        'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
        'float', 'float_to_list', 'fun_info', 'fun_to_list',
        'function_exported', 'garbage_collect', 'get', 'get_keys',
        'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
        'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
        'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
        'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
        'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
        'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
        'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
        'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
        'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
        'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
        'pid_to_list', 'port_close', 'port_command', 'port_connect',
        'port_control', 'port_call', 'port_info', 'port_to_list',
        'process_display', 'process_flag', 'process_info', 'purge_module',
        'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
        'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
        'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
        'spawn_opt', 'split_binary', 'start_timer', 'statistics',
        'suspend_process', 'system_flag', 'system_info', 'system_monitor',
        'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
        'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
        'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
    )

    operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)'
    word_operators = (
        'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
        'div', 'not', 'or', 'orelse', 'rem', 'xor'
    )

    atom_re = r"(?:[a-z]\w*|'[^\n']*[^\\]')"

    variable_re = r'(?:[A-Z_]\w*)'

    esc_char_re = r'[bdefnrstv\'"\\]'
    esc_octal_re = r'[0-7][0-7]?[0-7]?'
    esc_hex_re = r'(?:x[0-9a-fA-F]{2}|x\{[0-9a-fA-F]+\})'
    esc_ctrl_re = r'\^[a-zA-Z]'
    escape_re = r'(?:\\(?:'+esc_char_re+r'|'+esc_octal_re+r'|'+esc_hex_re+r'|'+esc_ctrl_re+r'))'

    macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'

    base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'%.*\n', Comment),
            (words(keywords, suffix=r'\b'), Keyword),
            (words(builtins, suffix=r'\b'), Name.Builtin),
            (words(word_operators, suffix=r'\b'), Operator.Word),
            (r'^-', Punctuation, 'directive'),
            (operators, Operator),
            (r'"', String, 'string'),
            (r'<<', Name.Label),
            (r'>>', Name.Label),
            ('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),
            ('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[+-]?' + base_re + r'#[0-9a-zA-Z]+', Number.Integer),
            # The float rule must precede the plain integer rule: rules are
            # tried in order, so with the integer rule first `3.14` would be
            # lexed as integer `3`, punctuation `.`, integer `14` and a float
            # token could never be emitted.  The dot is escaped so that only
            # a literal `.` joins the two digit groups.
            (r'[+-]?\d+\.\d+', Number.Float),
            (r'[+-]?\d+', Number.Integer),
            (r'[]\[:_@\".{}()|;,]', Punctuation),
            (variable_re, Name.Variable),
            (atom_re, Name),
            (r'\?'+macro_re, Name.Constant),
            (r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
            (r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),

            # Erlang script shebang
            (r'\A#!.+\n', Comment.Hashbang),

            # EEP 43: Maps
            # http://www.erlang.org/eeps/eep-0043.html
            (r'#\{', Punctuation, 'map_key'),
        ],
        'string': [
            (escape_re, String.Escape),
            (r'"', String, '#pop'),
            (r'~[0-9.*]*[~#+BPWXb-ginpswx]', String.Interpol),
            (r'[^"\\~]+', String),
            (r'~', String),
        ],
        'directive': [
            (r'(define)(\s*)(\()('+macro_re+r')',
             bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),
            (r'(record)(\s*)(\()('+macro_re+r')',
             bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
            (atom_re, Name.Entity, '#pop'),
        ],
        'map_key': [
            include('root'),
            (r'=>', Punctuation, 'map_val'),
            (r':=', Punctuation, 'map_val'),
            (r'\}', Punctuation, '#pop'),
        ],
        'map_val': [
            include('root'),
            (r',', Punctuation, '#pop'),
            (r'(?=\})', Punctuation, '#pop'),
        ],
    }
class ErlangShellLexer(Lexer):
    """
    Shell sessions in erl (for Erlang code).

    .. versionadded:: 1.1
    """
    name = 'Erlang erl session'
    aliases = ['erl']
    filenames = ['*.erl-sh']
    mimetypes = ['text/x-erl-shellsession']

    # Matches prompts such as `1>` or `(node@host)1>`.
    _prompt_re = re.compile(r'(?:\([\w@_.]+\))?\d+>(?=\s|\Z)')

    def get_tokens_unprocessed(self, text):
        erlexer = ErlangLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            m = self._prompt_re.match(line)
            if m is not None:
                # Prompt line: emit the prompt and buffer the code after it,
                # so consecutive input lines are lexed as one unit.
                end = m.end()
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
            else:
                if curcode:
                    # A non-prompt line ends the buffered input: lex it with
                    # the Erlang lexer, interleaving the stored prompts.
                    for item in do_insertions(insertions,
                                              erlexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                if line.startswith('*'):
                    # erl prints error reports with a leading '*'.
                    yield match.start(), Generic.Traceback, line
                else:
                    yield match.start(), Generic.Output, line
        # Flush any input still buffered at end of text.
        if curcode:
            for item in do_insertions(insertions,
                                      erlexer.get_tokens_unprocessed(curcode)):
                yield item
def gen_elixir_string_rules(name, symbol, token):
    """Build the lexer state for an Elixir string delimited by *symbol*."""
    rules = [
        (r'[^#%s\\]+' % (symbol,), token),
        include('escapes'),
        (r'\\.', token),
        (r'(%s)' % (symbol,), bygroups(token), "#pop"),
        include('interpol')
    ]
    return {'string_' + name: rules}
def gen_elixir_sigstr_rules(term, token, interpol=True):
    """Build the rules for an Elixir sigil string terminated by *term*."""
    closing = [
        (r'\\.', token),
        (r'%s[a-zA-Z]*' % (term,), token, '#pop'),
    ]
    if not interpol:
        return [(r'[^%s\\]+' % (term,), token)] + closing
    return [
        (r'[^#%s\\]+' % (term,), token),
        include('escapes'),
    ] + closing + [include('interpol')]
class ElixirLexer(RegexLexer):
    """
    For the `Elixir language <http://elixir-lang.org>`_.

    .. versionadded:: 1.5
    """

    name = 'Elixir'
    aliases = ['elixir', 'ex', 'exs']
    filenames = ['*.ex', '*.eex', '*.exs']
    mimetypes = ['text/x-elixir']

    # Word classes: `get_tokens_unprocessed` below re-classifies plain Name
    # tokens that fall into one of these sets.
    KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch')
    KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in')
    BUILTIN = (
        'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise',
        'quote', 'unquote', 'unquote_splicing', 'throw', 'super',
    )
    BUILTIN_DECLARATION = (
        'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop',
        'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback',
    )

    BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias')
    CONSTANT = ('nil', 'true', 'false')

    PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__')

    OPERATORS3 = (
        '<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==',
        '~>>', '<~>', '|~>', '<|>',
    )
    OPERATORS2 = (
        '==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~',
        '->', '<-', '|', '.', '=', '~>', '<~',
    )
    OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&')

    PUNCTUATION = (
        '\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']',
    )

    def get_tokens_unprocessed(self, text):
        # Post-process generic Name tokens into keywords/builtins/constants.
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if value in self.KEYWORD:
                    yield index, Keyword, value
                elif value in self.KEYWORD_OPERATOR:
                    yield index, Operator.Word, value
                elif value in self.BUILTIN:
                    yield index, Keyword, value
                elif value in self.BUILTIN_DECLARATION:
                    yield index, Keyword.Declaration, value
                elif value in self.BUILTIN_NAMESPACE:
                    yield index, Keyword.Namespace, value
                elif value in self.CONSTANT:
                    yield index, Name.Constant, value
                elif value in self.PSEUDO_VAR:
                    yield index, Name.Builtin.Pseudo, value
                else:
                    yield index, token, value
            else:
                yield index, token, value

    def gen_elixir_sigil_rules():
        # all valid sigil terminators (excluding heredocs)
        terminators = [
            (r'\{', r'\}', 'cb'),
            (r'\[', r'\]', 'sb'),
            (r'\(', r'\)', 'pa'),
            (r'<', r'>', 'ab'),
            (r'/', r'/', 'slas'),
            (r'\|', r'\|', 'pipe'),
            ('"', '"', 'quot'),
            ("'", "'", 'apos'),
        ]

        # heredocs have slightly different rules
        triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')]

        token = String.Other
        states = {'sigils': []}

        for term, name in triquotes:
            # Lowercase sigils allow interpolation, uppercase ones do not.
            states['sigils'] += [
                (r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc),
                 (name + '-end', name + '-intp')),
                (r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc),
                 (name + '-end', name + '-no-intp')),
            ]
            states[name + '-end'] = [
                (r'[a-zA-Z]+', token, '#pop'),
                default('#pop'),
            ]
            states[name + '-intp'] = [
                (r'^\s*' + term, String.Heredoc, '#pop'),
                include('heredoc_interpol'),
            ]
            states[name + '-no-intp'] = [
                (r'^\s*' + term, String.Heredoc, '#pop'),
                include('heredoc_no_interpol'),
            ]

        for lterm, rterm, name in terminators:
            states['sigils'] += [
                (r'~[a-z]' + lterm, token, name + '-intp'),
                (r'~[A-Z]' + lterm, token, name + '-no-intp'),
            ]
            states[name + '-intp'] = gen_elixir_sigstr_rules(rterm, token)
            states[name + '-no-intp'] = \
                gen_elixir_sigstr_rules(rterm, token, interpol=False)

        return states

    op3_re = "|".join(re.escape(s) for s in OPERATORS3)
    op2_re = "|".join(re.escape(s) for s in OPERATORS2)
    op1_re = "|".join(re.escape(s) for s in OPERATORS1)
    ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re)
    punctuation_re = "|".join(re.escape(s) for s in PUNCTUATION)
    alnum = r'\w'
    name_re = r'(?:\.\.\.|[a-z_]%s*[!?]?)' % alnum
    modname_re = r'[A-Z]%(alnum)s*(?:\.[A-Z]%(alnum)s*)*' % {'alnum': alnum}
    complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re)
    special_atom_re = r'(?:\.\.\.|<<>>|%\{\}|%|\{\})'

    long_hex_char_re = r'(\\x\{)([\da-fA-F]+)(\})'
    hex_char_re = r'(\\x[\da-fA-F]{1,2})'
    escape_char_re = r'(\\[abdefnrstv])'

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*$', Comment.Single),

            # Various kinds of characters
            (r'(\?)' + long_hex_char_re,
             bygroups(String.Char,
                      String.Escape, Number.Hex, String.Escape)),
            (r'(\?)' + hex_char_re,
             bygroups(String.Char, String.Escape)),
            (r'(\?)' + escape_char_re,
             bygroups(String.Char, String.Escape)),
            (r'\?\\?.', String.Char),

            # '::' has to go before atoms
            (r':::', String.Symbol),
            (r'::', Operator),

            # atoms
            (r':' + special_atom_re, String.Symbol),
            (r':' + complex_name_re, String.Symbol),
            (r':"', String.Symbol, 'string_double_atom'),
            (r":'", String.Symbol, 'string_single_atom'),

            # [keywords: ...]
            (r'(%s|%s)(:)(?=\s|\n)' % (special_atom_re, complex_name_re),
             bygroups(String.Symbol, Punctuation)),

            # @attributes
            (r'@' + name_re, Name.Attribute),

            # identifiers
            (name_re, Name),
            (r'(%%?)(%s)' % (modname_re,), bygroups(Punctuation, Name.Class)),

            # operators and punctuation
            (op3_re, Operator),
            (op2_re, Operator),
            (punctuation_re, Punctuation),
            (r'&\d', Name.Entity),   # anon func arguments
            (op1_re, Operator),

            # numbers
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[\da-fA-F]+', Number.Hex),
            (r'\d(_?\d)*\.\d(_?\d)*([eE][-+]?\d(_?\d)*)?', Number.Float),
            (r'\d(_?\d)*', Number.Integer),

            # strings and heredocs
            (r'"""\s*', String.Heredoc, 'heredoc_double'),
            (r"'''\s*$", String.Heredoc, 'heredoc_single'),
            (r'"', String.Double, 'string_double'),
            (r"'", String.Single, 'string_single'),
            include('sigils'),

            (r'%\{', Punctuation, 'map_key'),
            (r'\{', Punctuation, 'tuple'),
        ],
        'heredoc_double': [
            (r'^\s*"""', String.Heredoc, '#pop'),
            include('heredoc_interpol'),
        ],
        'heredoc_single': [
            (r"^\s*'''", String.Heredoc, '#pop'),
            include('heredoc_interpol'),
        ],
        'heredoc_interpol': [
            (r'[^#\\\n]+', String.Heredoc),
            include('escapes'),
            (r'\\.', String.Heredoc),
            (r'\n+', String.Heredoc),
            include('interpol'),
        ],
        'heredoc_no_interpol': [
            (r'[^\\\n]+', String.Heredoc),
            (r'\\.', String.Heredoc),
            (r'\n+', String.Heredoc),
        ],
        'escapes': [
            (long_hex_char_re,
             bygroups(String.Escape, Number.Hex, String.Escape)),
            (hex_char_re, String.Escape),
            (escape_char_re, String.Escape),
        ],
        'interpol': [
            (r'#\{', String.Interpol, 'interpol_string'),
        ],
        'interpol_string': [
            (r'\}', String.Interpol, "#pop"),
            include('root')
        ],
        'map_key': [
            include('root'),
            (r':', Punctuation, 'map_val'),
            (r'=>', Punctuation, 'map_val'),
            (r'\}', Punctuation, '#pop'),
        ],
        'map_val': [
            include('root'),
            (r',', Punctuation, '#pop'),
            (r'(?=\})', Punctuation, '#pop'),
        ],
        'tuple': [
            include('root'),
            (r'\}', Punctuation, '#pop'),
        ],
    }
    # Merge in the generated string and sigil states.
    tokens.update(gen_elixir_string_rules('double', '"', String.Double))
    tokens.update(gen_elixir_string_rules('single', "'", String.Single))
    tokens.update(gen_elixir_string_rules('double_atom', '"', String.Symbol))
    tokens.update(gen_elixir_string_rules('single_atom', "'", String.Symbol))
    tokens.update(gen_elixir_sigil_rules())
class ElixirConsoleLexer(Lexer):
    """
    For Elixir interactive console (iex) output like:

    .. sourcecode:: iex

        iex> [head | tail] = [1,2,3]
        [1,2,3]
        iex> head
        1
        iex> tail
        [2,3]
        iex> [head | tail]
        [1,2,3]
        iex> length [head | tail]
        3

    .. versionadded:: 1.5
    """

    name = 'Elixir iex session'
    aliases = ['iex']
    mimetypes = ['text/x-elixir-shellsession']

    # Matches prompts like `iex(1)> `, `iex(node@host)1> ` and `...> `.
    _prompt_re = re.compile(r'(iex|\.{3})((?:\([\w@_.]+\))?\d+|\(\d+\))?> ')

    def get_tokens_unprocessed(self, text):
        exlexer = ElixirLexer(**self.options)

        curcode = ''
        in_error = False
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith(u'** '):
                # Error report; subsequent non-prompt lines belong to it too.
                in_error = True
                insertions.append((len(curcode),
                                   [(0, Generic.Error, line[:-1])]))
                curcode += line[-1:]
            else:
                m = self._prompt_re.match(line)
                if m is not None:
                    # Prompt line: emit the prompt, buffer the code after it.
                    in_error = False
                    end = m.end()
                    insertions.append((len(curcode),
                                       [(0, Generic.Prompt, line[:end])]))
                    curcode += line[end:]
                else:
                    if curcode:
                        # Output ends the buffered input: lex it as Elixir,
                        # interleaving the stored prompts.
                        for item in do_insertions(
                                insertions, exlexer.get_tokens_unprocessed(curcode)):
                            yield item
                        curcode = ''
                        insertions = []
                    token = Generic.Error if in_error else Generic.Output
                    yield match.start(), token, line
        # Flush any input still buffered at end of text.
        if curcode:
            for item in do_insertions(
                    insertions, exlexer.get_tokens_unprocessed(curcode)):
                yield item
| |
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import json
import logging
import os
from opencensus.common.schedule import QueueExitEvent
from opencensus.ext.azure.common import Options, utils
from opencensus.ext.azure.common.exporter import BaseExporter
from opencensus.ext.azure.common.processor import ProcessorMixin
from opencensus.ext.azure.common.protocol import (
Data,
Envelope,
ExceptionData,
RemoteDependency,
Request,
)
from opencensus.ext.azure.common.storage import LocalFileStorage
from opencensus.ext.azure.common.transport import TransportMixin
from opencensus.ext.azure.metrics_exporter import statsbeat_metrics
from opencensus.trace import attributes_helper
from opencensus.trace.span import SpanKind
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
logger = logging.getLogger(__name__)
__all__ = ['AzureExporter']
HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES['HTTP_METHOD']
HTTP_PATH = attributes_helper.COMMON_ATTRIBUTES['HTTP_PATH']
HTTP_ROUTE = attributes_helper.COMMON_ATTRIBUTES['HTTP_ROUTE']
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES['HTTP_URL']
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES['HTTP_STATUS_CODE']
ERROR_MESSAGE = attributes_helper.COMMON_ATTRIBUTES['ERROR_MESSAGE']
ERROR_NAME = attributes_helper.COMMON_ATTRIBUTES['ERROR_NAME']
STACKTRACE = attributes_helper.COMMON_ATTRIBUTES['STACKTRACE']
class AzureExporter(BaseExporter, ProcessorMixin, TransportMixin):
"""An exporter that sends traces to Microsoft Azure Monitor.
:param options: Options for the exporter.
"""
def __init__(self, **options):
self.options = Options(**options)
utils.validate_instrumentation_key(self.options.instrumentation_key)
self.storage = None
if self.options.enable_local_storage:
self.storage = LocalFileStorage(
path=self.options.storage_path,
max_size=self.options.storage_max_size,
maintenance_period=self.options.storage_maintenance_period,
retention_period=self.options.storage_retention_period,
source=self.__class__.__name__,
)
self._telemetry_processors = []
super(AzureExporter, self).__init__(**options)
atexit.register(self._stop, self.options.grace_period)
# start statsbeat on exporter instantiation
if not os.environ.get("APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"):
statsbeat_metrics.collect_statsbeat_metrics(self.options)
# For redirects
self._consecutive_redirects = 0 # To prevent circular redirects
def span_data_to_envelope(self, sd):
envelope = Envelope(
iKey=self.options.instrumentation_key,
tags=dict(utils.azure_monitor_context),
time=sd.start_time,
)
envelope.tags['ai.operation.id'] = sd.context.trace_id
if sd.parent_span_id:
envelope.tags['ai.operation.parentId'] = '{}'.format(
sd.parent_span_id,
)
if sd.span_kind == SpanKind.SERVER:
if ERROR_MESSAGE in sd.attributes:
envelope.name = 'Microsoft.ApplicationInsights.Exception'
data = ExceptionData(
exceptions=[{
'id': 1,
'outerId': '{}'.format(sd.span_id),
'typeName': sd.attributes.get(ERROR_NAME, ''),
'message': sd.attributes[ERROR_MESSAGE],
'hasFullStack': STACKTRACE in sd.attributes,
'parsedStack': sd.attributes.get(STACKTRACE, None)
}],
)
envelope.data = Data(baseData=data, baseType='ExceptionData')
yield envelope
envelope.name = 'Microsoft.ApplicationInsights.Request'
data = Request(
id='{}'.format(sd.span_id),
duration=utils.timestamp_to_duration(
sd.start_time,
sd.end_time,
),
responseCode=str(sd.status.code),
success=False, # Modify based off attributes or status
properties={},
)
envelope.data = Data(baseData=data, baseType='RequestData')
data.name = ''
if HTTP_METHOD in sd.attributes:
data.name = sd.attributes[HTTP_METHOD]
if HTTP_ROUTE in sd.attributes:
data.name = data.name + ' ' + sd.attributes[HTTP_ROUTE]
envelope.tags['ai.operation.name'] = data.name
data.properties['request.name'] = data.name
elif HTTP_PATH in sd.attributes:
data.properties['request.name'] = data.name + \
' ' + sd.attributes[HTTP_PATH]
if HTTP_URL in sd.attributes:
data.url = sd.attributes[HTTP_URL]
data.properties['request.url'] = sd.attributes[HTTP_URL]
if HTTP_STATUS_CODE in sd.attributes:
status_code = sd.attributes[HTTP_STATUS_CODE]
data.responseCode = str(status_code)
data.success = (
status_code >= 200 and status_code <= 399
)
elif sd.status.code == 0:
data.success = True
else:
envelope.name = \
'Microsoft.ApplicationInsights.RemoteDependency'
data = RemoteDependency(
name=sd.name, # TODO
id='{}'.format(sd.span_id),
resultCode=str(sd.status.code),
duration=utils.timestamp_to_duration(
sd.start_time,
sd.end_time,
),
success=False, # Modify based off attributes or status
properties={},
)
envelope.data = Data(
baseData=data,
baseType='RemoteDependencyData',
)
if sd.span_kind == SpanKind.CLIENT:
data.type = sd.attributes.get('component')
if HTTP_URL in sd.attributes:
url = sd.attributes[HTTP_URL]
# TODO: error handling, probably put scheme as well
data.data = url
parse_url = urlparse(url)
# target matches authority (host:port)
data.target = parse_url.netloc
if HTTP_METHOD in sd.attributes:
# name is METHOD/path
data.name = sd.attributes[HTTP_METHOD] \
+ ' ' + parse_url.path
if HTTP_STATUS_CODE in sd.attributes:
status_code = sd.attributes[HTTP_STATUS_CODE]
data.resultCode = str(status_code)
data.success = 200 <= status_code < 400
elif sd.status.code == 0:
data.success = True
else:
data.type = 'INPROC'
data.success = True
if sd.links:
links = []
for link in sd.links:
links.append(
{"operation_Id": link.trace_id, "id": link.span_id})
data.properties["_MS.links"] = json.dumps(links)
# TODO: tracestate, tags
for key in sd.attributes:
# This removes redundant data from ApplicationInsights
if key.startswith('http.'):
continue
data.properties[key] = sd.attributes[key]
yield envelope
    def emit(self, batch, event=None):
        """Export a batch of span data to Application Insights.

        Converts each span in ``batch`` into one or more envelopes, runs
        them through the telemetry processors, and transmits them.  ``event``
        is an optional worker-queue event that is set once the batch has
        been handled; a QueueExitEvent additionally triggers a final flush
        of locally stored envelopes.
        """
        try:
            if batch:
                # A single span may expand to multiple envelopes, so flatten.
                envelopes = [envelope
                             for sd in batch
                             for envelope in self.span_data_to_envelope(sd)]
                envelopes = self.apply_telemetry_processors(envelopes)
                result = self._transmit(envelopes)
                # Only store files if local storage enabled
                # NOTE(review): result > 0 presumably signals a retryable
                # transmit failure (retry-after interval) — confirm against
                # _transmit before relying on this.
                if self.storage and result > 0:
                    self.storage.put(envelopes, result)
            if event:
                if isinstance(event, QueueExitEvent):
                    self._transmit_from_storage()  # send files before exit
                event.set()
                return
            # A short batch means the queue drained; use the idle time to
            # retry previously stored envelopes.
            if len(batch) < self.options.max_batch_size:
                self._transmit_from_storage()
        except Exception:
            logger.exception('Exception occurred while exporting the data.')
def _stop(self, timeout=None):
if self.storage:
self.storage.close()
if self._worker:
self._worker.stop(timeout)
| |
"""
Tests for Factor terms.
"""
from functools import partial
from itertools import product
from nose_parameterized import parameterized
from unittest import TestCase
from toolz import compose
from numpy import (
apply_along_axis,
arange,
array,
datetime64,
empty,
eye,
log1p,
nan,
ones,
rot90,
where,
)
from numpy.random import randn, seed
import pandas as pd
from scipy.stats.mstats import winsorize as scipy_winsorize
from zipline.errors import BadPercentileBounds, UnknownRankMethod
from zipline.lib.labelarray import LabelArray
from zipline.lib.rank import masked_rankdata_2d
from zipline.lib.normalize import naive_grouped_rowwise_apply as grouped_apply
from zipline.pipeline import Classifier, Factor, Filter
from zipline.pipeline.data import DataSet, Column
from zipline.pipeline.factors import (
CustomFactor,
Returns,
)
from zipline.testing import (
check_allclose,
check_arrays,
parameter_space,
permute_rows,
)
from zipline.testing.fixtures import ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.numpy_utils import (
categorical_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
NaTns,
)
from zipline.utils.math_utils import nanmean, nanstd
from .base import BasePipelineTestCase
class F(Factor):
    # Minimal float64 factor with no inputs; generic fixture for these tests.
    dtype = float64_dtype
    inputs = ()
    window_length = 0
class OtherF(Factor):
    # Second float64 factor fixture, distinct from F so tests can use two
    # independent factor terms in one workspace.
    dtype = float64_dtype
    inputs = ()
    window_length = 0
class C(Classifier):
    # Minimal int64 classifier fixture; -1 marks missing entries.
    dtype = int64_dtype
    missing_value = -1
    inputs = ()
    window_length = 0
class OtherC(Classifier):
    # Second int64 classifier fixture, distinct from C for multi-term tests.
    dtype = int64_dtype
    missing_value = -1
    inputs = ()
    window_length = 0
class Mask(Filter):
    # Minimal filter fixture used as a mask input in the tests below.
    inputs = ()
    window_length = 0
# Decorator running a test once per supported factor dtype, passing
# (name, dtype) as extra positional arguments.
for_each_factor_dtype = parameterized.expand([
    ('datetime64[ns]', datetime64ns_dtype),
    ('float', float64_dtype),
])
class FactorTestCase(BasePipelineTestCase):
    def init_instance_fixtures(self):
        """Create the shared ``F`` factor instance used by these tests."""
        super(FactorTestCase, self).init_instance_fixtures()
        self.f = F()
    def test_bad_input(self):
        """rank() should reject unrecognized method names."""
        with self.assertRaises(UnknownRankMethod):
            self.f.rank("not a real rank method")
@parameter_space(method_name=['isnan', 'notnan', 'isfinite'])
def test_float64_only_ops(self, method_name):
class NotFloat(Factor):
dtype = datetime64ns_dtype
inputs = ()
window_length = 0
nf = NotFloat()
meth = getattr(nf, method_name)
with self.assertRaises(TypeError):
meth()
    @parameter_space(custom_missing_value=[-1, 0])
    def test_isnull_int_dtype(self, custom_missing_value):
        """isnull/notnull on an int factor should key off its missing_value."""
        class CustomMissingValue(Factor):
            dtype = int64_dtype
            window_length = 0
            missing_value = custom_missing_value
            inputs = ()
        factor = CustomMissingValue()
        # Mark the main diagonal as missing.
        data = arange(25).reshape(5, 5)
        data[eye(5, dtype=bool)] = custom_missing_value
        self.check_terms(
            {
                'isnull': factor.isnull(),
                'notnull': factor.notnull(),
            },
            {
                'isnull': eye(5, dtype=bool),
                'notnull': ~eye(5, dtype=bool),
            },
            initial_workspace={factor: data},
            mask=self.build_mask(ones((5, 5))),
        )
    def test_isnull_datetime_dtype(self):
        """isnull/notnull on a datetime factor should treat NaT as missing."""
        class DatetimeFactor(Factor):
            dtype = datetime64ns_dtype
            window_length = 0
            inputs = ()
        factor = DatetimeFactor()
        # NaT on the main diagonal; everything else is a valid timestamp.
        data = arange(25).reshape(5, 5).astype('datetime64[ns]')
        data[eye(5, dtype=bool)] = NaTns
        self.check_terms(
            {
                'isnull': factor.isnull(),
                'notnull': factor.notnull(),
            },
            {
                'isnull': eye(5, dtype=bool),
                'notnull': ~eye(5, dtype=bool),
            },
            initial_workspace={factor: data},
            mask=self.build_mask(ones((5, 5))),
        )
    @for_each_factor_dtype
    def test_rank_ascending(self, name, factor_dtype):
        """Check every rank method against hand-computed ascending ranks."""
        f = F(dtype=factor_dtype)
        # Generated with:
        # data = arange(25).reshape(5, 5).transpose() % 4
        data = array([[0, 1, 2, 3, 0],
                      [1, 2, 3, 0, 1],
                      [2, 3, 0, 1, 2],
                      [3, 0, 1, 2, 3],
                      [0, 1, 2, 3, 0]], dtype=factor_dtype)
        expected_ranks = {
            'ordinal': array([[1., 3., 4., 5., 2.],
                              [2., 4., 5., 1., 3.],
                              [3., 5., 1., 2., 4.],
                              [4., 1., 2., 3., 5.],
                              [1., 3., 4., 5., 2.]]),
            'average': array([[1.5, 3., 4., 5., 1.5],
                              [2.5, 4., 5., 1., 2.5],
                              [3.5, 5., 1., 2., 3.5],
                              [4.5, 1., 2., 3., 4.5],
                              [1.5, 3., 4., 5., 1.5]]),
            'min': array([[1., 3., 4., 5., 1.],
                          [2., 4., 5., 1., 2.],
                          [3., 5., 1., 2., 3.],
                          [4., 1., 2., 3., 4.],
                          [1., 3., 4., 5., 1.]]),
            'max': array([[2., 3., 4., 5., 2.],
                          [3., 4., 5., 1., 3.],
                          [4., 5., 1., 2., 4.],
                          [5., 1., 2., 3., 5.],
                          [2., 3., 4., 5., 2.]]),
            'dense': array([[1., 2., 3., 4., 1.],
                            [2., 3., 4., 1., 2.],
                            [3., 4., 1., 2., 3.],
                            [4., 1., 2., 3., 4.],
                            [1., 2., 3., 4., 1.]]),
        }
        def check(terms):
            # Run the given terms and compare to the matching expected table.
            self.check_terms(
                terms,
                expected={name: expected_ranks[name] for name in terms},
                initial_workspace={f: data},
                mask=self.build_mask(ones((5, 5))),
            )
        check({meth: f.rank(method=meth) for meth in expected_ranks})
        check({
            meth: f.rank(method=meth, ascending=True)
            for meth in expected_ranks
        })
        # Not passing a method should default to ordinal.
        check({'ordinal': f.rank()})
        check({'ordinal': f.rank(ascending=True)})
    @for_each_factor_dtype
    def test_rank_descending(self, name, factor_dtype):
        """Check every rank method against hand-computed descending ranks."""
        f = F(dtype=factor_dtype)
        # Generated with:
        # data = arange(25).reshape(5, 5).transpose() % 4
        data = array([[0, 1, 2, 3, 0],
                      [1, 2, 3, 0, 1],
                      [2, 3, 0, 1, 2],
                      [3, 0, 1, 2, 3],
                      [0, 1, 2, 3, 0]], dtype=factor_dtype)
        expected_ranks = {
            'ordinal': array([[4., 3., 2., 1., 5.],
                              [3., 2., 1., 5., 4.],
                              [2., 1., 5., 4., 3.],
                              [1., 5., 4., 3., 2.],
                              [4., 3., 2., 1., 5.]]),
            'average': array([[4.5, 3., 2., 1., 4.5],
                              [3.5, 2., 1., 5., 3.5],
                              [2.5, 1., 5., 4., 2.5],
                              [1.5, 5., 4., 3., 1.5],
                              [4.5, 3., 2., 1., 4.5]]),
            'min': array([[4., 3., 2., 1., 4.],
                          [3., 2., 1., 5., 3.],
                          [2., 1., 5., 4., 2.],
                          [1., 5., 4., 3., 1.],
                          [4., 3., 2., 1., 4.]]),
            'max': array([[5., 3., 2., 1., 5.],
                          [4., 2., 1., 5., 4.],
                          [3., 1., 5., 4., 3.],
                          [2., 5., 4., 3., 2.],
                          [5., 3., 2., 1., 5.]]),
            'dense': array([[4., 3., 2., 1., 4.],
                            [3., 2., 1., 4., 3.],
                            [2., 1., 4., 3., 2.],
                            [1., 4., 3., 2., 1.],
                            [4., 3., 2., 1., 4.]]),
        }
        def check(terms):
            # Run the given terms and compare to the matching expected table.
            self.check_terms(
                terms,
                expected={name: expected_ranks[name] for name in terms},
                initial_workspace={f: data},
                mask=self.build_mask(ones((5, 5))),
            )
        check({
            meth: f.rank(method=meth, ascending=False)
            for meth in expected_ranks
        })
        # Not passing a method should default to ordinal.
        check({'ordinal': f.rank(ascending=False)})
    @for_each_factor_dtype
    def test_rank_after_mask(self, name, factor_dtype):
        """Masked-out cells should rank as NaN and shift later ranks down."""
        f = F(dtype=factor_dtype)
        # data = arange(25).reshape(5, 5).transpose() % 4
        data = array([[0, 1, 2, 3, 0],
                      [1, 2, 3, 0, 1],
                      [2, 3, 0, 1, 2],
                      [3, 0, 1, 2, 3],
                      [0, 1, 2, 3, 0]], dtype=factor_dtype)
        # Mask out the main diagonal.
        mask_data = ~eye(5, dtype=bool)
        initial_workspace = {f: data, Mask(): mask_data}
        terms = {
            "ascending_nomask": f.rank(ascending=True),
            "ascending_mask": f.rank(ascending=True, mask=Mask()),
            "descending_nomask": f.rank(ascending=False),
            "descending_mask": f.rank(ascending=False, mask=Mask()),
        }
        expected = {
            "ascending_nomask": array([[1., 3., 4., 5., 2.],
                                       [2., 4., 5., 1., 3.],
                                       [3., 5., 1., 2., 4.],
                                       [4., 1., 2., 3., 5.],
                                       [1., 3., 4., 5., 2.]]),
            "descending_nomask": array([[4., 3., 2., 1., 5.],
                                        [3., 2., 1., 5., 4.],
                                        [2., 1., 5., 4., 3.],
                                        [1., 5., 4., 3., 2.],
                                        [4., 3., 2., 1., 5.]]),
            # Diagonal should be all nans, and anything whose rank was less
            # than the diagonal in the unmasked calc should go down by 1.
            "ascending_mask": array([[nan, 2., 3., 4., 1.],
                                     [2., nan, 4., 1., 3.],
                                     [2., 4., nan, 1., 3.],
                                     [3., 1., 2., nan, 4.],
                                     [1., 2., 3., 4., nan]]),
            "descending_mask": array([[nan, 3., 2., 1., 4.],
                                      [2., nan, 1., 4., 3.],
                                      [2., 1., nan, 4., 3.],
                                      [1., 4., 3., nan, 2.],
                                      [4., 3., 2., 1., nan]]),
        }
        self.check_terms(
            terms,
            expected,
            initial_workspace,
            mask=self.build_mask(ones((5, 5))),
        )
    @for_each_factor_dtype
    def test_grouped_rank_ascending(self, name, factor_dtype=float64_dtype):
        """Ranks computed per classifier group, ascending (default) order.

        NOTE(review): the ``factor_dtype=float64_dtype`` default is
        inconsistent with the sibling rank tests, which take it as a
        required parameter; the decorator always supplies it, so the
        default is dead — consider removing for consistency.
        """
        f = F(dtype=factor_dtype)
        c = C()
        str_c = C(dtype=categorical_dtype, missing_value=None)
        # Generated with:
        # data = arange(25).reshape(5, 5).transpose() % 4
        data = array([[0, 1, 2, 3, 0],
                      [1, 2, 3, 0, 1],
                      [2, 3, 0, 1, 2],
                      [3, 0, 1, 2, 3],
                      [0, 1, 2, 3, 0]], dtype=factor_dtype)
        # Generated with:
        # classifier_data = arange(25).reshape(5, 5).transpose() % 2
        classifier_data = array([[0, 1, 0, 1, 0],
                                 [1, 0, 1, 0, 1],
                                 [0, 1, 0, 1, 0],
                                 [1, 0, 1, 0, 1],
                                 [0, 1, 0, 1, 0]], dtype=int64_dtype)
        string_classifier_data = LabelArray(
            classifier_data.astype(str).astype(object),
            missing_value=None,
        )
        expected_ranks = {
            'ordinal': array(
                [[1., 1., 3., 2., 2.],
                 [1., 2., 3., 1., 2.],
                 [2., 2., 1., 1., 3.],
                 [2., 1., 1., 2., 3.],
                 [1., 1., 3., 2., 2.]]
            ),
            'average': array(
                [[1.5, 1., 3., 2., 1.5],
                 [1.5, 2., 3., 1., 1.5],
                 [2.5, 2., 1., 1., 2.5],
                 [2.5, 1., 1., 2., 2.5],
                 [1.5, 1., 3., 2., 1.5]]
            ),
            'min': array(
                [[1., 1., 3., 2., 1.],
                 [1., 2., 3., 1., 1.],
                 [2., 2., 1., 1., 2.],
                 [2., 1., 1., 2., 2.],
                 [1., 1., 3., 2., 1.]]
            ),
            'max': array(
                [[2., 1., 3., 2., 2.],
                 [2., 2., 3., 1., 2.],
                 [3., 2., 1., 1., 3.],
                 [3., 1., 1., 2., 3.],
                 [2., 1., 3., 2., 2.]]
            ),
            'dense': array(
                [[1., 1., 2., 2., 1.],
                 [1., 2., 2., 1., 1.],
                 [2., 2., 1., 1., 2.],
                 [2., 1., 1., 2., 2.],
                 [1., 1., 2., 2., 1.]]
            ),
        }
        def check(terms):
            # Run the given terms and compare to the matching expected table.
            self.check_terms(
                terms,
                expected={name: expected_ranks[name] for name in terms},
                initial_workspace={
                    f: data,
                    c: classifier_data,
                    str_c: string_classifier_data,
                },
                mask=self.build_mask(ones((5, 5))),
            )
        # Not specifying the value of ascending param should default to True
        check({
            meth: f.rank(method=meth, groupby=c)
            for meth in expected_ranks
        })
        check({
            meth: f.rank(method=meth, groupby=str_c)
            for meth in expected_ranks
        })
        check({
            meth: f.rank(method=meth, groupby=c, ascending=True)
            for meth in expected_ranks
        })
        check({
            meth: f.rank(method=meth, groupby=str_c, ascending=True)
            for meth in expected_ranks
        })
        # Not passing a method should default to ordinal
        check({'ordinal': f.rank(groupby=c)})
        check({'ordinal': f.rank(groupby=str_c)})
        check({'ordinal': f.rank(groupby=c, ascending=True)})
        check({'ordinal': f.rank(groupby=str_c, ascending=True)})
    @for_each_factor_dtype
    def test_grouped_rank_descending(self, name, factor_dtype):
        """Ranks computed per classifier group, descending order."""
        f = F(dtype=factor_dtype)
        c = C()
        str_c = C(dtype=categorical_dtype, missing_value=None)
        # Generated with:
        # data = arange(25).reshape(5, 5).transpose() % 4
        data = array([[0, 1, 2, 3, 0],
                      [1, 2, 3, 0, 1],
                      [2, 3, 0, 1, 2],
                      [3, 0, 1, 2, 3],
                      [0, 1, 2, 3, 0]], dtype=factor_dtype)
        # Generated with:
        # classifier_data = arange(25).reshape(5, 5).transpose() % 2
        classifier_data = array([[0, 1, 0, 1, 0],
                                 [1, 0, 1, 0, 1],
                                 [0, 1, 0, 1, 0],
                                 [1, 0, 1, 0, 1],
                                 [0, 1, 0, 1, 0]], dtype=int64_dtype)
        string_classifier_data = LabelArray(
            classifier_data.astype(str).astype(object),
            missing_value=None,
        )
        expected_ranks = {
            'ordinal': array(
                [[2., 2., 1., 1., 3.],
                 [2., 1., 1., 2., 3.],
                 [1., 1., 3., 2., 2.],
                 [1., 2., 3., 1., 2.],
                 [2., 2., 1., 1., 3.]]
            ),
            'average': array(
                [[2.5, 2., 1., 1., 2.5],
                 [2.5, 1., 1., 2., 2.5],
                 [1.5, 1., 3., 2., 1.5],
                 [1.5, 2., 3., 1., 1.5],
                 [2.5, 2., 1., 1., 2.5]]
            ),
            'min': array(
                [[2., 2., 1., 1., 2.],
                 [2., 1., 1., 2., 2.],
                 [1., 1., 3., 2., 1.],
                 [1., 2., 3., 1., 1.],
                 [2., 2., 1., 1., 2.]]
            ),
            'max': array(
                [[3., 2., 1., 1., 3.],
                 [3., 1., 1., 2., 3.],
                 [2., 1., 3., 2., 2.],
                 [2., 2., 3., 1., 2.],
                 [3., 2., 1., 1., 3.]]
            ),
            'dense': array(
                [[2., 2., 1., 1., 2.],
                 [2., 1., 1., 2., 2.],
                 [1., 1., 2., 2., 1.],
                 [1., 2., 2., 1., 1.],
                 [2., 2., 1., 1., 2.]]
            ),
        }
        def check(terms):
            # Run the given terms and compare to the matching expected table.
            self.check_terms(
                terms,
                expected={name: expected_ranks[name] for name in terms},
                initial_workspace={
                    f: data,
                    c: classifier_data,
                    str_c: string_classifier_data,
                },
                mask=self.build_mask(ones((5, 5))),
            )
        check({
            meth: f.rank(method=meth, groupby=c, ascending=False)
            for meth in expected_ranks
        })
        check({
            meth: f.rank(method=meth, groupby=str_c, ascending=False)
            for meth in expected_ranks
        })
        # Not passing a method should default to ordinal
        check({'ordinal': f.rank(groupby=c, ascending=False)})
        check({'ordinal': f.rank(groupby=str_c, ascending=False)})
@parameterized.expand([
(100, 15),
(101, 4),
(102, 100),
])
def test_returns(self, seed_value, window_length):
returns = Returns(window_length=window_length)
today = datetime64(1, 'ns')
assets = arange(3)
out = empty((3,), dtype=float)
seed(seed_value) # Seed so we get deterministic results.
test_data = abs(randn(window_length, 3))
# Calculate the expected returns
expected = (test_data[-1] - test_data[0]) / test_data[0]
out = empty((3,), dtype=float)
returns.compute(today, assets, out, test_data)
check_allclose(expected, out)
def gen_ranking_cases():
seeds = range(int(1e4), int(1e5), int(1e4))
methods = ('ordinal', 'average')
use_mask_values = (True, False)
set_missing_values = (True, False)
ascending_values = (True, False)
return product(
seeds,
methods,
use_mask_values,
set_missing_values,
ascending_values,
)
@parameterized.expand(gen_ranking_cases())
def test_masked_rankdata_2d(self,
seed_value,
method,
use_mask,
set_missing,
ascending):
eyemask = ~eye(5, dtype=bool)
nomask = ones((5, 5), dtype=bool)
seed(seed_value)
asfloat = (randn(5, 5) * seed_value)
asdatetime = (asfloat).copy().view('datetime64[ns]')
mask = eyemask if use_mask else nomask
if set_missing:
asfloat[:, 2] = nan
asdatetime[:, 2] = NaTns
float_result = masked_rankdata_2d(
data=asfloat,
mask=mask,
missing_value=nan,
method=method,
ascending=True,
)
datetime_result = masked_rankdata_2d(
data=asdatetime,
mask=mask,
missing_value=NaTns,
method=method,
ascending=True,
)
check_arrays(float_result, datetime_result)
    def test_normalizations_hand_computed(self):
        """
        Test the hand-computed example in factor.demean.
        """
        f = self.f
        m = Mask()
        c = C()
        str_c = C(dtype=categorical_dtype, missing_value=None)
        factor_data = array(
            [[1.0, 2.0, 3.0, 4.0],
             [1.5, 2.5, 3.5, 1.0],
             [2.0, 3.0, 4.0, 1.5],
             [2.5, 3.5, 1.0, 2.0]],
        )
        # False on the main diagonal: those cells are masked out.
        filter_data = array(
            [[False, True, True, True],
             [True, False, True, True],
             [True, True, False, True],
             [True, True, True, False]],
            dtype=bool,
        )
        # Two groups per row: columns 0-1 are group 1, columns 2-3 group 2.
        classifier_data = array(
            [[1, 1, 2, 2],
             [1, 1, 2, 2],
             [1, 1, 2, 2],
             [1, 1, 2, 2]],
            dtype=int64_dtype,
        )
        string_classifier_data = LabelArray(
            classifier_data.astype(str).astype(object),
            missing_value=None,
        )
        terms = {
            'vanilla': f.demean(),
            'masked': f.demean(mask=m),
            'grouped': f.demean(groupby=c),
            'grouped_str': f.demean(groupby=str_c),
            'grouped_masked': f.demean(mask=m, groupby=c),
            'grouped_masked_str': f.demean(mask=m, groupby=str_c),
        }
        expected = {
            'vanilla': array(
                [[-1.500, -0.500, 0.500, 1.500],
                 [-0.625, 0.375, 1.375, -1.125],
                 [-0.625, 0.375, 1.375, -1.125],
                 [0.250, 1.250, -1.250, -0.250]],
            ),
            'masked': array(
                [[nan, -1.000, 0.000, 1.000],
                 [-0.500, nan, 1.500, -1.000],
                 [-0.166, 0.833, nan, -0.666],
                 [0.166, 1.166, -1.333, nan]],
            ),
            'grouped': array(
                [[-0.500, 0.500, -0.500, 0.500],
                 [-0.500, 0.500, 1.250, -1.250],
                 [-0.500, 0.500, 1.250, -1.250],
                 [-0.500, 0.500, -0.500, 0.500]],
            ),
            'grouped_masked': array(
                [[nan, 0.000, -0.500, 0.500],
                 [0.000, nan, 1.250, -1.250],
                 [-0.500, 0.500, nan, 0.000],
                 [-0.500, 0.500, 0.000, nan]]
            )
        }
        # Changing the classifier dtype shouldn't affect anything.
        expected['grouped_str'] = expected['grouped']
        expected['grouped_masked_str'] = expected['grouped_masked']
        self.check_terms(
            terms,
            expected,
            initial_workspace={
                f: factor_data,
                c: classifier_data,
                str_c: string_classifier_data,
                m: filter_data,
            },
            mask=self.build_mask(self.ones_mask(shape=factor_data.shape)),
            # The hand-computed values aren't very precise (in particular,
            # we truncate repeating decimals at 3 places) This is just
            # asserting that the example isn't misleading by being totally
            # wrong.
            check=partial(check_allclose, atol=0.001),
        )
    def test_winsorize_hand_computed(self):
        """
        Test the hand-computed example in factor.winsorize.
        """
        f = self.f
        m = Mask()
        c = C()
        str_c = C(dtype=categorical_dtype, missing_value=None)
        factor_data = array([
            [1., 2., 3., 4., 5., 6.],
            [1., 8., 27., 64., 125., 216.],
            [6., 5., 4., 3., 2., 1.]
        ])
        # False on the main diagonal: those cells are masked out.
        filter_data = array(
            [[False, True, True, True, True, True],
             [True, False, True, True, True, True],
             [True, True, False, True, True, True]],
            dtype=bool,
        )
        # Columns 0-2 are group 1; columns 3-5 are group 2.
        classifier_data = array(
            [[1, 1, 1, 2, 2, 2],
             [1, 1, 1, 2, 2, 2],
             [1, 1, 1, 2, 2, 2]],
            dtype=int64_dtype,
        )
        string_classifier_data = LabelArray(
            classifier_data.astype(str).astype(object),
            missing_value=None,
        )
        terms = {
            'winsor_1': f.winsorize(
                min_percentile=0.33,
                max_percentile=0.67
            ),
            'winsor_2': f.winsorize(
                min_percentile=0.49,
                max_percentile=1
            ),
            'winsor_3': f.winsorize(
                min_percentile=0,
                max_percentile=.67
            ),
            'masked': f.winsorize(
                min_percentile=0.33,
                max_percentile=0.67,
                mask=m
            ),
            'grouped': f.winsorize(
                min_percentile=0.34,
                max_percentile=0.66,
                groupby=c
            ),
            'grouped_str': f.winsorize(
                min_percentile=0.34,
                max_percentile=0.66,
                groupby=str_c
            ),
            'grouped_masked': f.winsorize(
                min_percentile=0.34,
                max_percentile=0.66,
                mask=m,
                groupby=c
            ),
            'grouped_masked_str': f.winsorize(
                min_percentile=0.34,
                max_percentile=0.66,
                mask=m,
                groupby=str_c
            ),
        }
        expected = {
            'winsor_1': array([
                [2., 2., 3., 4., 5., 5.],
                [8., 8., 27., 64., 125., 125.],
                [5., 5., 4., 3., 2., 2.]
            ]),
            'winsor_2': array([
                [3.0, 3., 3., 4., 5., 6.],
                [27., 27., 27., 64., 125., 216.],
                [6.0, 5., 4., 3., 3., 3.]
            ]),
            'winsor_3': array([
                [1., 2., 3., 4., 5., 5.],
                [1., 8., 27., 64., 125., 125.],
                [5., 5., 4., 3., 2., 1.]
            ]),
            'masked': array([
                [nan, 3., 3., 4., 5., 5.],
                [27., nan, 27., 64., 125., 125.],
                [5.0, 5., nan, 3., 2., 2.]
            ]),
            'grouped': array([
                [2., 2., 2., 5., 5., 5.],
                [8., 8., 8., 125., 125., 125.],
                [5., 5., 5., 2., 2., 2.]
            ]),
            'grouped_masked': array([
                [nan, 2., 3., 5., 5., 5.],
                [1.0, nan, 27., 125., 125., 125.],
                [6.0, 5., nan, 2., 2., 2.]
            ]),
        }
        # Changing the classifier dtype shouldn't affect anything.
        expected['grouped_str'] = expected['grouped']
        expected['grouped_masked_str'] = expected['grouped_masked']
        self.check_terms(
            terms,
            expected,
            initial_workspace={
                f: factor_data,
                c: classifier_data,
                str_c: string_classifier_data,
                m: filter_data,
            },
            mask=self.build_mask(self.ones_mask(shape=factor_data.shape)),
            check=partial(check_allclose, atol=0.001),
        )
def test_winsorize_bad_bounds(self):
"""
Test out of bounds input for factor.winsorize.
"""
f = self.f
bad_percentiles = [
(-.1, 1),
(0, 95),
(5, 95),
(5, 5),
(.6, .4)
]
for min_, max_ in bad_percentiles:
with self.assertRaises(BadPercentileBounds):
f.winsorize(min_percentile=min_, max_percentile=max_)
    @parameter_space(
        seed_value=range(1, 2),
        normalizer_name_and_func=[
            ('demean', {}, lambda row: row - nanmean(row)),
            ('zscore', {}, lambda row: (row - nanmean(row)) / nanstd(row)),
            (
                'winsorize',
                {"min_percentile": 0.25, "max_percentile": 0.75},
                lambda row: scipy_winsorize(
                    row,
                    limits=0.25,
                )
            ),
        ],
        add_nulls_to_factor=(False, True,),
    )
    def test_normalizations_randomized(self,
                                       seed_value,
                                       normalizer_name_and_func,
                                       add_nulls_to_factor):
        """Normalizers should agree with a naive row-wise reference impl."""
        name, kwargs, func = normalizer_name_and_func
        shape = (20, 20)
        # All Trues.
        nomask = self.ones_mask(shape=shape)
        # Falses on main diagonal.
        eyemask = self.eye_mask(shape=shape)
        # Falses on other diagonal.
        eyemask90 = rot90(eyemask)
        # Falses on both diagonals.
        xmask = eyemask & eyemask90
        # Block of random data.
        factor_data = self.randn_data(seed=seed_value, shape=shape)
        if add_nulls_to_factor:
            factor_data = where(eyemask, factor_data, nan)
        # Cycles of 0, 1, 2, 0, 1, 2, ...
        classifier_data = (
            (self.arange_data(shape=shape, dtype=int64_dtype) + seed_value) % 3
        )
        # With -1s on main diagonal.
        classifier_data_eyenulls = where(eyemask, classifier_data, -1)
        # With -1s on opposite diagonal.
        classifier_data_eyenulls90 = where(eyemask90, classifier_data, -1)
        # With -1s on both diagonals.
        classifier_data_xnulls = where(xmask, classifier_data, -1)
        f = self.f
        c = C()
        c_with_nulls = OtherC()
        m = Mask()
        method = partial(getattr(f, name), **kwargs)
        terms = {
            'vanilla': method(),
            'masked': method(mask=m),
            'grouped': method(groupby=c),
            'grouped_with_nulls': method(groupby=c_with_nulls),
            'both': method(mask=m, groupby=c),
            'both_with_nulls': method(mask=m, groupby=c_with_nulls),
        }
        expected = {
            'vanilla': apply_along_axis(func, 1, factor_data,),
            'masked': where(
                eyemask,
                grouped_apply(factor_data, eyemask, func),
                nan,
            ),
            'grouped': grouped_apply(
                factor_data,
                classifier_data,
                func,
            ),
            # If the classifier has nulls, we should get NaNs in the
            # corresponding locations in the output.
            'grouped_with_nulls': where(
                eyemask90,
                grouped_apply(factor_data, classifier_data_eyenulls90, func),
                nan,
            ),
            # Passing a mask with a classifier should behave as though the
            # classifier had nulls where the mask was False.
            'both': where(
                eyemask,
                grouped_apply(
                    factor_data,
                    classifier_data_eyenulls,
                    func,
                ),
                nan,
            ),
            'both_with_nulls': where(
                xmask,
                grouped_apply(
                    factor_data,
                    classifier_data_xnulls,
                    func,
                ),
                nan,
            )
        }
        self.check_terms(
            terms=terms,
            expected=expected,
            initial_workspace={
                f: factor_data,
                c: classifier_data,
                c_with_nulls: classifier_data_eyenulls90,
                Mask(): eyemask,
            },
            mask=self.build_mask(nomask),
        )
@parameter_space(method_name=['demean', 'zscore'])
def test_cant_normalize_non_float(self, method_name):
class DateFactor(Factor):
dtype = datetime64ns_dtype
inputs = ()
window_length = 0
d = DateFactor()
with self.assertRaises(TypeError) as e:
getattr(d, method_name)()
errmsg = str(e.exception)
expected = (
"{normalizer}() is only defined on Factors of dtype float64,"
" but it was called on a Factor of dtype datetime64[ns]."
).format(normalizer=method_name)
self.assertEqual(errmsg, expected)
    @parameter_space(seed=[1, 2, 3])
    def test_quantiles_unmasked(self, seed):
        """quantiles() should bucket increasing rows evenly, in any order."""
        permute = partial(permute_rows, seed)
        shape = (6, 6)
        # Shuffle the input rows to verify that we don't depend on the order.
        # Take the log to ensure that we don't depend on linear scaling or
        # integrality of inputs
        factor_data = permute(log1p(arange(36, dtype=float).reshape(shape)))
        f = self.f
        # Apply the same shuffle we applied to the input rows to our
        # expectations. Doing it this way makes it obvious that our
        # expectation corresponds to our input, while still testing against
        # a range of input orderings.
        permuted_array = compose(permute, partial(array, dtype=int64_dtype))
        self.check_terms(
            terms={
                '2': f.quantiles(bins=2),
                '3': f.quantiles(bins=3),
                '6': f.quantiles(bins=6),
            },
            initial_workspace={
                f: factor_data,
            },
            expected={
                # The values in the input are all increasing, so the first half
                # of each row should be in the bottom bucket, and the second
                # half should be in the top bucket.
                '2': permuted_array([[0, 0, 0, 1, 1, 1],
                                     [0, 0, 0, 1, 1, 1],
                                     [0, 0, 0, 1, 1, 1],
                                     [0, 0, 0, 1, 1, 1],
                                     [0, 0, 0, 1, 1, 1],
                                     [0, 0, 0, 1, 1, 1]]),
                # Similar for three buckets.
                '3': permuted_array([[0, 0, 1, 1, 2, 2],
                                     [0, 0, 1, 1, 2, 2],
                                     [0, 0, 1, 1, 2, 2],
                                     [0, 0, 1, 1, 2, 2],
                                     [0, 0, 1, 1, 2, 2],
                                     [0, 0, 1, 1, 2, 2]]),
                # In the limiting case, we just have every column different.
                '6': permuted_array([[0, 1, 2, 3, 4, 5],
                                     [0, 1, 2, 3, 4, 5],
                                     [0, 1, 2, 3, 4, 5],
                                     [0, 1, 2, 3, 4, 5],
                                     [0, 1, 2, 3, 4, 5],
                                     [0, 1, 2, 3, 4, 5]]),
            },
            mask=self.build_mask(self.ones_mask(shape=shape)),
        )
    @parameter_space(seed=[1, 2, 3])
    def test_quantiles_masked(self, seed):
        """Masked or NaN cells should get quantile -1; others rebucket evenly."""
        permute = partial(permute_rows, seed)
        # 7 x 7 so that we divide evenly into 2/3/6-tiles after including the
        # nan value in each row.
        shape = (7, 7)
        # Shuffle the input rows to verify that we don't depend on the order.
        # Take the log to ensure that we don't depend on linear scaling or
        # integrality of inputs
        factor_data = permute(log1p(arange(49, dtype=float).reshape(shape)))
        factor_data_w_nans = where(
            permute(rot90(self.eye_mask(shape=shape))),
            factor_data,
            nan,
        )
        mask_data = permute(self.eye_mask(shape=shape))
        f = F()
        f_nans = OtherF()
        m = Mask()
        # Apply the same shuffle we applied to the input rows to our
        # expectations. Doing it this way makes it obvious that our
        # expectation corresponds to our input, while still testing against
        # a range of input orderings.
        permuted_array = compose(permute, partial(array, dtype=int64_dtype))
        self.check_terms(
            terms={
                '2_masked': f.quantiles(bins=2, mask=m),
                '3_masked': f.quantiles(bins=3, mask=m),
                '6_masked': f.quantiles(bins=6, mask=m),
                '2_nans': f_nans.quantiles(bins=2),
                '3_nans': f_nans.quantiles(bins=3),
                '6_nans': f_nans.quantiles(bins=6),
            },
            initial_workspace={
                f: factor_data,
                f_nans: factor_data_w_nans,
                m: mask_data,
            },
            expected={
                # Expected results here are the same as in
                # test_quantiles_unmasked, except with diagonals of -1s
                # interpolated to match the effects of masking and/or input
                # nans.
                '2_masked': permuted_array([[-1, 0, 0, 0, 1, 1, 1],
                                            [0, -1, 0, 0, 1, 1, 1],
                                            [0, 0, -1, 0, 1, 1, 1],
                                            [0, 0, 0, -1, 1, 1, 1],
                                            [0, 0, 0, 1, -1, 1, 1],
                                            [0, 0, 0, 1, 1, -1, 1],
                                            [0, 0, 0, 1, 1, 1, -1]]),
                '3_masked': permuted_array([[-1, 0, 0, 1, 1, 2, 2],
                                            [0, -1, 0, 1, 1, 2, 2],
                                            [0, 0, -1, 1, 1, 2, 2],
                                            [0, 0, 1, -1, 1, 2, 2],
                                            [0, 0, 1, 1, -1, 2, 2],
                                            [0, 0, 1, 1, 2, -1, 2],
                                            [0, 0, 1, 1, 2, 2, -1]]),
                '6_masked': permuted_array([[-1, 0, 1, 2, 3, 4, 5],
                                            [0, -1, 1, 2, 3, 4, 5],
                                            [0, 1, -1, 2, 3, 4, 5],
                                            [0, 1, 2, -1, 3, 4, 5],
                                            [0, 1, 2, 3, -1, 4, 5],
                                            [0, 1, 2, 3, 4, -1, 5],
                                            [0, 1, 2, 3, 4, 5, -1]]),
                '2_nans': permuted_array([[0, 0, 0, 1, 1, 1, -1],
                                          [0, 0, 0, 1, 1, -1, 1],
                                          [0, 0, 0, 1, -1, 1, 1],
                                          [0, 0, 0, -1, 1, 1, 1],
                                          [0, 0, -1, 0, 1, 1, 1],
                                          [0, -1, 0, 0, 1, 1, 1],
                                          [-1, 0, 0, 0, 1, 1, 1]]),
                '3_nans': permuted_array([[0, 0, 1, 1, 2, 2, -1],
                                          [0, 0, 1, 1, 2, -1, 2],
                                          [0, 0, 1, 1, -1, 2, 2],
                                          [0, 0, 1, -1, 1, 2, 2],
                                          [0, 0, -1, 1, 1, 2, 2],
                                          [0, -1, 0, 1, 1, 2, 2],
                                          [-1, 0, 0, 1, 1, 2, 2]]),
                '6_nans': permuted_array([[0, 1, 2, 3, 4, 5, -1],
                                          [0, 1, 2, 3, 4, -1, 5],
                                          [0, 1, 2, 3, -1, 4, 5],
                                          [0, 1, 2, -1, 3, 4, 5],
                                          [0, 1, -1, 2, 3, 4, 5],
                                          [0, -1, 1, 2, 3, 4, 5],
                                          [-1, 0, 1, 2, 3, 4, 5]]),
            },
            mask=self.build_mask(self.ones_mask(shape=shape)),
        )
    def test_quantiles_uneven_buckets(self):
        """quantiles() with bins not dividing the row length should still work."""
        permute = partial(permute_rows, 5)
        shape = (5, 5)
        # Log of shuffled increasing data; see test_quantiles_unmasked for
        # why we shuffle and take the log.
        factor_data = permute(log1p(arange(25, dtype=float).reshape(shape)))
        mask_data = permute(self.eye_mask(shape=shape))
        f = F()
        m = Mask()
        permuted_array = compose(permute, partial(array, dtype=int64_dtype))
        self.check_terms(
            terms={
                '3_masked': f.quantiles(bins=3, mask=m),
                '7_masked': f.quantiles(bins=7, mask=m),
            },
            initial_workspace={
                f: factor_data,
                m: mask_data,
            },
            expected={
                '3_masked': permuted_array([[-1, 0, 0, 1, 2],
                                            [0, -1, 0, 1, 2],
                                            [0, 0, -1, 1, 2],
                                            [0, 0, 1, -1, 2],
                                            [0, 0, 1, 2, -1]]),
                '7_masked': permuted_array([[-1, 0, 2, 4, 6],
                                            [0, -1, 2, 4, 6],
                                            [0, 2, -1, 4, 6],
                                            [0, 2, 4, -1, 6],
                                            [0, 2, 4, 6, -1]]),
            },
            mask=self.build_mask(self.ones_mask(shape=shape)),
        )
def test_quantile_helpers(self):
f = self.f
m = Mask()
self.assertIs(f.quartiles(), f.quantiles(bins=4))
self.assertIs(f.quartiles(mask=m), f.quantiles(bins=4, mask=m))
self.assertIsNot(f.quartiles(), f.quartiles(mask=m))
self.assertIs(f.quintiles(), f.quantiles(bins=5))
self.assertIs(f.quintiles(mask=m), f.quantiles(bins=5, mask=m))
self.assertIsNot(f.quintiles(), f.quintiles(mask=m))
self.assertIs(f.deciles(), f.quantiles(bins=10))
self.assertIs(f.deciles(mask=m), f.quantiles(bins=10, mask=m))
self.assertIsNot(f.deciles(), f.deciles(mask=m))
class ShortReprTestCase(TestCase):
    """
    Tests for short_repr methods of Factors.
    """
    def test_demean(self):
        """demean() should repr as a GroupedRowTransform."""
        r = F().demean().short_repr()
        self.assertEqual(r, "GroupedRowTransform('demean')")
    def test_zscore(self):
        """zscore() should repr as a GroupedRowTransform."""
        r = F().zscore().short_repr()
        self.assertEqual(r, "GroupedRowTransform('zscore')")
    def test_winsorize(self):
        """winsorize() should repr as a GroupedRowTransform."""
        r = F().winsorize(min_percentile=.05, max_percentile=.95).short_repr()
        self.assertEqual(r, "GroupedRowTransform('winsorize')")
    def test_recarray_field_repr(self):
        """Output fields should append '.<name>' to the factor's short_repr."""
        class MultipleOutputs(CustomFactor):
            outputs = ['a', 'b']
            inputs = ()
            window_length = 5
            def short_repr(self):
                return "CustomRepr()"
        a = MultipleOutputs().a
        b = MultipleOutputs().b
        self.assertEqual(a.short_repr(), "CustomRepr().a")
        self.assertEqual(b.short_repr(), "CustomRepr().b")
    def test_latest_repr(self):
        """Latest terms should repr as '<DataSet>.<column>.latest'."""
        class SomeDataSet(DataSet):
            a = Column(dtype=float64_dtype)
            b = Column(dtype=float64_dtype)
        self.assertEqual(
            SomeDataSet.a.latest.short_repr(),
            "SomeDataSet.a.latest"
        )
        self.assertEqual(
            SomeDataSet.b.latest.short_repr(),
            "SomeDataSet.b.latest"
        )
class TestWindowSafety(TestCase):
    """Tests for how the window_safe flag propagates through transforms."""
    def test_zscore_is_window_safe(self):
        # zscore output is scale-invariant, so it is always window safe.
        self.assertTrue(F().zscore().window_safe)
    @parameter_space(__fail_fast=True, is_window_safe=[True, False])
    def test_window_safety_propagates_to_recarray_fields(self, is_window_safe):
        """Each output field should inherit the parent factor's window_safe."""
        class MultipleOutputs(CustomFactor):
            outputs = ['a', 'b']
            inputs = ()
            window_length = 5
            window_safe = is_window_safe
        mo = MultipleOutputs()
        for attr in mo.a, mo.b:
            self.assertEqual(attr.window_safe, mo.window_safe)
    def test_demean_is_window_safe_if_input_is_window_safe(self):
        """demean() should be window safe iff its input is."""
        self.assertFalse(F().demean().window_safe)
        self.assertFalse(F(window_safe=False).demean().window_safe)
        self.assertTrue(F(window_safe=True).demean().window_safe)
    def test_winsorize_is_window_safe_if_input_is_window_safe(self):
        """winsorize() should be window safe iff its input is."""
        self.assertFalse(
            F().winsorize(min_percentile=.05, max_percentile=.95).window_safe
        )
        self.assertFalse(
            F(window_safe=False).winsorize(
                min_percentile=.05,
                max_percentile=.95
            ).window_safe
        )
        self.assertTrue(
            F(window_safe=True).winsorize(
                min_percentile=.05,
                max_percentile=.95
            ).window_safe
        )
class TestPostProcessAndToWorkSpaceValue(ZiplineTestCase):
    """postprocess/to_workspace_value should round-trip pipeline output."""
    @parameter_space(dtype_=(float64_dtype, datetime64ns_dtype))
    def test_reversability(self, dtype_):
        """to_workspace_value should invert postprocess for both dtypes."""
        class F(Factor):
            inputs = ()
            dtype = dtype_
            window_length = 0
        f = F()
        column_data = array(
            [[0, f.missing_value],
             [1, f.missing_value],
             [2, 3]],
            dtype=dtype_,
        )
        # postprocess of a float/datetime factor is the identity.
        assert_equal(f.postprocess(column_data.ravel()), column_data.ravel())
        # only include the non-missing data
        pipeline_output = pd.Series(
            data=array([0, 1, 2, 3], dtype=dtype_),
            index=pd.MultiIndex.from_arrays([
                [pd.Timestamp('2014-01-01'),
                 pd.Timestamp('2014-01-02'),
                 pd.Timestamp('2014-01-03'),
                 pd.Timestamp('2014-01-03')],
                [0, 0, 0, 1],
            ]),
        )
        assert_equal(
            f.to_workspace_value(pipeline_output, pd.Index([0, 1])),
            column_data,
        )
| |
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.utils import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
    def __init__(self, query, connection, using):
        """Compile ``query`` into SQL for ``connection`` (db alias ``using``)."""
        self.query = query
        self.connection = connection
        self.using = using
        # Memoizes quote_name_unless_alias results per name.
        self.quote_cache = {}
        # When ordering a queryset with distinct on a column not part of the
        # select set, the ordering column needs to be added to the select
        # clause. This information is needed both in SQL construction and
        # masking away the ordering selects from the returned row.
        self.ordering_aliases = []
        self.ordering_params = []
    def pre_sql_setup(self):
        """
        Does any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.
        # TODO: after the query has been executed, the altered state should be
        # cleaned. We are not using a clone() of the query here.
        """
        # Make sure the query selects from at least its base table.
        if not self.query.tables:
            self.query.join((None, self.query.get_meta().db_table, None))
        # Set up default columns for inherited models when nothing was
        # selected explicitly.
        if (not self.query.select and self.query.default_cols and not
                self.query.included_inherited_models):
            self.query.setup_inherited_models()
        # Populate related selections once for select_related() queries.
        if self.query.select_related and not self.query.related_select_cols:
            self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.refcounts_before = self.query.alias_refcount.copy()
out_cols, s_params = self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering' and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
having_group_by = self.query.having.get_cols()
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
params.extend(o_params)
result.append(', '.join(out_cols + self.ordering_aliases))
params.extend(s_params)
params.extend(self.ordering_params)
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
# If we've been asked for a NOWAIT query but the backend does not support it,
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(self.refcounts_before)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement, as well as
a list any extra parameters that need to be included. If no columns
have been specified, returns all columns relating to fields in the
model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col, _ in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = col.as_sql(qn, self.connection)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
for (table, col), _ in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.get_meta().ordering
or [])
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
params = []
ordering_params = []
for pos, field in enumerate(ordering):
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif not self.query._extra or get_order_dir(field)[0] not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, cols, order in self.find_ordering_name(field,
self.query.get_meta(), default_order=asc):
for col in cols:
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if col not in self.query.extra_select:
sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
ordering_aliases.append(sql)
ordering_params.extend(self.query.extra[col][1])
else:
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
ordering_params.extend(params)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra[col])
self.ordering_aliases = ordering_aliases
self.ordering_params = ordering_params
return result, params, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and path and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(self.query.alias_map[j].table_name for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(alias, [t.column for t in targets], order)]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_ordering and get_distinct. This method will
call query.setup_joins, handle refcounts and then promote the joins.
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
# We will later on need to promote those joins that were added to the
# query afresh above.
joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
alias = joins[-1]
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
# Ordering or distinct must not affect the returned set, and INNER
# JOINS for nullable fields could do this.
self.query.promote_joins(joins_to_promote)
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
from_params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = '' if alias == name else (' %s' % alias)
if join_type and not first:
extra_cond = join_field.get_extra_restriction(
self.query.where_class, alias, lhs)
if extra_cond:
extra_sql, extra_params = extra_cond.as_sql(
qn, self.connection)
extra_sql = 'AND (%s)' % extra_sql
from_params.extend(extra_params)
else:
extra_sql = ""
result.append('%s %s%s ON ('
% (join_type, qn(name), alias_str))
for index, (lhs_col, rhs_col) in enumerate(join_cols):
if index != 0:
result.append(' AND ')
result.append('%s.%s = %s.%s' %
(qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
result.append('%s)' % extra_sql)
else:
connector = '' if first else ', '
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# calls increments the refcount, so an alias refcount of one means
# this is the only reference.
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = '' if first else ', '
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, from_params
def get_grouping(self, having_group_by, ordering_group_by):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
# Just the column, not the fields.
select_cols = [s[0] for s in select_cols]
if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.get_meta().db_table, self.query.get_meta().pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + having_group_by + select_cols
for col in cols:
col_params = ()
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
sql, col_params = col.as_sql(qn, self.connection)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
params.extend(col_params)
seen.add(sql)
# Still, we need to add all stuff in ordering (except if the backend can
# group by just by PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so, we add same SQL in "has params" case.
if order not in seen or params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None, nullable=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
# The get_fields_with_model() returns None for fields that live
# in the field's local model. So, for those fields we want to use
# the f.model - that is the field's local model.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
promote = nullable or f.null
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias, outer_if_first=promote)
alias = joins[-1]
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.concrete_fields))
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
next, restricted, new_nullable)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.related_query_name()], opts, root_alias, outer_if_first=True)
alias = joins[-1]
from_parent = (opts.model if issubclass(model, opts.model)
else None)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, from_parent=from_parent)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field
in zip(columns, model._meta.concrete_fields))
next = requested.get(f.related_query_name(), {})
# Use True here because we are looking at the _reverse_ side of
# the relation, which is always nullable.
new_nullable = True
self.fill_related_selections(model._meta, alias, cur_depth + 1,
next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_cols isn't populated until
# execute_sql() has been called.
# We also include types of fields of related models that
# will be included via select_related() for the benefit
# of MySQL/MySQLdb when boolean fields are involved
# (#15040).
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select:
fields = [f.field for f in self.query.select]
else:
fields = self.query.get_meta().concrete_fields
fields = fields + [f.field for f in self.query.related_select_cols]
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.get_meta().db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
aggregate_start = len(self.query.extra_select) + len(loaded_fields)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple(
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
) + tuple(row[aggregate_end:])
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery). It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.ordering_aliases:
return cursor.fetchone()[:-len(self.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.ordering_aliases:
result = order_modified_iter(cursor, len(self.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
def as_subquery_condition(self, alias, columns, qn):
inner_qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs = '%s.%s' % (inner_qn(select_col.col[0]), qn2(select_col.col[1]))
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
    """
    Compiles INSERT queries, supporting bulk inserts and backends that can
    return the inserted row's primary key.
    """
    def __init__(self, *args, **kwargs):
        # When True, execute_sql() will fetch and return the inserted pk;
        # set by execute_sql(return_id=True).
        self.return_id = False
        super(SQLInsertCompiler, self).__init__(*args, **kwargs)
    def placeholder(self, field, val):
        """
        Returns the SQL placeholder string to use for 'val' being saved into
        'field'.
        """
        if field is None:
            # A field value of None means the value is raw.
            return val
        elif hasattr(field, 'get_placeholder'):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
            return field.get_placeholder(val, self.connection)
        else:
            # Return the common case for the placeholder
            return '%s'
    def as_sql(self):
        """
        Builds the INSERT statement(s) for self.query.objs. Returns a list of
        (sql, params) pairs: a single pair for bulk/pk-returning inserts,
        otherwise one pair per object.
        """
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.get_meta()
        result = ['INSERT INTO %s' % qn(opts.db_table)]
        has_fields = bool(self.query.fields)
        fields = self.query.fields if has_fields else [opts.pk]
        result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
        if has_fields:
            # NOTE: params and values intentionally alias the same list of
            # per-object prepared values.
            params = values = [
                [
                    f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
                    for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
            not self.return_id and self.connection.features.has_bulk_insert)
        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [
                [self.placeholder(field, v) for field, v in zip(fields, val)]
                for val in values
            ]
            # Oracle Spatial needs to remove some values due to #10888
            params = self.connection.ops.modify_insert_params(placeholders, params)
        if self.return_id and self.connection.features.can_return_id_from_insert:
            params = params[0]
            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
            result.append("VALUES (%s)" % ", ".join(placeholders[0]))
            r_fmt, r_params = self.connection.ops.return_insert_id()
            # Skip empty r_fmt to allow subclasses to customize behaviour for
            # 3rd party backends. Refs #19096.
            if r_fmt:
                result.append(r_fmt % col)
                params += r_params
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple(v for val in values for v in val))]
        else:
            return [
                (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
                for p, vals in zip(placeholders, params)
            ]
    def execute_sql(self, return_id=False):
        """
        Executes the insert statement(s). If return_id is True (only valid
        when inserting a single object), returns the new row's primary key.
        """
        assert not (return_id and len(self.query.objs) != 1)
        self.return_id = return_id
        cursor = self.connection.cursor()
        for sql, params in self.as_sql():
            cursor.execute(sql, params)
        if not (return_id and cursor):
            return
        if self.connection.features.can_return_id_from_insert:
            return self.connection.ops.fetch_returned_insert_id(cursor)
        return self.connection.ops.last_insert_id(cursor,
                self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
    """
    Compiles DELETE queries against a single table.
    """
    def as_sql(self):
        """
        Build the DELETE statement for this query.

        Returns a (sql, params) pair, with params as a tuple.
        """
        assert len(self.query.tables) == 1, \
            "Can only delete from one table at a time."
        quote = self.quote_name_unless_alias
        where_sql, where_params = self.query.where.as_sql(
            qn=quote, connection=self.connection)
        pieces = ['DELETE FROM %s' % quote(self.query.tables[0])]
        if where_sql:
            pieces.append('WHERE %s' % where_sql)
        return ' '.join(pieces), tuple(where_params)
class SQLUpdateCompiler(SQLCompiler):
    """
    Compiles UPDATE queries, including the follow-up updates required for
    multi-table scenarios.
    """
    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        self.pre_sql_setup()
        if not self.query.values:
            return '', ()
        table = self.query.tables[0]
        qn = self.quote_name_unless_alias
        result = ['UPDATE %s' % qn(table)]
        result.append('SET')
        values, update_params = [], []
        for field, model, val in self.query.values:
            if hasattr(val, 'prepare_database_save'):
                val = val.prepare_database_save(field)
            else:
                val = field.get_db_prep_save(val, connection=self.connection)
            # Getting the placeholder for the field.
            if hasattr(field, 'get_placeholder'):
                placeholder = field.get_placeholder(val, self.connection)
            else:
                placeholder = '%s'
            if hasattr(val, 'evaluate'):
                # Values with an evaluate() method are compiled to SQL via
                # SQLEvaluator; joins are not allowed in an UPDATE.
                val = SQLEvaluator(val, self.query, allow_joins=False)
            name = field.column
            if hasattr(val, 'as_sql'):
                sql, params = val.as_sql(qn, self.connection)
                values.append('%s = %s' % (qn(name), sql))
                update_params.extend(params)
            elif val is not None:
                values.append('%s = %s' % (qn(name), placeholder))
                update_params.append(val)
            else:
                values.append('%s = NULL' % qn(name))
        if not values:
            return '', ()
        result.append(', '.join(values))
        where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
        if where:
            result.append('WHERE %s' % where)
        # SET params precede WHERE params in the final statement.
        return ' '.join(result), tuple(update_params + params)
    def execute_sql(self, result_type):
        """
        Execute the specified update. Returns the number of rows affected by
        the primary update query. The "primary update query" is the first
        non-empty query that is executed. Row counts for any subsequent,
        related queries are not available.
        """
        cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
        rows = cursor.rowcount if cursor else 0
        is_empty = cursor is None
        del cursor
        for query in self.query.get_related_updates():
            aux_rows = query.get_compiler(self.using).execute_sql(result_type)
            if is_empty:
                rows = aux_rows
                is_empty = False
        return rows
    def pre_sql_setup(self):
        """
        If the update depends on results from other tables, we need to do some
        munging of the "where" conditions to match the format required for
        (portable) SQL updates. That is done here.
        Further, if we are going to be running multiple updates, we pull out
        the id values to update at this point so that they don't change as a
        result of the progressive updates.
        """
        self.query.select_related = False
        self.query.clear_ordering(True)
        super(SQLUpdateCompiler, self).pre_sql_setup()
        count = self.query.count_active_tables()
        if not self.query.related_updates and count == 1:
            return
        # We need to use a sub-select in the where clause to filter on things
        # from other tables.
        query = self.query.clone(klass=Query)
        query._extra = {}
        query.select = []
        query.add_fields([query.get_meta().pk.name])
        # Recheck the count - it is possible that fiddling with the select
        # fields above removes tables from the query. Refs #18304.
        count = query.count_active_tables()
        if not self.query.related_updates and count == 1:
            return
        must_pre_select = count > 1 and not self.connection.features.update_can_self_select
        # Now we adjust the current query: reset the where clause and get rid
        # of all the tables we don't need (since they're in the sub-select).
        self.query.where = self.query.where_class()
        if self.query.related_updates or must_pre_select:
            # Either we're using the idents in multiple update queries (so
            # don't want them to change), or the db backend doesn't support
            # selecting from the updating table (e.g. MySQL).
            idents = []
            for rows in query.get_compiler(self.using).execute_sql(MULTI):
                idents.extend(r[0] for r in rows)
            self.query.add_filter(('pk__in', idents))
            self.query.related_ids = idents
        else:
            # The fast path. Filters and updates in one query.
            self.query.add_filter(('pk__in', query))
        for alias in self.query.tables[1:]:
            self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
    def as_sql(self, qn=None):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        if qn is None:
            qn = self.quote_name_unless_alias

        # Collect one SQL fragment (and its params) per selected aggregate.
        fragments = []
        params = []
        for aggregate in self.query.aggregate_select.values():
            fragment, fragment_params = aggregate.as_sql(qn, self.connection)
            fragments.append(fragment)
            params.extend(fragment_params)

        sql = 'SELECT %s FROM (%s) subquery' % (
            ', '.join(fragments), self.query.subquery)
        return sql, tuple(params) + self.query.sub_params
class SQLDateCompiler(SQLCompiler):
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        can_resolve = hasattr(self, 'resolve_columns')
        if can_resolve:
            from django.db.models.fields import DateField
            fields = [DateField()]
        else:
            from django.db.backends.utils import typecast_date
            needs_string_cast = self.connection.features.needs_datetime_string_cast

        # The date column follows any extra-select columns in each row.
        col = len(self.query.extra_select)
        for chunk in self.execute_sql(MULTI):
            for row in chunk:
                if can_resolve:
                    value = self.resolve_columns(row, fields)[col]
                else:
                    value = row[col]
                    if needs_string_cast:
                        value = typecast_date(str(value))
                # Some backends hand back full datetimes; trim to a date.
                if isinstance(value, datetime.datetime):
                    value = value.date()
                yield value
class SQLDateTimeCompiler(SQLCompiler):
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        can_resolve = hasattr(self, 'resolve_columns')
        if can_resolve:
            from django.db.models.fields import DateTimeField
            fields = [DateTimeField()]
        else:
            from django.db.backends.utils import typecast_timestamp
            needs_string_cast = self.connection.features.needs_datetime_string_cast

        # The datetime column follows any extra-select columns in each row.
        col = len(self.query.extra_select)
        for chunk in self.execute_sql(MULTI):
            for row in chunk:
                # Local renamed from `datetime` to avoid shadowing the module.
                if can_resolve:
                    value = self.resolve_columns(row, fields)[col]
                else:
                    value = row[col]
                    if needs_string_cast:
                        value = typecast_timestamp(str(value))
                # Datetimes are artifically returned in UTC on databases that
                # don't support time zone. Restore the zone used in the query.
                if settings.USE_TZ:
                    if value is None:
                        raise ValueError("Database returned an invalid value "
                                         "in QuerySet.dates(). Are time zone "
                                         "definitions and pytz installed?")
                    value = value.replace(tzinfo=None)
                    value = timezone.make_aware(value, self.query.tzinfo)
                yield value
def order_modified_iter(cursor, trim, sentinel):
    """
    Yields blocks of rows from a cursor. We use this iterator in the special
    case when extra output columns have been added to support ordering
    requirements. We must trim those extra columns before anything else can use
    the results, since they're only needed to make the SQL valid.
    """
    while True:
        block = cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
        if block == sentinel:
            # Same stop condition as iter(callable, sentinel): equality.
            break
        yield [row[:-trim] for row in block]
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import pdb
import pickle
import time
import random
import keras
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS

# Experiment configuration; every option can be overridden on the command line.
flags.DEFINE_string('dataset_name', 'MNIST', 'Supported: MNIST, CIFAR-10, ImageNet, SVHN.')
flags.DEFINE_string('model_name', 'cleverhans', 'Supported: cleverhans, cleverhans_adv_trained and carlini for MNIST; carlini and DenseNet for CIFAR-10; ResNet50, VGG19, Inceptionv3 and MobileNet for ImageNet; tohinz for SVHN.')
# Fixed typo in user-facing help text: 'experiement' -> 'experiment'.
flags.DEFINE_boolean('select', True, 'Select correctly classified examples for the experiment.')
flags.DEFINE_integer('nb_examples', 100, 'The number of examples selected for attacks.')
flags.DEFINE_boolean('balance_sampling', False, 'Select the same number of examples for each class.')
flags.DEFINE_boolean('test_mode', False, 'Only select one sample for each class.')
flags.DEFINE_string('attacks', "FGSM?eps=0.1;BIM?eps=0.1&eps_iter=0.02;JSMA?targeted=next;CarliniL2?targeted=next&batch_size=100&max_iterations=1000;CarliniL2?targeted=next&batch_size=100&max_iterations=1000&confidence=2", 'Attack name and parameters in URL style, separated by semicolon.')
flags.DEFINE_float('clip', -1, 'L-infinity clip on the adversarial perturbations.')
flags.DEFINE_boolean('visualize', True, 'Output the image examples for each attack, enabled by default.')
flags.DEFINE_string('robustness', '', 'Supported: FeatureSqueezing.')
flags.DEFINE_string('detection', '', 'Supported: feature_squeezing.')
flags.DEFINE_boolean('detection_train_test_mode', True, 'Split into train/test datasets.')
flags.DEFINE_string('result_folder', "results", 'The output folder for results.')
flags.DEFINE_boolean('verbose', False, 'Stdout level. The hidden content will be saved to log files anyway.')

# Model names are matched case-insensitively throughout the pipeline.
FLAGS.model_name = FLAGS.model_name.lower()
def load_tf_session():
    """Create a TensorFlow session, register it with Keras, and return it."""
    # Seed the graph-level RNG first to improve reproducibility.
    tf.set_random_seed(1234)
    session = tf.Session()
    keras.backend.set_session(session)
    print("Created TensorFlow session and set Keras backend.")
    return session
def main(argv=None):
    """
    Run the full adversarial-examples experiment driven by FLAGS:
    load a dataset and pre-trained model, select test examples, generate
    adversarial examples per configured attack, write predictions and a
    CSV evaluation, then optionally run robustness/detection evaluations.
    """
    # 0. Select a dataset.
    from datasets import MNISTDataset, CIFAR10Dataset, ImageNetDataset, SVHNDataset
    from datasets import get_correct_prediction_idx, evaluate_adversarial_examples, calculate_mean_confidence, calculate_accuracy

    if FLAGS.dataset_name == "MNIST":
        dataset = MNISTDataset()
    elif FLAGS.dataset_name == "CIFAR-10":
        dataset = CIFAR10Dataset()
    elif FLAGS.dataset_name == "ImageNet":
        dataset = ImageNetDataset()
    elif FLAGS.dataset_name == "SVHN":
        dataset = SVHNDataset()
    else:
        # Fail early with a clear message instead of a NameError below.
        raise ValueError("Unsupported dataset: %s" % FLAGS.dataset_name)

    # 1. Load a dataset.
    print("\n===Loading %s data..." % FLAGS.dataset_name)
    if FLAGS.dataset_name == 'ImageNet':
        # InceptionV3 expects 299x299 inputs; the other ImageNet models 224x224.
        if FLAGS.model_name == 'inceptionv3':
            img_size = 299
        else:
            img_size = 224
        X_test_all, Y_test_all = dataset.get_test_data(img_size, 0, 200)
    else:
        X_test_all, Y_test_all = dataset.get_test_dataset()

    # 2. Load a trained model.
    sess = load_tf_session()
    keras.backend.set_learning_phase(0)
    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, dataset.image_size, dataset.image_size, dataset.num_channels))
    y = tf.placeholder(tf.float32, shape=(None, dataset.num_classes))
    with tf.variable_scope(FLAGS.model_name):
        """
        Create a model instance for prediction.
        The scaling argument, 'input_range_type': {1: [0,1], 2:[-0.5, 0.5], 3:[-1, 1]...}
        """
        model = dataset.load_model_by_name(FLAGS.model_name, logits=False, input_range_type=1)
        model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['acc'])

    # 3. Evaluate the trained model.
    # TODO: add top-5 accuracy for ImageNet.
    print("Evaluating the pre-trained model...")
    Y_pred_all = model.predict(X_test_all)
    mean_conf_all = calculate_mean_confidence(Y_pred_all, Y_test_all)
    accuracy_all = calculate_accuracy(Y_pred_all, Y_test_all)
    print('Test accuracy on raw legitimate examples %.4f' % (accuracy_all))
    print('Mean confidence on ground truth classes %.4f' % (mean_conf_all))

    # 4. Select some examples to attack.
    import hashlib
    from datasets import get_first_n_examples_id_each_class

    if FLAGS.select:
        # Filter out the misclassified examples.
        correct_idx = get_correct_prediction_idx(Y_pred_all, Y_test_all)
        if FLAGS.test_mode:
            # Only select the first example of each class.
            correct_and_selected_idx = get_first_n_examples_id_each_class(Y_test_all[correct_idx])
            selected_idx = [correct_idx[i] for i in correct_and_selected_idx]
        else:
            if not FLAGS.balance_sampling:
                selected_idx = correct_idx[:FLAGS.nb_examples]
            else:
                # Select the same number of examples for each class label.
                nb_examples_per_class = int(FLAGS.nb_examples / Y_test_all.shape[1])
                correct_and_selected_idx = get_first_n_examples_id_each_class(Y_test_all[correct_idx], n=nb_examples_per_class)
                selected_idx = [correct_idx[i] for i in correct_and_selected_idx]
    else:
        selected_idx = np.array(range(FLAGS.nb_examples))

    from utils.output import format_number_range
    selected_example_idx_ranges = format_number_range(sorted(selected_idx))
    print("Selected %d examples." % len(selected_idx))
    print("Selected index in test set (sorted): %s" % selected_example_idx_ranges)

    X_test, Y_test, Y_pred = X_test_all[selected_idx], Y_test_all[selected_idx], Y_pred_all[selected_idx]

    # The accuracy should be 100%.
    accuracy_selected = calculate_accuracy(Y_pred, Y_test)
    mean_conf_selected = calculate_mean_confidence(Y_pred, Y_test)
    print('Test accuracy on selected legitimate examples %.4f' % (accuracy_selected))
    print('Mean confidence on ground truth classes, selected %.4f\n' % (mean_conf_selected))

    # Record a task descriptor so results are identifiable and reproducible.
    task = {}
    task['dataset_name'] = FLAGS.dataset_name
    task['model_name'] = FLAGS.model_name
    task['accuracy_test'] = accuracy_all
    task['mean_confidence_test'] = mean_conf_all
    task['test_set_selected_length'] = len(selected_idx)
    task['test_set_selected_idx_ranges'] = selected_example_idx_ranges
    task['test_set_selected_idx_hash'] = hashlib.sha1(str(selected_idx).encode('utf-8')).hexdigest()
    task['accuracy_test_selected'] = accuracy_selected
    task['mean_confidence_test_selected'] = mean_conf_selected

    task_id = "%s_%d_%s_%s" % \
        (task['dataset_name'], task['test_set_selected_length'], task['test_set_selected_idx_hash'][:5], task['model_name'])

    FLAGS.result_folder = os.path.join(FLAGS.result_folder, task_id)
    if not os.path.isdir(FLAGS.result_folder):
        os.makedirs(FLAGS.result_folder)

    from utils.output import save_task_descriptor
    save_task_descriptor(FLAGS.result_folder, [task])

    # 5. Generate adversarial examples.
    from attacks import maybe_generate_adv_examples
    from utils.squeeze import reduce_precision_py
    from utils.parameter_parser import parse_params
    attack_string_hash = hashlib.sha1(FLAGS.attacks.encode('utf-8')).hexdigest()[:5]
    sample_string_hash = task['test_set_selected_idx_hash'][:5]

    from datasets.datasets_utils import get_next_class, get_least_likely_class
    Y_test_target_next = get_next_class(Y_test)
    Y_test_target_ll = get_least_likely_class(Y_pred)

    X_test_adv_list = []
    X_test_adv_discretized_list = []
    Y_test_adv_discretized_pred_list = []

    # Bug fix: materialize the filter() result. Under Python 3 a bare filter
    # iterator is exhausted by the attack loop below, which left the
    # robustness/detection steps iterating an empty sequence.
    attack_string_list = list(filter(lambda x: len(x) > 0, FLAGS.attacks.lower().split(';')))
    to_csv = []

    X_adv_cache_folder = os.path.join(FLAGS.result_folder, 'adv_examples')
    adv_log_folder = os.path.join(FLAGS.result_folder, 'adv_logs')
    predictions_folder = os.path.join(FLAGS.result_folder, 'predictions')
    for folder in [X_adv_cache_folder, adv_log_folder, predictions_folder]:
        if not os.path.isdir(folder):
            os.makedirs(folder)

    predictions_fpath = os.path.join(predictions_folder, "legitimate.npy")
    np.save(predictions_fpath, Y_pred, allow_pickle=False)

    if FLAGS.clip >= 0:
        epsilon = FLAGS.clip
        print("Clip the adversarial perturbations by +-%f" % epsilon)
        max_clip = np.clip(X_test + epsilon, 0, 1)
        min_clip = np.clip(X_test - epsilon, 0, 1)

    for attack_string in attack_string_list:
        attack_log_fpath = os.path.join(adv_log_folder, "%s_%s.log" % (task_id, attack_string))
        attack_name, attack_params = parse_params(attack_string)
        print("\nRunning attack: %s %s" % (attack_name, attack_params))

        # Resolve the attack target: next class, least-likely class, or
        # untargeted (ground truth is then used only for evaluation).
        if 'targeted' in attack_params:
            targeted = attack_params['targeted']
            print("targeted value: %s" % targeted)
            if targeted == 'next':
                Y_test_target = Y_test_target_next
            elif targeted == 'll':
                Y_test_target = Y_test_target_ll
            elif targeted == False:
                attack_params['targeted'] = False
                Y_test_target = Y_test.copy()
        else:
            targeted = False
            attack_params['targeted'] = False
            Y_test_target = Y_test.copy()

        x_adv_fname = "%s_%s.pickle" % (task_id, attack_string)
        x_adv_fpath = os.path.join(X_adv_cache_folder, x_adv_fname)

        X_test_adv, aux_info = maybe_generate_adv_examples(sess, model, x, y, X_test, Y_test_target, attack_name, attack_params, use_cache=x_adv_fpath, verbose=FLAGS.verbose, attack_log_fpath=attack_log_fpath)

        if FLAGS.clip > 0:
            # This is L-inf clipping.
            X_test_adv = np.clip(X_test_adv, min_clip, max_clip)

        X_test_adv_list.append(X_test_adv)

        if isinstance(aux_info, float):
            duration = aux_info
        else:
            duration = aux_info['duration']
        dur_per_sample = duration / len(X_test_adv)

        # 5.0 Output predictions.
        Y_test_adv_pred = model.predict(X_test_adv)
        predictions_fpath = os.path.join(predictions_folder, "%s.npy" % attack_string)
        np.save(predictions_fpath, Y_test_adv_pred, allow_pickle=False)

        # 5.1 Evaluate the adversarial examples being discretized to uint8.
        print("\n---Attack (uint8): %s" % attack_string)
        # All data should be discretized to uint8.
        X_test_adv_discret = reduce_precision_py(X_test_adv, 256)
        X_test_adv_discretized_list.append(X_test_adv_discret)
        Y_test_adv_discret_pred = model.predict(X_test_adv_discret)
        Y_test_adv_discretized_pred_list.append(Y_test_adv_discret_pred)

        rec = evaluate_adversarial_examples(X_test, Y_test, X_test_adv_discret, Y_test_target.copy(), targeted, Y_test_adv_discret_pred)
        rec['dataset_name'] = FLAGS.dataset_name
        rec['model_name'] = FLAGS.model_name
        rec['attack_string'] = attack_string
        rec['duration_per_sample'] = dur_per_sample
        rec['discretization'] = True
        to_csv.append(rec)

    from utils.output import write_to_csv
    attacks_evaluation_csv_fpath = os.path.join(FLAGS.result_folder,
                                                "%s_attacks_%s_evaluation.csv" %
                                                (task_id, attack_string_hash))
    fieldnames = ['dataset_name', 'model_name', 'attack_string', 'duration_per_sample', 'discretization', 'success_rate', 'mean_confidence', 'mean_l2_dist', 'mean_li_dist', 'mean_l0_dist_value', 'mean_l0_dist_pixel']
    write_to_csv(to_csv, attacks_evaluation_csv_fpath, fieldnames)

    # Bug fix: hoisted out of the visualize branch. The robustness step below
    # also uses these indices and previously hit a NameError whenever
    # --visualize=false and --robustness was set.
    if FLAGS.test_mode or FLAGS.balance_sampling:
        selected_idx_vis = range(Y_test.shape[1])
    else:
        selected_idx_vis = get_first_n_examples_id_each_class(Y_test, 1)

    if FLAGS.visualize is True:
        from datasets.visualization import show_imgs_in_rows
        legitimate_examples = X_test[selected_idx_vis]

        rows = [legitimate_examples]
        rows += map(lambda x: x[selected_idx_vis], X_test_adv_list)

        img_fpath = os.path.join(FLAGS.result_folder, '%s_attacks_%s_examples.png' % (task_id, attack_string_hash))
        show_imgs_in_rows(rows, img_fpath)
        print('\n===Adversarial image examples are saved in ', img_fpath)

        # TODO: output the prediction and confidence for each example, both legitimate and adversarial.

    # 6. Evaluate robust classification techniques.
    # Example: --robustness \
    #     "Base;FeatureSqueezing?squeezer=bit_depth_1;FeatureSqueezing?squeezer=median_filter_2;"
    if FLAGS.robustness != '':
        """
        Test the accuracy with robust classifiers.
        Evaluate the accuracy on all the legitimate examples.
        """
        from robustness import evaluate_robustness
        result_folder_robustness = os.path.join(FLAGS.result_folder, "robustness")
        fname_prefix = "%s_%s_robustness" % (task_id, attack_string_hash)
        evaluate_robustness(FLAGS.robustness, model, Y_test_all, X_test_all, Y_test,
                            attack_string_list, X_test_adv_discretized_list,
                            fname_prefix, selected_idx_vis, result_folder_robustness)

    # 7. Detection experiment.
    # Example: --detection "FeatureSqueezing?distance_measure=l1&squeezers=median_smoothing_2,bit_depth_4,bilateral_filter_15_15_60;"
    if FLAGS.detection != '':
        from detections.base import DetectionEvaluator
        result_folder_detection = os.path.join(FLAGS.result_folder, "detection")
        csv_fname = "%s_attacks_%s_detection.csv" % (task_id, attack_string_hash)
        de = DetectionEvaluator(model, result_folder_detection, csv_fname, FLAGS.dataset_name)
        Y_test_all_pred = model.predict(X_test_all)
        de.build_detection_dataset(X_test_all, Y_test_all, Y_test_all_pred, selected_idx, X_test_adv_discretized_list, Y_test_adv_discretized_pred_list, attack_string_list, attack_string_hash, FLAGS.clip, Y_test_target_next, Y_test_target_ll)
        de.evaluate_detections(FLAGS.detection)
if __name__ == '__main__':
    # NOTE(review): main() is invoked directly rather than via app.run();
    # this appears to rely on FLAGS being parsed implicitly on first access
    # (FLAGS.model_name is read at import time above) — confirm.
    main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import ConfigParser
import cStringIO
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
from oslo.config import cfg
# from quantum.openstack.common.gettextutils import _
from spectrometer.openstack.common import importutils
from spectrometer.openstack.common import jsonutils
# from quantum.openstack.common import local
# Default strftime pattern for %(asctime)s when --log-date-format is not set.
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

# Short verbosity switches shared by OpenStack services.
common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

# Command-line options controlling log destinations and record formats.
logging_cli_opts = [
    cfg.StrOpt('log-config',
               metavar='PATH',
               help='If this option is specified, the logging configuration '
                    'file specified is used and overrides any other logging '
                    'options specified. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='syslog facility to receive log lines')
]

# Non-CLI option for mirroring log output to stderr.
generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]

# Config-file options for record formatting and per-logger level overrides.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqplib=WARN',
                    'sqlalchemy=WARN',
                    'boto=WARN',
                    'suds=INFO',
                    'keystone=INFO',
                    'eventlet.wsgi.server=WARN'
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='make deprecations fatal'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
#                module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')

try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    # Minimal fallback for Python 2.6: accept and discard every record.
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            # No lock is needed because emit() does nothing.
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log destination from CONF.log_file / CONF.log_dir.

    Returns None when neither option is set (callers then fall back to
    stream handlers).
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir
    if logfile:
        return os.path.join(logdir, logfile) if logdir else logfile
    if logdir:
        program = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, program),)
class BaseLoggerAdapter(logging.LoggerAdapter):
    # Shared adapter base that exposes the synthesized AUDIT level
    # (logging.AUDIT, registered above) as a first-class log method.

    def audit(self, msg, *args, **kwargs):
        """Log *msg* at the AUDIT level (INFO + 1)."""
        self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creating the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        # Deliberately does not call BaseLoggerAdapter.__init__; the wrapped
        # logger is materialized lazily through the `logger` property below.
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        # Create and cache the real ContextAdapter on first access.
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """Adapter that folds request-context and instance information into each
    record's 'extra' dict (consumed by ContextFormatter / JSONFormatter).
    """

    # Alias so callers may use the older .warn() spelling.
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string

    @property
    def handlers(self):
        # Expose the wrapped logger's handlers (LoggerAdapter itself doesn't).
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Log a deprecation warning, or raise if CONF.fatal_deprecations."""
        # NOTE(review): `_` (gettext) is never imported here — the
        # gettextutils import above is commented out — so this relies on `_`
        # being installed as a builtin elsewhere; confirm before use.
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        """LoggerAdapter hook: enrich kwargs['extra'] and return (msg, kwargs)."""
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        context = kwargs.pop('context', None)
        # if not context:
        #     context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        # Prefer a full instance object; fall back to a bare instance UUID.
        instance = kwargs.pop('instance', None)
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        else:
            instance_uuid = kwargs.pop('instance_uuid', None)
            if instance_uuid:
                instance_extra = (CONF.instance_uuid_format
                                  % {'uuid': instance_uuid})
        extra.update({'instance': instance_extra})

        extra.update({"project": self.project})
        extra.update({"version": self.version})

        # Keep a self-copy under 'extra' so JSONFormatter can emit it whole.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that renders each log record as a JSON document."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Return the formatted traceback as a list of lines."""
        lines = traceback.format_exception(*ei)
        if not strip_newlines:
            return lines
        # Flatten multi-line entries and drop the empty pieces.
        flattened = []
        for line in lines:
            flattened.extend(p for p in line.rstrip().splitlines() if p)
        return flattened

    def format(self, record):
        """Serialize *record* (and any attached traceback) to JSON."""
        message = {
            'message': record.getMessage(),
            'asctime': self.formatTime(record, self.datefmt),
            'name': record.name,
            'msg': record.msg,
            'args': record.args,
            'levelname': record.levelname,
            'levelno': record.levelno,
            'pathname': record.pathname,
            'filename': record.filename,
            'module': record.module,
            'lineno': record.lineno,
            'funcname': record.funcName,
            'created': record.created,
            'msecs': record.msecs,
            'relative_created': record.relativeCreated,
            'thread': record.thread,
            'thread_name': record.threadName,
            'process_name': record.processName,
            'process': record.process,
            'traceback': None,
        }

        # ContextAdapter.process() stashes its enrichments under 'extra'.
        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as CRITICAL."""
    def logging_excepthook(exc_type, value, tb):
        # Attach the full traceback only when running verbose.
        extra = {'exc_info': (exc_type, value, tb)} if CONF.verbose else {}
        getLogger(product_name).critical(str(value), **extra)
    return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = ('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % {'log_config': self.log_config,
                               'err_msg': self.err_msg}
def _load_log_config(log_config):
    """Load a logging config file, wrapping parse errors in LogConfigError."""
    try:
        logging.config.fileConfig(log_config)
    except ConfigParser.Error as exc:
        # Surface one friendly exception type to callers (Python 2 module).
        raise LogConfigError(log_config, str(exc))
def setup(product_name):
    """Setup logging."""
    if not CONF.log_config:
        _setup_logging_from_conf()
    else:
        # An explicit logging config file overrides every other option.
        _load_log_config(CONF.log_config)
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
    """Override the default context format string in log_opts."""
    cfg.set_defaults(
        log_opts,
        logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility constant."""
    handler_cls = logging.handlers.SysLogHandler
    facility_names = handler_cls.facility_names

    # Accept either a handler attribute name ('LOG_USER') or a facility
    # name ('user').
    facility = getattr(handler_cls, CONF.syslog_log_facility, None)
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
def _setup_logging_from_conf():
    """(Re)build the root logger's handlers, formatters and levels from CONF."""
    log_root = getLogger(None).logger
    # Start from a clean slate so repeated setup() calls don't stack handlers.
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        # WatchedFileHandler reopens the file if it is rotated underneath us.
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not CONF.log_file:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        handler = importutils.import_object(
            "quantum.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(datefmt=datefmt))

    # Root level: --debug wins over --verbose; default is WARNING.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    # Apply per-logger overrides such as 'amqplib=WARN'.
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return (and memoize) a ContextAdapter wrapping logging.getLogger(name)."""
    try:
        return _loggers[name]
    except KeyError:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
        return adapter
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        self.level = level
        self.logger = logger

    def write(self, msg):
        # File-like API: forward every write to the wrapped logger.
        self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter
    """

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formating params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        # Choose the format string per record; note self._fmt is mutated on
        # every call, so the active format is whatever the last record chose.
        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix

        # Cache this on the record, Logger will respect our formated copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)

        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = cStringIO.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        # Only compute asctime when the prefix actually references it.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        # Prepend the configured prefix to every traceback line.
        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """StreamHandler that exposes a per-level ANSI escape as record.color,
    for use via %(color)s in format strings.
    """

    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # KeyError here would mean a record using a non-standard level number.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised for use of a deprecated config option when
    CONF.fatal_deprecations is enabled (see ContextAdapter.deprecated).
    """

    message = ("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # Bug fix: the original used super(Exception, self), which starts the
        # MRO lookup *after* Exception and thereby skipped Exception's own
        # initializer. Name this class in super() instead (py2-compatible).
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import os
import tempfile
from contextlib import redirect_stdout
import pytest
from airflow.cli.commands import user_command
TEST_USER1_EMAIL = 'test-user1@example.com'
TEST_USER2_EMAIL = 'test-user2@example.com'
def _does_user_belong_to_role(appbuilder, email, rolename):
user = appbuilder.sm.find_user(email=email)
role = appbuilder.sm.find_role(rolename)
if user and role:
return role in user.roles
return False
class TestCliUsers:
    """End-to-end tests for the ``airflow users`` CLI command group.

    Each test drives ``user_command`` through arguments produced by the real
    CLI parser, then inspects stdout and the appbuilder security manager.
    """
    @pytest.fixture(autouse=True)
    def _set_attrs(self, app, dagbag, parser):
        # Bind the shared pytest fixtures onto the instance and guarantee a
        # clean slate of test users/roles both before and after every test.
        self.app = app
        self.dagbag = dagbag
        self.parser = parser
        self.appbuilder = self.app.appbuilder
        self.clear_roles_and_roles()
        yield
        self.clear_roles_and_roles()
    def clear_roles_and_roles(self):
        """Delete the well-known test users and fake team roles, if present."""
        for email in [TEST_USER1_EMAIL, TEST_USER2_EMAIL]:
            test_user = self.appbuilder.sm.find_user(email=email)
            if test_user:
                self.appbuilder.sm.del_register_user(test_user)
        for role_name in ['FakeTeamA', 'FakeTeamB']:
            if self.appbuilder.sm.find_role(role_name):
                self.appbuilder.sm.delete_role(role_name)
    def test_cli_create_user_random_password(self):
        """``users create --use-random-password`` completes without error."""
        args = self.parser.parse_args(
            [
                'users',
                'create',
                '--username',
                'test1',
                '--lastname',
                'doe',
                '--firstname',
                'jon',
                '--email',
                'jdoe@foo.com',
                '--role',
                'Viewer',
                '--use-random-password',
            ]
        )
        user_command.users_create(args)
    def test_cli_create_user_supplied_password(self):
        """``users create --password <pw>`` completes without error."""
        args = self.parser.parse_args(
            [
                'users',
                'create',
                '--username',
                'test2',
                '--lastname',
                'doe',
                '--firstname',
                'jon',
                '--email',
                'jdoe@apache.org',
                '--role',
                'Viewer',
                '--password',
                'test',
            ]
        )
        user_command.users_create(args)
    def test_cli_delete_user(self):
        """``users delete --username`` removes the user and announces it."""
        args = self.parser.parse_args(
            [
                'users',
                'create',
                '--username',
                'test3',
                '--lastname',
                'doe',
                '--firstname',
                'jon',
                '--email',
                'jdoe@example.com',
                '--role',
                'Viewer',
                '--use-random-password',
            ]
        )
        user_command.users_create(args)
        args = self.parser.parse_args(
            [
                'users',
                'delete',
                '--username',
                'test3',
            ]
        )
        # Capture stdout to verify the confirmation message.
        with redirect_stdout(io.StringIO()) as stdout:
            user_command.users_delete(args)
        assert 'User "test3" deleted' in stdout.getvalue()
    def test_cli_delete_user_by_email(self):
        """``users delete --email`` removes the user looked up by email."""
        args = self.parser.parse_args(
            [
                'users',
                'create',
                '--username',
                'test4',
                '--lastname',
                'doe',
                '--firstname',
                'jon',
                '--email',
                'jdoe2@example.com',
                '--role',
                'Viewer',
                '--use-random-password',
            ]
        )
        user_command.users_create(args)
        args = self.parser.parse_args(
            [
                'users',
                'delete',
                '--email',
                'jdoe2@example.com',
            ]
        )
        with redirect_stdout(io.StringIO()) as stdout:
            user_command.users_delete(args)
        assert 'User "test4" deleted' in stdout.getvalue()
    @pytest.mark.parametrize(
        'args,raise_match',
        [
            (
                [
                    'users',
                    'delete',
                ],
                'Missing args: must supply one of --username or --email',
            ),
            (
                [
                    'users',
                    'delete',
                    '--username',
                    'test_user_name99',
                    '--email',
                    'jdoe2@example.com',
                ],
                'Conflicting args: must supply either --username or --email, but not both',
            ),
            (
                [
                    'users',
                    'delete',
                    '--username',
                    'test_user_name99',
                ],
                'User "test_user_name99" does not exist',
            ),
            (
                [
                    'users',
                    'delete',
                    '--email',
                    'jode2@example.com',
                ],
                'User "jode2@example.com" does not exist',
            ),
        ],
    )
    def test_find_user_exceptions(self, args, raise_match):
        """_find_user exits with a helpful message on bad/missing selectors."""
        args = self.parser.parse_args(args)
        with pytest.raises(
            SystemExit,
            match=raise_match,
        ):
            user_command._find_user(args)
    def test_cli_list_users(self):
        """``users list`` output contains every created username."""
        for i in range(0, 3):
            args = self.parser.parse_args(
                [
                    'users',
                    'create',
                    '--username',
                    f'user{i}',
                    '--lastname',
                    'doe',
                    '--firstname',
                    'jon',
                    '--email',
                    f'jdoe+{i}@gmail.com',
                    '--role',
                    'Viewer',
                    '--use-random-password',
                ]
            )
            user_command.users_create(args)
        with redirect_stdout(io.StringIO()) as stdout:
            user_command.users_list(self.parser.parse_args(['users', 'list']))
            stdout = stdout.getvalue()
        for i in range(0, 3):
            assert f'user{i}' in stdout
    def test_cli_list_users_with_args(self):
        """``users list --output json`` runs without error."""
        user_command.users_list(self.parser.parse_args(['users', 'list', '--output', 'json']))
    def test_cli_import_users(self):
        """``users import`` creates users; re-importing updates their roles."""
        def assert_user_in_roles(email, roles):
            # Assert membership in every role listed.
            for role in roles:
                assert _does_user_belong_to_role(self.appbuilder, email, role)
        def assert_user_not_in_roles(email, roles):
            # Assert absence from every role listed.
            for role in roles:
                assert not _does_user_belong_to_role(self.appbuilder, email, role)
        assert_user_not_in_roles(TEST_USER1_EMAIL, ['Admin', 'Op'])
        assert_user_not_in_roles(TEST_USER2_EMAIL, ['Public'])
        users = [
            {
                "username": "imported_user1",
                "lastname": "doe1",
                "firstname": "jon",
                "email": TEST_USER1_EMAIL,
                "roles": ["Admin", "Op"],
            },
            {
                "username": "imported_user2",
                "lastname": "doe2",
                "firstname": "jon",
                "email": TEST_USER2_EMAIL,
                "roles": ["Public"],
            },
        ]
        self._import_users_from_file(users)
        assert_user_in_roles(TEST_USER1_EMAIL, ['Admin', 'Op'])
        assert_user_in_roles(TEST_USER2_EMAIL, ['Public'])
        # Second import with different roles: existing users' roles should
        # be replaced, not merged.
        users = [
            {
                "username": "imported_user1",
                "lastname": "doe1",
                "firstname": "jon",
                "email": TEST_USER1_EMAIL,
                "roles": ["Public"],
            },
            {
                "username": "imported_user2",
                "lastname": "doe2",
                "firstname": "jon",
                "email": TEST_USER2_EMAIL,
                "roles": ["Admin"],
            },
        ]
        self._import_users_from_file(users)
        assert_user_not_in_roles(TEST_USER1_EMAIL, ['Admin', 'Op'])
        assert_user_in_roles(TEST_USER1_EMAIL, ['Public'])
        assert_user_not_in_roles(TEST_USER2_EMAIL, ['Public'])
        assert_user_in_roles(TEST_USER2_EMAIL, ['Admin'])
    def test_cli_export_users(self):
        """``users export`` round-trips: exported JSON matches and re-imports."""
        user1 = {
            "username": "imported_user1",
            "lastname": "doe1",
            "firstname": "jon",
            "email": TEST_USER1_EMAIL,
            "roles": ["Public"],
        }
        user2 = {
            "username": "imported_user2",
            "lastname": "doe2",
            "firstname": "jon",
            "email": TEST_USER2_EMAIL,
            "roles": ["Admin"],
        }
        self._import_users_from_file([user1, user2])
        users_filename = self._export_users_to_file()
        with open(users_filename) as file:
            retrieved_users = json.loads(file.read())
        os.remove(users_filename)
        # ensure that an export can be imported
        self._import_users_from_file(retrieved_users)
        def find_by_username(username):
            # Locate the exported record for *username*, dropping the
            # export-only 'id' key before comparison.
            matches = [u for u in retrieved_users if u['username'] == username]
            assert matches, f"Couldn't find user with username {username}"
            matches[0].pop('id')  # this key not required for import
            return matches[0]
        assert find_by_username('imported_user1') == user1
        assert find_by_username('imported_user2') == user2
    def _import_users_from_file(self, user_list):
        """Write *user_list* as JSON to a temp file and run ``users import``."""
        json_file_content = json.dumps(user_list)
        # delete=False so the CLI can reopen the file by name; cleanup is
        # handled explicitly in the finally block.
        with tempfile.NamedTemporaryFile(delete=False) as f:
            try:
                f.write(json_file_content.encode())
                f.flush()
                args = self.parser.parse_args(['users', 'import', f.name])
                user_command.users_import(args)
            finally:
                os.remove(f.name)
    def _export_users_to_file(self):
        """Run ``users export`` into a temp file and return its path.

        The caller is responsible for removing the returned file.
        """
        with tempfile.NamedTemporaryFile(delete=False) as f:
            args = self.parser.parse_args(['users', 'export', f.name])
            user_command.users_export(args)
            return f.name
    @pytest.fixture()
    def create_user_test4(self):
        """Create user 'test4' (role Viewer) for the add/remove-role tests."""
        args = self.parser.parse_args(
            [
                'users',
                'create',
                '--username',
                'test4',
                '--lastname',
                'doe',
                '--firstname',
                'jon',
                '--email',
                TEST_USER1_EMAIL,
                '--role',
                'Viewer',
                '--use-random-password',
            ]
        )
        user_command.users_create(args)
    def test_cli_add_user_role(self, create_user_test4):
        """``users add-role`` grants the role and prints confirmation."""
        assert not _does_user_belong_to_role(
            appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Op'
        ), "User should not yet be a member of role 'Op'"
        args = self.parser.parse_args(['users', 'add-role', '--username', 'test4', '--role', 'Op'])
        with redirect_stdout(io.StringIO()) as stdout:
            user_command.users_manage_role(args, remove=False)
            assert 'User "test4" added to role "Op"' in stdout.getvalue()
        assert _does_user_belong_to_role(
            appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Op'
        ), "User should have been added to role 'Op'"
    def test_cli_remove_user_role(self, create_user_test4):
        """``users remove-role`` revokes the role and prints confirmation."""
        assert _does_user_belong_to_role(
            appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Viewer'
        ), "User should have been created with role 'Viewer'"
        args = self.parser.parse_args(['users', 'remove-role', '--username', 'test4', '--role', 'Viewer'])
        with redirect_stdout(io.StringIO()) as stdout:
            user_command.users_manage_role(args, remove=True)
            assert 'User "test4" removed from role "Viewer"' in stdout.getvalue()
        assert not _does_user_belong_to_role(
            appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Viewer'
        ), "User should have been removed from role 'Viewer'"
    @pytest.mark.parametrize(
        "action, role, message",
        [
            ["add-role", "Viewer", 'User "test4" is already a member of role "Viewer"'],
            ["add-role", "Foo", '"Foo" is not a valid role. Valid roles are'],
            ["remove-role", "Admin", 'User "test4" is not a member of role "Admin"'],
            ["remove-role", "Foo", '"Foo" is not a valid role. Valid roles are'],
        ],
    )
    def test_cli_manage_roles_exceptions(self, create_user_test4, action, role, message):
        """add-role/remove-role exit with a clear message on invalid input."""
        args = self.parser.parse_args(['users', action, '--username', 'test4', '--role', role])
        with pytest.raises(SystemExit, match=message):
            if action == 'add-role':
                user_command.add_role(args)
            else:
                user_command.remove_role(args)
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration.

        Replaces ColumnMapping's char fields 'column_raw'/'column_mapped'
        with M2M relations to the new 'Column' model.  The unique constraint
        is dropped first because it references 'column_raw', which is
        deleted below.
        """
        # Removing unique constraint on 'ColumnMapping', fields ['super_organization', 'column_raw', 'source_type']
        db.delete_unique(u'seed_columnmapping', ['super_organization_id', 'column_raw', 'source_type'])
        # Adding model 'Column'
        db.create_table(u'seed_column', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('organization', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['orgs.Organization'], null=True, blank=True)),
            ('column_name', self.gf('django.db.models.fields.CharField')(max_length=512)),
        ))
        db.send_create_signal(u'seed', ['Column'])
        # Deleting field 'ColumnMapping.column_raw'
        db.delete_column(u'seed_columnmapping', 'column_raw')
        # Deleting field 'ColumnMapping.column_mapped'
        db.delete_column(u'seed_columnmapping', 'column_mapped')
        # Adding M2M table for field column_raw on 'ColumnMapping'
        m2m_table_name = db.shorten_name(u'seed_columnmapping_column_raw')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('columnmapping', models.ForeignKey(orm[u'seed.columnmapping'], null=False)),
            ('column', models.ForeignKey(orm[u'seed.column'], null=False))
        ))
        db.create_unique(m2m_table_name, ['columnmapping_id', 'column_id'])
        # Adding M2M table for field column_mapped on 'ColumnMapping'
        m2m_table_name = db.shorten_name(u'seed_columnmapping_column_mapped')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('columnmapping', models.ForeignKey(orm[u'seed.columnmapping'], null=False)),
            ('column', models.ForeignKey(orm[u'seed.column'], null=False))
        ))
        db.create_unique(m2m_table_name, ['columnmapping_id', 'column_id'])
    def backwards(self, orm):
        """Reverse the migration — intentionally disallowed.

        Raises RuntimeError unconditionally: the original char values of
        'ColumnMapping.column_raw' are destroyed by forwards() and cannot
        be reconstructed from the M2M tables.
        """
        # Deleting model 'Column'
        db.delete_table(u'seed_column')
        # User chose to not deal with backwards NULL issues for 'ColumnMapping.column_raw'
        raise RuntimeError("Cannot reverse this migration. 'ColumnMapping.column_raw' and its values cannot be restored.")
        # NOTE: everything below the raise is intentionally unreachable.
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'ColumnMapping.column_raw'
        db.add_column(u'seed_columnmapping', 'column_raw',
                      self.gf('django.db.models.fields.CharField')(max_length=512),
                      keep_default=False)
        # Adding field 'ColumnMapping.column_mapped'
        db.add_column(u'seed_columnmapping', 'column_mapped',
                      self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True),
                      keep_default=False)
        # Removing M2M table for field column_raw on 'ColumnMapping'
        db.delete_table(db.shorten_name(u'seed_columnmapping_column_raw'))
        # Removing M2M table for field column_mapped on 'ColumnMapping'
        db.delete_table(db.shorten_name(u'seed_columnmapping_column_mapped'))
        # Adding unique constraint on 'ColumnMapping', fields ['super_organization', 'column_raw', 'source_type']
        db.create_unique(u'seed_columnmapping', ['super_organization_id', 'column_raw', 'source_type'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'data_importer.importfile': {
'Meta': {'object_name': 'ImportFile'},
'cached_first_row': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cached_second_to_fifth_row': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'export_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'file_size_in_bytes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'has_header_row': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['data_importer.ImportRecord']"}),
'mapping_completion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mapping_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mapping_error_messages': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'matching_completion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'matching_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'num_coercion_errors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'num_coercions_total': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'num_columns': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_mapping_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_mapping_warnings': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_rows': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_tasks_complete': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_tasks_total': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_validation_errors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'raw_save_completion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'raw_save_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_type': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'})
},
u'data_importer.importrecord': {
'Meta': {'ordering': "('-updated_at',)", 'object_name': 'ImportRecord'},
'app': ('django.db.models.fields.CharField', [], {'default': "'seed'", 'max_length': '64'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'finish_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'is_imported_live': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keep_missing_buildings': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_import_records'", 'null': 'True', 'to': u"orm['landing.SEEDUser']"}),
'matching_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'matching_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mcm_version': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'merge_analysis_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'merge_analysis_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'merge_analysis_queued': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'merge_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Unnamed Dataset'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['organizations.Organization']", 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['landing.SEEDUser']", 'null': 'True', 'blank': 'True'}),
'premerge_analysis_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'premerge_analysis_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'premerge_analysis_queued': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'super_organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'import_records'", 'null': 'True', 'to': u"orm['orgs.Organization']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'})
},
u'landing.seeduser': {
'Meta': {'object_name': 'SEEDUser'},
'api_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'db_index': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_custom_columns': ('djorm_pgjson.fields.JSONField', [], {'default': '{}'}),
'default_organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_users'", 'null': 'True', 'to': u"orm['orgs.Organization']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'show_shared_buildings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'})
},
u'organizations.organization': {
'Meta': {'ordering': "['name']", 'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '200', 'separator': "u'-'", 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['landing.SEEDUser']", 'through': u"orm['organizations.OrganizationUser']", 'symmetrical': 'False'})
},
u'organizations.organizationuser': {
'Meta': {'ordering': "['organization', 'user']", 'unique_together': "(('user', 'organization'),)", 'object_name': 'OrganizationUser'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organization_users'", 'to': u"orm['organizations.Organization']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organization_users'", 'to': u"orm['landing.SEEDUser']"})
},
u'orgs.organization': {
'Meta': {'ordering': "['name']", 'object_name': 'Organization'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_orgs'", 'null': 'True', 'to': u"orm['orgs.Organization']"}),
'query_threshold': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'orgs'", 'symmetrical': 'False', 'through': u"orm['orgs.OrganizationUser']", 'to': u"orm['landing.SEEDUser']"})
},
u'orgs.organizationuser': {
'Meta': {'ordering': "['organization', '-role_level']", 'object_name': 'OrganizationUser'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orgs.Organization']"}),
'role_level': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '12'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['landing.SEEDUser']"})
},
u'seed.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'building_variant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'options'", 'null': 'True', 'to': u"orm['seed.BuildingAttributeVariant']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value_source': ('django.db.models.fields.IntegerField', [], {})
},
u'seed.buildingattributevariant': {
'Meta': {'unique_together': "(('field_name', 'building_snapshot'),)", 'object_name': 'BuildingAttributeVariant'},
'building_snapshot': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'seed.buildingsnapshot': {
'Meta': {'ordering': "('-modified', '-created')", 'object_name': 'BuildingSnapshot'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_line_1_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_line_2_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'best_guess_canonical_building': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'best_guess'", 'null': 'True', 'to': u"orm['seed.CanonicalBuilding']"}),
'best_guess_confidence': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'block_number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'block_number_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'building_certification': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'building_certification_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'building_count': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'building_count_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'canonical_building': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['seed.CanonicalBuilding']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'canonical_for_ds': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['data_importer.ImportRecord']"}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'parents'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['seed.BuildingSnapshot']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'conditioned_floor_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'conditioned_floor_area_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'confidence': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'custom_id_1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'custom_id_1_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'district': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'district_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'energy_alerts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'energy_alerts_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'energy_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'energy_score_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'extra_data': ('djorm_pgjson.fields.JSONField', [], {'default': '{}'}),
'extra_data_sources': ('djorm_pgjson.fields.JSONField', [], {'default': '{}'}),
'generation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'generation_date_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'gross_floor_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'gross_floor_area_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_file': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['data_importer.ImportFile']", 'null': 'True', 'blank': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['landing.SEEDUser']", 'null': 'True', 'blank': 'True'}),
'lot_number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'lot_number_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'match_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'occupied_floor_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'occupied_floor_area_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'owner': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'owner_address': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'owner_address_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'owner_city_state': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'owner_city_state_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'owner_email': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'owner_email_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'owner_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'owner_postal_code_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'owner_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'owner_telephone': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'owner_telephone_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'pm_property_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'pm_property_id_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postal_code_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'property_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'property_name_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'property_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'property_notes_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'recent_sale_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'recent_sale_date_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'release_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'release_date_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'site_eui': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'site_eui_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'site_eui_weather_normalized': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'site_eui_weather_normalized_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'source_eui': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'source_eui_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'source_eui_weather_normalized': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'source_eui_weather_normalized_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'source_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'space_alerts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'space_alerts_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'state_province': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'state_province_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'super_organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'building_snapshots'", 'null': 'True', 'to': u"orm['orgs.Organization']"}),
'tax_lot_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tax_lot_id_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'use_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'use_description_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'year_built': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'year_built_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"}),
'year_ending': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'year_ending_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['seed.BuildingSnapshot']"})
},
u'seed.canonicalbuilding': {
'Meta': {'object_name': 'CanonicalBuilding'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'canonical_snapshot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['seed.BuildingSnapshot']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'seed.column': {
'Meta': {'object_name': 'Column'},
'column_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orgs.Organization']", 'null': 'True', 'blank': 'True'})
},
u'seed.columnmapping': {
'Meta': {'object_name': 'ColumnMapping'},
'column_mapped': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mapped_mappings'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['seed.Column']"}),
'column_raw': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'raw_mappings'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['seed.Column']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_type': ('django.db.models.fields.IntegerField', [], {}),
'super_organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'column_mappings'", 'null': 'True', 'to': u"orm['orgs.Organization']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['landing.SEEDUser']", 'null': 'True', 'blank': 'True'})
},
u'seed.compliance': {
'Meta': {'ordering': "('-modified', '-created')", 'object_name': 'Compliance'},
'compliance_type': ('django.db.models.fields.CharField', [], {'default': "'Benchmarking'", 'max_length': '30'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'deadline_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['seed.Project']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'seed.custombuildingheaders': {
'Meta': {'object_name': 'CustomBuildingHeaders'},
'building_headers': ('djorm_pgjson.fields.JSONField', [], {'default': '{}'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'super_organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'custom_headers'", 'null': 'True', 'to': u"orm['orgs.Organization']"})
},
u'seed.datacolumn': {
'Meta': {'unique_together': "(('organization', 'column_name'),)", 'object_name': 'DataColumn'},
'column_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bedes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orgs.Organization']"})
},
u'seed.meter': {
'Meta': {'object_name': 'Meter'},
'building_snapshot': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'meters'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['seed.BuildingSnapshot']"}),
'energy_type': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'energy_units': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'seed.project': {
'Meta': {'ordering': "('-modified', '-created')", 'object_name': 'Project'},
'building_snapshots': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['seed.BuildingSnapshot']", 'null': 'True', 'through': u"orm['seed.ProjectBuilding']", 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_modified_user'", 'null': 'True', 'to': u"orm['landing.SEEDUser']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['landing.SEEDUser']", 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'super_organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'projects'", 'null': 'True', 'to': u"orm['orgs.Organization']"})
},
u'seed.projectbuilding': {
'Meta': {'ordering': "['project', 'building_snapshot']", 'unique_together': "(('building_snapshot', 'project'),)", 'object_name': 'ProjectBuilding'},
'approved_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'approver': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['landing.SEEDUser']", 'null': 'True', 'blank': 'True'}),
'building_snapshot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_building_snapshots'", 'to': u"orm['seed.BuildingSnapshot']"}),
'compliant': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_building_snapshots'", 'to': u"orm['seed.Project']"}),
'status_label': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['seed.StatusLabel']", 'null': 'True', 'blank': 'True'})
},
u'seed.statuslabel': {
'Meta': {'ordering': "['-name']", 'unique_together': "(('name', 'super_organization'),)", 'object_name': 'StatusLabel'},
'color': ('django.db.models.fields.CharField', [], {'default': "'green'", 'max_length': '30'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'super_organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'status_labels'", 'null': 'True', 'to': u"orm['orgs.Organization']"})
},
u'seed.timeseries': {
'Meta': {'object_name': 'TimeSeries'},
'begin_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'cost': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '11', 'decimal_places': '4'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meter': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'timeseries_data'", 'null': 'True', 'to': u"orm['seed.Meter']"}),
'reading': ('django.db.models.fields.FloatField', [], {'null': 'True'})
}
}
complete_apps = ['seed']
| |
#!/usr/bin/python
# CGI SimpleFileUpload WebServer Ver. 0.1.5
# Author: Srinivas Gowda
# Email: srinivas@solancer.com
import string,cgi,time, re
import json
import math
import time
from os import curdir, sep
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import os # os. path
# Uploaded files are stored in (and served from) ./uploads, resolved once
# relative to the startup working directory.
CWD = os.path.abspath('.') + '/uploads'
print CWD
# PORT = 8080
UPLOAD_PAGE = 'upload'  # path of the upload form; linked back to from POST responses
def convertSize(size):
    """Return *size* (in bytes) as a human-readable string, e.g. '1.5 KB'.

    Fix: the original fed size straight into math.log(), so a zero-byte
    file raised ValueError (math.log(0) is undefined); zero/negative sizes
    now short-circuit to the '0B' answer the else-branch intended.
    """
    if size <= 0:
        return '0B'
    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    i = int(math.floor(math.log(size, 1024)))   # which 1024-power bucket
    p = math.pow(1024, i)
    s = round(size / p, 2)                      # scaled value, 2 decimals
    return '%s %s' % (s, size_name[i])
def json_view(relpath):
    """Return a JSON listing of the 'uploads' directory under *relpath*.

    Each entry is {'name': ..., 'mod': ..., 'size': ...} with the file's
    name, ctime-formatted mtime and human-readable size (via convertSize).

    Fix: removed the leftover debug ``print modlist`` that wrote to the
    server's stdout on every /json request; also builds the entries in a
    single pass instead of three parallel lists zipped together.
    """
    abspath = os.path.abspath(relpath) + '/uploads'
    entries = []
    for fname in os.listdir(abspath):
        fpath = abspath + sep + fname
        entries.append({
            'name': fname,
            'mod': time.ctime(os.path.getmtime(fpath)),
            'size': convertSize(os.path.getsize(fpath)),
        })
    return json.dumps(entries)
# -----------------------------------------------------------------------
class MyHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
if self.path == '/' :
f = open(curdir + sep + 'indexer.html')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(f.read())
return
if self.path.endswith('/json'):
page2 = json_view( '.' )
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(page2)
return
if self.path == '/upload' :
f = open(curdir + sep + 'upload.html')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(f.read())
return
if self.path.endswith(".html"):
## print curdir + sep + self.path
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path == ("/static/python-powered.png"):
## print curdir + sep + self.path
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type', 'image/png')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path.endswith(".css"):
## print curdir + sep + self.path
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type', 'text/css')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path.endswith(".js"):
## print curdir + sep + self.path
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type', 'text/javascript')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path.endswith(".eot"):
## print curdir + sep + self.path
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type', 'application/vnd.ms-fontobject')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path.endswith(".ttf"):
## print curdir + sep + self.path
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type', 'application/x-font-ttf')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path.endswith(".svg"):
## print curdir + sep + self.path
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type', 'image/svg+xml')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path.endswith(".woff"):
## print curdir + sep + self.path
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type', 'application/font-woff')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
else : # default: just send the file
filepath = self.path[1:] # remove leading '/'
f = open( os.path.join(CWD, filepath), 'rb' )
self.send_response(200)
self.send_header('Content-type', 'application/octet-stream')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
return # be sure not to fall into "except:" clause ?
except IOError as e :
# debug
print e
self.send_error(404,'File Not Found: %s' % self.path)
def do_POST(self):
# global rootnode ## something remained in the orig. code
try:
ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
if ctype == 'multipart/form-data' :
fs = cgi.FieldStorage( fp = self.rfile,
headers = self.headers, # headers_,
environ={ 'REQUEST_METHOD':'POST' }
)
else: raise Exception("Unexpected POST request")
fs_up = fs['upfile']
filename = os.path.split(fs_up.filename)[1]
f_filename = str(filename).replace(" ", "_")
fullname = os.path.join(CWD, f_filename)
# check for copies :
if os.path.exists( fullname ):
fullname_test = fullname + '.copy'
i = 0
while os.path.exists( fullname_test ):
fullname_test = "%s.copy(%d)" % (fullname, i)
i += 1
fullname = fullname_test
if not os.path.exists(fullname):
with open(fullname, 'wb') as o:
# self.copyfile(fs['upfile'].file, o)
o.write( fs_up.file.read() )
self.send_response(200)
self.end_headers()
self.wfile.write("<HTML><HEAD></HEAD><BODY>Upload Successful!<BR><BR>");
self.wfile.write( "File uploaded under name: " + os.path.split(fullname)[1] );
self.wfile.write( '<BR><A HREF="/">Main page</A>')
self.wfile.write( '<BR><A HREF=%s>back</A>' % ( UPLOAD_PAGE, ) )
self.wfile.write("</BODY></HTML>");
except Exception as e:
# pass
print e
self.send_error(404,'POST to "%s" failed: %s' % (self.path, str(e)) )
def main():
try:
server = HTTPServer(('', 8080), MyHandler)
print 'started httpserver...'
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down server'
server.socket.close()
# Script entry point: run the server only when executed directly.
if __name__ == '__main__':
    main()
| |
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
start_time = time.time()  # wall-clock reference for per-episode log lines
MAX_TEST = 10  # number of independent training runs (one seed each)
# Each iteration trains a fresh policy network on a freshly seeded
# environment; the graph is reset at the end of the loop body below.
for test_num in range(1, MAX_TEST+1):
    # Hyperparameters
    RANDOM_NUMBER_SEED = test_num
    ENVIRONMENT1 = "morph-v0"
    MAX_EPISODES = 50000 # number of episodes
    EPISODE_LENGTH = 8000 # single episode length
    HIDDEN_SIZE = 16
    DISPLAY_WEIGHTS = False # Help debug weight update
    gamma = 0.99 # Discount per step
    RENDER = False # Render the cart-pole system
    VIDEO_INTERVAL = 100 # Generate a video at this interval
    CONSECUTIVE_TARGET = 100 # Including previous 100 rewards
    CONST_LR = True # Constant or decaying learning rate
    # Constant learning rate
    const_learning_rate_in = 0.0005
    # Decay learning rate
    start_learning_rate_in = 0.003
    decay_steps_in = 100
    decay_rate_in = 0.95
    DIR_PATH_SAVEFIG = "/root/cartpole_plot/"
    # Output figure name encodes the hyperparameters of this run
    # (dots replaced by 'p' to keep the filename portable).
    if CONST_LR:
        learning_rate = const_learning_rate_in
        file_name_savefig = "el" + str(EPISODE_LENGTH) \
                            + "_hn" + str(HIDDEN_SIZE) \
                            + "_clr" + str(learning_rate).replace(".", "p") \
                            + "_test" + str(test_num) \
                            + ".png"
    else:
        start_learning_rate = start_learning_rate_in
        decay_steps = decay_steps_in
        decay_rate = decay_rate_in
        file_name_savefig = "el" + str(EPISODE_LENGTH) \
                            + "_hn" + str(HIDDEN_SIZE) \
                            + "_dlr_slr" + str(start_learning_rate).replace(".", "p") \
                            + "_ds" + str(decay_steps) \
                            + "_dr" + str(decay_rate).replace(".", "p") \
                            + "_test" + str(test_num) \
                            + ".png"
    env = gym.make(ENVIRONMENT1)
    # Seed everything for reproducibility of this test run.
    env.seed(RANDOM_NUMBER_SEED)
    np.random.seed(RANDOM_NUMBER_SEED)
    tf.set_random_seed(RANDOM_NUMBER_SEED)
    # Input and output sizes are hard-coded for the cart-pole task
    # (4 observations, 2 discrete actions) instead of being read from the
    # environment spaces (commented-out code below).
    input_size = 4
    output_size = 2
    # input_size = env.observation_space.shape[0]
    # try:
    # output_size = env.action_space.shape[0]
    # except AttributeError:
    # output_size = env.action_space.n
    # Tensorflow network setup: x = observations, y = taken actions.
    x = tf.placeholder(tf.float32, shape=(None, input_size))
    y = tf.placeholder(tf.float32, shape=(None, 1))
    if not CONST_LR:
        # decay learning rate
        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, decay_rate, staircase=False)
    expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
    # Xavier (2010) weights initializer for uniform distribution:
    # x = sqrt(6. / (in + out)); [-x, x]
    w_init = tf.contrib.layers.xavier_initializer()
    hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
                               initializer=w_init)
    hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
    dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
                             initializer=w_init)
    dist_B = tf.Variable(tf.zeros(output_size))
    # One hidden layer (ELU) followed by a tanh layer whose log-softmax
    # first component parameterizes a Bernoulli action distribution.
    hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
    dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
    dist_soft = tf.nn.log_softmax(dist)
    dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
    pi = tf.contrib.distributions.Bernoulli(dist_in)
    pi_sample = pi.sample()
    log_pi = pi.log_prob(y)
    # REINFORCE-style objective: maximize expected_returns * log pi
    # (minimized with the sign flipped).
    if CONST_LR:
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
        train = optimizer.minimize(-1.0 * expected_returns * log_pi)
    else:
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
        train = optimizer.minimize(-1.0 * expected_returns * log_pi, global_step=global_step)
    # saver = tf.train.Saver()
    # Create and initialize a session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    def run_episode(environment, ep, render=False):
        # Roll out one full episode with the current (closed-over) policy.
        # Returns (raw_reward, discounted_reward, cumulative_reward,
        # states, actions), where cumulative_reward[i] is the discounted
        # return accumulated *before* step i (used as a per-step baseline
        # in the training loop below).
        raw_reward = 0
        discounted_reward = 0
        cumulative_reward = []
        discount = 1.0
        states = []
        actions = []
        obs = environment.reset()
        done = False
        while not done:
            states.append(obs)
            cumulative_reward.append(discounted_reward)
            if render and ((ep % VIDEO_INTERVAL) == 0):
                environment.render()
            # Sample a Bernoulli action for the current observation.
            action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
            actions.append(action)
            obs, reward, done, info = env.step(action[0])
            raw_reward += reward
            # NOTE(review): only positive rewards are discounted here;
            # negative rewards are accumulated undiscounted — confirm
            # this asymmetry is intentional.
            if reward > 0:
                discounted_reward += reward * discount
            else:
                discounted_reward += reward
            discount *= gamma
        return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
    returns = []        # raw (undiscounted) return per episode
    mean_returns = []   # running mean over the last CONSECUTIVE_TARGET episodes
    for ep in range(MAX_EPISODES):
        raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
            run_episode(env, ep, RENDER)
        # Advantage-like target: total discounted return minus the return
        # already accumulated before each step (per-step baseline).
        expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
        sess.run(train, feed_dict={x: ep_states, y: ep_actions,
                                   expected_returns: expected_R})
        if DISPLAY_WEIGHTS:
            display_weights(sess)
        returns.append(raw_G)
        running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
        mean_return = np.mean(running_returns)
        mean_returns.append(mean_return)
        # Progress log; decaying-LR mode must fetch the current rate
        # from the graph instead of using the Python constant.
        if CONST_LR:
            msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
            msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), learning_rate, raw_G, CONSECUTIVE_TARGET, mean_return)
            print(msg)
        else:
            msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
            msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), sess.run(learning_rate), raw_G, CONSECUTIVE_TARGET, mean_return)
            print(msg)
    env.close() # close openai gym environment
    tf.reset_default_graph() # clear tensorflow graph
    # Plot instant and averaged returns for this test run.
    # plt.style.use('ggplot')
    plt.style.use('dark_background')
    episodes_plot = np.arange(MAX_EPISODES)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    fig.subplots_adjust(top=0.85)
    if CONST_LR:
        ax.set_title("The Cart-Pole Problem Test %i \n \
                     Episode Length: %i \
                     Discount Factor: %.2f \n \
                     Number of Hidden Neuron: %i \
                     Constant Learning Rate: %.5f" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, learning_rate))
    else:
        ax.set_title("The Cart-Pole Problem Test %i \n \
                     EpisodeLength: %i DiscountFactor: %.2f NumHiddenNeuron: %i \n \
                     Decay Learning Rate: (start: %.5f, steps: %i, rate: %.2f)" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, start_learning_rate, decay_steps, decay_rate))
    ax.set_xlabel("Episode")
    ax.set_ylabel("Return")
    ax.set_ylim((0, EPISODE_LENGTH))
    ax.grid(linestyle='--')
    ax.plot(episodes_plot, returns, label='Instant return')
    ax.plot(episodes_plot, mean_returns, label='Averaged return')
    legend = ax.legend(loc='best', shadow=True)
    fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500)
    # plt.show()
# plt.show()
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local Nestorcoin JSON-RPC daemon on port 9248;
# credentials are embedded in the URL only when a password is configured.
if rpcpass == "":
	access = ServiceProxy("http://127.0.0.1:9248")
else:
	access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9248")
# Dispatch the first CLI argument to the matching wallet RPC, prompting
# interactively for parameters.  For RPCs with optional arguments, the
# inner try first attempts the call with all prompted values and falls
# back to the no-argument form.  The bare excepts deliberately reduce any
# RPC/input failure to a generic error message.
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
	try:
		path = raw_input("Enter destination path/filename: ")
		print access.backupwallet(path)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaccount":
	try:
		addr = raw_input("Enter a Nestorcoin address: ")
		print access.getaccount(addr)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaccountaddress(acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaddressesbyaccount(acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getbalance":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getbalance(acct, mc)
		except:
			print access.getbalance()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
	try:
		height = raw_input("Height: ")
		print access.getblockbycount(height)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblockcount":
	try:
		print access.getblockcount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
	try:
		print access.getblocknumber()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
	try:
		print access.getconnectioncount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
	try:
		print access.getdifficulty()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getgenerate":
	try:
		print access.getgenerate()
	except:
		print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
	try:
		print access.gethashespersec()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getinfo":
	try:
		print access.getinfo()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
	try:
		acct = raw_input("Enter an account name: ")
		try:
			print access.getnewaddress(acct)
		except:
			print access.getnewaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaccount(acct, mc)
		except:
			print access.getreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
	try:
		addr = raw_input("Enter a Nestorcoin address (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaddress(addr, mc)
		except:
			print access.getreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "gettransaction":
	try:
		txid = raw_input("Enter a transaction ID: ")
		print access.gettransaction(txid)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
	try:
		cmd = raw_input("Command (optional): ")
		try:
			print access.help(cmd)
		except:
			print access.help()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listaccounts":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.listaccounts(mc)
		except:
			print access.listaccounts()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaccount(mc, incemp)
		except:
			print access.listreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaddress(mc, incemp)
		except:
			print access.listreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listtransactions":
	try:
		acct = raw_input("Account (optional): ")
		count = raw_input("Number of transactions (optional): ")
		frm = raw_input("Skip (optional):")
		try:
			print access.listtransactions(acct, count, frm)
		except:
			print access.listtransactions()
	except:
		print "\n---An error occurred---\n"
elif cmd == "move":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.move(frm, to, amt, mc, comment)
		except:
			print access.move(frm, to, amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendfrom":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendfrom(frm, to, amt, mc, comment, commentto)
		except:
			print access.sendfrom(frm, to, amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendmany":
	try:
		frm = raw_input("From: ")
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.sendmany(frm,to,mc,comment)
		except:
			print access.sendmany(frm,to)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
	try:
		addr = raw_input("Address: ")
		acct = raw_input("Account:")
		print access.setaccount(addr,acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "setgenerate":
	try:
		gen= raw_input("Generate? (true/false): ")
		cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
		try:
			print access.setgenerate(gen, cpus)
		except:
			print access.setgenerate(gen)
	except:
		print "\n---An error occurred---\n"
elif cmd == "settxfee":
	try:
		amt = raw_input("Amount:")
		print access.settxfee(amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "stop":
	try:
		print access.stop()
	except:
		print "\n---An error occurred---\n"
elif cmd == "validateaddress":
	try:
		addr = raw_input("Address: ")
		print access.validateaddress(addr)
	except:
		print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
	try:
		pwd = raw_input("Enter wallet passphrase: ")
		# Unlock the wallet for 60 seconds.
		access.walletpassphrase(pwd, 60)
		print "\n---Wallet unlocked---\n"
	except:
		print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
	try:
		pwd = raw_input("Enter old wallet passphrase: ")
		pwd2 = raw_input("Enter new wallet passphrase: ")
		access.walletpassphrasechange(pwd, pwd2)
		print
		print "\n---Passphrase changed---\n"
	except:
		print
		print "\n---An error occurred---\n"
	print
else:
	print "Command not found or not supported"
| |
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os, re
import maya.cmds
import IECore
from UIElement import UIElement
from FnParameterisedHolder import FnParameterisedHolder
from ClassParameterUI import ClassParameterUI
from ClassVectorParameterUI import ClassVectorParameterUI
from FnTransientParameterisedHolderNode import FnTransientParameterisedHolderNode
__all__ = [ 'PresetsUI', 'SavePresetUI', 'LoadPresetUI' ]
def __savePresetMenuModifierVectorClass( menuDefinition, parent, parameter, node ) :

    # Variant of __savePresetMenuModifier used for classes held inside a
    # ClassVectorParameter: that callback signature supplies the parent
    # parameter positionally, so we simply forward it as a keyword argument.
    __savePresetMenuModifier( menuDefinition, parameter, node, parent=parent )
def __savePresetMenuModifier( menuDefinition, parameter, node, parent=None ) :

    # Appends "Save Preset..." and "Load Preset..." items for the given
    # parameter to the supplied menu definition.
    holder = FnParameterisedHolder( node )
    plug = holder.parameterPlugPath( parameter )

    # Separate the preset entries from anything already in the menu.
    if len( menuDefinition.items() ):
        menuDefinition.append( "/PresetsDivider", { "divider" : True } )

    # If we are actually a class in a vector (or a class holder), use
    # slightly different item names so that it's more obvious what's going on.
    ## \todo Add an item to save the class as a preset, rather than its values.
    inClassHolder = parent is not None and isinstance(
        parent, ( IECore.ClassVectorParameter, IECore.ClassParameter )
    )

    if inClassHolder :
        saveItemName = "/Presets/Save Parameter Values Preset..."
        loadItemName = "/Presets/Load Parameter Values Preset..."
    else :
        saveItemName = "/Presets/Save Preset..."
        loadItemName = "/Presets/Load Preset..."

    # evalDeferred ensures the work done by the UI undoes in a single step.
    menuDefinition.append( saveItemName, { "command" : IECore.curry( maya.cmds.evalDeferred, 'import IECoreMaya; IECoreMaya.SavePresetUI( "%s", "%s" )' % ( holder.fullPathName(), plug ) ) } )
    menuDefinition.append( loadItemName, { "command" : IECore.curry( maya.cmds.evalDeferred, 'import IECoreMaya; IECoreMaya.LoadPresetUI( "%s", "%s" )' % ( holder.fullPathName(), plug ) ) } )
# Hook the preset menu items into the class and class-vector parameter UIs,
# so the Save/Load entries appear in their popup and tools menus.
ClassParameterUI.registerClassMenuCallback( __savePresetMenuModifier )
ClassVectorParameterUI.registerClassMenuCallback( __savePresetMenuModifierVectorClass )
ClassVectorParameterUI.registerToolsMenuCallback( __savePresetMenuModifier )
### @name Wrapper functions
### These wrappers take only string arguments, to allow the PresetsUI
### To be invoked from a evalDeferred call. This is needed to make sure that
### all the tasks performed internally by the UI undo in one step.
### @{
def SavePresetUI( nodeName, attribute ) :

    # String-only entry point (suitable for evalDeferred) which launches the
    # save UI for the parameter behind the given attribute.
    holder = FnParameterisedHolder( nodeName )
    root = holder.plugParameter( attribute )
    PresetsUI( nodeName, root ).save()
def LoadPresetUI( nodeName, attribute ) :

    # String-only entry point (suitable for evalDeferred) which launches the
    # load UI for the parameter behind the given attribute.
    holder = FnParameterisedHolder( nodeName )
    root = holder.plugParameter( attribute )
    PresetsUI( nodeName, root ).load()
### @}
### This class provides a UI for loading and saving presets for nodes
### derived from the ParameterisedHolder class. Currently, it creates
### BasicPresets in one of the locations set in the relevant search
### paths for the Parameterised objects. Categories, and titles aren't
### yet implemented.
###
### \todo Currently, the LoadUI has to instantiate every preset in the
### search path and call 'applicableTo'. This is potentially a huge
### bottleneck, so we'll see what happens when we use it in earnest...
class PresetsUI() :

    ## \param node The name of a node derived from a ParameterisedHolder.
    ## \param rootParameter Optionally, the parameter at the root of the
    ## hierarchy the UI should operate on; defaults to all parameters.
    def __init__( self, node, rootParameter=None ) :

        try :
            fn = FnParameterisedHolder( node )
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a bad node.
        except Exception :
            # 'raise ValueError, msg' is Python-2-only syntax; the call
            # form below is valid in both Python 2 and 3.
            raise ValueError( 'PresetsUI: "%s" is not a valid Parameterised object.' % node )

        self.__node = node
        self.__rootParameter = rootParameter

    ### Call to save a preset.
    def save( self ) :
        SaveUI( self.__node, self.__rootParameter )

    ### Call to copy a preset.
    ## \param callback, f( preset ), A callable, that will be
    ## called with the Preset instance after the user has selected
    ## a number of parameters.
    def copy( self, callback ) :
        CopyUI( self.__node, self.__rootParameter, callback )

    ### Call to load a preset.
    def load( self ) :
        LoadUI( self.__node, self.__rootParameter )

    ### Call to select parameters within the current rootParameter.
    ## \param callback, f( node, rootParameter, parameters ), A callable, that will be
    ## called with the node, and chosen parameters after the user has
    ## made their selection. This can be useful for a variety of cases where
    ## it's needed for the user to select parameters within a hierarchy.
    def selectParameters( self, callback ) :
        SelectUI( self.__node, self.__rootParameter, callback )
# Private implementation classes
# This is a base class for all the UIs which need to display a list of available parameters
# and obtain a subset which the user is interested in. This takes care of drawing a list in
# a form layout. Derived classes can then edit/add to this layout to add additional controls.
# self._fnP will contain a parameterised holder around the node passed to the constructor.
# self._rootParameter will contain the rootParameter passed to the constructor.
# self._form is the main form layout
# self._scroll is the main scroll layout
# self._selector is the actual parameter list
class ParamSelectUI( UIElement ) :

    ## \param node The ParameterisedHolder node whose parameters are listed.
    ## \param rootParameter The parameter at the root of the displayed
    ## hierarchy; defaults to the holder's top-level parameters.
    ## \param buttonTitle Only used in the window title here; derived
    ## classes add the actual button.
    ## \param autoCollapseDepth Hierarchy depth below which parameter
    ## groups start collapsed.
    def __init__( self, node, rootParameter=None, buttonTitle="Select", autoCollapseDepth=2 ) :

        self._fnP = FnParameterisedHolder( node )

        parameterised = self._fnP.getParameterised()
        self._rootParameter = rootParameter if rootParameter else parameterised[0].parameters()

        self._window = maya.cmds.window(
            title="%s: %s" % ( buttonTitle, node ),
            width=500,
            height=600
        )

        UIElement.__init__( self, self._window )

        self._form = maya.cmds.formLayout()

        self._scroll = maya.cmds.scrollLayout( parent=self._form )
        self._selector = ParameterSelector( self._rootParameter, self._scroll, autoCollapseDepth=autoCollapseDepth )

        # Fill the whole form with the scroll area; derived classes re-edit
        # this layout to make room for their extra controls.
        maya.cmds.formLayout( self._form, edit=True,
            attachForm=[ ( self._scroll, "top", 0 ),
                ( self._scroll, "left", 0 ),
                ( self._scroll, "right", 0 ),
                ( self._scroll, "bottom", 0 ), ],
        )
# The SelectUI allows parameter selection, then calls the supplied callback with
# the node, rootParameter, and a list of chosen parameters. The button title can
# be customised with the label argument to the constructor. This may be useful
# outside this file, and can be accessed by the PresetUI.selectParameters() method
# which takes a callback.
class SelectUI( ParamSelectUI ) :

    ## \param callback f( node, rootParameter, parameters ) — called after a
    ## successful selection. May be None, in which case nothing is called.
    ## \param label The label for the action button.
    def __init__( self, node, rootParameter=None, callback=None, label="Select" ) :

        self.__callback = callback
        self.__node = node

        ParamSelectUI.__init__( self, node, rootParameter )

        self.__button = maya.cmds.button(
            l=label,
            parent=self._form,
            height=30,
            c=self._createCallback( self.__doAction )
        )

        # Re-edit the base class layout to slot the button beneath the list.
        maya.cmds.formLayout( self._form, edit=True,
            attachForm=[ ( self._scroll, "top", 0 ),
                ( self._scroll, "left", 0 ),
                ( self._scroll, "right", 0 ),
                ( self.__button, "bottom", 0 ),
                ( self.__button, "left", 0 ),
                ( self.__button, "right", 0 ) ],
            attachControl=[ ( self._scroll, "bottom", 0, self.__button ), ],
        )

        maya.cmds.showWindow( self._window )

    # Validates that something is selected, closes the window, then invokes
    # the callback with ( node, rootParameter, parameters ).
    def __doAction( self ) :

        parameters = self._selector.getActiveParameters()

        if not parameters :
            maya.cmds.confirmDialog( message="Please select at least one paremeter.", button="OK" )
            return

        maya.cmds.deleteUI( self._window )

        if self.__callback:
            self.__callback( self.__node, self._rootParameter, parameters )
# The CopyUI extends the selector to create a preset from the users selection, and call a callback
# passing that preset.
class CopyUI( SelectUI ) :

    ## \param callback f( preset ) — called with the BasicPreset created
    ## from the user's selection.
    def __init__( self, node, rootParameter=None, callback=None ) :

        self.__callback = callback
        # NOTE(review): the button label "copy" is lower case, unlike the
        # other UIs in this file — confirm whether this is intentional.
        SelectUI.__init__( self, node, rootParameter, callback=self.__copyCallback, label="copy" )

    # The copy callback simply creates a preset, then forwards this to whatever other callback was registered
    def __copyCallback( self, node, rootParameter, parameters ) :

        preset = IECore.BasicPreset( self._fnP.getParameterised()[0], rootParameter, parameters=parameters )
        self.__callback( preset )
# The SaveUI extends the selector to add path selection, as well as description and name fields.
class SaveUI( ParamSelectUI ) :

    ## \param node The ParameterisedHolder node to save a preset for.
    ## \param rootParameter Parameter at the root of the saved hierarchy.
    ## \param autoCollapseDepth Depth below which parameter groups start
    ## collapsed in the selector.
    def __init__( self, node, rootParameter=None, autoCollapseDepth=2 ) :

        fnP = FnParameterisedHolder( node )
        parameterised = fnP.getParameterised()

        # Preset search paths are advertised through an environment variable
        # derived from the class's own search path variable name.
        self.__envVar = parameterised[3].replace( "_PATHS", "_PRESET_PATHS" )

        # Returning early leaves the object without a window — intended for
        # this transient UI class: nothing is shown if saving is impossible.
        if self.__envVar not in os.environ :
            maya.cmds.confirmDialog( message="Environment variable not set:\n\n$%s\n\nPlease set "%self.__envVar+\
                "this variable to point to one or more paths.\nPresets can then be saved to these "+\
                "locations.", button="OK" )
            return

        ParamSelectUI.__init__( self, node, rootParameter, autoCollapseDepth=autoCollapseDepth )

        self.__location = SearchPathMenu(
            os.getenv( self.__envVar ),
            self._form,
            label = "Save to:",
            ann = self.__envVar,
            cw = ( 1, 65 ),
            adj = 2,
        )

        self.__name = maya.cmds.textFieldGrp(
            parent = self._form,
            label = "Name:",
            adj = 2,
            columnWidth = ( 1, 65 )
        )

        descripLabel = maya.cmds.text(
            parent = self._form,
            label = "Description:",
            align = "left",
        )

        self.__description = maya.cmds.scrollField(
            parent = self._form,
            numberOfLines = 5,
            height = 100,
        )

        self.__saveButton = maya.cmds.button(
            l = "Save",
            parent = self._form,
            height = 30,
            c = self._createCallback( self.__doSave )
        )

        # Stack location/name/description/save controls beneath the
        # parameter list created by the base class.
        maya.cmds.formLayout( self._form, edit=True,
            attachForm=[ ( self._scroll, "top", 0 ),
                ( self._scroll, "left", 0 ),
                ( self._scroll, "right", 0 ),
                ( self.__location.menu(), "left", 10 ),
                ( self.__location.menu(), "right", 10 ),
                ( self.__name, "left", 10 ),
                ( self.__name, "right", 10 ),
                ( descripLabel, "left", 10 ),
                ( descripLabel, "right", 10 ),
                ( self.__description, "left", 10 ),
                ( self.__description, "right", 10 ),
                ( self.__saveButton, "bottom", 0 ),
                ( self.__saveButton, "left", 0 ),
                ( self.__saveButton, "right", 0 ) ],
            attachControl=[ ( self._scroll, "bottom", 5, self.__location.menu() ),
                ( self.__location.menu(), "bottom", 3, self.__name ),
                ( self.__name, "bottom", 5, descripLabel ),
                ( descripLabel, "bottom", 5, self.__description ),
                ( self.__description, "bottom", 5, self.__saveButton ), ]
        )

        maya.cmds.showWindow( self._window )

    # Validates the form, then writes a BasicPreset for the chosen
    # parameters to the selected search path location.
    def __doSave( self ) :

        name = maya.cmds.textFieldGrp( self.__name, query=True, text=True )
        if not name:
            maya.cmds.confirmDialog( message="Please enter a name for the preset.", button="OK" )
            return

        # Sanitise the name a little
        name = name.replace( " ", "_" )
        name = re.sub( '[^a-zA-Z0-9_]*', "", name )
        # We have to also make sure that the name doesn't begin with a number,
        # as it wouldn't be a legal class name in the resulting py stub.
        name = re.sub( '^[0-9]+', "", name )

        description = maya.cmds.scrollField( self.__description, query=True, text=True )

        parameters = self._selector.getActiveParameters()
        if not parameters :
            maya.cmds.confirmDialog( message="Select at least one parameter to save.", button="OK" )
            return

        path = self.__location.getValue()

        # Pull the current plug values into the parameterised object before
        # snapshotting them into the preset.
        self._fnP.setParameterisedValues()

        preset = IECore.BasicPreset(
            self._fnP.getParameterised()[0],
            self._rootParameter,
            parameters = parameters
        )

        preset.save(
            path,
            name,
            description = description,
        )

        maya.cmds.deleteUI( self._window )
class LoadUI( UIElement ) :

    ## \param node The ParameterisedHolder node to load presets onto.
    ## \param rootParameter Parameter the presets are applied to; defaults
    ## to the holder's top-level parameters.
    def __init__( self, node, rootParameter=None ) :

        fn = FnParameterisedHolder( node )
        parameterised = fn.getParameterised()

        self.__parameterised = parameterised
        # Just using 'not' on a ClassVector takes its length, which equates to False if its empty.
        self.__rootParameter = rootParameter if rootParameter is not None else parameterised[0].parameters()
        self.__fnP = fn

        # Preset search paths come from an environment variable derived from
        # the class's own search path variable name.
        self.__envVar = parameterised[3].replace( "_PATHS", "_PRESET_PATHS" )

        if self.__envVar not in os.environ :
            maya.cmds.confirmDialog( message="Environment variable not set:\n\n$%s\n\nPlease set "%self.__envVar+\
                "this variable to point to one or more paths.\nPresets can then be loaded from these "+\
                "locations.", button="OK" )
            return

        paths = os.environ[self.__envVar]
        sp = IECore.SearchPath( os.path.expandvars( paths ) )
        self.__classLoader = IECore.ClassLoader( sp )

        presets = self.__getPresets( parameterised[0], self.__rootParameter )
        if not presets:
            maya.cmds.confirmDialog( message="No presets applicable to %s found in the current search paths ($%s)." % ( self.__rootParameter.name, self.__envVar ), button="OK" )
            return

        # name -> instantiated preset, filled in by __selectionChanged.
        self.__loadedPresets = {}

        self.__window = maya.cmds.window( title="Load: %s" % node, width=300, height=500 )

        UIElement.__init__( self, self.__window )

        self.__form = maya.cmds.formLayout()

        self.__infoColumn = PresetInfo( parent=self.__form )
        self.__selector = PresetSelector( presets, self.__form, allowMultiSelection=True, selectCommand=self._createCallback( self.__selectionChanged ) )
        self.__loadButton = maya.cmds.button( l="Load", parent=self.__form, height=30, c=self._createCallback( self.__doLoad ) )

        # NOTE(review): presets is always non-empty here (we returned above
        # otherwise), so this disable branch is effectively dead; kept for
        # safety should the early return ever change.
        if not presets:
            maya.cmds.control( self.__loadButton, edit=True, enable=False )

        maya.cmds.formLayout( self.__form, edit=True,
            attachForm=[ ( self.__selector.list(), "top" , 0 ),
                ( self.__selector.list(), "left" , 0 ),
                ( self.__selector.list(), "right" , 0 ),
                ( self.__infoColumn.layout(), "left" , 5 ),
                ( self.__infoColumn.layout(), "right" , 0 ),
                ( self.__loadButton, "bottom", 0 ),
                ( self.__loadButton, "left" , 0 ),
                ( self.__loadButton, "right" , 0 ) ],
            attachControl=[ ( self.__selector.list(), "bottom", 4, self.__infoColumn.layout() ),
                ( self.__infoColumn.layout(), "bottom", 5, self.__loadButton ), ]
        )

        maya.cmds.showWindow( self.__window )

    # Instantiates the presets selected in the list and shows their
    # metadata and parameters in the info column.
    def __selectionChanged( self, *args ) :

        self.__loadedPresets = {}

        classNames = self.__classLoader.classNames()
        selected = [ s for s in self.__selector.selected() if s in classNames ]

        presets = []
        for s in selected:
            self.__loadedPresets[s] = self.__classLoader.load( s )()
            presets.append( self.__loadedPresets[s] )

        self.__infoColumn.setPresets( presets )

    # Applies the selected presets to the node inside a single undoable
    # parameter modification, then closes the window.
    def __doLoad( self ) :

        loaded = self.__loadedPresets.keys()
        selected = [ s for s in self.__selector.selected() if s in loaded ]
        if not selected :
            maya.cmds.confirmDialog( message="Please select at least one preset to load.", button="OK" )
            return

        # Make sure the any parameter changes get set back into
        # the parameterised objects for each preset.
        self.__infoColumn.commitParameters()

        # We need to make sure we have the right values in the first place.
        self.__fnP.setParameterisedValues()

        with self.__fnP.parameterModificationContext() :

            for s in selected:
                # These should have been loaded by the selectCommand callback
                self.__loadedPresets[s]( self.__parameterised, self.__rootParameter )

        maya.cmds.deleteUI( self.__window )
        # Bug fix: this was misspelled "__loadedPrestes", which created a
        # stray attribute instead of releasing the loaded presets.
        self.__loadedPresets = {}

    # \return A list of ( name, preset ) pairs for every preset in the
    # search paths which is applicable to the given parameter.
    def __getPresets( self, parameterised, parameter ) :

        validPresets = []

        self.__classLoader.refresh()
        presets = self.__classLoader.classNames( "*" )

        for name in presets:
            p = self.__classLoader.load( name )()
            if not isinstance( p, IECore.Preset ):
                continue
            if p.applicableTo( parameterised, parameter ):
                validPresets.append( ( name, p ) )

        return validPresets
# Extracts metadata from a preset, and displays in a layout, complete
# with a UI for any parameters of the preset. Any selected presets
# are temporarily instantiated into a FnTransientParameterisedHolderNode.
class PresetInfo() :

    ## \param parent Optional parent layout; defaults to the current parent.
    def __init__( self, parent=None ) :

        oldParent = maya.cmds.setParent( q=True )

        if not parent :
            parent = oldParent

        maya.cmds.setParent( parent )

        self.__parent = parent
        self.__layout = maya.cmds.columnLayout( co=( "both", 5 ), adj=True )

        maya.cmds.setParent( oldParent )

    # \return The Maya handle for the main layout.
    def layout( self ):
        return self.__layout

    ## Rebuilds the info column for the given presets, showing each preset's
    ## title, wrapped description, and a parameter UI where it has parameters.
    def setPresets( self, presets=() ) :

        # Clear out any UI left over from a previous selection.
        children = maya.cmds.columnLayout( self.__layout, q=True, ca=True )
        if children :
            for c in children:
                maya.cmds.deleteUI( c )

        self.__parameterHolders = {}

        for p in presets:

            meta = p.metadata()
            # Bug fix: guard on the key we actually read. This previously
            # tested for "description", which raised a KeyError for presets
            # carrying a description but no title.
            name = meta["title"] if "title" in meta else p.__class__

            maya.cmds.text(
                parent = self.__layout,
                label = name,
                font = "boldLabelFont",
                recomputeSize = True,
                align = "left"
            )

            # '//' keeps this integer division under both Python 2 and 3.
            wrapWidth = ( int(maya.cmds.layout( self.__parent, query=True, width=True )) - 5 ) // 5

            if "description" in meta and meta["description"]:
                descripWrap = IECore.StringUtil.wrap( meta["description"], wrapWidth )
                lines = descripWrap.split( "\n" )
                for l in lines:
                    maya.cmds.text( parent=self.__layout, label=l, font="smallPlainLabelFont", align="left" )

            maya.cmds.separator(
                parent = self.__layout,
                width = 5,
                height = 10,
                style = "none",
            )

            if len( p.parameters().keys() ) :
                self.__parameterHolders[ name ] = FnTransientParameterisedHolderNode.create( self.__layout, p )

    # This must be called before querying the parameters of any presets passed
    # to this UI section, in order to update the Parameterised object with any
    # changes made in the UI.
    def commitParameters( self ) :

        # Iterate a copy of the keys: we delete entries as we go, and
        # mutating a dict while iterating its live key view fails on Python 3.
        for s in list( self.__parameterHolders.keys() ):
            self.__parameterHolders[s].setParameterisedValues()
            del self.__parameterHolders[s]
# Provides an optionMenu to select from paths in the supplied search path string.
class SearchPathMenu() :

    # Presents each element of a colon separated search path string as an
    # item in an optionMenuGrp.
    # *args, **kwargs are passed to maya.cmds.optionMenuGrp on creation.
    def __init__( self, searchPaths, parent=None, *args, **kwargs ) :

        previousParent = maya.cmds.setParent( q=True )
        maya.cmds.setParent( parent if parent else previousParent )

        self.__menu = maya.cmds.optionMenuGrp( *args, **kwargs )
        for pathElement in searchPaths.split( ":" ) :
            maya.cmds.menuItem( label = pathElement )

        maya.cmds.setParent( previousParent )

    # Sets the currently chosen path.
    def setValue( self, value ) :
        maya.cmds.optionMenuGrp( self.__menu, edit=True, value=value )

    # \return The currently chosen path.
    def getValue( self ) :
        return maya.cmds.optionMenuGrp( self.__menu, query=True, value=True )

    # \return The Maya handle for the menu.
    def menu( self ):
        return self.__menu
# Provides a simple list of the supplied presets
class PresetSelector( UIElement ) :

    ## Presents the supplied presets in a textScrollList, grouped under a
    ## heading for the search path element they were found in.
    ## \param presets A sequence of ( name, preset ) pairs.
    # *args, **kwargs are passed to maya.cmds.textScrollList on creation.
    def __init__( self, presets, parent=None, *args, **kwargs ) :

        oldParent = maya.cmds.setParent( q=True )

        if not parent :
            parent = oldParent

        maya.cmds.setParent( parent )

        self.__list = maya.cmds.textScrollList( *args, **kwargs )
        UIElement.__init__( self, self.__list )

        if not presets:

            maya.cmds.textScrollList(
                self.__list,
                edit=True,
                append="No presets found...",
                enable=False
            )

        else :

            # Group the presets by the search path they live under, so each
            # path becomes a heading in the list.
            presetsByPath = {}

            for ( name, p ) in presets :
                # Bug fix: removed a leftover debug 'print name, p' statement
                # here (which was also Python-2-only syntax).
                path = os.path.dirname( p._cob ).rpartition( p.typeName() )[0]
                if path not in presetsByPath :
                    presetsByPath[path] = []
                presetsByPath[path].append( name )

            for ( path, names ) in presetsByPath.items() :
                maya.cmds.textScrollList( self.__list, edit=True, append=path )
                for name in names :
                    maya.cmds.textScrollList( self.__list, edit=True, append=name )
                maya.cmds.textScrollList( self.__list, edit=True, append="" )

        maya.cmds.setParent( oldParent )

    # \return A list of selected names
    def selected( self ) :

        selection = maya.cmds.textScrollList( self.__list, query=True, selectItem=True )
        if not selection:
            return []
        else:
            return selection

    # \return The Maya ELF handle for the list.
    def list( self ) :
        return self.__list
# Provides a maya.cmds.columnLayout containing a hierarchical selection
# interface for the supplied parameter. Each parameter is presented with
# A checkbox to allow selection.
class ParameterSelector( UIElement ) :

    ## Builds the selection interface under a columnLayout.
    ## \param parameter The parameter (usually a CompoundParameter) to present.
    ## \param parent Optional parent layout; defaults to the current parent.
    ## \param autoCollapseDepth Depth at which groups start collapsed.
    def __init__( self, parameter, parent=None, autoCollapseDepth=2 ) :

        oldParent = maya.cmds.setParent( query=True )

        if not parent :
            parent = oldParent

        self.__mainColumn = maya.cmds.columnLayout( adj=True, parent=parent )

        if isinstance( parameter, IECore.CompoundParameter ) :
            self.__controls = ParameterSelector.ParameterGroup( parameter, autoCollapseDepth=autoCollapseDepth )
        else :
            self.__controls = ParameterSelector.Parameter( parameter )

        maya.cmds.setParent( oldParent )

    # \return A list of the selected parameters.
    def getActiveParameters( self ) :
        return self.__controls.getActiveParameters()

    # Provides an interface for selecting an individual parameter.
    class Parameter() :

        def __init__( self, parameter, **kw ) :

            self.__depth = kw["depth"] if "depth" in kw else 0
            self.__checkbox = maya.cmds.checkBox( label=parameter.name, align="left", value=True )
            self.__parameter = parameter

        # Sets the active state of the parameter.
        def setState( self, state ) :
            maya.cmds.checkBox( self.__checkbox, edit=True, value=state )

        # \return (Bool) The active state of the parameter.
        def getState( self ) :
            # bool() collapses the previous if/else to the same True/False.
            return bool( maya.cmds.checkBox( self.__checkbox, query=True, value=True ) )

        # \return the IECore Parameter represented by the control.
        def parameter( self ) :
            return self.__parameter

        # \return Either an empty list, or a list with the parameter, depending
        # on its state. The list syntax is used for interchangeability with the
        # ParameterGroup class.
        def getActiveParameters( self ) :
            return [ self.__parameter ] if self.getState() else []

    # Provides a hierarchical interface for selecting parameters in a CompoundParameter
    class ParameterGroup( UIElement ) :

        def __init__( self, compoundParameter, **kw ) :

            self.__depth = kw["depth"] if "depth" in kw else 0
            self.__autoCollapseDepth = kw["autoCollapseDepth"] if "autoCollapseDepth" in kw else 2
            self.__parameter = compoundParameter

            self.__row = maya.cmds.rowLayout( numberOfColumns = 2, columnWidth=( 1, 20 ) )

            UIElement.__init__( self, self.__row )

            self.__checkbox = maya.cmds.checkBox( label = "", cc=self._createCallback( self.syncState ), value=True )

            name = compoundParameter.name if compoundParameter.name else "All Parameters"
            if "label" in compoundParameter:
                name = compoundParameter["label"].getTypedValue()

            # Collapse deeper groups so the initial view stays manageable.
            collapsed = self.__depth >= self.__autoCollapseDepth

            self.__frame = maya.cmds.frameLayout(
                label = name,
                labelIndent = 5,
                marginWidth = 5,
                borderVisible = False,
                collapsable = True,
                collapse = collapsed,
            )

            self.__column = maya.cmds.columnLayout( adj=True )

            self.__children = {}
            for p in compoundParameter.values() :
                if isinstance( p, IECore.CompoundParameter ) :
                    self.__children[ p.name ] = ParameterSelector.ParameterGroup(
                        p,
                        depth = self.__depth+1,
                        autoCollapseDepth = self.__autoCollapseDepth
                    )
                else:
                    self.__children[ p.name ] = ParameterSelector.Parameter( p, depth=self.__depth+1 )

            maya.cmds.setParent( ".." )
            maya.cmds.setParent( ".." )
            maya.cmds.setParent( ".." )

            maya.cmds.separator( style="none", height=3 )

        # Called by a callback or directly, to set the state of all child
        # parameters of the CompoundParameter. If a state is not provided
        # then the current checked state of the group is propagated.
        def syncState( self, state=None ):

            # Identity comparison per PEP 8 (was 'state == None').
            if state is None:
                state = self.getState()

            for p in self.__children.values() :
                p.setState( state )

        # Can be called to set the state of the group and its children.
        def setState( self, state ) :
            maya.cmds.checkBox( self.__checkbox, edit=True, value=state )
            self.syncState( state )

        # \return (Bool) The checked state of the group itself. Note, this does
        # not take into account whether or not any children are checked.
        def getState( self ) :
            # Direct comparison returns the same True/False as the old if/else.
            return maya.cmds.checkBox( self.__checkbox, query=True, value=True ) == 1

        # \return A list of active parameters in the group.
        def getActiveParameters( self ) :

            params = []
            if self.getState():
                params.append( self.__parameter )

            for p in self.__children.values() :
                params.extend( p.getActiveParameters() )

            return params
| |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DatasetInfo records the information we know about a dataset.
This includes things that we know about the dataset statically, i.e.:
- schema
- description
- canonical location
- does it have validation and tests splits
- size
- etc.
This also includes the things that can and should be computed once we've
processed the dataset as well:
- number of examples (in each split)
- feature statistics (in each split)
- etc.
"""
import abc
import json
import os
import posixpath
import tempfile
from typing import Dict, Optional, Tuple, Union
from absl import logging
from etils import epath
import six
import tensorflow as tf
from tensorflow_datasets.core import file_adapters
from tensorflow_datasets.core import lazy_imports_lib
from tensorflow_datasets.core import naming
from tensorflow_datasets.core import splits as splits_lib
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.features import feature as feature_lib
from tensorflow_datasets.core.features import top_level_feature
from tensorflow_datasets.core.proto import dataset_info_pb2
from tensorflow_datasets.core.utils import gcs_utils
from google.protobuf import json_format
# TODO(b/109648354): Remove the "pytype: disable" comment.
Nest = Union[Tuple["Nest", ...], Dict[str, "Nest"], str] # pytype: disable=not-supported-yet
SupervisedKeysType = Union[Tuple[Nest, Nest], Tuple[Nest, Nest, Nest]]
# Name of the file to output the DatasetInfo protobuf object.
DATASET_INFO_FILENAME = "dataset_info.json"
LICENSE_FILENAME = "LICENSE"
METADATA_FILENAME = "metadata.json"
class Metadata(dict, metaclass=abc.ABCMeta):
  """Abstract base class for DatasetInfo metadata container.

  `builder.info.metadata` allows the dataset to expose additional general
  information about the dataset which are not specific to a feature or
  individual example.

  To implement the interface, overwrite `save_metadata` and
  `load_metadata`.

  See `tfds.core.MetadataDict` for a simple implementation that acts as a
  dict that saves data to/from a JSON file.
  """
  # Modernisation: the file is Python-3-only (it uses f-strings), so the
  # native `metaclass=` keyword replaces the legacy `six.add_metaclass`
  # decorator. The resulting class is identical (same metaclass, same
  # abstract methods).

  @abc.abstractmethod
  def save_metadata(self, data_dir):
    """Save the metadata."""
    raise NotImplementedError()

  @abc.abstractmethod
  def load_metadata(self, data_dir):
    """Restore the metadata."""
    raise NotImplementedError()
class DatasetInfo(object):
"""Information about a dataset.
`DatasetInfo` documents datasets, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known if the dataset hasn't been generated yet
(before the first `builder.download_and_prepare()` call). For example splits
names or number of examples might be missing (as they are computed
at dataset creation time).
"""
def __init__(
    self,
    *,
    builder,
    description: Optional[str] = None,
    features: Optional[feature_lib.FeatureConnector] = None,
    supervised_keys: Optional[SupervisedKeysType] = None,
    disable_shuffling: bool = False,
    homepage: Optional[str] = None,
    citation: Optional[str] = None,
    metadata: Optional[Metadata] = None,
    license: Optional[str] = None,  # pylint: disable=redefined-builtin
    redistribution_info: Optional[Dict[str, str]] = None,
    split_dict: Optional[splits_lib.SplitDict] = None):
  """Constructs DatasetInfo.

  Args:
    builder: `DatasetBuilder`, dataset builder for this info.
    description: `str`, description of this dataset.
    features: `tfds.features.FeaturesDict`, Information on the feature dict of
      the `tf.data.Dataset()` object from the `builder.as_dataset()` method.
    supervised_keys: Specifies the input structure for supervised learning, if
      applicable for the dataset, used with "as_supervised". The keys
      correspond to the feature names to select in `info.features`. When
      calling `tfds.core.DatasetBuilder.as_dataset()` with
      `as_supervised=True`, the `tf.data.Dataset` object will yield the
      structure defined by the keys passed here, instead of that defined by
      the `features` argument. Typically this is a `(input_key, target_key)`
      tuple, and the dataset yields a tuple of tensors `(input, target)`
      tensors.

      To yield a more complex structure, pass a tuple of `tf.nest` compatible
      structures of feature keys. The resulting `Dataset` will yield
      structures with each key replaced by the coresponding tensor. For
      example, passing a triple of keys would return a dataset
      that yields `(feature, target, sample_weights)` triples for keras.
      Using `supervised_keys=({'a':'a','b':'b'}, 'c')` would create a dataset
      yielding a tuple with a dictionary of features in the `features`
      position.

      Note that selecting features in nested `tfds.features.FeaturesDict`
      objects is not supported.
    disable_shuffling: `bool`, specify whether to shuffle the examples.
    homepage: `str`, optional, the homepage for this dataset.
    citation: `str`, optional, the citation to use for this dataset.
    metadata: `tfds.core.Metadata`, additonal object which will be
      stored/restored with the dataset. This allows for storing additional
      information with the dataset.
    license: license of the dataset.
    redistribution_info: information needed for redistribution, as specified
      in `dataset_info_pb2.RedistributionInfo`. The content of the `license`
      subfield will automatically be written to a LICENSE file stored with the
      dataset.
    split_dict: information about the splits in this dataset.
  """
  self._builder = builder

  # The config name/description come from the builder config when present.
  if builder.builder_config:
    config_name = builder.builder_config.name
    config_description = builder.builder_config.description
  else:
    config_name = None
    config_description = None

  # NOTE(review): if `redistribution_info` is given without a "license" key
  # and `license` is None, the `pop` below raises KeyError — presumably one
  # of the two is always supplied; confirm with callers.
  self._info_proto = dataset_info_pb2.DatasetInfo(
      name=builder.name,
      description=utils.dedent(description),
      version=str(builder.version),
      release_notes=builder.release_notes,
      disable_shuffling=disable_shuffling,
      config_name=config_name,
      config_description=config_description,
      citation=utils.dedent(citation),
      module_name=str(builder.__module__),
      redistribution_info=dataset_info_pb2.RedistributionInfo(
          license=utils.dedent(license or redistribution_info.pop("license")),
          **redistribution_info) if redistribution_info else None)

  if homepage:
    self._info_proto.location.urls[:] = [homepage]

  # Only top-level feature containers are accepted here.
  if features:
    if not isinstance(features, top_level_feature.TopLevelFeature):
      raise ValueError(
          "DatasetInfo.features only supports FeaturesDict or Sequence at "
          "the top-level. Got {}".format(features))
  self._features = features
  self._splits = splits_lib.SplitDict([])
  if split_dict:
    self.set_splits(split_dict)
  if supervised_keys is not None:
    self._info_proto.supervised_keys.CopyFrom(
        _supervised_keys_to_proto(supervised_keys))

  if metadata and not isinstance(metadata, Metadata):
    raise ValueError(
        "Metadata should be a `tfds.core.Metadata` instance. Received "
        "{}".format(metadata))
  self._metadata = metadata

  # Is this object initialized with both the static and the dynamic data?
  self._fully_initialized = False
@classmethod
def from_proto(cls, builder,
               proto: dataset_info_pb2.DatasetInfo) -> "DatasetInfo":
  """Instantiates DatasetInfo from the given builder and proto."""
  # Consistency checks between the builder and the restored proto. Note
  # these are `assert`s, so they are stripped when Python runs with -O.
  if builder.builder_config:
    assert builder.builder_config.name == proto.config_name
  assert str(builder.version) == proto.version
  features = None
  if proto.HasField("features"):
    features = feature_lib.FeatureConnector.from_proto(proto.features)
  supervised_keys = None
  if proto.HasField("supervised_keys"):
    supervised_keys = _supervised_keys_from_proto(proto.supervised_keys)
  # Fall back to "tfrecord" for protos written before file_format existed.
  filename_template = naming.ShardedFileTemplate(
      dataset_name=builder.name,
      data_dir=builder.data_dir,
      filetype_suffix=proto.file_format or "tfrecord")
  return cls(
      builder=builder,
      description=proto.description,
      features=features,
      supervised_keys=supervised_keys,
      disable_shuffling=proto.disable_shuffling,
      citation=proto.citation,
      license=proto.redistribution_info.license,
      split_dict=splits_lib.SplitDict.from_proto(
          repeated_split_infos=proto.splits,
          filename_template=filename_template),
  )
@property
def as_proto(self) -> dataset_info_pb2.DatasetInfo:
return self._info_proto
@property
def name(self) -> str:
return self.as_proto.name
@property
def config_name(self) -> str:
return self.as_proto.config_name
@property
def full_name(self):
"""Full canonical name: (<dataset_name>/<config_name>/<version>)."""
names = [self._builder.name]
if self._builder.builder_config:
names.append(self._builder.builder_config.name)
names.append(str(self.version))
return posixpath.join(*names)
@property
def description(self):
return self.as_proto.description
@property
def version(self):
return self._builder.version
@property
def release_notes(self) -> Optional[Dict[str, str]]:
return self._builder.release_notes
@property
def disable_shuffling(self) -> bool:
return self.as_proto.disable_shuffling
@property
def homepage(self):
urls = self.as_proto.location.urls
tfds_homepage = f"https://www.tensorflow.org/datasets/catalog/{self.name}"
return urls and urls[0] or tfds_homepage
  @property
  def citation(self) -> str:
    """Citation string from the proto."""
    return self.as_proto.citation
  @property
  def data_dir(self):
    """Data directory, forwarded from the builder."""
    return self._builder.data_dir
  @property
  def dataset_size(self) -> utils.Size:
    """Generated dataset files size, in bytes."""
    # For old datasets, maybe empty.
    return utils.Size(sum(split.num_bytes for split in self.splits.values()))
  @property
  def download_size(self) -> utils.Size:
    """Downloaded files size, in bytes."""
    # Fallback to deprecated `size_in_bytes` if `download_size` is empty.
    return utils.Size(self.as_proto.download_size or
                      self.as_proto.size_in_bytes)
  @download_size.setter
  def download_size(self, size):
    # Writes straight through to the backing proto.
    self.as_proto.download_size = size
  @property
  def features(self):
    """Feature connector for the dataset (may be None)."""
    return self._features
  @property
  def metadata(self) -> Optional[Metadata]:
    """Additional metadata object, or None when the dataset defines none."""
    return self._metadata
  @property
  def supervised_keys(self) -> Optional[SupervisedKeysType]:
    """Supervised keys restored from the proto, or None when unset."""
    if not self.as_proto.HasField("supervised_keys"):
      return None
    supervised_keys = self.as_proto.supervised_keys
    return _supervised_keys_from_proto(supervised_keys)
  @property
  def redistribution_info(self):
    """`redistribution_info` sub-proto (holds e.g. the license)."""
    return self.as_proto.redistribution_info
  @property
  def module_name(self) -> str:
    """Module name recorded in the proto."""
    return self.as_proto.module_name
  @property
  def file_format(self) -> Optional[file_adapters.FileFormat]:
    """File format enum, or None when the proto field is empty."""
    if not self.as_proto.file_format:
      return None
    return file_adapters.FileFormat(self.as_proto.file_format)
  def set_file_format(
      self,
      file_format: Union[None, str, file_adapters.FileFormat],
  ) -> None:
    """Internal function to define the file format.

    The file format is set during `FileReaderBuilder.__init__`,
    not `DatasetInfo.__init__`.

    Args:
      file_format: The file format.

    Raises:
      ValueError: If `file_format` is not a valid format, or if a different
        format was already set.
    """
    # If file format isn't present already, fallback to `DEFAULT_FILE_FORMAT`
    file_format = (
        file_format  # Format explicitly given: tfds.builder(..., file_format=x)
        or self.file_format  # Format restored from dataset_info.json
        or file_adapters.DEFAULT_FILE_FORMAT)
    try:
      new_file_format = file_adapters.FileFormat(file_format)
    except ValueError as e:
      # Surface the list of accepted formats in the error message.
      all_values = [f.value for f in file_adapters.FileFormat]
      utils.reraise(e, suffix=f". Valid file formats: {all_values}")
    # If the file format has been set once, file format should be consistent
    if self.file_format and self.file_format != new_file_format:
      raise ValueError(f"File format is already set to {self.file_format}. "
                       f"Got {new_file_format}")
    self.as_proto.file_format = new_file_format.value
  @property
  def splits(self) -> splits_lib.SplitDict:
    """Split dictionary; populated via `set_splits`."""
    return self._splits
  def set_splits(self, split_dict: splits_lib.SplitDict) -> None:
    """Split setter (private method).

    Validates the splits against this dataset, carries over previously loaded
    statistics, fills in missing filename templates, and mirrors the result
    into the backing proto.
    """
    # Reject splits that belong to a different dataset. `MultiSplitInfo` is
    # exempt because its splits may legitimately come from other folders.
    for split, split_info in split_dict.items():
      if isinstance(split_info, splits_lib.MultiSplitInfo):
        # When splits are from multiple folders, the dataset can be different.
        continue
      if (split_info.filename_template and
          self._builder.name != split_info.filename_template.dataset_name):
        raise AssertionError(
            f"SplitDict contains SplitInfo for split {split} whose "
            "dataset_name does not match to the dataset name in dataset_info. "
            f"{self._builder.name} != {split_info.filename_template.dataset_name}"
        )
    # If the statistics have been pre-loaded, forward the statistics
    # into the new split_dict. Also add the filename template if it's not set.
    new_split_infos = []
    incomplete_filename_template = naming.ShardedFileTemplate(
        dataset_name=self.name,
        data_dir=self.data_dir,
        filetype_suffix=self.as_proto.file_format or "tfrecord")
    for split_info in split_dict.values():
      if isinstance(split_info, splits_lib.MultiSplitInfo):
        new_split_infos.append(split_info)
        continue
      old_split_info = self._splits.get(split_info.name)
      # Reuse old statistics only when the new info carries none and the
      # shard layout is unchanged.
      if (not split_info.statistics.ByteSize() and old_split_info and
          old_split_info.statistics.ByteSize() and
          old_split_info.shard_lengths == split_info.shard_lengths):
        split_info = split_info.replace(statistics=old_split_info.statistics)
      if not split_info.filename_template:
        filename_template = incomplete_filename_template.replace(
            split=split_info.name)
        split_info = split_info.replace(filename_template=filename_template)
      new_split_infos.append(split_info)
    # Update the dictionary representation.
    self._splits = splits_lib.SplitDict(new_split_infos)
    # Update the proto
    # Note that the proto should not be saved or used for multi-folder datasets.
    del self.as_proto.splits[:]  # Clear previous
    for split_info in self._splits.values():
      if isinstance(split_info, splits_lib.MultiSplitInfo):
        # Flatten MultiSplitInfo into its individual split protos.
        for si in split_info.split_infos:
          self.as_proto.splits.add().CopyFrom(si.to_proto())
      else:
        self.as_proto.splits.add().CopyFrom(split_info.to_proto())
def update_data_dir(self, data_dir: str) -> None:
"""Updates the data dir for each split."""
new_split_infos = []
for split_info in self._splits.values():
if isinstance(split_info, splits_lib.MultiSplitInfo):
raise RuntimeError(
"Updating the data_dir for MultiSplitInfo is not supported!")
filename_template = split_info.filename_template.replace(
data_dir=data_dir)
new_split_info = split_info.replace(filename_template=filename_template)
new_split_infos.append(new_split_info)
self.set_splits(splits_lib.SplitDict(new_split_infos))
  @property
  def initialized(self) -> bool:
    """Whether DatasetInfo has been fully initialized."""
    return self._fully_initialized
  def _dataset_info_path(self, dataset_info_dir):
    """Path of `dataset_info.json` inside `dataset_info_dir`."""
    return os.path.join(dataset_info_dir, DATASET_INFO_FILENAME)
  def _license_path(self, dataset_info_dir):
    """Path of the LICENSE file inside `dataset_info_dir`."""
    return os.path.join(dataset_info_dir, LICENSE_FILENAME)
  @property
  def as_json(self) -> str:
    """JSON serialization of the backing proto (keys sorted for stability)."""
    return json_format.MessageToJson(self.as_proto, sort_keys=True)
  def write_to_directory(self, dataset_info_dir) -> None:
    """Write `DatasetInfo` as JSON to `dataset_info_dir`.

    Also saves the feature config, optional metadata and LICENSE file.
    """
    # Save the features structure & metadata (vocabulary, labels,...)
    if self.features:
      self.features.save_config(dataset_info_dir)
    # Save any additional metadata
    if self.metadata is not None:
      self.metadata.save_metadata(dataset_info_dir)
    # LICENSE is written only when a license string is present in the proto.
    if self.redistribution_info.license:
      with tf.io.gfile.GFile(self._license_path(dataset_info_dir), "w") as f:
        f.write(self.redistribution_info.license)
    with tf.io.gfile.GFile(self._dataset_info_path(dataset_info_dir), "w") as f:
      f.write(self.as_json)
  def read_from_directory(self, dataset_info_dir: str) -> None:
    """Update DatasetInfo from the JSON files in `dataset_info_dir`.

    This function updates all the dynamically generated fields (num_examples,
    hash, time of creation,...) of the DatasetInfo.

    This will overwrite all previous metadata.

    Args:
      dataset_info_dir: `str` The directory containing the metadata file. This
        should be the root directory of a specific dataset version.

    Raises:
      FileNotFoundError: If the dataset_info.json can't be found.
    """
    logging.info("Load dataset info from %s", dataset_info_dir)
    json_filename = self._dataset_info_path(dataset_info_dir)
    if not tf.io.gfile.exists(json_filename):
      raise FileNotFoundError(
          "Tried to load `DatasetInfo` from a directory which does not exist or"
          " does not contain `dataset_info.json`. Please delete the directory "
          f"`{dataset_info_dir}` if you are trying to re-generate the "
          "dataset.")
    # Load the metadata from disk
    parsed_proto = read_from_json(json_filename)
    # Update splits
    filename_template = naming.ShardedFileTemplate(
        dataset_name=self._builder.name,
        data_dir=self.data_dir,
        filetype_suffix=parsed_proto.file_format or "tfrecord")
    split_dict = splits_lib.SplitDict.from_proto(
        repeated_split_infos=parsed_proto.splits,
        filename_template=filename_template)
    self.set_splits(split_dict)
    # Restore the feature metadata (vocabulary, labels names,...)
    if self.features:
      self.features.load_metadata(dataset_info_dir)
    # For `ReadOnlyBuilder`, reconstruct the features from the config.
    elif tf.io.gfile.exists(feature_lib.make_config_path(dataset_info_dir)):
      self._features = feature_lib.FeatureConnector.from_config(
          dataset_info_dir)
    # Restore the MetaDataDict from metadata.json if there is any
    if (self.metadata is not None or
        tf.io.gfile.exists(_metadata_filepath(dataset_info_dir))):
      # If the dataset was loaded from file, self.metadata will be `None`, so
      # we create a MetadataDict first.
      if self.metadata is None:
        self._metadata = MetadataDict()
      self.metadata.load_metadata(dataset_info_dir)
    # Update fields which are not defined in the code. This means that
    # the code will overwrite fields which are present in
    # dataset_info.json.
    for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items():
      field_value = getattr(self._info_proto, field_name)
      field_value_restored = getattr(parsed_proto, field_name)
      # `HasField` raises ValueError for repeated/scalar fields without
      # presence; fall back to truthiness for those.
      try:
        is_defined = self._info_proto.HasField(field_name)
      except ValueError:
        is_defined = bool(field_value)
      try:
        is_defined_in_restored = parsed_proto.HasField(field_name)
      except ValueError:
        is_defined_in_restored = bool(field_value_restored)
      # If field is defined in code, we ignore the value
      if is_defined:
        if field_value != field_value_restored:
          logging.info(
              "Field info.%s from disk and from code do not match. Keeping "
              "the one from code.", field_name)
        continue
      # If the field is also not defined in JSON file, we do nothing
      if not is_defined_in_restored:
        continue
      # Otherwise, we restore the dataset_info.json value
      if field.type == field.TYPE_MESSAGE:
        field_value.MergeFrom(field_value_restored)
      else:
        setattr(self._info_proto, field_name, field_value_restored)
    if self._builder._version != self.version:  # pylint: disable=protected-access
      raise AssertionError(
          "The constructed DatasetInfo instance and the restored proto version "
          "do not match. Builder version: {}. Proto version: {}".format(
              self._builder._version, self.version))  # pylint: disable=protected-access
    # Mark as fully initialized.
    self._fully_initialized = True
  def initialize_from_bucket(self) -> None:
    """Initialize DatasetInfo from GCS bucket info files."""
    # In order to support Colab, we use the HTTP GCS API to access the metadata
    # files. They are copied locally and then loaded.
    # NOTE(review): the temporary directory is not removed afterwards —
    # confirm whether cleanup is handled elsewhere.
    tmp_dir = tempfile.mkdtemp("tfds")
    data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
    if not data_files:
      # Nothing precomputed on GCS for this dataset: leave state untouched.
      return
    logging.info(
        "Load pre-computed DatasetInfo (eg: splits, num examples,...) "
        "from GCS: %s", self.full_name)
    for fname in data_files:
      out_fname = os.path.join(tmp_dir, os.path.basename(fname))
      tf.io.gfile.copy(os.fspath(gcs_utils.gcs_path(fname)), out_fname)
    self.read_from_directory(tmp_dir)
  def __repr__(self):
    """Human-readable multi-line summary of the dataset info."""
    # Sentinel marking entries to omit from the output.
    SKIP = object()  # pylint: disable=invalid-name
    splits = _indent("\n".join(
        ["{"] +
        [f" '{k}': {split}," for k, split in sorted(self.splits.items())] +
        ["}"]))
    if self._info_proto.config_description:
      config_description = _indent(
          f'"""\n{self._info_proto.config_description}\n"""')
    else:
      config_description = SKIP
    lines = ["tfds.core.DatasetInfo("]
    for key, value in [
        ("name", repr(self.name)),
        ("full_name", repr(self.full_name)),
        ("description", _indent(f'"""\n{self.description}\n"""')),
        ("config_description", config_description),
        ("homepage", repr(self.homepage)),
        ("data_path", repr(self.data_dir)),
        ("download_size", self.download_size),
        ("dataset_size", self.dataset_size),
        ("features", _indent(repr(self.features))),
        ("supervised_keys", self.supervised_keys),
        ("disable_shuffling", self.disable_shuffling),
        ("splits", splits),
        ("citation", _indent(f'"""{self.citation}"""')),
        # Proto add a \n that we strip.
        ("redistribution_info", str(self.redistribution_info).strip() or SKIP),
    ]:
      if value != SKIP:
        lines.append(f" {key}={value},")
    lines.append(")")
    return "\n".join(lines)
def _nest_to_proto(nest: Nest) -> dataset_info_pb2.SupervisedKeys.Nest:
  """Creates a `SupervisedKeys.Nest` from a limited `tf.nest` style structure.

  Args:
    nest: A `tf.nest` structure of tuples, dictionaries or string feature keys.

  Returns:
    The same structure as a `SupervisedKeys.Nest` proto.
  """
  proto = dataset_info_pb2.SupervisedKeys.Nest()
  # Exact type checks (rather than isinstance) are deliberate: subclasses
  # are rejected, as spelled out in the error message below.
  nest_type = type(nest)
  if nest_type is str:
    proto.feature_key = nest
  elif nest_type is tuple:
    for element in nest:
      proto.tuple.items.append(_nest_to_proto(element))
  elif nest_type is dict:
    converted = {key: _nest_to_proto(value) for key, value in nest.items()}
    proto.dict.CopyFrom(dataset_info_pb2.SupervisedKeys.Dict(dict=converted))
  else:
    raise ValueError("The nested structures in `supervised_keys` must only "
                     "contain instances of (tuple, dict, str), no subclasses.\n"
                     f"Found type: {nest_type}")
  return proto
def _supervised_keys_to_proto(
    keys: SupervisedKeysType) -> dataset_info_pb2.SupervisedKeys:
  """Converts a `supervised_keys` tuple to a SupervisedKeys proto."""
  # Validate the container first: only a 2- or 3-tuple is accepted.
  if not isinstance(keys, tuple) or len(keys) not in (2, 3):
    raise ValueError(
        "`supervised_keys` must contain a tuple of 2 or 3 elements.\n"
        f"got: {keys!r}")
  return dataset_info_pb2.SupervisedKeys(
      tuple=dataset_info_pb2.SupervisedKeys.Tuple(
          items=(_nest_to_proto(key) for key in keys)))
def _nest_from_proto(proto: dataset_info_pb2.SupervisedKeys.Nest) -> Nest:
  """Creates a `tf.nest` style structure from a `SupervisedKeys.Nest` proto.

  Args:
    proto: A `SupervisedKeys.Nest` proto.

  Returns:
    The proto converted to a `tf.nest` style structure of tuples, dictionaries
    or strings.
  """
  # The three branches mirror the encoding done by `_nest_to_proto`.
  if proto.HasField("tuple"):
    return tuple(_nest_from_proto(item) for item in proto.tuple.items)
  if proto.HasField("dict"):
    # Sorted for a deterministic dict ordering.
    return {
        name: _nest_from_proto(sub)
        for name, sub in sorted(proto.dict.dict.items())
    }
  if proto.HasField("feature_key"):
    return proto.feature_key
  raise ValueError("`SupervisedKeys.Nest` proto must contain one of "
                   f"(tuple, dict, feature_key). Got: {proto}")
def _supervised_keys_from_proto(
    proto: dataset_info_pb2.SupervisedKeys) -> SupervisedKeysType:
  """Converts a `SupervisedKeys` proto back to a simple python tuple."""
  if proto.input and proto.output:
    # Plain (input, output) pair.
    return (proto.input, proto.output)
  elif proto.tuple:
    return tuple(_nest_from_proto(item) for item in proto.tuple.items)
  else:
    # BUG FIX: the message was missing the f-prefix, so the literal text
    # "{proto}" was printed instead of the offending proto contents.
    raise ValueError("A `SupervisedKeys` proto must have either `input` and "
                     f"`output` defined, or `tuple`, got: {proto}")
def _indent(content):
"""Add indentation to all lines except the first."""
lines = content.split("\n")
return "\n".join([lines[0]] + [" " + l for l in lines[1:]])
def _populate_shape(shape_or_dict, prefix, schema_features):
"""Populates shape in the schema."""
if isinstance(shape_or_dict, (tuple, list)):
feature_name = "/".join(prefix)
if shape_or_dict and feature_name in schema_features:
schema_feature = schema_features[feature_name]
schema_feature.ClearField("shape")
for dim in shape_or_dict:
# We denote `None`s as -1 in the shape proto.
schema_feature.shape.dim.add().size = -1 if dim is None else dim
return
for name, val in shape_or_dict.items():
prefix.append(name)
_populate_shape(val, prefix, schema_features)
prefix.pop()
def get_dataset_feature_statistics(builder, split):
  """Calculate statistics for the specified split.

  Uses TensorFlow Data Validation (lazily imported) over the generated files.

  Args:
    builder: Dataset builder whose generated files are analyzed.
    split: Name of the split to compute statistics for.

  Returns:
    A `(statistics, schema)` pair: the first TFDV dataset statistics entry and
    the inferred schema with shapes overridden from the builder's features.
  """
  tfdv = lazy_imports_lib.lazy_imports.tensorflow_data_validation
  # TODO(epot): Avoid hardcoding file format.
  filetype_suffix = "tfrecord"
  if filetype_suffix not in ["tfrecord", "csv"]:
    raise ValueError(
        "Cannot generate statistics for filetype {}".format(filetype_suffix))
  filename_template = naming.ShardedFileTemplate(
      data_dir=builder.data_dir,
      dataset_name=builder.name,
      split=split,
      filetype_suffix=filetype_suffix)
  filepattern = filename_template.sharded_filepaths_pattern()
  # Avoid generating a large number of buckets in rank histogram
  # (default is 1000).
  stats_options = tfdv.StatsOptions(
      num_top_values=10, num_rank_histogram_buckets=10)
  if filetype_suffix == "csv":
    statistics = tfdv.generate_statistics_from_csv(
        filepattern, stats_options=stats_options)
  else:
    statistics = tfdv.generate_statistics_from_tfrecord(
        filepattern, stats_options=stats_options)
  schema = tfdv.infer_schema(statistics)
  schema_features = {feature.name: feature for feature in schema.feature}
  # Override shape in the schema.
  for feature_name, feature in builder.info.features.items():
    _populate_shape(feature.shape, [feature_name], schema_features)
  # Remove legacy field.
  if getattr(schema, "generate_legacy_feature_spec", None) is not None:
    schema.ClearField("generate_legacy_feature_spec")
  return statistics.datasets[0], schema
def read_from_json(path: epath.PathLike) -> dataset_info_pb2.DatasetInfo:
  """Read JSON-formatted proto into DatasetInfo proto."""
  # Read the file content, then parse the JSON payload back into a proto.
  return json_format.Parse(
      epath.Path(path).read_text(), dataset_info_pb2.DatasetInfo())
def read_proto_from_builder_dir(
    builder_dir: epath.PathLike) -> dataset_info_pb2.DatasetInfo:
  """Reads the dataset info from the given builder dir.

  Args:
    builder_dir: The folder that contains the dataset info files.

  Returns:
    The DatasetInfo proto as read from the builder dir.

  Raises:
    FileNotFoundError: If the builder_dir does not exist.
  """
  info_path = os.path.join(
      os.path.expanduser(builder_dir), DATASET_INFO_FILENAME)
  if not tf.io.gfile.exists(info_path):
    # BUG FIX: grammar in the user-facing message ("does not exists").
    raise FileNotFoundError(
        f"Could not load dataset info: {info_path} does not exist.")
  return read_from_json(info_path)
def pack_as_supervised_ds(
    ds: tf.data.Dataset,
    ds_info: DatasetInfo,
) -> tf.data.Dataset:
  """Pack `(input, label)` dataset as `{'key0': input, 'key1': label}`."""
  is_supervised_pair = (
      ds_info.supervised_keys and isinstance(ds.element_spec, tuple) and
      len(ds.element_spec) == 2)
  if not is_supervised_pair:
    # Dataset isn't a supervised (input, label) tuple: return it unchanged.
    return ds
  x_key, y_key = ds_info.supervised_keys
  return ds.map(lambda x, y: {x_key: x, y_key: y})
def _metadata_filepath(data_dir):
  """Returns the path of the serialized metadata file inside `data_dir`."""
  return os.path.join(data_dir, METADATA_FILENAME)
class MetadataDict(Metadata, dict):
  """A `tfds.core.Metadata` object that acts as a `dict`.

  By default, the metadata will be serialized as JSON.
  """

  def save_metadata(self, data_dir):
    """Save the metadata (the dict content) as JSON under `data_dir`."""
    with tf.io.gfile.GFile(_metadata_filepath(data_dir), "w") as f:
      json.dump(self, f)

  def load_metadata(self, data_dir):
    """Restore the metadata, replacing any existing content."""
    self.clear()
    with tf.io.gfile.GFile(_metadata_filepath(data_dir), "r") as f:
      self.update(json.load(f))
class BeamMetadataDict(MetadataDict):
  """A `tfds.core.Metadata` object supporting Beam-generated datasets."""

  def __init__(self, *args, **kwargs):
    super(BeamMetadataDict, self).__init__(*args, **kwargs)
    # Scratch dir where per-key JSON files are written by the Beam pipeline.
    self._tempdir = tempfile.mkdtemp("tfds_beam_metadata")

  def _temp_filepath(self, key):
    """Temporary JSON file used to stage the Beam output for `key`."""
    return os.path.join(self._tempdir, "%s.json" % key)

  def __setitem__(self, key, item):
    """Creates write sink for beam PValues or sets value of key in `dict`.

    If the item is a PValue, it is expected to contain exactly one element,
    which will be written out as a temporary JSON file once the beam pipeline
    runs. These outputs will be loaded and stored in a single JSON when
    `save_metadata` is called after the pipeline completes.

    Args:
      key: hashable type, the key for the item.
      item: `beam.pvalue.PValue` or other, the metadata value.
    """
    beam = lazy_imports_lib.lazy_imports.apache_beam
    if isinstance(item, beam.PTransform):
      # Implementing Beam support might be possible but would
      # require very careful implementation to avoid computing the
      # PTransform twice (once for the split and once for the metadata).
      raise NotImplementedError(
          "`tfds.core.BeamMetadataDict` can\'t be used on `beam.PTransform`, "
          "only on `beam.PCollection`. See `_generate_examples` doc on how "
          "to use `beam.PCollection`, or wrap your `_generate_examples` inside "
          f"a @beam.ptransform_fn. Got: {key}: {item}")
    elif isinstance(item, beam.pvalue.PValue):
      if key in self:
        raise ValueError("Already added PValue with key: %s" % key)
      logging.info("Lazily adding metadata item with Beam: %s", key)
      def _to_json(item_list):
        # Each metadata PCollection must reduce to exactly one element.
        if len(item_list) != 1:
          raise ValueError(
              "Each metadata PValue must contain a single element. Got %d." %
              len(item_list))
        item = item_list[0]
        return json.dumps(item)
      # Wire the PCollection -> single JSON file pipeline; it only executes
      # when the Beam pipeline runs.
      _ = (
          item
          | "metadata_%s_tolist" % key >> beam.combiners.ToList()
          | "metadata_%s_tojson" % key >> beam.Map(_to_json)
          | "metadata_%s_write" % key >> beam.io.WriteToText(
              self._temp_filepath(key),
              num_shards=1,
              shard_name_template="",
          ))
    super(BeamMetadataDict, self).__setitem__(key, item)

  def save_metadata(self, data_dir):
    """Save the metadata inside the beam job."""
    beam = lazy_imports_lib.lazy_imports.apache_beam
    # Replace every staged PValue with the JSON it produced, then clean up.
    for key, item in self.items():
      if isinstance(item, beam.pvalue.PValue):
        with tf.io.gfile.GFile(self._temp_filepath(key), "r") as f:
          self[key] = json.load(f)
    tf.io.gfile.rmtree(self._tempdir)
    super(BeamMetadataDict, self).save_metadata(data_dir)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import shutil
import tempfile
import unittest
import numpy as np
import pandas as pd
from mleap.sklearn.preprocessing.data import FeatureExtractor, MathUnary, MathBinary, StringMap
from mleap.sklearn.preprocessing.data import StandardScaler, MinMaxScaler, LabelEncoder, Binarizer, PolynomialFeatures
from pandas.util.testing import assert_frame_equal
class TransformerTests(unittest.TestCase):
    def setUp(self):
        # Fresh 10x5 random frame and a scratch dir for bundle serialization.
        self.df = pd.DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
        self.tmp_dir = tempfile.mkdtemp()

    def tearDown(self):
        # Remove the scratch dir created in setUp.
        shutil.rmtree(self.tmp_dir)
    def test_standard_scaler_serializer(self):
        """Serializes a fitted one-column StandardScaler and checks the bundle JSON."""
        standard_scaler = StandardScaler(with_mean=True,
                                         with_std=True
                                         )
        extract_features = ['a']
        feature_extractor = FeatureExtractor(input_scalars=['a'],
                                             output_vector='extracted_a_output',
                                             output_vector_items=["{}_out".format(x) for x in extract_features])
        standard_scaler.mlinit(prior_tf=feature_extractor,
                               output_features='a_scaled')
        standard_scaler.fit(self.df[['a']])
        standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)
        # Expected values computed directly from the source frame.
        expected_mean = self.df.a.mean()
        expected_std = np.sqrt(np.var(self.df.a))
        # Expected JSON structure of the serialized model.
        expected_model = {
            "op": "standard_scaler",
            "attributes": {
                "mean": {
                    "double": [expected_mean],
                    "shape": {
                        "dimensions": [{
                            "size": 1,
                            "name": ""
                        }]
                    },
                    "type": "tensor"
                },
                "std": {
                    "double": [expected_std],
                    "shape": {
                        "dimensions": [{
                            "size": 1,
                            "name": ""
                        }]
                    },
                    "type": "tensor"
                }
            }
        }
        self.assertAlmostEqual(expected_mean, standard_scaler.mean_.tolist()[0], places = 7)
        self.assertAlmostEqual(expected_std, np.sqrt(standard_scaler.var_.tolist()[0]), places = 7)
        # Test model.json
        with open("{}/{}.node/model.json".format(self.tmp_dir, standard_scaler.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(standard_scaler.op, expected_model['op'])
        self.assertEqual(expected_model['attributes']['mean']['shape']['dimensions'][0]['size'], model['attributes']['mean']['shape']['dimensions'][0]['size'])
        self.assertEqual(expected_model['attributes']['std']['shape']['dimensions'][0]['size'], model['attributes']['std']['shape']['dimensions'][0]['size'])
        self.assertAlmostEqual(expected_model['attributes']['mean']['double'][0], model['attributes']['mean']['double'][0], places = 7)
        self.assertAlmostEqual(expected_model['attributes']['std']['double'][0], model['attributes']['std']['double'][0], places = 7)
        # Test node.json
        with open("{}/{}.node/node.json".format(self.tmp_dir, standard_scaler.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(standard_scaler.name, node['name'])
        self.assertEqual(standard_scaler.input_features, node['shape']['inputs'][0]['name'])
        self.assertEqual(standard_scaler.output_features, node['shape']['outputs'][0]['name'])
    def test_standard_scaler_deserializer(self):
        """Round-trips a one-column StandardScaler through a bundle and compares results."""
        extract_features = ['a']
        feature_extractor = FeatureExtractor(input_scalars=['a'],
                                             output_vector='extracted_a_output',
                                             output_vector_items=["{}_out".format(x) for x in extract_features])
        # Serialize a standard scaler to a bundle
        standard_scaler = StandardScaler(with_mean=True,
                                         with_std=True
                                         )
        standard_scaler.mlinit(prior_tf=feature_extractor,
                               output_features='a_scaled')
        standard_scaler.fit(self.df[['a']])
        standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)
        # Now deserialize it back
        node_name = "{}.node".format(standard_scaler.name)
        standard_scaler_tf = StandardScaler()
        standard_scaler_tf = standard_scaler_tf.deserialize_from_bundle(self.tmp_dir, node_name)
        # Transform some sample data
        res_a = standard_scaler.transform(self.df[['a']])
        res_b = standard_scaler_tf.transform(self.df[['a']])
        # Deserialized scaler must reproduce the original transform and state.
        self.assertEqual(res_a[0], res_b[0])
        self.assertEqual(standard_scaler.name, standard_scaler_tf.name)
        self.assertEqual(standard_scaler.op, standard_scaler_tf.op)
        self.assertEqual(standard_scaler.mean_, standard_scaler_tf.mean_)
        self.assertEqual(standard_scaler.scale_, standard_scaler_tf.scale_)
    def test_standard_scaler_multi_deserializer(self):
        """Round-trips a two-column StandardScaler through a bundle and compares results."""
        extract_features = ['a', 'b']
        feature_extractor = FeatureExtractor(input_scalars=['a', 'b'],
                                             output_vector='extracted_multi_outputs',
                                             output_vector_items=["{}_out".format(x) for x in extract_features])
        # Serialize a standard scaler to a bundle
        standard_scaler = StandardScaler(with_mean=True,
                                         with_std=True
                                         )
        standard_scaler.mlinit(prior_tf=feature_extractor,
                               output_features=['a_scaled', 'b_scaled'])
        standard_scaler.fit(self.df[['a', 'b']])
        standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)
        # Now deserialize it back
        node_name = "{}.node".format(standard_scaler.name)
        standard_scaler_tf = StandardScaler()
        standard_scaler_tf = standard_scaler_tf.deserialize_from_bundle(self.tmp_dir, node_name)
        # Transform some sample data
        res_a = standard_scaler.transform(self.df[['a', 'b']])
        res_b = standard_scaler_tf.transform(self.df[['a', 'b']])
        # Both columns of the transform, and the fitted state, must survive
        # the round trip.
        self.assertEqual(res_a[0][0], res_b[0][0])
        self.assertEqual(res_a[0][1], res_b[0][1])
        self.assertEqual(standard_scaler.name, standard_scaler_tf.name)
        self.assertEqual(standard_scaler.op, standard_scaler_tf.op)
        self.assertEqual(standard_scaler.mean_[0], standard_scaler_tf.mean_[0])
        self.assertEqual(standard_scaler.mean_[1], standard_scaler_tf.mean_[1])
        self.assertEqual(standard_scaler.scale_[0], standard_scaler_tf.scale_[0])
        self.assertEqual(standard_scaler.scale_[1], standard_scaler_tf.scale_[1])
    def test_min_max_scaler_serializer(self):
        """Serializes a fitted one-column MinMaxScaler and checks the bundle JSON."""
        extract_features = ['a']
        feature_extractor = FeatureExtractor(input_scalars=['a'],
                                             output_vector='extracted_a_output',
                                             output_vector_items=["{}_out".format(x) for x in extract_features])
        scaler = MinMaxScaler()
        scaler.mlinit(prior_tf = feature_extractor,
                      output_features='a_scaled')
        scaler.fit(self.df[['a']])
        scaler.serialize_to_bundle(self.tmp_dir, scaler.name)
        # Expected values computed directly from the source frame.
        expected_min = self.df.a.min()
        expected_max = self.df.a.max()
        # Expected JSON structure of the serialized model.
        expected_model = {
            "op": "min_max_scaler",
            "attributes": {
                "min": {
                    "double": [expected_min],
                    "shape": {
                        "dimensions": [{
                            "size": 1,
                            "name": ""
                        }]
                    },
                    "type": "tensor"
                },
                "max": {
                    "double": [expected_max],
                    "shape": {
                        "dimensions": [{
                            "size": 1,
                            "name": ""
                        }]
                    },
                    "type": "tensor"
                }
            }
        }
        self.assertEqual(expected_min, scaler.data_min_.tolist()[0])
        self.assertEqual(expected_max, scaler.data_max_.tolist()[0])
        # Test model.json
        with open("{}/{}.node/model.json".format(self.tmp_dir, scaler.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(scaler.op, expected_model['op'])
        self.assertEqual(expected_model['attributes']['min']['shape']['dimensions'][0]['size'], model['attributes']['min']['shape']['dimensions'][0]['size'])
        self.assertEqual(expected_model['attributes']['max']['shape']['dimensions'][0]['size'], model['attributes']['max']['shape']['dimensions'][0]['size'])
        self.assertEqual(expected_model['attributes']['min']['double'][0], model['attributes']['min']['double'][0])
        self.assertEqual(expected_model['attributes']['max']['double'][0], model['attributes']['max']['double'][0])
        # Test node.json
        with open("{}/{}.node/node.json".format(self.tmp_dir, scaler.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(scaler.name, node['name'])
        self.assertEqual(scaler.input_features, node['shape']['inputs'][0]['name'])
        self.assertEqual(scaler.output_features, node['shape']['outputs'][0]['name'])
    def test_min_max_scaler_deserializer(self):
        """Round-trips a one-column MinMaxScaler through a bundle and compares results."""
        extract_features = ['a']
        feature_extractor = FeatureExtractor(input_scalars=['a'],
                                             output_vector='extracted_a_output',
                                             output_vector_items=["{}_out".format(x) for x in extract_features])
        scaler = MinMaxScaler()
        scaler.mlinit(prior_tf=feature_extractor,
                      output_features='a_scaled')
        scaler.fit(self.df[['a']])
        scaler.serialize_to_bundle(self.tmp_dir, scaler.name)
        # Deserialize the MinMaxScaler
        node_name = "{}.node".format(scaler.name)
        min_max_scaler_tf = MinMaxScaler()
        min_max_scaler_tf.deserialize_from_bundle(self.tmp_dir, node_name)
        # Transform some sample data
        res_a = scaler.transform(self.df[['a']])
        res_b = min_max_scaler_tf.transform(self.df[['a']])
        # Deserialized scaler must reproduce the original transform.
        self.assertEqual(res_a[0], res_b[0])
        self.assertEqual(scaler.name, min_max_scaler_tf.name)
        self.assertEqual(scaler.op, min_max_scaler_tf.op)
def test_min_max_scaler_multi_deserializer(self):
extract_features = ['a', 'b']
feature_extractor = FeatureExtractor(input_scalars=['a', 'b'],
output_vector='extracted_multi_outputs',
output_vector_items=["{}_out".format(x) for x in extract_features])
scaler = MinMaxScaler()
scaler.mlinit(prior_tf=feature_extractor,
output_features=['a_scaled', 'b_scaled'])
scaler.fit(self.df[['a']])
scaler.serialize_to_bundle(self.tmp_dir, scaler.name)
# Deserialize the MinMaxScaler
node_name = "{}.node".format(scaler.name)
min_max_scaler_tf = MinMaxScaler()
min_max_scaler_tf.deserialize_from_bundle(self.tmp_dir, node_name)
# Transform some sample data
res_a = scaler.transform(self.df[['a', 'b']])
res_b = min_max_scaler_tf.transform(self.df[['a', 'b']])
self.assertEqual(res_a[0][0], res_b[0][0])
self.assertEqual(res_a[0][1], res_b[0][1])
self.assertEqual(scaler.name, min_max_scaler_tf.name)
self.assertEqual(scaler.op, min_max_scaler_tf.op)
    # NOTE(review): name does not start with "test", so unittest discovery
    # will not run this method — confirm whether that is intentional.
    def label_encoder_test(self):
        """Serializes a fitted LabelEncoder and checks model/node JSON."""
        labels = ['a', 'b', 'c']
        le = LabelEncoder(input_features=['label_feature'],
                          output_features='label_feature_le_encoded')
        le.fit(labels)
        self.assertEqual(labels, le.classes_.tolist())
        le.serialize_to_bundle(self.tmp_dir, le.name)
        # Test model.json
        with open("{}/{}.node/model.json".format(self.tmp_dir, le.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(le.op, model['op'])
        self.assertTrue('nullable_input' in model['attributes'])
        self.assertTrue('labels' in model['attributes'])
        # Test node.json
        with open("{}/{}.node/node.json".format(self.tmp_dir, le.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(le.name, node['name'])
        self.assertEqual(le.input_features[0], node['shape']['inputs'][0]['name'])
        self.assertEqual(le.output_features, node['shape']['outputs'][0]['name'])
    # NOTE(review): name does not start with "test", so unittest discovery
    # will not run this method — confirm whether that is intentional.
    def label_encoder_deserializer_test(self):
        """Round-trips a fitted LabelEncoder through a bundle and compares results."""
        labels = ['a', 'b', 'c']
        le = LabelEncoder(input_features=['label_feature'],
                          output_features='label_feature_le_encoded')
        le.fit(labels)
        self.assertEqual(labels, le.classes_.tolist())
        le.serialize_to_bundle(self.tmp_dir, le.name)
        # Test model.json
        with open("{}/{}.node/model.json".format(self.tmp_dir, le.name)) as json_data:
            model = json.load(json_data)
        # Deserialize the LabelEncoder
        node_name = "{}.node".format(le.name)
        label_encoder_tf = LabelEncoder()
        label_encoder_tf.deserialize_from_bundle(self.tmp_dir, node_name)
        # Transform some sample data
        res_a = le.transform(labels)
        res_b = label_encoder_tf.transform(labels)
        print("le.output_features: {}".format(le.output_features))
        print("label_encoder_tf.output_features: {}".format(label_encoder_tf.output_features))
        # Deserialized encoder must reproduce the original transform and features.
        self.assertEqual(res_a[0], res_b[0])
        self.assertEqual(res_a[1], res_b[1])
        self.assertEqual(res_a[2], res_b[2])
        self.assertEqual(le.input_features, label_encoder_tf.input_features)
        self.assertEqual(le.output_features, label_encoder_tf.output_features[0])
    def feature_extractor_test(self):
        """Serialize a FeatureExtractor and verify its vector_assembler bundle."""
        extract_features = ['a', 'd']
        feature_extractor = FeatureExtractor(input_scalars=extract_features,
                                             output_vector='extract_features_output',
                                             output_vector_items=["{}_out".format(x) for x in extract_features])
        res = feature_extractor.fit_transform(self.df)
        # Only the two selected columns survive the transform.
        self.assertEqual(len(res.columns), 2)
        feature_extractor.serialize_to_bundle(self.tmp_dir, feature_extractor.name)
        # Test node.json: node name and the input/output feature wiring.
        with open("{}/{}.node/node.json".format(self.tmp_dir, feature_extractor.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(feature_extractor.name, node['name'])
        self.assertEqual(feature_extractor.input_features[0], node['shape']['inputs'][0]['name'])
        self.assertEqual(feature_extractor.input_features[1], node['shape']['inputs'][1]['name'])
        self.assertEqual(feature_extractor.output_vector, node['shape']['outputs'][0]['name'])
        # Test model.json against the expected serialized shape description.
        with open("{}/{}.node/model.json".format(self.tmp_dir, feature_extractor.name)) as json_data:
            model = json.load(json_data)
        # One non-nullable scalar shape per input column.
        expected_model = {
            "op": "vector_assembler",
            "attributes": {
                "input_shapes": {
                    "data_shape": [
                        {
                            "base": "scalar",
                            "isNullable": False
                        },
                        {
                            "base": "scalar",
                            "isNullable": False
                        }],
                    "type": "list"
                }
            }
        }
        self.assertEqual(expected_model['op'], model['op'])
        self.assertEqual(expected_model['attributes']['input_shapes']['data_shape'][0]['base'],
                         model['attributes']['input_shapes']['data_shape'][0]['base'])
        self.assertEqual(expected_model['attributes']['input_shapes']['data_shape'][0]['isNullable'],
                         model['attributes']['input_shapes']['data_shape'][0]['isNullable'])
        self.assertEqual(expected_model['attributes']['input_shapes']['data_shape'][1]['base'],
                         model['attributes']['input_shapes']['data_shape'][1]['base'])
        self.assertEqual(expected_model['attributes']['input_shapes']['data_shape'][1]['isNullable'],
                         model['attributes']['input_shapes']['data_shape'][1]['isNullable'])
    def binarizer_test(self):
        """Serialize a fitted Binarizer and verify the emitted bundle JSON."""
        extract_features = ['a']
        feature_extractor = FeatureExtractor(input_scalars=['a'],
                                             output_vector='extracted_a_output',
                                             output_vector_items=["{}_out".format(x) for x in extract_features])
        binarizer = Binarizer(threshold=0)
        binarizer.mlinit(prior_tf=feature_extractor,
                         output_features='a_binary')
        Xres = binarizer.fit_transform(self.df[['a']])
        # Test that the binarizer functions as expected: the mean of the 0/1
        # output equals the fraction of values >= 0 in the 10-row fixture.
        self.assertEqual(float(len(self.df[self.df.a >= 0]))/10.0, Xres.mean())
        binarizer.serialize_to_bundle(self.tmp_dir, binarizer.name)
        expected_model = {
            "op": "sklearn_binarizer",
            "attributes": {
                "threshold": {
                    "double": 0.0
                }
            }
        }
        # Test model.json: op name and serialized threshold.
        with open("{}/{}.node/model.json".format(self.tmp_dir, binarizer.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(expected_model['attributes']['threshold']['double'],
                         model['attributes']['threshold']['double'])
        self.assertEqual(expected_model['op'], model['op'])
        # Test node.json: node name and feature wiring.
        with open("{}/{}.node/node.json".format(self.tmp_dir, binarizer.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(binarizer.name, node['name'])
        self.assertEqual(binarizer.input_features, node['shape']['inputs'][0]['name'])
        self.assertEqual(binarizer.output_features, node['shape']['outputs'][0]['name'])
def binarizer_deserializer_test(self):
extract_features = ['a']
feature_extractor = FeatureExtractor(input_scalars=['a'],
output_vector='extracted_a_output',
output_vector_items=["{}_out".format(x) for x in extract_features])
binarizer = Binarizer(threshold=0.0)
binarizer.mlinit(prior_tf=feature_extractor,
output_features='a_binary')
Xres = binarizer.fit_transform(self.df[['a']])
# Test that the binarizer functions as expected
self.assertEqual(float(len(self.df[self.df.a >= 0]))/10.0, Xres.mean())
binarizer.serialize_to_bundle(self.tmp_dir, binarizer.name)
# Deserialize the Binarizer
node_name = "{}.node".format(binarizer.name)
binarizer_tf_ds = Binarizer()
binarizer_tf_ds.deserialize_from_bundle(self.tmp_dir, node_name)
# Transform some sample data
res_a = binarizer.transform(self.df[['a']])
res_b = binarizer_tf_ds.transform(self.df[['a']])
self.assertEqual(res_a[0][0], res_b[0][0])
self.assertEqual(res_a[1][0], res_b[1][0])
self.assertEqual(res_a[2][0], res_b[2][0])
self.assertEqual(res_a[3][0], res_b[3][0])
    def polynomial_expansion_test(self):
        """Serialize PolynomialFeatures and verify the expansion bundle."""
        extract_features = ['a']
        feature_extractor = FeatureExtractor(input_scalars=['a'],
                                             output_vector='extracted_a_output',
                                             output_vector_items=["{}_out".format(x) for x in extract_features])
        polynomial_exp = PolynomialFeatures(degree=2, include_bias=False)
        polynomial_exp.mlinit(prior_tf=feature_extractor,
                              output_features='poly')
        Xres = polynomial_exp.fit_transform(self.df[['a']])
        # Degree-2 expansion without bias: second output column is the square.
        self.assertEqual(Xres[0][1], Xres[0][0] * Xres[0][0])
        polynomial_exp.serialize_to_bundle(self.tmp_dir, polynomial_exp.name)
        expected_model = {
            "op": "sklearn_polynomial_expansion",
            "attributes": {
                "combinations": {
                    "string": "[x0,x0^2]"
                }
            }
        }
        # Test model.json: op name and the serialized combination spec.
        with open("{}/{}.node/model.json".format(self.tmp_dir, polynomial_exp.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(expected_model['op'], model['op'])
        self.assertEqual(expected_model['attributes']['combinations']['string'], model['attributes']['combinations']['string'])
        # Test node.json: node name and feature wiring.
        with open("{}/{}.node/node.json".format(self.tmp_dir, polynomial_exp.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(polynomial_exp.name, node['name'])
        self.assertEqual(polynomial_exp.input_features, node['shape']['inputs'][0]['name'])
        self.assertEqual(polynomial_exp.output_features, node['shape']['outputs'][0]['name'])
    def math_unary_exp_test(self):
        """Serialize a MathUnary('exp') transformer and verify its bundle."""
        math_unary_tf = MathUnary(input_features=['a'], output_features='exp_a', transform_type='exp')
        Xres = math_unary_tf.fit_transform(self.df.a)
        # exp is applied element-wise to the input column.
        self.assertEqual(np.exp(self.df.a[0]), Xres[0])
        math_unary_tf.serialize_to_bundle(self.tmp_dir, math_unary_tf.name)
        expected_model = {
            "op": "math_unary",
            "attributes": {
                "operation": {
                    "string": 'exp'
                }
            }
        }
        # Test model.json: the serialized operation name.
        with open("{}/{}.node/model.json".format(self.tmp_dir, math_unary_tf.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
        # Test node.json: node name and feature wiring.
        with open("{}/{}.node/node.json".format(self.tmp_dir, math_unary_tf.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(math_unary_tf.name, node['name'])
        self.assertEqual(math_unary_tf.input_features[0], node['shape']['inputs'][0]['name'])
        self.assertEqual(math_unary_tf.output_features, node['shape']['outputs'][0]['name'])
def math_unary_deserialize_exp_test(self):
math_unary_tf = MathUnary(input_features=['a'], output_features='exp_a', transform_type='exp')
Xres = math_unary_tf.fit_transform(self.df.a)
self.assertEqual(np.exp(self.df.a[0]), Xres[0])
math_unary_tf.serialize_to_bundle(self.tmp_dir, math_unary_tf.name)
node_name = "{}.node".format(math_unary_tf.name)
math_unary_ds_tf = MathUnary()
math_unary_ds_tf = math_unary_ds_tf.deserialize_from_bundle(self.tmp_dir, node_name)
with open("{}/{}.node/model.json".format(self.tmp_dir, math_unary_tf.name)) as json_data:
model = json.load(json_data)
res_a = math_unary_tf.transform(self.df['a'])
res_b = math_unary_ds_tf.transform(self.df['a'])
self.assertEqual(res_a[0], res_b[0])
    def math_unary_sin_test(self):
        """Serialize a MathUnary('sin') transformer and verify its bundle."""
        math_unary_tf = MathUnary(input_features=['a'], output_features='sin_a', transform_type='sin')
        Xres = math_unary_tf.fit_transform(self.df.a)
        # sin is applied element-wise to the input column.
        self.assertEqual(np.sin(self.df.a[0]), Xres[0])
        math_unary_tf.serialize_to_bundle(self.tmp_dir, math_unary_tf.name)
        expected_model = {
            "op": "math_unary",
            "attributes": {
                "operation": {
                    "string": 'sin'
                }
            }
        }
        # Test model.json: the serialized operation name.
        with open("{}/{}.node/model.json".format(self.tmp_dir, math_unary_tf.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
        # Test node.json: node name and feature wiring.
        with open("{}/{}.node/node.json".format(self.tmp_dir, math_unary_tf.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(math_unary_tf.name, node['name'])
        self.assertEqual(math_unary_tf.input_features[0], node['shape']['inputs'][0]['name'])
        self.assertEqual(math_unary_tf.output_features, node['shape']['outputs'][0]['name'])
    def math_binary_test(self):
        """Serialize an 'add' MathBinary transformer and verify its bundle."""
        math_binary_tf = MathBinary(input_features=['a', 'b'], output_features='a_plus_b', transform_type='add')
        Xres = math_binary_tf.fit_transform(self.df[['a', 'b']])
        # The result frame is the element-wise sum of the two input columns.
        assert_frame_equal(pd.DataFrame(self.df.a + self.df.b, columns=['a']), Xres)
        math_binary_tf.serialize_to_bundle(self.tmp_dir, math_binary_tf.name)
        expected_model = {
            "op": "math_binary",
            "attributes": {
                "operation": {
                    "string": 'add'
                }
            }
        }
        # Test model.json: the serialized operation name.
        with open("{}/{}.node/model.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
        # Test node.json: node name and both inputs plus the output wiring.
        with open("{}/{}.node/node.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(math_binary_tf.name, node['name'])
        self.assertEqual(math_binary_tf.input_features[0], node['shape']['inputs'][0]['name'])
        self.assertEqual(math_binary_tf.input_features[1], node['shape']['inputs'][1]['name'])
        self.assertEqual(math_binary_tf.output_features, node['shape']['outputs'][0]['name'])
def math_binary_deserialize_add_test(self):
math_binary_tf = MathBinary(input_features=['a', 'b'], output_features='a_plus_b', transform_type='add')
Xres = math_binary_tf.fit_transform(self.df[['a', 'b']])
assert_frame_equal(pd.DataFrame(self.df.a + self.df.b, columns=['a']), Xres)
math_binary_tf.serialize_to_bundle(self.tmp_dir, math_binary_tf.name)
node_name = "{}.node".format(math_binary_tf.name)
math_binary_ds_tf = MathBinary()
math_binary_ds_tf = math_binary_ds_tf.deserialize_from_bundle(self.tmp_dir, node_name)
res_a = math_binary_tf.transform(self.df[['a', 'b']])
res_b = math_binary_ds_tf.transform(self.df[['a', 'b']])
assert_frame_equal(res_a, res_b)
    def math_binary_subtract_test(self):
        """Serialize a 'sub' MathBinary transformer and verify its bundle."""
        math_binary_tf = MathBinary(input_features=['a', 'b'], output_features='a_less_b', transform_type='sub')
        Xres = math_binary_tf.fit_transform(self.df[['a', 'b']])
        # The result frame is the element-wise difference a - b.
        assert_frame_equal(pd.DataFrame(self.df.a - self.df.b, columns=['a']), Xres)
        math_binary_tf.serialize_to_bundle(self.tmp_dir, math_binary_tf.name)
        expected_model = {
            "op": "math_binary",
            "attributes": {
                "operation": {
                    "string": 'sub'
                }
            }
        }
        # Test model.json: the serialized operation name.
        with open("{}/{}.node/model.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
        # Test node.json: node name and both inputs plus the output wiring.
        with open("{}/{}.node/node.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(math_binary_tf.name, node['name'])
        self.assertEqual(math_binary_tf.input_features[0], node['shape']['inputs'][0]['name'])
        self.assertEqual(math_binary_tf.input_features[1], node['shape']['inputs'][1]['name'])
        self.assertEqual(math_binary_tf.output_features, node['shape']['outputs'][0]['name'])
    def math_binary_multiply_test(self):
        """Serialize a 'mul' MathBinary transformer and verify its bundle."""
        math_binary_tf = MathBinary(input_features=['a', 'b'], output_features='a_mul_b', transform_type='mul')
        Xres = math_binary_tf.fit_transform(self.df[['a', 'b']])
        # The result frame is the element-wise product a * b.
        assert_frame_equal(pd.DataFrame(self.df.a * self.df.b, columns=['a']), Xres)
        math_binary_tf.serialize_to_bundle(self.tmp_dir, math_binary_tf.name)
        expected_model = {
            "op": "math_binary",
            "attributes": {
                "operation": {
                    "string": 'mul'
                }
            }
        }
        # Test model.json: the serialized operation name.
        with open("{}/{}.node/model.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
        # Test node.json: node name and both inputs plus the output wiring.
        with open("{}/{}.node/node.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(math_binary_tf.name, node['name'])
        self.assertEqual(math_binary_tf.input_features[0], node['shape']['inputs'][0]['name'])
        self.assertEqual(math_binary_tf.input_features[1], node['shape']['inputs'][1]['name'])
        self.assertEqual(math_binary_tf.output_features, node['shape']['outputs'][0]['name'])
def math_binary_divide_test(self):
math_binary_tf = MathBinary(input_features=['a', 'b'], output_features='a_mul_b', transform_type='div')
Xres = math_binary_tf.fit_transform(self.df[['a', 'b']])
assert_frame_equal(pd.DataFrame(self.df.a / self.df.b, columns=['a']), Xres)
math_binary_tf.serialize_to_bundle(self.tmp_dir, math_binary_tf.name)
expected_model = {
"op": "math_binary",
"attributes": {
"operation": {
"string": 'div'
}
}
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
node = json.load(json_data)
self.assertEqual(math_binary_tf.name, node['name'])
self.assertEqual(math_binary_tf.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(math_binary_tf.input_features[1], node['shape']['inputs'][1]['name'])
self.assertEqual(math_binary_tf.output_features, node['shape']['outputs'][0]['name'])
    def string_map_test(self):
        """Serialize a StringMap and verify labels/values in the bundle."""
        df = pd.DataFrame(['test_one', 'test_two', 'test_one', 'test_one', 'test_two'], columns=['a'])
        string_map_tf = StringMap(input_features=['a'], output_features='a_mapped', labels={"test_one":1.0, "test_two": 0.0})
        Xres = string_map_tf.fit_transform(df)
        # Each string is replaced by the double it maps to.
        self.assertEqual(1.0, Xres[0])
        self.assertEqual(0.0, Xres[1])
        self.assertEqual(1.0, Xres[2])
        self.assertEqual(1.0, Xres[3])
        self.assertEqual(0.0, Xres[4])
        string_map_tf.serialize_to_bundle(self.tmp_dir, string_map_tf.name)
        # Labels and values serialize as two parallel lists.
        expected_model = {
            "op": "string_map",
            "attributes": {
                "labels": {
                    "type": "list",
                    "string": ["test_one", "test_two"]
                },
                "values": {
                    "type": "list",
                    "double": [1.0, 0.0]
                }
            }
        }
        # Test model.json: parallel labels/values lists.
        with open("{}/{}.node/model.json".format(self.tmp_dir, string_map_tf.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual(expected_model['attributes']['labels']['string'], model['attributes']['labels']['string'])
        self.assertEqual(expected_model['attributes']['values']['double'], model['attributes']['values']['double'])
        # Test node.json: node name and feature wiring.
        with open("{}/{}.node/node.json".format(self.tmp_dir, string_map_tf.name)) as json_data:
            node = json.load(json_data)
        self.assertEqual(string_map_tf.name, node['name'])
        self.assertEqual(string_map_tf.input_features[0], node['shape']['inputs'][0]['name'])
        self.assertEqual(string_map_tf.output_features, node['shape']['outputs'][0]['name'])
    def string_map_deserializer_test(self):
        """Round-trip a StringMap through a bundle and compare transforms."""
        df = pd.DataFrame(['test_one', 'test_two', 'test_one', 'test_one', 'test_two'], columns=['a'])
        string_map = StringMap(input_features=['a'], output_features='a_mapped', labels={"test_one":1.0, "test_two": 0.0})
        string_map.serialize_to_bundle(self.tmp_dir, string_map.name)
        # Now deserialize it back
        node_name = "{}.node".format(string_map.name)
        string_map_tf = StringMap()
        string_map_tf = string_map_tf.deserialize_from_bundle(self.tmp_dir, node_name)
        # Transform some sample data: outputs and metadata must match.
        res_a = string_map.fit_transform(df)
        res_b = string_map_tf.fit_transform(df)
        self.assertEqual(res_a[0], res_b[0])
        self.assertEqual(res_a[1], res_b[1])
        self.assertEqual(res_a[2], res_b[2])
        self.assertEqual(res_a[3], res_b[3])
        self.assertEqual(res_a[4], res_b[4])
        self.assertEqual(string_map.name, string_map_tf.name)
        self.assertEqual(string_map.op, string_map_tf.op)
        self.assertEqual(string_map.labels, string_map_tf.labels)
| |
import functools
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.db.transaction import non_atomic_requests
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext, ugettext_lazy as _lazy
import six
from django_statsd.clients import statsd
from rest_framework import serializers
from rest_framework.viewsets import ModelViewSet
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.accounts.utils import redirect_for_login
from olympia.accounts.views import AccountViewSet
from olympia.addons.models import Addon
from olympia.addons.views import BaseFilter
from olympia.amo import messages
from olympia.amo.decorators import (
allow_mine, json_view, login_required, post_required, use_primary_db)
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import paginate, render, urlparams
from olympia.api.filters import OrderingAliasFilter
from olympia.api.permissions import (
AllOf, AllowReadOnlyIfPublic, AnyOf, PreventActionPermission)
from olympia.legacy_api.utils import addon_to_dict
from olympia.translations.query import order_by_translation
from olympia.users.decorators import process_user_id
from olympia.users.models import UserProfile
from . import forms
from .models import SPECIAL_SLUGS, Collection, CollectionAddon
from .permissions import (
AllowCollectionAuthor, AllowCollectionContributor, AllowContentCurators)
from .serializers import (
CollectionAddonSerializer, CollectionSerializer,
CollectionWithAddonsSerializer)
log = olympia.core.logger.getLogger('z.collections')
@non_atomic_requests
def get_collection(request, user_id, slug):
    """Fetch a collection by author id + slug, special-casing magic slugs.

    For a special slug belonging to the authenticated requesting user, the
    collection comes from the matching accessor on the user object (e.g.
    slug 'favorites' -> request.user.favorites_collection()).
    """
    is_special = slug in SPECIAL_SLUGS.values()
    if is_special and request.user.is_authenticated and request.user.id == user_id:
        return getattr(request.user, slug + '_collection')()
    return get_object_or_404(Collection.objects, author_id=user_id, slug=slug)
def owner_required(f=None, require_owner=True):
    """Requires collection to be owned, by someone.

    The wrapped view receives the resolved collection as its second
    positional argument.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(request, user_id, slug, *args, **kw):
            collection = get_collection(request, user_id, slug)
            allowed = acl.check_collection_ownership(
                request, collection, require_owner=require_owner)
            if not allowed:
                raise PermissionDenied
            return func(request, collection, user_id, slug, *args, **kw)
        return wrapper
    # Support both @owner_required and @owner_required(require_owner=...).
    if f is None:
        return decorator
    return decorator(f)
@non_atomic_requests
def legacy_redirect(request, uuid, edit=False):
    """Redirect legacy collection URLs (uuid or nickname) to the new paths."""
    # Nicknames have a limit of 30, so len == 36 implies a uuid.
    lookup = {'uuid' if len(uuid) == 36 else 'nickname': uuid}
    collection = get_object_or_404(Collection.objects, **lookup)
    if edit:
        return http.HttpResponseRedirect(collection.edit_url())
    destination = collection.get_url_path()
    query = request.GET.urlencode()
    if query:
        destination = destination + '?' + query
    return http.HttpResponseRedirect(destination)
@non_atomic_requests
def legacy_directory_redirects(request, page):
    """Map legacy directory pages onto the current listing URLs."""
    sorts = {'editors_picks': 'featured', 'popular': 'popular'}
    base = reverse('collections.list')
    loc = base
    if page in sorts:
        loc = urlparams(base, sort=sorts[page])
    elif page == 'mine' and request.user.is_authenticated:
        loc = reverse('collections.user', args=[request.user.id])
    return http.HttpResponseRedirect(loc)
@non_atomic_requests
def render_cat(request, template, data=None, extra=None):
    """Render a template with the collections search category preset."""
    extra = {} if extra is None else extra
    data = {} if data is None else data
    # Every bandwagon page searches within collections by default.
    data['search_cat'] = 'collections'
    return render(request, template, data, **extra)
@non_atomic_requests
def collection_listing(request, base=None):
    """List non-empty featured, listed collections for the current app."""
    app_filter = Q(application=request.APP.id) | Q(application=None)
    qs = (Collection.objects.listed()
          .filter(app_filter)
          .filter(type=amo.COLLECTION_FEATURED)
          .exclude(addon_count=0))
    collections = paginate(request, qs, count=qs.count())
    return render_cat(request, 'bandwagon/impala/collection_listing.html',
                      {'collections': collections, 'src': 'co-hc-sidebar',
                       'dl_src': 'co-dp-sidebar'})
@allow_mine
@process_user_id
@non_atomic_requests
def user_listing(request, user_id):
    """Show one user's collections.

    The owner sees all of their collections; everyone else only sees the
    listed (public) ones.
    """
    author = get_object_or_404(UserProfile, id=user_id)
    qs = Collection.objects.filter(author_id=user_id).order_by('-created')
    is_mine = request.user.is_authenticated and request.user.id == user_id
    if is_mine:
        page = 'mine'
    else:
        page = 'user'
        qs = qs.filter(listed=True)
    collections = paginate(request, qs)
    return render_cat(request, 'bandwagon/user_listing.html',
                      {'collections': collections,
                       'page': page, 'author': author})
class CollectionAddonFilter(BaseFilter):
    # Sort options for collection add-on listings; the keys double as the
    # `sort` query-string values dispatched to the filter_* methods below.
    opts = (('added', _lazy(u'Added')),
            ('popular', _lazy(u'Popularity')),
            ('name', _lazy(u'Name')))

    def filter_added(self):
        # Order by when the add-on was added to the collection.
        return self.base_queryset.order_by('collectionaddon__created')

    def filter_name(self):
        # Name ordering has to go through the translations table.
        return order_by_translation(self.base_queryset, 'name')

    def filter_popular(self):
        # Most-downloaded first.
        return self.base_queryset.order_by('-weekly_downloads')
@allow_mine
@process_user_id
@non_atomic_requests
def collection_detail(request, user_id, slug):
    """Display a collection and a paginated, sortable list of its add-ons.

    Unlisted collections are only visible to users who pass the
    collection-ownership ACL check; anonymous users are sent to log in.

    Fix: the local previously named `filter` shadowed the builtin; renamed
    to `addon_filter` (the 'filter' template-context key is unchanged).
    """
    collection = get_collection(request, user_id, slug)
    if not collection.listed:
        if not request.user.is_authenticated:
            return redirect_for_login(request)
        if not acl.check_collection_ownership(request, collection):
            raise PermissionDenied

    base = Addon.objects.valid() & collection.addons.all()
    addon_filter = CollectionAddonFilter(request, base,
                                         key='sort', default='popular')
    notes = get_notes(collection)
    # Go directly to CollectionAddon for the count to avoid joins.
    count = CollectionAddon.objects.filter(
        Addon.objects.all().valid_q(
            amo.VALID_ADDON_STATUSES, prefix='addon__'),
        collection=collection.id)
    addons = paginate(request, addon_filter.qs, per_page=15,
                      count=count.count())

    # `perms` is defined in django.contrib.auth.context_processors. Gotcha!
    user_perms = {
        'view_stats': acl.check_ownership(
            request, collection, require_owner=False),
    }

    return render_cat(request, 'bandwagon/collection_detail.html',
                      {'collection': collection, 'filter': addon_filter,
                       'addons': addons, 'notes': notes,
                       'user_perms': user_perms})
@json_view(has_trans=True)
@allow_mine
@process_user_id
@non_atomic_requests
def collection_detail_json(request, user_id, slug):
    """Return a JSON description of a collection and its valid add-ons."""
    collection = get_collection(request, user_id, slug)
    visible = collection.listed or acl.check_collection_ownership(
        request, collection)
    if not visible:
        raise PermissionDenied
    # We evaluate the QuerySet with `list` to work around bug 866454.
    addons_dict = [addon_to_dict(a) for a in list(collection.addons.valid())]
    return {
        'name': collection.name,
        'url': collection.get_abs_url(),
        'addons': addons_dict
    }
def get_notes(collection, raw=False):
    """Yield (exactly once) a dict mapping addon_id -> comment.

    Implemented as a generator so nothing is queried until the caller
    advances it with next(). With raw=True the localized string is
    returned instead of the Translation object.
    """
    # This might hurt in a big collection with lots of notes.
    # It's a generator so we don't evaluate anything by default.
    notes = CollectionAddon.objects.filter(collection=collection,
                                           comments__isnull=False)
    rv = {}
    for note in notes:
        # Watch out for comments in a language we didn't pick up.
        if note.comments:
            rv[note.addon_id] = (note.comments.localized_string if raw
                                 else note.comments)
    yield rv
def initial_data_from_request(request):
    """Initial form data tying a new collection to the requesting user/app."""
    return {
        'author': request.user,
        'application': request.APP.id,
    }
def collection_message(request, collection, option):
    """Flash a success message after a collection is created or updated.

    `option` must be 'add' or 'update'; anything else raises ValueError.
    """
    if option == 'add':
        title = ugettext('Collection created!')
        msg = ugettext(
            'Your new collection is shown below. You can '
            '<a href="%(url)s">edit additional settings</a> if you\'d '
            'like.'
        ) % {'url': collection.edit_url()}
    elif option == 'update':
        title = ugettext('Collection updated!')
        msg = ugettext(
            '<a href="%(url)s">View your collection</a> to see the changes.'
        ) % {'url': collection.get_url_path()}
    else:
        raise ValueError('Incorrect option "%s", '
                         'takes only "add" or "update".' % option)
    # message_safe: the markup in msg was built by us above, not user input.
    messages.success(request, title, msg, message_safe=True)
@use_primary_db
@login_required
def add(request):
    """Displays/processes a form to create a collection."""
    ctx = {}
    if request.method == 'POST':
        form = forms.CollectionForm(
            request.POST, request.FILES,
            initial=initial_data_from_request(request))
        aform = forms.AddonsForm(request.POST)
        if form.is_valid():
            collection = form.save(default_locale=request.LANG)
            collection.save()
            if aform.is_valid():
                # Attach the selected add-ons to the new collection.
                aform.save(collection)
            collection_message(request, collection, 'add')
            statsd.incr('collections.created')
            log.info('Created collection %s' % collection.id)
            return http.HttpResponseRedirect(collection.get_url_path())
        else:
            # Re-render with the add-ons/comments the user selected so they
            # aren't lost when the main form has validation errors.
            ctx['addons'] = Addon.objects.filter(pk__in=aform.clean_addon())
            ctx['comments'] = aform.clean_addon_comment()
    else:
        form = forms.CollectionForm()

    ctx['form'] = form
    return render_cat(request, 'bandwagon/add.html', ctx)
@use_primary_db
@login_required(redirect=False)
def ajax_new(request):
    """AJAX endpoint creating a collection seeded with a single add-on."""
    form = forms.CollectionForm(
        request.POST or None,
        initial=initial_data_from_request(request))
    if request.method == 'POST' and form.is_valid():
        collection = form.save()
        addon_id = request.POST['addon_id']
        collection.add_addon(Addon.objects.get(pk=addon_id))
        log.info('Created collection %s' % collection.id)
        # Bounce back to the picker list, preselecting this add-on.
        url = '%s?addon_id=%s' % (reverse('collections.ajax_list'), addon_id)
        return http.HttpResponseRedirect(url)
    return render(request, 'bandwagon/ajax_new.html', {'form': form})
@login_required(redirect=False)
@non_atomic_requests
def ajax_list(request):
    """List the current user's collections, annotated with has_addon."""
    raw_id = request.GET.get('addon_id')
    try:
        addon_id = int(raw_id)
    except (TypeError, ValueError):
        # Missing or non-numeric addon_id.
        return http.HttpResponseBadRequest()
    qs = Collection.objects.owned_by(request.user).with_has_addon(addon_id)
    return render(request, 'bandwagon/ajax_list.html',
                  {'collections': order_by_translation(qs, 'name')})
@use_primary_db
@login_required
@post_required
@process_user_id
def collection_alter(request, user_id, slug, action):
    """Add/remove a POSTed add-on to/from a collection addressed by owner+slug."""
    target = get_collection(request, user_id, slug)
    return change_addon(request, target, action)
def change_addon(request, collection, action):
    """Apply `action` ('add'/'remove') for the POSTed addon_id to collection.

    The action is interpolated into a collection method name, e.g. 'add'
    -> collection.add_addon(addon).
    """
    if not acl.check_collection_ownership(request, collection):
        raise PermissionDenied

    try:
        addon = get_object_or_404(Addon.objects, pk=request.POST['addon_id'])
    except (ValueError, KeyError):
        # Missing or malformed addon_id.
        return http.HttpResponseBadRequest()

    getattr(collection, action + '_addon')(addon)
    log.info(u'%s: %s %s to collection %s' %
             (request.user, action, addon.id, collection.id))

    # AJAX callers go back to the picker list; others to the collection page.
    if request.is_ajax():
        url = '%s?addon_id=%s' % (reverse('collections.ajax_list'), addon.id)
    else:
        url = collection.get_url_path()
    return http.HttpResponseRedirect(url)
@use_primary_db
@login_required
@post_required
def ajax_collection_alter(request, action):
    """Like collection_alter, but the collection is addressed by POSTed id."""
    try:
        pk = request.POST['id']
        collection = get_object_or_404(Collection.objects, pk=pk)
    except (ValueError, KeyError):
        return http.HttpResponseBadRequest()
    return change_addon(request, collection, action)
@use_primary_db
@login_required
@process_user_id
# Contributors are allowed to *see* the page, but there is another
# permission check below to prevent them from doing any modifications.
@owner_required(require_owner=False)
def edit(request, collection, user_id, slug):
    """Display and process the collection edit form."""
    is_admin = acl.action_allowed(request, amo.permissions.ADMIN_CURATION)

    if not acl.check_collection_ownership(
            request, collection, require_owner=True):
        # Non-owners (contributors) may look but never modify.
        if request.method == 'POST':
            raise PermissionDenied
        form = None
    elif request.method == 'POST':
        initial = initial_data_from_request(request)
        if collection.author_id:  # Don't try to change the author.
            initial['author'] = collection.author
        form = forms.CollectionForm(request.POST, request.FILES,
                                    initial=initial,
                                    instance=collection)
        if form.is_valid():
            collection = form.save()
            collection_message(request, collection, 'update')
            log.info(u'%s edited collection %s' %
                     (request.user, collection.id))
            return http.HttpResponseRedirect(collection.edit_url())
    else:
        form = forms.CollectionForm(instance=collection)

    # Read from the primary db ('default') so we see our own fresh writes.
    qs = (CollectionAddon.objects.using('default')
          .filter(collection=collection))
    meta = {c.addon_id: c for c in qs}
    addons = collection.addons.all()
    # get_notes() is a one-shot generator; next() unwraps the dict.
    comments = next(get_notes(collection, raw=True))

    data = {
        'collection': collection,
        'form': form,
        'user_id': user_id,
        'slug': slug,
        'meta': meta,
        'is_admin': is_admin,
        'addons': addons,
        'comments': comments
    }
    return render_cat(request, 'bandwagon/edit.html', data)
@use_primary_db
@login_required
@process_user_id
@owner_required(require_owner=False)
@post_required
def edit_addons(request, collection, user_id, slug):
    """Handle the add-ons form on the collection edit page."""
    # @post_required already guarantees POST; the guard is kept as-is.
    if request.method == 'POST':
        addons_form = forms.AddonsForm(request.POST)
        if addons_form.is_valid():
            addons_form.save(collection)
            collection_message(request, collection, 'update')
            log.info(u'%s added add-ons to %s' %
                     (request.user, collection.id))
    return http.HttpResponseRedirect(collection.edit_url() + '#addons-edit')
@use_primary_db
@login_required
@process_user_id
@owner_required
@post_required
def edit_privacy(request, collection, user_id, slug):
    """Toggle a collection between listed (public) and unlisted."""
    new_state = not collection.listed
    collection.listed = new_state
    collection.save()
    log.info(u'%s changed privacy on collection %s' %
             (request.user, collection.id))
    return http.HttpResponseRedirect(collection.get_url_path())
@use_primary_db
@login_required
@process_user_id
def delete(request, user_id, slug):
    """Confirm (GET) and perform (POST) deletion of a collection."""
    collection = get_object_or_404(Collection, author_id=user_id, slug=slug)

    if not acl.check_collection_ownership(request, collection, True):
        log.info(u'%s is trying to delete collection %s'
                 % (request.user, collection.id))
        raise PermissionDenied

    data = dict(collection=collection, user_id=user_id, slug=slug)

    if request.method == 'POST':
        # 'sure' is the confirmation flag from the delete form.
        if request.POST['sure'] == '1':
            collection.delete()
            log.info(u'%s deleted collection %s' %
                     (request.user, collection.id))
            url = reverse('collections.user', args=[user_id])
            return http.HttpResponseRedirect(url)
        else:
            return http.HttpResponseRedirect(collection.get_url_path())

    return render_cat(request, 'bandwagon/delete.html', data)
@login_required
@allow_mine
@non_atomic_requests
def mine(request, user_id=None, slug=None):
    """Dispatch /collections/mine URLs to the listing or detail view."""
    if slug is None:
        return user_listing(request, user_id)
    return collection_detail(request, user_id, slug)
class CollectionViewSet(ModelViewSet):
    """REST API viewset for one user's collections, addressed by slug."""
    # Note: CollectionAddonViewSet will call CollectionViewSet().get_object(),
    # causing the has_object_permission() method of these permissions to be
    # called. It will do so without setting an action however, bypassing the
    # PreventActionPermission() parts.
    permission_classes = [
        AnyOf(
            # Collection authors can do everything.
            AllowCollectionAuthor,
            # Collection contributors can access the featured themes collection
            # (it's community-managed) and change it's addons, but can't delete
            # or edit it's details.
            AllOf(AllowCollectionContributor,
                  PreventActionPermission(('create', 'list', 'update',
                                           'destroy', 'partial_update'))),
            # Content curators can modify existing mozilla collections as they
            # see fit, but can't list or delete them.
            AllOf(AllowContentCurators,
                  PreventActionPermission(('create', 'destroy', 'list'))),
            # Everyone else can do read-only stuff, except list.
            AllOf(AllowReadOnlyIfPublic,
                  PreventActionPermission('list'))),
    ]
    # Collections are looked up by slug rather than pk.
    lookup_field = 'slug'

    def get_account_viewset(self):
        # Lazily build (and cache) an AccountViewSet for the user_pk in the
        # URL; it resolves the account whose collections are being served.
        if not hasattr(self, 'account_viewset'):
            self.account_viewset = AccountViewSet(
                request=self.request,
                permission_classes=[],  # We handled permissions already.
                kwargs={'pk': self.kwargs['user_pk']})
        return self.account_viewset

    def get_serializer_class(self):
        # Only the retrieve action honours the ?with_addons flag.
        with_addons = ('with_addons' in self.request.GET and
                       self.action == 'retrieve')
        return (CollectionSerializer if not with_addons
                else CollectionWithAddonsSerializer)

    def get_queryset(self):
        # Most recently modified collections of the URL's account first.
        return Collection.objects.filter(
            author=self.get_account_viewset().get_object()).order_by(
            '-modified')

    def get_addons_queryset(self):
        # Delegate to CollectionAddonViewSet so its filtering applies.
        collection_addons_viewset = CollectionAddonViewSet(
            request=self.request
        )
        # Set this to avoid a pointless lookup loop.
        collection_addons_viewset.collection = self.get_object()
        # This needs to be list to make the filtering work.
        collection_addons_viewset.action = 'list'
        qs = collection_addons_viewset.get_queryset()
        # Now limit and sort
        limit = settings.REST_FRAMEWORK['PAGE_SIZE']
        sort = collection_addons_viewset.ordering[0]
        return qs.order_by(sort)[:limit]
class TranslationAwareOrderingAliasFilter(OrderingAliasFilter):
    """OrderingAliasFilter that routes name sorts through translations."""

    def filter_queryset(self, request, queryset, view):
        ordering = self.get_ordering(request, queryset, view)
        if len(ordering) > 1:
            # We can't support multiple orderings easily because of
            # how order_by_translation works.
            raise serializers.ValidationError(
                'You can only specify one "sort" argument. Multiple '
                'orderings are not supported')
        order_by = ordering[0]
        if order_by in ('name', '-name'):
            return order_by_translation(queryset, order_by, Addon)
        return super(TranslationAwareOrderingAliasFilter,
                     self).filter_queryset(request, queryset, view)
class CollectionAddonViewSet(ModelViewSet):
    """CRUD viewset for the add-ons that belong to a single collection."""

    permission_classes = []  # Permissions are enforced via CollectionViewSet.
    serializer_class = CollectionAddonSerializer
    lookup_field = 'addon'
    filter_backends = (TranslationAwareOrderingAliasFilter,)
    ordering_fields = ()
    ordering_field_aliases = {'popularity': 'addon__weekly_downloads',
                              'name': 'name',
                              'added': 'created'}
    ordering = ('-addon__weekly_downloads',)

    def get_collection(self):
        """Fetch (and cache) the parent collection from the URL kwargs."""
        if not hasattr(self, 'collection'):
            # Re-use CollectionViewSet and go through its get_object() so its
            # permission checks run for us.  No `action` is passed, therefore
            # the PreventActionPermission part of those checks is a no-op.
            self.collection = CollectionViewSet(
                request=self.request,
                kwargs={'user_pk': self.kwargs['user_pk'],
                        'slug': self.kwargs['collection_slug']}).get_object()
        return self.collection

    def get_object(self):
        self.lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        lookup_value = self.kwargs.get(self.lookup_url_kwarg)
        # A non-numeric lookup value is treated as an add-on slug, not a pk.
        if lookup_value and not six.text_type(lookup_value).isdigit():
            self.lookup_field = '%s__slug' % self.lookup_field
        return super(CollectionAddonViewSet, self).get_object()

    def get_queryset(self):
        """Add-ons in the collection, honouring the ?filter= parameter."""
        queryset = (
            CollectionAddon.objects
            .filter(collection=self.get_collection())
            .prefetch_related('addon'))
        filter_param = self.request.GET.get('filter')
        # Filtering only applies to the list action.
        include_all_with_deleted = (
            self.action != 'list' or filter_param == 'all_with_deleted')
        # Asking for deleted add-ons implies asking for all add-ons.
        include_all = include_all_with_deleted or filter_param == 'all'
        if not include_all:
            queryset = queryset.filter(
                addon__status=amo.STATUS_PUBLIC,
                addon__disabled_by_user=False)
        elif not include_all_with_deleted:
            queryset = queryset.exclude(addon__status=amo.STATUS_DELETED)
        return queryset
| |
from __future__ import unicode_literals
import boto
import boto.ec2
import boto3
from boto.exception import EC2ResponseError
from botocore.exceptions import ClientError
import pytest
import sure # noqa
from moto import mock_ec2_deprecated, mock_ec2
from moto.ec2.models import AMIS, OWNER_ID
from moto.core import ACCOUNT_ID
from tests import EXAMPLE_AMI_ID
from tests.helpers import requires_boto_gte
@mock_ec2_deprecated
def test_ami_create_and_delete():
    """create_image() snapshots the instance; deregister_image() removes it."""
    conn = boto.connect_ec2("the_key", "the_secret")

    initial_ami_count = len(AMIS)
    conn.get_all_volumes().should.have.length_of(0)
    conn.get_all_snapshots().should.have.length_of(initial_ami_count)

    reservation = conn.run_instances(EXAMPLE_AMI_ID)
    instance = reservation.instances[0]

    # DryRun must fail with DryRunOperation and create nothing.
    with pytest.raises(EC2ResponseError) as ex:
        image_id = conn.create_image(
            instance.id, "test-ami", "this is a test ami", dry_run=True
        )
    ex.value.error_code.should.equal("DryRunOperation")
    ex.value.status.should.equal(400)
    ex.value.message.should.equal(
        "An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set"
    )

    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")

    all_images = conn.get_all_images()
    set([i.id for i in all_images]).should.contain(image_id)

    retrieved_image = [i for i in all_images if i.id == image_id][0]

    # The new AMI inherits its attributes from the source instance.
    retrieved_image.id.should.equal(image_id)
    retrieved_image.virtualization_type.should.equal(instance.virtualization_type)
    retrieved_image.architecture.should.equal(instance.architecture)
    retrieved_image.kernel_id.should.equal(instance.kernel)
    retrieved_image.platform.should.equal(instance.platform)
    retrieved_image.creationDate.should_not.be.none
    instance.terminate()

    # Ensure we're no longer creating a volume
    volumes = conn.get_all_volumes()
    volumes.should.have.length_of(0)

    # Validate auto-created snapshot
    snapshots = conn.get_all_snapshots()
    snapshots.should.have.length_of(initial_ami_count + 1)

    retrieved_image_snapshot_id = (
        retrieved_image.block_device_mapping.current_value.snapshot_id
    )
    [s.id for s in snapshots].should.contain(retrieved_image_snapshot_id)
    snapshot = [s for s in snapshots if s.id == retrieved_image_snapshot_id][0]
    snapshot.description.should.equal(
        "Auto-created snapshot for AMI {0}".format(retrieved_image.id)
    )

    # root device should be in AMI's block device mappings
    root_mapping = retrieved_image.block_device_mapping.get(
        retrieved_image.root_device_name
    )
    root_mapping.should_not.be.none

    # Deregister: DryRun first, then for real.
    with pytest.raises(EC2ResponseError) as ex:
        success = conn.deregister_image(image_id, dry_run=True)
    ex.value.error_code.should.equal("DryRunOperation")
    ex.value.status.should.equal(400)
    ex.value.message.should.equal(
        "An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set"
    )

    success = conn.deregister_image(image_id)
    success.should.be.true

    # Deregistering the same id twice must raise InvalidAMIID.NotFound.
    with pytest.raises(EC2ResponseError) as cm:
        conn.deregister_image(image_id)
    cm.value.code.should.equal("InvalidAMIID.NotFound")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none
@requires_boto_gte("2.14.0")
@mock_ec2_deprecated
def test_ami_copy():
    """copy_image() duplicates an AMI's attributes and gets its own snapshot."""
    conn = boto.ec2.connect_to_region("us-west-1")

    initial_ami_count = len(AMIS)
    conn.get_all_volumes().should.have.length_of(0)
    conn.get_all_snapshots().should.have.length_of(initial_ami_count)

    reservation = conn.run_instances(EXAMPLE_AMI_ID)
    instance = reservation.instances[0]

    source_image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    instance.terminate()
    source_image = conn.get_all_images(image_ids=[source_image_id])[0]

    # Boto returns a 'CopyImage' object with an image_id attribute here. Use
    # the image_id to fetch the full info.
    # DryRun must fail with DryRunOperation and copy nothing.
    with pytest.raises(EC2ResponseError) as ex:
        copy_image_ref = conn.copy_image(
            source_image.region.name,
            source_image.id,
            "test-copy-ami",
            "this is a test copy ami",
            dry_run=True,
        )
    ex.value.error_code.should.equal("DryRunOperation")
    ex.value.status.should.equal(400)
    ex.value.message.should.equal(
        "An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set"
    )

    copy_image_ref = conn.copy_image(
        source_image.region.name,
        source_image.id,
        "test-copy-ami",
        "this is a test copy ami",
    )
    copy_image_id = copy_image_ref.image_id
    copy_image = conn.get_all_images(image_ids=[copy_image_id])[0]

    # The copy keeps the source image's attributes.
    copy_image.id.should.equal(copy_image_id)
    copy_image.virtualization_type.should.equal(source_image.virtualization_type)
    copy_image.architecture.should.equal(source_image.architecture)
    copy_image.kernel_id.should.equal(source_image.kernel_id)
    copy_image.platform.should.equal(source_image.platform)

    # Ensure we're no longer creating a volume
    conn.get_all_volumes().should.have.length_of(0)

    # Validate auto-created snapshot
    conn.get_all_snapshots().should.have.length_of(initial_ami_count + 2)

    # The copy must be backed by its own snapshot, not the source's.
    copy_image.block_device_mapping.current_value.snapshot_id.should_not.equal(
        source_image.block_device_mapping.current_value.snapshot_id
    )

    # Copy from non-existent source ID.
    with pytest.raises(EC2ResponseError) as cm:
        conn.copy_image(
            source_image.region.name,
            "ami-abcd1234",
            "test-copy-ami",
            "this is a test copy ami",
        )
    cm.value.code.should.equal("InvalidAMIID.NotFound")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none

    # Copy from non-existent source region.
    with pytest.raises(EC2ResponseError) as cm:
        invalid_region = (
            "us-east-1" if (source_image.region.name != "us-east-1") else "us-west-1"
        )
        conn.copy_image(
            invalid_region, source_image.id, "test-copy-ami", "this is a test copy ami"
        )
    cm.value.code.should.equal("InvalidAMIID.NotFound")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none
@mock_ec2
def test_copy_image_changes_owner_id():
    """Copying an AMI must re-own the copy under the default account id."""
    client = boto3.client("ec2", region_name="us-east-1")

    # This source AMI ID comes from moto/ec2/resources/amis.json.
    source_ami_id = "ami-03cf127a"

    # Sanity check: the source image must belong to a different owner,
    # otherwise this test could not prove anything.
    source_owner = client.describe_images(ImageIds=[source_ami_id])["Images"][0][
        "OwnerId"
    ]
    source_owner.should_not.equal(OWNER_ID)

    copy_resp = client.copy_image(
        SourceImageId=source_ami_id,
        Name="new-image",
        Description="a copy of an image",
        SourceRegion="us-east-1",
    )

    own_images = client.describe_images(Owners=["self"])["Images"]
    own_images[0]["OwnerId"].should.equal(OWNER_ID)
    own_images[0]["ImageId"].should.equal(copy_resp["ImageId"])
@mock_ec2_deprecated
def test_ami_tagging():
    """Tags added to an image show up via get_all_tags() and image.tags."""
    conn = boto.connect_vpc("the_key", "the_secret")
    reservation = conn.run_instances(EXAMPLE_AMI_ID)
    instance = reservation.instances[0]
    conn.create_image(instance.id, "test-ami", "this is a test ami")
    image = conn.get_all_images()[0]

    # DryRun must fail with DryRunOperation and not create the tag.
    with pytest.raises(EC2ResponseError) as ex:
        image.add_tag("a key", "some value", dry_run=True)
    ex.value.error_code.should.equal("DryRunOperation")
    ex.value.status.should.equal(400)
    ex.value.message.should.equal(
        "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
    )

    image.add_tag("a key", "some value")

    tag = conn.get_all_tags()[0]
    tag.name.should.equal("a key")
    tag.value.should.equal("some value")

    # Re-fetch the image so its tags reflect the newly created tag.
    image = conn.get_all_images()[0]
    image.tags.should.have.length_of(1)
    image.tags["a key"].should.equal("some value")
@mock_ec2_deprecated
def test_ami_create_from_missing_instance():
    """create_image() on an unknown instance id raises NotFound."""
    conn = boto.connect_ec2("the_key", "the_secret")

    with pytest.raises(EC2ResponseError) as err:
        conn.create_image("i-abcdefg", "test-ami", "this is a test ami")
    err.value.code.should.equal("InvalidInstanceID.NotFound")
    err.value.status.should.equal(400)
    err.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_ami_pulls_attributes_from_instance():
    """An AMI created from an instance inherits that instance's kernel."""
    conn = boto.connect_ec2("the_key", "the_secret")
    instance = conn.run_instances(EXAMPLE_AMI_ID).instances[0]
    instance.modify_attribute("kernel", "test-kernel")

    ami_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    ami = conn.get_image(ami_id)
    ami.kernel_id.should.equal("test-kernel")
@mock_ec2_deprecated
def test_ami_uses_account_id_if_valid_access_key_is_supplied():
    """Self-owned images carry the account id associated with the access key."""
    access_key = "AKIAXXXXXXXXXXXXXXXX"
    conn = boto.connect_ec2(access_key, "the_secret")
    instance = conn.run_instances(EXAMPLE_AMI_ID).instances[0]
    instance.modify_attribute("kernel", "test-kernel")

    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    owned = [(ami.id, ami.owner_id) for ami in conn.get_all_images(owners=["self"])]
    owned.should.equal([(image_id, ACCOUNT_ID)])
@mock_ec2_deprecated
def test_ami_filters():
    """get_all_images() filters select on architecture, kernel, platform, etc."""
    conn = boto.connect_ec2("the_key", "the_secret")

    # Image A: i386 / windows / hvm.
    reservationA = conn.run_instances(EXAMPLE_AMI_ID)
    instanceA = reservationA.instances[0]
    instanceA.modify_attribute("architecture", "i386")
    instanceA.modify_attribute("kernel", "k-1234abcd")
    instanceA.modify_attribute("platform", "windows")
    instanceA.modify_attribute("virtualization_type", "hvm")
    imageA_id = conn.create_image(instanceA.id, "test-ami-A", "this is a test ami")
    imageA = conn.get_image(imageA_id)

    # Image B: x86_64 / linux / paravirtual, then made public.
    reservationB = conn.run_instances(EXAMPLE_AMI_ID)
    instanceB = reservationB.instances[0]
    instanceB.modify_attribute("architecture", "x86_64")
    instanceB.modify_attribute("kernel", "k-abcd1234")
    instanceB.modify_attribute("platform", "linux")
    instanceB.modify_attribute("virtualization_type", "paravirtual")
    imageB_id = conn.create_image(instanceB.id, "test-ami-B", "this is a test ami")
    imageB = conn.get_image(imageB_id)
    imageB.set_launch_permissions(group_names=("all"))

    # NOTE(review): the exact counts below include moto's pre-seeded AMIs and
    # are therefore coupled to the bundled amis.json fixture.
    amis_by_architecture = conn.get_all_images(filters={"architecture": "x86_64"})
    set([ami.id for ami in amis_by_architecture]).should.contain(imageB.id)
    len(amis_by_architecture).should.equal(35)

    amis_by_kernel = conn.get_all_images(filters={"kernel-id": "k-abcd1234"})
    set([ami.id for ami in amis_by_kernel]).should.equal(set([imageB.id]))

    amis_by_virtualization = conn.get_all_images(
        filters={"virtualization-type": "paravirtual"}
    )
    set([ami.id for ami in amis_by_virtualization]).should.contain(imageB.id)
    len(amis_by_virtualization).should.equal(3)

    amis_by_platform = conn.get_all_images(filters={"platform": "windows"})
    set([ami.id for ami in amis_by_platform]).should.contain(imageA.id)
    len(amis_by_platform).should.equal(24)

    amis_by_id = conn.get_all_images(filters={"image-id": imageA.id})
    set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id]))

    amis_by_state = conn.get_all_images(filters={"state": "available"})
    ami_ids_by_state = [ami.id for ami in amis_by_state]
    ami_ids_by_state.should.contain(imageA.id)
    ami_ids_by_state.should.contain(imageB.id)
    len(amis_by_state).should.equal(36)

    amis_by_name = conn.get_all_images(filters={"name": imageA.name})
    set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id]))

    # Only image B was granted the 'all' launch group, so only it is public.
    amis_by_public = conn.get_all_images(filters={"is-public": "true"})
    set([ami.id for ami in amis_by_public]).should.contain(imageB.id)
    len(amis_by_public).should.equal(35)

    amis_by_nonpublic = conn.get_all_images(filters={"is-public": "false"})
    set([ami.id for ami in amis_by_nonpublic]).should.contain(imageA.id)
    len(amis_by_nonpublic).should.equal(1)
@mock_ec2_deprecated
def test_ami_filtering_via_tag():
    """Images can be looked up through tag:<key> filters."""
    conn = boto.connect_vpc("the_key", "the_secret")

    instanceA = conn.run_instances(EXAMPLE_AMI_ID).instances[0]
    imageA = conn.get_image(
        conn.create_image(instanceA.id, "test-ami-A", "this is a test ami")
    )
    imageA.add_tag("a key", "some value")

    instanceB = conn.run_instances(EXAMPLE_AMI_ID).instances[0]
    imageB = conn.get_image(
        conn.create_image(instanceB.id, "test-ami-B", "this is a test ami")
    )
    imageB.add_tag("another key", "some other value")

    # Each tag filter must match exactly one of the two images.
    hits = conn.get_all_images(filters={"tag:a key": "some value"})
    set(ami.id for ami in hits).should.equal(set([imageA.id]))

    hits = conn.get_all_images(filters={"tag:another key": "some other value"})
    set(ami.id for ami in hits).should.equal(set([imageB.id]))
@mock_ec2_deprecated
def test_getting_missing_ami():
    """Fetching a well-formed but unknown AMI id raises NotFound."""
    conn = boto.connect_ec2("the_key", "the_secret")

    with pytest.raises(EC2ResponseError) as err:
        conn.get_image("ami-missing")
    err.value.code.should.equal("InvalidAMIID.NotFound")
    err.value.status.should.equal(400)
    err.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_getting_malformed_ami():
    """Fetching an id that does not look like an AMI id raises Malformed."""
    conn = boto.connect_ec2("the_key", "the_secret")

    with pytest.raises(EC2ResponseError) as err:
        conn.get_image("foo-missing")
    err.value.code.should.equal("InvalidAMIID.Malformed")
    err.value.status.should.equal(400)
    err.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_ami_attribute_group_permissions():
    """Adding/removing the 'all' launch group toggles the image's publicity."""
    conn = boto.connect_ec2("the_key", "the_secret")
    reservation = conn.run_instances(EXAMPLE_AMI_ID)
    instance = reservation.instances[0]
    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    image = conn.get_image(image_id)

    # Baseline: no launch permissions on a freshly created image.
    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.name.should.equal("launch_permission")
    attributes.attrs.should.have.length_of(0)

    ADD_GROUP_ARGS = {
        "image_id": image.id,
        "attribute": "launchPermission",
        "operation": "add",
        "groups": "all",
    }

    REMOVE_GROUP_ARGS = {
        "image_id": image.id,
        "attribute": "launchPermission",
        "operation": "remove",
        "groups": "all",
    }

    # Add 'all' group and confirm.
    # DryRun must fail with DryRunOperation and not change the attribute.
    with pytest.raises(EC2ResponseError) as ex:
        conn.modify_image_attribute(**dict(ADD_GROUP_ARGS, **{"dry_run": True}))
    ex.value.error_code.should.equal("DryRunOperation")
    ex.value.status.should.equal(400)
    ex.value.message.should.equal(
        "An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set"
    )

    conn.modify_image_attribute(**ADD_GROUP_ARGS)

    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.attrs["groups"].should.have.length_of(1)
    attributes.attrs["groups"].should.equal(["all"])
    image = conn.get_image(image_id)
    # Granting launch permission to the 'all' group makes the image public.
    image.is_public.should.equal(True)

    # Add is idempotent
    conn.modify_image_attribute.when.called_with(**ADD_GROUP_ARGS).should_not.throw(
        EC2ResponseError
    )

    # Remove 'all' group and confirm
    conn.modify_image_attribute(**REMOVE_GROUP_ARGS)

    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.attrs.should.have.length_of(0)
    image = conn.get_image(image_id)
    image.is_public.should.equal(False)

    # Remove is idempotent
    conn.modify_image_attribute.when.called_with(**REMOVE_GROUP_ARGS).should_not.throw(
        EC2ResponseError
    )
@mock_ec2_deprecated
def test_ami_attribute_user_permissions():
    """Launch-permission user ids can be added and removed individually."""
    conn = boto.connect_ec2("the_key", "the_secret")
    reservation = conn.run_instances(EXAMPLE_AMI_ID)
    instance = reservation.instances[0]
    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    image = conn.get_image(image_id)

    # Baseline: no launch permissions on a freshly created image.
    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.name.should.equal("launch_permission")
    attributes.attrs.should.have.length_of(0)

    # Both str and int values should work.
    USER1 = "123456789011"
    USER2 = 123456789022

    ADD_USERS_ARGS = {
        "image_id": image.id,
        "attribute": "launchPermission",
        "operation": "add",
        "user_ids": [USER1, USER2],
    }

    REMOVE_USERS_ARGS = {
        "image_id": image.id,
        "attribute": "launchPermission",
        "operation": "remove",
        "user_ids": [USER1, USER2],
    }

    REMOVE_SINGLE_USER_ARGS = {
        "image_id": image.id,
        "attribute": "launchPermission",
        "operation": "remove",
        "user_ids": [USER1],
    }

    # Add multiple users and confirm
    conn.modify_image_attribute(**ADD_USERS_ARGS)

    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.attrs["user_ids"].should.have.length_of(2)
    # User ids come back as strings even when supplied as ints.
    set(attributes.attrs["user_ids"]).should.equal(set([str(USER1), str(USER2)]))
    image = conn.get_image(image_id)
    # Per-user permissions (unlike the 'all' group) never make the image public.
    image.is_public.should.equal(False)

    # Add is idempotent
    conn.modify_image_attribute.when.called_with(**ADD_USERS_ARGS).should_not.throw(
        EC2ResponseError
    )

    # Remove single user and confirm
    conn.modify_image_attribute(**REMOVE_SINGLE_USER_ARGS)

    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.attrs["user_ids"].should.have.length_of(1)
    set(attributes.attrs["user_ids"]).should.equal(set([str(USER2)]))
    image = conn.get_image(image_id)
    image.is_public.should.equal(False)

    # Remove multiple users and confirm
    conn.modify_image_attribute(**REMOVE_USERS_ARGS)

    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.attrs.should.have.length_of(0)
    image = conn.get_image(image_id)
    image.is_public.should.equal(False)

    # Remove is idempotent
    conn.modify_image_attribute.when.called_with(**REMOVE_USERS_ARGS).should_not.throw(
        EC2ResponseError
    )
@mock_ec2
def test_ami_describe_executable_users():
    """describe_images(ExecutableUsers=...) finds images the user may launch."""
    client = boto3.client("ec2", region_name="us-east-1")
    ec2 = boto3.resource("ec2", "us-east-1")
    ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)

    running = client.describe_instances(
        Filters=[{"Name": "instance-state-name", "Values": ["running"]}]
    )
    instance_id = running["Reservations"][0]["Instances"][0]["InstanceId"]
    image_id = client.create_image(InstanceId=instance_id, Name="TestImage")["ImageId"]

    user_id = "123456789011"

    # Grant the user launch permission on the freshly created image.
    client.modify_image_attribute(
        ImageId=image_id,
        Attribute="launchPermission",
        OperationType="add",
        UserIds=[user_id],
    )
    permissions = client.describe_image_attribute(
        ImageId=image_id, Attribute="LaunchPermissions", DryRun=False
    )["LaunchPermissions"]
    permissions.should.have.length_of(1)
    permissions[0]["UserId"].should.equal(user_id)

    # The image must now be visible when filtering by that executable user.
    images = client.describe_images(ExecutableUsers=[user_id])["Images"]
    images.should.have.length_of(1)
    images[0]["ImageId"].should.equal(image_id)
@mock_ec2
def test_ami_describe_executable_users_negative():
    """Filtering by a user without launch permission returns no images."""
    conn = boto3.client("ec2", region_name="us-east-1")
    ec2 = boto3.resource("ec2", "us-east-1")
    ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
    response = conn.describe_instances(
        Filters=[{"Name": "instance-state-name", "Values": ["running"]}]
    )
    instance_id = response["Reservations"][0]["Instances"][0]["InstanceId"]
    image_id = conn.create_image(InstanceId=instance_id, Name="TestImage")["ImageId"]

    USER1 = "123456789011"
    USER2 = "113355789012"

    ADD_USER_ARGS = {
        "ImageId": image_id,
        "Attribute": "launchPermission",
        "OperationType": "add",
        "UserIds": [USER1],
    }

    # Grant USER1 (and only USER1) launch permission.
    conn.modify_image_attribute(**ADD_USER_ARGS)
    attributes = conn.describe_image_attribute(
        ImageId=image_id, Attribute="LaunchPermissions", DryRun=False
    )
    attributes["LaunchPermissions"].should.have.length_of(1)
    attributes["LaunchPermissions"][0]["UserId"].should.equal(USER1)
    # USER2 was never granted permission, so it sees no images.
    images = conn.describe_images(ExecutableUsers=[USER2])["Images"]
    images.should.have.length_of(0)
@mock_ec2
def test_ami_describe_executable_users_and_filter():
    """ExecutableUsers can be combined with regular describe_images filters."""
    conn = boto3.client("ec2", region_name="us-east-1")
    ec2 = boto3.resource("ec2", "us-east-1")
    ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
    response = conn.describe_instances(
        Filters=[{"Name": "instance-state-name", "Values": ["running"]}]
    )
    instance_id = response["Reservations"][0]["Instances"][0]["InstanceId"]
    image_id = conn.create_image(InstanceId=instance_id, Name="ImageToDelete")[
        "ImageId"
    ]

    USER1 = "123456789011"

    ADD_USER_ARGS = {
        "ImageId": image_id,
        "Attribute": "launchPermission",
        "OperationType": "add",
        "UserIds": [USER1],
    }

    # Grant USER1 launch permission on the new image.
    conn.modify_image_attribute(**ADD_USER_ARGS)
    attributes = conn.describe_image_attribute(
        ImageId=image_id, Attribute="LaunchPermissions", DryRun=False
    )
    attributes["LaunchPermissions"].should.have.length_of(1)
    attributes["LaunchPermissions"][0]["UserId"].should.equal(USER1)
    # A state filter combined with ExecutableUsers still finds the image.
    images = conn.describe_images(
        ExecutableUsers=[USER1], Filters=[{"Name": "state", "Values": ["available"]}]
    )["Images"]
    images.should.have.length_of(1)
    images[0]["ImageId"].should.equal(image_id)
@mock_ec2_deprecated
def test_ami_attribute_user_and_group_permissions():
    """
    Boto supports adding/removing both users and groups at the same time.
    Just spot-check this -- input variations, idempotency, etc are validated
    via user-specific and group-specific tests above.
    """
    conn = boto.connect_ec2("the_key", "the_secret")
    reservation = conn.run_instances(EXAMPLE_AMI_ID)
    instance = reservation.instances[0]
    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    image = conn.get_image(image_id)

    # Baseline: no launch permissions on a freshly created image.
    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.name.should.equal("launch_permission")
    attributes.attrs.should.have.length_of(0)

    USER1 = "123456789011"
    USER2 = "123456789022"

    ADD_ARGS = {
        "image_id": image.id,
        "attribute": "launchPermission",
        "operation": "add",
        "groups": ["all"],
        "user_ids": [USER1, USER2],
    }

    REMOVE_ARGS = {
        "image_id": image.id,
        "attribute": "launchPermission",
        "operation": "remove",
        "groups": ["all"],
        "user_ids": [USER1, USER2],
    }

    # Add and confirm
    conn.modify_image_attribute(**ADD_ARGS)

    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.attrs["user_ids"].should.have.length_of(2)
    set(attributes.attrs["user_ids"]).should.equal(set([USER1, USER2]))
    set(attributes.attrs["groups"]).should.equal(set(["all"]))
    image = conn.get_image(image_id)
    # The 'all' group makes the image public regardless of the user grants.
    image.is_public.should.equal(True)

    # Remove and confirm
    conn.modify_image_attribute(**REMOVE_ARGS)

    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.attrs.should.have.length_of(0)
    image = conn.get_image(image_id)
    image.is_public.should.equal(False)
@mock_ec2_deprecated
def test_ami_attribute_error_cases():
    """modify_image_attribute rejects bad groups, bad user ids, unknown AMIs."""
    conn = boto.connect_ec2("the_key", "the_secret")
    reservation = conn.run_instances(EXAMPLE_AMI_ID)
    instance = reservation.instances[0]
    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    image = conn.get_image(image_id)

    # Error: Add with group != 'all'
    with pytest.raises(EC2ResponseError) as cm:
        conn.modify_image_attribute(
            image.id, attribute="launchPermission", operation="add", groups="everyone"
        )
    cm.value.code.should.equal("InvalidAMIAttributeItemValue")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none

    # Error: Add with user ID that isn't an integer.
    with pytest.raises(EC2ResponseError) as cm:
        conn.modify_image_attribute(
            image.id,
            attribute="launchPermission",
            operation="add",
            user_ids="12345678901A",
        )
    cm.value.code.should.equal("InvalidAMIAttributeItemValue")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none

    # Error: Add with user ID that is > length 12.
    with pytest.raises(EC2ResponseError) as cm:
        conn.modify_image_attribute(
            image.id,
            attribute="launchPermission",
            operation="add",
            user_ids="1234567890123",
        )
    cm.value.code.should.equal("InvalidAMIAttributeItemValue")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none

    # Error: Add with user ID that is < length 12.
    with pytest.raises(EC2ResponseError) as cm:
        conn.modify_image_attribute(
            image.id,
            attribute="launchPermission",
            operation="add",
            user_ids="12345678901",
        )
    cm.value.code.should.equal("InvalidAMIAttributeItemValue")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none

    # Error: Add with one invalid user ID among other valid IDs, ensure no
    # partial changes.
    with pytest.raises(EC2ResponseError) as cm:
        conn.modify_image_attribute(
            image.id,
            attribute="launchPermission",
            operation="add",
            user_ids=["123456789011", "foo", "123456789022"],
        )
    cm.value.code.should.equal("InvalidAMIAttributeItemValue")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none

    # The failed batch above must not have applied any of the valid ids.
    attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
    attributes.attrs.should.have.length_of(0)

    # Error: Add with invalid image ID
    with pytest.raises(EC2ResponseError) as cm:
        conn.modify_image_attribute(
            "ami-abcd1234", attribute="launchPermission", operation="add", groups="all"
        )
    cm.value.code.should.equal("InvalidAMIID.NotFound")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none

    # Error: Remove with invalid image ID
    with pytest.raises(EC2ResponseError) as cm:
        conn.modify_image_attribute(
            "ami-abcd1234",
            attribute="launchPermission",
            operation="remove",
            groups="all",
        )
    cm.value.code.should.equal("InvalidAMIID.NotFound")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none
@mock_ec2
def test_ami_describe_non_existent():
    """Loading a non-existent or malformed AMI id raises ClientError."""
    ec2 = boto3.resource("ec2", region_name="us-west-1")

    # Valid id pattern, but no such image exists.
    with pytest.raises(ClientError):
        ec2.Image("ami-abcd1234").load()

    # Malformed id pattern.
    with pytest.raises(ClientError):
        ec2.Image("not_an_ami_id").load()
@mock_ec2
def test_ami_registration():
    """register_image() creates an 'available' AMI with the default root device.

    NOTE(review): this test was defined twice back-to-back with identical
    bodies; the second definition silently shadowed the first.  Deduplicated
    into a single definition.
    """
    ec2 = boto3.client("ec2", region_name="us-east-1")
    image_id = ec2.register_image(Name="test-register-image").get("ImageId", "")
    images = ec2.describe_images(ImageIds=[image_id]).get("Images", [])
    assert images[0]["Name"] == "test-register-image", "No image was registered."
    assert images[0]["RootDeviceName"] == "/dev/sda1", "Wrong root device name."
    assert images[0]["State"] == "available", "State should be available."
@mock_ec2
def test_ami_filter_wildcard():
    """Name filters support shell-style wildcards such as ``test*``."""
    ec2_resource = boto3.resource("ec2", region_name="us-west-1")
    ec2_client = boto3.client("ec2", region_name="us-west-1")

    instance = ec2_resource.create_instances(
        ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1
    )[0]
    instance.create_image(Name="test-image")

    # Same owner, but this image's name does not match the wildcard below.
    instance.create_image(Name="not-matching-image")

    matches = ec2_client.describe_images(
        Owners=[ACCOUNT_ID], Filters=[{"Name": "name", "Values": ["test*"]}]
    )["Images"]
    matches.should.have.length_of(1)
@mock_ec2
def test_ami_filter_by_owner_id():
    """Owners=[id] returns only (and all of) that owner's images."""
    client = boto3.client("ec2", region_name="us-east-1")
    ubuntu_id = "099720109477"

    ubuntu_owner_ids = [
        ami["OwnerId"] for ami in client.describe_images(Owners=[ubuntu_id])["Images"]
    ]
    all_owner_ids = [ami["OwnerId"] for ami in client.describe_images()["Images"]]

    # Every filtered image belongs to the requested owner...
    assert all(ubuntu_owner_ids) and ubuntu_owner_ids[0] == ubuntu_id
    # ...and the filtered set is a strict subset of all images.
    assert len(ubuntu_owner_ids) < len(all_owner_ids)
@mock_ec2
def test_ami_filter_by_self():
    """Owners=['self'] lists only images created by the calling account."""
    ec2_resource = boto3.resource("ec2", region_name="us-west-1")
    ec2_client = boto3.client("ec2", region_name="us-west-1")

    def own_images():
        return ec2_client.describe_images(Owners=["self"])["Images"]

    # Nothing owned yet.
    own_images().should.have.length_of(0)

    # Create an image from a fresh instance; it should then show up.
    instance = ec2_resource.create_instances(
        ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1
    )[0]
    instance.create_image(Name="test-image")

    own_images().should.have.length_of(1)
@mock_ec2
def test_ami_snapshots_have_correct_owner():
    """Each AMI's backing snapshots must share that AMI's owner id."""
    client = boto3.client("ec2", region_name="us-west-1")

    # Group every image's snapshot ids by the image's owner.
    snapshot_ids_by_owner = {}
    for image in client.describe_images()["Images"]:
        owner_id = image["OwnerId"]
        snapshot_ids = [
            mapping["Ebs"]["SnapshotId"]
            for mapping in image["BlockDeviceMappings"]
        ]
        snapshot_ids_by_owner.setdefault(owner_id, []).extend(snapshot_ids)
        # adding an assertion to volumeType
        assert (
            image.get("BlockDeviceMappings", {})[0].get("Ebs", {}).get("VolumeType")
            == "standard"
        )

    # Every snapshot must report the same owner as the image it backs.
    for owner_id, snapshot_ids in snapshot_ids_by_owner.items():
        response = client.describe_snapshots(SnapshotIds=snapshot_ids)
        for snapshot in response["Snapshots"]:
            assert owner_id == snapshot["OwnerId"]
@mock_ec2
def test_create_image_with_tag_specification():
    """TagSpecifications passed to create_image end up on the new AMI."""
    ec2 = boto3.resource("ec2", region_name="us-west-1")
    client = boto3.client("ec2", region_name="us-west-1")
    tag_specifications = [
        {
            "ResourceType": "image",
            "Tags": [
                {
                    "Key": "Base_AMI_Name",
                    "Value": "Deep Learning Base AMI (Amazon Linux 2) Version 31.0",
                },
                {"Key": "OS_Version", "Value": "AWS Linux 2",},
            ],
        },
    ]
    instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0]
    image_id = client.create_image(
        InstanceId=instance.instance_id,
        Name="test-image",
        Description="test ami",
        TagSpecifications=tag_specifications,
    )["ImageId"]

    # The created AMI carries exactly the requested tags.
    image = client.describe_images(ImageIds=[image_id])["Images"][0]
    image["Tags"].should.equal(tag_specifications[0]["Tags"])

    # A resource type other than "image" must be rejected.
    with pytest.raises(ClientError) as ex:
        client.create_image(
            InstanceId=instance.instance_id,
            Name="test-image",
            Description="test ami",
            TagSpecifications=[
                {
                    "ResourceType": "invalid-resource-type",
                    "Tags": [{"Key": "key", "Value": "value"}],
                }
            ],
        )
    ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
    ex.value.response["Error"]["Message"].should.equal(
        "'invalid-resource-type' is not a valid taggable resource type for this operation."
    )
@mock_ec2
def test_ami_filter_by_empty_tag():
    """A tag filter with an empty value matches only images carrying that tag."""
    ec2 = boto3.resource("ec2", region_name="us-west-1")
    client = boto3.client("ec2", region_name="us-west-1")

    fake_images = []
    instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0]
    # Create ten tagged images from the same instance.
    for i in range(10):
        image = client.create_image(
            InstanceId=instance.instance_id,
            Name="MyAMI{}".format(i),
            Description="Test",
        )

        ec2.create_tags(
            Resources=[image["ImageId"]],
            Tags=[
                {
                    "Key": "Base_AMI_Name",
                    "Value": "Deep Learning Base AMI (Amazon Linux 2) Version 31.0",
                },
                {"Key": "OS_Version", "Value": "AWS Linux 2"},
            ],
        )
        fake_images.append(image)
    # Add release tags to some of the images in the middle
    for image in fake_images[3:6]:
        ec2.create_tags(
            Resources=[image["ImageId"]], Tags=[{"Key": "RELEASE", "Value": ""}]
        )
    images_filter = [
        {
            "Name": "tag:Base_AMI_Name",
            "Values": ["Deep Learning Base AMI (Amazon Linux 2) Version 31.0"],
        },
        {"Name": "tag:OS_Version", "Values": ["AWS Linux 2"]},
        # Empty value: only matches images that have the RELEASE tag set to "".
        {"Name": "tag:RELEASE", "Values": [""]},
    ]
    # Only the three RELEASE-tagged images satisfy all three filters.
    assert len(client.describe_images(Filters=images_filter)["Images"]) == 3
| |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_examples.lite.model_maker.third_party.efficientdet import dataloader
from tensorflow_examples.lite.model_maker.third_party.efficientdet import det_model_fn
from tensorflow_examples.lite.model_maker.third_party.efficientdet import hparams_config
from tensorflow_examples.lite.model_maker.third_party.efficientdet import utils
# ----------------------------------------------------------------------------
# Command-line flags.
# ----------------------------------------------------------------------------

# Cloud TPU / GCP connection settings.
flags.DEFINE_string(
    'tpu',
    default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
    'url.')
flags.DEFINE_string(
    'gcp_project',
    default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone',
    default=None,
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')

# Training strategy and hardware options.
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
                  'Training: gpus for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
    'use_xla', False,
    'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
    'and this flag has no effect.')

# Model, checkpoint and hyperparameter configuration.
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
    'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
    'initialization.')
flags.DEFINE_string('ckpt', None,
                    'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
    'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
    ' containing attributes to use as hyperparameters.')

# TPU core count and spatial partitioning settings.
flags.DEFINE_integer(
    'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
    'num_cores_per_replica',
    default=2,
    help='Number of TPU cores per replica when using spatial partition.')
flags.DEFINE_multi_integer(
    'input_partition_dims', [1, 2, 1, 1],
    'A list that describes the partition dims for all the tensors.')

# Batch sizes and checkpoint cadence.
flags.DEFINE_integer('train_batch_size', 64, 'global training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'global evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'Number of samples for eval.')
flags.DEFINE_integer('iterations_per_loop', 1000,
                     'Number of iterations per TPU training loop')
flags.DEFINE_integer('save_checkpoints_steps', 1000,
                     'Number of iterations per checkpoint save')

# Dataset locations.
flags.DEFINE_string(
    'train_file_pattern', None,
    'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('val_file_pattern', None,
                    'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
    'val_json_file', None,
    'COCO validation JSON containing golden bounding boxes. If None, use the '
    'ground truth from the dataloader. Ignored if testdev_dir is not None.')
flags.DEFINE_string('testdev_dir', None,
                    'COCO testdev dir. If not None, ignorer val_json_file.')

# Run mode and training schedule.
flags.DEFINE_integer('num_examples_per_epoch', 120000,
                     'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
                    'Mode to run: train or eval (default: train)')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_train', False, 'Run one eval after the '
                  'training finishes.')
flags.DEFINE_bool('profile', False, 'Profile training performance.')
flags.DEFINE_integer(
    'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
    ' across runs (for debugging).')

# For eval mode only.
flags.DEFINE_integer('min_eval_interval', 180,
                     'Minimum seconds between evaluations.')
flags.DEFINE_integer(
    'eval_timeout', None,
    'Maximum seconds between checkpoints before evaluation terminates.')

# For train_and_eval mode only.
flags.DEFINE_bool(
    'run_epoch_in_child_process', False,
    'This option helps to rectify CPU memory leak. If True, every epoch is '
    'run in a separate process for train and eval and memory will be cleared.'
    'Drawback: need to kill 2 processes if trainining needs to be interrupted.')

FLAGS = flags.FLAGS
def main(_):
  """Trains and/or evaluates an EfficientDet model as selected by --mode.

  Builds the estimator (TPUEstimator or plain Estimator depending on
  --strategy), the train/eval input pipelines, and then dispatches to one of
  the supported modes: 'train', 'eval', or 'train_and_eval'.

  Raises:
    RuntimeError: if a required file pattern flag is missing for the chosen
      mode, or if the spatial-partition flags are inconsistent.
  """
  if FLAGS.strategy == 'tpu':
    tf.disable_eager_execution()
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tpu_grpc_url = tpu_cluster_resolver.get_master()
    tf.Session.reset(tpu_grpc_url)
  else:
    tpu_cluster_resolver = None

  # Check data path.
  if FLAGS.mode in ('train', 'train_and_eval'):
    if FLAGS.train_file_pattern is None:
      raise RuntimeError('Must specify --train_file_pattern for train.')
  if FLAGS.mode in ('eval', 'train_and_eval'):
    if FLAGS.val_file_pattern is None:
      raise RuntimeError('Must specify --val_file_pattern for eval.')

  # Parse and override hparams.
  config = hparams_config.get_detection_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  if FLAGS.num_epochs:  # NOTE: remove this flag after updating all docs.
    config.num_epochs = FLAGS.num_epochs

  # Parse image size in case it is in string format.
  config.image_size = utils.parse_image_size(config.image_size)

  # The following is for spatial partitioning. `features` has one tensor while
  # `labels` had 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
  # partition is performed on `features` and all partitionable tensors of
  # `labels`, see the partition logic below.
  # In the TPUEstimator context, the meaning of `shard` and `replica` is the
  # same; following the API, here has mixed use of both.
  if FLAGS.use_spatial_partition:
    # Checks input_partition_dims agrees with num_cores_per_replica.
    if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
      # Fixed: the two string fragments previously concatenated without a
      # space ("arrayelements").
      raise RuntimeError('--num_cores_per_replica must be a product of array '
                         'elements in --input_partition_dims.')
    labels_partition_dims = {
        'mean_num_positives': None,
        'source_ids': None,
        'groundtruth_data': None,
        'image_scales': None,
        'image_masks': None,
    }
    # The Input Partition Logic: We partition only the partition-able tensors.
    feat_sizes = utils.get_feat_sizes(
        config.get('image_size'), config.get('max_level'))

    def _can_partition(spatial_dim):
      # A spatial dim is partitionable only if every entry of
      # --input_partition_dims divides it evenly. (Hoisted out of the level
      # loop below: it depends only on FLAGS, not on the loop variable.)
      partitionable_index = np.where(
          spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
      return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)

    for level in range(config.get('min_level'), config.get('max_level') + 1):
      spatial_dim = feat_sizes[level]
      if _can_partition(spatial_dim['height']) and _can_partition(
          spatial_dim['width']):
        labels_partition_dims['box_targets_%d' %
                              level] = FLAGS.input_partition_dims
        labels_partition_dims['cls_targets_%d' %
                              level] = FLAGS.input_partition_dims
      else:
        labels_partition_dims['box_targets_%d' % level] = None
        labels_partition_dims['cls_targets_%d' % level] = None
    num_cores_per_replica = FLAGS.num_cores_per_replica
    input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
    num_shards = FLAGS.num_cores // num_cores_per_replica
  else:
    num_cores_per_replica = None
    input_partition_dims = None
    num_shards = FLAGS.num_cores

  params = dict(
      config.as_dict(),
      model_name=FLAGS.model_name,
      iterations_per_loop=FLAGS.iterations_per_loop,
      model_dir=FLAGS.model_dir,
      num_shards=num_shards,
      num_examples_per_epoch=FLAGS.num_examples_per_epoch,
      strategy=FLAGS.strategy,
      backbone_ckpt=FLAGS.backbone_ckpt,
      ckpt=FLAGS.ckpt,
      val_json_file=FLAGS.val_json_file,
      testdev_dir=FLAGS.testdev_dir,
      profile=FLAGS.profile,
      mode=FLAGS.mode)
  config_proto = tf.ConfigProto(
      allow_soft_placement=True, log_device_placement=False)
  if FLAGS.strategy != 'tpu':
    if FLAGS.use_xla:
      config_proto.graph_options.optimizer_options.global_jit_level = (
          tf.OptimizerOptions.ON_1)
      config_proto.gpu_options.allow_growth = True

  model_dir = FLAGS.model_dir
  model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
  max_instances_per_image = config.max_instances_per_image
  if FLAGS.eval_samples:
    # Round up so a partial final batch still gets evaluated.
    eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
                     FLAGS.eval_batch_size)
  else:
    eval_steps = None
  total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
  train_steps = total_examples // FLAGS.train_batch_size
  logging.info(params)

  if not tf.io.gfile.exists(model_dir):
    tf.io.gfile.makedirs(model_dir)

  # Persist the resolved config alongside the checkpoints for reproducibility.
  config_file = os.path.join(model_dir, 'config.yaml')
  if not tf.io.gfile.exists(config_file):
    tf.io.gfile.GFile(config_file, 'w').write(str(config))

  train_input_fn = dataloader.InputReader(
      FLAGS.train_file_pattern,
      is_training=True,
      use_fake_data=FLAGS.use_fake_data,
      max_instances_per_image=max_instances_per_image)
  eval_input_fn = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      use_fake_data=FLAGS.use_fake_data,
      max_instances_per_image=max_instances_per_image)

  if FLAGS.strategy == 'tpu':
    tpu_config = tf.estimator.tpu.TPUConfig(
        FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
        num_cores_per_replica=num_cores_per_replica,
        input_partition_dims=input_partition_dims,
        per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
        .PER_HOST_V2)
    run_config = tf.estimator.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=model_dir,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        tpu_config=tpu_config,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tf_random_seed=FLAGS.tf_random_seed,
    )
    # TPUEstimator can do both train and eval.
    train_est = tf.estimator.tpu.TPUEstimator(
        model_fn=model_fn_instance,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        config=run_config,
        params=params)
    eval_est = train_est
  else:
    strategy = None
    if FLAGS.strategy == 'gpus':
      strategy = tf.distribute.MirroredStrategy()
    run_config = tf.estimator.RunConfig(
        model_dir=model_dir,
        train_distribute=strategy,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tf_random_seed=FLAGS.tf_random_seed,
    )

    def get_estimator(global_batch_size):
      # Estimator's batch size is per replica: divide the global batch size
      # across the replicas that run in sync.
      params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
      params['batch_size'] = global_batch_size // params['num_shards']
      return tf.estimator.Estimator(
          model_fn=model_fn_instance, config=run_config, params=params)

    # train and eval need different estimators due to different batch sizes.
    train_est = get_estimator(FLAGS.train_batch_size)
    eval_est = get_estimator(FLAGS.eval_batch_size)

  # Start the train/eval flow.
  if FLAGS.mode == 'train':
    train_est.train(input_fn=train_input_fn, max_steps=train_steps)
    if FLAGS.eval_after_train:
      eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)

  elif FLAGS.mode == 'eval':
    # Run evaluation when there's a new checkpoint.
    for ckpt in tf.train.checkpoints_iterator(
        FLAGS.model_dir,
        min_interval_secs=FLAGS.min_eval_interval,
        timeout=FLAGS.eval_timeout):

      logging.info('Starting to evaluate.')
      try:
        eval_results = eval_est.evaluate(
            eval_input_fn, steps=eval_steps, name=FLAGS.eval_name)
        # Terminate eval job when final checkpoint is reached.
        try:
          current_step = int(os.path.basename(ckpt).split('-')[1])
        except IndexError:
          logging.info('%s has no global step info: stop!', ckpt)
          break

        utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
        if current_step >= train_steps:
          logging.info('Eval finished step %d/%d', current_step, train_steps)
          break

      except tf.errors.NotFoundError:
        # The checkpoint may have been deleted by the time eval finished;
        # simply skip such cases.
        logging.info('Checkpoint %s no longer exists, skipping.', ckpt)

  elif FLAGS.mode == 'train_and_eval':
    # Resume from the latest checkpoint's epoch, if any.
    ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
    try:
      step = int(os.path.basename(ckpt).split('-')[1])
      current_epoch = (
          step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
      logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
    except (IndexError, TypeError):
      logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
      current_epoch = 0

    def run_train_and_eval(e):
      # One full cycle: train up to epoch `e`, then evaluate and archive the
      # best checkpoint so far.
      print('\n =====> Starting training, epoch: %d.' % e)
      train_est.train(
          input_fn=train_input_fn,
          max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
      print('\n =====> Starting evaluation, epoch: %d.' % e)
      eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
      ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
      utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)

    epochs_per_cycle = 1  # higher number has less graph construction overhead.
    for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
      if FLAGS.run_epoch_in_child_process:
        # Running each epoch in a child process releases all the memory the
        # epoch leaked when the process exits.
        p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
        p.start()
        p.join()
        if p.exitcode != 0:
          return p.exitcode
      else:
        tf.compat.v1.reset_default_graph()
        run_train_and_eval(e)

  else:
    logging.info('Invalid mode: %s', FLAGS.mode)
if __name__ == '__main__':
  # absl parses the flags defined above, then invokes main().
  app.run(main)
| |
import os
import tempfile
import shutil
from xml.etree import ElementTree as ET
from django.test import TransactionTestCase
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import UploadedFile
from django.contrib.auth.models import Group
from django.db import IntegrityError
from hs_core import hydroshare
from hs_core.hydroshare import utils
from hs_core.models import CoreMetaData, Creator, Contributor, Coverage, Rights, Title, Language, \
Publisher, Identifier, Type, Subject, Description, Date, Format, Relation, Source
from hs_core.testing import MockIRODSTestCaseMixin, TestCaseCommonUtilities
from hs_geo_raster_resource.models import RasterResource, OriginalCoverage, BandInformation, \
CellInformation
class TestRasterMetaData(MockIRODSTestCaseMixin, TestCaseCommonUtilities, TransactionTestCase):
def setUp(self):
    """Create a test user, an empty RasterResource, and readable temp copies
    of every raster fixture file used by the tests in this class.

    Each fixture is copied from ``hs_geo_raster_resource/tests/`` into a
    scratch directory (removed in tearDown) and opened for reading.
    """
    super(TestRasterMetaData, self).setUp()
    self.group, _ = Group.objects.get_or_create(name='Resource Author')
    self.user = hydroshare.create_account(
        'user1@nowhere.com',
        username='user1',
        first_name='Creator_FirstName',
        last_name='Creator_LastName',
        superuser=False,
        groups=[self.group]
    )

    self.resRaster = hydroshare.create_resource(
        resource_type='RasterResource',
        owner=self.user,
        title='My Test Raster Resource'
    )

    self.temp_dir = tempfile.mkdtemp()

    def _stage_fixture(file_name):
        # Copy the named fixture into the scratch dir; return the original
        # fixture path and an open read handle on the temp copy.
        source_path = 'hs_geo_raster_resource/tests/{}'.format(file_name)
        target_path = os.path.join(self.temp_dir, file_name)
        shutil.copy(source_path, target_path)
        return source_path, open(target_path, 'r')

    self.raster_tif_file_name = 'raster_tif_valid.tif'
    self.raster_tif_file, self.raster_tif_file_obj = _stage_fixture(
        self.raster_tif_file_name)

    self.raster_bearcr_tif_file_name = 'BearCk.tif'
    self.raster_bearcr_tif_file, self.raster_bearcr_tif_file_obj = _stage_fixture(
        self.raster_bearcr_tif_file_name)

    self.raster_honduras_tif_file_name = 'Honduras.tif'
    self.raster_honduras_tif_file, self.raster_honduras_tif_file_obj = _stage_fixture(
        self.raster_honduras_tif_file_name)

    self.raster_mawhefel_tif_file_name = 'mawhefel.tif'
    self.raster_mawhefel_tif_file, self.raster_mawhefel_tif_file_obj = _stage_fixture(
        self.raster_mawhefel_tif_file_name)

    self.raster_lidardem_tif_file_name = 'lidarDem.tif'
    self.raster_lidardem_tif_file, self.raster_lidardem_tif_file_obj = _stage_fixture(
        self.raster_lidardem_tif_file_name)

    self.raster_htelevation_tif_file_name = 'HT_Elevation.tif'
    self.raster_htelevation_tif_file, self.raster_htelevation_tif_file_obj = \
        _stage_fixture(self.raster_htelevation_tif_file_name)

    self.raster_bad_tif_file_name = 'raster_tif_invalid.tif'
    self.raster_bad_tif_file, self.raster_bad_tif_file_obj = _stage_fixture(
        self.raster_bad_tif_file_name)

    self.raster_zip_file_name = 'raster_zip_valid.zip'
    self.raster_zip_file, self.raster_zip_file_obj = _stage_fixture(
        self.raster_zip_file_name)

    self.raster_bad_zip_file_name = 'raster_zip_invalid.zip'
    self.raster_bad_zip_file, self.raster_bad_zip_file_obj = _stage_fixture(
        self.raster_bad_zip_file_name)

    # A non-raster file used to exercise upload validation failures. The
    # writer handle must be closed so the content is flushed to disk before
    # reopening for read (previously it was left open, so a reader could see
    # an empty file).
    temp_text_file = os.path.join(self.temp_dir, 'raster_text.txt')
    text_file = open(temp_text_file, 'w')
    text_file.write("Raster records")
    text_file.close()
    self.text_file_obj = open(temp_text_file, 'r')
def tearDown(self):
    # Remove the scratch directory (and every fixture copy in it) that was
    # created in setUp.
    super(TestRasterMetaData, self).tearDown()
    if os.path.exists(self.temp_dir):
        shutil.rmtree(self.temp_dir)
def test_allowed_file_types(self):
    """Only .tif and .zip uploads are accepted; invalid files are rejected
    by the pre-add validation step."""
    # the supported upload types are exactly '.tif' and '.zip'
    supported = RasterResource.get_supported_upload_file_types()
    self.assertIn('.tif', supported)
    self.assertIn('.zip', supported)
    self.assertEqual(len(supported), 2)

    # the resource starts out with no content files
    self.assertEqual(self.resRaster.files.all().count(), 0)

    # a plain text file must be rejected
    text_upload = [UploadedFile(file=self.text_file_obj, name=self.text_file_obj.name)]
    with self.assertRaises(utils.ResourceFileValidationException):
        utils.resource_file_add_pre_process(resource=self.resRaster, files=text_upload,
                                            user=self.user, extract_metadata=False)
        utils.resource_file_add_process(resource=self.resRaster, files=text_upload,
                                        user=self.user,
                                        extract_metadata=False)

    # a corrupt .tif file must also be rejected
    bad_tif_upload = [UploadedFile(file=self.raster_bad_tif_file_obj,
                                   name=self.raster_bad_tif_file_name)]
    with self.assertRaises(utils.ResourceFileValidationException):
        utils.resource_file_add_pre_process(resource=self.resRaster, files=bad_tif_upload,
                                            user=self.user, extract_metadata=False)
        utils.resource_file_add_process(resource=self.resRaster, files=bad_tif_upload,
                                        user=self.user, extract_metadata=False)

    # a valid .tif passes validation and gets added
    tif_upload = [UploadedFile(file=self.raster_tif_file_obj, name=self.raster_tif_file_name)]
    utils.resource_file_add_pre_process(resource=self.resRaster, files=tif_upload,
                                        user=self.user, extract_metadata=False)
    utils.resource_file_add_process(resource=self.resRaster, files=tif_upload, user=self.user,
                                    extract_metadata=False)

    # the system generates a companion .vrt, so 2 content files now exist
    self.assertEqual(self.resRaster.files.all().count(), 2)
    file_names = [os.path.basename(f.resource_file.name) for f in self.resRaster.files.all()]
    self.assertIn('raster_tif_valid.vrt', file_names)

    # removing the tif again leaves the resource empty
    hydroshare.delete_resource_file(self.resRaster.short_id, self.raster_tif_file_name,
                                    self.user)
    self.assertEqual(self.resRaster.files.all().count(), 0)

    # a valid .zip passes validation; its 10 member files get added
    zip_upload = [UploadedFile(file=self.raster_zip_file_obj, name=self.raster_zip_file_name)]
    utils.resource_file_add_pre_process(resource=self.resRaster, files=zip_upload,
                                        user=self.user, extract_metadata=False)
    utils.resource_file_add_process(resource=self.resRaster, files=zip_upload,
                                    user=self.user, extract_metadata=False)
    self.assertEqual(self.resRaster.files.all().count(), 10)

    # adding another file to a populated raster resource is not allowed
    with self.assertRaises(utils.ResourceFileValidationException):
        utils.resource_file_add_pre_process(resource=self.resRaster, files=zip_upload,
                                            user=self.user, extract_metadata=False)
def test_metadata_extraction_on_resource_creation(self):
    """Metadata should be extracted when a resource is created with a file."""
    # passing a file object that points to the temp dir doesn't work -
    # create_resource throws an error, so open the file from the fixed
    # file location
    uploads = [UploadedFile(file=self.raster_tif_file_obj, name=self.raster_tif_file_name)]
    self.resRaster = hydroshare.create_resource(
        'RasterResource',
        self.user,
        'My Test Raster Resource',
        files=uploads,
        metadata=[]
    )
    # file validation and metadata extraction run in the post-creation handler
    utils.resource_post_create_actions(resource=self.resRaster, user=self.user, metadata=[])
    super(TestRasterMetaData, self).raster_metadata_extraction()
def test_metadata_extraction_on_content_file_add(self):
    """Core metadata is minimal before any file is added; adding a valid tif
    then populates both core and extended (raster) metadata."""
    metadata = self.resRaster.metadata
    self.assertEqual(metadata.title.value, 'My Test Raster Resource')
    # no abstract yet
    self.assertEqual(metadata.description, None)
    # none of the repeatable core elements exist yet
    self.assertEqual(metadata.coverages.all().count(), 0)
    self.assertEqual(metadata.formats.all().count(), 0)
    self.assertEqual(metadata.subjects.all().count(), 0)
    self.assertEqual(metadata.contributors.all().count(), 0)
    # no raster-specific (extended) metadata either
    self.assertEqual(metadata.originalCoverage, None)
    self.assertEqual(metadata.cellInformation, None)
    self.assertEqual(metadata.bandInformations.count(), 0)

    # adding a valid tiff file generates core and extended metadata
    uploads = [UploadedFile(file=self.raster_tif_file_obj, name=self.raster_tif_file_name)]
    utils.resource_file_add_process(resource=self.resRaster, files=uploads, user=self.user,
                                    extract_metadata=False)
    super(TestRasterMetaData, self).raster_metadata_extraction()
def test_wgs84_coverage_extraction(self):
    """Resource-level (WGS 84) box coverage is extracted for rasters with a
    usable projection and skipped for those without one.

    The five per-fixture sections of the original test were identical except
    for the input file and expected bounds, so the boilerplate is factored
    into two local helpers. (Also fixes a copy-pasted comment that labelled
    the mawhefel.tif section as "Honduras".)
    """

    def _create_raster_resource(file_obj, file_name):
        # Create a raster resource from one uploaded file and run the
        # post-creation handler, which validates the file and extracts
        # metadata.
        files = [UploadedFile(file=file_obj, name=file_name)]
        self.resRaster = hydroshare.create_resource(
            'RasterResource',
            self.user,
            'My Test Raster Resource Coverage',
            files=files,
            metadata=[]
        )
        utils.resource_post_create_actions(resource=self.resRaster, user=self.user,
                                           metadata=[])

    def _assert_box_coverage(northlimit, eastlimit, southlimit, westlimit):
        # The extracted box coverage must be in WGS 84 decimal degrees with
        # the expected bounding values.
        box_coverage = self.resRaster.metadata.coverages.all().filter(type='box').first()
        self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
        self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
        self.assertEqual(box_coverage.value['northlimit'], northlimit)
        self.assertEqual(box_coverage.value['eastlimit'], eastlimit)
        self.assertEqual(box_coverage.value['southlimit'], southlimit)
        self.assertEqual(box_coverage.value['westlimit'], westlimit)

    # BearCk.tif: coverage is extracted
    _create_raster_resource(self.raster_bearcr_tif_file_obj,
                            self.raster_bearcr_tif_file_name)
    _assert_box_coverage(30.214583003567654, -97.92170777387547,
                         30.127513332692264, -98.01556648306897)
    self.resRaster.delete()

    # Honduras.tif: no coverage can be extracted
    _create_raster_resource(self.raster_honduras_tif_file_obj,
                            self.raster_honduras_tif_file_name)
    self.assertEqual(self.resRaster.files.all().count(), 2)
    self.assertEqual(self.resRaster.metadata.coverages.all().count(), 0)
    self.resRaster.delete()

    # mawhefel.tif: no coverage can be extracted either
    _create_raster_resource(self.raster_mawhefel_tif_file_obj,
                            self.raster_mawhefel_tif_file_name)
    self.assertEqual(self.resRaster.files.all().count(), 2)
    self.assertEqual(self.resRaster.metadata.coverages.all().count(), 0)
    self.resRaster.delete()

    # lidarDem.tif: coverage is extracted
    _create_raster_resource(self.raster_lidardem_tif_file_obj,
                            self.raster_lidardem_tif_file_name)
    _assert_box_coverage(40.678328957336106, -77.88875113570533,
                         40.650831557543725, -77.92486166705822)
    self.resRaster.delete()

    # HT_Elevation.tif: coverage is extracted
    _create_raster_resource(self.raster_htelevation_tif_file_obj,
                            self.raster_htelevation_tif_file_name)
    _assert_box_coverage(40.75042571735219, -77.84406750606215,
                         40.58088597175731, -78.01781620659817)
    self.resRaster.delete()
def test_metadata_on_content_file_delete(self):
    """Deleting the only content file wipes file-derived metadata while the
    resource-level metadata (title, creator) survives."""
    uploads = [UploadedFile(file=self.raster_tif_file_obj, name=self.raster_tif_file_name)]
    utils.resource_file_add_pre_process(resource=self.resRaster, files=uploads,
                                        user=self.user,
                                        extract_metadata=False)
    utils.resource_file_add_process(resource=self.resRaster, files=uploads, user=self.user,
                                    extract_metadata=True)

    # the tif plus the generated vrt: 2 files, 2 format elements
    self.assertEqual(self.resRaster.files.all().count(), 2)
    formats = self.resRaster.metadata.formats.all()
    self.assertEqual(formats.count(), 2)
    self.assertEqual(formats.filter(value='application/vrt').count(), 1)
    self.assertEqual(formats.filter(value='image/tiff').count(), 1)

    # drop the content file that was just added
    hydroshare.delete_resource_file(self.resRaster.short_id, self.raster_tif_file_name,
                                    self.user)
    self.assertEqual(self.resRaster.files.all().count(), 0)

    # resource-level metadata survives the delete
    self.assertNotEqual(self.resRaster.metadata.title, None)
    self.assertEqual(self.resRaster.metadata.creators.all().count(), 1)

    # everything that was derived from the file content is gone
    self.assertEqual(self.resRaster.metadata.description, None)
    self.assertEqual(self.resRaster.metadata.contributors.all().count(), 0)
    self.assertEqual(self.resRaster.metadata.coverages.all().count(), 0)
    self.assertEqual(self.resRaster.metadata.formats.all().count(), 0)
    self.assertEqual(self.resRaster.metadata.subjects.all().count(), 0)
    self.assertEqual(self.resRaster.metadata.originalCoverage, None)
    self.assertEqual(self.resRaster.metadata.cellInformation, None)
    self.assertEqual(self.resRaster.metadata.bandInformations.count(), 0)
    self.resRaster.delete()
def test_metadata_delete_on_resource_delete(self):
    """Deleting a resource must cascade-delete every metadata element.

    Adds a valid raster tif file (which generates core and all extended
    metadata), checks which element types exist for this resource's
    metadata object, deletes the resource, and then verifies that no
    element of any type remains.
    """
    files = [UploadedFile(file=self.raster_tif_file_obj, name=self.raster_tif_file_name)]
    utils.resource_file_add_process(resource=self.resRaster, files=files, user=self.user,
                                    extract_metadata=True)
    # element types that metadata extraction is expected to have created
    expected_present = (Creator, Identifier, Type, Title, Date, Coverage, Format,
                        Language, Rights, OriginalCoverage, CellInformation,
                        BandInformation)
    # element types that should not exist before the resource is deleted
    expected_absent = (Contributor, Source, Relation, Publisher, Description, Subject)

    # before resource delete
    raster_metadata_obj = self.resRaster.metadata
    self.assertEqual(CoreMetaData.objects.all().count(), 1)
    for element_type in expected_present:
        self.assertTrue(
            element_type.objects.filter(object_id=raster_metadata_obj.id).exists(),
            msg='{} metadata element should exist before delete'.format(
                element_type.__name__))
    for element_type in expected_absent:
        self.assertFalse(
            element_type.objects.filter(object_id=raster_metadata_obj.id).exists(),
            msg='{} metadata element should not exist before delete'.format(
                element_type.__name__))

    # delete resource - all metadata elements must be gone afterwards
    hydroshare.delete_resource(self.resRaster.short_id)
    self.assertEqual(CoreMetaData.objects.all().count(), 0)
    for element_type in expected_present + expected_absent:
        self.assertFalse(
            element_type.objects.filter(object_id=raster_metadata_obj.id).exists(),
            msg='{} metadata element should not exist after delete'.format(
                element_type.__name__))
def test_extended_metadata_CRUD(self):
    """Create, delete (expected to fail) and update the raster-specific
    metadata elements: originalcoverage, cellinformation, bandinformation.

    The statement order matters: each step mutates the resource's
    metadata and later assertions depend on the accumulated state.
    """
    # create new original coverage metadata with meaningful value
    value = {"northlimit": 12, "projection": "transverse_mercator", "units": "meter",
             "southlimit": 10, "eastlimit": 23, "westlimit": 2}
    self.resRaster.metadata.create_element('originalcoverage', value=value)
    self.assertEqual(self.resRaster.metadata.originalCoverage.value, value)
    # multiple original coverage elements are not allowed - should raise exception
    with self.assertRaises(IntegrityError):
        self.resRaster.metadata.create_element('originalcoverage', value=value)
    # create new cell information metadata with meaningful value
    self.resRaster.metadata.create_element('cellinformation', name='cellinfo',
                                           cellDataType='Float32',
                                           rows=1660, columns=985, cellSizeXValue=30.0,
                                           cellSizeYValue=30.0,
                                           )
    cell_info = self.resRaster.metadata.cellInformation
    self.assertEqual(cell_info.rows, 1660)
    self.assertEqual(cell_info.columns, 985)
    self.assertEqual(cell_info.cellSizeXValue, 30.0)
    self.assertEqual(cell_info.cellSizeYValue, 30.0)
    self.assertEqual(cell_info.cellDataType, 'Float32')
    # multiple cell Information elements are not allowed - should raise exception
    with self.assertRaises(IntegrityError):
        self.resRaster.metadata.create_element('cellinformation', name='cellinfo',
                                               cellDataType='Float32',
                                               rows=1660, columns=985,
                                               cellSizeXValue=30.0, cellSizeYValue=30.0,
                                               )
    # create band information element with meaningful value
    self.resRaster.metadata.create_element('bandinformation', name='bandinfo',
                                           variableName='diginal elevation',
                                           variableUnit='meter',
                                           method='this is method',
                                           comment='this is comment',
                                           maximumValue=1000, minimumValue=0, noDataValue=-9999)
    band_info = self.resRaster.metadata.bandInformations.first()
    self.assertEqual(band_info.name, 'bandinfo')
    self.assertEqual(band_info.variableName, 'diginal elevation')
    self.assertEqual(band_info.variableUnit, 'meter')
    self.assertEqual(band_info.method, 'this is method')
    self.assertEqual(band_info.comment, 'this is comment')
    # note: the numeric band properties are stored/returned as strings
    self.assertEqual(band_info.maximumValue, '1000')
    self.assertEqual(band_info.minimumValue, '0')
    self.assertEqual(band_info.noDataValue, '-9999')
    # multiple band information elements are allowed
    self.resRaster.metadata.create_element('bandinformation', name='bandinfo',
                                           variableName='diginal elevation2',
                                           variableUnit='meter',
                                           method='this is method',
                                           comment='this is comment',
                                           maximumValue=1000, minimumValue=0, noDataValue=-9999)
    self.assertEqual(self.resRaster.metadata.bandInformations.all().count(), 2)
    # delete
    # original coverage deletion is not allowed
    with self.assertRaises(ValidationError):
        self.resRaster.metadata.delete_element('originalcoverage',
                                               self.resRaster.metadata.originalCoverage.id)
    # cell information deletion is not allowed
    with self.assertRaises(ValidationError):
        self.resRaster.metadata.delete_element('cellinformation',
                                               self.resRaster.metadata.cellInformation.id)
    # band information deletion is not allowed
    with self.assertRaises(ValidationError):
        self.resRaster.metadata.delete_element(
            'bandinformation',
            self.resRaster.metadata.bandInformations.first().id)
    # update
    # update original coverage element
    value_2 = {"northlimit": 12.5, "projection": "transverse_mercator", "units": "meter",
               "southlimit": 10.5, "eastlimit": 23.5, "westlimit": 2.5}
    self.resRaster.metadata.update_element('originalcoverage',
                                           self.resRaster.metadata.originalCoverage.id,
                                           value=value_2)
    self.assertEqual(self.resRaster.metadata.originalCoverage.value, value_2)
    # update cell info element
    self.resRaster.metadata.update_element('cellinformation',
                                           self.resRaster.metadata.cellInformation.id,
                                           name='cellinfo', cellDataType='Double',
                                           rows=166, columns=98,
                                           cellSizeXValue=3.0, cellSizeYValue=3.0,
                                           )
    cell_info = self.resRaster.metadata.cellInformation
    self.assertEqual(cell_info.rows, 166)
    self.assertEqual(cell_info.columns, 98)
    self.assertEqual(cell_info.cellSizeXValue, 3.0)
    self.assertEqual(cell_info.cellSizeYValue, 3.0)
    self.assertEqual(cell_info.cellDataType, 'Double')
    # update band info element
    self.resRaster.metadata.update_element('bandinformation',
                                           self.resRaster.metadata.bandInformations.first().id,
                                           name='bandinfo',
                                           variableName='precipitation',
                                           variableUnit='mm/h',
                                           method='this is method2',
                                           comment='this is comment2',
                                           maximumValue=1001, minimumValue=1,
                                           noDataValue=-9998)
    band_info = self.resRaster.metadata.bandInformations.first()
    self.assertEqual(band_info.name, 'bandinfo')
    self.assertEqual(band_info.variableName, 'precipitation')
    self.assertEqual(band_info.variableUnit, 'mm/h')
    self.assertEqual(band_info.method, 'this is method2')
    self.assertEqual(band_info.comment, 'this is comment2')
    self.assertEqual(band_info.maximumValue, '1001')
    self.assertEqual(band_info.minimumValue, '1')
    self.assertEqual(band_info.noDataValue, '-9998')
    self.resRaster.delete()
def test_bulk_metadata_update(self):
    """Test the bulk update() method of the RasterMetaData class for both
    failure cases (no content files, unknown band name) and success cases
    (full update, partial update, mixed core + extended update)."""
    # here we are testing the update() method of the RasterMetaData class
    # update of resource specific metadata should fail when the resource does not have content
    # files
    self.assertEqual(self.resRaster.files.all().count(), 0)
    self.assertEqual(self.resRaster.metadata.bandInformations.all().count(), 0)
    band_data = {'original_band_name': 'bandinfo',
                 'name': 'Band_1',
                 'variableName': 'digital elevation',
                 'variableUnit': 'meter',
                 'method': 'this is method',
                 'comment': 'this is comment',
                 'maximumValue': 1000,
                 'minimumValue': 0,
                 'noDataValue': -9999
                 }
    metadata = []
    metadata.append({'bandinformation': band_data})
    with self.assertRaises(ValidationError):
        self.resRaster.metadata.update(metadata)
    self.assertEqual(self.resRaster.metadata.bandInformations.all().count(), 0)
    del metadata[:]
    # adding a valid tiff file should generate some core metadata and all extended metadata
    files = [UploadedFile(file=self.raster_tif_file_obj, name=self.raster_tif_file_name)]
    utils.resource_file_add_process(resource=self.resRaster, files=files, user=self.user,
                                    extract_metadata=False)
    # testing extended metadata element: band information
    self.assertEqual(self.resRaster.metadata.bandInformations.count(), 1)
    band_info = self.resRaster.metadata.bandInformations.first()
    # values below come from the uploaded tif file's extracted metadata
    self.assertEqual(band_info.noDataValue, '-3.40282346639e+38')
    self.assertEqual(band_info.maximumValue, '3031.44311523')
    self.assertEqual(band_info.minimumValue, '1358.33459473')
    self.assertEqual(band_info.name, 'Band_1')
    # updating of bandinformation using a name that does not exist (band-name) should fail
    band_data = {'original_band_name': 'band-name',
                 'name': 'Band_1',
                 'variableName': 'digital elevation',
                 'variableUnit': 'meter',
                 'method': 'this is method',
                 'comment': 'this is comment',
                 'maximumValue': 1000,
                 'minimumValue': 0,
                 'noDataValue': -9999
                 }
    metadata.append({'bandinformation': band_data})
    with self.assertRaises(ValidationError):
        self.resRaster.metadata.update(metadata)
    self.assertEqual(self.resRaster.metadata.bandInformations.all().count(), 1)
    # updating of bandinformation using a valid band lookup name (Band_1) should be successful
    band_data = {'original_band_name': 'Band_1',
                 'name': 'Band_2',
                 'variableName': 'digital elevation',
                 'variableUnit': 'meter',
                 'method': 'this is method',
                 'comment': 'this is comment',
                 'maximumValue': 1000,
                 'minimumValue': 0,
                 'noDataValue': -9999
                 }
    del metadata[:]
    metadata.append({'bandinformation': band_data})
    self.resRaster.metadata.update(metadata)
    self.assertEqual(self.resRaster.metadata.bandInformations.all().count(), 1)
    band_info = self.resRaster.metadata.bandInformations.first()
    self.assertEqual(band_info.name, 'Band_2')
    self.assertEqual(band_info.variableName, 'digital elevation')
    self.assertEqual(band_info.variableUnit, 'meter')
    self.assertEqual(band_info.method, 'this is method')
    self.assertEqual(band_info.comment, 'this is comment')
    # note: numeric band properties are stored/returned as strings
    self.assertEqual(band_info.maximumValue, '1000')
    self.assertEqual(band_info.minimumValue, '0')
    self.assertEqual(band_info.noDataValue, '-9999')
    # test updating only one attribute of bandinformation
    band_data = {'original_band_name': 'Band_2',
                 'name': 'Band_1'
                 }
    del metadata[:]
    metadata.append({'bandinformation': band_data})
    self.resRaster.metadata.update(metadata)
    self.assertEqual(self.resRaster.metadata.bandInformations.all().count(), 1)
    band_info = self.resRaster.metadata.bandInformations.first()
    self.assertEqual(band_info.name, 'Band_1')
    # test updating both core and resource specific metadata
    # there should be 1 creator
    self.assertEqual(self.resRaster.metadata.creators.all().count(), 1)
    # there should be no contributor
    self.assertEqual(self.resRaster.metadata.contributors.all().count(), 0)
    del metadata[:]
    metadata.append({'creator': {'name': 'creator one'}})
    metadata.append({'creator': {'name': 'creator two'}})
    metadata.append({'contributor': {'name': 'contributor one'}})
    metadata.append({'contributor': {'name': 'contributor two'}})
    band_data = {'original_band_name': 'Band_1',
                 'name': 'Band_3'
                 }
    metadata.append({'bandinformation': band_data})
    self.resRaster.metadata.update(metadata)
    band_info = self.resRaster.metadata.bandInformations.first()
    self.assertEqual(band_info.name, 'Band_3')
    # there should be 2 creators
    self.assertEqual(self.resRaster.metadata.creators.all().count(), 2)
    # there should be 2 contributor
    self.assertEqual(self.resRaster.metadata.contributors.all().count(), 2)
    self.resRaster.delete()
def test_get_xml(self):
    """The science metadata XML produced by get_xml() must be well formed."""
    # add a valid raster file so there is metadata to serialize
    uploads = [UploadedFile(file=self.raster_tif_file_obj, name=self.raster_tif_file_name)]
    utils.resource_file_add_pre_process(resource=self.resRaster, files=uploads, user=self.user,
                                        extract_metadata=False)
    utils.resource_file_add_process(resource=self.resRaster, files=uploads, user=self.user,
                                    extract_metadata=True)
    # ET.fromstring() raises a parse error on malformed XML, failing the test
    ET.fromstring(self.resRaster.metadata.get_xml())
    self.resRaster.delete()
def test_can_have_multiple_content_files(self):
    """A raster resource does not allow multiple content files."""
    self.assertFalse(RasterResource.can_have_multiple_files())
def test_can_upload_multiple_content_files(self):
    """A raster resource does not allow uploading multiple files at once."""
    # only one file can be uploaded
    self.assertFalse(RasterResource.allow_multiple_file_upload())
def test_public_or_discoverable(self):
    """A resource becomes eligible for public/discoverable status only after
    it has the required content files and all required metadata elements."""
    # a brand-new resource satisfies none of the requirements
    self.assertFalse(self.resRaster.has_required_content_files())
    self.assertFalse(self.resRaster.metadata.has_all_required_elements())
    self.assertFalse(self.resRaster.can_be_public_or_discoverable)
    # adding a valid raster file
    files = [UploadedFile(file=self.raster_tif_file_obj, name=self.raster_tif_file_name)]
    utils.resource_file_add_pre_process(resource=self.resRaster, files=files, user=self.user,
                                        extract_metadata=False)
    utils.resource_file_add_process(resource=self.resRaster, files=files, user=self.user,
                                    extract_metadata=True)
    # adding required metadata
    self.resRaster.metadata.create_element('description', abstract='example abstract')
    self.resRaster.metadata.create_element('subject', value='logan')
    # all requirements are now met
    self.assertTrue(self.resRaster.has_required_content_files())
    self.assertTrue(self.resRaster.metadata.has_all_required_elements())
    self.assertTrue(self.resRaster.can_be_public_or_discoverable)
    self.resRaster.delete()
| |
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have recieved a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
# Server version string.
# NOTE(review): "v1.1.-1" looks malformed (trailing ".-1"); kept as-is since
# other code may compare or display it verbatim -- confirm before changing.
VERSION = "v1.1.-1"

# Names given special recognition by the server.
INFO_VIPLIST = [
    #AKA: List of awesome people.
    # Mojang
    "jeb_",
    "notch",
    # Important TA people
    "aera",
    "andrewgodwin",
    "arbot",
    "Adam01",
    "aythrea",
    "NotMeh",
    "revenant",
    "tf_holiday", # arbot's new account
    "tyteen4a03",
    # Code contributors to products before Arc
    "destroyerx1",
    "erronjason",
    "goober",
    "kelraider",
    "willempiee",
    "varriount",
    # TA legacy staff
    "alka",
    "haxelnut",
    "aexis_rai",
    "bjzaba",
    "Animator",
    # Others we give our bows to.
    "fragmer",
    "pyropyro",
    "tktech"
]

# Byte length of each wire-format field code used by the Format strings in
# TYPE_FORMATS below. Presumably: b = byte, a = 1024-byte data array,
# s = 64-byte padded string, h = 2-byte short, i = 4-byte int -- confirm
# against the Format class.
FORMAT_LENGTHS = {
    "b": 1,
    "a": 1024,
    "s": 64,
    "h": 2,
    "i": 4,
}
from format import Format
# Packet type IDs (presumably the classic client/server protocol opcodes --
# confirm against the protocol handler that reads these).
TYPE_INITIAL = 0
TYPE_KEEPALIVE = 1
TYPE_PRECHUNK = 2
TYPE_CHUNK = 3
TYPE_LEVELSIZE = 4
TYPE_BLOCKCHANGE = 5
TYPE_BLOCKSET = 6
TYPE_SPAWNPOINT = 7
TYPE_PLAYERPOS = 8
TYPE_NINE = 9
TYPE_TEN = 10
TYPE_PLAYERDIR = 11
TYPE_PLAYERLEAVE = 12
TYPE_MESSAGE = 13
TYPE_ERROR = 14

# Wire format for each packet type; each letter is a field code whose size
# is given by FORMAT_LENGTHS above.
TYPE_FORMATS = {
    TYPE_INITIAL: Format("bssb"),
    TYPE_KEEPALIVE: Format(""),
    TYPE_PRECHUNK: Format(""),
    TYPE_CHUNK: Format("hab"),
    TYPE_LEVELSIZE: Format("hhh"),
    TYPE_BLOCKCHANGE: Format("hhhbb"),
    TYPE_BLOCKSET: Format("hhhb"),
    TYPE_SPAWNPOINT: Format("bshhhbb"),
    TYPE_PLAYERPOS: Format("bhhhbb"),
    TYPE_NINE: Format("bbbbbb"),
    TYPE_TEN: Format("bbbb"),
    TYPE_PLAYERDIR: Format("bbb"),
    TYPE_PLAYERLEAVE: Format("b"),
    TYPE_MESSAGE: Format("bs"),
    TYPE_ERROR: Format("s"),
}
# Internal task identifiers. The names suggest these tag work items queued
# between server components (world updates, chat/message routing, physics
# toggles, etc.) -- confirm against the dispatcher that consumes them.
TASK_BLOCKSET = 1
TASK_PLAYERPOS = 2
TASK_MESSAGE = 3
TASK_NEWPLAYER = 4
TASK_PLAYERLEAVE = 5
TASK_PLAYERDIR = 6
TASK_WORLDCHANGE = 7
TASK_ADMINMESSAGE = 8
TASK_WORLDMESSAGE = 9
TASK_ACTION = 10
TASK_SERVERMESSAGE = 11
TASK_PHYSICSON = 12
TASK_PHYSICSOFF = 13
TASK_FLUSH = 14
TASK_BLOCKGET = 15
TASK_STOP = 16
TASK_PLAYERCONNECT = 17
TASK_UNFLOOD = 18
TASK_FWATERON = 19
TASK_FWATEROFF = 20
TASK_PLAYERRESPAWN = 21
TASK_SERVERURGENTMESSAGE = 22
TASK_STAFFMESSAGE = 23
TASK_IRCSTAFFMESSAGE = 24
TASK_ONMESSAGE = 25
TASK_OPMESSAGE = 26
TASK_IRCMESSAGE = 27
TASK_AWAYMESSAGE = 28
TASK_GLOBALMESSAGE = 29
TASK_INSTANTRESPAWN = 30
TASK_INGAMEMESSAGE = 31
TASK_OWNERMESSAGE = 32
TASK_DIRECTORMESSAGE = 33
TASK_MODMESSAGE = 34
TASK_ADMINCHATMESSAGE = 35
TASK_BLACK = 36
TASK_GREEN = 37
TASK_CODERMESSAGE = 38
TASK_CYAN = 39
TASK_AWARD = 40

# Moderation action identifiers (used with TASK_ACTION, by the names).
ACTION_KICK = 1
ACTION_BAN = 2
ACTION_UNBAN = 3
ACTION_SILENCE = 4
ACTION_UNSILENCE = 5
ACTION_IPBAN = 6
# Chat colour codes: an ampersand followed by a single hex digit, embedded
# in message text.
COLOUR_BLACK = "&0"
COLOUR_DARKBLUE = "&1"
COLOUR_DARKGREEN = "&2"
COLOUR_DARKCYAN = "&3"
COLOUR_DARKRED = "&4"
COLOUR_DARKPURPLE = "&5"
COLOUR_DARKYELLOW = "&6"
COLOUR_GREY = "&7"
COLOUR_DARKGREY = "&8"
COLOUR_BLUE = "&9"
COLOUR_GREEN = "&a"
COLOUR_CYAN = "&b"
COLOUR_RED = "&c"
COLOUR_PURPLE = "&d"
COLOUR_YELLOW = "&e"
COLOUR_WHITE = "&f"
# Block type IDs 0-49. Each block has many alias names (accepted from user
# input, by the look of it); all aliases of the same block share one value.
# NOTE(review): the 29/30/31 colour-cloth names here (purple/indigo/violet)
# disagree with the names in BlockList below -- confirm which set matches
# the client before relying on either.

# 0: air
BLOCK_NOTHING = 0
BLOCK_NONE = 0
BLOCK_EMPTY = 0
BLOCK_AIR = 0
BLOCK_BLANK = 0
BLOCK_CLEAR = 0
# 1-7: basic terrain
BLOCK_ROCK = 1
BLOCK_ROCKS = 1
BLOCK_GRASS = 2
BLOCK_SOIL = 3
BLOCK_DIRT = 3
BLOCK_MUD = 3
BLOCK_BROWN = 3
BLOCK_GROUND = 3
BLOCK_STONE = 4
BLOCK_STONES = 4
BLOCK_COBBLESTONE = 4
BLOCK_COBBLESTONES = 4
BLOCK_COBBLE = 4
BLOCK_WOOD = 5
BLOCK_PLANK = 5
BLOCK_PLANKS = 5
BLOCK_BOARD = 5
BLOCK_BOARDS = 5
BLOCK_PLANT = 6
BLOCK_PLANTS = 6
BLOCK_SHRUB = 6
BLOCK_SHRUBS = 6
BLOCK_TREE = 6
BLOCK_TREES = 6
BLOCK_SAPPLING = 6
BLOCK_SAPLING = 6
BLOCK_SAPPLINGS = 6
BLOCK_SAPLINGS = 6
# 7: the unbreakable admin block (many aliases)
BLOCK_ADMINIUM = 7
BLOCK_OPCRETE = 7
BLOCK_ADMINCRETE = 7
BLOCK_DENSE = 7
BLOCK_HARDROCK = 7
BLOCK_HARDROCKS = 7
BLOCK_HARDEN = 7
BLOCK_ADMINBLOCK = 7
BLOCK_ADMINBLOCKS = 7
BLOCK_ADMIN_BLOCK = 7
BLOCK_ADMIN_BLOCKS = 7
BLOCK_HARD_ROCK = 7
BLOCK_HARD_ROCKS = 7
BLOCK_GROUND_ROCKS = 7
BLOCK_GROUND_ROCK = 7
BLOCK_GROUNDROCKS = 7
BLOCK_GROUNDROCK = 7
BLOCK_SOLID = 7
BLOCK_SOLIDS = 7
BLOCK_GROUNDSTONE = 7
BLOCK_HARDSTONE = 7
BLOCK_ADMINSTONE = 7
BLOCK_GROUNDSTONES = 7
BLOCK_HARDSTONES = 7
BLOCK_ADMINSTONES = 7
BLOCK_GROUND_STONE = 7
BLOCK_HARD_STONE = 7
BLOCK_ADMIN_STONE = 7
BLOCK_GROUND_STONES = 7
BLOCK_HARD_STONES = 7
BLOCK_ADMIN_STONES = 7
# 8-11: liquids (flowing and still variants)
BLOCK_WATER = 8
BLOCK_REALWATER = 8
BLOCK_REAL_WATER = 8
BLOCK_H2O = 8
BLOCK_STILLH2O = 9
BLOCK_STILL_H2O = 9
BLOCK_STILL_WATER = 9
BLOCK_STILLWATER = 9
BLOCK_WATERVATOR = 9
BLOCK_LAVA = 10
BLOCK_MAGMA = 10
BLOCK_STILL_LAVA = 11
BLOCK_STILLLAVA= 11
BLOCK_STILL_MAGMA = 11
BLOCK_STILLMAGMA= 11
BLOCK_LAVAVATOR = 11
# 12-16: sand, gravel and ores
BLOCK_SAND = 12
BLOCK_GRAVEL = 13
BLOCK_GOLD_ORE = 14
BLOCK_GOLDORE = 14
BLOCK_GOLDROCK = 14
BLOCK_GOLD_ORES = 14
BLOCK_GOLDORES = 14
BLOCK_GOLDROCKS = 14
BLOCK_COPPER_ORE = 15
BLOCK_COPPERORE = 15
BLOCK_IRON_ORE = 15
BLOCK_IRONORE = 15
BLOCK_COPPERROCK = 15
BLOCK_IRONROCK = 15
BLOCK_COPPER_ORES = 15
BLOCK_COPPERORES = 15
BLOCK_IRON_ORES = 15
BLOCK_IRONORES = 15
BLOCK_COPPERROCKS = 15
BLOCK_IRONROCKS = 15
BLOCK_COAL_ORES = 16
BLOCK_COALORES = 16
BLOCK_COALORE = 16
BLOCK_COAL_ORE = 16
BLOCK_COAL = 16
BLOCK_COALS = 16
BLOCK_ORE = 16
BLOCK_ORES = 16
BLOCK_OIL_ORES = 16
BLOCK_OIL = 16
BLOCK_BLACK_ORES = 16
BLOCK_BLACKORES = 16
BLOCK_BLACK_ORE = 16
BLOCK_BLACKORE = 16
# 17-20: tree parts, sponge and glass
BLOCK_LOG = 17
BLOCK_LOGS = 17
BLOCK_TRUNK = 17
BLOCK_STUMP = 17
BLOCK_TRUNKS = 17
BLOCK_STUMPS = 17
BLOCK_TREETRUNK = 17
BLOCK_TREESTUMP = 17
BLOCK_TREETRUNKS = 17
BLOCK_TREESTUMPS = 17
BLOCK_LEAVES = 18
BLOCK_LEAF = 18
BLOCK_FOLIAGE = 18
BLOCK_SPONGE = 19
BLOCK_SPONGES = 19
BLOCK_CHEESE = 19
BLOCK_CHEESES = 19
BLOCK_GLASS = 20
# 21-36: coloured cloth blocks
BLOCK_RED_CLOTH = 21
BLOCK_RED = 21
BLOCK_ORANGE_CLOTH = 22
BLOCK_ORANGE = 22
BLOCK_YELLOW_CLOTH = 23
BLOCK_YELLOW = 23
BLOCK_LIME_CLOTH = 24
BLOCK_LIME = 24
BLOCK_GREENYELLOW = 24
BLOCK_GREENYELLOW_CLOTH = 24
BLOCK_LIGHTGREEN = 24
BLOCK_LIGHTGREEN_CLOTH = 24
BLOCK_GREEN_YELLOW = 24
BLOCK_GREEN_YELLOW_CLOTH = 24
BLOCK_LIGHT_GREEN = 24
BLOCK_LIGHT_GREEN_CLOTH = 24
BLOCK_GREEN_CLOTH = 25
BLOCK_GREEN = 25
BLOCK_TURQUOISE_CLOTH = 26
BLOCK_TURQUOISE = 26
BLOCK_AQUA = 26
BLOCK_TEAL = 26
BLOCK_AQUAGREEN = 26
BLOCK_AQUAGREEN_CLOTH = 26
BLOCK_AQUA_GREEN = 26
BLOCK_AQUA_GREEN_CLOTH = 26
BLOCK_AQUA_CLOTH = 26
BLOCK_TEAL_CLOTH = 26
BLOCK_SPRINGGREEN_CLOTH = 26
BLOCK_SPRINGGREEN = 26
BLOCK_CYAN_CLOTH = 27
BLOCK_CYAN = 27
BLOCK_BLUE_CLOTH = 28
BLOCK_BLUE = 28
BLOCK_PURPLE_CLOTH = 29
BLOCK_PURPLE = 29
BLOCK_DARKBLUE = 29
BLOCK_DARKBLUE_CLOTH = 29
BLOCK_DARK_BLUE = 29
BLOCK_DARK_BLUE_CLOTH = 29
BLOCK_INDIGO_CLOTH = 30
BLOCK_INDIGO = 30
BLOCK_VIOLET_CLOTH = 31
BLOCK_VIOLET = 31
BLOCK_MAGENTA_CLOTH = 32
BLOCK_MAGENTA = 32
BLOCK_PINK_CLOTH = 33
BLOCK_PINK = 33
BLOCK_DARKGREY_CLOTH = 34
BLOCK_DARKGREY = 34
BLOCK_DARKGRAY_CLOTH = 34
BLOCK_DARKGRAY = 34
BLOCK_DARK_GREY_CLOTH = 34
BLOCK_DARK_GREY = 34
BLOCK_DARK_GRAY_CLOTH = 34
BLOCK_DARK_GRAY = 34
BLOCK_BLACK = 34
BLOCK_BLACK_CLOTH = 34
BLOCK_GREY_CLOTH = 35
BLOCK_GRAY_CLOTH = 35
BLOCK_GREY = 35
BLOCK_GRAY = 35
BLOCK_WHITE_CLOTH = 36
BLOCK_WHITE = 36
# 37-40: flowers and mushrooms
BLOCK_YELLOW_FLOWER = 37
BLOCK_YELLOWFLOWER = 37
BLOCK_YELLOW_FLOWERS = 37
BLOCK_YELLOWFLOWERS = 37
BLOCK_RED_FLOWER = 38
BLOCK_REDFLOWER = 38
BLOCK_RED_FLOWERS = 38
BLOCK_REDFLOWERS = 38
BLOCK_BROWN_MUSHROOM = 39
BLOCK_BROWN_SHROOM = 39
BLOCK_SHROOM = 39
BLOCK_BROWN_SHROOMS = 39
BLOCK_SHROOMS = 39
BLOCK_MUSHROOM = 39
BLOCK_BROWN_MUSHROOMS = 39
BLOCK_MUSHROOMS = 39
BLOCK_RED_MUSHROOM = 40
BLOCK_TOADSTOOL = 40
BLOCK_RED_MUSHROOMS = 40
BLOCK_TOADSTOOLS = 40
BLOCK_RED_SHROOM = 40
BLOCK_RED_SHROOMS = 40
# 41-49: metals, slabs and misc
BLOCK_GOLD = 41
BLOCK_STEEL = 42
BLOCK_IRON = 42
BLOCK_SILVER = 42
BLOCK_METAL = 42
BLOCK_DOUBLE_STAIR = 43
BLOCK_DOUBLESTEP = 43
BLOCK_DOUBLE_STEP = 43
BLOCK_DOUBLESTAIR = 43
BLOCK_DOUBLE_STAIRS = 43
BLOCK_DOUBLESTEPS = 43
BLOCK_DOUBLE_STEPS = 43
BLOCK_DOUBLESTAIRS = 43
BLOCK_DOUBLESLAB = 43
BLOCK_DOUBLESLABS = 43
BLOCK_DOUBLE_SLAB = 43
BLOCK_DOUBLE_SLABS = 43
BLOCK_SLAB = 44
BLOCK_SLABS = 44
BLOCK_STAIR = 44
BLOCK_STEP = 44
BLOCK_STAIRS = 44
BLOCK_STEPS = 44
BLOCK_BRICK = 45
BLOCK_BRICKS = 45
BLOCK_TNT = 46
BLOCK_DYNAMITE = 46
BLOCK_EXPLOSIVE = 46
BLOCK_EXPLOSIVES = 46
BLOCK_BOOKCASE = 47
BLOCK_BOOKSHELF = 47
BLOCK_SHELF = 47
BLOCK_BOOKCASES = 47
BLOCK_BOOKSHELVES = 47
BLOCK_SHELVES = 47
BLOCK_BOOKS = 47
BLOCK_MOSSY_COBBLESTONE = 48
BLOCK_MOSS = 48
BLOCK_MOSSY = 48
BLOCK_MOSSYCOBBLESTONE = 48
BLOCK_MOSSY_STONE = 48
BLOCK_MOSSYSTONE = 48
BLOCK_MOSSY_COBBLESTONES = 48
BLOCK_MOSSYCOBBLESTONES = 48
BLOCK_MOSSY_STONES = 48
BLOCK_MOSSYSTONES = 48
BLOCK_MOSSY_ROCKS = 48
BLOCK_MOSSY_ROCK = 48
BLOCK_MOSSYROCKS = 48
BLOCK_MOSSYROCK = 48
BLOCK_OBSIDIAN = 49
BLOCK_OPSIDIAN = 49
# Human-readable name for each of the 50 classic block IDs, indexed by ID.
# Built as a literal instead of the old pad-to-50-then-assign-by-index loop.
# Bug fix: IDs 43 and 44 were swapped -- 43 is the double slab and 44 the
# single slab, matching both the classic protocol and this module's own
# BLOCK_DOUBLESTEP == 43 / BLOCK_STEP == 44 constants above.
# NOTE(review): names 29-31 (indigo/violet/purple) follow the original list
# and the classic client, but disagree with the BLOCK_PURPLE/INDIGO/VIOLET
# constants above -- confirm which side is authoritative.
BlockList = [
    "air",             # 0
    "rock",            # 1
    "grass",           # 2
    "dirt",            # 3
    "stone",           # 4
    "wood",            # 5
    "plant",           # 6
    "adminblock",      # 7
    "water",           # 8
    "still water",     # 9
    "lava",            # 10
    "still lava",      # 11
    "sand",            # 12
    "gravel",          # 13
    "goldore",         # 14
    "ironore",         # 15
    "coal",            # 16
    "log",             # 17
    "leaves",          # 18
    "sponge",          # 19
    "glass",           # 20
    "red",             # 21
    "orange",          # 22
    "yellow",          # 23
    "lime",            # 24
    "green",           # 25
    "turquoise",       # 26
    "cyan",            # 27
    "blue",            # 28
    "indigo",          # 29
    "violet",          # 30
    "purple",          # 31
    "magenta",         # 32
    "pink",            # 33
    "black",           # 34
    "grey",            # 35
    "white",           # 36
    "yellow flower",   # 37
    "red flower",      # 38
    "brown mushroom",  # 39
    "red mushroom",    # 40
    "gold",            # 41
    "iron",            # 42
    "doublestep",      # 43 (was "step" -- swapped with 44)
    "step",            # 44 (was "doublestep" -- swapped with 43)
    "brick",           # 45
    "tnt",             # 46
    "bookcase",        # 47
    "moss",            # 48
    "obsidian",        # 49
]
class ServerFull(Exception):
    """Raised to reject a connection; the name indicates the server is at
    its player capacity (raised/handled elsewhere -- not in this module)."""
    pass
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
from django.contrib.auth import get_user_model
from django.test import TestCase
from model_mommy import mommy
from rest_framework.test import APIClient
from core.models import DataFile, Process, ProcessNode, Sample, Substrate
class TestProcessAPI(TestCase):
    """Integration tests for the /api/v0/process/ endpoints."""

    def setUp(self):
        # the API requires an authenticated client
        self.user = get_user_model().objects.create_user('username1',
                                                         password='')
        self.client = APIClient()
        self.client.login(username='username1', password='')

    def test_list_view_get(self):
        """
        Test that the list api returns correct items.
        """
        processes = [
            mommy.make(Process),
            mommy.make(Process),
        ]
        response = self.client.get('/api/v0/process/')
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertEqual(len(results), len(processes))
        for process, result in zip(processes, results):
            self.assertEqual(result.get('uuid_full'), str(process.uuid_full))

    def test_retrieve_view_get_full_uuid(self):
        """
        Test retrieval of a process using the full uuid.
        """
        process = mommy.make(Process)
        response = self.client.get(
            '/api/v0/process/{}/'.format(process.uuid_full.hex))
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertEqual(results.get('uuid_full'), str(process.uuid_full))
        self.assertIsNotNone(results.get('comment'))

    def test_retrieve_view_get_short_uuid(self):
        """
        Test retrieval of a process using the short uuid.
        """
        process = mommy.make(Process)
        response = self.client.get(
            '/api/v0/process/{}/'.format(process.uuid))
        # consistency fix: assert the status code like the full-uuid test does
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertEqual(results.get('uuid_full'), str(process.uuid_full))
        self.assertIsNotNone(results.get('comment'))

    def test_retrieve_node_view_get_full_uuid(self):
        """
        Test retrieval of a process node using the full uuid.
        """
        sample = Sample.objects.create(substrate=mommy.make(Substrate))
        sample.run_process(mommy.make(Process))
        node = sample.leaf_nodes[0]
        response = self.client.get(
            '/api/v0/process/node/{}/'.format(node.uuid_full.hex))
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertEqual(results.get('uuid_full'), str(node.uuid_full))
        self.assertIsNotNone(results.get('comment'))
        self.assertEqual(results.get('sample'), sample.uuid)

    def test_retrieve_node_view_get_short_uuid(self):
        """
        Test retrieval of a process node using the short uuid.
        """
        sample = Sample.objects.create(substrate=mommy.make(Substrate))
        sample.run_process(mommy.make(Process))
        node = sample.leaf_nodes[0]
        response = self.client.get(
            '/api/v0/process/node/{}/'.format(node.uuid))
        # consistency fix: assert the status code like the full-uuid test does
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertEqual(results.get('uuid_full'), str(node.uuid_full))
        self.assertIsNotNone(results.get('comment'))
        self.assertEqual(results.get('sample'), sample.uuid)

    def test_retrieve_file_view(self):
        """A process's file listing returns the datafiles attached to it."""
        sample = Sample.objects.create(substrate=mommy.make(Substrate))
        process = mommy.make(Process)
        datafile = mommy.make(DataFile)
        process.datafiles.add(datafile)
        sample.run_process(process)
        response = self.client.get(
            '/api/v0/process/{}/files/'.format(process.uuid))
        # consistency fix: assert the status code like the other tests do
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertEqual(len(results), 1)
        self.assertIsNotNone(results[0].get('id'))
        self.assertEqual(results[0].get('id'), datafile.id)
class TestSampleAPI(TestCase):
    """Integration tests for the /api/v0/sample/ endpoints, including the
    process-node tree and leaf views."""

    def setUp(self):
        # the API requires an authenticated client
        self.user = get_user_model().objects.create_user('username1',
                                                         password='')
        self.client = APIClient()
        self.client.login(username='username1', password='')

    def test_list_view_get(self):
        """The sample list endpoint returns every created sample."""
        samples = [Sample.objects.create(substrate=mommy.make(Substrate))
                   for i in range(5)]
        response = self.client.get('/api/v0/sample/')
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertEqual(len(results), len(samples))
        for sample, result in zip(samples, results):
            self.assertEqual(result.get('uuid'), sample.uuid)

    def test_retrieve_view_get(self):
        """A single sample can be retrieved by its short uuid."""
        sample = Sample.objects.create(substrate=mommy.make(Substrate))
        response = self.client.get('/api/v0/sample/{}/'.format(sample.uuid))
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        self.assertEqual(results.get('uuid'), sample.uuid)

    def test_retrieve_view_tree(self):
        """The node tree view mirrors the process history: two sequential
        processes, then a 3-way split with one process per piece."""
        sample = Sample.objects.create(substrate=mommy.make(Substrate))
        processes = {
            'step-1': [
                mommy.make(Process),
                mommy.make(Process),
            ],
            'step-2': [
                mommy.make(Process),
                mommy.make(Process),
                mommy.make(Process),
            ],
        }
        # run two processes in sequence, split into 3 pieces, then run one
        # process on each piece
        for process in processes['step-1']:
            sample.run_process(process)
        sample.split(self.user, 3)
        for piece, process in zip(['a', 'b', 'c'], processes['step-2']):
            sample.run_process(process, piece)
        response = self.client.get('/api/v0/sample/{}/node/tree/'.format(sample.uuid))
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        # walk the tree: root -> step-1 process 0 -> step-1 process 1
        children = results.get('nodes').get('children')
        self.assertEqual(len(children), 1)
        child = children[0]
        self.assertEqual(child.get('process').get('uuid'),
                         processes['step-1'][0].uuid)
        self.assertEqual(len(child.get('children')), 1)
        child = child.get('children')[0]
        self.assertEqual(child.get('process').get('uuid'),
                         processes['step-1'][1].uuid)
        # the split fans out into 3 pieces, each with its own step-2 process
        self.assertEqual(len(child.get('children')), 3)
        for c, process, piece in zip(child.get('children'),
                                     processes['step-2'],
                                     ['a', 'b', 'c']):
            self.assertEqual(c.get('piece'), piece)
            self.assertEqual(len(c.get('children')), 1)
            self.assertEqual(c.get('children')[0].get('process').get('uuid'),
                             process.uuid)

    def test_retrieve_view_leaf(self):
        """The leaf view returns only the terminal nodes (the step-2
        processes run after the split)."""
        sample = Sample.objects.create(substrate=mommy.make(Substrate))
        processes = {
            'step-1': [
                mommy.make(Process),
                mommy.make(Process),
            ],
            'step-2': [
                mommy.make(Process),
                mommy.make(Process),
                mommy.make(Process),
            ],
        }
        # same history as test_retrieve_view_tree
        for process in processes['step-1']:
            sample.run_process(process)
        sample.split(self.user, 3)
        for piece, process in zip(['a', 'b', 'c'], processes['step-2']):
            sample.run_process(process, piece)
        response = self.client.get('/api/v0/sample/{}/node/leaf/'.format(sample.uuid))
        self.assertEqual(response.status_code, 200)
        results = json.loads(response.content.decode('utf-8'))
        leaf_nodes = results.get('nodes')
        leaf_uuids = [p.uuid for p in processes['step-2']]
        for node in leaf_nodes:
            self.assertIn(node.get('process').get('uuid'), leaf_uuids)
class TestUserAPI(TestCase):
    """Exercises the read-only user listing endpoint."""

    def setUp(self):
        # Create one known account and authenticate the API client as it.
        get_user_model().objects.create_user('username1', password='')
        self.client = APIClient()
        self.client.login(username='username1', password='')

    def test_list_view_get(self):
        response = self.client.get('/api/v0/users/')
        self.assertEqual(response.status_code, 200)
        payload = json.loads(response.content.decode('utf-8'))
        self.assertEqual(len(payload), 1)
        first = payload[0]
        self.assertIsNotNone(first)
        self.assertEqual(first.get('username'), 'username1')
| |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sundry function and class decorators."""
from __future__ import print_function
import functools
import inspect
import sys
import textwrap
import types
import warnings
from .codegen import make_function_with_signature
from .exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning)
from ..extern import six
__all__ = ['deprecated', 'deprecated_attribute', 'lazyproperty',
'sharedmethod', 'wraps']
def deprecated(since, message='', name='', alternative='', pending=False,
               obj_type=None):
    """
    Used to mark a function or class as deprecated.

    To mark an attribute as deprecated, use `deprecated_attribute`.

    Parameters
    ----------
    since : str
        The release at which this API became deprecated. This is
        required.

    message : str, optional
        Override the default deprecation message. The format
        specifier ``func`` may be used for the name of the function,
        and ``alternative`` may be used in the deprecation message
        to insert the name of an alternative to the deprecated
        function. ``obj_type`` may be used to insert a friendly name
        for the type of object being deprecated.

    name : str, optional
        The name of the deprecated function or class; if not provided
        the name is automatically determined from the passed in
        function or class, though this is useful in the case of
        renamed functions, where the new function is just assigned to
        the name of the deprecated function. For example::

            def new_function():
                ...
            oldFunction = new_function

    alternative : str, optional
        An alternative function or class name that the user may use in
        place of the deprecated object. The deprecation warning will
        tell the user about this alternative if provided.

    pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of an
        AstropyDeprecationWarning.

    obj_type : str, optional
        The type of this object, if the automatically determined one
        needs to be overridden.
    """
    # Wrapper types that must be unwrapped before introspecting/wrapping
    # the underlying plain function.
    method_types = (classmethod, staticmethod, types.MethodType)

    def deprecate_doc(old_doc, message):
        """
        Returns a given docstring with a deprecation message prepended
        to it.
        """
        if not old_doc:
            old_doc = ''
        old_doc = textwrap.dedent(old_doc).strip('\n')
        new_doc = (('\n.. deprecated:: %(since)s'
                    '\n %(message)s\n\n' %
                    {'since': since, 'message': message.strip()}) + old_doc)
        if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
            # docutils when the original docstring was blank.
            new_doc += r'\ '
        return new_doc

    def get_function(func):
        """
        Given a function or classmethod (or other function wrapper type), get
        the function object.
        """
        if isinstance(func, method_types):
            try:
                func = func.__func__
            except AttributeError:
                # classmethods in Python2.6 and below lack the __func__
                # attribute so we need to hack around to get it
                method = func.__get__(None, object)
                if isinstance(method, types.FunctionType):
                    # For staticmethods anyways the wrapped object is just a
                    # plain function (not a bound method or anything like that)
                    func = method
                elif hasattr(method, '__func__'):
                    func = method.__func__
                elif hasattr(method, 'im_func'):
                    func = method.im_func
                else:
                    # Nothing we can do really... just return the original
                    # classmethod, etc.
                    return func
        return func

    def deprecate_function(func, message):
        """
        Returns a wrapped function that displays an
        ``AstropyDeprecationWarning`` when it is called.
        """
        # Remember the wrapper type (classmethod/staticmethod) so the
        # result can be re-wrapped the same way; plain functions pass
        # through unchanged.
        if isinstance(func, method_types):
            func_wrapper = type(func)
        else:
            func_wrapper = lambda f: f
        func = get_function(func)

        def deprecated_func(*args, **kwargs):
            if pending:
                category = AstropyPendingDeprecationWarning
            else:
                category = AstropyDeprecationWarning
            warnings.warn(message, category, stacklevel=2)
            return func(*args, **kwargs)

        # If this is an extension function, we can't call
        # functools.wraps on it, but we normally don't care.
        # This crazy way to get the type of a wrapper descriptor is
        # straight out of the Python 3.3 inspect module docs.
        if type(func) != type(str.__dict__['__add__']):
            deprecated_func = functools.wraps(func)(deprecated_func)
        deprecated_func.__doc__ = deprecate_doc(
            deprecated_func.__doc__, message)
        return func_wrapper(deprecated_func)

    def deprecate_class(cls, message):
        """
        Returns a wrapper class with the docstrings updated and an
        __init__ function that will raise an
        ``AstropyDeprecationWarning`` warning when called.
        """
        # Creates a new class with the same name and bases as the
        # original class, but updates the dictionary with a new
        # docstring and a wrapped __init__ method.  __module__ needs
        # to be manually copied over, since otherwise it will be set
        # to *this* module (astropy.utils.misc).
        # This approach seems to make Sphinx happy (the new class
        # looks enough like the original class), and works with
        # extension classes (which functools.wraps does not, since
        # it tries to modify the original class).
        # We need to add a custom pickler or you'll get
        #     Can't pickle <class ..>: it's not found as ...
        # errors. Picklability is required for any class that is
        # documented by Sphinx.
        members = cls.__dict__.copy()
        members.update({
            '__doc__': deprecate_doc(cls.__doc__, message),
            '__init__': deprecate_function(get_function(cls.__init__),
                                           message),
        })
        return type(cls)(cls.__name__, cls.__bases__, members)

    def deprecate(obj, message=message, name=name, alternative=alternative,
                  pending=pending):
        # Work out a friendly type name for the warning text.
        if obj_type is None:
            if isinstance(obj, type):
                obj_type_name = 'class'
            elif inspect.isfunction(obj):
                obj_type_name = 'function'
            elif inspect.ismethod(obj) or isinstance(obj, method_types):
                obj_type_name = 'method'
            else:
                obj_type_name = 'object'
        else:
            obj_type_name = obj_type
        if not name:
            name = get_function(obj).__name__
        altmessage = ''
        # ``message`` is either empty or (in the no-parentheses call style)
        # actually the decorated object itself; build the default text then.
        if not message or type(message) == type(deprecate):
            if pending:
                message = ('The %(func)s %(obj_type)s will be deprecated in a '
                           'future version.')
            else:
                message = ('The %(func)s %(obj_type)s is deprecated and may '
                           'be removed in a future version.')
            if alternative:
                altmessage = '\n Use %s instead.' % alternative
        message = ((message % {
            'func': name,
            'name': name,
            'alternative': alternative,
            'obj_type': obj_type_name}) +
            altmessage)
        if isinstance(obj, type):
            return deprecate_class(obj, message)
        else:
            return deprecate_function(obj, message)

    # Handles the ``deprecated(since, obj)`` call style, where the second
    # positional argument is the object to deprecate rather than a message.
    if type(message) == type(deprecate):
        return deprecate(message)

    return deprecate
def deprecated_attribute(name, since, message=None, alternative=None,
                         pending=False):
    """
    Used to mark a public attribute as deprecated.  This creates a
    property that will warn when the given attribute name is accessed.
    To prevent the warning (i.e. for internal code), use the private
    name for the attribute by prepending an underscore
    (i.e. ``self._name``).

    Parameters
    ----------
    name : str
        The name of the deprecated attribute.

    since : str
        The release at which this API became deprecated. This is
        required.

    message : str, optional
        Override the default deprecation message. The format
        specifier ``name`` may be used for the name of the attribute,
        and ``alternative`` may be used in the deprecation message
        to insert the name of an alternative to the deprecated
        function.

    alternative : str, optional
        An alternative attribute that the user may use in place of the
        deprecated attribute. The deprecation warning will tell the
        user about this alternative if provided.

    pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of an
        AstropyDeprecationWarning.

    Examples
    --------
    ::

        class MyClass:
            # Mark the old_name as deprecated
            old_name = misc.deprecated_attribute('old_name', '0.1')

            def method(self):
                self._old_name = 42
    """
    private_name = '_' + name

    # BUG FIX: ``message``, ``alternative`` and ``pending`` were previously
    # accepted but silently dropped; forward them to ``deprecated`` so the
    # emitted warning honors them.
    @deprecated(since, message=message, name=name, alternative=alternative,
                pending=pending, obj_type='attribute')
    def get(self):
        return getattr(self, private_name)

    @deprecated(since, message=message, name=name, alternative=alternative,
                pending=pending, obj_type='attribute')
    def set(self, val):
        setattr(self, private_name, val)

    @deprecated(since, message=message, name=name, alternative=alternative,
                pending=pending, obj_type='attribute')
    def delete(self):
        delattr(self, private_name)

    return property(get, set, delete)
class lazyproperty(object):
    """
    Works similarly to property(), but computes the value only once.

    This essentially memoizes the value of the property by storing the result
    of its computation in the ``__dict__`` of the object instance.  This is
    useful for computing the value of some property that should otherwise be
    invariant.  For example::

        >>> class LazyTest(object):
        ...     @lazyproperty
        ...     def complicated_property(self):
        ...         print('Computing the value for complicated_property...')
        ...         return 42
        ...
        >>> lt = LazyTest()
        >>> lt.complicated_property
        Computing the value for complicated_property...
        42
        >>> lt.complicated_property
        42

    As the example shows, the second time ``complicated_property`` is accessed,
    the ``print`` statement is not executed.  Only the return value from the
    first access of ``complicated_property`` is returned.

    If a setter for this property is defined, it will still be possible to
    manually update the value of the property, if that capability is desired.

    Adapted from the recipe at
    http://code.activestate.com/recipes/363602-lazy-property-evaluation
    """

    def __init__(self, fget, fset=None, fdel=None, doc=None):
        self._fget = fget
        self._fset = fset
        self._fdel = fdel
        if doc is None:
            self.__doc__ = fget.__doc__
        else:
            self.__doc__ = doc
        # The instance-__dict__ key under which the computed value is cached;
        # matches the getter's name.
        self._key = self._fget.__name__

    def __get__(self, obj, owner=None):
        # Accessed on the class itself: return the descriptor.
        if obj is None:
            return self
        try:
            # Cached value wins on every access after the first.
            return obj.__dict__[self._key]
        except KeyError:
            val = self._fget(obj)
            obj.__dict__[self._key] = val
            return val

    def __set__(self, obj, val):
        obj_dict = obj.__dict__
        if self._fset:
            ret = self._fset(obj, val)
            if ret is not None and obj_dict.get(self._key) is ret:
                # By returning the value set the setter signals that it took
                # over setting the value in obj.__dict__; this mechanism allows
                # it to override the input value
                return
        obj_dict[self._key] = val

    def __delete__(self, obj):
        if self._fdel:
            self._fdel(obj)
        if self._key in obj.__dict__:
            # Drop the cached value so the next access recomputes it.
            del obj.__dict__[self._key]

    def getter(self, fget):
        return self.__ter(fget, 0)

    def setter(self, fset):
        return self.__ter(fset, 1)

    def deleter(self, fdel):
        return self.__ter(fdel, 2)

    def __ter(self, f, arg):
        # Replace one of (fget, fset, fdel) and rebind the enclosing class
        # attribute to a fresh lazyproperty.  The property's name is found by
        # scanning the caller's (the class body's) local namespace for this
        # very descriptor object via a frame hack.
        args = [self._fget, self._fset, self._fdel, self.__doc__]
        args[arg] = f
        cls_ns = sys._getframe(1).f_locals
        for k, v in six.iteritems(cls_ns):
            if v is self:
                property_name = k
                break
        cls_ns[property_name] = lazyproperty(*args)
        return cls_ns[property_name]
class sharedmethod(classmethod):
    """
    This is a method decorator that allows both an instancemethod and a
    `classmethod` to share the same name.

    When using `sharedmethod` on a method defined in a class's body, it
    may be called on an instance, or on a class.  In the former case it
    behaves like a normal instance method (a reference to the instance is
    automatically passed as the first ``self`` argument of the method)::

        >>> class Example(object):
        ...     @sharedmethod
        ...     def identify(self, *args):
        ...         print('self was', self)
        ...         print('additional args were', args)
        ...
        >>> ex = Example()
        >>> ex.identify(1, 2)
        self was <astropy.utils.decorators.Example object at 0x...>
        additional args were (1, 2)

    In the latter case, when the `sharedmethod` is called directly from a
    class, it behaves like a `classmethod`::

        >>> Example.identify(3, 4)
        self was <class 'astropy.utils.decorators.Example'>
        additional args were (3, 4)

    This also supports a more advanced usage, where the `classmethod`
    implementation can be written separately.  If the class's *metaclass*
    has a method of the same name as the `sharedmethod`, the version on
    the metaclass is delegated to::

        >>> from astropy.extern.six import add_metaclass
        >>> class ExampleMeta(type):
        ...     def identify(self):
        ...         print('this implements the {0}.identify '
        ...               'classmethod'.format(self.__name__))
        ...
        >>> @add_metaclass(ExampleMeta)
        ... class Example(object):
        ...     @sharedmethod
        ...     def identify(self):
        ...         print('this implements the instancemethod')
        ...
        >>> Example().identify()
        this implements the instancemethod
        >>> Example.identify()
        this implements the Example.identify classmethod
    """

    if sys.version_info[:2] < (2, 7):
        # Workaround for Python 2.6 which does not have classmethod.__func__
        @property
        def __func__(self):
            try:
                meth = classmethod.__get__(self, self.__obj__,
                                           self.__objtype__)
            except AttributeError:
                # self.__obj__ not set when called from __get__, but then it
                # doesn't matter anyways
                meth = classmethod.__get__(self, None, object)
            return meth.__func__

        def __getobjwrapper(orig_get):
            """
            Used to temporarily set/unset self.__obj__ and self.__objtype__
            for use by __func__.
            """
            def __get__(self, obj, objtype=None):
                self.__obj__ = obj
                self.__objtype__ = objtype
                try:
                    return orig_get(self, obj, objtype)
                finally:
                    # Always clean up so stale references don't leak.
                    del self.__obj__
                    del self.__objtype__
            return __get__
    else:
        # On 2.7+ classmethod.__func__ exists, so no wrapping is needed.
        def __getobjwrapper(func):
            return func

    @__getobjwrapper
    def __get__(self, obj, objtype=None):
        if obj is None:
            # Class access: prefer a same-named method on the metaclass.
            mcls = type(objtype)
            clsmeth = getattr(mcls, self.__func__.__name__, None)
            if callable(clsmeth):
                if isinstance(clsmeth, types.MethodType):
                    # This case will generally only apply on Python 2, which
                    # uses MethodType for unbound methods; Python 3 has no
                    # particular concept of unbound methods and will just
                    # return a function
                    func = clsmeth.__func__
                else:
                    func = clsmeth
            else:
                func = self.__func__
            return self._make_method(func, objtype)
        else:
            # Instance access: behave like an ordinary bound method.
            return self._make_method(self.__func__, obj)

    # The wrapper helper is only needed while building the class body.
    del __getobjwrapper

    if six.PY3:
        # The 'instancemethod' type of Python 2 and the method type of
        # Python 3 have slightly different constructors
        @staticmethod
        def _make_method(func, instance):
            return types.MethodType(func, instance)
    else:
        @staticmethod
        def _make_method(func, instance):
            return types.MethodType(func, instance, type(instance))
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
          updated=functools.WRAPPER_UPDATES):
    """
    An alternative to `functools.wraps` which also preserves the original
    function's call signature by way of
    `~astropy.utils.codegen.make_function_with_signature`.

    The documentation for the original `functools.wraps` follows:

    """
    def wrapper(func):
        # Adopt the wrapped function's name only when __name__ is among the
        # attributes being copied over; otherwise keep the wrapper's own.
        name = wrapped.__name__ if '__name__' in assigned else func.__name__
        rebuilt = make_function_with_signature(func, name=name,
                                               **_get_function_args(wrapped))
        return functools.update_wrapper(rebuilt, wrapped, assigned=assigned,
                                        updated=updated)
    return wrapper
# Append the stdlib functools.wraps documentation to ours; the isinstance
# guard makes this a no-op when docstrings are stripped (e.g. ``python -OO``).
if isinstance(wraps.__doc__, six.string_types):
    wraps.__doc__ += functools.wraps.__doc__
if six.PY3:
    def _get_function_args(func):
        """
        Utility function for `wraps`.

        Reads the argspec for the given function and converts it to arguments
        for `make_function_with_signature`.  This requires different
        implementations on Python 2 versus Python 3.
        """
        argspec = inspect.getfullargspec(func)
        if argspec.defaults:
            args = argspec.args[:-len(argspec.defaults)]
            # BUG FIX: ``zip`` returns an iterator on Python 3, which has no
            # ``extend`` method -- functions with both defaults and
            # keyword-only arguments used to raise AttributeError below.
            kwargs = list(zip(argspec.args[len(args):], argspec.defaults))
        else:
            args = argspec.args
            kwargs = []
        if argspec.kwonlyargs:
            kwargs.extend((argname, argspec.kwonlydefaults[argname])
                          for argname in argspec.kwonlyargs)
        return {'args': args, 'kwargs': kwargs, 'varargs': argspec.varargs,
                'varkwargs': argspec.varkw}
else:
    def _get_function_args(func):
        """
        Utility function for `wraps`.

        Reads the argspec for the given function and converts it to arguments
        for `make_function_with_signature`.  This requires different
        implementations on Python 2 versus Python 3.
        """
        argspec = inspect.getargspec(func)
        if argspec.defaults:
            args = argspec.args[:-len(argspec.defaults)]
            kwargs = zip(argspec.args[len(args):], argspec.defaults)
        else:
            args = argspec.args
            # Use a list for consistency with the Python 3 implementation
            # (the previous ``{}`` iterated the same but was misleading).
            kwargs = []
        return {'args': args, 'kwargs': kwargs, 'varargs': argspec.varargs,
                'varkwargs': argspec.keywords}
| |
"""
Routers provide a convenient and consistent way of automatically
determining the URL conf for your API.
They are used by simply instantiating a Router class, and then registering
all the required ViewSets with that router.
For example, you might have a `urls.py` that looks something like this:
router = routers.DefaultRouter()
router.register('users', UserViewSet, 'user')
router.register('accounts', AccountViewSet, 'account')
urlpatterns = router.urls
"""
from __future__ import unicode_literals
import itertools
from collections import OrderedDict, namedtuple
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import NoReverseMatch
from rest_framework import views
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.urlpatterns import format_suffix_patterns
# A concrete route; ``url`` and ``name`` are format strings filled in
# per-viewset with {prefix}/{lookup}/{basename}/{trailing_slash}.
Route = namedtuple('Route', ['url', 'mapping', 'name', 'initkwargs'])
# Placeholder routes expanded from @detail_route / @list_route decorated
# methods on the viewset; {methodname} is substituted when expanded.
DynamicDetailRoute = namedtuple('DynamicDetailRoute', ['url', 'name', 'initkwargs'])
DynamicListRoute = namedtuple('DynamicListRoute', ['url', 'name', 'initkwargs'])
def replace_methodname(format_string, methodname):
    """
    Partially format a format_string, swapping out any
    '{methodname}' or '{methodnamehyphen}' components.
    """
    hyphenated = methodname.replace('_', '-')
    result = format_string.replace('{methodname}', methodname)
    return result.replace('{methodnamehyphen}', hyphenated)
def flatten(list_of_lists):
    """
    Takes an iterable of iterables, returns a single iterable containing all items
    """
    # NOTE: ``chain`` yields a one-shot iterator, not a list -- the result is
    # exhausted by the first full traversal (e.g. a membership test).
    return itertools.chain(*list_of_lists)
class BaseRouter(object):
    """Common registration and URL plumbing shared by all routers."""

    def __init__(self):
        self.registry = []

    def register(self, prefix, viewset, base_name=None):
        # Derive a name from the viewset when the caller gives none.
        if base_name is None:
            base_name = self.get_default_base_name(viewset)
        self.registry.append((prefix, viewset, base_name))

    def get_default_base_name(self, viewset):
        """
        If `base_name` is not specified, attempt to automatically determine
        it from the viewset.
        """
        raise NotImplementedError('get_default_base_name must be overridden')

    def get_urls(self):
        """
        Return a list of URL patterns, given the registered viewsets.
        """
        raise NotImplementedError('get_urls must be overridden')

    @property
    def urls(self):
        # Generate once on first access, then serve the cached list.
        try:
            return self._urls
        except AttributeError:
            self._urls = self.get_urls()
            return self._urls
class SimpleRouter(BaseRouter):
    # Template routes instantiated per registered viewset; Dynamic* entries
    # are expanded from decorated viewset methods in get_routes().
    routes = [
        # List route.
        Route(
            url=r'^{prefix}{trailing_slash}$',
            mapping={
                'get': 'list',
                'post': 'create'
            },
            name='{basename}-list',
            initkwargs={'suffix': 'List'}
        ),
        # Dynamically generated list routes.
        # Generated using @list_route decorator
        # on methods of the viewset.
        DynamicListRoute(
            url=r'^{prefix}/{methodname}{trailing_slash}$',
            name='{basename}-{methodnamehyphen}',
            initkwargs={}
        ),
        # Detail route.
        Route(
            url=r'^{prefix}/{lookup}{trailing_slash}$',
            mapping={
                'get': 'retrieve',
                'put': 'update',
                'patch': 'partial_update',
                'delete': 'destroy'
            },
            name='{basename}-detail',
            initkwargs={'suffix': 'Instance'}
        ),
        # Dynamically generated detail routes.
        # Generated using @detail_route decorator on methods of the viewset.
        DynamicDetailRoute(
            url=r'^{prefix}/{lookup}/{methodname}{trailing_slash}$',
            name='{basename}-{methodnamehyphen}',
            initkwargs={}
        ),
    ]

    def __init__(self, trailing_slash=True):
        # Stored as '/' or '' so route templates can interpolate it directly.
        self.trailing_slash = trailing_slash and '/' or ''
        super(SimpleRouter, self).__init__()

    def get_default_base_name(self, viewset):
        """
        If `base_name` is not specified, attempt to automatically determine
        it from the viewset.
        """
        queryset = getattr(viewset, 'queryset', None)
        assert queryset is not None, '`base_name` argument not specified, and could ' \
            'not automatically determine the name from the viewset, as ' \
            'it does not have a `.queryset` attribute.'
        return queryset.model._meta.object_name.lower()

    def get_routes(self, viewset):
        """
        Augment `self.routes` with any dynamically generated routes.

        Returns a list of the Route namedtuple.
        """
        known_actions = flatten([route.mapping.values() for route in self.routes if isinstance(route, Route)])

        # Determine any `@detail_route` or `@list_route` decorated methods on the viewset
        detail_routes = []
        list_routes = []
        for methodname in dir(viewset):
            attr = getattr(viewset, methodname)
            httpmethods = getattr(attr, 'bind_to_methods', None)
            detail = getattr(attr, 'detail', True)
            if httpmethods:
                # Decorated methods may not collide with the standard actions.
                if methodname in known_actions:
                    raise ImproperlyConfigured('Cannot use @detail_route or @list_route '
                                               'decorators on method "%s" '
                                               'as it is an existing route' % methodname)
                httpmethods = [method.lower() for method in httpmethods]
                if detail:
                    detail_routes.append((httpmethods, methodname))
                else:
                    list_routes.append((httpmethods, methodname))

        def _get_dynamic_routes(route, dynamic_routes):
            # Expand one Dynamic*Route template into concrete Route entries,
            # one per decorated method.
            ret = []
            for httpmethods, methodname in dynamic_routes:
                method_kwargs = getattr(viewset, methodname).kwargs
                initkwargs = route.initkwargs.copy()
                initkwargs.update(method_kwargs)
                # An explicit url_path on the decorator overrides the method name.
                url_path = initkwargs.pop("url_path", None) or methodname
                ret.append(Route(
                    url=replace_methodname(route.url, url_path),
                    mapping=dict((httpmethod, methodname) for httpmethod in httpmethods),
                    name=replace_methodname(route.name, url_path),
                    initkwargs=initkwargs,
                ))
            return ret

        ret = []
        for route in self.routes:
            if isinstance(route, DynamicDetailRoute):
                # Dynamic detail routes (@detail_route decorator)
                ret += _get_dynamic_routes(route, detail_routes)
            elif isinstance(route, DynamicListRoute):
                # Dynamic list routes (@list_route decorator)
                ret += _get_dynamic_routes(route, list_routes)
            else:
                # Standard route
                ret.append(route)
        return ret

    def get_method_map(self, viewset, method_map):
        """
        Given a viewset, and a mapping of http methods to actions,
        return a new mapping which only includes any mappings that
        are actually implemented by the viewset.
        """
        bound_methods = {}
        for method, action in method_map.items():
            if hasattr(viewset, action):
                bound_methods[method] = action
        return bound_methods

    def get_lookup_regex(self, viewset, lookup_prefix=''):
        """
        Given a viewset, return the portion of URL regex that is used
        to match against a single instance.

        Note that lookup_prefix is not used directly inside REST framework
        itself, but is required in order to nicely support nested router
        implementations, such as drf-nested-routers.

        https://github.com/alanjds/drf-nested-routers
        """
        base_regex = '(?P<{lookup_prefix}{lookup_url_kwarg}>{lookup_value})'
        # Use `pk` as the default field, unless set. The default regex should not
        # consume `.json` style suffixes and should break at '/' boundaries.
        lookup_field = getattr(viewset, 'lookup_field', 'pk')
        lookup_url_kwarg = getattr(viewset, 'lookup_url_kwarg', None) or lookup_field
        lookup_value = getattr(viewset, 'lookup_value_regex', '[^/.]+')
        return base_regex.format(
            lookup_prefix=lookup_prefix,
            lookup_url_kwarg=lookup_url_kwarg,
            lookup_value=lookup_value
        )

    def get_urls(self):
        """
        Use the registered viewsets to generate a list of URL patterns.
        """
        ret = []
        for prefix, viewset, basename in self.registry:
            lookup = self.get_lookup_regex(viewset)
            routes = self.get_routes(viewset)
            for route in routes:
                # Only actions which actually exist on the viewset will be bound
                mapping = self.get_method_map(viewset, route.mapping)
                if not mapping:
                    continue
                # Build the url pattern
                regex = route.url.format(
                    prefix=prefix,
                    lookup=lookup,
                    trailing_slash=self.trailing_slash
                )
                view = viewset.as_view(mapping, **route.initkwargs)
                name = route.name.format(basename=basename)
                ret.append(url(regex, view, name=name))
        return ret
class DefaultRouter(SimpleRouter):
    """
    The default router extends the SimpleRouter, but also adds in a default
    API root view, and adds format suffix patterns to the URLs.
    """
    include_root_view = True
    include_format_suffixes = True
    root_view_name = 'api-root'

    def get_api_root_view(self):
        """
        Return a view to use as the API root.
        """
        # Map each registered prefix to its list-view URL name; the nested
        # view class below closes over this dict.
        api_root_dict = OrderedDict()
        list_name = self.routes[0].name
        for prefix, viewset, basename in self.registry:
            api_root_dict[prefix] = list_name.format(basename=basename)

        class APIRoot(views.APIView):
            _ignore_model_permissions = True

            def get(self, request, *args, **kwargs):
                ret = OrderedDict()
                namespace = request.resolver_match.namespace
                for key, url_name in api_root_dict.items():
                    if namespace:
                        url_name = namespace + ':' + url_name
                    try:
                        ret[key] = reverse(
                            url_name,
                            args=args,
                            kwargs=kwargs,
                            request=request,
                            format=kwargs.get('format', None)
                        )
                    except NoReverseMatch:
                        # Don't bail out if eg. no list routes exist, only detail routes.
                        continue
                return Response(ret)

        return APIRoot.as_view()

    def get_urls(self):
        """
        Generate the list of URL patterns, including a default root view
        for the API, and appending `.json` style format suffixes.
        """
        urls = []
        if self.include_root_view:
            root_url = url(r'^$', self.get_api_root_view(), name=self.root_view_name)
            urls.append(root_url)
        default_urls = super(DefaultRouter, self).get_urls()
        urls.extend(default_urls)
        if self.include_format_suffixes:
            urls = format_suffix_patterns(urls)
        return urls
| |
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script packages the PNaCl translator files as:
(1) a Chrome Extension (crx), which can be used as a straight-forward CRX,
or used with the Chrome incremental installer (component updater)
(2) a Chrome Extension as a zip for uploading to the CWS.
(3) layout files for a normal Chrome installer.
This script depends on and pulls in the translator nexes and libraries
from the toolchain directory (so that must be downloaded first) and
it depends on the pnacl_irt_shim.
"""
import glob
import logging
import optparse
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import zipfile
# shutil.copytree does not allow the target directory to exist.
# Borrow this copy_tree, which does allow it (overwrites conflicts?).
from distutils.dir_util import copy_tree as copytree_existing
# Shorthand for os.path.join, used heavily throughout this script.
J = os.path.join
######################################################################
# Target arch and build arch junk to convert between all the
# silly conventions between SCons, Chrome and PNaCl.
# The version of the arch used by NaCl manifest files.
# This is based on the machine "building" this extension.
# We also used this to identify the arch-specific different versions of
# this extension.
def CanonicalArch(arch):
    """Return the NaCl-manifest spelling of *arch*, or None if unrecognized."""
    aliases = {
        'x86-64': ('x86_64', 'x86-64', 'x64', 'amd64'),
        # TODO(jvoung): be more specific about the arm architecture version?
        'arm': ('arm', 'armv7'),
        'x86-32': ('x86_32', 'x86-32', 'ia32', 'x86'),
    }
    for canonical, spellings in aliases.items():
        if arch in spellings:
            return canonical
    # i386 / i486 / i586 / i686 all count as 32-bit x86.
    if re.match('^i.86$', arch):
        return 'x86-32'
    return None
def GetBuildArch():
    """Canonical arch of the machine running this script."""
    return CanonicalArch(platform.machine())
# Arch of the machine doing the packaging, in canonical form.
BUILD_ARCH = GetBuildArch()
# All architectures this extension is packaged for.
ARCHES = ['x86-32', 'x86-64', 'arm']
def IsValidArch(arch):
    """Return True when *arch* is one of the supported canonical arches."""
    return any(arch == known for known in ARCHES)
# The version of the arch used by configure and pnacl's build.sh.
def StandardArch(arch):
    """Translate a canonical arch name to its GNU/configure spelling."""
    translation = {
        'x86-32': 'i686',
        'x86-64': 'x86_64',
        'arm': 'armv7',
    }
    return translation[arch]
######################################################################
def GetNaClRoot():
    """ Find the native_client path, relative to this script.
    This script is in ppapi/... and native_client is a sibling of ppapi.
    """
    script_file = os.path.abspath(__file__)

    def SearchForNaCl(cur_dir):
        # Walk upward until we reach the 'ppapi' directory, then take its
        # sibling 'native_client'.
        if cur_dir.endswith('ppapi'):
            parent = os.path.dirname(cur_dir)
            sibling = os.path.join(parent, 'native_client')
            if not os.path.isdir(sibling):
                raise Exception('Could not find native_client relative to %s' %
                                script_file)
            return sibling
        # Detect when we've reached the root (linux is /, but windows is not...)
        next_dir = os.path.dirname(cur_dir)
        if cur_dir == next_dir:
            raise Exception('Could not find native_client relative to %s' %
                            script_file)
        return SearchForNaCl(next_dir)

    return SearchForNaCl(script_file)
# Resolved once at import time; all toolchain paths below derive from it.
NACL_ROOT = GetNaClRoot()
######################################################################
# Normalize the platform name to be the way SCons finds chrome binaries.
# This is based on the platform "building" the extension.
def GetBuildPlatform():
    """Return the SCons-style name ('mac'/'linux'/'windows') of this host."""
    if sys.platform == 'darwin':
        return 'mac'
    if sys.platform.startswith('linux'):
        return 'linux'
    if sys.platform in ('cygwin', 'win32'):
        return 'windows'
    raise Exception('Unknown platform: %s' % sys.platform)
# Platform of the machine doing the packaging.
BUILD_PLATFORM = GetBuildPlatform()
def DetermineInstallerArches(target_arch):
    """Return the list of arches the installer layout must include."""
    arch = CanonicalArch(target_arch)
    if not IsValidArch(arch):
        raise Exception('Unknown target_arch %s' % target_arch)
    if BUILD_PLATFORM != 'windows':
        return [arch]
    # On windows, we need x86-32 and x86-64 (assuming non-windows RT).
    if arch.startswith('x86'):
        return ['x86-32', 'x86-64']
    raise Exception('Unknown target_arch on windows w/ target_arch == %s' %
                    target_arch)
class CRXGen(object):
    """ Generate a CRX file. Can generate a fresh CRX and private key, or
    create a version of new CRX with the same AppID, using an existing
    private key.

    NOTE: We use the chrome binary to do CRX packing. There is also a bash
    script available at: http://code.google.com/chrome/extensions/crx.html
    but it is not featureful (doesn't know how to generate private keys).

    We should probably make a version of this that doesn't require chrome.
    """
    @staticmethod
    def RunCRXGen(chrome_path, manifest_dir, private_key=None):
        # Validate the chrome binary before shelling out.
        if chrome_path is None:
            raise Exception('Chrome binary not specified!')
        if not os.path.isfile(chrome_path):
            raise Exception('Chrome binary not found: %s' % chrome_path)
        # In linux, run chrome in headless mode (even though crx-packing should
        # be headless, it's not quite with the zygote). This allows you to
        # run the tool under ssh or screen, etc.
        launcher = ['xvfb-run'] if BUILD_PLATFORM == 'linux' else []
        cmdline = launcher + [chrome_path, '--pack-extension=%s' % manifest_dir]
        if private_key is not None:
            cmdline.append('--pack-extension-key=%s' % private_key)
        StepBanner('GEN CRX', str(cmdline))
        if subprocess.call(cmdline) != 0:
            raise Exception('Failed to RunCRXGen: %s' % (cmdline))
######################################################################
def IsValidVersion(version):
    """ Return true if the version is a valid ID (a quad like 0.0.0.0).
    """
    # BUG FIX: use a raw string -- '\d' in a plain literal is an invalid
    # escape sequence (DeprecationWarning today, a SyntaxError in newer
    # Python versions).
    pat = re.compile(r'^\d+\.\d+\.\d+\.\d+$')
    # Returns the match object (truthy) or None, as before.
    return pat.search(version)
######################################################################
class PnaclPackaging(object):
    """Generates the manifest files that accompany the packaged translator."""

    # For dogfooding, we also create a webstore extension.
    # See: https://chrome.google.com/webstore/a/google.com/detail/gcodniebolpnpaiggndmcmmfpldlknih
    # To test offline, we need to be able to load via the command line on chrome,
    # but we also need the AppID to remain the same. Thus we supply the
    # public key in the unpacked/offline extension manifest. See:
    # http://code.google.com/chrome/extensions/manifest.html#key
    # Summary:
    # 1) install the extension, then look for key in
    # 2) <profile>/Default/Extensions/<extensionId>/<versionString>/manifest.json
    # (Fret not -- this is not the private key, it's just a key stored in the
    # user's profile directory).
    WEBSTORE_PUBLIC_KEY = ("MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC7zhW8iyt"
                           "dYid7SXLokWfxNoz2Co9x2ItkVUS53Iq12xDLfcKkUZ2RNX"
                           "Qtua+yKgRTRMP0HigPtn2KZeeJYzvBYLP/kz62B3nM5nS8M"
                           "o0qQKEsJiNgTf1uOgYGPyrE6GrFBFolLGstnZ1msVgNHEv2"
                           "dZruC2XewOJihvmeQsOjjwIDAQAB")

    package_base = os.path.dirname(__file__)
    # The extension system's manifest.json.
    manifest_template = J(package_base, 'pnacl_manifest_template.json')
    # Pnacl-specific info
    pnacl_template = J(package_base, 'pnacl_info_template.json')
    # Agreed-upon name for pnacl-specific info.
    pnacl_json = 'pnacl.json'

    @staticmethod
    def GenerateManifests(target_dir, version, arch, web_accessible,
                          all_host_permissions,
                          manifest_key=None):
        # Write the extension manifest and the pnacl info file side by side.
        PnaclPackaging.GenerateExtensionManifest(target_dir, version,
                                                 web_accessible,
                                                 all_host_permissions,
                                                 manifest_key)
        # For now, make the ABI version the same as pnacl-version...
        # It should probably be separate though.
        PnaclPackaging.GeneratePnaclInfo(target_dir, version, arch)

    @staticmethod
    def GenerateExtensionManifest(target_dir, version,
                                  web_accessible, all_host_permissions,
                                  manifest_key):
        # Fill the template's %(version)s / %(extra)s slots and write
        # target_dir/manifest.json.
        manifest_template_fd = open(PnaclPackaging.manifest_template, 'r')
        manifest_template = manifest_template_fd.read()
        manifest_template_fd.close()
        output_fd = open(J(target_dir, 'manifest.json'), 'w')
        extra = ''
        if web_accessible != []:
            extra += '"web_accessible_resources": [\n%s],\n' % ',\n'.join(
                [ ' "%s"' % to_quote for to_quote in web_accessible ])
        if manifest_key is not None:
            extra += ' "key": "%s",\n' % manifest_key
        if all_host_permissions:
            extra += ' "permissions": ["http://*/"],\n'
        output_fd.write(manifest_template % { "version" : version,
                                              "extra" : extra, })
        output_fd.close()

    @staticmethod
    def GeneratePnaclInfo(target_dir, version, arch, is_installer=False):
        # Fill the pnacl info template and write it under the agreed-upon
        # name (sanitized via UseWhitelistedChars for installer layouts).
        pnacl_template_fd = open(PnaclPackaging.pnacl_template, 'r')
        pnacl_template = pnacl_template_fd.read()
        pnacl_template_fd.close()
        if is_installer:
            out_name = J(target_dir, UseWhitelistedChars(PnaclPackaging.pnacl_json,
                                                         None))
        else:
            out_name = J(target_dir, PnaclPackaging.pnacl_json)
        output_fd = open(out_name, 'w')
        if isinstance(arch, list):
            # FIXME: Handle a list of arches, not just a wildcard "all".
            # Alternatively, perhaps we shouldn't bother checking what arch is
            # installed and assume the installer does the right thing.
            arch = 'all'
        output_fd.write(pnacl_template % { "abi-version" : version,
                                           "arch" : arch, })
        output_fd.close()
######################################################################
class PnaclDirs(object):
    """Central registry of the directories used while packaging PNaCl."""

    toolchain_dir = J(NACL_ROOT, 'toolchain')
    output_dir = J(toolchain_dir, 'pnacl-package')

    @staticmethod
    def TranslatorRoot():
        """Root of the pnacl translator toolchain."""
        return J(PnaclDirs.toolchain_dir, 'pnacl_translator')

    @staticmethod
    def LibDir(target_arch):
        """Directory holding the native libraries for target_arch."""
        return J(PnaclDirs.TranslatorRoot(), 'lib-%s' % target_arch)

    @staticmethod
    def SandboxedCompilerDir(target_arch):
        """Directory holding the sandboxed compiler binaries for target_arch."""
        return J(PnaclDirs.toolchain_dir, 'pnacl_translator',
                 StandardArch(target_arch), 'bin')

    @staticmethod
    def SetOutputDir(d):
        """Override the default packaging output directory."""
        PnaclDirs.output_dir = d

    @staticmethod
    def OutputDir():
        """Current packaging output directory."""
        return PnaclDirs.output_dir

    @staticmethod
    def OutputAllDir(version_quad):
        """Output directory for the arch-independent ("all") layout."""
        return J(PnaclDirs.OutputDir(), version_quad)

    @staticmethod
    def OutputArchBase(arch):
        """Directory base name used for a given arch."""
        return '%s' % arch

    @staticmethod
    def OutputArchDir(arch):
        """Return (parent_dir, nested_dir) for arch-specific output.

        The payload is nested one level down so that the layout matches
        the "all"/universal version.
        """
        base = PnaclDirs.OutputArchBase(arch)
        parent_dir = J(PnaclDirs.OutputDir(), base)
        return (parent_dir, J(parent_dir, base))
######################################################################
def StepBanner(short_desc, long_desc):
    """Log a one-line banner announcing a packaging step."""
    logging.info("**** %s\t%s", short_desc, long_desc)
def Clean():
    """Delete the previous packaging output directory, if there is one."""
    out_dir = PnaclDirs.OutputDir()
    StepBanner('CLEAN', 'Cleaning out old packaging: %s' % out_dir)
    if not os.path.isdir(out_dir):
        logging.info('Clean skipped -- no previous output directory!')
        return
    shutil.rmtree(out_dir)
######################################################################
def ZipDirectory(base_dir, zipfile):
    """ Zip all the files in base_dir into the given opened zipfile object.

    Archive names are made relative to base_dir; symlinked directories
    are followed.
    """
    for root, _, files in os.walk(base_dir, followlinks=True):
        for fname in files:
            path = J(root, fname)
            zipfile.write(path, os.path.relpath(path, base_dir))
def ListDirectoryRecursivelyAsURLs(base_dir):
    """ List all files that can be found from base_dir. Return names as
    URLs relative to the base_dir (forward-slash separated).
    """
    urls = []
    for root, _, files in os.walk(base_dir, followlinks=True):
        for fname in files:
            full_name = J(root, fname)
            if not os.path.isfile(full_name):
                continue
            rel_name = os.path.relpath(full_name, base_dir)
            urls.append('/'.join(rel_name.split(os.path.sep)))
    return urls
def GetWebAccessibleResources(base_dir):
    ''' Return the default list of web_accessible_resources to allow us
    to do a CORS request to get extension files. '''
    resources = ListDirectoryRecursivelyAsURLs(base_dir)
    # The pnacl.json metadata file must be reachable as well.
    resources.append(os.path.basename(PnaclPackaging.pnacl_json))
    return resources
def GeneratePrivateKey(options):
    """ Generate a dummy extension to generate a fresh private key. This will
    be left in the build dir, and the dummy extension will be cleaned up.
    """
    StepBanner('GEN PRIVATE KEY', 'Generating fresh private key')
    scratch_dir = tempfile.mkdtemp(dir=PnaclDirs.OutputDir())
    dummy_dir = J(scratch_dir, 'dummy_extension')
    os.mkdir(dummy_dir)
    # A throwaway manifest is all the CRX tool needs to mint a key pair.
    PnaclPackaging.GenerateManifests(dummy_dir,
                                     '0.0.0.0',
                                     'dummy_arch',
                                     [],
                                     False)
    CRXGen.RunCRXGen(options.chrome_path, dummy_dir)
    # Keep only the generated .pem; the scratch area is discarded.
    shutil.copy2(J(scratch_dir, 'dummy_extension.pem'),
                 PnaclDirs.OutputDir())
    shutil.rmtree(scratch_dir)
    logging.info('\n<<< Fresh key is now in %s/dummy_extension.pem >>>\n' %
                 PnaclDirs.OutputDir())
def BuildArchCRXForComponentUpdater(version_quad, arch, lib_overrides,
                                    options):
    """ Build an architecture specific version for the chrome component
    install (an actual CRX, vs a zip file). Though this is a CRX,
    it is not used as a chrome extension as the CWS and unpacked version.

    :param version_quad: version string of the form A.B.C.D
    :param arch: target architecture identifier
    :param lib_overrides: dict mapping arch -> list of override library paths
    :param options: parsed options; chrome_path, unpacked_only and
        prev_priv_key are read here
    """
    parent_dir, target_dir = PnaclDirs.OutputArchDir(arch)
    StepBanner('BUILD ARCH CRX %s' % arch,
               'Packaging for arch %s in %s' % (arch, target_dir))
    # Copy llc and ld.
    copytree_existing(PnaclDirs.SandboxedCompilerDir(arch), target_dir)
    # Rename llc.nexe to llc, ld.nexe to ld
    for tool in ('llc', 'ld'):
        shutil.move(J(target_dir, '%s.nexe' % tool), J(target_dir, tool))
    # Copy native libraries.
    copytree_existing(PnaclDirs.LibDir(arch), target_dir)
    # Also copy files from the list of overrides.
    if arch in lib_overrides:
        for override in lib_overrides[arch]:
            logging.info('Copying override %s to %s' % (override, target_dir))
            shutil.copy2(override, target_dir)
    # Skip the CRX generation if we are only building the unpacked version
    # for commandline testing.
    if options.unpacked_only:
        return
    # Generate manifest one level up (to have layout look like the "all" package).
    # NOTE: this does not have 'web_accessible_resources' and does not have
    # the all_host_permissions, since it isn't used via chrome-extension://
    # URL requests.
    PnaclPackaging.GenerateManifests(parent_dir,
                                     version_quad,
                                     arch,
                                     [],
                                     False)
    CRXGen.RunCRXGen(options.chrome_path, parent_dir, options.prev_priv_key)
def LayoutAllDir(version_quad):
    """Copy each arch-specific payload into the arch-independent layout."""
    StepBanner("Layout All Dir", "Copying Arch specific to Arch-independent.")
    target_dir = PnaclDirs.OutputAllDir(version_quad)
    for arch in ARCHES:
        _, arch_dir = PnaclDirs.OutputArchDir(arch)
        # NOTE: The arch parent dir contains the arch-specific manifest.json
        # files. We carefully avoid copying those to the "all dir" since
        # having more than one manifest.json will confuse the CRX tools
        # (e.g., you will get a mysterious failure when uploading to the
        # webstore).
        copytree_existing(arch_dir,
                          J(target_dir, PnaclDirs.OutputArchBase(arch)))
def BuildCWSZip(version_quad):
    """ Build a 'universal' chrome extension zipfile for webstore use (where the
    installer doesn't know the target arch). Assumes the individual arch
    versions were built.
    """
    StepBanner("CWS ZIP", "Making a zip with all architectures.")
    all_dir = PnaclDirs.OutputAllDir(version_quad)
    web_accessible = GetWebAccessibleResources(all_dir)
    # Overwrite the arch-specific 'manifest.json' that was there.
    PnaclPackaging.GenerateManifests(all_dir,
                                     version_quad,
                                     'all',
                                     web_accessible,
                                     True)
    zip_path = J(PnaclDirs.OutputDir(), 'pnacl_all.zip')
    zipf = zipfile.ZipFile(zip_path, 'w', compression=zipfile.ZIP_DEFLATED)
    ZipDirectory(all_dir, zipf)
    zipf.close()
def BuildUnpacked(version_quad):
    """ Build an unpacked chrome extension with all files for commandline
    testing (load on chrome commandline).
    """
    StepBanner("UNPACKED CRX", "Making an unpacked CRX of all architectures.")
    all_dir = PnaclDirs.OutputAllDir(version_quad)
    # Overwrite the manifest file (if there was one already).  The webstore
    # public key is supplied so the unpacked AppID stays stable.
    PnaclPackaging.GenerateManifests(all_dir,
                                     version_quad,
                                     'all',
                                     GetWebAccessibleResources(all_dir),
                                     True,
                                     PnaclPackaging.WEBSTORE_PUBLIC_KEY)
def BuildExtensionStyle(version_quad, lib_overrides, options):
    """ Package the pnacl components 3 ways, all of which are
    chrome-extension-like.

    1) Arch-specific CRXes that can be queried by Omaha.
    2) A zip containing all arch files for the Chrome Webstore.
    3) An unpacked extension with all arch files for offline testing.
    """
    StepBanner("BUILD_ALL",
               "Packaging extension for version: %s" % version_quad)
    for arch in ARCHES:
        BuildArchCRXForComponentUpdater(version_quad, arch, lib_overrides,
                                        options)
    LayoutAllDir(version_quad)
    if not options.unpacked_only:
        BuildCWSZip(version_quad)
    BuildUnpacked(version_quad)
######################################################################
def UseWhitelistedChars(orig_basename, arch):
    """ Make the filename match the pattern expected by nacl_file_host.
    Currently, this assumes there is prefix "pnacl_public_" and
    that the allowed chars are in the set [a-zA-Z0-9_].
    """
    if arch:
        prefixed = 'pnacl_public_%s_%s' % (arch, orig_basename)
    else:
        prefixed = 'pnacl_public_%s' % orig_basename
    # Replace every disallowed character with an underscore.
    result = re.sub(r'[^a-zA-Z0-9_]', '_', prefixed)
    logging.info('UseWhitelistedChars using: %s' % result)
    return result
def CopyFlattenDirsAndPrefix(src_dir, arch, dest_dir):
    """ Copy files from src_dir to dest_dir.

    When copying, also rename the files such that they match the white-listing
    pattern in chrome/browser/nacl_host/nacl_file_host.cc.
    """
    for root, _, files in os.walk(src_dir, followlinks=True):
        for fname in files:
            # Assume a flat directory.
            assert fname == os.path.basename(fname)
            shutil.copy2(J(root, fname),
                         J(dest_dir, UseWhitelistedChars(fname, arch)))
def BuildArchForInstaller(version_quad, arch, lib_overrides, options):
    """ Build an architecture specific version for the chrome installer.
    """
    target_dir = PnaclDirs.OutputDir()
    StepBanner('BUILD INSTALLER',
               'Packaging for arch %s in %s' % (arch, target_dir))
    # Copy llc.nexe and ld.nexe, but with some renaming and directory
    # flattening.
    CopyFlattenDirsAndPrefix(PnaclDirs.SandboxedCompilerDir(arch),
                             arch, target_dir)
    # Copy native libraries, also with renaming and directory flattening.
    CopyFlattenDirsAndPrefix(PnaclDirs.LibDir(arch), arch, target_dir)
    # Overrides need the arch tagged onto the name too, like the other files.
    for override in lib_overrides.get(arch, []):
        override_base = os.path.basename(override)
        shutil.copy2(override,
                     J(target_dir, UseWhitelistedChars(override_base, arch)))
def BuildInstallerStyle(version_quad, lib_overrides, options):
    """ Package the pnacl component for use within the chrome installer
    infrastructure. These files need to be named in a special way
    so that white-listing of files is easy.
    """
    StepBanner("BUILD_ALL",
               "Packaging installer for version: %s" % version_quad)
    arches = DetermineInstallerArches(options.installer_only)
    for arch in arches:
        BuildArchForInstaller(version_quad, arch, lib_overrides, options)
    # Generate pnacl info manifest.
    # Hack around the fact that there may be more than one arch, on Windows.
    if len(arches) == 1:
        arches = arches[0]
    PnaclPackaging.GeneratePnaclInfo(PnaclDirs.OutputDir(), version_quad,
                                     arches, is_installer=True)
######################################################################
def Main():
    """Parse the command line and run the requested packaging flow.

    Returns a process exit status (0 on success, 1 on bad version).
    """
    usage = 'usage: %prog [options] version_arg'
    parser = optparse.OptionParser(usage)
    # We may want to accept a target directory to dump it in the usual
    # output directory (e.g., scons-out).
    parser.add_option('-c', '--clean', dest='clean',
                      action='store_true', default=False,
                      help='Clean out destination directory first.')
    parser.add_option('-u', '--unpacked_only', action='store_true',
                      dest='unpacked_only', default=False,
                      help='Only generate the unpacked version')
    parser.add_option('-i', '--installer_only',
                      dest='installer_only', default=None,
                      help='Only generate the chrome installer version for arch')
    parser.add_option('-d', '--dest', dest='dest',
                      help='The destination root for laying out the extension')
    parser.add_option('-p', '--priv_key',
                      dest='prev_priv_key', default=None,
                      help='Specify the old private key')
    parser.add_option('-L', '--lib_override',
                      dest='lib_overrides', action='append', default=[],
                      help='Specify path to a fresher native library ' +
                           'that overrides the tarball library with ' +
                           '(arch:libfile) tuple.')
    parser.add_option('-g', '--generate_key',
                      action='store_true', dest='gen_key',
                      help='Generate a fresh private key, and exit.')
    parser.add_option('-C', '--chrome_path', dest='chrome_path',
                      help='Location of chrome.')
    parser.add_option('-v', '--verbose', dest='verbose', default=False,
                      action='store_true',
                      help='Print verbose debug messages.')
    (options, args) = parser.parse_args()
    if options.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.ERROR)
    logging.info('pnacl_component_crx_gen w/ options %s and args %s\n'
                 % (options, args))
    # Set destination directory before doing any cleaning, etc.
    if options.dest:
        PnaclDirs.SetOutputDir(options.dest)
    if options.clean:
        Clean()
    if options.gen_key:
        GeneratePrivateKey(options)
        return 0
    # Parse the -L overrides into {arch: [libfile, ...]}.
    lib_overrides = {}
    for o in options.lib_overrides:
        arch, override_lib = o.split(',')
        arch = CanonicalArch(arch)
        if not IsValidArch(arch):
            raise Exception('Unknown arch for -L: %s (from %s)' % (arch, o))
        if not os.path.isfile(override_lib):
            raise Exception('Override native lib not a file for -L: %s (from %s)' %
                            (override_lib, o))
        lib_overrides.setdefault(arch, []).append(override_lib)
    if len(args) != 1:
        parser.print_help()
        parser.error('Incorrect number of arguments')
    version_quad = args[0]
    if not IsValidVersion(version_quad):
        # print(...) works as a statement in py2 and a call in py3
        # (the old py2-only `print expr` form broke py3 parsing).
        print('Invalid version format: %s\n' % version_quad)
        return 1
    if options.installer_only:
        BuildInstallerStyle(version_quad, lib_overrides, options)
    else:
        BuildExtensionStyle(version_quad, lib_overrides, options)
    return 0
# Script entry point: propagate Main()'s status code to the shell.
if __name__ == '__main__':
    sys.exit(Main())
| |
"""Support for Epson projector."""
from __future__ import annotations
import logging
from epson_projector.const import (
BACK,
BUSY,
CMODE,
CMODE_LIST,
CMODE_LIST_SET,
DEFAULT_SOURCES,
EPSON_CODES,
FAST,
INV_SOURCES,
MUTE,
PAUSE,
PLAY,
POWER,
SOURCE,
SOURCE_LIST,
STATE_UNAVAILABLE as EPSON_STATE_UNAVAILABLE,
TURN_OFF,
TURN_ON,
VOL_DOWN,
VOL_UP,
VOLUME,
)
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
from .const import ATTR_CMODE, DOMAIN, SERVICE_SELECT_CMODE
_LOGGER = logging.getLogger(__name__)

# Bitmask of media-player features this integration implements.
SUPPORT_EPSON = (
    SUPPORT_TURN_ON
    | SUPPORT_TURN_OFF
    | SUPPORT_SELECT_SOURCE
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_VOLUME_STEP
    | SUPPORT_NEXT_TRACK
    | SUPPORT_PREVIOUS_TRACK
)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Epson projector from a config entry."""
    projector = hass.data[DOMAIN][config_entry.entry_id]
    entity = EpsonProjectorMediaPlayer(
        projector=projector,
        name=config_entry.title,
        unique_id=config_entry.unique_id,
        entry=config_entry,
    )
    # True -> request a state update right after the entity is added.
    async_add_entities([entity], True)
    # Register the select_cmode service on this platform's entities.
    platform = entity_platform.async_get_current_platform()
    platform.async_register_entity_service(
        SERVICE_SELECT_CMODE,
        {vol.Required(ATTR_CMODE): vol.All(cv.string, vol.Any(*CMODE_LIST_SET))},
        SERVICE_SELECT_CMODE,
    )
class EpsonProjectorMediaPlayer(MediaPlayerEntity):
    """Representation of Epson Projector Device."""

    def __init__(self, projector, name, unique_id, entry):
        """Initialize entity to control Epson projector.

        :param projector: epson_projector client used for all device I/O
        :param name: friendly entity name (the config entry title)
        :param unique_id: serial-based unique id, or None when not yet known
        :param entry: owning config entry (needed for unique-id migration)
        """
        self._projector = projector
        self._entry = entry
        self._name = name
        # Unavailable until the first successful poll in async_update().
        self._available = False
        self._cmode = None
        self._source_list = list(DEFAULT_SOURCES.values())
        self._source = None
        self._volume = None
        self._state = None
        self._unique_id = unique_id

    async def set_unique_id(self):
        """Set unique id for projector config entry.

        Returns True only when a fresh serial number was just assigned
        (a config-entry reload is scheduled in that case); falsy otherwise.
        """
        _LOGGER.debug("Setting unique_id for projector")
        if self._unique_id:
            # Already migrated; nothing to do.
            return False
        if uid := await self._projector.get_serial_number():
            self.hass.config_entries.async_update_entry(self._entry, unique_id=uid)
            registry = async_get_entity_registry(self.hass)
            # The pre-migration entity was registered with the entry_id as
            # its unique id; re-key that registry entry to the serial number.
            old_entity_id = registry.async_get_entity_id(
                "media_player", DOMAIN, self._entry.entry_id
            )
            if old_entity_id is not None:
                registry.async_update_entity(old_entity_id, new_unique_id=uid)
            # Reload so the entity is recreated under the new unique id.
            self.hass.async_create_task(
                self.hass.config_entries.async_reload(self._entry.entry_id)
            )
            return True

    async def async_update(self):
        """Update state of device."""
        power_state = await self._projector.get_power()
        _LOGGER.debug("Projector status: %s", power_state)
        if not power_state or power_state == EPSON_STATE_UNAVAILABLE:
            self._available = False
            return
        self._available = True
        if power_state == EPSON_CODES[POWER]:
            self._state = STATE_ON
            if await self.set_unique_id():
                # Entry is being reloaded; skip the rest of this poll.
                return
            self._source_list = list(DEFAULT_SOURCES.values())
            cmode = await self._projector.get_property(CMODE)
            # Keep the previous value when the device reports an unknown code.
            self._cmode = CMODE_LIST.get(cmode, self._cmode)
            source = await self._projector.get_property(SOURCE)
            self._source = SOURCE_LIST.get(source, self._source)
            volume = await self._projector.get_property(VOLUME)
            if volume:
                self._volume = volume
        elif power_state == BUSY:
            # Warming up / cooling down is still reported as "on".
            self._state = STATE_ON
        else:
            self._state = STATE_OFF

    @property
    def device_info(self) -> DeviceInfo | None:
        """Get attributes about the device."""
        if not self._unique_id:
            return None
        # NOTE(review): via_device points at this device's own identifiers;
        # confirm this is intentional rather than a hub reference.
        return DeviceInfo(
            identifiers={(DOMAIN, self._unique_id)},
            manufacturer="Epson",
            model="Epson",
            name="Epson projector",
            via_device=(DOMAIN, self._unique_id),
        )

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def unique_id(self):
        """Return unique ID."""
        return self._unique_id

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def available(self):
        """Return if projector is available."""
        return self._available

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_EPSON

    async def async_turn_on(self):
        """Turn on epson."""
        # Only send the command when we know the projector is off.
        if self._state == STATE_OFF:
            await self._projector.send_command(TURN_ON)

    async def async_turn_off(self):
        """Turn off epson."""
        if self._state == STATE_ON:
            await self._projector.send_command(TURN_OFF)

    @property
    def source_list(self):
        """List of available input sources."""
        return self._source_list

    @property
    def source(self):
        """Get current input sources."""
        return self._source

    @property
    def volume_level(self):
        """Return the volume level of the media player (0..1)."""
        # NOTE(review): the raw get_property(VOLUME) value is returned
        # as-is; confirm the device already reports a 0..1 scale.
        return self._volume

    async def select_cmode(self, cmode):
        """Set color mode in Epson.

        Entity-service handler registered as SERVICE_SELECT_CMODE.
        """
        await self._projector.send_command(CMODE_LIST_SET[cmode])

    async def async_select_source(self, source):
        """Select input source."""
        selected_source = INV_SOURCES[source]
        await self._projector.send_command(selected_source)

    async def async_mute_volume(self, mute):
        """Mute (true) or unmute (false) sound."""
        # NOTE(review): the requested `mute` value is not used -- a single
        # MUTE command is sent either way; presumably it acts as a toggle.
        await self._projector.send_command(MUTE)

    async def async_volume_up(self):
        """Increase volume."""
        await self._projector.send_command(VOL_UP)

    async def async_volume_down(self):
        """Decrease volume."""
        await self._projector.send_command(VOL_DOWN)

    async def async_media_play(self):
        """Play media via Epson."""
        await self._projector.send_command(PLAY)

    async def async_media_pause(self):
        """Pause media via Epson."""
        await self._projector.send_command(PAUSE)

    async def async_media_next_track(self):
        """Skip to next."""
        await self._projector.send_command(FAST)

    async def async_media_previous_track(self):
        """Skip to previous."""
        await self._projector.send_command(BACK)

    @property
    def extra_state_attributes(self):
        """Return device specific state attributes."""
        if self._cmode is None:
            return {}
        return {ATTR_CMODE: self._cmode}
| |
"""
molecules.py
Define the molecules that make up a simple DNA chain
"""
from copy import deepcopy
import numpy as np
import pandas as pd
from fractaldna.dna_models import dnapositions as dpos
from fractaldna.utils import rotations as rot
# Physical parameters, all in Angstrom
# Bases are modelled as ellipsoids: equivalent (x, y, z) half-lengths
# derived from the cylindrical atom positions of each base.
GUANINE_SIZE = dpos.MoleculeFromAtoms.from_cylindrical(
    dpos.GUANINE
).find_equivalent_half_lengths()
ADENINE_SIZE = dpos.MoleculeFromAtoms.from_cylindrical(
    dpos.ADENINE
).find_equivalent_half_lengths()
THYMINE_SIZE = dpos.MoleculeFromAtoms.from_cylindrical(
    dpos.THYMINE
).find_equivalent_half_lengths()
CYTOSINE_SIZE = dpos.MoleculeFromAtoms.from_cylindrical(
    dpos.CYTOSINE
).find_equivalent_half_lengths()
# Sugar and phosphate groups are modelled as spheres: equivalent radius only.
SUGAR_RADIUS = dpos.MoleculeFromAtoms.from_cylindrical(
    dpos.DEOXYRIBOSE
).find_equivalent_radius()
PHOSPHATE_RADIUS = dpos.MoleculeFromAtoms.from_cylindrical(
    dpos.PHOSPHATE
).find_equivalent_radius()
class Molecule:
    """Create a molecule

    :param name: molecule name
    :param shape: shape of molecule
    :param dimensions: dimensions of molecule shape
        (3-vector for xyz-dims, int/float for radius)
    :param strand: int to hold an ID related to a localised strand
    :param chain: int to hold an ID related to a macromolecule/protein or
        DNA chain
    :param position: 3-vector for molecule position relative to global axis
    :param rotation: 3-vector of euler angles (radians) for molecule
        rotation relative to the global xyz axis
    :param index: index of base pair along chain
    """

    def __init__(
        self,
        name: str,
        shape: str,
        dimensions: np.array,
        strand: int = -1,
        chain: int = -1,
        position: np.array = np.zeros(3),
        rotation: np.array = np.zeros(3),
        index: int = 0,
    ):
        """constructor"""
        # A scalar dimension means a radius: expand it to (r, r, r).
        if type(dimensions) in [int, float, np.float64]:
            self.dimensions = np.array([deepcopy(dimensions)] * 3)
        else:
            # BUG FIX: the assert message used to say "Position is invalid".
            assert len(dimensions) == 3, "Dimensions are invalid"
            self.dimensions = deepcopy(dimensions)
        # deepcopy guards against callers (and the shared np.zeros default
        # arrays) being mutated through this instance.
        self.shape = deepcopy(shape)
        self.name = deepcopy(name)
        self.position = deepcopy(position)
        self.rotation = deepcopy(rotation)
        self.strand = strand
        self.chain = chain
        self.index = index

    def translate(self, translation: np.array) -> None:
        """
        Molecule.translate(translation)

        :param translation: Translate the molecule by (x, y, z)
        """
        # '+' builds a new array, so the caller's array is never aliased.
        self.position = self.position + translation
        return None

    def rotate(self, rotation: np.array) -> None:
        """
        Molecule.rotate(rotation)

        Rotate molecule by [X_angle, Y_angle, Z_angle]

        :param rotation: Euler angles for rotation (3-vector) or a
            3x3 rotation matrix
        :raises NotImplementedError: if the argument is neither a
            3-vector nor a 3x3 matrix
        """
        if rotation.size == 3:
            rmatrix = rot.eulerMatrix(rotation[0], rotation[1], rotation[2])
        elif rotation.shape == (3, 3):
            rmatrix = rotation
        else:
            # BUG FIX: this used to `return` the exception object instead of
            # raising it, so invalid rotations were silently ignored.
            raise NotImplementedError("The rotation was invalid")
        # Compose the new rotation with the existing orientation and store
        # the result back as Euler angles.
        oldrotation = rot.eulerMatrix(*self.rotation)
        newrotation = rmatrix
        self.rotation = rot.getEulerAngles(np.dot(newrotation, oldrotation))
        return None

    def to_text(self, seperator: str = " ") -> str:
        """Return a text description of the molecule

        Molecule.toText(seperator=" ")

        :param seperator: seperation character
        """
        return (
            seperator.join(
                [
                    self.name,
                    self.shape,
                    str(self.chain),
                    str(self.strand),
                    str(self.index),
                    " ".join(map(str, self.dimensions)),
                    " ".join(map(str, self.position)),
                    " ".join(map(str, self.rotation)),
                ]
            )
            + "\n"
        )

    def to_series(self) -> pd.Series:
        """Convert molecule to a pandas series

        :return: Series representation of the molecule
        """
        return pd.Series(
            {
                "name": self.name,
                "shape": self.shape,
                "chain_idx": self.chain,
                "strand_idx": self.strand,
                "bp_idx": self.index,
                "size_x": self.dimensions[0],
                "size_y": self.dimensions[1],
                "size_z": self.dimensions[2],
                "pos_x": self.position[0],
                "pos_y": self.position[1],
                "pos_z": self.position[2],
                "rot_x": self.rotation[0],
                "rot_y": self.rotation[1],
                "rot_z": self.rotation[2],
            }
        )
# Define some standard molecules
class Guanine(Molecule):
    """
    Guanine molecule

    :param strand: strand ID
    :param chain: Chain ID
    :param position: position array (3-vector)
    :param rotation: rotation array (euler angles)
    :param index: base pair index
    """

    def __init__(self, strand: int = -1, chain: int = -1,
                 position: np.array = np.zeros(3),
                 rotation: np.array = np.zeros(3), index: int = 0):
        """Construct a guanine base sized from its atomic positions."""
        super().__init__("Guanine", "ellipse", GUANINE_SIZE,
                         strand=strand, chain=chain, position=position,
                         rotation=rotation, index=index)
class Adenine(Molecule):
    """Adenine molecule

    :param strand: strand ID
    :param chain: Chain ID
    :param position: position array (3-vector)
    :param rotation: rotation array (euler angles)
    :param index: base pair index
    """

    def __init__(self, strand: int = -1, chain: int = -1,
                 position: np.array = np.zeros(3),
                 rotation: np.array = np.zeros(3), index: int = 0):
        """Construct an adenine base sized from its atomic positions."""
        super().__init__("Adenine", "ellipse", ADENINE_SIZE,
                         strand=strand, chain=chain, position=position,
                         rotation=rotation, index=index)
class Thymine(Molecule):
    """Thymine molecule

    :param strand: strand ID
    :param chain: Chain ID
    :param position: position array (3-vector)
    :param rotation: rotation array (euler angles)
    :param index: base pair index
    """

    def __init__(self, strand: int = -1, chain: int = -1,
                 position: np.array = np.zeros(3),
                 rotation: np.array = np.zeros(3), index: int = 0):
        """Construct a thymine base sized from its atomic positions."""
        super().__init__("Thymine", "ellipse", THYMINE_SIZE,
                         strand=strand, chain=chain, position=position,
                         rotation=rotation, index=index)
class Cytosine(Molecule):
    """
    Cytosine molecule

    :param strand: strand ID
    :param chain: Chain ID
    :param position: position array (3-vector)
    :param rotation: rotation array (euler angles)
    :param index: base pair index
    """

    def __init__(self, strand: int = -1, chain: int = -1,
                 position: np.array = np.zeros(3),
                 rotation: np.array = np.zeros(3), index: int = 0):
        """Construct a cytosine base sized from its atomic positions."""
        super().__init__("Cytosine", "ellipse", CYTOSINE_SIZE,
                         strand=strand, chain=chain, position=position,
                         rotation=rotation, index=index)
class DNASugar(Molecule):
    """
    DNASugar molecule

    :param strand: strand ID
    :param chain: Chain ID
    :param position: position array (3-vector)
    :param rotation: rotation array (euler angles)
    :param index: base pair index
    """

    def __init__(self, strand: int = -1, chain: int = -1,
                 position: np.array = np.zeros(3),
                 rotation: np.array = np.zeros(3), index: int = 0):
        """Construct a deoxyribose sugar modelled as a sphere."""
        super().__init__("Sugar", "sphere", SUGAR_RADIUS,
                         strand=strand, chain=chain, position=position,
                         rotation=rotation, index=index)
class Triphosphate(Molecule):
    """
    Triphosphate molecule

    :param strand: strand ID
    :param chain: Chain ID
    :param position: position array (3-vector)
    :param rotation: rotation array (euler angles)
    :param index: base pair index
    """

    def __init__(self, strand: int = -1, chain: int = -1,
                 position: np.array = np.zeros(3),
                 rotation: np.array = np.zeros(3), index: int = 0):
        """Construct a phosphate group modelled as a sphere."""
        super().__init__("Phosphate", "sphere", PHOSPHATE_RADIUS,
                         strand=strand, chain=chain, position=position,
                         rotation=rotation, index=index)
| |
# Copyright 2016 Peter Dahlberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from collections import namedtuple, deque
from pelican_deploy.gittool import Repo, log_git_result
from functools import partial
from subprocess import Popen, PIPE, check_call
from pelican_deploy.util import exception_logged
from concurrent.futures import ThreadPoolExecutor
from threading import RLock, Thread
from datetime import datetime
import pytz
import sys
import logging
import shlex
import os
log = logging.getLogger(__name__)
# Pre-bound git-result logger: stdout/stderr/status all go to debug level.
log_git = partial(log_git_result, out_logger=log.debug,
                  err_logger=log.debug, status_logger=log.debug)

# Per-runner file/directory name templates inside the working directory.
TOX_RESULT_FILE = "{name}_result.json"
BUILD_REPO_DIR = "{name}_build_repo"
OUTPUT_DIR = "{name}_output"
# Maximum number of BuildStatus entries retained per runner.
STATUS_LEN = 500

# One build-status snapshot: timestamp, success flag, human-readable
# message, optional payload (e.g. an exception), and a "running" flag.
BuildStatus = namedtuple("BuildStatus", "date ok msg payload running")
class PullError(Exception):
    """Raised when pulling changes from the origin repository fails."""
class DeploymentRunner:
    def __init__(self, name, runner_config):
        """Initialize a deployment runner.

        :param name: unique runner name; used to derive per-runner paths.
        :param runner_config: mapping with keys ``working_directory``,
            ``clone_url``, ``git_branch``, ``build_command``,
            ``final_install_command`` and optionally ``build_env``.
        """
        self.name = name
        self.working_directory = Path(runner_config["working_directory"])
        if not self.working_directory.exists():
            log.info("creating working directory for %s: %s", name,
                     self.working_directory)
            self.working_directory.mkdir(parents=True)
        # Resolve to an absolute path only after the directory exists.
        self.working_directory = self.working_directory.resolve()
        self.clone_url = runner_config["clone_url"]
        self.git_branch = runner_config["git_branch"]
        self.build_repo_path = self.working_directory / BUILD_REPO_DIR.format(
            name=name)
        outdir = self.working_directory / OUTPUT_DIR.format(name=name)
        toxresult = self.working_directory / TOX_RESULT_FILE.format(name=name)
        # The configured commands may reference {output} / {toxresult}.
        self.build_command = runner_config["build_command"].format(
            output=outdir, toxresult=toxresult)
        self.final_install_command = runner_config["final_install_command"]\
            .format(output=outdir)
        self._output_dir = outdir
        # Build subprocess environment: current env plus configured overrides.
        self._build_proc_env = dict(os.environ,
                                    **runner_config.get("build_env", {}))
        # Single worker thread: builds for this runner are serialized.
        self._executor = ThreadPoolExecutor(max_workers=1)
        self._futures = set()
        self._build_proc = None
        self._abort = False
        self._build_lock = RLock()
        self._repo_update_lock = RLock()
        # Ring buffer of the most recent STATUS_LEN BuildStatus entries.
        self.build_status = deque(maxlen=STATUS_LEN)
def clean_working_dir(self, abort_running=True):
Thread(target=self.clean_working_dir_blocking).start()
    def clean_working_dir_blocking(self, abort_running=True):
        """Wipe the build repo and output dir; blocks until done.

        The removal itself is submitted to the single-worker executor so it
        is serialized with builds; this method waits on the future.

        :param abort_running: when True, kill any running build first.
        """
        def clean_fn():
            # rm -rf both the checkout and the generated output.
            rmpaths = [str(self.build_repo_path), str(self._output_dir)]
            for p in rmpaths:
                check_call(["rm", "-rf", p])
        with self._build_lock:
            if abort_running:
                self.try_abort_build()
            # cancel everything, so we are next
            for fut in self._futures.copy():
                fut.cancel()
                if fut.done():
                    self._futures.remove(fut)
            def build_job():
                log.info("Starting cleaning of working dir!")
                self.update_status(True, "Starting cleaning of working dir!",
                                   running=False)
                try:
                    exception_logged(clean_fn, log.error)()
                except Exception as e:
                    # Record the failure in the status ring before re-raising
                    # (the exception travels to future.result() below).
                    self.update_status(False, "Cleaning failed!",
                                       running=False, payload={"exception": e})
                    raise
            future = self._executor.submit(build_job)
            self._futures.add(future)
            # Block until the clean job has actually run.
            future.result()
            log.info("Working dir cleand!")
            self.update_status(True, "Working dir cleand!", running=False)
def update_status(self, ok, msg, payload=None, running=True):
date = pytz.utc.localize(datetime.utcnow())
self.build_status.append(BuildStatus(date, ok, msg, payload, running))
def update_build_repository(self):
with self._repo_update_lock:
self._update_build_repository()
def _update_build_repository(self):
if not self.build_repo_path.exists():
self.build_repo_path.mkdir(parents=True)
repo = Repo(str(self.build_repo_path))
if not repo.is_repo():
if self.build_repo_path.is_dir() and \
next(self.build_repo_path.iterdir(), None) is not None:
log.error(
"non-empty %s exists but not a valid git repository!",
self.build_repo_path)
raise RuntimeError(("non-empty {} exists but not a"
"valid git repository!").format(self.build_repo_path))
else:
log.info("Build repository %s not there, cloning",
self.build_repo_path)
result = repo.clone("--branch", self.git_branch,
"--depth", "1", self.clone_url, ".")
log_git(result)
origin_url = repo.config_get("remote.origin.url")
if origin_url != self.clone_url:
log.info("%s build_repo: URL of git origin changed (`%s` --> `%s`),\
adjusting...", self.name, origin_url, self.clone_url)
repo.config("remote.origin.url", self.clone_url)
# deinit submodules to avoid removed ones dangling around later
# they should stay around in .git, so reinit should be fast
result = repo.submodule("deinit", "--force", ".")
log_git(result)
result = repo.checkout("--force", self.git_branch)
log_git(result)
result = repo.reset("--hard")
log_git(result)
log.info("%s build_repo: pulling changes from origin", self.name)
refspec = "+{b}:{b}".format(b=self.git_branch)
try:
result = repo.pull("--force", "--recurse-submodules",
"--depth", "1", "origin", refspec)
log_git(result)
except Exception as e:
# need to reinit the submodules
self._update_build_repo_submodules(repo)
raise PullError from e
try:
result = repo.clean("--force", "-d", "-x")
log_git(result)
except:
log.warning("git clean failed!", exc_info=True)
# update the submodules
self._update_build_repo_submodules(repo)
def _update_build_repo_submodules(self, repo):
    """Sync submodule URLs and (re)initialize all submodules recursively."""
    log.info("%s build_repo: update submodules", self.name)
    # we must update the urls if changed!
    for submodule_args in (("sync", "--recursive"),
                           ("update", "--init", "--force", "--recursive")):
        log_git(repo.submodule(*submodule_args))
def build(self, abort_running=False, wait=False, ignore_pull_error=False,
          build_fn=None):
    """Schedule a build job on the executor.

    Args:
        abort_running: kill a currently running build and cancel all queued
            futures before scheduling this one.
        wait: block until the build finishes; returns the job's result.
        ignore_pull_error: forwarded to _build_blocking (continue with the
            existing checkout when `git pull` fails).
        build_fn: optional callable to run instead of the default
            _build_blocking (presumably for tests/custom builds — TODO confirm).
    """
    with self._build_lock:
        if abort_running:
            self.try_abort_build()
            # cancel everything, so we are next
            for fut in self._futures.copy():
                fut.cancel()
                if fut.done():
                    self._futures.remove(fut)
        build_bl = partial(self._build_blocking, ignore_pull_error=
                           ignore_pull_error)
        build_fn = build_fn if build_fn else build_bl
        def build_job():
            # wrap so any exception is both logged and reflected in
            # build_status before being re-raised into the future
            build_func = exception_logged(build_fn, log.error)
            try:
                build_func()
            except Exception as e:
                self.update_status(False, "Build stopped with exception",
                                   running=False, payload={"exception": e})
                raise
        future = self._executor.submit(build_job)
        self._futures.add(future)
        if wait:
            return future.result()
def try_abort_build(self):
    """Signal the current build to abort and kill its subprocess, if any.

    Sets the abort flag first so _build_blocking skips the build phase,
    then best-effort kills the running build process.
    """
    proc = self._build_proc
    self._abort = True
    if proc:
        try:
            proc.kill()
        except Exception:
            # narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed; the kill
            # remains best-effort (process may already have exited).
            log.debug("unable to kill", exc_info=True)
def final_install(self):
    """Run the configured final_install_command and record its outcome.

    The command runs in its own session with stdout/stderr captured and
    logged.  A negative exit status means the process was killed by a
    signal; a positive one is reported as a failed status entry.
    """
    args = shlex.split(self.final_install_command)
    self.update_status(True, "Starting final_install",
                       payload={"cmd": args})
    log.info("%s: Starting final_install `%s`", self.name, args)
    proc = Popen(args, stdout=PIPE, stderr=PIPE, universal_newlines=True,
                 start_new_session=True)
    outs, errs = proc.communicate()
    status = proc.wait()
    if status < 0:
        # negative status: terminated by a signal
        log.info("%s: killed final_install_command (%s)", self.name, status)
    else:
        log.info("%s: finished final_install_command with status %s!",
                 self.name, status)
    log.info('%s final_install_command stdout: %s\n', self.name, outs)
    log.info('%s final_install_command stderr: %s\n', self.name, errs)
    if status > 0:
        self.update_status(False, ("final_install_command failed."
                                   " Website may be broken!"),
                           payload={"status": status,
                                    "stdout": outs, "stderr": errs})
        log.error("%s: final_install failed! Website may be broken!",
                  self.name)
    else:
        self.update_status(True, "finished final_install_command",
                           payload={"stdout": outs, "stderr": errs})
def _build_blocking(self, ignore_pull_error=False):
    """Run one full build synchronously: update repo, build, final install.

    Clears self._abort at the start; try_abort_build() can set it again to
    skip the build phase after the repository update.  Always appends a
    terminal "End of build" status (running=False) whose `ok` mirrors the
    last status entry.
    """
    self._abort = False
    # preparing build environment
    try:
        self.update_status(True, "Start updating repository")
        self.update_build_repository()
    except PullError:
        if ignore_pull_error:
            msg = "Git pull failed, trying to continue with what we have"
            self.update_status(False, msg)
            log.warning(msg, exc_info=True)
        else:
            raise
    # start the build if we should not abort
    if not self._abort:
        args = shlex.split(self.build_command)
        self.update_status(True, "Starting the main build command",
                           payload={"cmd": args})
        log.info("%s: Starting build_command `%s`", self.name, args)
        self._build_proc = Popen(args, stdout=PIPE, stderr=PIPE,
                                 cwd=str(self.build_repo_path),
                                 env=self._build_proc_env,
                                 universal_newlines=True,
                                 start_new_session=True)
        outs, errs = self._build_proc.communicate()
        status = self._build_proc.wait()
        # clear the handle so try_abort_build() has nothing to kill
        self._build_proc = None
        if status < 0:
            # negative status: killed by a signal (e.g. via try_abort_build)
            self.update_status(False, "killed build_command")
            log.info("%s: killed build_command", self.name)
        else:
            log.info("%s: finished build_command with status %s!",
                     self.name, status)
        log.info('%s build_command stdout: %s\n', self.name, outs)
        log.info('%s build_command stderr: %s\n', self.name, errs)
        if status == 0:
            self.update_status(True, "finished build_command",
                               payload={"stdout": outs, "stderr": errs})
            self.final_install()
        else:
            self.update_status(False, "build_command failed",
                               payload={"status": status,
                                        "stdout": outs, "stderr": errs})
    self.update_status(self.build_status[-1].ok, "End of build",
                       running=False)
def shutdown(self):
    """Abort any in-flight build and wait for the worker pool to drain."""
    self.try_abort_build()
    self._executor.shutdown(wait=True)
| |
#!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import time
import pexpect
class otCli:
    """Pexpect-driven wrapper around one OpenThread CLI node for tests.

    Depending on the NODE_TYPE environment variable the node is backed by a
    simulated CLI app ('sim', the default), a spinel-cli driven NCP
    simulation ('ncp-sim') or real hardware attached via UART ('soc').
    Most helpers send one CLI command and wait for the 'Done' prompt;
    getters parse the echoed value via pexpect match groups.

    Fixes vs. the previous revision (behavior otherwise unchanged):
    * get_network_name no longer raises UnboundLocalError when 'Done'
      arrives before any name token (returns None instead).
    * regex pattern literals are raw strings (regex-equivalent, avoids
      Python 3 invalid-escape-sequence warnings).
    * `!= None` comparisons use the idiomatic `is not None`.
    """

    def __init__(self, nodeid):
        self.nodeid = nodeid
        self.verbose = int(float(os.getenv('VERBOSE', 0)))
        self.node_type = os.getenv('NODE_TYPE', 'sim')

        if self.node_type == 'soc':
            self.__init_soc(nodeid)
        elif self.node_type == 'ncp-sim':
            self.__init_ncp_sim(nodeid)
        else:
            self.__init_sim(nodeid)

        if self.verbose:
            self.pexpect.logfile_read = sys.stdout

    def __init_sim(self, nodeid):
        """ Initialize a simulation node. """
        if "OT_CLI_PATH" in os.environ.keys():
            cmd = os.environ['OT_CLI_PATH']
        elif "top_builddir" in os.environ.keys():
            srcdir = os.environ['top_builddir']
            cmd = '%s/examples/apps/cli/ot-cli-ftd' % srcdir
        else:
            cmd = './ot-cli-ftd'
        cmd += ' %d' % nodeid
        print ("%s" % cmd)

        self.pexpect = pexpect.spawn(cmd, timeout=4)

        # Add delay to ensure that the process is ready to receive commands.
        time.sleep(0.2)

    def __init_ncp_sim(self, nodeid):
        """ Initialize an NCP simulation node. """
        if "top_builddir" in os.environ.keys():
            builddir = os.environ['top_builddir']
            if "top_srcdir" in os.environ.keys():
                srcdir = os.environ['top_srcdir']
            else:
                # fall back to this script's location relative to the tree
                srcdir = os.path.dirname(os.path.realpath(__file__))
                srcdir += "/../../.."
            cmd = 'python %s/tools/spinel-cli/spinel-cli.py -p %s/examples/apps/ncp/ot-ncp-ftd -n' % (srcdir, builddir)
        else:
            cmd = './ot-ncp-ftd'
        cmd += ' %d' % nodeid
        print ("%s" % cmd)

        self.pexpect = pexpect.spawn(cmd, timeout=4)
        time.sleep(0.2)
        self.pexpect.expect('spinel-cli >')
        self.debug(int(os.getenv('DEBUG', '0')))

    def __init_soc(self, nodeid):
        """ Initialize a System-on-a-chip node connected via UART. """
        # NOTE(review): fdpexpect moved to pexpect.fdpexpect in pexpect 4 —
        # confirm which pexpect version the soc path targets.
        import fdpexpect
        serialPort = '/dev/ttyUSB%d' % ((nodeid-1)*2)
        self.pexpect = fdpexpect.fdspawn(os.open(serialPort, os.O_RDWR|os.O_NONBLOCK|os.O_NOCTTY))

    def __del__(self):
        if self.pexpect.isalive():
            self.send_command('exit')
            self.pexpect.expect(pexpect.EOF)
            self.pexpect.terminate()
            self.pexpect.close(force=True)

    def send_command(self, cmd):
        """Echo and send one CLI command line to the node."""
        print ("%d: %s" % (self.nodeid, cmd))
        self.pexpect.sendline(cmd)

    def get_commands(self):
        """Return the list of commands reported by '?'."""
        self.send_command('?')
        self.pexpect.expect('Commands:')
        commands = []
        while True:
            i = self.pexpect.expect(['Done', r'(\S+)'])
            if i != 0:
                commands.append(self.pexpect.match.groups()[0])
            else:
                break
        return commands

    def set_mode(self, mode):
        cmd = 'mode ' + mode
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def debug(self, level):
        self.send_command('debug '+str(level))

    def interface_up(self):
        self.send_command('ifconfig up')
        self.pexpect.expect('Done')

    def interface_down(self):
        self.send_command('ifconfig down')
        self.pexpect.expect('Done')

    def thread_start(self):
        self.send_command('thread start')
        self.pexpect.expect('Done')

    def thread_stop(self):
        self.send_command('thread stop')
        self.pexpect.expect('Done')

    def commissioner_start(self):
        cmd = 'commissioner start'
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def commissioner_add_joiner(self, addr, psk):
        cmd = 'commissioner joiner add ' + addr + ' ' + psk
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def joiner_start(self, pskd='', provisioning_url=''):
        cmd = 'joiner start ' + pskd + ' ' + provisioning_url
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def clear_whitelist(self):
        self.send_command('whitelist clear')
        self.pexpect.expect('Done')

    def enable_whitelist(self):
        self.send_command('whitelist enable')
        self.pexpect.expect('Done')

    def disable_whitelist(self):
        self.send_command('whitelist disable')
        self.pexpect.expect('Done')

    def add_whitelist(self, addr, rssi=None):
        cmd = 'whitelist add ' + addr
        if rssi is not None:
            cmd += ' ' + str(rssi)
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def remove_whitelist(self, addr):
        cmd = 'whitelist remove ' + addr
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def get_addr16(self):
        """Return the RLOC16 as an int (None if no match before 'Done')."""
        self.send_command('rloc16')
        i = self.pexpect.expect('([0-9a-fA-F]{4})')
        if i == 0:
            addr16 = int(self.pexpect.match.groups()[0], 16)
            self.pexpect.expect('Done')
            return addr16

    def get_addr64(self):
        """Return the extended address as a hex string."""
        self.send_command('extaddr')
        i = self.pexpect.expect('([0-9a-fA-F]{16})')
        if i == 0:
            addr64 = self.pexpect.match.groups()[0].decode("utf-8")
            self.pexpect.expect('Done')
            return addr64

    def get_hashmacaddr(self):
        self.send_command('hashmacaddr')
        i = self.pexpect.expect('([0-9a-fA-F]{16})')
        if i == 0:
            addr = self.pexpect.match.groups()[0].decode("utf-8")
            self.pexpect.expect('Done')
            return addr

    def get_channel(self):
        self.send_command('channel')
        i = self.pexpect.expect(r'(\d+)\r\n')
        if i == 0:
            channel = int(self.pexpect.match.groups()[0])
            self.pexpect.expect('Done')
            return channel

    def set_channel(self, channel):
        cmd = 'channel %d' % channel
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def get_masterkey(self):
        self.send_command('masterkey')
        i = self.pexpect.expect('([0-9a-fA-F]{32})')
        if i == 0:
            masterkey = self.pexpect.match.groups()[0].decode("utf-8")
            self.pexpect.expect('Done')
            return masterkey

    def set_masterkey(self, masterkey):
        cmd = 'masterkey ' + masterkey
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def get_key_sequence_counter(self):
        self.send_command('keysequence counter')
        i = self.pexpect.expect(r'(\d+)\r\n')
        if i == 0:
            key_sequence_counter = int(self.pexpect.match.groups()[0])
            self.pexpect.expect('Done')
            return key_sequence_counter

    def set_key_sequence_counter(self, key_sequence_counter):
        cmd = 'keysequence counter %d' % key_sequence_counter
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def set_key_switch_guardtime(self, key_switch_guardtime):
        cmd = 'keysequence guardtime %d' % key_switch_guardtime
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def set_network_id_timeout(self, network_id_timeout):
        cmd = 'networkidtimeout %d' % network_id_timeout
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def get_network_name(self):
        self.send_command('networkname')
        # BUGFIX: initialize so an immediate 'Done' (no name token echoed)
        # returns None instead of raising UnboundLocalError.
        network_name = None
        while True:
            i = self.pexpect.expect(['Done', r'(\S+)'])
            if i != 0:
                network_name = self.pexpect.match.groups()[0].decode('utf-8')
            else:
                break
        return network_name

    def set_network_name(self, network_name):
        cmd = 'networkname ' + network_name
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def get_panid(self):
        self.send_command('panid')
        i = self.pexpect.expect('([0-9a-fA-F]{4})')
        if i == 0:
            panid = int(self.pexpect.match.groups()[0], 16)
            self.pexpect.expect('Done')
            return panid

    def set_panid(self, panid):
        cmd = 'panid %d' % panid
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def get_partition_id(self):
        self.send_command('leaderpartitionid')
        i = self.pexpect.expect(r'(\d+)\r\n')
        if i == 0:
            weight = self.pexpect.match.groups()[0]
            self.pexpect.expect('Done')
            return weight

    def set_partition_id(self, partition_id):
        cmd = 'leaderpartitionid %d' % partition_id
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def set_router_upgrade_threshold(self, threshold):
        cmd = 'routerupgradethreshold %d' % threshold
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def set_router_downgrade_threshold(self, threshold):
        cmd = 'routerdowngradethreshold %d' % threshold
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def release_router_id(self, router_id):
        cmd = 'releaserouterid %d' % router_id
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def get_state(self):
        """Return the node role: 'detached', 'child', 'router' or 'leader'."""
        states = ['detached', 'child', 'router', 'leader']
        self.send_command('state')
        match = self.pexpect.expect(states)
        self.pexpect.expect('Done')
        return states[match]

    def set_state(self, state):
        cmd = 'state ' + state
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def get_timeout(self):
        self.send_command('childtimeout')
        i = self.pexpect.expect(r'(\d+)\r\n')
        if i == 0:
            timeout = self.pexpect.match.groups()[0]
            self.pexpect.expect('Done')
            return timeout

    def set_timeout(self, timeout):
        cmd = 'childtimeout %d' % timeout
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def set_max_children(self, number):
        cmd = 'childmax %d' % number
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def get_weight(self):
        self.send_command('leaderweight')
        i = self.pexpect.expect(r'(\d+)\r\n')
        if i == 0:
            weight = self.pexpect.match.groups()[0]
            self.pexpect.expect('Done')
            return weight

    def set_weight(self, weight):
        cmd = 'leaderweight %d' % weight
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def add_ipaddr(self, ipaddr):
        cmd = 'ipaddr add ' + ipaddr
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def get_addrs(self):
        """Return the node's IPv6 addresses as a list of strings."""
        addrs = []
        self.send_command('ipaddr')

        while True:
            i = self.pexpect.expect([r'(\S+:\S+)\r\n', 'Done'])
            if i == 0:
                addrs.append(self.pexpect.match.groups()[0].decode("utf-8"))
            elif i == 1:
                break

        return addrs

    def get_context_reuse_delay(self):
        self.send_command('contextreusedelay')
        i = self.pexpect.expect(r'(\d+)\r\n')
        if i == 0:
            timeout = self.pexpect.match.groups()[0]
            self.pexpect.expect('Done')
            return timeout

    def set_context_reuse_delay(self, delay):
        cmd = 'contextreusedelay %d' % delay
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def add_prefix(self, prefix, flags, prf = 'med'):
        cmd = 'prefix add ' + prefix + ' ' + flags + ' ' + prf
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def remove_prefix(self, prefix):
        cmd = ' prefix remove ' + prefix
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def add_route(self, prefix, prf = 'med'):
        cmd = 'route add ' + prefix + ' ' + prf
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def remove_route(self, prefix):
        cmd = 'route remove ' + prefix
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def register_netdata(self):
        self.send_command('netdataregister')
        self.pexpect.expect('Done')

    def energy_scan(self, mask, count, period, scan_duration, ipaddr):
        cmd = 'commissioner energy ' + str(mask) + ' ' + str(count) + ' ' + str(period) + ' ' + str(scan_duration) + ' ' + ipaddr
        self.send_command(cmd)
        self.pexpect.expect('Energy:', timeout=8)

    def panid_query(self, panid, mask, ipaddr):
        cmd = 'commissioner panid ' + str(panid) + ' ' + str(mask) + ' ' + ipaddr
        self.send_command(cmd)
        self.pexpect.expect('Conflict:', timeout=8)

    def scan(self):
        """Return a list of match-group tuples, one per scan result row."""
        self.send_command('scan')

        results = []
        while True:
            i = self.pexpect.expect([r'\|\s(\S+)\s+\|\s(\S+)\s+\|\s([0-9a-fA-F]{4})\s\|\s([0-9a-fA-F]{16})\s\|\s(\d+)\r\n',
                                     'Done'])
            if i == 0:
                results.append(self.pexpect.match.groups())
            else:
                break

        return results

    def ping(self, ipaddr, num_responses=1, size=None):
        """Ping ipaddr; True iff num_responses distinct responders replied."""
        cmd = 'ping ' + ipaddr
        if size is not None:
            cmd += ' ' + str(size)

        self.send_command(cmd)

        result = True
        try:
            responders = {}
            while len(responders) < num_responses:
                i = self.pexpect.expect([r'from (\S+):'])
                if i == 0:
                    responders[self.pexpect.match.groups()[0]] = 1
            self.pexpect.expect('\n')
        except pexpect.TIMEOUT:
            result = False

        return result

    def set_router_selection_jitter(self, jitter):
        cmd = 'routerselectionjitter %d' % jitter
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def set_active_dataset(self, timestamp, panid=None, channel=None, channel_mask=None, master_key=None):
        """Build and commit the active operational dataset."""
        self.send_command('dataset clear')
        self.pexpect.expect('Done')

        cmd = 'dataset activetimestamp %d' % timestamp
        self.send_command(cmd)
        self.pexpect.expect('Done')

        if panid is not None:
            cmd = 'dataset panid %d' % panid
            self.send_command(cmd)
            self.pexpect.expect('Done')

        if channel is not None:
            cmd = 'dataset channel %d' % channel
            self.send_command(cmd)
            self.pexpect.expect('Done')

        if channel_mask is not None:
            cmd = 'dataset channelmask %d' % channel_mask
            self.send_command(cmd)
            self.pexpect.expect('Done')

        if master_key is not None:
            cmd = 'dataset masterkey ' + master_key
            self.send_command(cmd)
            self.pexpect.expect('Done')

        self.send_command('dataset commit active')
        self.pexpect.expect('Done')

    def set_pending_dataset(self, pendingtimestamp, activetimestamp, panid=None, channel=None):
        """Build and commit the pending operational dataset."""
        self.send_command('dataset clear')
        self.pexpect.expect('Done')

        cmd = 'dataset pendingtimestamp %d' % pendingtimestamp
        self.send_command(cmd)
        self.pexpect.expect('Done')

        cmd = 'dataset activetimestamp %d' % activetimestamp
        self.send_command(cmd)
        self.pexpect.expect('Done')

        if panid is not None:
            cmd = 'dataset panid %d' % panid
            self.send_command(cmd)
            self.pexpect.expect('Done')

        if channel is not None:
            cmd = 'dataset channel %d' % channel
            self.send_command(cmd)
            self.pexpect.expect('Done')

        self.send_command('dataset commit pending')
        self.pexpect.expect('Done')

    def announce_begin(self, mask, count, period, ipaddr):
        cmd = 'commissioner announce ' + str(mask) + ' ' + str(count) + ' ' + str(period) + ' ' + ipaddr
        self.send_command(cmd)
        self.pexpect.expect('Done')

    def send_mgmt_active_set(self, active_timestamp=None, channel=None, channel_mask=None, extended_panid=None,
                             panid=None, master_key=None, mesh_local=None, network_name=None, binary=None):
        """Send MGMT_ACTIVE_SET with only the supplied TLVs."""
        cmd = 'dataset mgmtsetcommand active '

        if active_timestamp is not None:
            cmd += 'activetimestamp %d ' % active_timestamp

        if channel is not None:
            cmd += 'channel %d ' % channel

        if channel_mask is not None:
            cmd += 'channelmask %d ' % channel_mask

        if extended_panid is not None:
            cmd += 'extpanid ' + extended_panid + ' '

        if panid is not None:
            cmd += 'panid %d ' % panid

        if master_key is not None:
            cmd += 'masterkey ' + master_key + ' '

        if mesh_local is not None:
            cmd += 'localprefix ' + mesh_local + ' '

        if network_name is not None:
            cmd += 'networkname ' + network_name + ' '

        if binary is not None:
            cmd += 'binary ' + binary + ' '

        self.send_command(cmd)
        self.pexpect.expect('Done')

    def send_mgmt_pending_set(self, pending_timestamp=None, active_timestamp=None, delay_timer=None, channel=None,
                              panid=None, master_key=None, mesh_local=None, network_name=None):
        """Send MGMT_PENDING_SET with only the supplied TLVs."""
        cmd = 'dataset mgmtsetcommand pending '

        if pending_timestamp is not None:
            cmd += 'pendingtimestamp %d ' % pending_timestamp

        if active_timestamp is not None:
            cmd += 'activetimestamp %d ' % active_timestamp

        if delay_timer is not None:
            cmd += 'delaytimer %d ' % delay_timer

        if channel is not None:
            cmd += 'channel %d ' % channel

        if panid is not None:
            cmd += 'panid %d ' % panid

        if master_key is not None:
            cmd += 'masterkey ' + master_key + ' '

        if mesh_local is not None:
            cmd += 'localprefix ' + mesh_local + ' '

        if network_name is not None:
            cmd += 'networkname ' + network_name + ' '

        self.send_command(cmd)
        self.pexpect.expect('Done')
| |
"""hiddenMarkovModel.py contains objects for handling HMMs for SignalAlign"""
from __future__ import print_function
import sys
import os
from itertools import izip
import numpy as np
# Globals
NORM_DIST_PARAMS = 2  # entries per kmer on event-expectation lines: [E_mean, E_sd]
NB_MODEL_PARAMS = 5  # entries per kmer on event-model lines: level mean/sd, noise mean/sd/lambda
class SignalHmm(object):
    """Base class for SignalAlign signal-level HMMs (Python 2 codebase).

    Holds the flattened state_number**2 transition vector, the per-kmer
    event (emission) model, and EM bookkeeping (expectations, likelihood).
    Subclasses add expectation accumulation and normalization.
    """
    def __init__(self, model_type):
        self.match_model_params = 5  # level_mean, level_sd, noise_mean, noise_sd, noise_lambda
        self.model_type = model_type  # ID of model type
        self.state_number = {"threeState": 3, "threeStateHdp": 3}[model_type]
        self.symbol_set_size = 0
        self.transitions = np.zeros(self.state_number**2)
        self.transitions_expectations = np.zeros(self.state_number**2)
        self.likelihood = 0.0
        self.running_likelihoods = []
        self.alphabet_size = 0
        self.alphabet = ""
        self.kmer_length = 0
        self.has_model = False
        self.normalized = False

        # event model for describing normal distributions for each kmer;
        # symbol_set_size is 0 here and only becomes known in load_model()
        self.event_model = {"means": np.zeros(self.symbol_set_size),
                            "SDs": np.zeros(self.symbol_set_size),
                            "noise_means": np.zeros(self.symbol_set_size),
                            "noise_SDs": np.zeros(self.symbol_set_size),
                            "noise_lambdas": np.zeros(self.symbol_set_size)}

    def normalize_transitions_expectations(self):
        # normalize transitions: each from-state row of the flattened
        # matrix is divided by its row sum so rows sum to 1
        for from_state in xrange(self.state_number):
            i = self.state_number * from_state
            j = sum(self.transitions_expectations[i:i + self.state_number])
            for to_state in xrange(self.state_number):
                self.transitions_expectations[i + to_state] = self.transitions_expectations[i + to_state] / j

    def set_default_transitions(self):
        """Install hand-tuned default 3x3 transition probabilities."""
        MATCH_CONTINUE = np.exp(-0.23552123624314988)  # stride
        MATCH_FROM_GAP_X = np.exp(-0.21880828092192281)  # 1 - skip'
        MATCH_FROM_GAP_Y = np.exp(-0.013406326748077823)  # 1 - (skip + stay)
        GAP_OPEN_X = np.exp(-1.6269694202638481)  # skip
        GAP_OPEN_Y = np.exp(-4.3187242127300092)  # 1 - (skip + stride)
        GAP_EXTEND_X = np.exp(-1.6269694202638481)  # skip'
        GAP_EXTEND_Y = np.exp(-4.3187242127239411)  # stay (1 - (skip + stay))
        GAP_SWITCH_TO_X = 0.000000001
        GAP_SWITCH_TO_Y = 0.0
        self.transitions = [
            MATCH_CONTINUE, GAP_OPEN_X, GAP_OPEN_Y,
            MATCH_FROM_GAP_X, GAP_EXTEND_X, GAP_SWITCH_TO_Y,
            MATCH_FROM_GAP_Y, GAP_SWITCH_TO_X, GAP_EXTEND_Y
        ]
        return

    def check_header_line(self, line, expectations_file):
        """Validate a split header line against this model; True if it matches."""
        if len(line) != 4:
            print("signalHmm.check_header_line - incorrect header (param line): {}".format(expectations_file), file=sys.stderr)
            return False
        if int(line[0]) != self.state_number:
            print("signalHmm.check_header_line - state number error should be {exp} got {obs}"
                  "".format(exp=self.state_number, obs=line[0]), file=sys.stderr)
            return False
        if int(line[1]) != self.alphabet_size:
            print("signalHmm.check_header_line - alphabet size error incorrect parameters: {file}, line {line}"
                  "".format(file=expectations_file, line=''.join(line)), file=sys.stderr)
            return False
        if line[2] != self.alphabet:
            print("signalHmm.check_header_line - incorrect parameters: {file}, line {line}"
                  "".format(file=expectations_file, line=''.join(line)), file=sys.stderr)
            return False
        if int(line[3]) != self.kmer_length:
            print("signalHmm.check_header_line - incorrect parameters: {file}, line {line}"
                  "".format(file=expectations_file, line=''.join(line)), file=sys.stderr)
            return False
        return True

    def load_model(self, model_file):
        """Load transitions, likelihood and per-kmer event model from a file."""
        # the model file has the format:
        # line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
        # line 1: match->match \t match->gapX \t match->gapY \t
        #         gapX->match \t gapX->gapX \t gapX->gapY \t
        #         gapY->match \t gapY->gapX \t gapY->gapY \n
        # line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
        assert os.path.exists(model_file), "signalHmm.load_model - didn't find model here{}?".format(model_file)

        fH = open(model_file, 'r')

        line = fH.readline().split()
        # check for correct header length
        assert len(line) == 4, "signalHmm.load_model - incorrect line length line:{}".format(''.join(line))
        # check stateNumber
        assert int(line[0]) == self.state_number, "signalHmm.load_model - incorrect stateNumber got {got} should be {exp}" \
                                                  "".format(got=int(line[0]), exp=self.state_number)
        # load model parameters
        self.alphabet_size = int(line[1])
        self.alphabet = line[2]
        self.kmer_length = int(line[3])
        self.symbol_set_size = self.alphabet_size**self.kmer_length
        assert self.symbol_set_size > 0, "signalHmm.load_model - Got 0 for symbol_set_size"
        assert self.symbol_set_size <= 6**6, "signalHmm.load_model - Got more than 6^6 for symbol_set_size got {}" \
                                             "".format(self.symbol_set_size)

        # line 1: transitions then trailing likelihood (Python 2: map gives a list)
        line = map(float, fH.readline().split())
        assert len(line) == len(self.transitions) + 1, "signalHmm.load_model incorrect transitions line"
        self.transitions = line[:-1]
        self.likelihood = line[-1]

        # line 2: interleaved event model, NB_MODEL_PARAMS values per kmer
        line = map(float, fH.readline().split())
        assert len(line) == self.symbol_set_size * NB_MODEL_PARAMS, \
            "signalHmm.load_model incorrect event model line"
        self.event_model["means"] = line[::NB_MODEL_PARAMS]
        self.event_model["SDs"] = line[1::NB_MODEL_PARAMS]
        self.event_model["noise_means"] = line[2::NB_MODEL_PARAMS]
        self.event_model["noise_SDs"] = line[3::NB_MODEL_PARAMS]
        self.event_model["noise_lambdas"] = line[4::NB_MODEL_PARAMS]

        assert not np.any(self.event_model["means"] == 0.0), "signalHmm.load_model, this model has 0 E_means"
        # BUGFIX: message previously said "0 E_means" for the SDs check (copy-paste)
        assert not np.any(self.event_model["SDs"] == 0.0), "signalHmm.load_model, this model has 0 E_SDs"
        assert not np.any(self.event_model["noise_means"] == 0.0), "signalHmm.load_model, this model has 0 E_noise_means"
        assert not np.any(self.event_model["noise_SDs"] == 0.0), "signalHmm.load_model, this model has 0 E_noise_SDs"

        self.has_model = True

    def write(self, out_file):
        """Write the model back out in the load_model() file format."""
        # the model file has the format:
        # line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
        # line 1: match->match \t match->gapX \t match->gapY \t
        #         gapX->match \t gapX->gapX \t gapX->gapY \t
        #         gapY->match \t gapY->gapX \t gapY->gapY \n
        # line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
        assert self.has_model, "Shouldn't be writing down a Hmm that has no Model"
        assert self.normalized, "Shouldn't be writing down a not normalized HMM"

        f = open(out_file, 'w')

        # line 0
        f.write("{stateNumber}\t{alphabetSize}\t{alphabet}\t{kmerLength}\n"
                "".format(stateNumber=self.state_number, alphabetSize=self.alphabet_size,
                          alphabet=self.alphabet, kmerLength=self.kmer_length))
        # line 1 transitions
        for i in xrange(self.state_number * self.state_number):
            f.write("{transition}\t".format(transition=str(self.transitions[i])))
        # likelihood
        f.write("{}\n".format(str(self.likelihood)))

        # line 2 Event Model
        for k in xrange(self.symbol_set_size):
            f.write("{level_mean}\t{level_sd}\t{noise_mean}\t{noise_sd}\t{noise_lambda}\t"
                    "".format(level_mean=self.event_model["means"][k], level_sd=self.event_model["SDs"][k],
                              noise_mean=self.event_model["noise_means"][k], noise_sd=self.event_model["noise_SDs"][k],
                              noise_lambda=self.event_model["noise_lambdas"][k]))
        f.write("\n")

        f.close()
class ContinuousPairHmm(SignalHmm):
    """HMM whose per-kmer normal emission model is updated by EM.

    add_expectations_file() accumulates transition/emission expectations
    from SignalAlign output; normalize() folds them into the model.
    """
    def __init__(self, model_type):
        super(ContinuousPairHmm, self).__init__(model_type=model_type)
        self.set_default_transitions()

        # bins for expectations (symbol_set_size is 0 until load_model())
        self.mean_expectations = np.zeros(self.symbol_set_size)
        self.sd_expectations = np.zeros(self.symbol_set_size)
        self.posteriors = np.zeros(self.symbol_set_size)
        self.observed = np.zeros(self.symbol_set_size, dtype=bool)
        self.has_model = False
        self.normalized = False

    def add_expectations_file(self, expectations_file):
        """Accumulate one expectations file into the running totals.

        Returns True on success; False (after a stderr diagnostic) when the
        file is missing/empty or any line fails validation.
        """
        # expectations files have the format:
        # line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
        # line 1: match->match \t match->gapX \t match->gapY \t
        #         gapX->match \t gapX->gapX \t gapX->gapY \t
        #         gapY->match \t gapY->gapX \t gapY->gapY \n
        # line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
        # line 3: event expectations [mean] [sd] / kmer \n
        # line 4: posteriors 1 per kmer \n
        # line 5: observed 1 per kmer \n
        if not os.path.exists(expectations_file) or os.stat(expectations_file).st_size == 0:
            print("Empty or missing file {}".format(expectations_file), file=sys.stderr)
            return False

        fH = open(expectations_file, 'r')

        # line 0
        line = fH.readline().split()
        header_line_check = self.check_header_line(line=line, expectations_file=expectations_file)
        if header_line_check is False:
            fH.close()
            return False

        # line 1: transitions, likelihood (Python 2: map returns a list)
        line = map(float, fH.readline().split())
        # check if valid
        if len(line) != (len(self.transitions) + 1):
            print("cpHMM: check_file - bad file (transitions expectations): {}".format(expectations_file),
                  file=sys.stderr)
            fH.close()
            return False

        self.likelihood += line[-1]
        # element-wise accumulate the transition expectations
        self.transitions_expectations = map(lambda x: sum(x), zip(self.transitions_expectations, line[0:-1]))

        # line 2: event model (validated for length only; values unused here)
        line = map(float, fH.readline().split())
        if len(line) != self.symbol_set_size * NB_MODEL_PARAMS:
            print("cpHMM: check_file - bad file (event model): {}".format(expectations_file), file=sys.stderr)
            fH.close()
            return False

        # line 3 event expectations [E_mean, E_sd], interleaved per kmer
        line = map(float, fH.readline().split())
        if len(line) != self.symbol_set_size * NORM_DIST_PARAMS:
            print("cpHMM: check_file - bad file (event expectations): {}".format(expectations_file), file=sys.stderr)
            fH.close()
            return False

        self.mean_expectations = [i + j for i, j in izip(self.mean_expectations, line[::NORM_DIST_PARAMS])]
        self.sd_expectations = [i + j for i, j in izip(self.sd_expectations, line[1::NORM_DIST_PARAMS])]

        # line 4, posteriors
        line = map(float, fH.readline().split())
        if len(line) != self.symbol_set_size:
            print("cpHMM: check_file - bad file (posteriors): {}".format(expectations_file), file=sys.stderr)
            fH.close()
            return False

        self.posteriors = map(lambda x: sum(x), zip(self.posteriors, line))

        # line 5, observed flags
        # NOTE(review): bool() on any non-empty token (even "0") is True, so
        # every listed kmer is marked observed — confirm the file format
        # only lists truly observed kmers.
        line = map(bool, fH.readline().split())
        if len(line) != self.symbol_set_size:
            print("cpHMM: check_file - bad file (observations): {}".format(expectations_file), file=sys.stderr)
            fH.close()
            return False

        self.observed = [any(b) for b in zip(self.observed, line)]

        fH.close()
        return True

    def normalize(self, update_transitions, update_emissions):
        """Fold accumulated expectations into transitions and/or emissions."""
        # normalize transitions expectations
        self.normalize_transitions_expectations()

        # update
        if update_transitions is True:
            for i in xrange(self.state_number**2):
                self.transitions[i] = self.transitions_expectations[i]

        # calculate the new expected mean and standard deviation for the kmer normal distributions
        if update_emissions:
            for k in xrange(self.symbol_set_size):  # TODO implement learning rate
                # NOTE(review): before any expectations file is added,
                # self.observed holds numpy bool_ values, for which
                # `is True` is always False — confirm intended.
                if self.observed[k] is True:
                    u_k = self.mean_expectations[k] / self.posteriors[k]
                    o_k = np.sqrt(self.sd_expectations[k] / self.posteriors[k])
                    if u_k > 0:
                        self.event_model["means"][k] = u_k
                        self.event_model["SDs"][k] = o_k
                else:
                    continue
        self.normalized = True
class HdpSignalHmm(SignalHmm):
    """HMM variant that collects event/kmer assignments for HDP training.

    Unlike ContinuousPairHmm this accumulates raw (event, kmer) assignment
    pairs from expectations files; normalize() only updates transitions.
    """
    def __init__(self, model_type):
        super(HdpSignalHmm, self).__init__(model_type=model_type)
        self.set_default_transitions()
        self.kmer_assignments = []
        self.event_assignments = []
        # lengths of previous assignment batches, recorded by reset_assignments()
        self.assignments_record = []

    def add_expectations_file(self, expectations_file):
        """Accumulate one expectations file; True on success, False on error."""
        # expectations files have the format:
        # line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
        # line 1: match->match \t match->gapX \t match->gapY \t
        #         gapX->match \t gapX->gapX \t gapX->gapY \t
        #         gapY->match \t gapY->gapX \t gapY->gapY \n
        # line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
        # line 3: event assignments
        # line 4: kmer assignments
        if not os.path.exists(expectations_file) or os.stat(expectations_file).st_size == 0:
            print("Empty or missing file {}".format(expectations_file))
            return

        fH = open(expectations_file, 'r')

        # line 0
        line = fH.readline().split()
        header_line_check = self.check_header_line(line=line, expectations_file=expectations_file)
        if header_line_check is False:
            fH.close()
            return False

        # line 1: transitions, likelihood (Python 2: map returns a list)
        line = map(float, fH.readline().split())
        if len(line) != (len(self.transitions) + 1):
            # CONSISTENCY FIX: diagnostic went to sys.stdout while every
            # other diagnostic in this module goes to sys.stderr.
            print("hdpHmm.add_expectations_file - problem with file {f} transitions line {l}, incorrect length"
                  "".format(f=expectations_file, l=''.join(line)), file=sys.stderr)
            fH.close()
            return False

        self.likelihood += line[-1]
        self.transitions_expectations = map(lambda x: sum(x), zip(self.transitions_expectations, line[0:-1]))

        # line 2: event model (validated for length only)
        line = map(float, fH.readline().split())
        if len(line) != self.symbol_set_size * NB_MODEL_PARAMS:
            print("hdpHmm.add_expectations_file - problem with event model in file {}"
                  "".format(expectations_file), file=sys.stderr)
            fH.close()
            return False

        # line 3: event assignments
        line = map(float, fH.readline().split())
        self.event_assignments += line

        # line 4: kmer assignments
        line = map(str, fH.readline().split())
        self.kmer_assignments += line

        fH.close()
        return True

    def reset_assignments(self):
        """Archive the current batch size and clear the assignment lists."""
        self.assignments_record.append(len(self.event_assignments))
        self.event_assignments = []
        self.kmer_assignments = []

    def normalize(self, update_transitions, update_emissions=None):
        """Normalize expectations; optionally copy them into transitions."""
        self.normalize_transitions_expectations()
        if update_transitions is True:
            for i in xrange(self.state_number**2):
                self.transitions[i] = self.transitions_expectations[i]
        self.normalized = True
| |
r"""File-like objects that read from or write to a string buffer.
This implements (nearly) all stdio methods.
f = StringIO() # ready for writing
f = StringIO(buf) # ready for reading
f.close() # explicitly release resources held
flag = f.isatty() # always false
pos = f.tell() # get current position
f.seek(pos) # set current position
f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
buf = f.read() # read until EOF
buf = f.read(n) # read up to n bytes
buf = f.readline() # read until end of line ('\n') or EOF
list = f.readlines()# list of f.readline() results until EOF
f.truncate([size]) # truncate file to at most size (default: current pos)
f.write(buf) # write at current position
f.writelines(list) # for line in list: f.write(line)
f.getvalue() # return whole file's contents as a string
Notes:
- Using a real file is often faster (but less convenient).
- There's also a much faster implementation in C, called cStringIO, but
it's not subclassable.
- fileno() is left unimplemented so that code which uses it triggers
an exception early.
- Seeking far beyond EOF and then writing will insert real null
bytes that occupy space in the buffer.
- There's a simple test set (see end of this file).
"""
EINVAL = 22  # errno "Invalid argument"; used by truncate() for negative sizes
__all__ = ["StringIO"]  # the module's public API
def _complain_ifclosed(closed):
if closed:
raise ValueError, "I/O operation on closed file"
class StringIO:
    """class StringIO([buffer])
    When a StringIO object is created, it can be initialized to an existing
    string by passing the string to the constructor. If no string is given,
    the StringIO will start empty.
    The StringIO object can accept either Unicode or 8-bit strings, but
    mixing the two may take some care. If both are used, 8-bit strings that
    cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
    a UnicodeError to be raised when getvalue() is called.
    """
    # Invariant: the logical file contents are self.buf + ''.join(self.buflist).
    # Writes at EOF are queued on self.buflist to avoid quadratic string
    # concatenation; any operation needing random access first folds buflist
    # back into buf.
    def __init__(self, buf = ''):
        # Force self.buf to be a string or unicode
        if not isinstance(buf, basestring):
            buf = str(buf)
        self.buf = buf        # consolidated contents
        self.len = len(buf)   # total logical length (buf + pending chunks)
        self.buflist = []     # chunks appended at EOF, not yet folded into buf
        self.pos = 0          # current read/write offset
        self.closed = False
        self.softspace = 0    # kept for file-object protocol compatibility
    def __iter__(self):
        # A file object is its own iterator.
        return self
    def next(self):
        """A file object is its own iterator, for example iter(f) returns f
        (unless f is closed). When a file is used as an iterator, typically
        in a for loop (for example, for line in f: print line), the next()
        method is called repeatedly. This method returns the next input line,
        or raises StopIteration when EOF is hit.
        """
        _complain_ifclosed(self.closed)
        r = self.readline()
        if not r:
            raise StopIteration
        return r
    def close(self):
        """Free the memory buffer.
        """
        if not self.closed:
            self.closed = True
            # Drop references so the data can be reclaimed; later access
            # fails via _complain_ifclosed().
            del self.buf, self.pos
    def isatty(self):
        """Returns False because StringIO objects are not connected to a
        tty-like device.
        """
        _complain_ifclosed(self.closed)
        return False
    def seek(self, pos, mode = 0):
        """Set the file's current position.
        The mode argument is optional and defaults to 0 (absolute file
        positioning); other values are 1 (seek relative to the current
        position) and 2 (seek relative to the file's end).
        There is no return value.
        """
        _complain_ifclosed(self.closed)
        # Fold pending chunks into buf before any random positioning.
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        if mode == 1:
            pos += self.pos
        elif mode == 2:
            pos += self.len
        # Negative targets clamp to the start of the file.
        self.pos = max(0, pos)
    def tell(self):
        """Return the file's current position."""
        _complain_ifclosed(self.closed)
        return self.pos
    def read(self, n = -1):
        """Read at most size bytes from the file
        (less if the read hits EOF before obtaining size bytes).
        If the size argument is negative or omitted, read all data until EOF
        is reached. The bytes are returned as a string object. An empty
        string is returned when EOF is encountered immediately.
        """
        _complain_ifclosed(self.closed)
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        if n < 0:
            newpos = self.len
        else:
            newpos = min(self.pos+n, self.len)
        r = self.buf[self.pos:newpos]
        self.pos = newpos
        return r
    def readline(self, length=None):
        r"""Read one entire line from the file.
        A trailing newline character is kept in the string (but may be absent
        when a file ends with an incomplete line). If the size argument is
        present and non-negative, it is a maximum byte count (including the
        trailing newline) and an incomplete line may be returned.
        An empty string is returned only when EOF is encountered immediately.
        Note: Unlike stdio's fgets(), the returned string contains null
        characters ('\0') if they occurred in the input.
        """
        _complain_ifclosed(self.closed)
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        # Find the next newline; absent one, the line runs to EOF.
        i = self.buf.find('\n', self.pos)
        if i < 0:
            newpos = self.len
        else:
            newpos = i+1
        if length is not None:
            # Cap at `length` bytes, possibly returning an incomplete line.
            if self.pos + length < newpos:
                newpos = self.pos + length
        r = self.buf[self.pos:newpos]
        self.pos = newpos
        return r
    def readlines(self, sizehint = 0):
        """Read until EOF using readline() and return a list containing the
        lines thus read.
        If the optional sizehint argument is present, instead of reading up
        to EOF, whole lines totalling approximately sizehint bytes (or more
        to accommodate a final whole line).
        """
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            # Stop once the hint is met; the final line is kept whole.
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines
    def truncate(self, size=None):
        """Truncate the file's size.
        If the optional size argument is present, the file is truncated to
        (at most) that size. The size defaults to the current position.
        The current file position is not changed unless the position
        is beyond the new file size.
        If the specified size exceeds the file's current size, the
        file remains unchanged.
        """
        _complain_ifclosed(self.closed)
        if size is None:
            size = self.pos
        elif size < 0:
            raise IOError(EINVAL, "Negative size not allowed")
        elif size < self.pos:
            # Position must never point past the new end of file.
            self.pos = size
        # getvalue() consolidates buflist before slicing.
        self.buf = self.getvalue()[:size]
        self.len = size
    def write(self, s):
        """Write a string to the file.
        There is no return value.
        """
        _complain_ifclosed(self.closed)
        if not s: return
        # Force s to be a string or unicode
        if not isinstance(s, basestring):
            s = str(s)
        spos = self.pos
        slen = self.len
        if spos == slen:
            # Fast path: appending at EOF just queues the chunk.
            self.buflist.append(s)
            self.len = self.pos = spos + len(s)
            return
        if spos > slen:
            # Writing past EOF: pad the gap with NUL characters first.
            self.buflist.append('\0'*(spos - slen))
            slen = spos
        newpos = spos + len(s)
        if spos < slen:
            # Overwrite in the middle: consolidate, then splice the new data
            # between the unchanged head and tail.
            if self.buflist:
                self.buf += ''.join(self.buflist)
            self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
            self.buf = ''
            if newpos > slen:
                slen = newpos
        else:
            self.buflist.append(s)
            slen = newpos
        self.len = slen
        self.pos = newpos
    def writelines(self, iterable):
        """Write a sequence of strings to the file. The sequence can be any
        iterable object producing strings, typically a list of strings. There
        is no return value.
        (The name is intended to match readlines(); writelines() does not add
        line separators.)
        """
        write = self.write
        for line in iterable:
            write(line)
    def flush(self):
        """Flush the internal buffer
        """
        # Nothing to flush for an in-memory file; only validate the handle.
        _complain_ifclosed(self.closed)
    def getvalue(self):
        """
        Retrieve the entire contents of the "file" at any time before
        the StringIO object's close() method is called.
        The StringIO object can accept either Unicode or 8-bit strings,
        but mixing the two may take some care. If both are used, 8-bit
        strings that cannot be interpreted as 7-bit ASCII (that use the
        8th bit) will cause a UnicodeError to be raised when getvalue()
        is called.
        """
        # Consolidate pending chunks so buf holds the full contents.
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        return self.buf
#__EOF__
| |
import os
import sys
import unittest
import warnings
from types import ModuleType
from django.conf import ENVIRONMENT_VARIABLE, LazySettings, Settings, settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, modify_settings,
override_settings, signals,
)
@modify_settings(ITEMS={
    'prepend': ['b'],
    'append': ['d'],
    'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], ITEMS_OUTER=[1, 2, 3], TEST='override', TEST_OUTER='outer')
class FullyDecoratedTranTestCase(TransactionTestCase):
    """Stacked @override_settings/@modify_settings on a TransactionTestCase.

    override_settings (the inner decorator) sets the base values, then
    modify_settings edits ITEMS: ['a', 'c', 'e'] -> ['b', 'c', 'd'].
    """
    available_apps = []
    def test_override(self):
        self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
        self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
        self.assertEqual(settings.TEST, 'override')
        self.assertEqual(settings.TEST_OUTER, 'outer')
    @modify_settings(ITEMS={
        'append': ['e', 'f'],
        'prepend': ['a'],
        'remove': ['d', 'c'],
    })
    def test_method_list_override(self):
        # Method-level modify_settings stacks on the class-level result.
        self.assertListEqual(settings.ITEMS, ['a', 'b', 'e', 'f'])
        self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
    @modify_settings(ITEMS={
        'append': ['b'],
        'prepend': ['d'],
        'remove': ['a', 'c', 'e'],
    })
    def test_method_list_override_no_ops(self):
        # Appending/prepending already-present values is a no-op.
        self.assertListEqual(settings.ITEMS, ['b', 'd'])
    @modify_settings(ITEMS={
        'append': 'e',
        'prepend': 'a',
        'remove': 'c',
    })
    def test_method_list_override_strings(self):
        # Bare strings are accepted wherever a list of values is allowed.
        self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
    @modify_settings(ITEMS={'remove': ['b', 'd']})
    @modify_settings(ITEMS={'append': ['b'], 'prepend': ['d']})
    def test_method_list_override_nested_order(self):
        # The decorator closest to the method applies first.
        self.assertListEqual(settings.ITEMS, ['d', 'c', 'b'])
    @override_settings(TEST='override2')
    def test_method_override(self):
        self.assertEqual(settings.TEST, 'override2')
        self.assertEqual(settings.TEST_OUTER, 'outer')
    def test_decorated_testcase_name(self):
        # The decorators must not mangle the class identity.
        self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase')
    def test_decorated_testcase_module(self):
        self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__)
@modify_settings(ITEMS={
    'prepend': ['b'],
    'append': ['d'],
    'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], TEST='override')
class FullyDecoratedTestCase(TestCase):
    """Stacked @override_settings/@modify_settings on a TestCase:
    ITEMS ['a', 'c', 'e'] -> ['b', 'c', 'd']."""
    def test_override(self):
        self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
        self.assertEqual(settings.TEST, 'override')
    @modify_settings(ITEMS={
        'append': 'e',
        'prepend': 'a',
        'remove': 'c',
    })
    @override_settings(TEST='override2')
    def test_method_override(self):
        # Method decorators stack on top of the class-level decorators.
        self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
        self.assertEqual(settings.TEST, 'override2')
class ClassDecoratedTestCaseSuper(TestCase):
    """
    Dummy class for testing max recursion error in child class call to
    super(). Refs #17011.
    """
    def test_max_recursion_error(self):
        # Intentionally empty; ClassDecoratedTestCase calls this via super().
        pass
@override_settings(TEST='override')
class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper):
    """Class-level override_settings on a subclass, plus a guard against the
    max-recursion regression of #17011."""
    @classmethod
    def setUpClass(cls):
        super(ClassDecoratedTestCase, cls).setUpClass()
        # Read the setting during setUpClass to prove the override is already
        # active there (#21281); 'BUG' would surface a failure below.
        cls.foo = getattr(settings, 'TEST', 'BUG')
    def test_override(self):
        self.assertEqual(settings.TEST, 'override')
    def test_setupclass_override(self):
        """Settings are overridden within setUpClass (#21281)."""
        self.assertEqual(self.foo, 'override')
    @override_settings(TEST='override2')
    def test_method_override(self):
        self.assertEqual(settings.TEST, 'override2')
    def test_max_recursion_error(self):
        """
        Overriding a method on a super class and then calling that method on
        the super class should not trigger infinite recursion. See #17011.
        """
        super(ClassDecoratedTestCase, self).test_max_recursion_error()
@modify_settings(ITEMS={'append': 'mother'})
@override_settings(ITEMS=['father'], TEST='override-parent')
class ParentDecoratedTestCase(TestCase):
    # Base class whose settings decorators must be inherited (and extended)
    # by ChildDecoratedTestCase below.
    pass
@modify_settings(ITEMS={'append': ['child']})
@override_settings(TEST='override-child')
class ChildDecoratedTestCase(ParentDecoratedTestCase):
    def test_override_settings_inheritance(self):
        # The parent's decorators apply first; the child's are layered on top.
        self.assertEqual(settings.ITEMS, ['father', 'mother', 'child'])
        self.assertEqual(settings.TEST, 'override-child')
class SettingsTests(SimpleTestCase):
    """Core behavior of the lazy ``settings`` object: runtime assignment and
    deletion, ``override_settings`` as decorator/context manager, and the
    ``setting_changed`` signal."""
    def setUp(self):
        self.testvalue = None
        signals.setting_changed.connect(self.signal_callback)
    def tearDown(self):
        signals.setting_changed.disconnect(self.signal_callback)
    def signal_callback(self, sender, setting, value, **kwargs):
        # Track only changes to the TEST setting for the signal tests below.
        if setting == 'TEST':
            self.testvalue = value
    def test_override(self):
        settings.TEST = 'test'
        self.assertEqual('test', settings.TEST)
        with self.settings(TEST='override'):
            self.assertEqual('override', settings.TEST)
        self.assertEqual('test', settings.TEST)
        del settings.TEST
    def test_override_change(self):
        settings.TEST = 'test'
        self.assertEqual('test', settings.TEST)
        with self.settings(TEST='override'):
            self.assertEqual('override', settings.TEST)
            # A change made inside the override is discarded on exit.
            settings.TEST = 'test2'
        self.assertEqual('test', settings.TEST)
        del settings.TEST
    def test_override_doesnt_leak(self):
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST')
        with self.settings(TEST='override'):
            self.assertEqual('override', settings.TEST)
            settings.TEST = 'test'
        # The assignment inside the context must not survive its exit.
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST')
    @override_settings(TEST='override')
    def test_decorator(self):
        self.assertEqual('override', settings.TEST)
    def test_context_manager(self):
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST')
        override = override_settings(TEST='override')
        # Instantiating the override must not apply it yet.
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST')
        override.enable()
        self.assertEqual('override', settings.TEST)
        override.disable()
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST')
    def test_class_decorator(self):
        # SimpleTestCase can be decorated by override_settings, but not ut.TestCase
        class SimpleTestCaseSubclass(SimpleTestCase):
            pass
        class UnittestTestCaseSubclass(unittest.TestCase):
            pass
        decorated = override_settings(TEST='override')(SimpleTestCaseSubclass)
        self.assertIsInstance(decorated, type)
        self.assertTrue(issubclass(decorated, SimpleTestCase))
        with self.assertRaisesMessage(Exception, "Only subclasses of Django SimpleTestCase"):
            decorated = override_settings(TEST='override')(UnittestTestCaseSubclass)
    def test_signal_callback_context_manager(self):
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST')
        with self.settings(TEST='override'):
            # signal_callback (connected in setUp) saw the new value.
            self.assertEqual(self.testvalue, 'override')
        # ...and the restore-to-unset on exit.
        self.assertIsNone(self.testvalue)
    @override_settings(TEST='override')
    def test_signal_callback_decorator(self):
        self.assertEqual(self.testvalue, 'override')
    #
    # Regression tests for #10130: deleting settings.
    #
    def test_settings_delete(self):
        settings.TEST = 'test'
        self.assertEqual('test', settings.TEST)
        del settings.TEST
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST')
    def test_settings_delete_wrapped(self):
        # The internal _wrapped attribute must not be deletable.
        with self.assertRaises(TypeError):
            delattr(settings, '_wrapped')
    def test_override_settings_delete(self):
        """
        Allow deletion of a setting in an overridden settings set (#18824)
        """
        previous_i18n = settings.USE_I18N
        previous_l10n = settings.USE_L10N
        with self.settings(USE_I18N=False):
            del settings.USE_I18N
            with self.assertRaises(AttributeError):
                getattr(settings, 'USE_I18N')
            # Should also work for a non-overridden setting
            del settings.USE_L10N
            with self.assertRaises(AttributeError):
                getattr(settings, 'USE_L10N')
            self.assertNotIn('USE_I18N', dir(settings))
            self.assertNotIn('USE_L10N', dir(settings))
        # Both deletions are undone when the override exits.
        self.assertEqual(settings.USE_I18N, previous_i18n)
        self.assertEqual(settings.USE_L10N, previous_l10n)
    def test_override_settings_nested(self):
        """
        override_settings uses the actual _wrapped attribute at
        runtime, not when it was instantiated.
        """
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST')
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST2')
        inner = override_settings(TEST2='override')
        with override_settings(TEST='override'):
            self.assertEqual('override', settings.TEST)
            with inner:
                self.assertEqual('override', settings.TEST)
                self.assertEqual('override', settings.TEST2)
            # inner's __exit__ should have restored the settings of the outer
            # context manager, not those when the class was instantiated
            self.assertEqual('override', settings.TEST)
            with self.assertRaises(AttributeError):
                getattr(settings, 'TEST2')
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST')
        with self.assertRaises(AttributeError):
            getattr(settings, 'TEST2')
class TestComplexSettingOverride(SimpleTestCase):
    """Overriding a setting registered as "complex" must emit a warning."""
    def setUp(self):
        # Register TEST_WARN as a complex setting; restored in tearDown.
        self.old_warn_override_settings = signals.COMPLEX_OVERRIDE_SETTINGS.copy()
        signals.COMPLEX_OVERRIDE_SETTINGS.add('TEST_WARN')
    def tearDown(self):
        signals.COMPLEX_OVERRIDE_SETTINGS = self.old_warn_override_settings
        self.assertNotIn('TEST_WARN', signals.COMPLEX_OVERRIDE_SETTINGS)
    def test_complex_override_warning(self):
        """Regression test for #19031"""
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            with override_settings(TEST_WARN='override'):
                self.assertEqual(settings.TEST_WARN, 'override')
            self.assertEqual(len(w), 1)
            # File extension may be .py, .pyc, etc. Compare only basename.
            self.assertEqual(os.path.splitext(w[0].filename)[0], os.path.splitext(__file__)[0])
            self.assertEqual(str(w[0].message), 'Overriding setting TEST_WARN can lead to unexpected behavior.')
class TrailingSlashURLTests(SimpleTestCase):
    """
    Tests for the MEDIA_URL and STATIC_URL settings.
    They must end with a slash to ensure there's a deterministic way to build
    paths in templates.
    """
    # Indirection so a subclass could substitute another settings object.
    settings_module = settings
    def setUp(self):
        # These tests assign to the live settings; save originals to restore.
        self._original_media_url = self.settings_module.MEDIA_URL
        self._original_static_url = self.settings_module.STATIC_URL
    def tearDown(self):
        self.settings_module.MEDIA_URL = self._original_media_url
        self.settings_module.STATIC_URL = self._original_static_url
    def test_blank(self):
        """
        The empty string is accepted, even though it doesn't end in a slash.
        """
        self.settings_module.MEDIA_URL = ''
        self.assertEqual('', self.settings_module.MEDIA_URL)
        self.settings_module.STATIC_URL = ''
        self.assertEqual('', self.settings_module.STATIC_URL)
    def test_end_slash(self):
        """
        It works if the value ends in a slash.
        """
        self.settings_module.MEDIA_URL = '/foo/'
        self.assertEqual('/foo/', self.settings_module.MEDIA_URL)
        self.settings_module.MEDIA_URL = 'http://media.foo.com/'
        self.assertEqual('http://media.foo.com/', self.settings_module.MEDIA_URL)
        self.settings_module.STATIC_URL = '/foo/'
        self.assertEqual('/foo/', self.settings_module.STATIC_URL)
        self.settings_module.STATIC_URL = 'http://static.foo.com/'
        self.assertEqual('http://static.foo.com/', self.settings_module.STATIC_URL)
    def test_no_end_slash(self):
        """
        An ImproperlyConfigured exception is raised if the value doesn't end
        in a slash.
        """
        with self.assertRaises(ImproperlyConfigured):
            self.settings_module.MEDIA_URL = '/foo'
        with self.assertRaises(ImproperlyConfigured):
            self.settings_module.MEDIA_URL = 'http://media.foo.com'
        with self.assertRaises(ImproperlyConfigured):
            self.settings_module.STATIC_URL = '/foo'
        with self.assertRaises(ImproperlyConfigured):
            self.settings_module.STATIC_URL = 'http://static.foo.com'
    def test_double_slash(self):
        """
        If the value ends in more than one slash, presume they know what
        they're doing.
        """
        self.settings_module.MEDIA_URL = '/wrong//'
        self.assertEqual('/wrong//', self.settings_module.MEDIA_URL)
        self.settings_module.MEDIA_URL = 'http://media.foo.com/wrong//'
        self.assertEqual('http://media.foo.com/wrong//', self.settings_module.MEDIA_URL)
        self.settings_module.STATIC_URL = '/wrong//'
        self.assertEqual('/wrong//', self.settings_module.STATIC_URL)
        self.settings_module.STATIC_URL = 'http://static.foo.com/wrong//'
        self.assertEqual('http://static.foo.com/wrong//', self.settings_module.STATIC_URL)
class SecureProxySslHeaderTest(SimpleTestCase):
    """HttpRequest.is_secure() behavior driven by SECURE_PROXY_SSL_HEADER."""
    settings_module = settings
    def setUp(self):
        # Save the live value; restored in tearDown.
        self._original_setting = self.settings_module.SECURE_PROXY_SSL_HEADER
    def tearDown(self):
        self.settings_module.SECURE_PROXY_SSL_HEADER = self._original_setting
    def test_none(self):
        # Unset header config: requests are never considered secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = None
        req = HttpRequest()
        self.assertIs(req.is_secure(), False)
    def test_set_without_xheader(self):
        # Configured header absent from the request: not secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
        req = HttpRequest()
        self.assertIs(req.is_secure(), False)
    def test_set_with_xheader_wrong(self):
        # Header present but with a non-matching value: not secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
        req = HttpRequest()
        req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'wrongvalue'
        self.assertIs(req.is_secure(), False)
    def test_set_with_xheader_right(self):
        # Header present with the configured value: secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
        req = HttpRequest()
        req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'https'
        self.assertIs(req.is_secure(), True)
class IsOverriddenTest(SimpleTestCase):
    """Settings.is_overridden() plus the repr() of the settings wrappers."""
    def test_configure(self):
        s = LazySettings()
        s.configure(SECRET_KEY='foo')
        self.assertTrue(s.is_overridden('SECRET_KEY'))
    def test_module(self):
        # A setting defined in the settings module counts as overridden;
        # one merely inheriting its global default does not.
        settings_module = ModuleType('fake_settings_module')
        settings_module.SECRET_KEY = 'foo'
        sys.modules['fake_settings_module'] = settings_module
        try:
            s = Settings('fake_settings_module')
            self.assertTrue(s.is_overridden('SECRET_KEY'))
            self.assertFalse(s.is_overridden('ALLOWED_HOSTS'))
        finally:
            del sys.modules['fake_settings_module']
    def test_override(self):
        self.assertFalse(settings.is_overridden('ALLOWED_HOSTS'))
        with override_settings(ALLOWED_HOSTS=[]):
            self.assertTrue(settings.is_overridden('ALLOWED_HOSTS'))
    def test_unevaluated_lazysettings_repr(self):
        lazy_settings = LazySettings()
        expected = '<LazySettings [Unevaluated]>'
        self.assertEqual(repr(lazy_settings), expected)
    def test_evaluated_lazysettings_repr(self):
        lazy_settings = LazySettings()
        module = os.environ.get(ENVIRONMENT_VARIABLE)
        expected = '<LazySettings "%s">' % module
        # Force evaluation of the lazy object.
        lazy_settings.APPEND_SLASH
        self.assertEqual(repr(lazy_settings), expected)
    def test_usersettingsholder_repr(self):
        lazy_settings = LazySettings()
        lazy_settings.configure(APPEND_SLASH=False)
        expected = '<UserSettingsHolder>'
        self.assertEqual(repr(lazy_settings._wrapped), expected)
    def test_settings_repr(self):
        module = os.environ.get(ENVIRONMENT_VARIABLE)
        lazy_settings = Settings(module)
        expected = '<Settings "%s">' % module
        self.assertEqual(repr(lazy_settings), expected)
class TestListSettings(unittest.TestCase):
    """
    Make sure settings that should be lists or tuples throw
    ImproperlyConfigured if they are set to a string instead of a list or tuple.
    """
    list_or_tuple_settings = (
        "INSTALLED_APPS",
        "TEMPLATE_DIRS",
        "LOCALE_PATHS",
    )
    def test_tuple_settings(self):
        settings_module = ModuleType('fake_settings_module')
        settings_module.SECRET_KEY = 'foo'
        for setting in self.list_or_tuple_settings:
            # Deliberately a plain string: parentheses without a trailing
            # comma do NOT make a tuple, which is exactly the invalid value
            # this test wants to feed in.
            setattr(settings_module, setting, ('non_list_or_tuple_value'))
            sys.modules['fake_settings_module'] = settings_module
            try:
                with self.assertRaises(ImproperlyConfigured):
                    Settings('fake_settings_module')
            finally:
                # Always unregister the fake module and strip the bad value
                # before testing the next setting.
                del sys.modules['fake_settings_module']
                delattr(settings_module, setting)
| |
# -*- coding: utf-8 -*-
"""
A collection of utilities for canonicalizing and inspecting graphs.
Among other things, they solve the problem of deterministic bnode
comparisons.
Warning: the time to canonicalize bnodes may increase exponentially on larger
graphs. Use with care!
Example of comparing two graphs::
>>> g1 = Graph().parse(format='n3', data='''
... @prefix : <http://example.org/ns#> .
... <http://example.org> :rel
... <http://example.org/same>,
... [ :label "Same" ],
... <http://example.org/a>,
... [ :label "A" ] .
... ''')
>>> g2 = Graph().parse(format='n3', data='''
... @prefix : <http://example.org/ns#> .
... <http://example.org> :rel
... <http://example.org/same>,
... [ :label "Same" ],
... <http://example.org/b>,
... [ :label "B" ] .
... ''')
>>>
>>> iso1 = to_isomorphic(g1)
>>> iso2 = to_isomorphic(g2)
These are not isomorphic::
>>> iso1 == iso2
False
Diff the two graphs::
>>> in_both, in_first, in_second = graph_diff(iso1, iso2)
Present in both::
>>> def dump_nt_sorted(g):
... for l in sorted(g.serialize(format='nt').splitlines()):
... if l: print(l.decode('ascii'))
>>> dump_nt_sorted(in_both)
<http://example.org> <http://example.org/ns#rel> <http://example.org/same> .
<http://example.org> <http://example.org/ns#rel> _:cbcaabaaba17fecbc304a64f8edee4335e .
_:cbcaabaaba17fecbc304a64f8edee4335e <http://example.org/ns#label> "Same" .
Only in first::
>>> dump_nt_sorted(in_first)
<http://example.org> <http://example.org/ns#rel> <http://example.org/a> .
<http://example.org> <http://example.org/ns#rel> _:cb124e4c6da0579f810c0ffe4eff485bd9 .
_:cb124e4c6da0579f810c0ffe4eff485bd9 <http://example.org/ns#label> "A" .
Only in second::
>>> dump_nt_sorted(in_second)
<http://example.org> <http://example.org/ns#rel> <http://example.org/b> .
<http://example.org> <http://example.org/ns#rel> _:cb558f30e21ddfc05ca53108348338ade8 .
_:cb558f30e21ddfc05ca53108348338ade8 <http://example.org/ns#label> "B" .
"""
# TODO:
# - Doesn't handle quads.
# - Add warning and/or safety mechanism before working on large graphs?
# - use this in existing Graph.isomorphic?
__all__ = ['IsomorphicGraph', 'to_isomorphic', 'isomorphic', 'to_canonical_graph', 'graph_diff', 'similar']
from rdflib.graph import Graph, ConjunctiveGraph, ReadOnlyGraphAggregate
from rdflib.term import BNode
try:
import hashlib
md = hashlib.md5()
except ImportError:
# for Python << 2.5
import md5
md = md5.new()
class IsomorphicGraph(ConjunctiveGraph):
    """A ConjunctiveGraph whose equality operator tests RDF isomorphism.

    Ported from <http://www.w3.org/2001/sw/DataAccess/proto-tests/tools/rdfdiff.py>
    (Sean B Palmer's RDF Graph Isomorphism Tester).
    """
    def __init__(self, **kwargs):
        super(IsomorphicGraph, self).__init__(**kwargs)
    def __eq__(self, other):
        """Graph isomorphism testing."""
        comparable = isinstance(other, IsomorphicGraph) and len(self) == len(other)
        if not comparable:
            return False
        if list(self) == list(other):
            # Identical triple sequences need no canonicalization.
            return True  # TODO: really generally cheaper?
        # Fall back to the order- and bnode-label-insensitive comparison.
        return self.internal_hash() == other.internal_hash()
    def __ne__(self, other):
        """Negative graph isomorphism testing."""
        return not self.__eq__(other)
    def internal_hash(self):
        """Hash of the canonicalized triples.

        Defined instead of __hash__ to avoid a circular recursion scenario
        with the Memory store for rdflib, which requires a hash lookup in
        order to return a generator of triples.
        """
        return _TripleCanonicalizer(self).to_hash()
class _TripleCanonicalizer(object):
    """Rewrites a graph's triples so bnode ids are derived purely from graph
    structure, enabling order- and label-independent comparison.

    NOTE(review): cost can grow steeply with interconnected bnodes — see the
    module docstring warning.
    """
    def __init__(self, graph, hashfunc=hash):
        # hashfunc maps a hashable value to the id/checksum used for
        # canonical bnode naming (builtin hash by default, or _md5_hash).
        self.graph = graph
        self.hashfunc = hashfunc
    def to_hash(self):
        # Hash of the sorted sequence of per-triple hashes: insensitive to
        # triple order and to original bnode labels.
        return self.hashfunc(tuple(sorted(
            map(self.hashfunc, self.canonical_triples()) )))
    def canonical_triples(self):
        # Yield each triple with its bnodes renamed deterministically.
        for triple in self.graph:
            yield tuple(self._canonicalize_bnodes(triple))
    def _canonicalize_bnodes(self, triple):
        # Replace every bnode with a deterministic "cb<hash>" bnode; all
        # other terms pass through unchanged.
        for term in triple:
            if isinstance(term, BNode):
                yield BNode(value="cb%s"%self._canonicalize(term))
            else:
                yield term
    def _canonicalize(self, term, done=False):
        # A bnode's canonical id is the hash of a sorted encoding of every
        # triple it participates in.
        return self.hashfunc(tuple(sorted(self._vhashtriples(term, done),
            key=_hetero_tuple_key)))
    def _vhashtriples(self, term, done):
        # Encoded forms of all triples that mention `term`.
        for triple in self.graph:
            if term in triple:
                yield tuple(self._vhashtriple(triple, term, done))
    def _vhashtriple(self, triple, target_term, done):
        # Encode one triple relative to target_term: non-bnodes by value, the
        # target (or any bnode once one level deep) by its position, and
        # other bnodes via a one-level-deep recursive canonicalization.
        for i, term in enumerate(triple):
            if not isinstance(term, BNode):
                yield term
            elif done or (term == target_term):
                yield i
            else:
                yield self._canonicalize(term, done=True)
def _hetero_tuple_key(x):
"Sort like Python 2 - by name of type, then by value. Expects tuples."
return tuple((type(a).__name__, a) for a in x)
def to_isomorphic(graph):
    """Return *graph* as an IsomorphicGraph sharing the same store
    (a no-op when it already is one)."""
    already_isomorphic = isinstance(graph, IsomorphicGraph)
    return graph if already_isomorphic else IsomorphicGraph(store=graph.store)
def isomorphic(graph1, graph2):
    """
    Compare graph for equality. Uses an algorithm to compute unique hashes
    which takes bnodes into account.
    Examples::
        >>> g1 = Graph().parse(format='n3', data='''
        ...     @prefix : <http://example.org/ns#> .
        ...     <http://example.org> :rel <http://example.org/a> .
        ...     <http://example.org> :rel <http://example.org/b> .
        ...     <http://example.org> :rel [ :label "A bnode." ] .
        ... ''')
        >>> g2 = Graph().parse(format='n3', data='''
        ...     @prefix ns: <http://example.org/ns#> .
        ...     <http://example.org> ns:rel [ ns:label "A bnode." ] .
        ...     <http://example.org> ns:rel <http://example.org/b>,
        ...             <http://example.org/a> .
        ... ''')
        >>> isomorphic(g1, g2)
        True
        >>> g3 = Graph().parse(format='n3', data='''
        ...     @prefix : <http://example.org/ns#> .
        ...     <http://example.org> :rel <http://example.org/a> .
        ...     <http://example.org> :rel <http://example.org/b> .
        ...     <http://example.org> :rel <http://example.org/c> .
        ... ''')
        >>> isomorphic(g1, g3)
        False
    """
    # Equal canonical hashes <=> isomorphic (order- and bnode-label-blind).
    return _TripleCanonicalizer(graph1).to_hash() == _TripleCanonicalizer(graph2).to_hash()
def to_canonical_graph(g1):
    """Create a canonical, read-only copy of *g1* whose bnode ids are
    deterministic MD5-based values correlated with the graph contents."""
    canonical = Graph()
    canonical += _TripleCanonicalizer(g1, _md5_hash).canonical_triples()
    return ReadOnlyGraphAggregate([canonical])
def graph_diff(g1, g2):
    """
    Returns three sets of triples: "in both", "in first" and "in second".
    """
    # Canonical graphs give bnodes deterministic ids, so plain graph set
    # operations (intersection/difference) line up correctly.
    cg1, cg2 = to_canonical_graph(g1), to_canonical_graph(g2)
    return (cg1 * cg2, cg1 - cg2, cg2 - cg1)
def _md5_hash(t):
    """Deterministically hash a (possibly nested) tuple *t* to an MD5 hexdigest.

    BUGFIX: the original did ``h = md`` and then called ``h.update(...)``,
    mutating the single module-level digest object — so the result depended
    on every previous call, making "canonical" bnode ids irreproducible.
    ``md.copy()`` starts each call from the same pristine state.
    """
    h = md.copy()
    for i in t:
        if isinstance(i, tuple):
            # Fold nested tuples in via their own (independent) digest.
            h.update(_md5_hash(i).encode('ascii'))
        else:
            h.update(unicode(i).encode("utf8"))
    return h.hexdigest()
_MOCK_BNODE = BNode()  # single stand-in bnode substituted by _squash_bnodes()
def similar(g1, g2):
    """
    Checks if the two graphs are "similar": compares sorted triples after
    replacing every bnode with the singular mock bnode (``_MOCK_BNODE``).

    This is a much cheaper, but less reliable, alternative to the comparison
    algorithm in ``isomorphic``.
    """
    for left, right in _squashed_graphs_triples(g1, g2):
        if left != right:
            return False
    return True
def _squashed_graphs_triples(g1, g2):
    """Yield pairs of sorted, bnode-squashed triples from the two graphs
    (zip truncates to the shorter of the two)."""
    left = sorted(_squash_graph(g1))
    right = sorted(_squash_graph(g2))
    for pair in zip(left, right):
        yield pair
def _squash_graph(graph):
    """Lazily apply _squash_bnodes to every triple in *graph*."""
    for triple in graph:
        yield _squash_bnodes(triple)
def _squash_bnodes(triple):
    """Return *triple* with every BNode replaced by the shared _MOCK_BNODE."""
    return tuple(_MOCK_BNODE if isinstance(term, BNode) else term
                 for term in triple)
| |
"""Support for Rflink devices."""
import asyncio
from collections import defaultdict
import logging
import async_timeout
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_COMMAND, CONF_HOST, CONF_PORT,
STATE_ON, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.deprecation import get_deprecated
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.dispatcher import (
async_dispatcher_send, async_dispatcher_connect)
from homeassistant.helpers.restore_state import RestoreEntity
_LOGGER = logging.getLogger(__name__)
# Attribute names used when firing Home Assistant events.
ATTR_EVENT = 'event'
ATTR_STATE = 'state'
# Configuration keys. The *_ALIASSES spellings are deprecated misspellings
# kept so existing user configurations keep working.
CONF_ALIASES = 'aliases'
CONF_ALIASSES = 'aliasses'
CONF_GROUP_ALIASES = 'group_aliases'
CONF_GROUP_ALIASSES = 'group_aliasses'
CONF_GROUP = 'group'
CONF_NOGROUP_ALIASES = 'nogroup_aliases'
CONF_NOGROUP_ALIASSES = 'nogroup_aliasses'
CONF_DEVICE_DEFAULTS = 'device_defaults'
CONF_DEVICE_ID = 'device_id'
CONF_DEVICES = 'devices'
CONF_AUTOMATIC_ADD = 'automatic_add'
CONF_FIRE_EVENT = 'fire_event'
CONF_IGNORE_DEVICES = 'ignore_devices'
CONF_RECONNECT_INTERVAL = 'reconnect_interval'
CONF_SIGNAL_REPETITIONS = 'signal_repetitions'
CONF_WAIT_FOR_ACK = 'wait_for_ack'
# hass.data keys for state shared across this integration's platforms.
DATA_DEVICE_REGISTER = 'rflink_device_register'
DATA_ENTITY_LOOKUP = 'rflink_entity_lookup'
DATA_ENTITY_GROUP_LOOKUP = 'rflink_entity_group_only_lookup'
DEFAULT_RECONNECT_INTERVAL = 10  # seconds between reconnection attempts
DEFAULT_SIGNAL_REPETITIONS = 1  # times a command is (re)sent by default
CONNECTION_TIMEOUT = 10  # seconds to wait when establishing the connection
# Keys found in incoming Rflink event dicts.
EVENT_BUTTON_PRESSED = 'button_pressed'
EVENT_KEY_COMMAND = 'command'
EVENT_KEY_ID = 'id'
EVENT_KEY_SENSOR = 'sensor'
EVENT_KEY_UNIT = 'unit'
# Commands that address a whole group of devices at once.
RFLINK_GROUP_COMMANDS = ['allon', 'alloff']
DOMAIN = 'rflink'
SERVICE_SEND_COMMAND = 'send_command'
# Dispatcher signal names; SIGNAL_HANDLE_EVENT is formatted per entity id.
SIGNAL_AVAILABILITY = 'rflink_device_available'
SIGNAL_HANDLE_EVENT = 'rflink_handle_event_{}'
# Placeholder entity id used before an entity is fully registered.
TMP_ENTITY = 'tmp.{}'
# Per-device defaults that each configured device may override.
DEVICE_DEFAULTS_SCHEMA = vol.Schema({
    vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
    vol.Optional(CONF_SIGNAL_REPETITIONS,
                 default=DEFAULT_SIGNAL_REPETITIONS): vol.Coerce(int),
})
# Component configuration: serial port (or TCP host/port) plus tuning knobs.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_PORT): vol.Any(cv.port, cv.string),
        vol.Optional(CONF_HOST): cv.string,
        vol.Optional(CONF_WAIT_FOR_ACK, default=True): cv.boolean,
        vol.Optional(CONF_RECONNECT_INTERVAL,
                     default=DEFAULT_RECONNECT_INTERVAL): int,
        vol.Optional(CONF_IGNORE_DEVICES, default=[]):
            vol.All(cv.ensure_list, [cv.string]),
    }),
}, extra=vol.ALLOW_EXTRA)
# Payload schema for the rflink.send_command service.
SEND_COMMAND_SCHEMA = vol.Schema({
    vol.Required(CONF_DEVICE_ID): cv.string,
    vol.Required(CONF_COMMAND): cv.string,
})
def identify_event_type(event):
    """Look at event to determine type of device.

    Async friendly.
    """
    # Check the known event keys in priority order; fall back to 'unknown'.
    for known_key in (EVENT_KEY_COMMAND, EVENT_KEY_SENSOR):
        if known_key in event:
            return known_key
    return 'unknown'
async def async_setup(hass, config):
    """Set up the Rflink component.

    Establishes the serial or TCP connection to the Rflink gateway,
    registers the rflink.send_command service and routes incoming gateway
    events to entities via the dispatcher. Returns True once the first
    connection attempt has been scheduled.
    """
    from rflink.protocol import create_rflink_connection
    import serial
    # Allow entities to register themselves by device_id to be looked up when
    # new rflink events arrive to be handled
    hass.data[DATA_ENTITY_LOOKUP] = {
        EVENT_KEY_COMMAND: defaultdict(list),
        EVENT_KEY_SENSOR: defaultdict(list),
    }
    # Separate lookup for entities that respond to group commands.
    hass.data[DATA_ENTITY_GROUP_LOOKUP] = {
        EVENT_KEY_COMMAND: defaultdict(list),
    }
    # Allow platform to specify function to register new unknown devices
    hass.data[DATA_DEVICE_REGISTER] = {}
    async def async_send_command(call):
        """Send Rflink command."""
        _LOGGER.debug('Rflink command for %s', str(call.data))
        if not (await RflinkCommand.send_command(
                call.data.get(CONF_DEVICE_ID),
                call.data.get(CONF_COMMAND))):
            _LOGGER.error('Failed Rflink command for %s', str(call.data))
    hass.services.async_register(
        DOMAIN, SERVICE_SEND_COMMAND, async_send_command,
        schema=SEND_COMMAND_SCHEMA)
    @callback
    def event_callback(event):
        """Handle incoming Rflink events.

        Rflink events arrive as dictionaries of varying content
        depending on their type. Identify the events and distribute
        accordingly.
        """
        event_type = identify_event_type(event)
        _LOGGER.debug('event of type %s: %s', event_type, event)
        # Don't propagate non entity events (eg: version string, ack response)
        if event_type not in hass.data[DATA_ENTITY_LOOKUP]:
            _LOGGER.debug('unhandled event of type: %s', event_type)
            return
        # Lookup entities who registered this device id as device id or alias
        event_id = event.get(EVENT_KEY_ID, None)
        # Group commands (allon/alloff) are dispatched only to entities in
        # the group lookup table.
        is_group_event = (event_type == EVENT_KEY_COMMAND and
                          event[EVENT_KEY_COMMAND] in RFLINK_GROUP_COMMANDS)
        if is_group_event:
            entity_ids = hass.data[DATA_ENTITY_GROUP_LOOKUP][event_type].get(
                event_id, [])
        else:
            entity_ids = hass.data[DATA_ENTITY_LOOKUP][event_type][event_id]
        _LOGGER.debug('entity_ids: %s', entity_ids)
        if entity_ids:
            # Propagate event to every entity matching the device id
            for entity in entity_ids:
                _LOGGER.debug('passing event to %s', entity)
                async_dispatcher_send(hass,
                                      SIGNAL_HANDLE_EVENT.format(entity),
                                      event)
        elif not is_group_event:
            # If device is not yet known, register with platform (if loaded)
            if event_type in hass.data[DATA_DEVICE_REGISTER]:
                _LOGGER.debug('device_id not known, adding new device')
                # Add bogus event_id first to avoid race if we get another
                # event before the device is created
                # Any additional events received before the device has been
                # created will thus be ignored.
                hass.data[DATA_ENTITY_LOOKUP][event_type][
                    event_id].append(TMP_ENTITY.format(event_id))
                hass.async_create_task(
                    hass.data[DATA_DEVICE_REGISTER][event_type](event))
            else:
                _LOGGER.debug('device_id not known and automatic add disabled')
    # When connecting to tcp host instead of serial port (optional)
    host = config[DOMAIN].get(CONF_HOST)
    # TCP port when host configured, otherwise serial port
    port = config[DOMAIN][CONF_PORT]
    @callback
    def reconnect(exc=None):
        """Schedule reconnect after connection has been unexpectedly lost."""
        # Reset protocol binding before starting reconnect
        RflinkCommand.set_rflink_protocol(None)
        async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)
        # If HA is not stopping, initiate new connection
        if hass.state != CoreState.stopping:
            _LOGGER.warning('disconnected from Rflink, reconnecting')
            hass.async_create_task(connect())
    async def connect():
        """Set up connection and hook it into HA for reconnect/shutdown."""
        _LOGGER.info('Initiating Rflink connection')
        # Rflink create_rflink_connection decides based on the value of host
        # (string or None) if serial or tcp mode should be used
        # Initiate serial/tcp connection to Rflink gateway
        connection = create_rflink_connection(
            port=port,
            host=host,
            event_callback=event_callback,
            disconnect_callback=reconnect,
            loop=hass.loop,
            ignore=config[DOMAIN][CONF_IGNORE_DEVICES]
        )
        try:
            with async_timeout.timeout(CONNECTION_TIMEOUT,
                                       loop=hass.loop):
                transport, protocol = await connection
        except (serial.serialutil.SerialException, ConnectionRefusedError,
                TimeoutError, OSError, asyncio.TimeoutError) as exc:
            reconnect_interval = config[DOMAIN][CONF_RECONNECT_INTERVAL]
            _LOGGER.exception(
                "Error connecting to Rflink, reconnecting in %s",
                reconnect_interval)
            # Connection to Rflink device is lost, make entities unavailable
            async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)
            hass.loop.call_later(reconnect_interval, reconnect, exc)
            return
        # There is a valid connection to a Rflink device now so
        # mark entities as available
        async_dispatcher_send(hass, SIGNAL_AVAILABILITY, True)
        # Bind protocol to command class to allow entities to send commands
        RflinkCommand.set_rflink_protocol(
            protocol, config[DOMAIN][CONF_WAIT_FOR_ACK])
        # handle shutdown of Rflink asyncio transport
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP,
                                   lambda x: transport.close())
        _LOGGER.info('Connected to Rflink')
    hass.async_create_task(connect())
    return True
class RflinkDevice(Entity):
    """Representation of a Rflink device.

    Contains the common logic for Rflink entities.
    """
    # Platform identifier; left None here, may be set by subclasses/platforms.
    platform = None
    # Tri-state: None = unknown (assumed state), True = on, False = off.
    _state = None
    # Tracks gateway connectivity; toggled via SIGNAL_AVAILABILITY.
    _available = True
    def __init__(self, device_id, initial_event=None, name=None, aliases=None,
                 group=True, group_aliases=None, nogroup_aliases=None,
                 fire_event=False,
                 signal_repetitions=DEFAULT_SIGNAL_REPETITIONS):
        """Initialize the device.

        device_id: Rflink protocol id this entity listens and sends on.
        initial_event: event that triggered automatic creation; replayed
            once the entity has been added to hass.
        name: friendly name; falls back to device_id when not given.
        aliases: extra device ids that also address this entity.
        group / group_aliases / nogroup_aliases: control which (group)
            commands the entity responds to (see async_added_to_hass).
        fire_event: also publish received commands on the HA event bus.
        signal_repetitions: how often outgoing commands are repeated.
        """
        # Rflink specific attributes for every component type
        self._initial_event = initial_event
        self._device_id = device_id
        if name:
            self._name = name
        else:
            self._name = device_id
        self._aliases = aliases
        self._group = group
        self._group_aliases = group_aliases
        self._nogroup_aliases = nogroup_aliases
        self._should_fire_event = fire_event
        self._signal_repetitions = signal_repetitions
    @callback
    def handle_event_callback(self, event):
        """Handle incoming event for device type."""
        # Call platform specific event handler
        self._handle_event(event)
        # Propagate changes through ha
        self.async_schedule_update_ha_state()
        # Put command onto bus for user to subscribe to
        if self._should_fire_event and identify_event_type(
                event) == EVENT_KEY_COMMAND:
            self.hass.bus.async_fire(EVENT_BUTTON_PRESSED, {
                ATTR_ENTITY_ID: self.entity_id,
                ATTR_STATE: event[EVENT_KEY_COMMAND],
            })
            _LOGGER.debug("Fired bus event for %s: %s",
                          self.entity_id, event[EVENT_KEY_COMMAND])
    def _handle_event(self, event):
        """Platform specific event handler."""
        raise NotImplementedError()
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def name(self):
        """Return a name for the device."""
        return self._name
    @property
    def is_on(self):
        """Return true if device is on."""
        if self.assumed_state:
            return False
        return self._state
    @property
    def assumed_state(self):
        """Assume device state until first device event sets state."""
        return self._state is None
    @property
    def available(self):
        """Return True if entity is available."""
        return self._available
    @callback
    def _availability_callback(self, availability):
        """Update availability state."""
        self._available = availability
        self.async_schedule_update_ha_state()
    async def async_added_to_hass(self):
        """Register update callback."""
        await super().async_added_to_hass()
        # Remove temporary bogus entity_id if added
        tmp_entity = TMP_ENTITY.format(self._device_id)
        if tmp_entity in self.hass.data[DATA_ENTITY_LOOKUP][
                EVENT_KEY_COMMAND][self._device_id]:
            self.hass.data[DATA_ENTITY_LOOKUP][
                EVENT_KEY_COMMAND][self._device_id].remove(tmp_entity)
        # Register id and aliases
        self.hass.data[DATA_ENTITY_LOOKUP][
            EVENT_KEY_COMMAND][self._device_id].append(self.entity_id)
        if self._group:
            self.hass.data[DATA_ENTITY_GROUP_LOOKUP][
                EVENT_KEY_COMMAND][self._device_id].append(self.entity_id)
        # aliases respond to both normal and group commands (allon/alloff)
        if self._aliases:
            for _id in self._aliases:
                self.hass.data[DATA_ENTITY_LOOKUP][
                    EVENT_KEY_COMMAND][_id].append(self.entity_id)
                self.hass.data[DATA_ENTITY_GROUP_LOOKUP][
                    EVENT_KEY_COMMAND][_id].append(self.entity_id)
        # group_aliases only respond to group commands (allon/alloff)
        if self._group_aliases:
            for _id in self._group_aliases:
                self.hass.data[DATA_ENTITY_GROUP_LOOKUP][
                    EVENT_KEY_COMMAND][_id].append(self.entity_id)
        # nogroup_aliases only respond to normal commands
        if self._nogroup_aliases:
            for _id in self._nogroup_aliases:
                self.hass.data[DATA_ENTITY_LOOKUP][
                    EVENT_KEY_COMMAND][_id].append(self.entity_id)
        # Subscribe to availability changes and to events addressed to this
        # specific entity id.
        async_dispatcher_connect(self.hass, SIGNAL_AVAILABILITY,
                                 self._availability_callback)
        async_dispatcher_connect(self.hass,
                                 SIGNAL_HANDLE_EVENT.format(self.entity_id),
                                 self.handle_event_callback)
        # Process the initial event now that the entity is created
        if self._initial_event:
            self.handle_event_callback(self._initial_event)
class RflinkCommand(RflinkDevice):
    """Singleton class to make Rflink command interface available to entities.

    This class is to be inherited by every Entity class that is actionable
    (switches/lights). It exposes the Rflink command interface for these
    entities.

    The Rflink interface is managed as a class level and set during setup (and
    reset on reconnect).
    """
    # Keep repetition tasks to cancel if state is changed before repetitions
    # are sent
    _repetition_task = None
    # Shared asyncio protocol instance; None while disconnected.
    _protocol = None
    # NOTE(review): _wait_ack is only ever assigned through
    # set_rflink_protocol(), yet _async_send_command() reads it
    # unconditionally — relies on setup having passed wait_ack once; confirm.
    @classmethod
    def set_rflink_protocol(cls, protocol, wait_ack=None):
        """Set the Rflink asyncio protocol as a class variable."""
        cls._protocol = protocol
        if wait_ack is not None:
            cls._wait_ack = wait_ack
    @classmethod
    def is_connected(cls):
        """Return connection status."""
        return bool(cls._protocol)
    @classmethod
    async def send_command(cls, device_id, action):
        """Send device command to Rflink and wait for acknowledgement."""
        return await cls._protocol.send_command_ack(device_id, action)
    async def _async_handle_command(self, command, *args):
        """Do bookkeeping for command, send it to rflink and update state."""
        self.cancel_queued_send_commands()
        if command == 'turn_on':
            cmd = 'on'
            self._state = True
        elif command == 'turn_off':
            cmd = 'off'
            self._state = False
        elif command == 'dim':
            # convert brightness to rflink dim level
            cmd = str(int(args[0] / 17))
            self._state = True
        elif command == 'toggle':
            cmd = 'on'
            # if the state is unknown or false, it gets set as true
            # if the state is true, it gets set as false
            self._state = self._state in [None, False]
        # Cover options for RFlink
        elif command == 'close_cover':
            cmd = 'DOWN'
            self._state = False
        elif command == 'open_cover':
            cmd = 'UP'
            self._state = True
        elif command == 'stop_cover':
            cmd = 'STOP'
            self._state = True
        # NOTE(review): an unrecognized command leaves `cmd` unbound and
        # raises UnboundLocalError below — callers must pass a known command.
        # Send initial command and queue repetitions.
        # This allows the entity state to be updated quickly and not having to
        # wait for all repetitions to be sent
        await self._async_send_command(cmd, self._signal_repetitions)
        # Update state of entity
        await self.async_update_ha_state()
    def cancel_queued_send_commands(self):
        """Cancel queued signal repetition commands.

        For example when user changed state while repetitions are still
        queued for broadcast. Or when an incoming Rflink command (remote
        switch) changes the state.
        """
        # cancel any outstanding tasks from the previous state change
        if self._repetition_task:
            self._repetition_task.cancel()
    async def _async_send_command(self, cmd, repetitions):
        """Send a command for device to Rflink gateway."""
        _LOGGER.debug(
            "Sending command: %s to Rflink device: %s", cmd, self._device_id)
        if not self.is_connected():
            raise HomeAssistantError('Cannot send command, not connected!')
        if self._wait_ack:
            # Puts command on outgoing buffer then waits for Rflink to confirm
            # the command has been send out in the ether.
            await self._protocol.send_command_ack(self._device_id, cmd)
        else:
            # Puts command on outgoing buffer and returns straight away.
            # Rflink protocol/transport handles asynchronous writing of buffer
            # to serial/tcp device. Does not wait for command send
            # confirmation.
            self._protocol.send_command(self._device_id, cmd)
        if repetitions > 1:
            # Recursively schedule the remaining repetitions as a task so the
            # caller is not blocked until all repeats are on the wire.
            self._repetition_task = self.hass.async_create_task(
                self._async_send_command(cmd, repetitions - 1))
class SwitchableRflinkDevice(RflinkCommand, RestoreEntity):
    """Rflink entity which can switch on/off (eg: light, switch)."""

    async def async_added_to_hass(self):
        """Restore RFLink device state (ON/OFF)."""
        await super().async_added_to_hass()
        last_state = await self.async_get_last_state()
        if last_state is not None:
            self._state = last_state.state == STATE_ON

    def _handle_event(self, event):
        """Adjust state if Rflink picks up a remote command for this device."""
        self.cancel_queued_send_commands()
        received = event['command']
        if received in ('on', 'allon'):
            self._state = True
        elif received in ('off', 'alloff'):
            self._state = False

    def async_turn_on(self, **kwargs):
        """Turn the device on."""
        return self._async_handle_command("turn_on")

    def async_turn_off(self, **kwargs):
        """Turn the device off."""
        return self._async_handle_command("turn_off")
# Deprecated (misspelled) option names that may still appear in user device
# configs; index-aligned with REPLACEMENT_CONFIG_OPTIONS below.
DEPRECATED_CONFIG_OPTIONS = [
    CONF_ALIASSES,
    CONF_GROUP_ALIASSES,
    CONF_NOGROUP_ALIASSES]
# Correct spellings that replace the deprecated options, index for index.
REPLACEMENT_CONFIG_OPTIONS = [
    CONF_ALIASES,
    CONF_GROUP_ALIASES,
    CONF_NOGROUP_ALIASES]
def remove_deprecated(config):
    """Remove deprecated config options from device config."""
    option_pairs = zip(DEPRECATED_CONFIG_OPTIONS, REPLACEMENT_CONFIG_OPTIONS)
    for deprecated_option, replacement_option in option_pairs:
        if deprecated_option not in config:
            continue
        # generate deprecation warning
        get_deprecated(config, replacement_option, deprecated_option)
        # remove old config value replacing new one
        config[replacement_option] = config.pop(deprecated_option)
| |
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
import logging
from imcsdk.imcgenutils import *
from imcsdk.imccoreutils import IMC_PLATFORM, get_server_dn
from imcsdk.mometa.huu.HuuFirmwareUpdater import HuuFirmwareUpdater, \
HuuFirmwareUpdaterConsts
from imcsdk.mometa.huu.HuuFirmwareUpdateStatus import HuuFirmwareUpdateStatus
from imcsdk.mometa.top.TopSystem import TopSystem
from imcsdk.mometa.huu.HuuController import HuuController
# Module-level logger shared by all firmware upgrade helpers below.
log = logging.getLogger('imc')
def firmware_huu_update(handle, remote_share, share_type, remote_ip,
                        username="", password="", update_component="all",
                        stop_on_error="yes", timeout=240,
                        verify_update="yes", cimc_secure_boot="no",
                        server_id=1):
    """
    This method can be used to upgrade the cimc firmware

    Args:
        handle (ImcHandle)
        remote_share (string): Full path to the firmware file
        share_type (string): "nfs", "www", "cifs"
        remote_ip (string): IP address of the remote machine
        username (string): username
        password (string): password
        update_component (string): component to be updated.
            "all" for upgrading all components
            Refer release notes for individual component names
        stop_on_error (string): "yes", "no"
        timeout (int): Timeout value. Range is 30-240 mins.
        verify_update (string): "yes", "no"
        cimc_secure_boot (string): "yes", "no"
        server_id (int): Server id for which firmware is performed.
            This is relevant to C3260 platforms.

    Returns:
        HuuFirmwareUpdater object

    Examples:
        firmware_huu_update(handle=handle,
                            remote_ip=ip,
                            remote_share='nfsshare2/ucs-c460m4-huu-2.0.9l.iso',
                            share_type='nfs',
                            username=username,
                            password=password,
                            update_component='all',
                            stop_on_error='yes',
                            verify_update='no',
                            cimc_secure_boot='no',
                            timeout=60)
    """
    top_system = TopSystem()
    # Classic rack servers scope the HUU controller to the top system DN;
    # modular (C3260) platforms scope it to a specific server DN.
    if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
        parent_dn = top_system.dn
    elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
        parent_dn = get_server_dn(handle, str(server_id))
    # NOTE(review): any other platform value leaves parent_dn unbound and
    # raises NameError below — confirm whether more platforms need handling.
    huu = HuuController(parent_mo_or_dn=parent_dn)
    # ADMIN_STATE_TRIGGER starts the upgrade as soon as the MO is added.
    huu_firmware_updater = HuuFirmwareUpdater(
        parent_mo_or_dn=huu,
        remote_share=remote_share,
        map_type=share_type,
        remote_ip=remote_ip,
        username=username,
        password=password,
        update_component=update_component,
        admin_state=HuuFirmwareUpdaterConsts.ADMIN_STATE_TRIGGER,
        stop_on_error=stop_on_error,
        time_out=str(timeout),
        verify_update=verify_update,
        cimc_secure_boot=cimc_secure_boot)
    handle.add_mo(huu_firmware_updater)
    return huu_firmware_updater
def log_progress(msg="", status=""):
    """Log a timestamped progress message for the firmware upgrade.

    Args:
        msg (string): progress message
        status (string): current overall status, may be empty
    """
    # Pass lazy %-style arguments instead of pre-formatting with `%` so the
    # string is only built when the INFO level is actually enabled.
    log.info("%s: %s. %s", datetime.datetime.now(), msg, status)
def _has_upgrade_started(update):
return update.update_start_time == "" and update.update_end_time == ""
# Tracks if upgrade is over, not necessarily successful
def _has_upgrade_finished(update):
return update.update_end_time != "NA"
def _print_component_upgrade_summary(handle):
    """Log the per-component status of the HUU run at INFO level."""
    component_statuses = handle.query_classid("HuuUpdateComponentStatus")
    log.info("Component Update Summary:-")
    for status_obj in component_statuses:
        log.info("%20s: %s" % (status_obj.component, status_obj.update_status))
def firmware_huu_update_monitor(handle, timeout=60, interval=10, server_id=1):
    """
    This method monitors status of a firmware upgrade.

    Args:
        handle(ImcHandle)
        timeout(int): Timeout in minutes for monitor API.
        interval(int): frequency of monitoring in seconds
        server_id(int): Server id for monitoring firmware upgrade.
            This is relevant to C3260 platforms.

    Returns:
        None

    Examples:
        firmware_huu_update_monitor(handle, 60, 10)
    """
    current_status = []
    start = datetime.datetime.now()
    top_system = TopSystem()
    # Resolve the parent DN the same way firmware_huu_update() does.
    if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
        parent_dn = top_system.dn
    elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
        parent_dn = get_server_dn(handle, str(server_id))
    huu = HuuController(parent_mo_or_dn=parent_dn)
    huu_firmware_updater = HuuFirmwareUpdater(parent_mo_or_dn=huu.dn)
    update_obj = HuuFirmwareUpdateStatus(
        parent_mo_or_dn=huu_firmware_updater.dn)
    while True:
        try:
            update_obj = handle.query_dn(update_obj.dn)
            if _has_upgrade_started(update_obj):
                # _has_upgrade_started() is truthy while the upgrade has
                # NOT yet begun (both timestamps still empty).
                log_progress("Firmware upgrade is yet to start")
            if _has_upgrade_finished(update_obj):
                log_progress("Firmware upgrade has finished",
                             update_obj.overall_status)
                _print_component_upgrade_summary(handle)
                break
            elif update_obj.overall_status not in current_status:
                # Log each distinct overall status only once.
                log_progress("Firmware Upgrade is still running",
                             update_obj.overall_status)
                current_status.append(update_obj.overall_status)
            time.sleep(interval)
            secs = (datetime.datetime.now() - start).total_seconds()
            if int(secs / 60) > timeout:
                log_progress("Monitor API timeout",
                             "rerun firmware_huu_update_monitor")
                break
        except Exception:
            # The IMC can reboot mid-upgrade and drop the session. Was a
            # bare `except:`, which also swallowed KeyboardInterrupt and
            # SystemExit, making the monitor un-interruptible; catch
            # Exception and re-validate the connection instead.
            _validate_connection(handle)
def _validate_connection(handle, timeout=15 * 60):
"""
Monitors IMC connection, if connection exists return True, else False
Args:
handle (ImcHandle)
timeout (number): timeout in seconds
Returns:
True/False(bool)
Raises:
Exception if unable to connect to IMC
"""
connected = False
start = datetime.datetime.now()
while not connected:
try:
# If the session is already established,
# this will validate the session
connected = handle.login()
except Exception as e:
# IMC may been in the middle of activation,
# hence connection would fail
log.debug("Login to IMC failed: %s", str(e))
if not connected:
try:
log.debug("Login to IMC, elapsed time %ds",
(datetime.datetime.now() - start).total_seconds())
handle.login(force=True)
log.debug("Login successful")
connected = True
except:
log.debug("Login failed. Sleeping for 60 seconds")
time.sleep(60)
if (datetime.datetime.now() - start).total_seconds() > timeout:
raise Exception("TimeOut: Unable to login to IMC")
return connected
| |
import os
import signal
import sys
import numpy as np
import pytest
import ray
from ray.test_utils import (
wait_for_condition,
wait_for_pid_to_exit,
)
# signal.SIGKILL does not exist on Windows; use SIGTERM there as the closest
# available substitute for force-killing processes in these tests.
SIGKILL = signal.SIGKILL if sys.platform != "win32" else signal.SIGTERM
def test_cached_object(ray_start_cluster):
    """Fetch an object after its producer node died.

    A copy was cached on node2 by the first dependent_task, so the final
    ray.get must still succeed once the cluster notices the dead node.
    """
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
        "object_timeout_milliseconds": 200,
    }
    cluster = ray_start_cluster
    # Head node with no resources.
    cluster.add_node(num_cpus=0, _system_config=config)
    ray.init(address=cluster.address)
    # Node to place the initial object.
    node_to_kill = cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    cluster.add_node(
        num_cpus=1, resources={"node2": 1}, object_store_memory=10**8)
    cluster.wait_for_nodes()
    @ray.remote
    def large_object():
        return np.zeros(10**7, dtype=np.uint8)
    @ray.remote
    def dependent_task(x):
        return
    obj = large_object.options(resources={"node1": 1}).remote()
    # Running dependent_task on node2 pulls (caches) a copy of obj there.
    ray.get(dependent_task.options(resources={"node2": 1}).remote(obj))
    cluster.remove_node(node_to_kill, allow_graceful=False)
    cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    wait_for_condition(
        lambda: not all(node["Alive"] for node in ray.nodes()), timeout=10)
    # Submit more large objects on node2 — presumably to create object-store
    # pressure on the cached copy; confirm intent.
    for _ in range(20):
        large_object.options(resources={"node2": 1}).remote()
    ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize("reconstruction_enabled", [False, True])
def test_reconstruction_cached_dependency(ray_start_cluster,
                                          reconstruction_enabled):
    """Losing the output of `chain` (produced on the killed node1).

    With reconstruction enabled the final fetch succeeds; otherwise it
    raises RayTaskError wrapping ObjectLostError.
    """
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
        "object_timeout_milliseconds": 200,
    }
    # Workaround to reset the config to the default value.
    if not reconstruction_enabled:
        config["lineage_pinning_enabled"] = 0
    cluster = ray_start_cluster
    # Head node with no resources.
    cluster.add_node(
        num_cpus=0,
        _system_config=config,
        enable_object_reconstruction=reconstruction_enabled)
    ray.init(address=cluster.address)
    # Node to place the initial object.
    node_to_kill = cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    cluster.add_node(
        num_cpus=1, resources={"node2": 1}, object_store_memory=10**8)
    cluster.wait_for_nodes()
    @ray.remote(max_retries=0)
    def large_object():
        return np.zeros(10**7, dtype=np.uint8)
    @ray.remote
    def chain(x):
        return x
    @ray.remote
    def dependent_task(x):
        return
    obj = large_object.options(resources={"node2": 1}).remote()
    obj = chain.options(resources={"node1": 1}).remote(obj)
    ray.get(dependent_task.options(resources={"node1": 1}).remote(obj))
    cluster.remove_node(node_to_kill, allow_graceful=False)
    cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    wait_for_condition(
        lambda: not all(node["Alive"] for node in ray.nodes()), timeout=10)
    for _ in range(20):
        large_object.options(resources={"node2": 1}).remote()
    if reconstruction_enabled:
        ray.get(dependent_task.remote(obj))
    else:
        with pytest.raises(ray.exceptions.RayTaskError) as e:
            ray.get(dependent_task.remote(obj))
        with pytest.raises(ray.exceptions.ObjectLostError):
            raise e.as_instanceof_cause()
@pytest.mark.parametrize("reconstruction_enabled", [False, True])
def test_basic_reconstruction(ray_start_cluster, reconstruction_enabled):
    """Task output lost with its node.

    With reconstruction (and max_retries=1) the task is re-executed and the
    fetch succeeds; otherwise the fetch raises RayTaskError wrapping
    ObjectLostError.
    """
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
        "object_timeout_milliseconds": 200,
    }
    # Workaround to reset the config to the default value.
    if not reconstruction_enabled:
        config["lineage_pinning_enabled"] = 0
    cluster = ray_start_cluster
    # Head node with no resources.
    cluster.add_node(
        num_cpus=0,
        _system_config=config,
        enable_object_reconstruction=reconstruction_enabled)
    ray.init(address=cluster.address)
    # Node to place the initial object.
    node_to_kill = cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    cluster.add_node(
        num_cpus=1, resources={"node2": 1}, object_store_memory=10**8)
    cluster.wait_for_nodes()
    @ray.remote(max_retries=1 if reconstruction_enabled else 0)
    def large_object():
        return np.zeros(10**7, dtype=np.uint8)
    @ray.remote
    def dependent_task(x):
        return
    obj = large_object.options(resources={"node1": 1}).remote()
    ray.get(dependent_task.options(resources={"node1": 1}).remote(obj))
    # Kill the node holding the only copy, then bring up a replacement with
    # the same custom resource.
    cluster.remove_node(node_to_kill, allow_graceful=False)
    cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    if reconstruction_enabled:
        ray.get(dependent_task.remote(obj))
    else:
        with pytest.raises(ray.exceptions.RayTaskError) as e:
            ray.get(dependent_task.remote(obj))
        with pytest.raises(ray.exceptions.ObjectLostError):
            raise e.as_instanceof_cause()
@pytest.mark.parametrize("reconstruction_enabled", [False, True])
def test_basic_reconstruction_put(ray_start_cluster, reconstruction_enabled):
    """Behavior of a `ray.put` object and a task result after node loss.

    With reconstruction enabled the task result remains fetchable; without
    it the earlier-fetched copy may survive or may have been evicted, so
    ObjectLostError is tolerated.
    """
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
        "object_timeout_milliseconds": 200,
    }
    # Workaround to reset the config to the default value.
    if not reconstruction_enabled:
        config["lineage_pinning_enabled"] = 0
    cluster = ray_start_cluster
    # Head node with no resources.
    cluster.add_node(
        num_cpus=0,
        _system_config=config,
        enable_object_reconstruction=reconstruction_enabled)
    ray.init(address=cluster.address)
    # Node to place the initial object.
    node_to_kill = cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    cluster.add_node(
        num_cpus=1, resources={"node2": 1}, object_store_memory=10**8)
    cluster.wait_for_nodes()
    @ray.remote(max_retries=1 if reconstruction_enabled else 0)
    def large_object():
        return np.zeros(10**7, dtype=np.uint8)
    @ray.remote
    def dependent_task(x):
        return x
    obj = ray.put(np.zeros(10**7, dtype=np.uint8))
    result = dependent_task.options(resources={"node1": 1}).remote(obj)
    ray.get(result)
    del obj
    cluster.remove_node(node_to_kill, allow_graceful=False)
    cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    # Put more large objects — presumably to pressure any surviving local
    # copy of `result` out of the store; confirm intent.
    for _ in range(20):
        ray.put(np.zeros(10**7, dtype=np.uint8))
    if reconstruction_enabled:
        ray.get(result)
    else:
        # The copy that we fetched earlier may still be local or it may have
        # been evicted.
        try:
            ray.get(result)
        except ray.exceptions.ObjectLostError:
            pass
@pytest.mark.parametrize("reconstruction_enabled", [False, True])
def test_basic_reconstruction_actor_task(ray_start_cluster,
                                         reconstruction_enabled):
    """Lost actor-task output.

    With reconstruction and infinite task retries the restartable actor
    re-executes the task; otherwise the fetch raises. Either way the actor
    handle must remain usable afterwards.
    """
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
        "object_timeout_milliseconds": 200,
    }
    # Workaround to reset the config to the default value.
    if not reconstruction_enabled:
        config["lineage_pinning_enabled"] = 0
    cluster = ray_start_cluster
    # Head node with no resources.
    cluster.add_node(
        num_cpus=0,
        _system_config=config,
        enable_object_reconstruction=reconstruction_enabled)
    ray.init(address=cluster.address)
    # Node to place the initial object.
    node_to_kill = cluster.add_node(
        num_cpus=1, resources={"node1": 2}, object_store_memory=10**8)
    cluster.add_node(
        num_cpus=1, resources={"node2": 1}, object_store_memory=10**8)
    cluster.wait_for_nodes()
    @ray.remote(
        max_restarts=-1,
        max_task_retries=-1 if reconstruction_enabled else 0,
        resources={"node1": 1},
        num_cpus=0)
    class Actor:
        def __init__(self):
            pass
        def large_object(self):
            return np.zeros(10**7, dtype=np.uint8)
        def pid(self):
            return os.getpid()
    @ray.remote
    def dependent_task(x):
        return
    a = Actor.remote()
    pid = ray.get(a.pid.remote())
    obj = a.large_object.remote()
    ray.get(dependent_task.options(resources={"node1": 1}).remote(obj))
    # Workaround to kill the actor process too since there is a bug where the
    # actor's plasma client hangs after the plasma store has exited.
    os.kill(pid, SIGKILL)
    cluster.remove_node(node_to_kill, allow_graceful=False)
    cluster.add_node(
        num_cpus=1, resources={"node1": 2}, object_store_memory=10**8)
    wait_for_pid_to_exit(pid)
    if reconstruction_enabled:
        ray.get(dependent_task.remote(obj))
    else:
        with pytest.raises(ray.exceptions.RayTaskError) as e:
            ray.get(dependent_task.remote(obj))
        with pytest.raises(ray.exceptions.ObjectLostError):
            raise e.as_instanceof_cause()
    # Make sure the actor handle is still usable.
    pid = ray.get(a.pid.remote())
@pytest.mark.skipif(sys.platform == "win32", reason="Test failing on Windows.")
@pytest.mark.parametrize("reconstruction_enabled", [False, True])
def test_basic_reconstruction_actor_constructor(ray_start_cluster,
                                                reconstruction_enabled):
    """Actor whose constructor argument is lost along with its node.

    After the actor process and producer node are killed, the actor restart
    depends on the lost constructor argument; the test probes until the
    restart resolves, then checks the fetch outcome per mode.
    """
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
        "object_timeout_milliseconds": 200,
    }
    # Workaround to reset the config to the default value.
    if not reconstruction_enabled:
        config["lineage_pinning_enabled"] = 0
    cluster = ray_start_cluster
    # Head node with no resources.
    cluster.add_node(
        num_cpus=0,
        _system_config=config,
        enable_object_reconstruction=reconstruction_enabled)
    ray.init(address=cluster.address)
    # Node to place the initial object.
    node_to_kill = cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    cluster.add_node(
        num_cpus=1, resources={"node2": 1}, object_store_memory=10**8)
    cluster.wait_for_nodes()
    @ray.remote(max_retries=1 if reconstruction_enabled else 0)
    def large_object():
        return np.zeros(10**7, dtype=np.uint8)
    # Both the constructor and a method depend on the large object.
    @ray.remote(max_restarts=-1)
    class Actor:
        def __init__(self, x):
            pass
        def dependent_task(self, x):
            return
        def pid(self):
            return os.getpid()
    obj = large_object.options(resources={"node1": 1}).remote()
    a = Actor.options(resources={"node1": 1}).remote(obj)
    ray.get(a.dependent_task.remote(obj))
    pid = ray.get(a.pid.remote())
    # Workaround to kill the actor process too since there is a bug where the
    # actor's plasma client hangs after the plasma store has exited.
    os.kill(pid, SIGKILL)
    cluster.remove_node(node_to_kill, allow_graceful=False)
    cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    wait_for_pid_to_exit(pid)
    # Wait for the actor to restart.
    def probe():
        # RayActorError means the restart has not resolved yet; a task or
        # object-lost error means it resolved (successfully or not).
        try:
            ray.get(a.dependent_task.remote(obj))
            return True
        except ray.exceptions.RayActorError:
            return False
        except (ray.exceptions.RayTaskError, ray.exceptions.ObjectLostError):
            return True
    wait_for_condition(probe)
    if reconstruction_enabled:
        ray.get(a.dependent_task.remote(obj))
    else:
        with pytest.raises(ray.exceptions.RayTaskError) as e:
            x = a.dependent_task.remote(obj)
            print(x)
            ray.get(x)
        with pytest.raises(ray.exceptions.ObjectLostError):
            raise e.as_instanceof_cause()
@pytest.mark.parametrize("reconstruction_enabled", [False, True])
def test_multiple_downstream_tasks(ray_start_cluster, reconstruction_enabled):
    """One lost upstream object fanned out to several `chain` outputs.

    Every downstream result must be fetchable when reconstruction is on;
    otherwise at least one fetch raises RayTaskError/ObjectLostError.
    """
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
        "object_timeout_milliseconds": 200,
    }
    # Workaround to reset the config to the default value.
    if not reconstruction_enabled:
        config["lineage_pinning_enabled"] = 0
    cluster = ray_start_cluster
    # Head node with no resources.
    cluster.add_node(
        num_cpus=0,
        _system_config=config,
        enable_object_reconstruction=reconstruction_enabled)
    ray.init(address=cluster.address)
    # Node to place the initial object.
    node_to_kill = cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    cluster.add_node(
        num_cpus=1, resources={"node2": 1}, object_store_memory=10**8)
    cluster.wait_for_nodes()
    @ray.remote(max_retries=1 if reconstruction_enabled else 0)
    def large_object():
        return np.zeros(10**7, dtype=np.uint8)
    @ray.remote
    def chain(x):
        return x
    @ray.remote
    def dependent_task(x):
        return
    obj = large_object.options(resources={"node2": 1}).remote()
    downstream = [chain.remote(obj) for _ in range(4)]
    for obj in downstream:
        ray.get(dependent_task.options(resources={"node1": 1}).remote(obj))
    cluster.remove_node(node_to_kill, allow_graceful=False)
    cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    if reconstruction_enabled:
        for obj in downstream:
            ray.get(dependent_task.options(resources={"node1": 1}).remote(obj))
    else:
        with pytest.raises(ray.exceptions.RayTaskError) as e:
            for obj in downstream:
                ray.get(
                    dependent_task.options(resources={
                        "node1": 1
                    }).remote(obj))
        with pytest.raises(ray.exceptions.ObjectLostError):
            raise e.as_instanceof_cause()
@pytest.mark.parametrize("reconstruction_enabled", [False, True])
def test_reconstruction_chain(ray_start_cluster, reconstruction_enabled):
    """Reconstruction through a 20-deep chain of dependent tasks.

    The whole lineage must be replayed when reconstruction is on; the fetch
    raises otherwise.
    """
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
        "object_timeout_milliseconds": 200,
    }
    # Workaround to reset the config to the default value.
    if not reconstruction_enabled:
        config["lineage_pinning_enabled"] = 0
    cluster = ray_start_cluster
    # Head node with no resources.
    cluster.add_node(
        num_cpus=0,
        _system_config=config,
        object_store_memory=10**8,
        enable_object_reconstruction=reconstruction_enabled)
    ray.init(address=cluster.address)
    node_to_kill = cluster.add_node(num_cpus=1, object_store_memory=10**8)
    cluster.wait_for_nodes()
    @ray.remote(max_retries=1 if reconstruction_enabled else 0)
    def large_object():
        return np.zeros(10**7, dtype=np.uint8)
    @ray.remote
    def chain(x):
        return x
    @ray.remote
    def dependent_task(x):
        return x
    obj = large_object.remote()
    for _ in range(20):
        obj = chain.remote(obj)
    ray.get(dependent_task.remote(obj))
    cluster.remove_node(node_to_kill, allow_graceful=False)
    cluster.add_node(num_cpus=1, object_store_memory=10**8)
    if reconstruction_enabled:
        ray.get(dependent_task.remote(obj))
    else:
        with pytest.raises(ray.exceptions.RayTaskError) as e:
            ray.get(dependent_task.remote(obj))
        with pytest.raises(ray.exceptions.ObjectLostError):
            raise e.as_instanceof_cause()
def test_reconstruction_stress(ray_start_cluster):
    """Stress test: reconstruct ~1000 lost objects after their node dies."""
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
        "max_direct_call_object_size": 100,
        "task_retry_delay_ms": 100,
        "object_timeout_milliseconds": 200,
    }
    cluster = ray_start_cluster
    # Head node with no resources.
    cluster.add_node(
        num_cpus=0, _system_config=config, enable_object_reconstruction=True)
    ray.init(address=cluster.address)
    # Node to place the initial object.
    node_to_kill = cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)
    cluster.add_node(
        num_cpus=1, resources={"node2": 1}, object_store_memory=10**8)
    cluster.wait_for_nodes()

    @ray.remote
    def large_object():
        return np.zeros(10**5, dtype=np.uint8)

    @ray.remote
    def dependent_task(x):
        return

    # Warm up the producer/consumer path a few times.
    for _ in range(3):
        ref = large_object.options(resources={"node1": 1}).remote()
        ray.get(dependent_task.options(resources={"node2": 1}).remote(ref))

    producers = [
        large_object.options(resources={"node1": 1}).remote()
        for _ in range(1000)
    ]
    pending = [
        dependent_task.options(resources={"node2": 1}).remote(ref)
        for ref in producers
    ]

    # Lose the node holding the source objects, then bring a replacement.
    cluster.remove_node(node_to_kill, allow_graceful=False)
    node_to_kill = cluster.add_node(
        num_cpus=1, resources={"node1": 1}, object_store_memory=10**8)

    for i, ref in enumerate(pending):
        ray.get(ref)
        print(i)
if __name__ == "__main__":
    # Allow running this test module directly; exit with pytest's status.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
| |
import json
from flask import g
from sci.utils import random_sha1
from jobserver.utils import get_ts
from jobserver.recipe import Recipe
from jobserver.db import BUILD_HISTORY
# Redis key templates; '%s' is filled with a job name, build uuid or
# session id respectively.
KEY_JOB_BUILDS = 'job:builds:%s'  # list of a job's build uuids, in order
KEY_BUILD = 'build:%s'  # hash holding a build's fields
KEY_BUILD_SESSIONS = 'sessions:%s'  # a build's sessions (not used below)
KEY_SESSION = 'session:%s'  # hash holding a session's fields

# Session life-cycle states.
# The session is created, but not yet scheduled to run
SESSION_STATE_NEW = 'new'
# The session is scheduled to be handled by the backend
SESSION_STATE_TO_BACKEND = 'to-backend'
# No agent could process the session, so it's queued and awaiting an agent
SESSION_STATE_QUEUED = 'queued'
# The session has been dispatched to a agent, but it has not yet ack'ed.
SESSION_STATE_TO_AGENT = 'to-agent'
# The session has been reported to be running
SESSION_STATE_RUNNING = 'running'
# The agent has finished (successfully, or with errors) - see RESULT_*
SESSION_STATE_DONE = 'done'

# Final result values for sessions and builds.
RESULT_UNKNOWN = 'unknown'
RESULT_SUCCESS = 'success'
RESULT_FAILED = 'failed'
RESULT_ERROR = 'error'
RESULT_ABORTED = 'aborted'

# Keep at most this many entries on the global build-history list.
BUILD_HISTORY_LIMIT = 100
class Build(object):
    """A single build of a job, persisted as a redis hash under KEY_BUILD.

    The mutable collection fields (``parameters``, ``artifacts``) are
    stored JSON-encoded in redis and decoded again by :meth:`load`.
    """

    def __init__(self, build_uuid, **kwargs):
        """Create a build from its uuid and stored/initial field values.

        :raises KeyError: if one of the required fields (job_name,
            job_ref, recipe, recipe_ref) is missing from ``kwargs``.
        """
        self._uuid = build_uuid
        # Required fields.
        self.job_name = kwargs['job_name']
        self.job_ref = kwargs['job_ref']
        self.recipe = kwargs['recipe']
        self.recipe_ref = kwargs['recipe_ref']
        # Optional fields with defaults.
        self.number = kwargs.get('number', 0)
        self.build_id = kwargs.get('build_id', '')
        self.description = kwargs.get('description', '')
        self.created = kwargs.get('created', get_ts())
        self.next_sess_id = kwargs.get('next_sess_id', 0)
        self.ss_token = kwargs.get('ss_token', 'SS' + build_uuid)
        self.parameters = kwargs.get('parameters', {})
        self.artifacts = kwargs.get('artifacts', [])
        self.state = kwargs.get('state', SESSION_STATE_NEW)
        self.result = kwargs.get('result', RESULT_UNKNOWN)

    @property
    def uuid(self):
        """The build's unique id (read-only)."""
        return self._uuid

    def as_dict(self):
        """Return all persisted fields as a plain dict (values unencoded)."""
        return dict(job_name=self.job_name,
                    job_ref=self.job_ref,
                    recipe=self.recipe,
                    recipe_ref=self.recipe_ref,
                    number=self.number,
                    build_id=self.build_id,
                    description=self.description,
                    created=self.created,
                    next_sess_id=self.next_sess_id,
                    ss_token=self.ss_token,
                    parameters=self.parameters,
                    artifacts=self.artifacts,
                    state=self.state,
                    result=self.result)

    def save(self):
        """Write the build to redis, JSON-encoding collection fields."""
        build = self.as_dict()
        build['parameters'] = json.dumps(self.parameters)
        build['artifacts'] = json.dumps(self.artifacts)
        g.db.hmset(KEY_BUILD % self.uuid, build)

    @classmethod
    def set_description(cls, build_uuid, description, pipe=None):
        """Set the build's description (via g.db unless a pipe is given)."""
        if not pipe:
            pipe = g.db
        pipe.hset(KEY_BUILD % build_uuid, 'description', description)

    @classmethod
    def set_build_id(cls, build_uuid, build_id, pipe=None):
        """Set the human-readable build id (via g.db unless a pipe is given)."""
        if not pipe:
            pipe = g.db
        pipe.hset(KEY_BUILD % build_uuid, 'build_id', build_id)

    @classmethod
    def set_done(cls, build_uuid, result, pipe):
        """Record the final result and push the build onto the history list."""
        pipe.hmset(KEY_BUILD % build_uuid, {'result': result})
        # TODO: Clean the sessions, builds and such?
        # Add to build history
        pipe.lpush(BUILD_HISTORY, build_uuid)
        pipe.ltrim(BUILD_HISTORY, 0, BUILD_HISTORY_LIMIT - 1)

    @classmethod
    def set_state(cls, build_uuid, state, pipe):
        """Update the build's state field through the given pipe."""
        pipe.hmset(KEY_BUILD % build_uuid, {'state': state})

    @classmethod
    def get_job_name(cls, build_uuid):
        """Return the name of the job this build belongs to."""
        return g.db.hget(KEY_BUILD % build_uuid, 'job_name')

    @classmethod
    def create(cls, job, parameters=None, description=''):
        """Create, persist and number a new build for *job*.

        ``parameters`` defaults to a fresh empty dict; a ``None``
        sentinel is used to avoid a shared mutable default argument.
        """
        parameters = {} if parameters is None else parameters
        recipe_ref = job.recipe_ref
        if not recipe_ref:
            recipe_ref = Recipe.load(job.recipe).ref
        build_uuid = 'B%s' % random_sha1()
        build = cls(build_uuid,
                    job_name=job.name, job_ref=job.ref,
                    recipe=job.recipe, recipe_ref=recipe_ref,
                    parameters=parameters, description=description)
        build.save()
        # Create the main session
        create_session(g.db, build.uuid)
        # The job's build-list length doubles as the build number.
        number = g.db.rpush(KEY_JOB_BUILDS % job.name, build.uuid)
        build.number = number
        build.build_id = '%s-%d' % (job.name, number)
        g.db.hmset(KEY_BUILD % build.uuid, {'number': build.number,
                                            'build_id': build.build_id})
        return build

    @classmethod
    def add_artifact(cls, build_uuid, entry):
        """Atomically append *entry* to the build's artifact list."""
        key = KEY_BUILD % build_uuid

        def update(pipe):
            files = json.loads(pipe.hget(key, 'artifacts'))
            files.append(entry)
            pipe.multi()
            pipe.hset(key, 'artifacts', json.dumps(files))
        g.db.transaction(update, key)

    @classmethod
    def load(cls, build_uuid):
        """Load a build from redis; return None when it does not exist."""
        build = g.db.hgetall(KEY_BUILD % build_uuid)
        if not build:
            return None
        build['number'] = int(build['number'])
        build['parameters'] = json.loads(build['parameters'])
        build['artifacts'] = json.loads(build['artifacts'])
        return cls(build_uuid, **build)
def create_session(db, build_id, parent=None, labels=None,
                   run_info=None, state=SESSION_STATE_NEW):
    """Create a new session hash for a build and return its sequence number.

    The session id is ``<build_id>-<n>`` where ``n`` comes from the
    build's ``next_sess_id`` counter (session 0 is the main session).
    ``labels`` defaults to an empty list; a ``None`` sentinel avoids a
    shared mutable default argument.
    """
    labels = [] if labels is None else labels
    ri = run_info or {}
    args = ", ".join(ri.get('args', []))
    title = "%s(%s)" % (ri.get('step_name', 'main'), args)
    # NOTE(review): run_info (not ri) is serialized, so a missing
    # run_info is stored as JSON null - kept as-is for compatibility.
    session = dict(state=state,
                   title=title,
                   result=RESULT_UNKNOWN,
                   parent=parent,
                   labels=",".join(labels),
                   agent='',
                   run_info=json.dumps(run_info),
                   log_file='',
                   created=get_ts(),
                   started=0,
                   ended=0,
                   output=json.dumps(None))
    session_no = db.hincrby(KEY_BUILD % build_id, 'next_sess_id', 1) - 1
    session_id = '%s-%s' % (build_id, session_no)
    db.hmset(KEY_SESSION % session_id, session)
    return session_no
def get_session_title(session):
    """Return the display title stored in *session*, or '' when absent."""
    if 'title' in session:
        return session['title']
    return ''
def set_session_state(pipe, session_id, state):
    """Record *state* on the session; the main session (number 0) also
    mirrors its state onto the owning build."""
    build_id, seq_no = session_id.split('-')
    if int(seq_no) == 0:
        Build.set_state(build_id, state, pipe=pipe)
    pipe.hset(KEY_SESSION % session_id, 'state', state)
def get_session(db, session_id):
    """Load a session hash from redis and decode its typed fields.

    Returns None when the session does not exist.
    """
    session = db.hgetall(KEY_SESSION % session_id)
    if not session:
        return None
    session['labels'] = set(session['labels'].split(','))
    # ''.split(',') yields [''] - drop that marker.  discard() (unlike
    # remove()) does not raise KeyError when the label set is non-empty
    # and therefore contains no '' element.
    session['labels'].discard('')
    session['run_info'] = json.loads(session.get('run_info', '{}'))
    session['output'] = json.loads(session['output'])
    session['created'] = int(session.get('created', '0'))
    session['started'] = int(session.get('started', '0'))
    session['ended'] = int(session.get('ended', '0'))
    return session
def set_session_done(pipe, session_id, result, output, log_file):
    """Finish a session: DONE state plus result, output and end time."""
    set_session_state(pipe, session_id, SESSION_STATE_DONE)
    fields = {
        'result': result,
        'output': json.dumps(output),
        'log_file': log_file,
        'ended': get_ts(),
    }
    pipe.hmset(KEY_SESSION % session_id, fields)
def set_session_queued(pipe, session_id):
    # No agent was available; park the session until one registers.
    set_session_state(pipe, session_id, SESSION_STATE_QUEUED)

def set_session_to_agent(pipe, session_id, agent_id):
    # Session dispatched to an agent; remember which one handles it.
    set_session_state(pipe, session_id, SESSION_STATE_TO_AGENT)
    pipe.hmset(KEY_SESSION % session_id, {'agent': agent_id})

def set_session_running(pipe, session_id):
    # The agent reported the session running; record the start time.
    set_session_state(pipe, session_id, SESSION_STATE_RUNNING)
    pipe.hmset(KEY_SESSION % session_id, {'started': get_ts()})
def get_session_labels(db, session_id):
    """Return the session's labels as a set of non-empty strings."""
    raw = db.hget(KEY_SESSION % session_id, 'labels')
    labels = set(raw.split(','))
    # ''.split(',') == [''] - drop that marker.  discard() (unlike
    # remove()) does not raise KeyError when the stored label string is
    # non-empty and the set therefore lacks an '' element.
    labels.discard('')
    return labels
| |
"""
Utilities for working with nexus tokens.
Helps with generating tokens, validating tokens.
"""
import binascii
from collections import Mapping
from datetime import datetime
import hashlib
import json
import logging
import os
import re
import sys
import time
import urllib
import requests
import rsa
log = logging.getLogger()
class DictObj(Mapping):
    """Read-only mapping wrapper that also exposes keys as attributes."""

    def __init__(self, delegate):
        # All mapping operations are forwarded to this underlying dict.
        self.delegate = delegate

    def __len__(self):
        return len(self.delegate)

    def __iter__(self):
        return iter(self.delegate)

    def __getitem__(self, item):
        return self.delegate[item]

    def __getattr__(self, attrname):
        # Attribute access falls back to key lookup on the delegate.
        try:
            return self.delegate[attrname]
        except KeyError:
            raise AttributeError()
class InMemoryCache(object):
    """Hold signing certificates in a plain dict for the process lifetime."""

    def __init__(self):
        self.cache_map = {}

    def save_public_key(self, key_id, key):
        # Stored as the raw PEM text; parsed lazily on retrieval.
        self.cache_map[key_id] = key

    def has_public_key(self, key_id):
        return key_id in self.cache_map

    def get_public_key(self, key_id):
        pem = self.cache_map[key_id]
        return rsa.PublicKey.load_pkcs1(pem)
class FileSystemCache(object):
    """Persist signing certificates as .pem files in a cache directory."""

    def __init__(self, cache_path):
        self.cache_path = cache_path
        if not os.path.exists(self.cache_path):
            os.makedirs(self.cache_path)

    def _pem_path(self, key_id):
        # One PEM file per key id inside the cache directory.
        return os.path.join(self.cache_path, "{0}.pem".format(key_id))

    def save_public_key(self, key_id, key):
        with open(self._pem_path(key_id), 'w') as cert:
            cert.write(str(key))

    def has_public_key(self, key_id):
        return os.path.exists(self._pem_path(key_id))

    def get_public_key(self, key_id):
        with open(self._pem_path(key_id), 'r') as cert:
            return rsa.PublicKey.load_pkcs1(cert.read())
class LoggingCacheWrapper(object):
    """Decorate a certificate cache with debug logging on saves/reads."""

    def __init__(self, cache):
        self.cache = cache

    def save_public_key(self, key_id, key):
        log.debug("{0}: Saving public key {1}:{2}".format(
            self.cache.__class__.__name__, key_id, key))
        self.cache.save_public_key(key_id, key)

    def has_public_key(self, key_id):
        # Membership checks are frequent and cheap - not logged.
        return self.cache.has_public_key(key_id)

    def get_public_key(self, key_id):
        log.debug("{0}: Getting public key {1}".format(
            self.cache.__class__.__name__, key_id))
        return self.cache.get_public_key(key_id)
def validate_token(token, cache=InMemoryCache(), verify=True):
    """
    Given a token validate it and return its username ('un') field.

    Keyword arguments:
    :param token: A signed authentication token which was provided by Nexus
    :param cache: Signing-certificate cache (the default instance is
        deliberately shared across calls so keys are fetched only once)
    :param verify: Whether to verify TLS when fetching the signing subject
    :raises ValueError: If the signature is invalid or the token expired
    """
    unencoded_token = urllib.unquote(token)
    token_map = {}
    for entry in unencoded_token.split('|'):
        # Split on the first '=' only: values such as the SigningSubject
        # URL can themselves contain '=' characters.
        key, value = entry.split('=', 1)
        token_map[key] = value
    subject_hash = hashlib.md5(token_map['SigningSubject']).hexdigest()
    if not cache.has_public_key(subject_hash):
        key_struct = requests.get(token_map['SigningSubject'], verify=verify).content
        public_key = json.loads(key_struct)['pubkey']
        cache.save_public_key(subject_hash, public_key)
    public_key = cache.get_public_key(subject_hash)
    sig = token_map.pop('sig')
    # Everything before the trailing '|sig=...' is the signed payload.
    match = re.match(r'^(.+)\|sig=.*', unencoded_token)
    signed_data = match.group(1)
    try:
        sig = binascii.a2b_hex(sig)
        rsa.verify(signed_data, sig, public_key)
    except rsa.VerificationError:
        exc_value, exc_traceback = sys.exc_info()[1:]
        log.debug('RSA Verification error')
        log.debug(exc_value)
        log.debug(exc_traceback)
        raise ValueError('Invalid Signature')
    now = time.mktime(datetime.utcnow().timetuple())
    # The expiry field was parsed out of the token string; convert it
    # before comparing - a str/float comparison never triggered, so
    # expired tokens were previously accepted.
    if float(token_map['expiry']) < now:
        raise ValueError('TokenExpired')
    return token_map['un']
def request_access_token(client_id, client_secret,
        auth_code, auth_uri="https://graph.api.globusonline.org/token",
        verify=True):
    """
    Given an authorization code, request an access token.
    :param client_id: The client's api id
    :param client_secret: The client's api secret
    :param auth_code: The authorization code given to the resource owner by nexus
    :param auth_uri: The url of the authentication endpoint
    :param verify: Whether to verify the server's TLS certificate
    :returns: A dictionary of the access code response. This will include the
    fields: access_token, refresh_token and expires_in
    :raises TokenRequestError: If the request for an access token fails
    """
    payload = {
        'grant_type': 'authorization_code',
        'code': auth_code,
    }
    # Client credentials are sent via HTTP basic auth, per OAuth2.
    response = requests.post(auth_uri,
                             auth=(client_id, client_secret),
                             data=payload, verify=verify)
    if response.status_code == requests.codes.ok:
        return DictObj(response.json)
    raise TokenRequestError(response.json)
def get_token_refresh(client_id, client_secret,
        refresh_token, auth_uri="https://graph.api.globuscs.info/authorize",
        verify=True):
    """
    Update the access token using the refresh token from a previous request.
    :param client_id: The client's api id
    :param client_secret: The client's api secret
    :param refresh_token: The refresh token issued in a previous authentication.
    :param auth_uri: The url of the authentication endpoint.
    :returns: A DictObj wrapping the token response on success.
    :raises TokenRequestError: If the refresh request fails.
    """
    payload = dict(grant_type='refresh_token', refresh_token=refresh_token)
    credentials = (client_id, client_secret)
    response = requests.post(auth_uri, auth=credentials,
                             data=payload, verify=verify)
    if response.status_code != requests.codes.ok:
        raise TokenRequestError(response.json)
    return DictObj(response.json)
class TokenRequestError(Exception):
    """Raised when a token endpoint replies with a non-OK status.

    The (json) response body is available via the ``error`` attribute.
    """

    def __init__(self, error):
        super(TokenRequestError, self).__init__()
        self.error = error
| |
"""Qt implementation of _Renderer and GUI."""
# Authors: Guillaume Favelier <guillaume.favelier@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from contextlib import contextmanager
import pyvista
from pyvistaqt.plotting import FileDialog
from PyQt5.QtCore import Qt, pyqtSignal, QLocale
from PyQt5.QtGui import QIcon, QImage, QPixmap, QCursor
from PyQt5.QtWidgets import (QComboBox, QDockWidget, QDoubleSpinBox, QGroupBox,
QHBoxLayout, QLabel, QToolButton, QMenuBar,
QSlider, QSpinBox, QVBoxLayout, QWidget,
QSizePolicy, QScrollArea, QStyle, QProgressBar,
QStyleOptionSlider, QLayout, QCheckBox,
QButtonGroup, QRadioButton, QLineEdit,
QFileDialog)
from ._pyvista import _PyVistaRenderer
from ._pyvista import (_close_all, _close_3d_figure, _check_3d_figure, # noqa: F401,E501 analysis:ignore
_set_3d_view, _set_3d_title, _take_3d_screenshot) # noqa: F401,E501 analysis:ignore
from ._abstract import (_AbstractDock, _AbstractToolBar, _AbstractMenuBar,
_AbstractStatusBar, _AbstractLayout, _AbstractWidget,
_AbstractWindow, _AbstractMplCanvas, _AbstractPlayback,
_AbstractBrainMplCanvas, _AbstractMplInterface,
_AbstractWidgetList)
from ._utils import _init_qt_resources, _qt_disable_paint
from ..utils import logger
class _QtLayout(_AbstractLayout):
    """Qt implementation of the abstract layout interface."""

    def _layout_initialize(self, max_width):
        # Qt layouts need no global initialization.
        pass

    def _layout_add_widget(self, layout, widget, stretch=0):
        # Plain widgets take a stretch factor; sub-layouts must be added
        # through addLayout() instead.
        if not isinstance(widget, QLayout):
            layout.addWidget(widget, stretch)
            return
        layout.addLayout(widget)
class _QtDock(_AbstractDock, _QtLayout):
    """Qt implementation of the abstract dock interface.

    Builds a dock widget filled with labelled controls.  Each
    ``_dock_add_*`` helper returns the created control wrapped in
    ``_QtWidget`` (or ``_QtWidgetList`` for grouped controls), except
    ``_dock_add_group_box`` which returns the group's inner layout.
    """

    def _dock_initialize(self, window=None, name="Controls",
                         area="left"):
        # Attach the dock to the requested side of the main window.
        window = self._window if window is None else window
        qt_area = Qt.LeftDockWidgetArea if area == "left" \
            else Qt.RightDockWidgetArea
        self._dock, self._dock_layout = _create_dock_widget(
            self._window, name, qt_area)
        # Give the dock's side ownership of the bottom corner.
        if area == "left":
            window.setCorner(Qt.BottomLeftCorner, Qt.LeftDockWidgetArea)
        else:
            window.setCorner(Qt.BottomRightCorner, Qt.RightDockWidgetArea)

    def _dock_finalize(self):
        # Pin the width to the size hint and push controls to the top.
        self._dock.setMinimumSize(self._dock.sizeHint().width(), 0)
        self._dock_add_stretch(self._dock_layout)

    def _dock_show(self):
        self._dock.show()

    def _dock_hide(self):
        self._dock.hide()

    def _dock_add_stretch(self, layout=None):
        layout = self._dock_layout if layout is None else layout
        layout.addStretch()

    def _dock_add_layout(self, vertical=True):
        layout = QVBoxLayout() if vertical else QHBoxLayout()
        return layout

    def _dock_add_label(self, value, align=False, layout=None):
        layout = self._dock_layout if layout is None else layout
        widget = QLabel()
        if align:
            widget.setAlignment(Qt.AlignCenter)
        widget.setText(value)
        self._layout_add_widget(layout, widget)
        return _QtWidget(widget)

    def _dock_add_button(self, name, callback, layout=None):
        layout = self._dock_layout if layout is None else layout
        # If we want one with text instead of an icon, we should use
        # QPushButton(name)
        widget = QToolButton()
        widget.clicked.connect(callback)
        widget.setText(name)
        self._layout_add_widget(layout, widget)
        return _QtWidget(widget)

    def _dock_named_layout(self, name, layout=None, compact=True):
        # When `name` is given, wrap the control in a sub-layout with a
        # label; compact=True puts the label beside the control.
        layout = self._dock_layout if layout is None else layout
        if name is not None:
            hlayout = self._dock_add_layout(not compact)
            self._dock_add_label(
                value=name, align=not compact, layout=hlayout)
            self._layout_add_widget(layout, hlayout)
            layout = hlayout
        return layout

    def _dock_add_slider(self, name, value, rng, callback,
                         compact=True, double=False, layout=None):
        layout = self._dock_named_layout(name, layout, compact)
        # QFloatSlider emulates floats on top of Qt's int-only slider.
        slider_class = QFloatSlider if double else QSlider
        cast = float if double else int
        widget = slider_class(Qt.Horizontal)
        widget.setMinimum(cast(rng[0]))
        widget.setMaximum(cast(rng[1]))
        widget.setValue(cast(value))
        widget.valueChanged.connect(callback)
        self._layout_add_widget(layout, widget)
        return _QtWidget(widget)

    def _dock_add_check_box(self, name, value, callback, layout=None):
        layout = self._dock_layout if layout is None else layout
        widget = QCheckBox(name)
        widget.setChecked(value)
        widget.stateChanged.connect(callback)
        self._layout_add_widget(layout, widget)
        return _QtWidget(widget)

    def _dock_add_spin_box(self, name, value, rng, callback,
                           compact=True, double=True, step=None,
                           layout=None):
        layout = self._dock_named_layout(name, layout, compact)
        value = value if double else int(value)
        widget = QDoubleSpinBox() if double else QSpinBox()
        widget.setAlignment(Qt.AlignCenter)
        widget.setMinimum(rng[0])
        widget.setMaximum(rng[1])
        # Only fire valueChanged when editing is finished, not per key.
        widget.setKeyboardTracking(False)
        if step is None:
            # Default step: 1/20 of the range (at least 1 for int boxes).
            inc = (rng[1] - rng[0]) / 20.
            inc = max(int(round(inc)), 1) if not double else inc
            widget.setSingleStep(inc)
        else:
            widget.setSingleStep(step)
        widget.setValue(value)
        widget.valueChanged.connect(callback)
        self._layout_add_widget(layout, widget)
        return _QtWidget(widget)

    def _dock_add_combo_box(self, name, value, rng,
                            callback, compact=True, layout=None):
        layout = self._dock_named_layout(name, layout, compact)
        widget = QComboBox()
        widget.addItems(rng)
        widget.setCurrentText(value)
        widget.currentTextChanged.connect(callback)
        widget.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self._layout_add_widget(layout, widget)
        return _QtWidget(widget)

    def _dock_add_radio_buttons(self, value, rng, callback, vertical=True,
                                layout=None):
        layout = self._dock_layout if layout is None else layout
        group_layout = QVBoxLayout() if vertical else QHBoxLayout()
        group = QButtonGroup()
        for val in rng:
            button = QRadioButton(val)
            if val == value:
                button.setChecked(True)
            group.addButton(button)
            self._layout_add_widget(group_layout, button)

        def func(button):
            # Forward the clicked button's label to the user callback.
            callback(button.text())
        group.buttonClicked.connect(func)
        self._layout_add_widget(layout, group_layout)
        return _QtWidgetList(group)

    def _dock_add_group_box(self, name, layout=None):
        layout = self._dock_layout if layout is None else layout
        hlayout = QVBoxLayout()
        widget = QGroupBox(name)
        widget.setLayout(hlayout)
        self._layout_add_widget(layout, widget)
        # NOTE: returns the inner layout rather than a wrapped widget.
        return hlayout

    def _dock_add_text(self, name, value, placeholder, layout=None):
        layout = self._dock_layout if layout is None else layout
        widget = QLineEdit(value)
        widget.setPlaceholderText(placeholder)
        self._layout_add_widget(layout, widget)
        return _QtWidget(widget)

    def _dock_add_file_button(self, name, desc, func, value=None, save=False,
                              directory=False, input_text_widget=True,
                              placeholder="Type a file name", layout=None):
        layout = self._dock_layout if layout is None else layout
        if input_text_widget:
            # Text field echoing (and allowing edits of) the chosen name.
            hlayout = self._dock_add_layout(vertical=False)
            text_widget = self._dock_add_text(
                name=f"{name}_field",
                value=value,
                placeholder=placeholder,
                layout=hlayout,
            )

            def sync_text_widget(s):
                text_widget.set_value(s)
        else:
            hlayout = layout

        def callback():
            # Open the matching Qt file dialog for the requested mode.
            if directory:
                name = QFileDialog.getExistingDirectory()
            elif save:
                name = QFileDialog.getSaveFileName()
            else:
                name = QFileDialog.getOpenFileName()
            name = name[0] if isinstance(name, tuple) else name
            # handle the cancel button
            if len(name) == 0:
                return
            if input_text_widget:
                sync_text_widget(name)
            func(name)
        button_widget = self._dock_add_button(
            name=desc,
            callback=callback,
            layout=hlayout,
        )
        if input_text_widget:
            self._layout_add_widget(layout, hlayout)
            return _QtWidgetList([text_widget, button_widget])
        else:
            return _QtWidget(button_widget)
class QFloatSlider(QSlider):
    """Slider that handles float values.

    QSlider only supports integer values, so all values are multiplied
    by a fixed precision factor internally and divided again in the
    public accessors and the re-emitted valueChanged signal.
    """

    # Shadows QSlider's int-valued signal with a float-valued one.
    valueChanged = pyqtSignal(float)

    def __init__(self, ori, parent=None):
        """Initialize the slider."""
        super().__init__(ori, parent)
        self._opt = QStyleOptionSlider()
        self.initStyleOption(self._opt)
        # Cache the style-dependent groove and handle rectangles.
        self._gr = self.style().subControlRect(
            QStyle.CC_Slider, self._opt, QStyle.SC_SliderGroove, self)
        self._sr = self.style().subControlRect(
            QStyle.CC_Slider, self._opt, QStyle.SC_SliderHandle, self)
        # Integer steps per float unit (i.e. 4 decimal places).
        self._precision = 10000
        super().valueChanged.connect(self._convert)

    def _convert(self, value):
        # Re-emit the underlying int signal as the scaled float value.
        self.valueChanged.emit(value / self._precision)

    def minimum(self):
        """Get the minimum."""
        return super().minimum() / self._precision

    def setMinimum(self, value):
        """Set the minimum."""
        super().setMinimum(int(value * self._precision))

    def maximum(self):
        """Get the maximum."""
        return super().maximum() / self._precision

    def setMaximum(self, value):
        """Set the maximum."""
        super().setMaximum(int(value * self._precision))

    def value(self):
        """Get the current value."""
        return super().value() / self._precision

    def setValue(self, value):
        """Set the current value."""
        super().setValue(int(value * self._precision))

    # Adapted from:
    # https://stackoverflow.com/questions/52689047/moving-qslider-to-mouse-click-position # noqa: E501
    def mousePressEvent(self, event):
        """Add snap-to-location handling."""
        opt = QStyleOptionSlider()
        self.initStyleOption(opt)
        sr = self.style().subControlRect(
            QStyle.CC_Slider, opt, QStyle.SC_SliderHandle, self)
        # Clicks on the handle itself keep the default drag behavior.
        if (event.button() != Qt.LeftButton or sr.contains(event.pos())):
            super().mousePressEvent(event)
            return
        if self.orientation() == Qt.Vertical:
            half = (0.5 * sr.height()) + 0.5
            max_ = self.height()
            pos = max_ - event.y()
        else:
            half = (0.5 * sr.width()) + 0.5
            max_ = self.width()
            pos = event.x()
        # Map the click position to a 0..1 fraction of the groove length.
        max_ = max_ - 2 * half
        pos = min(max(pos - half, 0), max_) / max_
        val = self.minimum() + (self.maximum() - self.minimum()) * pos
        val = (self.maximum() - val) if self.invertedAppearance() else val
        self.setValue(val)
        event.accept()
        # Process afterward so it's seen as a drag
        super().mousePressEvent(event)
class _QtToolBar(_AbstractToolBar, _QtLayout):
    """Qt implementation of the abstract tool-bar interface."""

    def _tool_bar_load_icons(self):
        # Icons are compiled into Qt resources and referenced by ":/" paths.
        _init_qt_resources()
        self.icons = dict()
        self.icons["help"] = QIcon(":/help.svg")
        self.icons["play"] = QIcon(":/play.svg")
        self.icons["pause"] = QIcon(":/pause.svg")
        self.icons["reset"] = QIcon(":/reset.svg")
        self.icons["scale"] = QIcon(":/scale.svg")
        self.icons["clear"] = QIcon(":/clear.svg")
        self.icons["movie"] = QIcon(":/movie.svg")
        self.icons["restore"] = QIcon(":/restore.svg")
        self.icons["screenshot"] = QIcon(":/screenshot.svg")
        self.icons["visibility_on"] = QIcon(":/visibility_on.svg")
        self.icons["visibility_off"] = QIcon(":/visibility_off.svg")

    def _tool_bar_initialize(self, name="default", window=None):
        self.actions = dict()
        window = self._window if window is None else window
        self._tool_bar = window.addToolBar(name)
        self._tool_bar_layout = self._tool_bar.layout()

    def _tool_bar_add_button(self, name, desc, func, icon_name=None,
                             shortcut=None):
        # Reuse the icon registered under `name` unless overridden.
        icon_name = name if icon_name is None else icon_name
        icon = self.icons[icon_name]
        self.actions[name] = self._tool_bar.addAction(icon, desc, func)
        if shortcut is not None:
            self.actions[name].setShortcut(shortcut)

    def _tool_bar_update_button_icon(self, name, icon_name):
        self.actions[name].setIcon(self.icons[icon_name])

    def _tool_bar_add_text(self, name, value, placeholder):
        # No text widgets in the Qt tool bar.
        pass

    def _tool_bar_add_spacer(self):
        # An expanding empty widget pushes later actions to the right.
        spacer = QWidget()
        spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
        self._tool_bar.addWidget(spacer)

    def _tool_bar_add_file_button(self, name, desc, func, shortcut=None):
        def callback():
            return FileDialog(
                self.plotter.app_window,
                callback=func,
            )
        self._tool_bar_add_button(
            name=name,
            desc=desc,
            func=callback,
            shortcut=shortcut,
        )

    def _tool_bar_add_play_button(self, name, desc, func, shortcut=None):
        self._tool_bar_add_button(name, desc, func, None, shortcut)

    def _tool_bar_set_theme(self, theme):
        if theme == 'auto':
            theme = _detect_theme()
        if theme == 'dark':
            # Invert icon colors so they stay visible on dark styles.
            for icon_key in self.icons:
                icon = self.icons[icon_key]
                image = icon.pixmap(80).toImage()
                image.invertPixels(mode=QImage.InvertRgb)
                self.icons[icon_key] = QIcon(QPixmap.fromImage(image))
class _QtMenuBar(_AbstractMenuBar):
    """Qt implementation of the abstract menu-bar interface."""

    def _menu_initialize(self, window=None):
        self._menus = dict()
        self._menu_actions = dict()
        menu_bar = QMenuBar()
        # Keep the menu bar inside the window instead of the platform's
        # native (detached) menu bar.
        menu_bar.setNativeMenuBar(False)
        if window is None:
            window = self._window
        window.setMenuBar(menu_bar)
        self._menu_bar = menu_bar

    def _menu_add_submenu(self, name, desc):
        self._menus[name] = self._menu_bar.addMenu(desc)

    def _menu_add_button(self, menu_name, name, desc, func):
        self._menu_actions[name] = self._menus[menu_name].addAction(desc, func)
class _QtStatusBar(_AbstractStatusBar, _QtLayout):
    """Qt implementation of the abstract status-bar interface."""

    def _status_bar_initialize(self, window=None):
        if window is None:
            window = self._window
        self._status_bar = window.statusBar()
        self._status_bar_layout = self._status_bar.layout()

    def _status_bar_show_message(self, value, timeout=5000):
        # Messages disappear after `timeout` milliseconds.
        self._status_bar.showMessage(value, timeout)

    def _status_bar_add_label(self, value, stretch=0):
        label = QLabel(value)
        self._layout_add_widget(self._status_bar_layout, label, stretch)
        return _QtWidget(label)

    def _status_bar_add_progress_bar(self, stretch=0):
        bar = QProgressBar()
        self._layout_add_widget(self._status_bar_layout, bar, stretch)
        return _QtWidget(bar)

    def _status_bar_update(self):
        self._status_bar_layout.update()
class _QtPlayback(_AbstractPlayback):
    """Qt implementation of the abstract playback interface."""

    def _playback_initialize(self, func, timeout, value, rng,
                             time_widget, play_widget):
        # The plotter's own timer drives playback; the value/range and
        # widget arguments are unused by this backend.
        self.figure.plotter.add_callback(func, timeout)
class _QtMplInterface(_AbstractMplInterface):
    """Embed a matplotlib figure in Qt via the Qt5Agg canvas."""

    def _mpl_initialize(self):
        # Imported lazily so matplotlib/Qt are only required when used.
        from PyQt5 import QtWidgets
        from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
        self.canvas = FigureCanvasQTAgg(self.fig)
        # Let the canvas grow with its container in both directions.
        FigureCanvasQTAgg.setSizePolicy(
            self.canvas,
            QtWidgets.QSizePolicy.Expanding,
            QtWidgets.QSizePolicy.Expanding
        )
        FigureCanvasQTAgg.updateGeometry(self.canvas)
class _QtMplCanvas(_AbstractMplCanvas, _QtMplInterface):
    """Stand-alone Qt matplotlib canvas."""

    def __init__(self, width, height, dpi):
        super().__init__(width, height, dpi)
        self._mpl_initialize()
class _QtBrainMplCanvas(_AbstractBrainMplCanvas, _QtMplInterface):
    """Qt matplotlib canvas attached to a brain renderer."""

    def __init__(self, brain, width, height, dpi):
        super().__init__(brain, width, height, dpi)
        self._mpl_initialize()
        # Either float as its own top-level window or dock inside the
        # renderer's main window.
        if brain.separate_canvas:
            self.canvas.setParent(None)
        else:
            self.canvas.setParent(brain._renderer._window)
        self._connect()
class _QtWindow(_AbstractWindow):
    """Qt implementation of the abstract window interface.

    Wraps the pyvistaqt plotter's main window and VTK interactor.
    """

    def _window_initialize(self):
        super()._window_initialize()
        self._interactor = self.figure.plotter.interactor
        self._window = self.figure.plotter.app_window
        # Force the English locale — presumably so numeric widgets use
        # '.' as the decimal separator; TODO confirm.
        self._window.setLocale(QLocale(QLocale.Language.English))
        self._window.signal_close.connect(self._window_clean)

    def _window_clean(self):
        # Drop references so the Qt/VTK objects can be released.
        self.figure.plotter = None
        self._interactor = None

    def _window_close_connect(self, func):
        self._window.signal_close.connect(func)

    def _window_get_dpi(self):
        return self._window.windowHandle().screen().logicalDotsPerInch()

    def _window_get_size(self):
        w = self._interactor.geometry().width()
        h = self._interactor.geometry().height()
        return (w, h)

    def _window_get_simple_canvas(self, width, height, dpi):
        return _QtMplCanvas(width, height, dpi)

    def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces,
                              separate_canvas):
        # Remember the layout parameters; they are reused when the
        # minimum sizes are pinned in _window_ensure_minimum_sizes.
        w, h = self._window_get_mplcanvas_size(interactor_fraction)
        self._interactor_fraction = interactor_fraction
        self._show_traces = show_traces
        self._separate_canvas = separate_canvas
        self._mplcanvas = _QtBrainMplCanvas(
            brain, w, h, self._window_get_dpi())
        return self._mplcanvas

    def _window_adjust_mplcanvas_layout(self):
        # Dock the traces canvas at the bottom of the main window.
        canvas = self._mplcanvas.canvas
        self._mpl_dock, dock_layout = _create_dock_widget(
            self._window, "Traces", Qt.BottomDockWidgetArea)
        dock_layout.addWidget(canvas)

    def _window_get_cursor(self):
        return self._interactor.cursor()

    def _window_set_cursor(self, cursor):
        self._interactor.setCursor(cursor)

    def _window_new_cursor(self, name):
        # `name` is a Qt cursor-shape attribute name, e.g. "WaitCursor".
        return QCursor(getattr(Qt, name))

    @contextmanager
    def _window_ensure_minimum_sizes(self):
        """Temporarily pin minimum sizes so the initial show() lays out
        the interactor (and traces canvas) at the requested size, then
        undo the pinning and resize to the resulting geometry."""
        sz = self.figure.store['window_size']
        adjust_mpl = (self._show_traces and not self._separate_canvas)
        # plotter: pyvista.plotting.qt_plotting.BackgroundPlotter
        # plotter.interactor: vtk.qt.QVTKRenderWindowInteractor.QVTKRenderWindowInteractor -> QWidget # noqa
        # plotter.app_window: pyvista.plotting.qt_plotting.MainWindow -> QMainWindow # noqa
        # plotter.frame: QFrame with QVBoxLayout with plotter.interactor as centralWidget # noqa
        # plotter.ren_win: vtkXOpenGLRenderWindow
        self._interactor.setMinimumSize(*sz)
        # Lines like this are useful for debugging these issues:
        # print('*' * 80)
        # print(0, self._interactor.app_window.size().height(), self._interactor.size().height(), self._mpl_dock.widget().height(), self._mplcanvas.canvas.size().height()) # noqa
        if adjust_mpl:
            # Traces canvas height derived from the interactor fraction.
            mpl_h = int(round((sz[1] * self._interactor_fraction) /
                              (1 - self._interactor_fraction)))
            self._mplcanvas.canvas.setMinimumSize(sz[0], mpl_h)
            self._mpl_dock.widget().setMinimumSize(sz[0], mpl_h)
        try:
            yield  # show
        finally:
            # 1. Process events
            self._process_events()
            self._process_events()
            # 2. Get the window and interactor sizes that work
            win_sz = self._window.size()
            ren_sz = self._interactor.size()
            # 3. Undo the min size setting and process events
            self._interactor.setMinimumSize(0, 0)
            if adjust_mpl:
                self._mplcanvas.canvas.setMinimumSize(0, 0)
                self._mpl_dock.widget().setMinimumSize(0, 0)
            self._process_events()
            self._process_events()
            # 4. Compute the extra height required for dock decorations and add
            win_h = win_sz.height()
            if adjust_mpl:
                win_h += max(
                    self._mpl_dock.widget().size().height() - mpl_h, 0)
            # 5. Resize the window and interactor to the correct size
            # (not sure why, but this is required on macOS at least)
            self._interactor.window_size = (win_sz.width(), win_h)
            self._interactor.resize(ren_sz.width(), ren_sz.height())
            self._process_events()
            self._process_events()

    def _window_set_theme(self, theme):
        if theme == 'auto':
            theme = _detect_theme()
        if theme == 'dark':
            try:
                import qdarkstyle
            except ModuleNotFoundError:
                logger.info('For Dark-Mode "qdarkstyle" has to be installed! '
                            'You can install it with `pip install qdarkstyle`')
                stylesheet = None
            else:
                stylesheet = qdarkstyle.load_stylesheet()
        elif theme != 'light':
            # Any other value is treated as a path to a stylesheet file.
            with open(theme, 'r') as file:
                stylesheet = file.read()
        else:
            stylesheet = None
        self._window.setStyleSheet(stylesheet)
class _QtWidgetList(_AbstractWidgetList):
    """Wrap a group of Qt widgets behind a uniform list-like API."""

    def __init__(self, src):
        self._src = src
        # A QButtonGroup is expanded into its member buttons; anything
        # else is treated as an iterable of widgets.
        if isinstance(src, QButtonGroup):
            raw = src.buttons()
        else:
            raw = src
        self._widgets = [
            w if isinstance(w, _QtWidget) else _QtWidget(w) for w in raw
        ]

    def set_enabled(self, state):
        for w in self._widgets:
            w.set_enabled(state)

    def get_value(self, idx):
        return self._widgets[idx].get_value()

    def set_value(self, idx, value):
        # Buttons in a QButtonGroup are exclusive: selecting one is
        # expressed by checking it, regardless of the requested value.
        if isinstance(self._src, QButtonGroup):
            self._widgets[idx].set_value(True)
        else:
            self._widgets[idx].set_value(value)
class _QtWidget(_AbstractWidget):
    """Adapter exposing a widget-type-agnostic get/set API over one Qt widget.

    The concrete setter/getter is chosen by probing the wrapped widget for
    the relevant Qt method (spin boxes, combo boxes, check boxes, text).
    """

    def set_value(self, value):
        # Buttons are activated via click() so the usual Qt signals fire.
        if isinstance(self._widget, (QRadioButton, QToolButton)):
            self._widget.click()
            return
        for setter_name in ("setValue", "setCurrentText", "setChecked"):
            if hasattr(self._widget, setter_name):
                getattr(self._widget, setter_name)(value)
                return
        assert hasattr(self._widget, "setText")
        self._widget.setText(value)

    def get_value(self):
        widget = self._widget
        if hasattr(widget, "value"):
            return widget.value()
        if hasattr(widget, "currentText"):
            return widget.currentText()
        if hasattr(widget, "checkState"):
            # Qt reports a tri-state enum; collapse it to a plain bool.
            return bool(widget.checkState())
        assert hasattr(widget, "text")
        return widget.text()

    def set_range(self, rng):
        """Set the widget's (min, max) range."""
        lo, hi = rng[0], rng[1]
        self._widget.setRange(lo, hi)

    def show(self):
        self._widget.show()

    def hide(self):
        self._widget.hide()

    def set_enabled(self, state):
        self._widget.setEnabled(state)

    def update(self, repaint=True):
        self._widget.update()
        if repaint:
            self._widget.repaint()
class _Renderer(_PyVistaRenderer, _QtDock, _QtToolBar, _QtMenuBar,
                _QtStatusBar, _QtWindow, _QtPlayback):
    """PyVista renderer combined with the Qt window/dock/toolbar mixins."""

    _kind = 'qt'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._window_initialize()

    def show(self):
        """Show the window, sizing it while painting is suppressed."""
        super().show()
        # Disable painting during the minimum-size negotiation to avoid
        # visible flicker, then render every plotter once.
        with _qt_disable_paint(self.plotter), \
                self._window_ensure_minimum_sizes():
            self.plotter.app_window.show()
            self._update()
            for canvas in self._all_plotters:
                canvas.updateGeometry()
                canvas._render()
        self._process_events()
def _create_dock_widget(window, name, area):
    """Create a dock widget holding a scrollable, resizable layout.

    Returns the ``(dock, layout)`` pair; caller widgets go into *layout*.
    """
    dock = QDockWidget(name)
    dock.setAllowedAreas(area)
    dock.setTitleBarWidget(QLabel(name))
    # The dock hosts a scroll area whose child widget carries the layout.
    scroll = QScrollArea(dock)
    dock.setWidget(scroll)
    inner = QWidget(scroll)
    scroll.setWidget(inner)
    scroll.setWidgetResizable(True)
    window.addDockWidget(area, dock)
    dock_layout = QVBoxLayout()
    inner.setLayout(dock_layout)
    # Fix resize grip size
    # https://stackoverflow.com/a/65050468/2175965
    dock.setStyleSheet("QDockWidget { margin: 4px; }")
    return dock, dock_layout
def _detect_theme():
try:
import darkdetect
return darkdetect.theme().lower()
except Exception:
return 'light'
@contextmanager
def _testing_context(interactive):
    """Temporarily force the 3D backend into testing mode.

    Inside the context, testing mode is on and the off-screen/interactive
    flags reflect *interactive*; all three settings are restored on exit.
    """
    from . import renderer
    saved = (pyvista.OFF_SCREEN,
             renderer.MNE_3D_BACKEND_TESTING,
             renderer.MNE_3D_BACKEND_INTERACTIVE)
    renderer.MNE_3D_BACKEND_TESTING = True
    # Interactive sessions render on screen; non-interactive ones off screen.
    pyvista.OFF_SCREEN = not interactive
    renderer.MNE_3D_BACKEND_INTERACTIVE = bool(interactive)
    try:
        yield
    finally:
        (pyvista.OFF_SCREEN,
         renderer.MNE_3D_BACKEND_TESTING,
         renderer.MNE_3D_BACKEND_INTERACTIVE) = saved
| |
import copy
import json
import os.path as op
from inspect import getmro
import six
from six.moves.urllib.parse import urlencode
from .exceptions import ObjectDoesNotExist
from .mixins import ReplicatedMixin, ScalableMixin
from .query import Query
from .utils import obj_merge
class ObjectManager(object):
    """Descriptor backing ``SomeKind.objects(api)`` query access.

    It is accessed on the class (never on an instance) and remembers the
    owning class so ``__call__`` can build a query for it.
    """

    def __call__(self, api, namespace=None):
        # Namespaced kinds default to the namespace from the client config.
        if namespace is None:
            if NamespacedAPIObject in getmro(self.api_obj_class):
                namespace = api.config.namespace
        return Query(api, self.api_obj_class, namespace=namespace)

    def __get__(self, instance, owner):
        assert instance is None, "cannot invoke objects on resource object."
        self.api_obj_class = owner
        return self
@six.python_2_unicode_compatible
class APIObject(object):
    """Base wrapper for a single Kubernetes API resource instance."""
    objects = ObjectManager()  # class-level query descriptor
    base = None  # optional URL base for non-core API groups
    namespace = None  # cluster-scoped by default; see NamespacedAPIObject
    def __init__(self, api, obj):
        """Bind the raw resource dict *obj* to HTTP client *api*."""
        self.api = api
        self.set_obj(obj)
    def set_obj(self, obj):
        """Replace the wrapped dict, snapshotting it for later merge-patching."""
        self.obj = obj
        self._original_obj = copy.deepcopy(obj)
    def __repr__(self):
        return "<{kind} {name}>".format(kind=self.kind, name=self.name)
    def __str__(self):
        return self.name
    @property
    def name(self):
        """The object's ``metadata.name``."""
        return self.obj["metadata"]["name"]
    @property
    def metadata(self):
        """The full ``metadata`` mapping."""
        return self.obj["metadata"]
    @property
    def labels(self):
        """``metadata.labels`` mapping ({} when absent)."""
        return self.obj["metadata"].get("labels", {})
    @property
    def annotations(self):
        """``metadata.annotations`` mapping ({} when absent)."""
        return self.obj["metadata"].get("annotations", {})
    def api_kwargs(self, **kwargs):
        """Build request kwargs (url/base/version/namespace) for this object.
        ``obj_list=True`` targets the collection endpoint; ``operation``
        appends a subresource (e.g. ``log``); ``params`` becomes the query
        string. Any remaining kwargs pass through to the HTTP client.
        """
        kw = {}
        # Construct url for api request
        obj_list = kwargs.pop("obj_list", False)
        if obj_list:
            kw["url"] = self.endpoint
        else:
            operation = kwargs.pop("operation", "")
            # NOTE(review): os.path functions are used to join URL segments;
            # presumably fine on POSIX but would produce "\\" on Windows - verify.
            kw["url"] = op.normpath(op.join(self.endpoint, self.name, operation))
        params = kwargs.pop("params", None)
        if params is not None:
            query_string = urlencode(params)
            kw["url"] = "{}{}".format(kw["url"], "?{}".format(query_string) if query_string else "")
        if self.base:
            kw["base"] = self.base
        kw["version"] = self.version
        if self.namespace is not None:
            kw["namespace"] = self.namespace
        kw.update(kwargs)
        return kw
    def exists(self, ensure=False):
        """Return True if the object exists server-side.
        With ``ensure=True`` raise ObjectDoesNotExist instead of returning
        False; unexpected HTTP status codes raise via the API client.
        """
        r = self.api.get(**self.api_kwargs())
        if r.status_code not in {200, 404}:
            self.api.raise_for_status(r)
        if not r.ok:
            if ensure:
                raise ObjectDoesNotExist("{} does not exist.".format(self.name))
            else:
                return False
        return True
    def create(self):
        """POST the object to its collection endpoint; store the response."""
        r = self.api.post(**self.api_kwargs(data=json.dumps(self.obj), obj_list=True))
        self.api.raise_for_status(r)
        self.set_obj(r.json())
    def reload(self):
        """GET the current server-side state and replace the local copy."""
        r = self.api.get(**self.api_kwargs())
        self.api.raise_for_status(r)
        self.set_obj(r.json())
    def watch(self):
        """Return a watch query filtered to exactly this object by name."""
        return self.__class__.objects(
            self.api,
            namespace=self.namespace
        ).filter(field_selector={
            "metadata.name": self.name
        }).watch()
    def update(self):
        """PATCH local changes to the server as a JSON merge patch."""
        # obj_merge reconciles local edits against the original snapshot.
        self.obj = obj_merge(self.obj, self._original_obj)
        r = self.api.patch(**self.api_kwargs(
            headers={"Content-Type": "application/merge-patch+json"},
            data=json.dumps(self.obj),
        ))
        self.api.raise_for_status(r)
        self.set_obj(r.json())
    def delete(self):
        """DELETE the object; a 404 (already gone) is not treated as an error."""
        r = self.api.delete(**self.api_kwargs())
        if r.status_code != 404:
            self.api.raise_for_status(r)
class NamespacedAPIObject(APIObject):
    """API object that lives inside a namespace.

    Falls back to the client's configured namespace when the resource
    itself carries none.
    """

    @property
    def namespace(self):
        return self.obj["metadata"].get("namespace") or self.api.config.namespace
def object_factory(api, api_version, kind):
    """
    Dynamically builds a Python class for the given Kubernetes object in an API.
    For example:
        api = pykube.HTTPClient(...)
        NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")
    This enables construction of any Kubernetes object kind without explicit support
    from pykube.
    Currently, the HTTPClient passed to this function will not be bound to the returned type.
    It is planned to fix this, but in the mean time pass it as you would normally.

    :raises ValueError: if *kind* is not served under *api_version*.
    """
    resource_list = api.resource_list(api_version)
    resource = next((resource for resource in resource_list["resources"] if resource["kind"] == kind), None)
    # Fail with a clear message instead of the TypeError that subscripting
    # None would otherwise raise below.
    if resource is None:
        raise ValueError(
            "unknown kind {!r} in API version {!r}".format(kind, api_version))
    base = NamespacedAPIObject if resource["namespaced"] else APIObject
    return type(kind, (base,), {
        "version": api_version,
        "endpoint": resource["name"],
        "kind": kind
    })
class ConfigMap(NamespacedAPIObject):
    """Kubernetes ConfigMap (core v1)."""
    version = "v1"
    endpoint = "configmaps"
    kind = "ConfigMap"
class CronJob(NamespacedAPIObject):
    """Kubernetes CronJob."""
    # NOTE(review): batch/v2alpha1 is a pre-GA API group - confirm the target
    # clusters still serve it.
    version = "batch/v2alpha1"
    endpoint = "cronjobs"
    kind = "CronJob"
class DaemonSet(NamespacedAPIObject):
    """Kubernetes DaemonSet (extensions/v1beta1)."""
    version = "extensions/v1beta1"
    endpoint = "daemonsets"
    kind = "DaemonSet"
class Deployment(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
    """Kubernetes Deployment (extensions/v1beta1)."""
    version = "extensions/v1beta1"
    endpoint = "deployments"
    kind = "Deployment"
    @property
    def ready(self):
        """True once the latest generation is observed and all replicas are
        updated."""
        return (
            self.obj["status"]["observedGeneration"] >= self.obj["metadata"]["generation"] and
            self.obj["status"]["updatedReplicas"] == self.replicas
        )
    def rollout_undo(self, target_revision=None):
        """Produces same action as kubectl rollout undo deployment command.
        Input variable is revision to rollback to (in kubectl, --to-revision)
        """
        if target_revision is None:
            revision = {}  # empty rollbackTo selects the previous revision
        else:
            revision = {
                "revision": target_revision
            }
        params = {
            "kind": "DeploymentRollback",
            "apiVersion": self.version,
            "name": self.name,
            "rollbackTo": revision
        }
        kwargs = {
            "version": self.version,
            "namespace": self.namespace,
            "operation": "rollback",
        }
        # NOTE(review): uses the response's raise_for_status() here, unlike the
        # api.raise_for_status() helper used elsewhere - confirm intentional.
        r = self.api.post(**self.api_kwargs(data=json.dumps(params), **kwargs))
        r.raise_for_status()
        return r.text
class Endpoint(NamespacedAPIObject):
    """Kubernetes Endpoints resource (core v1)."""
    version = "v1"
    endpoint = "endpoints"
    kind = "Endpoint"
class Event(NamespacedAPIObject):
    """Kubernetes Event (core v1)."""
    version = "v1"
    endpoint = "events"
    kind = "Event"
class LimitRange(NamespacedAPIObject):
    """Kubernetes LimitRange (core v1)."""
    version = "v1"
    endpoint = "limitranges"
    kind = "LimitRange"
class ResourceQuota(NamespacedAPIObject):
    """Kubernetes ResourceQuota (core v1)."""
    version = "v1"
    endpoint = "resourcequotas"
    kind = "ResourceQuota"
class ServiceAccount(NamespacedAPIObject):
    """Kubernetes ServiceAccount (core v1)."""
    version = "v1"
    endpoint = "serviceaccounts"
    kind = "ServiceAccount"
class Ingress(NamespacedAPIObject):
    """Kubernetes Ingress (extensions/v1beta1)."""
    version = "extensions/v1beta1"
    endpoint = "ingresses"
    kind = "Ingress"
class ThirdPartyResource(APIObject):
    """Kubernetes ThirdPartyResource (cluster-scoped, extensions/v1beta1)."""
    version = "extensions/v1beta1"
    endpoint = "thirdpartyresources"
    kind = "ThirdPartyResource"
class Job(NamespacedAPIObject, ScalableMixin):
    """Kubernetes Job (batch/v1); "scaling" adjusts ``spec.parallelism``."""
    version = "batch/v1"
    endpoint = "jobs"
    kind = "Job"
    scalable_attr = "parallelism"
    @property
    def parallelism(self):
        """Desired number of concurrently running pods (``spec.parallelism``)."""
        return self.obj["spec"]["parallelism"]
    @parallelism.setter
    def parallelism(self, value):
        self.obj["spec"]["parallelism"] = value
class Namespace(APIObject):
    """Kubernetes Namespace (cluster-scoped, core v1)."""
    version = "v1"
    endpoint = "namespaces"
    kind = "Namespace"
class Node(APIObject):
    """Kubernetes Node (cluster-scoped, core v1)."""

    version = "v1"
    endpoint = "nodes"
    kind = "Node"

    @property
    def unschedulable(self):
        """Whether the node is cordoned (``spec.unschedulable``)."""
        return self.obj["spec"].get("unschedulable", False)

    @unschedulable.setter
    def unschedulable(self, value):
        # Persist the change immediately, mirroring kubectl cordon/uncordon.
        self.obj["spec"]["unschedulable"] = value
        self.update()

    def cordon(self):
        """Mark the node unschedulable (like ``kubectl cordon``)."""
        self.unschedulable = True

    def uncordon(self):
        """Mark the node schedulable again (like ``kubectl uncordon``)."""
        self.unschedulable = False
class Pod(NamespacedAPIObject):
    """Kubernetes Pod (core v1)."""
    version = "v1"
    endpoint = "pods"
    kind = "Pod"
    @property
    def ready(self):
        """True when the pod's ``Ready`` condition has status ``"True"``."""
        cs = self.obj["status"].get("conditions", [])
        condition = next((c for c in cs if c["type"] == "Ready"), None)
        return condition is not None and condition["status"] == "True"
    def logs(self, container=None, pretty=None, previous=False,
             since_seconds=None, since_time=None, timestamps=False,
             tail_lines=None, limit_bytes=None):
        """
        Produces the same result as calling kubectl logs pod/<pod-name>.
        Check parameters meaning at
        http://kubernetes.io/docs/api-reference/v1/operations/,
        part 'read log of the specified Pod'. The result is plain text.
        """
        log_call = "log"
        params = {}
        if container is not None:
            params["container"] = container
        if pretty is not None:
            params["pretty"] = pretty
        if previous:
            params["previous"] = "true"
        # since_seconds and since_time are mutually exclusive; if both are
        # given, both are silently dropped.
        if since_seconds is not None and since_time is None:
            params["sinceSeconds"] = int(since_seconds)
        elif since_time is not None and since_seconds is None:
            params["sinceTime"] = since_time
        if timestamps:
            params["timestamps"] = "true"
        if tail_lines is not None:
            params["tailLines"] = int(tail_lines)
        if limit_bytes is not None:
            params["limitBytes"] = int(limit_bytes)
        query_string = urlencode(params)
        log_call += "?{}".format(query_string) if query_string else ""
        kwargs = {
            "version": self.version,
            "namespace": self.namespace,
            "operation": log_call,
        }
        r = self.api.get(**self.api_kwargs(**kwargs))
        r.raise_for_status()
        return r.text
class ReplicationController(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
    """Kubernetes ReplicationController (core v1)."""
    version = "v1"
    endpoint = "replicationcontrollers"
    kind = "ReplicationController"
    @property
    def ready(self):
        """True once the latest generation is observed and all replicas are
        ready."""
        return (
            self.obj['status']['observedGeneration'] >= self.obj['metadata']['generation'] and
            self.obj['status']['readyReplicas'] == self.replicas
        )
class ReplicaSet(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
    """Kubernetes ReplicaSet (extensions/v1beta1)."""
    version = "extensions/v1beta1"
    endpoint = "replicasets"
    kind = "ReplicaSet"
class Secret(NamespacedAPIObject):
    """Kubernetes Secret (core v1)."""
    version = "v1"
    endpoint = "secrets"
    kind = "Secret"
class Service(NamespacedAPIObject):
    """Kubernetes Service (core v1)."""
    version = "v1"
    endpoint = "services"
    kind = "Service"
class PersistentVolume(APIObject):
    """Kubernetes PersistentVolume (cluster-scoped, core v1)."""
    version = "v1"
    endpoint = "persistentvolumes"
    kind = "PersistentVolume"
class PersistentVolumeClaim(NamespacedAPIObject):
    """Kubernetes PersistentVolumeClaim (core v1)."""
    version = "v1"
    endpoint = "persistentvolumeclaims"
    kind = "PersistentVolumeClaim"
class HorizontalPodAutoscaler(NamespacedAPIObject):
    """Kubernetes HorizontalPodAutoscaler (autoscaling/v1)."""
    version = "autoscaling/v1"
    endpoint = "horizontalpodautoscalers"
    kind = "HorizontalPodAutoscaler"
class PetSet(NamespacedAPIObject):
    """Kubernetes PetSet (apps/v1alpha1; predecessor of StatefulSet)."""
    version = "apps/v1alpha1"
    endpoint = "petsets"
    kind = "PetSet"
class StatefulSet(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
    """Kubernetes StatefulSet (apps/v1beta1)."""
    version = "apps/v1beta1"
    endpoint = "statefulsets"
    kind = "StatefulSet"
class Role(NamespacedAPIObject):
    """Kubernetes RBAC Role (rbac.authorization.k8s.io/v1alpha1)."""
    version = "rbac.authorization.k8s.io/v1alpha1"
    endpoint = "roles"
    kind = "Role"
class RoleBinding(NamespacedAPIObject):
    """Kubernetes RBAC RoleBinding (rbac.authorization.k8s.io/v1alpha1)."""
    version = "rbac.authorization.k8s.io/v1alpha1"
    endpoint = "rolebindings"
    kind = "RoleBinding"
class ClusterRole(APIObject):
    """Kubernetes RBAC ClusterRole (cluster-scoped)."""
    version = "rbac.authorization.k8s.io/v1alpha1"
    endpoint = "clusterroles"
    kind = "ClusterRole"
class ClusterRoleBinding(APIObject):
    """Kubernetes RBAC ClusterRoleBinding (cluster-scoped)."""
    version = "rbac.authorization.k8s.io/v1alpha1"
    endpoint = "clusterrolebindings"
    kind = "ClusterRoleBinding"
class PodSecurityPolicy(APIObject):
    """Kubernetes PodSecurityPolicy (cluster-scoped, extensions/v1beta1)."""
    version = "extensions/v1beta1"
    endpoint = "podsecuritypolicies"
    kind = "PodSecurityPolicy"
| |
#! /usr/env/python
"""
flow_director_dinf.py: provides the component FlowDirectorDINF.
Directs flow on raster grids only using the Dinfinity algorithm of
Tarboton 1997.
"""
from landlab.components.flow_director.flow_director_to_many import(
_FlowDirectorToMany)
from landlab.components.flow_director import flow_direction_dinf
from landlab import VoronoiDelaunayGrid
from landlab import (FIXED_VALUE_BOUNDARY, FIXED_GRADIENT_BOUNDARY,
BAD_INDEX_VALUE)
import numpy
class FlowDirectorDINF(_FlowDirectorToMany):
    """
    Flow direction on a raster grid by the D infinity method.

    Directs flow by the D infinity method (Tarboton, 1997). Each node is
    assigned two flow directions, toward the two neighboring nodes that are on
    the steepest subtriangle. Partitioning of flow is done based on the aspect
    of the subtriangle.

    Specifically, it stores as ModelGrid fields:

    - Node array of receivers (nodes that receive flow), or ITS OWN ID if
      there is no receiver: *'flow__receiver_nodes'*. This array is 2D, and is
      of dimension (number of nodes x max number of receivers).
    - Node array of flow proportions: *'flow__receiver_proportions'*. This
      array is 2D, and is of dimension (number of nodes x max number of
      receivers).
    - Node array of links carrying flow: *'flow__links_to_receiver_nodes'*.
      This array is 2D, and is of dimension (number of nodes x max number of
      receivers).
    - Node array of the steepest downhill receiver. *'flow__receiver_nodes'*
    - Node array of steepest downhill slope from each receiver:
      *'topographic__steepest_slope'*
    - Node array containing ID of steepest link that leads from each node to a
      receiver, or BAD_INDEX_VALUE if no link:
      *'flow__link_to_receiver_node'*
    - Boolean node array of all local lows: *'flow__sink_flag'*

    The primary method of this class is :func:`run_one_step`.

    Construction::

        FlowDirectorDINF(grid, surface='topographic__elevation')

    Parameters
    ----------
    grid : ModelGrid
        A grid.
    surface : field name at node or array of length node, optional
        The surface to direct flow across, default is field at node:
        topographic__elevation.

    Examples
    --------
    This method works for both raster and irregular grids. First we will look
    at a raster example, and then an irregular example.

    >>> import numpy as numpy
    >>> from landlab import RasterModelGrid
    >>> from landlab.components import FlowDirectorDINF
    >>> mg = RasterModelGrid((4,4), spacing=(1, 1))
    >>> mg.set_closed_boundaries_at_grid_edges(True, True, True, False)
    >>> _ = mg.add_field('topographic__elevation',
    ...                  mg.node_x**2 + mg.node_y**2,
    ...                  at = 'node')

    The DINF flow director can be used for raster grids only.

    >>> fd = FlowDirectorDINF(mg, 'topographic__elevation')
    >>> fd.surface_values.reshape(mg.shape)
    array([[ 0., 1., 4., 9.],
           [ 1., 2., 5., 10.],
           [ 4., 5., 8., 13.],
           [ 9., 10., 13., 18.]])
    >>> fd.run_one_step()

    Unlike flow directors that only direct flow to one node or to all
    downstream nodes, FlowDirectorDINF directs flow two nodes only. It stores
    the receiver information in a (number of nodes x 2) shape field at nodes.

    >>> mg.at_node['flow__receiver_nodes']
    array([[ 0, -1],
           [ 1, -1],
           [ 2, -1],
           [ 3, -1],
           [ 0, 1],
           [ 1, 0],
           [ 5, 1],
           [ 6, 2],
           [ 8, -1],
           [ 5, -1],
           [ 6, 5],
           [10, 6],
           [12, -1],
           [ 9, -1],
           [10, 9],
           [-1, 10]])

    It also stores the proportions of flow going to each receiver and the link
    on which the flow moves in at node arrays.

    >>> mg.at_node['flow__receiver_proportions'] # doctest: +NORMALIZE_WHITESPACE
    array([[ 1. , 0. ],
           [ 1. , 0. ],
           [ 1. , 0. ],
           [ 1. , 0. ],
           [ 1. , 0. ],
           [ 0. , 1. ],
           [ 0.59033447, 0.40966553],
           [ 0.74866817, 0.25133183],
           [ 1. , 0. ],
           [ 1. , 0. ],
           [ 0. , 1. ],
           [ 0.31191652, 0.68808348],
           [ 1. , 0. ],
           [ 1. , 0. ],
           [ 0.31191652, 0.68808348],
           [ 0. , 1. ]])
    >>> mg.at_node['flow__links_to_receiver_nodes']
    array([[-1, -1],
           [-1, -1],
           [-1, -1],
           [-1, -1],
           [ 3, 25],
           [ 4, 24],
           [ 8, 26],
           [ 9, 28],
           [-1, -1],
           [11, 30],
           [12, 32],
           [16, 34],
           [-1, -1],
           [18, 36],
           [19, 38],
           [20, 40]])

    Like flow directors that only direct flow to one downstream node,
    FlowDirectorDINF identifies and stores the steepest slope leading downhill
    from each node, the link carrying that flow, and the receiver receiving
    flow on that link.

    >>> mg.at_node['topographic__steepest_slope'] # doctest: +NORMALIZE_WHITESPACE
    array([ 0. , 0. , 0. , 0. , 1. ,
            1.41421356, 3. , 5. , 0. , 3. ,
            4.24264069, 5.65685425, 0. , 5. , 5.65685425,
            7.07106781])
    >>> mg.at_node['flow__link_to_receiver_node']
    array([-1, 0, 1, 2, 3, 24, 8, 9, -1, 11, 32, 34, -1, 18, 38, 40])
    >>> mg.at_node['flow__receiver_node']
    array([ 0, 0, 1, 2, 0, 0, 5, 6, 8, 5, 5, 6, 12, 9, 9, 10])

    Finally, FlowDirectorDINF identifies sinks, or local lows.

    >>> mg.at_node['flow__sink_flag']
    array([1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0], dtype=int8)

    The flow directors also have the ability to return the flow receiver nodes
    through a function called direct_flow()

    >>> fd = FlowDirectorDINF(mg, 'topographic__elevation')
    >>> fd.run_one_step()
    >>> receivers, proportions = fd.direct_flow()
    >>> receivers
    array([[ 0, -1],
           [ 1, -1],
           [ 2, -1],
           [ 3, -1],
           [ 0, 1],
           [ 1, 0],
           [ 5, 1],
           [ 6, 2],
           [ 8, -1],
           [ 5, -1],
           [ 6, 5],
           [10, 6],
           [12, -1],
           [ 9, -1],
           [10, 9],
           [-1, 10]])
    >>> proportions # doctest: +NORMALIZE_WHITESPACE
    array([[ 1. , 0. ],
           [ 1. , 0. ],
           [ 1. , 0. ],
           [ 1. , 0. ],
           [ 1. , 0. ],
           [ 0. , 1. ],
           [ 0.59033447, 0.40966553],
           [ 0.74866817, 0.25133183],
           [ 1. , 0. ],
           [ 1. , 0. ],
           [ 0. , 1. ],
           [ 0.31191652, 0.68808348],
           [ 1. , 0. ],
           [ 1. , 0. ],
           [ 0.31191652, 0.68808348],
           [ 0. , 1. ]])

    For each donor node (represented by each row) the proportions should sum to
    one.

    >>> proportions.sum(axis=1)
    array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
            1., 1., 1.])
    """

    _name = 'FlowDirectorDINF'

    def __init__(self, grid, surface='topographic__elevation'):
        """
        Initialize FlowDirectorDINF.

        Raises NotImplementedError for irregular (Voronoi) grids, which
        the D-infinity method does not support.
        """
        self.method = 'DINF'
        super(FlowDirectorDINF, self).__init__(grid, surface)
        self._is_Voroni = isinstance(self._grid, VoronoiDelaunayGrid)
        if self._is_Voroni:
            raise NotImplementedError('FlowDirectorDINF is not implemented'
                                      ' for irregular grids.')
        # D-infinity partitions flow between at most two receivers.
        self.max_receivers = 2
        self.updated_boundary_conditions()
        # Create the receiver, proportion, and receiver-link output fields
        # with the right (n_nodes x 2) shape.
        self.receivers = grid.add_field(
            'flow__receiver_nodes',
            BAD_INDEX_VALUE*numpy.ones((self._grid.number_of_nodes,
                                        self.max_receivers),
                                       dtype=int),
            at='node',
            dtype=int,
            noclobber=False)
        self.receiver_links = grid.add_field(
            'flow__links_to_receiver_nodes',
            BAD_INDEX_VALUE*numpy.ones((self._grid.number_of_nodes,
                                        self.max_receivers),
                                       dtype=int),
            at='node',
            dtype=int,
            noclobber=False)
        # Proportions are fractions in [0, 1], so this field must be float
        # (the dtype passed to add_field previously contradicted the float
        # array being registered).
        self.proportions = grid.add_field(
            'flow__receiver_proportions',
            BAD_INDEX_VALUE*numpy.ones((self._grid.number_of_nodes,
                                        self.max_receivers),
                                       dtype=float),
            at='node',
            dtype=float,
            noclobber=False)

    def updated_boundary_conditions(self):
        """
        Method to update FlowDirectorDINF when boundary conditions change.

        Call this if boundary conditions on the grid are updated after the
        component is instantiated.
        """
        self._active_links = self.grid.active_links
        self._activelink_tail = self.grid.node_at_link_tail[self.grid.active_links]
        self._activelink_head = self.grid.node_at_link_head[self.grid.active_links]

    def run_one_step(self):
        """
        Find flow directions and save to the model grid.

        run_one_step() checks for updated boundary conditions, calculates
        slopes on links, finds baselevel nodes based on the status at node,
        calculates flow directions, and saves results to the grid.

        An alternative to run_one_step() is direct_flow(), which does the
        same things but also returns the receiver nodes and proportions.
        """
        self.direct_flow()

    def direct_flow(self):
        """
        Find flow directions, save to the model grid, and return receivers.

        direct_flow() checks for updated boundary conditions, calculates
        slopes on links, finds baselevel nodes based on the status at node,
        calculates flow directions, saves results to the grid, and returns
        the at-node arrays of receiver nodes and flow proportions. These
        arrays are stored in the grid at:
        grid['node']['flow__receiver_nodes'] and
        grid['node']['flow__receiver_proportions']

        An alternative to direct_flow() is run_one_step(), which does the
        same things but does not return the arrays.
        """
        # step 0. Check and update BCs
        if self._bc_set_code != self.grid.bc_set_code:
            self.updated_boundary_conditions()
            self._bc_set_code = self.grid.bc_set_code
        # Step 1. Find and save base level nodes.
        (baselevel_nodes, ) = numpy.where(
            numpy.logical_or(self._grid.status_at_node == FIXED_VALUE_BOUNDARY,
                             self._grid.status_at_node == FIXED_GRADIENT_BOUNDARY))
        # Calculate flow directions
        (self.receivers, self.proportions, steepest_slope,
         steepest_receiver, sink,
         receiver_links, steepest_link) = \
            flow_direction_dinf.flow_directions_dinf(
                self._grid,
                self.surface_values,
                baselevel_nodes=baselevel_nodes)
        # Save the outputs of this component to the grid fields.
        self._grid['node']['flow__receiver_nodes'][:] = self.receivers
        self._grid['node']['flow__receiver_node'][:] = steepest_receiver
        self._grid['node']['flow__receiver_proportions'][:] = self.proportions
        self._grid['node']['topographic__steepest_slope'][:] = steepest_slope
        self._grid['node']['flow__link_to_receiver_node'][:] = steepest_link
        self._grid['node']['flow__links_to_receiver_nodes'][:] = receiver_links
        self._grid['node']['flow__sink_flag'][:] = numpy.zeros_like(steepest_link,
                                                                    dtype=bool)
        self._grid['node']['flow__sink_flag'][sink] = True
        return (self.receivers, self.proportions)

    @property
    def nodes_receiving_flow(self):
        """Return the node ids of the nodes receiving flow."""
        return self._grid['node']['flow__receiver_nodes']

    @property
    def proportions_of_flow(self):
        """Return the proportion of flow going to receivers."""
        return self._grid['node']['flow__receiver_proportions']
if __name__ == '__main__':
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Signature of the optional ``cls`` callback that post-processes responses.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer shared by all request builders below; client-side
# parameter validation is turned off.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_unregister_request(
    resource_provider_namespace: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the POST request that unregisters a resource provider from a
    subscription (API version 2019-05-10)."""
    api_version = "2019-05-10"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister')
    path_format_arguments = {
        "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_register_request(
    resource_provider_namespace: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the POST request that registers a resource provider with a
    subscription (API version 2019-05-10)."""
    api_version = "2019-05-10"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register')
    path_format_arguments = {
        "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    *,
    top: Optional[int] = None,
    expand: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing resource providers for a subscription;
    ``top`` limits the result count and ``expand`` selects extra properties."""
    api_version = "2019-05-10"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if top is not None:
        query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
    if expand is not None:
        query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_at_tenant_scope_request(
    *,
    top: Optional[int] = None,
    expand: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing resource providers at tenant scope
    (no subscription in the path)."""
    api_version = "2019-05-10"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/providers')
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if top is not None:
        query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
    if expand is not None:
        query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_request(
    resource_provider_namespace: str,
    subscription_id: str,
    *,
    expand: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single resource provider under a
    subscription; ``expand`` selects extra properties."""
    api_version = "2019-05-10"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}')
    path_format_arguments = {
        "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if expand is not None:
        query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_at_tenant_scope_request(
    resource_provider_namespace: str,
    *,
    expand: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single resource provider at tenant scope
    (no subscription in the path)."""
    api_version = "2019-05-10"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/providers/{resourceProviderNamespace}')
    path_format_arguments = {
        "resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if expand is not None:
        query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class ProvidersOperations(object):
"""ProvidersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_05_10.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, configuration, and (de)serializers."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def unregister(
        self,
        resource_provider_namespace: str,
        **kwargs: Any
    ) -> "_models.Provider":
        """Unregisters a subscription from a resource provider.

        :param resource_provider_namespace: The namespace of the resource provider to unregister.
        :type resource_provider_namespace: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Provider, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.resources.v2019_05_10.models.Provider
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Provider"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build and send the POST request through the client pipeline.
        request = build_unregister_request(
            resource_provider_namespace=resource_provider_namespace,
            subscription_id=self._config.subscription_id,
            template_url=self.unregister.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a documented success; anything else maps to an error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Provider', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    unregister.metadata = {'url': '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister'}  # type: ignore
@distributed_trace
def register(
self,
resource_provider_namespace: str,
**kwargs: Any
) -> "_models.Provider":
"""Registers a subscription with a resource provider.
:param resource_provider_namespace: The namespace of the resource provider to register.
:type resource_provider_namespace: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_10.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_register_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
template_url=self.register.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
register.metadata = {'url': '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register'} # type: ignore
@distributed_trace
def list(
self,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.ProviderListResult"]:
"""Gets all resource providers for a subscription.
:param top: The number of results to return. If null is passed returns all deployments.
:type top: int
:param expand: The properties to include in the results. For example, use &$expand=metadata in
the query string to retrieve resource provider metadata. To include property aliases in
response, use $expand=resourceTypes/aliases.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2019_05_10.models.ProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
top=top,
expand=expand,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
top=top,
expand=expand,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers'} # type: ignore
@distributed_trace
def list_at_tenant_scope(
self,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.ProviderListResult"]:
"""Gets all resource providers for the tenant.
:param top: The number of results to return. If null is passed returns all providers.
:type top: int
:param expand: The properties to include in the results. For example, use &$expand=metadata in
the query string to retrieve resource provider metadata. To include property aliases in
response, use $expand=resourceTypes/aliases.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2019_05_10.models.ProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_at_tenant_scope_request(
top=top,
expand=expand,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_at_tenant_scope_request(
top=top,
expand=expand,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': '/providers'} # type: ignore
@distributed_trace
def get(
self,
resource_provider_namespace: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Provider":
"""Gets the specified resource provider.
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param expand: The $expand query parameter. For example, to include property aliases in
response, use $expand=resourceTypes/aliases.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_10.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}'} # type: ignore
@distributed_trace
def get_at_tenant_scope(
self,
resource_provider_namespace: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Provider":
"""Gets the specified resource provider at the tenant level.
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param expand: The $expand query parameter. For example, to include property aliases in
response, use $expand=resourceTypes/aliases.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_10.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_at_tenant_scope_request(
resource_provider_namespace=resource_provider_namespace,
expand=expand,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': '/providers/{resourceProviderNamespace}'} # type: ignore
| |
from teafacto.util import ticktock, argprun, inp, tokenize
import os, pickle, random
from teafacto.procutil import *
from IPython import embed
from scipy import sparse
from teafacto.blocks.lang.wordvec import Glove, WordEmb
from teafacto.blocks.lang.sentenc import TwoLevelEncoder
from teafacto.blocks.seq.rnn import RNNSeqEncoder, MaskMode
from teafacto.blocks.seq.enc import SimpleSeq2Vec, SimpleSeq2MultiVec, SimpleSeq2Sca, EncLastDim
from teafacto.blocks.cnn import CNNSeqEncoder
from teafacto.blocks.basic import VectorEmbed, MatDot, Linear
from teafacto.blocks.memory import MemVec
from teafacto.blocks.match import SeqMatchScore, CosineDistance, MatchScore
from teafacto.core.base import Block, tensorops as T, Val
def readdata(p="../../../../data/simplequestions/clean/datamat.word.fb2m.pkl",
             entinfp="../../../../data/simplequestions/clean/subjs-counts-labels-types.fb2m.tsv",
             cachep=None, #"subjpredcharns.readdata.cache.pkl",
             maskid=-1,
             debug=False,
             numtestcans=None,
             ):
    """Load the SimpleQuestions data matrices and derived lookup structures.

    Returns (train, valid, test) (data, gold) pairs, the (subjmat, relmat)
    label matrices, the (subjdic, reldic) id dictionaries, worddic, the
    subject info dict, and (test subject candidates, relations per subject).
    Gold relation ids are re-based to start at 0 (entdic stores subjects
    first, then relations).
    """
    tt = ticktock("dataloader")
    if cachep is not None and os.path.isfile(cachep):      # load
        tt.tick("loading from cache")
        ret = pickle.load(open(cachep))
        tt.tock("loaded from cache")
        # NOTE(review): on a cache hit, subjdic/reldic/testgold used below are
        # never defined, so the calls after this if/else would raise NameError.
        # The cache path looks broken/unused (cachep defaults to None) — confirm.
    else:
        tt.tick("loading datamat")
        x = pickle.load(open(p))
        tt.tock("datamat loaded")
        worddic = x["worddic"]
        entdic = x["entdic"]
        entmat = x["entmat"]
        numents = x["numents"]
        traindata, traingold = x["train"]
        validdata, validgold = x["valid"]
        testdata, testgold = x["test"]
        # shift relation ids so relations are numbered from 0
        traingold[:, 1] -= numents
        validgold[:, 1] -= numents
        testgold[:, 1] -= numents

        rwd = {v: k for k, v in worddic.items()}       # id -> word
        subjdic = {k: v for k, v in entdic.items() if v < numents}
        reldic = {k: v - numents for k, v in entdic.items() if v >= numents}

        subjmat = entmat[:numents]
        # give subjects whose label row is fully masked a <RARE> token
        ssubjmat = np.sum(subjmat != maskid, axis=1)
        if np.any(ssubjmat == 0):
            for i in list(np.argwhere(ssubjmat == 0)[:, 0]):
                subjmat[i, 0] = worddic["<RARE>"]

        relmat = entmat[numents:]
        if debug:
            embed()

        # augment question word matrices with character-level info
        traindata = wordmat2wordchartensor(traindata, rwd=rwd, maskid=maskid)
        validdata = wordmat2wordchartensor(validdata, rwd=rwd, maskid=maskid)
        testdata = wordmat2wordchartensor(testdata, rwd=rwd, maskid=maskid)

        # subject labels are encoded purely on character level (max 75 chars)
        subjmat = wordmat2charmat(subjmat, rwd=rwd, maskid=maskid, raretoken="<RARE>", maxlen=75)

        ret = ((traindata, traingold), (validdata, validgold),
               (testdata, testgold), (subjmat, relmat), (subjdic, reldic),
               worddic)
        if cachep is not None:
            tt.tick("dumping to cache")
            pickle.dump(ret, open(cachep, "w"))
            tt.tock("dumped to cache")

    subjinfo = loadsubjinfo(entinfp, subjdic)
    testsubjcans = loadsubjtestcans(numcans=numtestcans)
    testrelcans, relspersubj = loadreltestcans(testgold, subjdic, reldic)
    if debug:
        embed()
    return ret + (subjinfo, (testsubjcans, relspersubj))
def loadreltestcans(testgold, subjdic, reldic, relsperentp="../../../../data/simplequestions/allrelsperent.dmp"):
    """Load outgoing/incoming relation candidates per entity and align them
    with the test examples.

    Returns (relations per test example, relations per entity), where each
    value is a pair (outgoing rel ids, incoming rel ids).
    """
    tt = ticktock("test rel can loader")
    testsubjs = testgold[:, 0]
    relsperent = {}
    tt.tick("loading rel test cans")

    def torelids(cell):
        # a cell is a space-separated list of relation uris; empty cell -> []
        if cell == "":
            return []
        return [reldic[r] for r in cell.split(" ")]

    for line in open(relsperentp):
        subj, relsout, relsin = line[:-1].split("\t")
        if subj not in subjdic:
            continue
        relsperent[subjdic[subj]] = (torelids(relsout), torelids(relsin))
    tt.tock("test cans loaded")

    relsoftestexamples = [(relsperent[s][0], relsperent[s][1])
                          for s in testsubjs]
    return relsoftestexamples, relsperent
def loadsubjtestcans(p="../../../../data/simplequestions/clean/testcans{}.pkl", numcans=None):
    """Load pickled subject candidate lists for the test set.

    When numcans is given, the "<numcans>c" variant of the candidate file is
    loaded instead of the default one.
    """
    tt = ticktock("test subjects candidate loader")
    tt.tick("loading candidates")
    if numcans is not None:
        path = p.format("{}c".format(numcans))
    else:
        path = p.format("")
    cans = pickle.load(open(path))
    tt.tock("canddiates loaded")
    return cans
def loadsubjinfo(entinfp, entdic, cachep=None):#"subjinfo.cache.pkl"):
    """Load per-subject info (label, type words, type uri, counts) from a TSV,
    optionally caching the parsed dict as a pickle at cachep."""
    tt = ticktock("subjinfoloader")

    def build():
        # parse the tab-separated info file into {subject id: info tuple}
        tt.tick("making subject info from file")
        info = {}
        for c, line in enumerate(open(entinfp)):
            subjuri, subjc, objc, subjname, typuri, typname = line[:-1].split("\t")
            info[entdic[subjuri]] = (subjname, typname.lower().split(), typuri, subjc, objc)
            if c % 1000 == 0:
                tt.live(str(c))
        tt.tock("made subject info from file")
        return info

    if cachep is None:      # no caching requested: just parse
        return build()
    if os.path.isfile(cachep):      # cache hit
        tt.tick("loading cached subject info")
        subjinfo = pickle.load(open(cachep))
        tt.tock("loaded cached subject info")
    else:       # cache miss: parse and dump
        subjinfo = build()
        tt.tick("dumping subject info in cache")
        pickle.dump(subjinfo, open(cachep, "w"))
        tt.tock("dumped subject info in cache")
    return subjinfo
def buildrelsamplespace(entmat, wd, maskid=-1):
    """Build a relation "closeness" sample space: two relations are close when
    their surface forms share at least one non-blacklisted word.

    :param entmat: (numrels, seqlen) int matrix of word ids, padded with maskid
    :param wd: word -> id dictionary
    :param maskid: padding id in entmat (default -1)
    :returns: (samdic, entmatm.T) where samdic maps each relation id to the
        list of relation ids sharing a word with it (including itself), and
        entmatm is the sparse relation x word incidence matrix.
    """
    tt = ticktock("samplespace")
    tt.tick("making sample space")
    entmatm = sparse.dok_matrix((entmat.shape[0], np.max(entmat) + 1))
    # words too generic to be discriminative at position 0
    posblacklist = {0: {wd["base"], wd["user"]}}
    # globally ignored stopwords
    blacklist = set([wd[x] for x in "default domain of by the in at s this for with type".split()])
    samdic = {k: set() for k in range(entmat.shape[0])}     # from ent ids to sets of ent ids
    for i in range(entmat.shape[0]):
        for j in range(entmat.shape[1]):
            w = entmat[i, j]
            # BUG FIX: compare against the maskid parameter instead of a
            # hard-coded -1, so non-default mask ids are honored
            if w == maskid:     # beginning of padding
                break
            if j in posblacklist:
                if w in posblacklist[j]:
                    continue
            if w in blacklist:
                continue
            entmatm[i, w] = 1
    # relations sharing any word get a nonzero inner product
    samdicm = entmatm.dot(entmatm.T)
    for i in range(samdicm.shape[0]):
        samdic[i] = list(np.argwhere(samdicm[i, :])[:, 1])
    tt.tock("made sample space")
    return samdic, entmatm.T
def loadsubjsamplespace(p="../../../../data/simplequestions/clean/subjclose.dic.pkl"):
    """Load the pickled subject-closeness dictionary used for negative sampling."""
    return pickle.load(open(p))
def buildtypmat(subjmat, subjinfo, worddic, maxlen=6, maskid=-1):
    """Build a (numsubjs, maxlen) int32 matrix of type-word ids per subject.

    :param subjmat: only its number of rows is used (one row per subject)
    :param subjinfo: dict subj id -> info tuple whose element [1] is the list
        of type words for that subject
    :param worddic: word -> id map; words missing from it map to id 0
    :param maxlen: maximum number of type words kept per subject
    :param maskid: fill value for unused cells
    """
    ret = maskid * np.ones((subjmat.shape[0], maxlen), dtype="int32")
    import re
    splitterregex = re.compile(r"[\s/]")
    for i in range(ret.shape[0]):
        # BUG FIX: the conditional must select the whole join argument.
        # Previously `" ".join(subjinfo[i][1] if i in subjinfo else "<unk>")`
        # iterated the *characters* of "<unk>" for unknown subjects,
        # producing "< u n k >" (five unknown-word ids) instead of one.
        typstring = " ".join(subjinfo[i][1]) if i in subjinfo else "<unk>"
        typwords = splitterregex.split(typstring)
        typwordids = [worddic[typword] if typword in worddic else 0 for typword in typwords]
        typwordids = typwordids[:min(len(typwords), maxlen)]
        ret[i, :len(typwordids)] = typwordids
    return ret
class SeqLeftBlock(Block):
    """Question-side block that duplicates the single question encoding so the
    same vector is matched against both the subject and the relation slots."""
    def __init__(self, inner, **kw):
        super(SeqLeftBlock, self).__init__(**kw)
        self.inner = inner
    def apply(self, x):
        # idxs^(batsize, seqlen, ...) --> (batsize, 2, encdim)
        enc = self.inner(x).dimshuffle(0, "x", 1)
        return T.concatenate([enc, enc], axis=1)  # (batsize, 2, decdim)
class ConcatLeftBlock(Block):
    """Question-side block that cuts one question encoding into two halves
    along the feature axis: first half for the subject slot, second half for
    the relation slot."""
    def __init__(self, inner, **kw):
        super(ConcatLeftBlock, self).__init__(**kw)
        # bidirectional encoders get a learned linear mixing first; for
        # unidirectional ones the transform is the identity
        self.trans = MatDot(inner.outdim, inner.outdim, init="glorotuniform") \
                        if inner.bidir else lambda x: x
        self.inner = inner
    def apply(self, x):
        res = self.inner(x)
        res = self.trans(res)
        res = res.dimshuffle(0, "x", 1)     # (batsize, 1, q_enc_dim)
        if not self.inner.bidir:
            # NOTE: Python 2 integer division intended here
            mid = res.shape[2]/2
            ret = T.concatenate([res[:, :, :mid], res[:, :, mid:]], axis=1)
        else:
            # NOTE(review): named "quart" but computed as shape/2; the parallel
            # bidir branch in MultiLeftBlock uses shape/4 — looks like a bug
            # that makes the 2*quart:3*quart and 3*quart: slices empty. Confirm.
            quart = res.shape[2]/2
            ret = T.concatenate([
                T.concatenate([res[:, :, :quart], res[:, :, 2*quart:3*quart]], axis=2),
                T.concatenate([res[:, :, quart:2*quart], res[:, :, 3*quart:]], axis=2)
            ], axis=1)
        return ret  # (batsize, 2, decdim)
class MultiLeftBlock(Block):
    """Question-side block wrapping a two-output encoder (SimpleSeq2MultiVec).

    mode "multic": keep the first half of output 0 (subject) and the second
    half of output 1 (relation), recombining fwd/bwd quarters for
    bidirectional encoders; any other mode returns the encoder output as-is.
    """
    def __init__(self, inner, mode, **kw):
        super(MultiLeftBlock, self).__init__(**kw)
        self.inner = inner
        self.mode = mode
    def apply(self, x):
        res = self.inner(x)     # (batsize, 2, encdim)
        if self.mode == "multic":   # take top half of first and bottom half of second
            if not self.inner.bidir:
                # NOTE: Python 2 integer division intended here
                mid = res.shape[2]/2
                ret = T.concatenate([res[:, 0:1, :mid], res[:, 1:2, mid:]], axis=1)
            else:
                quarts = res.shape[2]/4
                ret = T.concatenate([
                    T.concatenate([ res[:, 0:1, :quarts],
                                    res[:, 0:1, 2*quarts:3*quarts]], axis=2),
                    T.concatenate([ res[:, 1:2, quarts:2*quarts],
                                    res[:, 1:2, 3*quarts:]], axis=2)
                ], axis=1)
        else:       # return as is
            ret = res
        print "NDIM MULTILEFTBLOCK !!!!!!!!!!!!!!!!!!!!!{}".format(ret.ndim)
        return ret  # (batsize, 2, decdim)
class BinoEncoder(Block):
    """Question encoder that softly splits the question in two.

    A small recurrent network (outerpol) predicts a weight per word; the left
    encoder consumes word vectors scaled by those weights and the right
    encoder by their complement, yielding one encoding for the subject slot
    and one for the relation slot.
    """
    def __init__(self, charenc=None, wordemb=None, maskid=-1, scalayers=1,
                 scadim=100, encdim=100, outdim=None, scabidir=False, encbidir=False, enclayers=1, **kw):
        super(BinoEncoder, self).__init__(**kw)
        self.charenc = charenc      # per-word character-level encoder
        self.wordemb = wordemb      # word-level embedder
        self.maskid = maskid
        self.bidir = encbidir       # TODO
        outdim = encdim if outdim is None else outdim
        self.outdim = outdim        # TODO
        # predicts a scalar weight per word (the soft split)
        self.outerpol = SimpleSeq2Sca(inpemb=False, inpembdim=charenc.outdim + wordemb.outdim,
                                      innerdim=[scadim]*scalayers, bidir=scabidir)
        self.leftenc = RNNSeqEncoder(inpemb=False, inpembdim=charenc.outdim + wordemb.outdim,
                                     innerdim=[encdim]*enclayers, bidir=encbidir, maskid=maskid)
        self.rightenc = RNNSeqEncoder(inpemb=False, inpembdim=charenc.outdim + wordemb.outdim,
                                      innerdim=[encdim]*enclayers, bidir=encbidir, maskid=maskid)
        # project both encodings to the common output dimension
        self.leftlin = Linear(self.leftenc.outdim, outdim)
        self.rightlin = Linear(self.rightenc.outdim, outdim)
    def apply(self, x):
        # word vectors and mask; x is (batsize, seqlen, 1 + charlen):
        # column 0 holds word ids, the rest char ids per word
        charten = x[:, :, 1:]
        charencs = EncLastDim(self.charenc)(charten)
        wordmat = x[:, :, 0]
        wordembs = self.wordemb(wordmat)
        wordvecs = T.concatenate([charencs, wordembs], axis=2)
        wordmask = T.neq(wordmat, self.maskid)
        wordvecs.mask = wordmask
        # do outerpolation: left gets weights, right gets (1 - weights)
        weights, mask = self.outerpol(wordvecs)
        leftenco = self.leftenc(wordvecs, weights=weights).dimshuffle(0, 'x', 1)
        rightenco = self.rightenc(wordvecs, weights=(1 - weights)).dimshuffle(0, 'x', 1)
        ret = T.concatenate([self.leftlin(leftenco),
                             self.rightlin(rightenco)],
                            axis=1)
        return ret  # (batsize, 2, decdim)
class RightBlock(Block):
    """Candidate-side block: encodes the subject and predicate inputs and
    stacks the two encodings along axis 1."""
    def __init__(self, a, b, **kw):
        super(RightBlock, self).__init__(**kw)
        self.subjenc = a
        self.predenc = b
    def apply(self, subjslice, relslice):  # idxs^(batsize, len)
        subjenc = self.subjenc(subjslice).dimshuffle(0, "x", 1)
        predenc = self.predenc(relslice).dimshuffle(0, "x", 1)
        return T.concatenate([subjenc, predenc], axis=1)  # (batsize, 2, decdim)
class TypedSubjBlock(Block):
    """Subject encoder whose input rows are [type word ids | subject char ids];
    encodes both parts and concatenates the results."""
    def __init__(self, typelen, subjenc, typenc, **kw):
        super(TypedSubjBlock, self).__init__(**kw)
        self.typelen = typelen      # number of leading columns holding type-word ids
        self.typenc = typenc
        self.subjenc = subjenc
    def apply(self, x):
        typemb = self.typenc(x[:, :self.typelen])
        subemb = self.subjenc(x[:, self.typelen:])
        return T.concatenate([subemb, typemb], axis=1)
class CustomPredictor(object):
    """Two-stage prediction: encode questions once, then rank subject and
    relation candidates against the two slots of the question encoding
    (slot 0 = subject, slot 1 = relation) by cosine similarity."""
    def __init__(self, questionencoder=None, entityencoder=None,
                 relationencoder=None,
                 enttrans=None, reltrans=None, debug=False,
                 subjinfo=None):
        self.qenc = questionencoder
        self.eenc = entityencoder
        self.renc = relationencoder
        #self.mode = mode
        self.enttrans = enttrans    # maps entity ids to encoder input rows
        self.reltrans = reltrans    # maps relation ids to encoder input rows
        self.debug = debug
        self.subjinfo = subjinfo
        self.qencodings = None      # cached (numsam, 2, encdim) question encodings
        self.tt = ticktock("predictor")

    # stateful API
    def encodequestions(self, data):
        """Encode all questions once and cache the result for the rank* calls."""
        self.tt.tick("encoding questions")
        self.qencodings = self.qenc.predict(data)
        self.tt.tock("encoded questions")

    def ranksubjects(self, entcans):
        """Rank each question's subject candidates by cosine similarity to
        slot 0 of the cached question encodings.

        Returns a list (one per question) of (subject id, score) tuples,
        best first; empty candidate lists yield [(-1, 0)].
        """
        assert(self.qencodings is not None)
        qencforent = self.qencodings[:, 0, :]
        '''if self.mode == "concat":
            qencforent = self.qencodings[:, :(self.qencodings.shape[1] / 2)]
        elif self.mode == "seq":
            qencforent = self.qencodings[:, :]
        elif self.mode == "multi":
            qencforent = self.qencodings[:, 0, :]
        elif self.mode == "multic":
            qencforent = self.qencodings[:, 0, :(self.qencodings.shape[2] / 2)]
        else:
            raise Exception("unrecognized mode in prediction")'''
        self.tt.tick("rank subjects")
        ret = []    # list of lists of (subj, score) tuples, sorted
        for i in range(self.qencodings.shape[0]):       # for every question
            if len(entcans[i]) == 0:
                scoredentcans = [(-1, 0)]
            elif len(entcans[i]) == 1:
                # single candidate needs no scoring
                scoredentcans = [(entcans[i][0], 1)]
            else:
                entembs = self.eenc.predict.transform(self.enttrans)(entcans[i])
                #embed()
                # cosine similarity: dot product normalized by both norms
                entscoresi = np.tensordot(qencforent[i], entembs, axes=(0, 1))
                entscoresi /= np.linalg.norm(qencforent[i])
                entscoresi /= np.linalg.norm(entembs, axis=1)
                scoredentcans = sorted(zip(entcans[i], entscoresi), key=lambda (x, y): y, reverse=True)
            ret.append(scoredentcans)
            self.tt.progress(i, self.qencodings.shape[0], live=True)
        self.tt.tock("ranked subjects")
        self.subjranks = ret
        return ret

    def rankrelations(self, relcans):
        """Rank each question's relation candidates against slot 1 of the
        cached question encodings (same scheme as ranksubjects)."""
        assert(self.qencodings is not None)
        qencforrel = self.qencodings[:, 1, :]
        '''if self.mode == "concat":
            qencforrel = self.qencodings[:, (self.qencodings.shape[1] / 2):]
        elif self.mode == "seq":
            qencforrel = self.qencodings[:, :]
        elif self.mode == "multi":
            qencforrel = self.qencodings[:, 1, :]
        elif self.mode == "multic":
            qencforrel = self.qencodings[:, 1, (self.qencodings.shape[2] / 2):]
        else:
            raise Exception("unrecognized mode in prediction")'''
        self.tt.tick("rank relations")
        ret = []
        for i in range(self.qencodings.shape[0]):
            if len(relcans[i]) == 0:
                scoredrelcans = [(-1, 0)]
            elif len(relcans[i]) == 1:
                scoredrelcans = [(relcans[i][0], 1)]
            else:
                relembs = self.renc.predict.transform(self.reltrans)(relcans[i])
                relscoresi = np.tensordot(qencforrel[i], relembs, axes=(0, 1))
                relscoresi /= np.linalg.norm(qencforrel[i])
                relscoresi /= np.linalg.norm(relembs, axis=1)
                scoredrelcans = sorted(zip(relcans[i], relscoresi), key=lambda (x, y): y, reverse=True)
            ret.append(scoredrelcans)
            self.tt.progress(i, self.qencodings.shape[0], live=True)
        self.tt.tock("ranked relations")
        self.relranks = ret
        return ret

    def rankrelationsfroments(self, bestsubjs, relsperent):
        """Rank the outgoing relations of each predicted best subject."""
        relcans = [relsperent[bestsubj][0] if bestsubj in relsperent else [] for bestsubj in bestsubjs]
        return self.rankrelations(relcans)

    def predict(self, data, entcans=None, relsperent=None, relcans=None, multiprune=-1):
        """Predict (subject id, relation id) per question.

        Exactly one of relsperent / relcans must be given. With
        multiprune > 0, relation candidates are pooled from the top-k subjects
        (plus same-label subjects), and subjects are then re-filtered to those
        compatible with the best relation.

        :returns: int32 array (numsam, 2) of (subject, relation) ids
        """
        print multiprune
        assert(relsperent is None or relcans is None)
        assert(relsperent is not None or relcans is not None)
        assert(entcans is not None)
        self.encodequestions(data)
        rankedsubjs = self.ranksubjects(entcans)
        bestsubjs = [x[0][0] for x in rankedsubjs]
        if relcans is not None:
            rankedrels = self.rankrelations(relcans)
            bestrels = [x[0][0] for x in rankedrels]
        else:
            if multiprune <= 0:
                # rank only the best subject's outgoing relations
                relcans = [relsperent[bestsubj][0] if bestsubj in relsperent else [] for bestsubj in bestsubjs]
                rankedrels = self.rankrelations(relcans)
                bestrels = [x[0][0] for x in rankedrels]
            else:
                print "multipruning !!!!!!!!!!!!!!!!!"
                topk = multiprune       # TOP K !!!!!!!!!!!!!!!!!!!!!!!!!!!!
                # get relcans: pool relations of top-k subjects and of any
                # subject sharing the top subject's label
                relcans = []
                for subjranking in rankedsubjs:
                    toplabel = None
                    relcanse = []
                    i = 0
                    for subj, score in subjranking:
                        subjlabel = " ".join(tokenize(self.subjinfo[subj][0]) if subj in self.subjinfo else [])
                        topcan = None
                        if toplabel is None:
                            toplabel = subjlabel
                            topcan = subj
                        elif subjlabel == toplabel:
                            topcan = subj
                        elif i < topk:
                            topcan = subj
                        else:
                            pass
                        toadd = relsperent[topcan][0] if topcan in relsperent else []
                        relcanse.extend(toadd)
                        i += 1
                    relcans.append(relcanse)
                # rank relations
                rankedrels = self.rankrelations(relcans)
                bestrels = [x[0][0] for x in rankedrels]
                # build ents per relation (inverse of relsperent's outgoing map)
                entsperrel = {}
                for ent, rels in relsperent.items():
                    for rel in rels[0]:
                        if rel not in entsperrel:
                            entsperrel[rel] = set()
                        entsperrel[rel].add(ent)
                # filter rankedsubjs down to subjects having the best relation
                filteredrankedsubjs = []
                for i in range(len(rankedsubjs)):
                    filteredrankedsubjs.append([])
                    for subj, score in rankedsubjs[i]:
                        if bestrels[i] in entsperrel and \
                                        subj in entsperrel[bestrels[i]]:
                            filteredrankedsubjs[i].append((subj, score))
                    if len(filteredrankedsubjs[i]) == 0:
                        filteredrankedsubjs[i].append((-1, -1.))
                bestsubjs = [x[0][0] for x in filteredrankedsubjs]
        ret = np.concatenate([
            np.expand_dims(np.asarray(bestsubjs, dtype="int32"), axis=1),
            np.expand_dims(np.asarray(bestrels, dtype="int32"), axis=1)
        ], axis=1)
        return ret

    def oldpredict(self, data, entcans, relsperent):
        """Older single-pass prediction path; relies on self.mode, which the
        current __init__ no longer sets (see commented assignment) —
        NOTE(review): calling this would raise AttributeError; kept for reference.
        """
        tt = ticktock("predictor")
        tt.tick("computing question encodings")
        qencodings = self.qenc.predict(data)    # (numsam, encdim)
        tt.tock("computed question encodings")
        tt.tick("predicting")
        ret = np.zeros((data.shape[0], 2), dtype="int32")
        if self.mode == "concat":
            mid = qencodings.shape[1] / 2
            qencforent = qencodings[:, :mid]
            qencforrel = qencodings[:, mid:]
        elif self.mode == "seq":
            qencforent = qencodings[:, :]
            qencforrel = qencodings[:, :]
        else:
            raise Exception("unrecognized mode")
        for i in range(qencodings.shape[0]):
            # predict subject
            if len(entcans[i]) == 0:
                bestsubj = -1
            elif len(entcans[i]) == 1:
                bestsubj = entcans[i][0]
            else:
                entembs = self.eenc.predict.transform(self.enttrans)(entcans[i])
                entscoresi = np.tensordot(qencforent[i], entembs, axes=(0, 1))
                scoredentcans = sorted(zip(entcans[i], entscoresi), key=lambda (x, y): y, reverse=True)
                bestsubj = scoredentcans[0][0]
                if self.debug:
                    embed()
            ret[i, 0] = bestsubj
            # predict relation among the predicted subject's outgoing relations
            relcans = relsperent[ret[i, 0]][0] if ret[i, 0] in relsperent else []
            if len(relcans) == 0:
                bestrel = -1
            elif len(relcans) == 1:
                bestrel = relcans[0]
            else:
                if self.debug:
                    embed()
                relembs = self.renc.predict.transform(self.reltrans)(relcans)
                relscoresi = np.tensordot(qencforrel[i], relembs, axes=(0, 1))
                scoredrelcans = sorted(zip(relcans, relscoresi), key=lambda (x, y): y, reverse=True)
                bestrel = scoredrelcans[0][0]
            ret[i, 1] = bestrel
            if self.debug:
                embed()
            tt.progress(i, qencodings.shape[0], live=True)
        tt.tock("predicted")
        return ret
class NegIdxGen(object):
    """Negative (subject, relation) sampler for ranking training.

    Sampling prefers "close" candidates (subjects with similar labels,
    relations sharing words, or relations attached to the gold subject); the
    probability of picking from a close set grows with its size via samprobf,
    falling back to uniform random ids otherwise.
    """
    def __init__(self, maxentid, maxrelid, relclose=None, subjclose=None, relsperent=None):
        self.maxentid = maxentid    # largest valid entity id (inclusive)
        self.maxrelid = maxrelid    # largest valid relation id (inclusive)
        print "using relclose" if relclose is not None else "no relclose"
        print "using subjclose" if subjclose is not None else "no subjclose"
        print "using relsperent" if relsperent is not None else "no relsperent"
        self.relclose = {k: set(v) for k, v in relclose.items()} if relclose is not None else None
        self.subjclose = {k: set(v) for k, v in subjclose.items()} if subjclose is not None else None
        # only outgoing relations (v[0]) of each entity are used
        self.relsperent = {k: set(v[0]) for k, v in relsperent.items()} if relsperent is not None else None
        # probability of sampling from a close set, saturating with its size
        self.samprobf = lambda x: np.tanh(np.log(x + 1)/3)

    def __call__(self, datas, gold):
        # gold: (batsize, 2) of (subject id, relation id);
        # returns (datas, negatives) with negatives the same shape as gold
        subjrand = self.sample(gold[:, 0], self.subjclose, self.maxentid)
        if self.relsperent is not None:     # sample uber-close
            relrand = self.samplereluberclose(gold[:, 1], gold[:, 0])
        else:
            relrand = self.sample(gold[:, 1], self.relclose, self.maxrelid)
        ret = np.concatenate([subjrand, relrand], axis=1)
        # embed()
        # TODO NEGATIVE SAMPLING OF RELATIONS FROM GOLD ENTITY'S RELATIONS
        return datas, ret.astype("int32")

    def samplereluberclose(self, relgold, entgold):
        # priority: relations of the gold subject ("uber-close"), then
        # relations close to the gold relation, then uniform random;
        # the gold relation itself is always excluded from close sets
        ret = np.zeros_like(relgold, dtype="int32")
        for i in range(relgold.shape[0]):
            uberclosesampleset = (self.relsperent[entgold[i]] if entgold[i] in self.relsperent else set())\
                .difference({relgold[i]})
            if np.random.random() < self.samprobf(len(uberclosesampleset)):
                ret[i] = random.sample(uberclosesampleset, 1)[0]
            else:
                completerandom = False
                if self.relclose is not None:
                    closesampleset = (self.relclose[relgold[i]] if relgold[i] in self.relclose else set())\
                        .difference({relgold[i]})
                    if np.random.random() < self.samprobf(len(closesampleset)):
                        ret[i] = random.sample(closesampleset, 1)[0]
                    else:
                        completerandom = True
                else:
                    completerandom = True
                if completerandom:
                    ret[i] = np.random.randint(0, self.maxrelid + 1)
        ret = np.expand_dims(ret, axis=1)
        return ret

    def sample(self, gold, closeset, maxid):
        """Sample one negative id per gold id, preferring closeset entries
        when available; returns an int32 column vector (batsize, 1)."""
        # assert(gold.ndim == 2 and gold.shape[1] == 1)
        if closeset is None:
            return np.random.randint(0, maxid + 1, (gold.shape[0], 1))
        else:
            ret = np.zeros_like(gold)
            for i in range(gold.shape[0]):
                sampleset = closeset[gold[i]] if gold[i] in closeset else []
                if np.random.random() < self.samprobf(len(sampleset)):
                    ret[i] = random.sample(sampleset, 1)[0]
                else:
                    ret[i] = np.random.randint(0, maxid + 1)
            ret = np.expand_dims(ret, axis=1)
            return ret.astype("int32")
def run(negsammode="closest", # "close" or "random"
usetypes=True,
mode="concat", # "seq" or "concat" or "multi" or "multic" or "bino"
glove=True,
embdim=100,
charencdim=100,
charembdim=50,
encdim=400,
bidir=False,
layers=1,
charenc="rnn", # "cnn" or "rnn"
margin=0.5,
lr=0.1,
numbats=700,
epochs=15,
gradnorm=1.0,
wreg=0.0001,
loadmodel="no",
debug=False,
debugtest=False,
forcesubjincl=False,
randsameval=0,
numtestcans=5,
multiprune=-1,
checkdata=False,
testnegsam=False,
testmodel=False,
sepcharembs=False,
):
tt = ticktock("script")
tt.tick("loading data")
(traindata, traingold), (validdata, validgold), (testdata, testgold), \
(subjmat, relmat), (subjdic, reldic), worddic, \
subjinfo, (testsubjcans, relsperent) = readdata(debug=debug,
numtestcans=numtestcans if numtestcans > 0 else None)
if usetypes:
print "building type matrix"
typmat = buildtypmat(subjmat, subjinfo, worddic)
subjmat = np.concatenate([typmat, subjmat], axis=1)
typlen = typmat.shape[1]
relsamplespace = None
subjsamplespace = None
if negsammode == "closest" or negsammode == "close":
relsamplespace, revind = buildrelsamplespace(relmat, worddic)
subjsamplespace = loadsubjsamplespace()
tt.tock("data loaded")
if checkdata:
embed()
numwords = max(worddic.values()) + 1
numsubjs = max(subjdic.values()) + 1
numrels = max(reldic.values()) + 1
maskid = -1
numchars = 256
nsrelsperent = relsperent if negsammode == "closest" else None
if testnegsam:
nig = NegIdxGen(numsubjs - 1, numrels - 1,
relclose=relsamplespace,
subjclose=subjsamplespace,
relsperent=nsrelsperent)
embed()
if mode == "seq" or mode == "multi":
decdim = encdim
elif mode == "concat" or mode == "multic" or mode == "bino":
decdim = encdim / 2
else:
raise Exception("unrecognized mode")
print "{} mode: {} decdim".format(mode, decdim)
# defining model
if glove:
wordemb = Glove(embdim).adapt(worddic)
else:
wordemb = WordEmb(dim=embdim, indim=numwords)
charemb = VectorEmbed(indim=numchars, dim=charembdim)
charemb2 = VectorEmbed(indim=numchars, dim=charembdim)
if charenc == "cnn":
print "using CNN char encoder"
charenc = CNNSeqEncoder(inpemb=charemb,
innerdim=[charencdim]*2, maskid=maskid,
stride=1)
elif charenc == "rnn":
print "using RNN char encoder"
charenc = RNNSeqEncoder(inpemb=charemb, innerdim=charencdim) \
.maskoptions(maskid, MaskMode.AUTO)
else:
raise Exception("no other character encoding modes available")
if bidir:
encdim = encdim / 2
if mode != "bino":
if mode == "multi" or mode == "multic":
wordenc = \
SimpleSeq2MultiVec(inpemb=False, inpembdim=wordemb.outdim + charencdim,
innerdim=encdim, bidir=bidir, numouts=2, mode="seq")
else:
encdim = [encdim] * layers
wordenc = RNNSeqEncoder(inpemb=False, inpembdim=wordemb.outdim + charencdim,
innerdim=encdim, bidir=bidir).maskoptions(MaskMode.NONE)
question_encoder = TwoLevelEncoder(l1enc=charenc, l2emb=wordemb,
l2enc=wordenc, maskid=maskid)
else:
question_encoder = BinoEncoder(charenc=charenc, wordemb=wordemb, maskid=maskid,
scadim=100, encdim=encdim/2, bidir=bidir,
enclayers=layers, outdim=decdim, scabidir=True)
# encode predicate on word level
predemb = SimpleSeq2Vec(inpemb=wordemb,
innerdim=decdim,
maskid=maskid,
bidir=False,
layers=1)
#predemb.load(relmat)
scharemb = charemb2 if sepcharembs else charemb
if usetypes:
# encode subj type on word level
subjtypemb = SimpleSeq2Vec(inpemb=wordemb,
innerdim=int(np.ceil(decdim*1./2)),
maskid=maskid,
bidir=False,
layers=1)
# encode subject on character level
charbidir = True
charencinnerdim = int(np.floor(decdim*1./2))
charenclayers = 1
if charbidir:
charencinnerdim /= 2
charenclayers = 2
subjemb = SimpleSeq2Vec(inpemb=scharemb,
innerdim=charencinnerdim,
maskid=maskid,
bidir=charbidir,
layers=charenclayers)
subjemb = TypedSubjBlock(typlen, subjemb, subjtypemb)
else:
# encode subject on character level
subjemb = SimpleSeq2Vec(inpemb=scharemb,
innerdim=decdim,
maskid=maskid,
bidir=False,
layers=1)
#subjemb.load(subjmat)
if testmodel:
embed()
# package
if mode == "seq":
lb = SeqLeftBlock(question_encoder)
rb = RightBlock(subjemb, predemb)
elif mode == "concat":
lb = ConcatLeftBlock(question_encoder)
rb = RightBlock(subjemb, predemb)
elif mode == "multi" or mode == "multic":
lb = MultiLeftBlock(question_encoder, mode)
rb = RightBlock(subjemb, predemb)
elif mode == "bino":
lb = question_encoder
rb = RightBlock(subjemb, predemb)
else:
raise Exception("unrecognized mode")
scorer = SeqMatchScore(lb, rb, scorer=CosineDistance(),
aggregator=lambda x: x, argproc=lambda x, y, z: ((x,), (y, z)))
obj = lambda p, n: T.sum((n - p + margin).clip(0, np.infty), axis=1)
class PreProc(object):
    """Batch transform: replaces gold (subject, relation) idx pairs with their matrix rows."""
    def __init__(self, subjmat, relmat):
        # one entity lookup table per column of the gold idx matrix
        self.ef = PreProcEnt(subjmat)
        self.rf = PreProcEnt(relmat)

    def __call__(self, data, gold):
        # gold carries (batsize, 2) index pairs: column 0 = subject, column 1 = relation
        subj_rows = self.ef(gold[:, 0])[0][0]
        rel_rows = self.rf(gold[:, 1])[0][0]
        return (data, subj_rows, rel_rows), {}
class PreProcE(object):
    """Batch transform for candidate (subject, relation) idx pairs."""
    def __init__(self, subjmat, relmat):
        self.ef = PreProcEnt(subjmat)
        self.rf = PreProcEnt(relmat)

    def __call__(self, x):
        # column 0 indexes subjects, column 1 indexes relations
        subj_slice = self.ef(x[:, 0])[0][0]
        rel_slice = self.rf(x[:, 1])[0][0]
        return (subj_slice, rel_slice), {}
class PreProcEnt(object):
    """Wraps an entity matrix as a shared Val and slices rows by index."""
    def __init__(self, mat):
        self.entmat = Val(mat)

    def __call__(self, x):
        rows = self.entmat[x]
        return (rows,), {}
transf = PreProc(subjmat, relmat)
if debug:
embed()
if epochs > 0 and loadmodel == "no":
tt.tick("training")
saveid = "".join([str(np.random.randint(0, 10)) for i in range(4)])
print("CHECKPOINTING AS: {}".format(saveid))
nscorer = scorer.nstrain([traindata, traingold]).transform(transf) \
.negsamplegen(NegIdxGen(numsubjs-1, numrels-1,
relclose=relsamplespace,
subjclose=subjsamplespace,
relsperent=nsrelsperent)) \
.objective(obj).adagrad(lr=lr).l2(wreg).grad_total_norm(gradnorm) \
.validate_on([validdata, validgold]) \
.autosavethis(scorer, "fullrank{}.model".format(saveid)) \
.train(numbats=numbats, epochs=epochs)
tt.tock("trained").tick()
# saving
#scorer.save("fullrank{}.model".format(saveid))
print("SAVED AS: {}".format(saveid))
if loadmodel is not "no":
tt.tick("loading model")
m = SeqMatchScore.load("fullrank{}.model".format(loadmodel))
#embed()
lb = m.l
subjemb = m.r.subjenc
predemb = m.r.predenc
tt.tock("loaded model")
# evaluation
predictor = CustomPredictor(questionencoder=lb,
entityencoder=subjemb,
relationencoder=predemb,
#mode=mode,
enttrans=transf.ef,
reltrans=transf.rf,
debug=debugtest,
subjinfo=subjinfo)
tt.tick("predicting")
if forcesubjincl: # forces the intended subject entity to be among candidates
for i in range(len(testsubjcans)):
if testgold[i, 0] not in testsubjcans[i]:
testsubjcans[i].append(testgold[i, 0])
if randsameval > 0: # generate random sampling eval data
testsubjcans = np.random.randint(0, numsubjs, (testgold.shape[0], randsameval))
testrelcans = np.random.randint(0, numrels, (testgold.shape[0], randsameval))
testsubjcans = np.concatenate([testgold[:, 0:1], testsubjcans], axis=1)
testrelcans = np.concatenate([testgold[:, 1:2], testrelcans], axis=1)
testsubjcans = testsubjcans.tolist()
testrelcans = testrelcans.tolist()
prediction = predictor.predict(testdata, entcans=testsubjcans, relcans=testrelcans)
else:
prediction = predictor.predict(testdata, entcans=testsubjcans,
relsperent=relsperent, multiprune=multiprune)
tt.tock("predicted")
tt.tick("evaluating")
evalmat = prediction == testgold
subjacc = np.sum(evalmat[:, 0]) * 1. / evalmat.shape[0]
predacc = np.sum(evalmat[:, 1]) * 1. / evalmat.shape[0]
totalacc = np.sum(np.sum(evalmat, axis=1) == 2) * 1. / evalmat.shape[0]
print "Test results ::::::::::::::::"
print "Total Acc: \t {}".format(totalacc)
print "Subj Acc: \t {}".format(subjacc)
print "Pred Acc: \t {}".format(predacc)
tt.tock("evaluated")
def subjinspect(subjrank, gold):
    """Format a ranked (subject idx, score) list, marking the gold entity.

    Idxs missing from subjinfo pass through unchanged.
    """
    rendered = []
    for x, y in subjrank:
        if x not in subjinfo:
            rendered.append((x, y))
            continue
        marker = "GOLD - " if gold == x else " "
        label = (marker + subjinfo[x][0] + " (" + " ".join(subjinfo[x][1]) + ")" +
                 str(subjinfo[x][3]) + " rels")
        rendered.append((label, y))
    return rendered
def inspectboth(hidecorrect=False, hidenotincan=False):
    """Walk subject and relation rankings for every test question.

    NOTE(review): this helper looks unfinished - it computes the skip
    conditions (and rwd) but never prints or collects anything afterwards.
    """
    # reverse word dictionary: id -> word (currently unused, see NOTE above)
    rwd = {v: k for k, v in worddic.items()}
    for i in range(len(predictor.subjranks)):
        subjx = testgold[i, 0]
        predx = testgold[i, 1]
        subjrank = predictor.subjranks[i]
        predrank = predictor.relranks[i]
        # skip questions where both subject and relation were ranked first
        if hidecorrect and subjx == subjrank[0][0] and predrank[0][0] == predx:
            continue
        # optionally skip questions whose gold subject is not in the candidates
        if subjx not in [k for k, v in subjrank]:
            if hidenotincan:
                continue
def inspectsubjs(hidecorrect=False, hidenotincan=False, shownotincan=False):
    """Interactively dump the subject ranking for each test question.

    hidecorrect: skip questions whose gold subject was ranked first.
    hidenotincan: skip questions whose gold subject is not among candidates.
    shownotincan: show ONLY questions whose gold subject is missing.
    Pauses (inp()) every 50 ranked lines and after each question.
    """
    # reverse word dictionary: id -> word
    rwd = {v: k for k, v in worddic.items()}
    for i in range(len(predictor.subjranks)):
        subjx = testgold[i, 0]
        subjrank = predictor.subjranks[i]
        if subjx == subjrank[0][0] and hidecorrect: # only look for errors
            continue
        if subjx not in [k for k, v in subjrank]:
            if hidenotincan:
                continue
        if shownotincan and subjx in [k for k, v in subjrank]:
            continue
        print "test question {}: {} \t GOLD: {}".format(i,
            wordids2string(testdata[i, :, 0], rwd),
            "{} ({}) - {} rels --- {}".format(
                *([subjinfo[subjx][0],
                   subjinfo[subjx][1],
                   subjinfo[subjx][3],
                   subjinfo[subjx][2]]
                  if subjx in subjinfo
                  else ["<UNK>", "<UNK>", "<UNK>", "<UNK>"])
            ))
        inspres = subjinspect(subjrank, subjx)
        # NOTE(review): i is reused as a 1-based rank counter; the for-loop
        # reassigns it next iteration, so this works but is easy to misread.
        i = 1
        for inspre in inspres:
            print "{}:\t{}\t{}".format(i, inspre[1], inspre[0])
            if i % 50 == 0:
                inp()
            i += 1
        inp()
def inspectpreds(hidecorrect=False):
    """Interactively dump the relation (predicate) ranking per test question.

    hidecorrect: skip questions whose gold relation was ranked first.
    Pauses (inp()) every 50 ranked lines and after each question.
    """
    # reverse word dictionary: id -> word
    rwd = {v: k for k, v in worddic.items()}
    for i in range(len(predictor.relranks)):
        relx = testgold[i, 1]
        # subjx is fetched but not used below
        subjx = testgold[i, 0]
        relrank = predictor.relranks[i]
        if relx == relrank[0][0] and hidecorrect:
            continue
        print "test question {}: {} \t GOLD: {}".format(i,
            wordids2string(testdata[i, :, 0], rwd),
            wordids2string(relmat[relx, :], rwd))
        inspres = [(("GOLD - " if relx == x else " ") +
                    wordids2string(relmat[x], rwd), y) for x, y in relrank]
        # NOTE(review): i is reused as a 1-based rank counter; the for-loop
        # reassigns it next iteration, so this works but is easy to misread.
        i = 1
        for inspre in inspres:
            print "{}:\t{}\t{}".format(i, inspre[1], inspre[0])
            if i % 50 == 0:
                inp()
            i += 1
        inp()
embed()
if __name__ == "__main__":
argprun(run)
| |
import datetime
from urllib.parse import urlencode
from rest_framework.reverse import reverse
from minicash.core.models import Record
from minicash.utils.testing import RESTTestCase
from .factories import AssetFactory, RecordFactory, TagFactory
class FilterTestCaseMixin:
    """Shared assertion helper for the record-filter test cases below."""

    def assert_res_count(self, count, q):
        """GET the records list filtered by *q* and check that both the
        returned page and the pagination metadata agree on *count*."""
        response = self.jget(reverse('records-list'), q)
        pagination_details, records_data = response.data
        self.assertEqual(count, len(records_data))
        self.assertEqual(count, pagination_details['count'])
class RecordsFilterCreatedDTTest(FilterTestCaseMixin, RESTTestCase):
    """Filtering records by creation datetime via dt_from/dt_to params."""

    def setUp(self):
        super().setUp()
        day = datetime.timedelta(days=1)
        self.now = datetime.datetime.now(datetime.timezone.utc)
        self.tomorrow = self.now + day
        self.before_yesterday = self.now - 2 * day
        self.yesterday = self.now - day
        self.after_tomorrow = self.now + 2 * day
        # 5 records today, 4 yesterday, 3 tomorrow -> 12 in total
        RecordFactory.create_batch(5, created_dt=self.now, owner=self.owner)
        RecordFactory.create_batch(4, created_dt=self.yesterday, owner=self.owner)
        RecordFactory.create_batch(3, created_dt=self.tomorrow, owner=self.owner)

    def tearDown(self):
        super().tearDown()
        Record.objects.all().delete()

    # pylint: disable=arguments-differ
    def jget(self, url, q, *args, **kwargs):
        """GET *url* with *q* encoded as its querystring."""
        return super().jget(f'{url}?{urlencode(q)}', *args, **kwargs)

    def test_filter_created_dt_between_minutes(self):
        # a one-minute window around "now" catches only today's 5 records
        window = datetime.timedelta(minutes=1)
        query = {
            'dt_from': (self.now - window).strftime('%Y-%m-%d %H:%M'),
            'dt_to': (self.now + window).strftime('%Y-%m-%d %H:%M'),
        }
        self.assert_res_count(5, query)

    def test_filter_created_dt_from_bound(self):
        # from before-yesterday to just after now: yesterday's 4 + today's 5
        query = {
            'dt_from': self.before_yesterday.strftime('%Y-%m-%d %H:%M'),
            'dt_to': (self.now + datetime.timedelta(minutes=1)).strftime('%Y-%m-%d %H:%M'),
        }
        self.assert_res_count(9, query)

    def test_filter_created_dt_to_bound(self):
        # from now to after-tomorrow: today's 5 + tomorrow's 3
        query = {
            'dt_from': self.now.strftime('%Y-%m-%d %H:%M'),
            'dt_to': self.after_tomorrow.strftime('%Y-%m-%d %H:%M'),
        }
        self.assert_res_count(8, query)
class RecordsFilterTagsTest(FilterTestCaseMixin, RESTTestCase):
    """Filtering records by tags with OR and AND semantics."""

    def setUp(self):
        super().setUp()
        self.tagA = TagFactory.create(name='TAG--A', owner=self.owner)
        self.tagB = TagFactory.create(name='TAG--B', owner=self.owner)
        self.tagC = TagFactory.create(name='TAG--C', owner=self.owner)
        # 2 records per single tag, 3 per tag pair:
        # per tag: 2 + 3 + 3 = 8; A|B = 13; A&B = 3
        batches = (
            ([self.tagA], 2), ([self.tagB], 2), ([self.tagC], 2),
            ([self.tagA, self.tagB], 3),
            ([self.tagA, self.tagC], 3),
            ([self.tagB, self.tagC], 3),
        )
        for tags, size in batches:
            RecordFactory.create_batch(size, tags=tags, owner=self.owner)

    def test_single_tag_or_filter(self):
        self.assert_res_count(8, {'tags_or': self.tagA.pk})

    def test_single_tag_or_filter_for_empty_result(self):
        self.assert_res_count(0, {'tags_or': 'abcd-000'})

    def test_single_tag_and_filter(self):
        # AND with a single tag degenerates to the same set as OR
        self.assert_res_count(8, {'tags_and': self.tagA.pk})

    def test_single_tag_and_filter_for_empty_result(self):
        self.assert_res_count(0, {'tags_and': 'abcd-000'})

    def test_many_tags_or_in_filter(self):
        self.assert_res_count(13, {'tags_or': [self.tagA.pk, self.tagB.pk]})

    def test_many_tags_and_in_filter(self):
        self.assert_res_count(3, {'tags_and': [self.tagA.pk, self.tagB.pk]})
class RecordsFilterAssetsTest(FilterTestCaseMixin, RESTTestCase):
    """Filtering records by source (assets_from) and target (assets_to)."""

    def setUp(self):
        super().setUp()
        self.asset_from_A = AssetFactory.create(owner=self.owner)
        self.asset_from_B = AssetFactory.create(owner=self.owner)
        self.asset_to_A = AssetFactory.create(owner=self.owner)
        self.asset_to_B = AssetFactory.create(owner=self.owner)
        # totals per asset: from_A 5+1=6, from_B 3+2=5, to_A 7+1=8, to_B 4+2=6
        RecordFactory.create_batch(5, asset_from=self.asset_from_A, owner=self.owner)
        RecordFactory.create_batch(3, asset_from=self.asset_from_B, owner=self.owner)
        RecordFactory.create_batch(7, asset_to=self.asset_to_A, owner=self.owner)
        RecordFactory.create_batch(4, asset_to=self.asset_to_B, owner=self.owner)
        RecordFactory.create_batch(1, asset_from=self.asset_from_A,
                                   asset_to=self.asset_to_A, owner=self.owner)
        RecordFactory.create_batch(2, asset_from=self.asset_from_B,
                                   asset_to=self.asset_to_B, owner=self.owner)

    def test_single_asset_from_filter(self):
        self.assert_res_count(6, {'assets_from': self.asset_from_A.pk})

    def test_single_asset_from_filter_for_empty_result(self):
        # a "to" asset never appears as a source
        self.assert_res_count(0, {'assets_from': self.asset_to_B.pk})

    def test_many_assets_from_filter(self):
        self.assert_res_count(
            11, {'assets_from': [self.asset_from_A.pk, self.asset_from_B.pk]})

    def test_single_asset_to_filter(self):
        self.assert_res_count(8, {'assets_to': self.asset_to_A.pk})

    def test_single_asset_to_filter_for_empty_result(self):
        # a "from" asset never appears as a target
        self.assert_res_count(0, {'assets_to': self.asset_from_B.pk})

    def test_many_assets_to_filter(self):
        self.assert_res_count(
            14, {'assets_to': [self.asset_to_A.pk, self.asset_to_B.pk]})

    def test_single_assets_from_to_filter(self):
        self.assert_res_count(1, {
            'assets_from': [self.asset_from_A.pk],
            'assets_to': [self.asset_to_A.pk],
        })

    def test_single_assets_from_to_filter_for_empty_result(self):
        self.assert_res_count(0, {
            'assets_from': [self.asset_from_B.pk],
            'assets_to': [self.asset_to_A.pk],
        })

    def test_multiple_assets_from_to_filter(self):
        # only the two mixed batches (1 + 2) match both filters
        self.assert_res_count(3, {
            'assets_from': [self.asset_from_A.pk, self.asset_from_B.pk],
            'assets_to': [self.asset_to_A.pk, self.asset_to_B.pk],
        })
class RecordsFilterModeTest(FilterTestCaseMixin, RESTTestCase):
    """Filtering records by mode (expense/income/transfer), OR semantics."""

    def setUp(self):
        super().setUp()
        # 5 expenses, 4 incomes, 3 transfers
        for mode, size in ((Record.EXPENSE, 5),
                           (Record.INCOME, 4),
                           (Record.TRANSFER, 3)):
            RecordFactory.create_batch(size, mode=mode, owner=self.owner)

    def test_record_mode_or_empty(self):
        self.assert_res_count(0, {'mode_or': ''})

    def test_record_mode_or_expense(self):
        self.assert_res_count(5, {'mode_or': Record.EXPENSE})

    def test_record_mode_or_income_transfer(self):
        self.assert_res_count(7, {'mode_or': [Record.INCOME, Record.TRANSFER]})
| |
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from eventlet import greenthread
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
import sqlalchemy as sa
from sqlalchemy.orm import exc
from sqlalchemy import sql
from neutron._i18n import _, _LE, _LI, _LW
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import model_base
from neutron.extensions import agent as ext_agent
from neutron.extensions import availability_zone as az_ext
from neutron import manager
LOG = logging.getLogger(__name__)

# Configuration options controlling agent liveness detection, the load
# metric reported by DHCP agents, and scheduling onto newly started agents.
AGENT_OPTS = [
    cfg.IntOpt('agent_down_time', default=75,
               help=_("Seconds to regard the agent is down; should be at "
                      "least twice report_interval, to be sure the "
                      "agent is down for good.")),
    cfg.StrOpt('dhcp_load_type', default='networks',
               choices=['networks', 'subnets', 'ports'],
               help=_('Representing the resource type whose load is being '
                      'reported by the agent. This can be "networks", '
                      '"subnets" or "ports". '
                      'When specified (Default is networks), the server will '
                      'extract particular load sent as part of its agent '
                      'configuration object from the agent report state, '
                      'which is the number of resources being consumed, at '
                      'every report_interval.'
                      'dhcp_load_type can be used in combination with '
                      'network_scheduler_driver = '
                      'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler '
                      'When the network_scheduler_driver is WeightScheduler, '
                      'dhcp_load_type can be configured to represent the '
                      'choice for the resource being balanced. '
                      'Example: dhcp_load_type=networks')),
    cfg.BoolOpt('enable_new_agents', default=True,
                help=_("Agent starts with admin_state_up=False when "
                       "enable_new_agents=False. In the case, user's "
                       "resources will not be scheduled automatically to the "
                       "agent until admin changes admin_state_up to True.")),
]
cfg.CONF.register_opts(AGENT_OPTS)
class Agent(model_base.BASEV2, model_base.HasId):
    """Represents agents running in neutron deployments."""

    # An agent is uniquely identified by what it is (agent_type) and where
    # it runs (host); duplicate registrations are rejected at the DB level.
    __table_args__ = (
        sa.UniqueConstraint('agent_type', 'host',
                            name='uniq_agents0agent_type0host'),
        model_base.BASEV2.__table_args__
    )

    # L3 agent, DHCP agent, OVS agent, LinuxBridge
    agent_type = sa.Column(sa.String(255), nullable=False)
    binary = sa.Column(sa.String(255), nullable=False)
    # TOPIC is a fanout exchange topic
    topic = sa.Column(sa.String(255), nullable=False)
    # TOPIC.host is a target topic
    host = sa.Column(sa.String(255), nullable=False)
    availability_zone = sa.Column(sa.String(255))
    admin_state_up = sa.Column(sa.Boolean, default=True,
                               server_default=sql.true(), nullable=False)
    # the time when first report came from agents
    created_at = sa.Column(sa.DateTime, nullable=False)
    # the time when first report came after agents start
    started_at = sa.Column(sa.DateTime, nullable=False)
    # updated when agents report
    heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
    # description is note for admin user
    description = sa.Column(sa.String(attributes.DESCRIPTION_MAX_LEN))
    # configurations: a json dict string, I think 4095 is enough
    configurations = sa.Column(sa.String(4095), nullable=False)
    # load - number of resources hosted by the agent
    load = sa.Column(sa.Integer, server_default='0', nullable=False)

    @property
    def is_active(self):
        # Alive means the last heartbeat arrived within cfg agent_down_time.
        return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp)
class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase):
    """Mixin class to add availability_zone extension to AgentDbMixin."""

    def _list_availability_zones(self, context, filters=None):
        """Map (az name, resource) -> True if any matching agent is admin-up."""
        result = {}
        query = self._get_collection_query(context, Agent, filters=filters)
        resource_by_agent_type = {
            constants.AGENT_TYPE_DHCP: 'network',
            constants.AGENT_TYPE_L3: 'router',
        }
        for agent in query.group_by(Agent.admin_state_up,
                                    Agent.availability_zone,
                                    Agent.agent_type):
            # agents without an AZ, and agent types that host no
            # AZ-scheduled resources, do not contribute
            if not agent.availability_zone:
                continue
            resource = resource_by_agent_type.get(agent.agent_type)
            if resource is None:
                continue
            key = (agent.availability_zone, resource)
            result[key] = result.get(key, False) or agent.admin_state_up
        return result

    def get_availability_zones(self, context, filters=None, fields=None,
                               sorts=None, limit=None, marker=None,
                               page_reverse=False):
        """Return a list of availability zones."""
        # NOTE(hichihara): 'tenant_id' is dummy for policy check.
        # it is not visible via API.
        zones = self._list_availability_zones(context, filters)
        az_list = []
        for (name, resource), available in six.iteritems(zones):
            az_list.append({'state': 'available' if available else 'unavailable',
                            'name': name, 'resource': resource,
                            'tenant_id': context.tenant_id})
        return az_list

    def validate_availability_zones(self, context, resource_type,
                                    availability_zones):
        """Verify that the availability zones exist."""
        if not availability_zones:
            return
        agent_type_by_resource = {'network': constants.AGENT_TYPE_DHCP,
                                  'router': constants.AGENT_TYPE_L3}
        agent_type = agent_type_by_resource.get(resource_type)
        if agent_type is None:
            return
        query = context.session.query(Agent.availability_zone).filter_by(
            agent_type=agent_type).group_by(Agent.availability_zone)
        query = query.filter(Agent.availability_zone.in_(availability_zones))
        known = {item[0] for item in query}
        missing = set(availability_zones) - known
        if missing:
            raise az_ext.AvailabilityZoneNotFound(
                availability_zone=missing.pop())
class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
    """Mixin class to add agent extension to db_base_plugin_v2."""

    def _get_agent(self, context, id):
        """Return the Agent DB object with *id* or raise AgentNotFound."""
        try:
            agent = self._get_by_id(context, Agent, id)
        except exc.NoResultFound:
            raise ext_agent.AgentNotFound(id=id)
        return agent

    def get_enabled_agent_on_host(self, context, agent_type, host):
        """Return agent of agent_type for the specified host."""
        query = context.session.query(Agent)
        query = query.filter(Agent.agent_type == agent_type,
                             Agent.host == host,
                             Agent.admin_state_up == sql.true())
        try:
            agent = query.one()
        except exc.NoResultFound:
            LOG.debug('No enabled %(agent_type)s agent on host '
                      '%(host)s', {'agent_type': agent_type, 'host': host})
            return
        # A stale heartbeat only warrants a warning: the caller still gets
        # the agent back and decides what to do with it.
        # NOTE(review): LOG.warn is a deprecated alias of LOG.warning.
        if self.is_agent_down(agent.heartbeat_timestamp):
            LOG.warn(_LW('%(agent_type)s agent %(agent_id)s is not active'),
                     {'agent_type': agent_type, 'agent_id': agent.id})
        return agent

    @classmethod
    def is_agent_down(cls, heart_beat_time):
        # Dead means the last heartbeat is older than cfg agent_down_time.
        return timeutils.is_older_than(heart_beat_time,
                                       cfg.CONF.agent_down_time)

    def get_configuration_dict(self, agent_db):
        """Deserialize the agent's JSON configurations column ({} on error)."""
        try:
            conf = jsonutils.loads(agent_db.configurations)
        except Exception:
            msg = _LW('Configuration for agent %(agent_type)s on host %(host)s'
                      ' is invalid.')
            LOG.warn(msg, {'agent_type': agent_db.agent_type,
                           'host': agent_db.host})
            conf = {}
        return conf

    def _get_agent_load(self, agent):
        """Extract the load reported by the agent (DHCP agents only for now).

        The load is read out of the agent's configurations dict using the
        configured dhcp_load_type key; all other agent types report 0.
        """
        configs = agent.get('configurations', {})
        load_type = None
        load = 0
        if(agent['agent_type'] == constants.AGENT_TYPE_DHCP):
            load_type = cfg.CONF.dhcp_load_type
        if load_type:
            load = int(configs.get(load_type, 0))
        return load

    def _make_agent_dict(self, agent, fields=None):
        """Convert an Agent DB object into an API response dict."""
        attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
            ext_agent.RESOURCE_NAME + 's')
        # 'alive' and 'configurations' are derived below rather than copied
        res = dict((k, agent[k]) for k in attr
                   if k not in ['alive', 'configurations'])
        res['alive'] = not AgentDbMixin.is_agent_down(
            res['heartbeat_timestamp'])
        res['configurations'] = self.get_configuration_dict(agent)
        res['availability_zone'] = agent['availability_zone']
        return self._fields(res, fields)

    def delete_agent(self, context, id):
        """Delete the agent with *id* (AgentNotFound if missing)."""
        with context.session.begin(subtransactions=True):
            agent = self._get_agent(context, id)
            context.session.delete(agent)

    def update_agent(self, context, id, agent):
        """Apply the request body to the agent with *id* and return it."""
        agent_data = agent['agent']
        with context.session.begin(subtransactions=True):
            agent = self._get_agent(context, id)
            agent.update(agent_data)
            return self._make_agent_dict(agent)

    def get_agents_db(self, context, filters=None):
        """Return raw Agent DB objects matching *filters*."""
        query = self._get_collection_query(context, Agent, filters=filters)
        return query.all()

    def get_agents(self, context, filters=None, fields=None):
        """Return agent dicts; the 'alive' filter is applied in Python
        because aliveness is derived from the heartbeat, not stored."""
        agents = self._get_collection(context, Agent,
                                      self._make_agent_dict,
                                      filters=filters, fields=fields)
        alive = filters and filters.get('alive', None)
        if alive:
            # alive filter will be a list
            alive = attributes.convert_to_boolean(alive[0])
            agents = [agent for agent in agents if agent['alive'] == alive]
        return agents

    def agent_health_check(self):
        """Scan agents and log if some are considered dead."""
        agents = self.get_agents(context.get_admin_context(),
                                 filters={'admin_state_up': [True]})
        dead_agents = [agent for agent in agents if not agent['alive']]
        if dead_agents:
            # build a small aligned table of the dead agents for the log
            data = '%20s %20s %s\n' % ('Type', 'Last heartbeat', "host")
            data += '\n'.join(['%20s %20s %s' %
                               (agent['agent_type'],
                                agent['heartbeat_timestamp'],
                                agent['host']) for agent in dead_agents])
            LOG.warn(_LW("Agent healthcheck: found %(count)s dead agents "
                         "out of %(total)s:\n%(data)s"),
                     {'count': len(dead_agents),
                      'total': len(agents),
                      'data': data})
        else:
            LOG.debug("Agent healthcheck: found %s active agents",
                      len(agents))

    def _get_agent_by_type_and_host(self, context, agent_type, host):
        """Return the unique agent for (agent_type, host) or raise."""
        query = self._model_query(context, Agent)
        try:
            agent_db = query.filter(Agent.agent_type == agent_type,
                                    Agent.host == host).one()
            return agent_db
        except exc.NoResultFound:
            raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
                                                    host=host)
        except exc.MultipleResultsFound:
            raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
                                                         host=host)

    def get_agent(self, context, id, fields=None):
        """Return the agent dict for *id*, optionally trimmed to *fields*."""
        agent = self._get_agent(context, id)
        return self._make_agent_dict(agent, fields)

    def _log_heartbeat(self, state, agent_db, agent_conf):
        # Heartbeat logging is opt-in via the agent's own configuration.
        if agent_conf.get('log_agent_heartbeats'):
            delta = timeutils.utcnow() - agent_db.heartbeat_timestamp
            LOG.info(_LI("Heartbeat received from %(type)s agent on "
                         "host %(host)s, uuid %(uuid)s after %(delta)s"),
                     {'type': agent_db.agent_type,
                      'host': agent_db.host,
                      'uuid': state.get('uuid'),
                      'delta': delta})

    def _create_or_update_agent(self, context, agent_state):
        """Registers new agent in the database or updates existing.

        Returns agent status from server point of view: alive, new or revived.
        It could be used by agent to do some sync with the server if needed.
        """
        status = constants.AGENT_ALIVE
        with context.session.begin(subtransactions=True):
            res_keys = ['agent_type', 'binary', 'host', 'topic']
            res = dict((k, agent_state[k]) for k in res_keys)
            if 'availability_zone' in agent_state:
                res['availability_zone'] = agent_state['availability_zone']
            configurations_dict = agent_state.get('configurations', {})
            res['configurations'] = jsonutils.dumps(configurations_dict)
            res['load'] = self._get_agent_load(agent_state)
            current_time = timeutils.utcnow()
            try:
                agent_db = self._get_agent_by_type_and_host(
                    context, agent_state['agent_type'], agent_state['host'])
                # an agent that was considered down and reports again is
                # flagged as revived so it can resync with the server
                if not agent_db.is_active:
                    status = constants.AGENT_REVIVED
                res['heartbeat_timestamp'] = current_time
                if agent_state.get('start_flag'):
                    res['started_at'] = current_time
                # NOTE(review): the sleep(0) calls yield this greenthread so
                # other eventlet workers can run during the transaction -
                # keep their placement intact.
                greenthread.sleep(0)
                self._log_heartbeat(agent_state, agent_db, configurations_dict)
                agent_db.update(res)
            except ext_agent.AgentNotFoundByTypeHost:
                # first report from this (agent_type, host): create the row
                greenthread.sleep(0)
                res['created_at'] = current_time
                res['started_at'] = current_time
                res['heartbeat_timestamp'] = current_time
                res['admin_state_up'] = cfg.CONF.enable_new_agents
                agent_db = Agent(**res)
                greenthread.sleep(0)
                context.session.add(agent_db)
                self._log_heartbeat(agent_state, agent_db, configurations_dict)
                status = constants.AGENT_NEW
            greenthread.sleep(0)
        return status

    def create_or_update_agent(self, context, agent):
        """Create or update agent according to report."""
        try:
            return self._create_or_update_agent(context, agent)
        except db_exc.DBDuplicateEntry:
            # It might happen that two or more concurrent transactions
            # are trying to insert new rows having the same value of
            # (agent_type, host) pair at the same time (if there has
            # been no such entry in the table and multiple agent status
            # updates are being processed at the moment). In this case
            # having a unique constraint on (agent_type, host) columns
            # guarantees that only one transaction will succeed and
            # insert a new agent entry, others will fail and be rolled
            # back. That means we must retry them one more time: no
            # INSERTs will be issued, because
            # _get_agent_by_type_and_host() will return the existing
            # agent entry, which will be updated multiple times
            return self._create_or_update_agent(context, agent)
class AgentExtRpcCallback(object):
    """Processes the rpc report in plugin implementations.

    This class implements the server side of an rpc interface.  The client
    side can be found in neutron.agent.rpc.PluginReportStateAPI.  For more
    information on changing rpc interfaces, see
    doc/source/devref/rpc_api.rst.

    API version history:
        1.0 - Initial version.
        1.1 - report_state now returns agent state.
    """

    target = oslo_messaging.Target(version='1.1',
                                   namespace=constants.RPC_NAMESPACE_STATE)
    # Server start time, used to drop agent reports queued from before the
    # server (re)started.
    START_TIME = timeutils.utcnow()

    def __init__(self, plugin=None):
        super(AgentExtRpcCallback, self).__init__()
        # The plugin may be resolved lazily on the first report_state call.
        self.plugin = plugin

    def report_state(self, context, **kwargs):
        """Report state from agent to server.

        Returns - agent's status: AGENT_NEW, AGENT_REVIVED, AGENT_ALIVE
        """
        time = kwargs['time']
        time = timeutils.parse_strtime(time)
        agent_state = kwargs['agent_state']['agent_state']
        self._check_clock_sync_on_agent_start(agent_state, time)
        if self.START_TIME > time:
            # Reports timestamped before the server started are stale
            # replays (e.g. queued while the server was down) - drop them.
            time_agent = datetime.datetime.isoformat(time)
            time_server = datetime.datetime.isoformat(self.START_TIME)
            log_dict = {'agent_time': time_agent, 'server_time': time_server}
            LOG.debug("Stale message received with timestamp: %(agent_time)s. "
                      "Skipping processing because it's older than the "
                      "server start timestamp: %(server_time)s", log_dict)
            return
        if not self.plugin:
            self.plugin = manager.NeutronManager.get_plugin()
        return self.plugin.create_or_update_agent(context, agent_state)

    def _check_clock_sync_on_agent_start(self, agent_state, agent_time):
        """Checks if the server and the agent times are in sync.

        Method checks if the agent time is in sync with the server time
        on start up. Ignores it, on subsequent re-connects.
        """
        if agent_state.get('start_flag'):
            time_server_now = timeutils.utcnow()
            diff = abs(timeutils.delta_seconds(time_server_now, agent_time))
            if diff > cfg.CONF.agent_down_time:
                # The clock skew exceeds the liveness threshold, so the
                # agent may be flapping between alive/dead - log loudly.
                agent_name = agent_state['agent_type']
                time_agent = datetime.datetime.isoformat(agent_time)
                host = agent_state['host']
                log_dict = {'host': host,
                            'agent_name': agent_name,
                            'agent_time': time_agent,
                            'threshold': cfg.CONF.agent_down_time,
                            'serv_time': (datetime.datetime.isoformat
                                          (time_server_now)),
                            'diff': diff}
                LOG.error(_LE("Message received from the host: %(host)s "
                              "during the registration of %(agent_name)s has "
                              "a timestamp: %(agent_time)s. This differs from "
                              "the current server timestamp: %(serv_time)s by "
                              "%(diff)s seconds, which is more than the "
                              "threshold agent down"
                              "time: %(threshold)s."), log_dict)
| |
"""
The User is really a cheap abstraction for inputs and outputs. Rather than have an individual document
for each input and each output, we group them by users. That way, a device that connects has all its properties
associated with one document, and it becomes trivial to modify/check its properties and valid input types.
This also means that users are very, very cheap - each item can be its own user. A heart rate sensor is one user,
and an android app is another - and each user can have multiple inputs and outputs that it defines.
This architecture choice has a key benefit: usually, a sensor defines specific inputs which are somehow related
to each other. This means that correlations and compression of data is more likely to be effective with all the
inputs of one user.
The user data structure is as follows:
_id: the uid
secret: The "password" which gives access to the uid
perm: The permissions of this user with respect to other users
{
uid: {
r: Read data that the user wrote (including things it triggered, and its registered inputs/outputs)
o: Trigger the user's outputs
p: Read the user's permissions
s: Read the user's secret
wp: Write user's permissions (can only enable permissions it itself has)
ws: Write the user's secret
wr: Write the user's registered inputs (ie, allows another user to register inputs)
wo: Write the user's outputs (ie, allows another user to create/modify outputs)
d: Can delete user
}
}
1) "db" - sets permissions database-wide (admin has db.<everything>=True)
2) uid - sets permissions for the given uid
write: T/F : Whether or not allowed to write _data_ to the database
wunreg: If write is true, then:
-1 : User is not allowed to write unregistered inputs to the database
<any other number>: The kafka partition number that unregistered inputs are written to.
create: T/F : Whether or not allowed to create user. The new user can only have permissions <= creating user
trigger: [] : Specific IDs of outputs that the user is allowed to trigger. This is similar to the 'o' permission,
but here, it is not per-user, but rather a specific list of outputs
inputs: The inputs to the database that the uid registers. In general, inputs do not need to be registered, and can be
of any type. A registered input is constrained to within the registered type/range. It allows analysis
algorithms to normalize and process the data. Registering inputs also allows easy finding of the most common inputs
outputs: The outputs from the database that the uid registers. Outputs require registration.
Inputs and outputs have data of the following format:
[
{
id: The Kafka partition number to which this input/output is written to
parts: {
partname: {metadata}
}
meta: {metadata}
}
]
"""
from simplify.mongo import MongoObject
from bson.objectid import ObjectId
#We define a custom exception to handle the actions of the user
class PermissionDenied(Exception):
    """Raised when the user lacks permission for the attempted action."""
    pass
class User(object):
    """
    Exposes the datastore as a specific user sees it.

    Maintains and enforces all restrictions and permissions for the specific
    user: every read/write/register call is checked against the user's
    permission document before touching the datastore.
    """

    def __init__(self, conn, usr, secret=None):
        """Load user *usr* through connection *conn*.

        If *secret* is given the user is authenticated with it; otherwise the
        user document is fetched directly. Raises PermissionDenied when the
        user cannot be loaded (unknown uid or wrong secret).
        """
        self.connection = conn
        # BUGFIX: every method accesses the datastore through self.ds, but it
        # was never assigned anywhere - bind it to the connection here.
        # TODO(review): confirm conn is the datastore object the methods expect.
        self.ds = conn
        # Try loading the user
        if (secret is not None):
            self.usr = self.ds.users(usr, secret)
        else:
            self.usr = self.ds.users.get(usr)
        # Make sure that the user was extracted
        if (self.usr is None):
            raise PermissionDenied()

    def reload(self):
        """Re-fetch the user document from the database."""
        self.usr = self.ds.users.get(self.usr.id)

    def write(self, data, timestamp=None):
        """Write data to the database - the user's single most important action."""
        if not self.usr.write:
            raise PermissionDenied()
        self.ds.inputs.add(self.usr.id, data, timestamp)

    def read(self, users=None, attributes=None, starttime=None, endtime=None):
        """Read data, restricted to the users this user is permitted to read.

        With db-wide read permission everything is allowed; otherwise the
        requested users are intersected with the permitted read set, and
        PermissionDenied is raised when nothing readable remains.
        """
        if not self.usr.pRead("db"):
            # No unlimited read access - restrict to permitted users only
            if users is None:
                # users is None, so we want results of all permitted users
                users = list(self.usr.readset())
            elif isinstance(users, str):
                if str(users) not in self.usr.readset():
                    raise PermissionDenied()
            else:
                users = list(set(users).intersection(self.usr.readset()))
                if len(users) == 0:
                    raise PermissionDenied()
        return self.ds.inputs.get(users, attributes, starttime, endtime)

    def register(self, name, meta=None):
        """Register input *name* with optional *meta*data.

        BUGFIX: meta previously defaulted to a shared mutable {}; a None
        sentinel avoids state leaking across calls.
        """
        meta = {} if meta is None else meta
        if not (self.usr.pWriteInputs(self.usr.id) or self.usr.pWriteInputs("db")):
            raise PermissionDenied()
        # We don't explicitly run addInput, since we want to keep
        # any additional metadata that might already exist in the register
        self.usr.input(name, setv=meta)

    def unregister(self, name):
        """Remove registered input *name* (requires write-inputs permission)."""
        if not (self.usr.pWriteInputs(self.usr.id) or self.usr.pWriteInputs("db")):
            raise PermissionDenied()
        self.usr.remInput(name)

    def getinput(self, name):
        """Return the registered input *name* (requires read permission)."""
        if not (self.usr.pRead(self.usr.id) or self.usr.pRead("db")):
            raise PermissionDenied()
        return self.usr.getInput(name)

    def intersectOutputs(self, outputs):
        """Return the subset of *outputs* this user is allowed to trigger."""
        res = []
        for o in outputs:
            # If the output in question o is in the list of trigger outputs
            if self.usr.output(o):  # or the output's parent is permitted
                res.append(o)
        # BUGFIX: the intersection was computed but never returned
        return res

    def mkuser(self, perm=None, write=False, create=False, outputs=None):
        """Create a child user with permissions no greater than this user's.

        NOTE(review): this method looks unfinished - it validates/clamps the
        arguments but never actually creates a user document. Confirm the
        rest of the implementation before relying on it.
        BUGFIX: perm/outputs previously defaulted to shared mutable objects.
        """
        perm = {} if perm is None else perm
        outputs = [] if outputs is None else outputs
        if not self.usr.create:
            raise PermissionDenied()
        if not self.usr.write:
            write = False  # If this user can't write, then the child sure as hell can't
import uuid
from bson.objectid import ObjectId
class usr(object):
    """In-memory view of one user document backed by a Mongo collection.

    The user info for each object being accessed is stored in memory in this
    object; mutations write through to the database when things are set/unset.
    NOTE: The object does NOT query the database for checking things. It
    caches all the data in memory; it must be reloaded for changes in the
    database not initiated through this object to become visible.
    """
    def __init__(self, data, db):
        """`data` is the raw user document; `db` is the Mongo collection."""
        self.db = db
        self.__id = data["_id"]
        self.__secret = data["secret"]
        self.__perm = data["perm"]
        self.__write = data["write"]
        self.__wunreg = data["wunreg"]
        self.__create = data["create"]
        self.__trigger = data["trigger"]
        self.__outputs = data["outputs"]
        self.__inputs = data["inputs"]
        # Cached set of permitted readers; recomputed lazily by readset()
        self.rset = None

    # WRITE and CREATE are properties of the user (usr.write / usr.create)
    def getWrite(self):
        return self.__write
    def setWrite(self, s):
        if s != self.__write:
            self.db.update({"_id": self.__id}, {"$set": {"write": s}}, upsert=False)
            self.__write = s
    write = property(getWrite, setWrite, doc="Whether or not the accessor has write permissions")

    def getWriteUnregistered(self):
        return self.__wunreg
    def setWriteUnregistered(self, s):
        if s != self.__wunreg:
            self.db.update({"_id": self.__id}, {"$set": {"wunreg": s}}, upsert=False)
            # BUGFIX: the original assigned self.__write here, so the cached
            # wunreg flag never changed (and the write flag was clobbered).
            self.__wunreg = s
    writeunregistered = property(getWriteUnregistered, setWriteUnregistered,
                                 doc="Whether or not the accessor is allowed to write unregistered input")

    def getCreate(self):
        return self.__create
    def setCreate(self, s):
        if s != self.__create:
            self.db.update({"_id": self.__id}, {"$set": {"create": s}}, upsert=False)
            self.__create = s
    create = property(getCreate, setCreate, doc="Whether or not accessor allowed to create users")

    # SECRET is also a property of the user
    def getSecret(self):
        return self.__secret
    def setSecret(self, s):
        self.db.update({"_id": self.__id}, {"$set": {"secret": s}}, upsert=False)
        self.__secret = s
    secret = property(getSecret, setSecret)

    def trigger(self, oid, newval=None):
        """Get (newval None) or set (True/False) permission to toggle output `oid`."""
        hasoutput = oid in self.__trigger
        if newval is None:
            return hasoutput
        elif newval == True and not hasoutput:
            self.db.update({"_id": self.__id}, {"$addToSet": {"trigger": ObjectId(oid)}}, upsert=False)
            # NOTE(review): the DB stores ObjectId(oid) while the cache stores
            # oid exactly as passed; callers should pass ObjectId for the
            # cached and persisted lists to agree - confirm at call sites.
            self.__trigger.append(oid)
        elif newval == False:
            # Remove it even if it isn't there, just in case it was inserted since we checked
            self.db.update({"_id": self.__id}, {"$pull": {"trigger": ObjectId(oid)}})
            if hasoutput:  # list.remove raises if absent, so check first
                self.__trigger.remove(oid)

    def triggerlist(self):
        # Returns the list of IDs for the outputs that the user is allowed to toggle
        return self.__trigger

    def __permSet(self, perm, uid, newval):
        """Get (newval None), grant (True) or revoke (False) permission `perm` for `uid`."""
        # Permission keys are stored as strings; normalize once so lookups,
        # inserts and deletes all agree (the original mixed uid and str(uid),
        # so a non-string uid could be written but never found again).
        uid = str(uid)
        hasid = uid in self.__perm
        hasperm = hasid and (perm in self.__perm[uid])
        if newval is None:
            # We just get the permissions
            return hasid and hasperm
        elif newval == True:
            if not hasid:
                # We add both the id and the permission in one go
                self.db.update({"_id": self.__id}, {"$set": {"perm." + uid: {perm: True}}}, upsert=False)
                self.__perm[uid] = {perm: True}
            elif not hasperm:
                # The ID exists - so we just add the permission
                self.db.update({"_id": self.__id}, {"$set": {"perm." + uid + "." + perm: True}}, upsert=False)
                self.__perm[uid][perm] = True
        elif hasid and hasperm:
            # newval is False - remove the permission
            if len(self.__perm[uid]) <= 1:
                # This permission is the only one for the given ID - delete the entire ID
                self.db.update({"_id": self.__id}, {"$unset": {"perm." + uid: ""}}, upsert=False)
                del self.__perm[uid]
            else:
                # There are more permissions for the ID, so just delete this specific one
                self.db.update({"_id": self.__id}, {"$unset": {"perm." + uid + "." + perm: ""}}, upsert=False)
                del self.__perm[uid][perm]
        # All changes were implemented
        return None

    # Getting and setting permissions for the user with regards to other specific users
    def pRead(self, uid, newval=None):
        if newval is not None:  # The cached set of valid readers is no longer valid
            self.rset = None
        return self.__permSet("r", uid, newval)
    def pTrigger(self, uid, newval=None):
        return self.__permSet("o", uid, newval)
    def pReadPerm(self, uid, newval=None):
        return self.__permSet("p", uid, newval)
    def pReadSecret(self, uid, newval=None):
        return self.__permSet("s", uid, newval)
    def pWritePerm(self, uid, newval=None):
        return self.__permSet("wp", uid, newval)
    def pWriteSecret(self, uid, newval=None):
        return self.__permSet("ws", uid, newval)
    def pWriteInputs(self, uid, newval=None):
        return self.__permSet("wr", uid, newval)
    def pWriteOutputs(self, uid, newval=None):
        return self.__permSet("wo", uid, newval)
    def pDelete(self, uid, newval=None):
        return self.__permSet("d", uid, newval)

    def getperm(self, uid):
        """Return the raw permission dict for `uid`, or None."""
        uid = str(uid)  # keys are stored as strings (see __permSet)
        if uid in self.__perm:
            return self.__perm[uid]
        return None

    def readset(self):
        """Return the (cached) set of uids this user may read; see pRead()."""
        if self.rset is None:
            self.rset = set(key for key in self.__perm if "r" in self.__perm[key])
        return self.rset

    def getID(self):
        return self.__id
    id = property(getID)

    # Functions for modifying input and output registrations.
    # These are deliberately low-level; a further wrapper class should manage
    # the structure of the per-IO documents.
    def addIO(self, io, parts, meta=None):
        """Append a registration doc to array field `io` and return it."""
        if meta is None:
            meta = {}  # avoid the shared-mutable-default pitfall
        doc = {"id": ObjectId(uuid.uuid4().hex[:24]), "meta": meta, "parts": parts}
        self.db.update({"_id": self.__id}, {"$addToSet": {io: doc}}, upsert=False)
        return doc
    def remIO(self, io, ioid):
        self.db.update({"_id": self.__id}, {"$pull": {io: {"id": ioid}}}, upsert=False)
    def clrIO(self, io):
        self.db.update({"_id": self.__id}, {"$set": {io: []}}, upsert=False)
    def metaIO(self, io, ioid, getv=None, setv=None, delv=None):
        # TODO: not implemented yet
        pass

    def addInput(self, name, meta=None):
        # Adds an input with the given name and given metadata to the register
        if meta is None:
            # BUGFIX/robustness: the original stored a literal None when meta
            # was omitted (e.g. via input()), which made the entry look
            # unregistered to getInput(); store an empty dict instead.
            meta = {}
        self.db.update({"_id": self.__id}, {"$set": {"inputs." + name: meta}}, upsert=False)
        self.__inputs[name] = meta
    def remInput(self, name):
        # Removes the input with the given name from the register
        if name in self.__inputs:
            self.db.update({"_id": self.__id}, {"$unset": {"inputs." + name: ""}}, upsert=False)
            del self.__inputs[name]
    def clrInputs(self):
        # Clears the inputs
        self.db.update({"_id": self.__id}, {"$set": {"inputs": {}}}, upsert=False)
        self.__inputs = {}
    def setInput(self, name, meta):
        # Sets the input with the given name with meta; input must already exist
        if name in self.__inputs:
            self.addInput(name, meta)
        else:
            raise Exception("Could not find the given input")
    def getInput(self, name):
        # Gets the input with the given name. Returns None if no input exists
        if name in self.__inputs:
            return self.__inputs[name]
        return None

    # Allows to get/set/delete values in the input's register freely.
    def input(self, name, getv=None, setv=None, delv=None, create=True):
        """Get (`getv`), set (`setv`) or delete (`delv`) keys of input `name`.

        If the input does not exist it is created from `setv` when `create`
        is True, otherwise an Exception is raised. Returns the filled-in
        `getv` dict when given, else None.
        """
        n = self.getInput(name)
        if n is None:
            # The input does not exist yet
            if create == False:
                raise Exception("Could not find the given input")
            else:
                self.addInput(name, setv)
                n = self.getInput(name)
        elif setv is not None:
            # The input exists - merge the given keys into it
            s = {}
            for key in setv:
                s["inputs." + str(name) + "." + key] = setv[key]
                n[key] = setv[key]
            self.db.update({"_id": self.__id}, {"$set": s}, upsert=False)
        if delv is not None:
            s = {}
            for v in delv:
                s["inputs." + str(name) + "." + v] = ""
                if v in n:
                    del n[v]
            self.db.update({"_id": self.__id}, {"$unset": s}, upsert=False)
        if getv is not None:
            # Get the values it asks for (missing keys come back as None)
            for key in getv:
                if key in n:
                    getv[key] = n[key]
                else:
                    getv[key] = None
            return getv
        return None

    def inputlist(self):
        # Returns a list of all registered input names
        return self.__inputs.keys()

    def delete(self):
        # Deletes the entire record for the user
        self.db.remove({"_id": self.__id})

    def __eq__(self, x):
        # Users compare equal when their string forms (ids) match
        return str(self) == str(x)
    def __str__(self):
        return str(self.id)
if (__name__=="__main__"):
    # Smoke test: spin up a fresh datastore in local test directories, create
    # the users collection, and tear everything down.
    # NOTE(review): MongoContainer and DataStore are defined elsewhere in this
    # module (outside this excerpt) - confirm they are in scope here.
    #Change the file locations of the container for testing
    MongoContainer.fileLocation = "./test_db"
    MongoContainer.tmpLocation = "./test_mnt"
    cname = "adfdsf"    # container name (arbitrary test value)
    dname = "adfsfd"    # database name (arbitrary test value)
    cpwd = "password"   # database password for the test store
    import shutil
    import time
    # Start from a clean slate: remove leftovers from any previous run
    if (os.path.exists(MongoContainer.fileLocation)):
        shutil.rmtree(MongoContainer.fileLocation)
    if (os.path.exists(MongoContainer.tmpLocation)):
        shutil.rmtree(MongoContainer.tmpLocation)
    #Open a datastore
    ds = DataStore(cname,dname,cpwd,create = True)
    ds.users.create()
    ds.close()
    print "\n\nAll tests completed successfully!"
| |
"""
This script will instruct a physical machine or VM
to run a binary for a specified amount of time
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import logging
logger = logging.getLogger(__name__)
import optparse
import sys
import os
import socket
import time
import multiprocessing
# LO-PHI
sys.path.append(os.path.join(os.getcwd(), "../"))
sys.path.append(os.path.join(os.getcwd(), "../../"))
os.chdir("../../")
import lophi.globals as G
from lophi.sensors.memory import MemorySensor
from lophi.sensors.disk import DiskSensor
from lophi.sensors.cpu import CPUSensor
from lophi.sensors.control import ControlSensor
from lophi.actuation.keypressgenerator import KeypressGeneratorPhysical,KeypressGeneratorVirtual
from lophi.analysis import MemoryAnalysisEngine
from lophi.analysis import DiskAnalysisEngine
import lophi.configs.helper as CONF
class DiskAnalysisProcess(multiprocessing.Process):
def __init__(self, machine):
multiprocessing.Process.__init__(self)
self.machine = machine
def start(self):
if self.machine.disk is None:
logger.error("Machine does not have disk sensor!")
sys.exit(0)
disk_sensor = self.machine.disk
# connect to disk sensor
disk_sensor._connect()
if self.machine.type == G.MACHINE_TYPES.PHYSICAL:
print "Printing registers..."
disk_sensor.print_all_registers()
print "Modifying Registers..."
disk_sensor.sata_enable_all()
disk_sensor.set_udp_delay(200)
disk_sensor.print_all_registers()
print "Reading packets..."
while 1:
# Get our packet
packet = disk_sensor.get_disk_packet()
# TODO: log the packet
def main(options):
    """
    Main function: validate options, attach sensors, then run the keypress
    script on the target machine for the requested number of trials, taking
    (currently stubbed-out) memory/disk snapshots around each run.
    """
    # --- option validation -------------------------------------------------
    if options.machine_config is None:
        logger.error("No config file given.")
        return
    if options.command_file is None:
        logger.error("No script file provided.")
        return
    # This isn't the class we use in practice, but fake it here for simplicity
    # Get list of machine objects
    machines = CONF.import_from_config(options.machine_config, "machine")
    if options.machine not in machines:
        logger.error("%s is not a valid machine from the config file."%options.machine)
        logger.error("Valid targets are: %s"%machines.keys())
        return
    machine = machines[options.machine]
    # --- sensor attachment -------------------------------------------------
    # Add a sensors to physical machines if needed
    if machine.type == G.MACHINE_TYPES.PHYSICAL:
        has_memory = has_disk = False
        # Ensure that a sensor config is defined
        if options.sensor_config is None:
            logger.error("A sensor config file must be defined for physical analysis")
            return
        # Get the list of sensors
        sensors = CONF.import_from_config(options.sensor_config, "sensor")
        # Add sensors to our machine
        print "Trying to find physical sensors for %s..."%options.machine
        added_sensors = machine.add_sensors(sensors)
    # Check that we can do both memory and disk analysis
    if not machine.memory:
        logger.error("No memory sensor available for analysis! Quitting.")
        return
    if not machine.disk:
        logger.error("No disk sensor available for analysis! Quitting.")
        return
    if not machine.control:
        logger.error("No control sensor available for analysis! Quitting.")
        return
    # --- command script ----------------------------------------------------
    # load the command script
    if not os.path.exists(options.command_file):
        logger.error("File (%s) does not exist!" % options.command_file)
        sys.exit(0)
    # prepare the command script parser (keypress encoding differs per type)
    parser = None
    if machine.type == G.MACHINE_TYPES.PHYSICAL:
        parser = KeypressGeneratorPhysical()
    else:
        parser = KeypressGeneratorVirtual()
    # open file
    f = open(options.command_file, 'r')
    script_text = f.read()
    f.close()
    script = parser.text_to_script(script_text)
    # --- trials ------------------------------------------------------------
    # Start the trials
    for trial_num in range(options.trials):
        print "Running trial %d" % trial_num
        # Prep the machine -- reset it
        if machine.type != G.MACHINE_TYPES.PHYSICAL:
            machine.machine_reset()
        else:
            # Physical machines restore via PXE, then power off
            machine.machine_reset(options.pxe_server)
            machine.power_off()
            # Wait for machine to shutdown
            time.sleep(15)
            # Wait until machine has an ip address (polled from the PXE server)
            logger.info("Waiting to get IP address of machine from PXE Server.")
            start_time = time.time()
            timeout = 360
            while True:
                #machine.ip_addr = get_ip(options.pxe_server, machine.get_mac_addr())
                if (time.time() - start_time) > timeout:
                    logger.error("Could not get ip address for test machine from PXE Server for %d s" % timeout)
                    break
                ip = machine.get_ip_addr(options.pxe_server)
                if ip:
                    logger.info("Machine has IP address %s" % ip)
                    break
        # wait until machine is up (network reachable), bounded by timeout
        logger.info("Waiting for machine to be up on the network.")
        start_time = time.time()
        timeout = 360
        while True:
            if (time.time() - start_time) > timeout:
                logger.error("Timed out while waiting for machine to come back up (e.g. waiting for system to boot)")
                break
            if machine.get_net_status():
                break
        logger.info("Machine is back up. Commencing analysis.")
        # Pause before the first snapshot (VM only; physical pause is a stub)
        if machine.type != G.MACHINE_TYPES.PHYSICAL:
            logger.info("Pausing Virtual Machine!")
            machine.machine_pause()
        else:
            # pass
            logger.info("Pausing Physical Machine Not Implemented Yet!")
        # Take memory snapshot #1
        logger.info("Taking start memory dump")
        #memory_dump(machine, os.path.join(options.output_dir, "mem_dump_start" + str(trial_num)))
        # TODO: Spawn data consumers for disk and memory?
        logger.info("TODO: Starting disk analysis")
        # Resume machine
        if machine.type != G.MACHINE_TYPES.PHYSICAL:
            logger.info("Resuming Virtual Machine!")
            machine.machine_resume()
        else:
            # pass
            logger.info("Resuming Physical Machine Not Implemented Yet!")
        # Run command script and wait runtime seconds
        logger.info("Running %s script for %d seconds." % (options.command_file, options.runtime))
        machine.keypress_send(script)
        time.sleep(options.runtime)
        # pause machine if VM
        if machine.type != G.MACHINE_TYPES.PHYSICAL:
            logger.info("Pausing Virtual Machine!")
            machine.machine_pause()
        else:
            # pass
            logger.info("Pausing Physical Machine Not Implemented Yet!")
        logger.info("TODO: Stopping disk analysis")
        #disk_analysis.stop()
        # Take memory snapshot #2
        logger.info("Taking end memory dump")
        #memory_dump(machine, os.path.join(options.output_dir, "mem_dump_end" + str(trial_num)))
        # Resume machine
        if machine.type != G.MACHINE_TYPES.PHYSICAL:
            logger.info("Resuming Virtual Machine!")
            machine.machine_resume()
        else:
            # pass
            logger.info("Resuming Physical Machine Not Implemented Yet!")
        print "Completed trial %d" % trial_num
    print "Completed all trials."
def reset_vm(machine, parser):
    """Power-cycle *machine* and PXE-boot it into the clonezilla restore job."""
    # Power-cycle the VM
    machine.power_shutdown()
    time.sleep(30)
    machine.power_on()
    # Give the firmware a moment to start
    time.sleep(5)
    # Mash the boot-menu key a few times so we don't miss the window
    for _ in range(3):
        machine.keypress_send([parser.parse_special("F12")])
    # Select PXE boot from the boot menu
    machine.keypress_send([parser.parse_text("2")])
    # wait for PXE boot
    time.sleep(10)
    # Accept the appropriate clonezilla batch job
    machine.keypress_send([parser.parse_special("RETURN")])
def reset_phys(machine, parser, options):
    """Reset a physical machine to a saved state.

    Currently a no-op: the PXE/ACL-based restore flow below is kept,
    commented out, as a sketch of the intended implementation.
    """
    # machine.power_on()
    #
    # # Machine will be set to automatically pxe boot and reset itself
    # # add machine's mac address to acl server
    # add_mac(options.pxe_server, machine.get_mac_addr())
    #
    # time.sleep(120)
    #
    # # delete machine's mac address from acl server so that machine will
    # # timeout its pxe boot and boot from its hard drive
    # del_mac(options.pxe_server, machine.get_mac_addr())
    pass
# def add_mac(pxe_server, mac_addr):
# msg = G.PXE_ADD_ACL + mac_addr
# send(msg, pxe_server, G.PXEBOOT_PORT)
#
#
# def del_mac(pxe_server, mac_addr):
# msg = G.PXE_DEL_ACL + mac_addr
# send(msg, pxe_server, G.PXEBOOT_PORT)
#
# def get_ip(pxe_server, mac_addr):
# msg = G.PXE_GET_IP + mac_addr
# #resp = send(msg, pxe_server, G.PXEBOOT_PORT)[0]
# resp = send(msg, pxe_server, 4011)[0]
# if resp == G.PXE_NO_IP_RESP:
# return None
# else:
# return resp
#
# def send(msg, ip, port):
# sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# logger.info("Sending to PXE Server: %s" % msg)
# sock.sendto(msg, (ip, port))
#
# # get response
# resp = sock.recvfrom(512)
# logger.info("Got response: %s" % resp[0])
# return resp
def reset(machine, parser, options):
    """
    Resets machine to a saved state. For testing purposes only.
    Will eventually be ported to machine.machine_reset()
    """
    # Dispatch on machine type: physical boxes restore over PXE, everything
    # else is treated as a VM.
    is_physical = machine.type == G.MACHINE_TYPES.PHYSICAL
    if is_physical:
        reset_phys(machine, parser, options)
    else:
        reset_vm(machine, parser)
def memory_dump(machine, output_filename):
"""
Records memory snapshot
"""
# Create output file
try:
os.makedirs(os.path.dirname(output_filename))
except:
pass
try:
output_file = open(output_filename, "w+")
except:
print "ERROR: Could not open output file."
sys.exit(0)
start_addr = 0 # TODO bug in KVM cannot read first 8 bytes
if machine.type != G.MACHINE_TYPES.PHYSICAL:
start_addr = 8
MEM_SIZE = machine.memory.get_memsize()
READ_SIZE = (32 * 1024) # 32K
if MEM_SIZE < READ_SIZE:
READ_SIZE = MEM_SIZE
# Read memory
print "Reading memory from %d to %d" % (start_addr, MEM_SIZE)
# Get memory from remote system
read_addr = start_addr
for addr in range(start_addr, start_addr+(MEM_SIZE / READ_SIZE)):
logger.debug("Reading %d chunks of size %d bytes from 0x%016x"%(READ_SIZE,MEM_SIZE,addr))
# Read memory
data = machine.memory_read(read_addr, READ_SIZE)
# Set point to the next chunk
read_addr += READ_SIZE
# Write to file
output_file.write(data)
# Close output file
output_file.close()
print "Memory dump (%d bytes) written to %s." % (MEM_SIZE*READ_SIZE,output_filename)
if __name__ == "__main__":
    # Import our command line parser
    opts = optparse.OptionParser()
    # Get our machine types (skip the ASCII helper and private attributes)
    # NOTE(review): machine_types is built but never used below - presumably
    # intended for option validation or help text; confirm before removing.
    machine_types = {}
    for x in G.MACHINE_TYPES.__dict__: # @UndefinedVariable
        if x != "ASCII" and not x.startswith("_"):
            machine_types[x] = G.MACHINE_TYPES.__dict__[x] # @UndefinedVariable
    # Machine configs
    opts.add_option("-c", "--config", action="store", type="string",
        dest="machine_config", default=None,
        help="Config file containing machine descriptions.")
    # Sensors
    opts.add_option("-s", "--sensor_config", action="store", type="string",
        dest="sensor_config", default=None,
        help="Config file containing sensor descriptions.")
    # Comand line options
    opts.add_option("-m", "--machine", action="store", type="string",
        dest="machine", default=None,
        help="Machine to perform analysis on.")
    # Command file to run
    opts.add_option("-f", "--command-file", action="store", type="string",
        dest="command_file", default=None,
        help="Command file containing keyboard commands to send to the machine.")
    # Time to run in seconds
    opts.add_option("-r", "--runtime", action="store", type="int",
        dest="runtime", default=300,
        help="Total time to run the binary")
    # Number of trials to run
    opts.add_option("-t", "--trials", action="store", type="int",
        dest="trials", default=300,
        help="Total number of trials")
    # output directory for memory sensor
    opts.add_option("-o", "--out-dir", action='store', type="string",
        help="Output directory to save memory dump to. (Default: mem_dumps)", default="mem_dumps", dest='output_dir')
    opts.add_option("-d", "--debug", action="store_true",
        dest="debug", default=False,
        help="Enable DEBUG")
    # PXE server
    opts.add_option("-p", "--pxe_server", action='store', type="string",
        help="IP of PXE server. E.g. 127.0.0.1", dest='pxe_server')
    # Get arguments
    (options, positionals) = opts.parse_args(None)
    # Get our log level
    if options.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig()
    # start program
    main(options)
| |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import csv
import tempfile
from collections import defaultdict
from pathlib import Path
import torchaudio
try:
import webrtcvad
except ImportError:
raise ImportError("Please install py-webrtcvad: pip install webrtcvad")
import pandas as pd
from tqdm import tqdm
from examples.speech_synthesis.preprocessing.denoiser.pretrained import master64
import examples.speech_synthesis.preprocessing.denoiser.utils as utils
from examples.speech_synthesis.preprocessing.vad import (
frame_generator, vad_collector, read_wave, write_wave, FS_MS, THRESHOLD,
SCALE
)
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)  # module-level logger
# Sub-directories (under the output dir) for denoised / VAD-processed audio.
PATHS = ["after_denoise", "after_vad"]
# Minimum clip duration in seconds; shorter post-VAD segments are dropped.
MIN_T = 0.05
def generate_tmp_filename(extension="txt"):
    """Return a unique temporary file path ending in ``.extension``.

    Uses :func:`tempfile.mkstemp` rather than the private
    ``tempfile._get_default_tempdir``/``_get_candidate_names`` helpers the
    original relied on; the file is created atomically (descriptor closed
    immediately), so the name cannot race with another process. Callers
    (sox / write_wave) simply overwrite the empty file.
    """
    fd, path = tempfile.mkstemp(suffix="." + extension)
    os.close(fd)
    return path
def convert_sr(inpath, sr, output_path=None):
    """Resample ``inpath`` to ``sr`` Hz with sox and return the output path.

    If ``output_path`` is not given, a fresh temporary wav path is used.
    """
    import subprocess  # local import keeps the module's import block unchanged
    if not output_path:
        output_path = generate_tmp_filename("wav")
    # Argument-list form avoids shell interpolation of untrusted file names
    # (the original built a shell string for os.system). check=True surfaces
    # sox failures immediately instead of failing later on a missing file.
    subprocess.run(["sox", inpath, "-r", str(sr), output_path], check=True)
    return output_path
def apply_vad(vad, inpath):
    """Run webrtc VAD over the wav file at ``inpath``.

    Returns ``(pcm_bytes, sample_rate)`` where ``pcm_bytes`` is the voiced
    segments concatenated, with the silence between segments capped at
    THRESHOLD seconds of zero bytes.
    """
    audio, sample_rate = read_wave(inpath)
    frames = frame_generator(FS_MS, audio, sample_rate)
    frames = list(frames)
    # vad_collector yields (segment_bytes, start_ts, end_ts) tuples
    segments = vad_collector(sample_rate, FS_MS, 300, vad, frames)
    merge_segments = list()
    timestamp_start = 0.0
    timestamp_end = 0.0
    # removing start, end, and long sequences of sils
    for i, segment in enumerate(segments):
        merge_segments.append(segment[0])
        if i and timestamp_start:
            # silence between the previous segment's end and this one's start
            sil_duration = segment[1] - timestamp_end
            if sil_duration > THRESHOLD:
                # cap long silences at THRESHOLD seconds of zero-valued PCM
                merge_segments.append(int(THRESHOLD / SCALE) * (b'\x00'))
            else:
                merge_segments.append(int((sil_duration / SCALE)) * (b'\x00'))
        timestamp_start = segment[1]
        timestamp_end = segment[2]
    segment = b''.join(merge_segments)
    return segment, sample_rate
def write(wav, filename, sr=16_000):
    """Save tensor ``wav`` to ``filename`` as 16-bit signed PCM at rate ``sr``.

    The signal is scaled down by its peak only when that peak exceeds 1,
    which prevents clipping without touching already in-range audio.
    """
    peak = wav.abs().max().item()
    scaled = wav / max(peak, 1)
    torchaudio.save(filename, scaled.cpu(), sr,
                    encoding="PCM_S", bits_per_sample=16)
def process(args):
    """Denoise and/or VAD-trim every file listed in ``args.audio_manifest``.

    Processed audio is written under ``args.output_dir`` into the PATHS
    sub-directories, and an updated TSV manifest (same name as the input
    manifest) is saved alongside. Clips shorter than MIN_T seconds after
    VAD are dropped from the output manifest.
    """
    # making sure we are requested either denoise or vad
    if not args.denoise and not args.vad:
        log.error("No denoise or vad is requested.")
        return
    log.info("Creating out directories...")
    if args.denoise:
        out_denoise = Path(args.output_dir).absolute().joinpath(PATHS[0])
        out_denoise.mkdir(parents=True, exist_ok=True)
    if args.vad:
        out_vad = Path(args.output_dir).absolute().joinpath(PATHS[1])
        out_vad.mkdir(parents=True, exist_ok=True)
    log.info("Loading pre-trained speech enhancement model...")
    model = master64().to(args.device)
    log.info("Building the VAD model...")
    vad = webrtcvad.Vad(int(args.vad_agg_level))
    # preparing the output dict (column name -> list of values)
    output_dict = defaultdict(list)
    log.info(f"Parsing input manifest: {args.audio_manifest}")
    with open(args.audio_manifest, "r") as f:
        manifest_dict = csv.DictReader(f, delimiter="\t")
        for row in tqdm(manifest_dict):
            filename = str(row["audio"])
            # final_output tracks the most recently produced version of the clip
            final_output = filename
            keep_sample = True
            n_frames = row["n_frames"]
            snr = -1  # sentinel when denoising is disabled
            if args.denoise:
                output_path_denoise = out_denoise.joinpath(Path(filename).name)
                # convert to 16khz in case we use a differet sr
                tmp_path = convert_sr(final_output, 16000)
                # loading audio file and generating the enhanced version
                out, sr = torchaudio.load(tmp_path)
                out = out.to(args.device)
                estimate = model(out)
                # dry/wet mix: blend enhanced output back with the noisy input
                estimate = (1 - args.dry_wet) * estimate + args.dry_wet * out
                write(estimate[0], str(output_path_denoise), sr)
                snr = utils.cal_snr(out, estimate)
                snr = snr.cpu().detach().numpy()[0][0]
                final_output = str(output_path_denoise)
            if args.vad:
                output_path_vad = out_vad.joinpath(Path(filename).name)
                sr = torchaudio.info(final_output).sample_rate
                # webrtcvad only accepts 16/32/48 kHz; upsample to the
                # nearest supported rate when needed
                if sr in [16000, 32000, 48000]:
                    tmp_path = final_output
                elif sr < 16000:
                    tmp_path = convert_sr(final_output, 16000)
                elif sr < 32000:
                    tmp_path = convert_sr(final_output, 32000)
                else:
                    tmp_path = convert_sr(final_output, 48000)
                # apply VAD
                segment, sample_rate = apply_vad(vad, tmp_path)
                if len(segment) < sample_rate * MIN_T:
                    # too little voiced audio left - drop this sample
                    keep_sample = False
                    print((
                        f"WARNING: skip (unknown) because it is too short "
                        f"after VAD ({len(segment) / sample_rate} < {MIN_T})"
                    ))
                else:
                    if sample_rate != sr:
                        # convert back to the original rate via a temp file
                        tmp_path = generate_tmp_filename("wav")
                        write_wave(tmp_path, segment, sample_rate)
                        convert_sr(tmp_path, sr,
                                   output_path=str(output_path_vad))
                    else:
                        write_wave(str(output_path_vad), segment, sample_rate)
                    final_output = str(output_path_vad)
                    segment, _ = torchaudio.load(final_output)
                    n_frames = segment.size(1)
            if keep_sample:
                output_dict["id"].append(row["id"])
                output_dict["audio"].append(final_output)
                output_dict["n_frames"].append(n_frames)
                output_dict["tgt_text"].append(row["tgt_text"])
                output_dict["speaker"].append(row["speaker"])
                output_dict["src_text"].append(row["src_text"])
                output_dict["snr"].append(snr)
    out_tsv_path = Path(args.output_dir) / Path(args.audio_manifest).name
    log.info(f"Saving manifest to {out_tsv_path.as_posix()}")
    save_df_to_tsv(pd.DataFrame.from_dict(output_dict), out_tsv_path)
def main():
    """Command-line entry point: build the argument parser and run process()."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--audio-manifest", "-i", required=True, type=str,
        help="path to the input manifest."
    )
    arg_parser.add_argument(
        "--output-dir", "-o", required=True, type=str,
        help="path to the output dir. it will contain files after denoising and"
             " vad"
    )
    arg_parser.add_argument(
        "--vad-agg-level", "-a", type=int, default=2,
        help="the aggresive level of the vad [0-3]."
    )
    arg_parser.add_argument(
        "--dry-wet", "-dw", type=float, default=0.01,
        help="the level of linear interpolation between noisy and enhanced "
             "files."
    )
    arg_parser.add_argument(
        "--device", "-d", type=str, default="cpu",
        help="the device to be used for the speech enhancement model: "
             "cpu | cuda."
    )
    arg_parser.add_argument(
        "--denoise", action="store_true", help="apply a denoising"
    )
    arg_parser.add_argument("--vad", action="store_true", help="apply a VAD")
    process(arg_parser.parse_args())
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| |
"""
Tests the content_features module.
"""
import unittest
import numpy as np
import scipy.sparse as ss
import mock
from sklearn.feature_extraction.text import CountVectorizer
from .context import content_features
from .context import config
from .context import util
from .context import test_utils as tu
class ContentFeaturesTestCase(unittest.TestCase):
def setUp(self):
config_obj = tu.sample_config()
util_obj = util.Util()
self.test_obj = content_features.ContentFeatures(config_obj, util_obj)
    def tearDown(self):
        # Drop the fixture so each test gets a fresh instance from setUp().
        self.test_obj = None
def test_init(self):
result = self.test_obj
self.assertTrue(isinstance(result.config_obj, config.Config))
@mock.patch('pandas.concat')
def test_build(self, mock_concat):
self.test_obj.config_obj.ngrams = True
self.test_obj.util_obj.start = mock.Mock()
self.test_obj.define_file_folders = mock.Mock(return_value='f/')
self.test_obj.settings = mock.Mock(return_value='ngram_params')
self.test_obj.basic = mock.Mock(return_value=('tr', 'te', 'feats'))
mock_concat.side_effect = ['coms_df', 'feats_df']
self.test_obj.ngrams = mock.Mock(return_value=('tr_m', 'te_m'))
self.test_obj.util_obj.end = mock.Mock()
result = self.test_obj.build('train_df', 'test_df', 'test', fw='fw')
exp_start = 'building content features...'
exp_concat = [mock.call(['train_df', 'test_df']),
mock.call(['tr', 'te'])]
self.test_obj.util_obj.start.assert_called_with(exp_start, fw='fw')
self.test_obj.define_file_folders.assert_called()
self.test_obj.settings.assert_called()
self.test_obj.basic.assert_called_with('train_df', 'test_df',
'train_test_1', '_content.pkl', 'f/')
self.assertTrue(mock_concat.call_args_list == exp_concat)
self.test_obj.ngrams.assert_called_with('coms_df', 'train_df',
'test_df', 'ngram_params', 'train_test_1', '_ngrams.npz',
'f/', fw='fw')
self.test_obj.util_obj.end.assert_called_with(fw='fw')
self.assertTrue(result == ('tr_m', 'te_m', 'feats_df', 'feats'))
def test_define_file_folders(self):
result = self.test_obj.define_file_folders()
self.assertTrue(result == 'ind/output/soundcloud/features/')
def test_settings(self):
setting_dict = {'stop_words': 'english', 'ngram_range': (3, 3),
'max_features': 10000, 'analyzer': 'char_wb',
'min_df': 1, 'max_df': 1.0, 'binary': True,
'vocabulary': None, 'dtype': np.int32}
result = self.test_obj.settings()
self.assertTrue(result == setting_dict)
def test_basic(self):
self.test_obj.util_obj.load = mock.Mock()
self.test_obj.build_features = mock.Mock()
self.test_obj.build_features.side_effect = [('tr', ''), ('te', 'fts')]
self.test_obj.util_obj.save = mock.Mock()
result = self.test_obj.basic('train', 'test', 'fn', '_ext', 'f/')
exp_bf = [mock.call('train'), mock.call('test')]
self.assertTrue(result == ('tr', 'te', 'fts'))
self.assertTrue(self.test_obj.build_features.call_args_list == exp_bf)
self.test_obj.util_obj.load.assert_not_called()
def test_ngrams_none(self):
self.test_obj.config_obj.ngrams = False
result = self.test_obj.ngrams('coms', 'train', 'test', 'np', 'fn',
'_ext', 'f/')
self.assertTrue(result == (None, None))
def test_ngrams(self):
self.test_obj.config_obj.ngrams = True
self.test_obj.util_obj.load_sparse = mock.Mock()
self.test_obj.build_ngrams = mock.Mock(return_value='ngrams')
self.test_obj.util_obj.save_sparse = mock.Mock()
self.test_obj.split_mat = mock.Mock(return_value=('tr_m', 'te_m'))
result = self.test_obj.ngrams('coms', 'train', 'test', 'np', 'fn',
'_ext', 'f/', fw='fw')
self.assertTrue(result == ('tr_m', 'te_m'))
self.test_obj.util_obj.load_sparse.assert_not_called()
self.test_obj.build_ngrams.assert_called_with('coms', 'np', fw='fw')
self.test_obj.util_obj.save_sparse.assert_called_with('ngrams',
'f/fn_ext')
self.test_obj.split_mat.assert_called_with('ngrams', 'train', 'test')
def test_count_vectorizer(self):
setting_dict = {'stop_words': 'english', 'ngram_range': (3, 3),
'max_features': 10000, 'analyzer': 'char_wb',
'min_df': 6, 'max_df': 0.1, 'binary': True,
'vocabulary': None, 'dtype': np.int32}
result = self.test_obj.count_vectorizer(setting_dict)
self.assertTrue(isinstance(result, CountVectorizer))
def test_build_ngrams(self):
setting_dict = {'stop_words': 'english', 'ngram_range': (3, 3),
'max_features': 10000, 'analyzer': 'char_wb',
'min_df': 6, 'max_df': 0.1, 'binary': True,
'vocabulary': None, 'dtype': np.int32}
matrix = mock.Mock(np.matrix)
matrix.tocsr = mock.Mock(return_value='ngrams_csr')
df = tu.sample_df(2)
df['text'] = ['banana', 'orange']
cv = mock.Mock(CountVectorizer)
cv.fit_transform = mock.Mock(return_value='ngrams_m')
self.test_obj.count_vectorizer = mock.Mock(return_value=cv)
ss.lil_matrix = mock.Mock(return_value='id_m')
ss.hstack = mock.Mock(return_value=matrix)
result = self.test_obj.build_ngrams(df, setting_dict)
self.test_obj.count_vectorizer.assert_called_with(setting_dict)
cv.fit_transform.assert_called_with(['banana', 'orange'])
ss.lil_matrix.assert_called_with((2, 1))
ss.hstack.assert_called_with(['id_m', 'ngrams_m'])
matrix.tocsr.assert_called()
self.assertTrue(result == 'ngrams_csr')
def test_split_mat(self):
    """split_mat() should split a 6x2 matrix into 4x2 and 2x2 pieces."""
    df1 = tu.sample_df(4)
    df2 = tu.sample_df(2)
    m = np.matrix([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
    result = self.test_obj.split_mat(m, df1, df2)
    # assertEqual reports the actual shapes on failure
    self.assertEqual(result[0].shape, (4, 2))
    self.assertEqual(result[1].shape, (2, 2))
def test_build_features(self):
    """build_features() should dispatch to the soundcloud feature builder only."""
    df = tu.sample_df(2)
    df['text'] = ['banana', 'kiwi']
    self.test_obj.soundcloud = mock.Mock(return_value=('f', 'l'))
    self.test_obj.youtube = mock.Mock()
    self.test_obj.twitter = mock.Mock()
    self.test_obj.yelp_hotel = mock.Mock()
    self.test_obj.yelp_restaurant = mock.Mock()
    result = self.test_obj.build_features(df)
    # mirror the in-place fillna done by build_features so the
    # assert_called_with comparison below sees the same frame
    df['text'] = df['text'].fillna('')
    self.assertEqual(result, ('f', 'l'))
    self.test_obj.soundcloud.assert_called_with(df)
    self.test_obj.youtube.assert_not_called()
    self.test_obj.twitter.assert_not_called()
    self.test_obj.yelp_hotel.assert_not_called()
    self.test_obj.yelp_restaurant.assert_not_called()
def test_soundcloud(self):
    """soundcloud() should return two feature rows and the feature names."""
    df = tu.sample_df(2)
    df['text'] = ['banana', 'orange']
    result = self.test_obj.soundcloud(df)
    # BUG FIX: the original asserted len(result[0] == 2) — the length of an
    # elementwise comparison, which is truthy for any non-empty frame.
    self.assertEqual(len(result[0]), 2)
    self.assertEqual(result[1], ['com_num_chars', 'com_has_link'])
def test_youtube(self):
    """youtube() should return two feature rows and the feature names."""
    df = tu.sample_df(2)
    df['text'] = ['banana', 'orange']
    df['timestamp'] = ['2011-10-31 13:37:50', '2011-10-31 13:47:50']
    result = self.test_obj.youtube(df)
    # BUG FIX: was len(result[0] == 2) — length of a comparison, always truthy.
    self.assertEqual(len(result[0]), 2)
    self.assertEqual(result[1], ['com_num_chars', 'com_weekday',
                                 'com_hour'])
def test_twitter(self):
    """twitter() should return two feature rows and the feature names."""
    df = tu.sample_df(2)
    df['text'] = ['bana@na', '#orange']
    df['timestamp'] = ['2011-10-31 13:37:50', '2011-10-31 13:47:50']
    result = self.test_obj.twitter(df)
    # BUG FIX: was len(result[0] == 2) — length of a comparison, always truthy.
    self.assertEqual(len(result[0]), 2)
    self.assertEqual(result[1], ['com_num_chars', 'com_num_hashtags',
                                 'com_num_mentions', 'com_num_links',
                                 'com_num_retweets'])
@mock.patch('pandas.DataFrame')
def test_ifwe(self, mock_df):
    """ifwe() should wrap com_id in a DataFrame and return the id features."""
    cf = tu.sample_df(10)
    mock_df.return_value = 'feats_df'
    result = self.test_obj.ifwe(cf)
    exp_list = ['sex_id', 'time_passed_id', 'age_id']
    # assertEqual gives a useful diff on failure, unlike assertTrue(a == b)
    self.assertEqual(result, ('feats_df', exp_list))
    mock_df.assert_called_with(cf['com_id'])
def test_yelp_hotel(self):
    """yelp_hotel() should count characters and links per comment."""
    df = tu.sample_df(2)
    df['text'] = ['bana@na', '#orange!']
    result = self.test_obj.yelp_hotel(df)
    # BUG FIX: was len(result[0] == 2) — length of a comparison, always truthy.
    self.assertEqual(len(result[0]), 2)
    self.assertEqual(list(result[0]['com_num_chars']), [7, 8])
    self.assertEqual(result[1], ['com_num_chars', 'com_num_links'])
def test_yelp_restaurant(self):
    """yelp_restaurant() should count characters and links per comment."""
    df = tu.sample_df(2)
    df['text'] = ['bana@na', '#orange']
    result = self.test_obj.yelp_restaurant(df)
    # BUG FIX: was len(result[0] == 2) — length of a comparison, always truthy.
    self.assertEqual(len(result[0]), 2)
    self.assertEqual(list(result[0]['com_num_chars']), [7, 7])
    self.assertEqual(result[1], ['com_num_chars', 'com_num_links'])
def test_suite():
    """Build the test suite for ContentFeaturesTestCase."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(ContentFeaturesTestCase)
# allow running this test module directly
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
"""
sphinx.transforms
~~~~~~~~~~~~~~~~~
Docutils transforms used by Sphinx when reading documents.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from docutils import nodes
from docutils.utils import new_document, relative_path
from docutils.parsers.rst import Parser as RSTParser
from docutils.transforms import Transform
from docutils.transforms.parts import ContentsFilter
from sphinx import addnodes
from sphinx.locale import _, init as init_locale
from sphinx.util import split_index_msg
from sphinx.util.nodes import (
traverse_translatable_index, extract_messages, LITERAL_TYPE_NODES, IMAGE_TYPE_NODES,
)
from sphinx.util.osutil import ustrftime
from sphinx.util.i18n import find_catalog
from sphinx.util.pycompat import indent
from sphinx.domains.std import (
make_term_from_paragraph_node,
make_termnodes_from_paragraph_node,
)
# substitution names handled by DefaultSubstitutions when the document does
# not define them itself; the replacement text comes from the build config
default_substitutions = set([
    'version',
    'release',
    'today',
])
class DefaultSubstitutions(Transform):
    """
    Replace some substitutions if they aren't defined in the document.
    """
    # run before the default Substitutions
    default_priority = 210

    def apply(self):
        config = self.document.settings.env.config
        # only handle those not otherwise defined in the document
        undefined = default_substitutions - set(self.document.substitution_defs)
        for ref in self.document.traverse(nodes.substitution_reference):
            name = ref['refname']
            if name not in undefined:
                continue
            text = config[name]
            if name == 'today' and not text:
                # special handling: can also specify a strftime format
                text = ustrftime(config.today_fmt or _('%B %d, %Y'))
            ref.replace_self(nodes.Text(text, text))
class MoveModuleTargets(Transform):
    """
    Move module targets that are the first thing in a section to the section
    title.

    XXX Python specific
    """
    default_priority = 210

    def apply(self):
        for target in self.document.traverse(nodes.target):
            if not target['ids']:
                continue
            parent = target.parent
            # index 0 is the section title node, so a first-child target
            # sits at index 1
            if ('ismod' in target and
                    parent.__class__ is nodes.section and
                    parent.index(target) == 1):
                parent['ids'][0:0] = target['ids']
                parent.remove(target)
class HandleCodeBlocks(Transform):
    """
    Several code block related transformations.
    """
    default_priority = 210

    def apply(self):
        # move doctest blocks out of blockquotes
        for quote in self.document.traverse(nodes.block_quote):
            only_doctests = all(isinstance(child, nodes.doctest_block)
                                for child in quote.children)
            if only_doctests:
                quote.replace_self(quote.children)
        # combine successive doctest blocks
        # for node in self.document.traverse(nodes.doctest_block):
        #     if node not in node.parent.children:
        #         continue
        #     parindex = node.parent.index(node)
        #     while len(node.parent) > parindex+1 and \
        #             isinstance(node.parent[parindex+1], nodes.doctest_block):
        #         node[0] = nodes.Text(node[0] + '\n\n' +
        #                              node.parent[parindex+1][0])
        #         del node.parent[parindex+1]
class AutoNumbering(Transform):
    """
    Register IDs of tables, figures and literal_blocks to assign numbers.
    """
    default_priority = 210

    def apply(self):
        for node in self.document.traverse(nodes.Element):
            # decide which node gets the implicit target and which child
            # class marks it as numberable
            if isinstance(node, nodes.figure):
                target, marker = node, nodes.caption
            elif isinstance(node, nodes.image):
                target, marker = node.parent, nodes.caption
            elif isinstance(node, nodes.table):
                target, marker = node, nodes.title
            elif isinstance(node, nodes.literal_block):
                target, marker = node.parent, nodes.caption
            else:
                continue
            if any(isinstance(child, marker) for child in target):
                self.document.note_implicit_target(target)
class SortIds(Transform):
    """
    Sort section IDs so that the "id[0-9]+" one comes last.
    """
    default_priority = 261

    def apply(self):
        for section in self.document.traverse(nodes.section):
            ids = section['ids']
            if len(ids) > 1 and ids[0].startswith('id'):
                # rotate the auto-generated id to the back
                section['ids'] = ids[1:] + [ids[0]]
class CitationReferences(Transform):
    """
    Replace citation references by pending_xref nodes before the default
    docutils transform tries to resolve them.
    """
    default_priority = 619

    def apply(self):
        for citnode in self.document.traverse(nodes.citation_reference):
            label = citnode.astext()
            refnode = addnodes.pending_xref(label, reftype='citation',
                                            reftarget=label, refwarn=True,
                                            ids=citnode["ids"])
            # keep the source line for warnings; fall back to the parent's
            refnode.line = citnode.line or citnode.parent.line
            refnode += nodes.Text('[' + label + ']')
            citnode.parent.replace(citnode, refnode)
# maps gettext_additional_targets config names to the node classes that
# become translatable when that target is enabled (see ExtraTranslatableNodes)
TRANSLATABLE_NODES = {
    'literal-block': nodes.literal_block,
    'doctest-block': nodes.doctest_block,
    'raw': nodes.raw,
    'index': addnodes.index,
    'image': nodes.image,
}
class ExtraTranslatableNodes(Transform):
    """
    make nodes translatable
    """
    default_priority = 10

    def apply(self):
        targets = self.document.settings.env.config.gettext_additional_targets
        # node classes enabled via the gettext_additional_targets config
        classes = tuple(cls for name, cls in TRANSLATABLE_NODES.items()
                        if name in targets)
        if not classes:
            return

        def matches(node):
            return isinstance(node, classes)

        for node in self.document.traverse(matches):
            node['translatable'] = True
class CustomLocaleReporter(object):
    """
    Replacer for document.reporter.get_source_and_line method.

    reST text lines for translation do not have the original source line
    number.  This class provides the correct line numbers when reporting.
    """
    def __init__(self, source, line):
        self.source = source
        self.line = line

    def set_reporter(self, document):
        # monkey-patch the reporter so all messages point at the original
        # (untranslated) source location
        document.reporter.get_source_and_line = self.get_source_and_line

    def get_source_and_line(self, lineno=None):
        # *lineno* is accepted for signature compatibility but ignored
        return self.source, self.line
class Locale(Transform):
    """
    Replace translatable nodes with their translated doctree.

    Runs in three phases: (1) rename reference ids/targets to translated
    names, (2) replace node children with the parsed translation, and
    (3) translate index entries when enabled.
    """
    default_priority = 20

    def apply(self):
        env = self.document.settings.env
        settings, source = self.document.settings, self.document['source']
        # XXX check if this is reliable
        assert source.startswith(env.srcdir)
        # document name relative to srcdir, without the file suffix
        docname = path.splitext(relative_path(path.join(env.srcdir, 'dummy'),
                                              source))[0]
        textdomain = find_catalog(docname,
                                  self.document.settings.gettext_compact)
        # fetch translations
        dirs = [path.join(env.srcdir, directory)
                for directory in env.config.locale_dirs]
        catalog, has_catalog = init_locale(dirs, env.config.language,
                                           textdomain)
        if not has_catalog:
            return
        parser = RSTParser()
        # phase1: replace reference ids with translated names
        for node, msg in extract_messages(self.document):
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg or not msgstr.strip():
                # as-of-yet untranslated
                continue
            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it causes a warning message at
            # parser.parse() processing.
            # The literal-block warning only appears in the above case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n dummy literal'
                # the dummy literal node will be discarded by 'patch = patch[0]'
            # literal blocks need literal block notation to avoid becoming
            # a paragraph.
            if isinstance(node, LITERAL_TYPE_NODES):
                msgstr = '::\n\n' + indent(msgstr, ' '*3)
            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            try:
                patch = patch[0]
            except IndexError:  # empty node
                pass
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue  # skip for now
            processed = False  # skip flag
            # update title(section) target name-id mapping
            if isinstance(node, nodes.title):
                section_node = node.parent
                new_name = nodes.fully_normalize_name(patch.astext())
                old_name = nodes.fully_normalize_name(node.astext())
                if old_name != new_name:
                    # if name would be changed, replace node names and
                    # document nameids mapping with new name.
                    names = section_node.setdefault('names', [])
                    names.append(new_name)
                    if old_name in names:
                        names.remove(old_name)
                    _id = self.document.nameids.get(old_name, None)
                    explicit = self.document.nametypes.get(old_name, None)
                    # * if explicit: _id is label. title node needs another id.
                    # * if not explicit:
                    #
                    #   * if _id is None:
                    #
                    #     _id is None means:
                    #
                    #     1. _id was not provided yet.
                    #
                    #     2. _id was duplicated.
                    #
                    #        old_name entry still exists in nameids and
                    #        nametypes for another duplicated entry.
                    #
                    #   * if _id is provided: below process
                    if _id:
                        if not explicit:
                            # _id was not duplicated.
                            # remove old_name entry from document ids database
                            # to reuse original _id.
                            self.document.nameids.pop(old_name, None)
                            self.document.nametypes.pop(old_name, None)
                            self.document.ids.pop(_id, None)
                        # re-entry with new named section node.
                        #
                        # Note: msgnode, the second parameter of
                        # `note_implicit_target`, is not necessary here because
                        # section_node has been noted previously on rst parsing
                        # by `docutils.parsers.rst.states.RSTState.new_subsection()`
                        # and already has `system_message` if needed.
                        self.document.note_implicit_target(section_node)
                    # replace target's refname to new target name
                    def is_named_target(node):
                        return isinstance(node, nodes.target) and \
                            node.get('refname') == old_name
                    for old_target in self.document.traverse(is_named_target):
                        old_target['refname'] = new_name
                    processed = True
            # glossary terms update refid
            if isinstance(node, nodes.term):
                gloss_entries = env.temp_data.setdefault('gloss_entries', set())
                ids = []
                termnodes = []
                for _id in node['names']:
                    if _id in gloss_entries:
                        gloss_entries.remove(_id)
                    _id, _, new_termnodes = \
                        make_termnodes_from_paragraph_node(env, patch, _id)
                    ids.append(_id)
                    termnodes.extend(new_termnodes)
                if termnodes and ids:
                    patch = make_term_from_paragraph_node(termnodes, ids)
                    node['ids'] = patch['ids']
                    node['names'] = patch['names']
                    processed = True
            # update leaves with processed nodes
            if processed:
                for child in patch.children:
                    child.parent = node
                node.children = patch.children
                node['translated'] = True
        # phase2: translation
        for node, msg in extract_messages(self.document):
            if node.get('translated', False):
                continue
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg:  # as-of-yet untranslated
                continue
            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it causes a warning message at
            # parser.parse() processing.
            # The literal-block warning only appears in the above case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n dummy literal'
                # the dummy literal node will be discarded by 'patch = patch[0]'
            # literal blocks need literal block notation to avoid becoming
            # a paragraph.
            if isinstance(node, LITERAL_TYPE_NODES):
                msgstr = '::\n\n' + indent(msgstr, ' '*3)
            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            try:
                patch = patch[0]
            except IndexError:  # empty node
                pass
            # XXX doctest and other block markup
            if not isinstance(
                    patch,
                    (nodes.paragraph,) + LITERAL_TYPE_NODES + IMAGE_TYPE_NODES):
                continue  # skip for now
            # auto-numbered foot note reference should use original 'ids'.
            def is_autonumber_footnote_ref(node):
                return isinstance(node, nodes.footnote_reference) and \
                    node.get('auto') == 1

            def list_replace_or_append(lst, old, new):
                if old in lst:
                    lst[lst.index(old)] = new
                else:
                    lst.append(new)
            old_foot_refs = node.traverse(is_autonumber_footnote_ref)
            new_foot_refs = patch.traverse(is_autonumber_footnote_ref)
            if len(old_foot_refs) != len(new_foot_refs):
                env.warn_node('inconsistent footnote references in '
                              'translated message', node)
            old_foot_namerefs = {}
            for r in old_foot_refs:
                old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
            for new in new_foot_refs:
                refname = new.get('refname')
                refs = old_foot_namerefs.get(refname, [])
                if not refs:
                    continue
                # transplant the original ids onto the translated reference
                old = refs.pop(0)
                new['ids'] = old['ids']
                for id in new['ids']:
                    self.document.ids[id] = new
                list_replace_or_append(
                    self.document.autofootnote_refs, old, new)
                if refname:
                    list_replace_or_append(
                        self.document.footnote_refs.setdefault(refname, []),
                        old, new)
                    list_replace_or_append(
                        self.document.refnames.setdefault(refname, []),
                        old, new)
            # reference should use new (translated) 'refname'.
            # * reference target ".. _Python: ..." is not translatable.
            # * use translated refname for section refname.
            # * inline reference "`Python <...>`_" has no 'refname'.
            def is_refnamed_ref(node):
                return isinstance(node, nodes.reference) and \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_ref)
            new_refs = patch.traverse(is_refnamed_ref)
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            old_ref_names = [r['refname'] for r in old_refs]
            new_ref_names = [r['refname'] for r in new_refs]
            orphans = list(set(old_ref_names) - set(new_ref_names))
            for new in new_refs:
                if not self.document.has_name(new['refname']):
                    # Maybe refname is translated but target is not translated.
                    # Note: multiple translated refnames break link ordering.
                    if orphans:
                        new['refname'] = orphans.pop(0)
                    else:
                        # orphan refnames is already empty!
                        # reference number is same in new_refs and old_refs.
                        pass
                self.document.note_refname(new)
            # refnamed footnote and citation should use original 'ids'.
            def is_refnamed_footnote_ref(node):
                footnote_ref_classes = (nodes.footnote_reference,
                                        nodes.citation_reference)
                return isinstance(node, footnote_ref_classes) and \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_footnote_ref)
            new_refs = patch.traverse(is_refnamed_footnote_ref)
            refname_ids_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            for old in old_refs:
                refname_ids_map[old["refname"]] = old["ids"]
            for new in new_refs:
                refname = new["refname"]
                if refname in refname_ids_map:
                    new["ids"] = refname_ids_map[refname]
            # Original pending_xref['reftarget'] contains the not-translated
            # target name; the new pending_xref must use the original one.
            # This code restricts changing ref-targets in the translation.
            old_refs = node.traverse(addnodes.pending_xref)
            new_refs = patch.traverse(addnodes.pending_xref)
            xref_reftarget_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent term references in '
                              'translated message', node)

            def get_ref_key(node):
                case = node["refdomain"], node["reftype"]
                if case == ('std', 'term'):
                    return None
                else:
                    return (
                        node["refdomain"],
                        node["reftype"],
                        node['reftarget'],)

            for old in old_refs:
                key = get_ref_key(old)
                if key:
                    xref_reftarget_map[key] = old.attributes
            for new in new_refs:
                key = get_ref_key(new)
                # Copy attributes to keep original node behavior. Especially
                # copying 'reftarget', 'py:module', 'py:class' are needed.
                for k, v in xref_reftarget_map.get(key, {}).items():
                    # Note: This implementation overwrites all attributes.
                    # If some attribute `k` should not be overwritten,
                    # you should provide an exclude list as:
                    #   `if k not in EXCLUDE_LIST: new[k] = v`
                    new[k] = v
            # update leaves
            for child in patch.children:
                child.parent = node
            node.children = patch.children
            # for highlighting that expects .rawsource and .astext() are same.
            if isinstance(node, LITERAL_TYPE_NODES):
                node.rawsource = node.astext()
            if isinstance(node, IMAGE_TYPE_NODES):
                node.update_all_atts(patch)
            node['translated'] = True
        if 'index' in env.config.gettext_additional_targets:
            # Extract and translate messages for index entries.
            for node, entries in traverse_translatable_index(self.document):
                new_entries = []
                for type, msg, tid, main in entries:
                    msg_parts = split_index_msg(type, msg)
                    msgstr_parts = []
                    for part in msg_parts:
                        msgstr = catalog.gettext(part)
                        if not msgstr:
                            msgstr = part
                        msgstr_parts.append(msgstr)
                    new_entries.append((type, ';'.join(msgstr_parts), tid, main))
                node['raw_entries'] = entries
                node['entries'] = new_entries
class RemoveTranslatableInline(Transform):
    """
    Remove inline nodes used for translation as placeholders.
    """
    default_priority = 999

    def apply(self):
        from sphinx.builders.gettext import MessageCatalogBuilder
        env = self.document.settings.env
        # the gettext builder needs the placeholders, so leave them alone
        if isinstance(env.app.builder, MessageCatalogBuilder):
            return
        for inline in self.document.traverse(nodes.inline):
            if 'translatable' in inline:
                inline.parent.remove(inline)
                inline.parent += inline.children
class SphinxContentsFilter(ContentsFilter):
    """
    Used with BuildEnvironment.add_toc_from() to discard cross-file links
    within table-of-contents link nodes.
    """
    def visit_pending_xref(self, node):
        # replace the unresolved xref with its plain-text content
        literal_text = node.astext()
        self.parent.append(nodes.literal(literal_text, literal_text))
        raise nodes.SkipNode

    def visit_image(self, node):
        # images make no sense inside a ToC entry
        raise nodes.SkipNode
| |
from functools import partial
from whiskeynode import whiskeycache
from whiskeynode import WhiskeyNode
from whiskeynode.edges import Edge
from whiskeynode.terminaltypes import TerminalType
from whiskeynode.exceptions import (BadEdgeRemovalException,
InvalidEdgeDataException,
InvalidTerminalException,
InvalidTerminalOperationException,
InvalidTerminalParameterException,
InvalidTerminalStateException,
)
'''
Requirements of connection terminals
-Lazy loading, only grab data if you have to
-Caching in memory - only grab data once
-easy to declare - simple dictionary declaration
-easy to use - dot notation syntax
'''
# edge direction constants used by the terminal factories and classes below
OUTBOUND='OUTBOUND'
INBOUND='INBOUND'
BIDIRECTIONAL = 'BIDIRECTIONAL'
# monotonically increasing debug id handed to each list terminal instance
IDID = 0
def outbound_node(to_node_class,
                  create_on_request=False,
                  render=False,
                  voteable=False,
                  ):
    """Declare an OUTBOUND single-node terminal to *to_node_class*."""
    return partial(NodeTerminal, to_node_class, OUTBOUND,
                   render=render, create_on_request=create_on_request)
def inbound_node(to_node_class,
                 inbound_name,
                 render=False,
                 voteable=False,
                 ):
    """Declare an INBOUND single-node terminal.

    Inbound nodes just grab the first connected node; if there could ever
    be more than one connection, use a list terminal instead.
    """
    return partial(NodeTerminal, to_node_class, INBOUND,
                   inbound_name=inbound_name, render=render)
def outbound_list(to_node_class,
                  render=False,
                  attributes=None,
                  sort_func=None,
                  voteable=False,
                  ):
    """Declare an OUTBOUND list terminal (attributed when *attributes* given)."""
    if attributes is None:
        return partial(ListOfNodesTerminal, to_node_class, OUTBOUND,
                       render=render)
    return partial(AttributedListOfNodesTerminal, to_node_class, OUTBOUND,
                   render=render, attributes=attributes, sort_func=sort_func)
def inbound_list(to_node_class,
                 inbound_name,
                 attributes=None,
                 sort_func=None,
                 render=False,
                 voteable=False,
                 ):
    """Declare an INBOUND list terminal (attributed when *attributes* given)."""
    if attributes is None:
        return partial(ListOfNodesTerminal, to_node_class, INBOUND,
                       inbound_name=inbound_name, render=render)
    return partial(AttributedListOfNodesTerminal, to_node_class, INBOUND,
                   inbound_name=inbound_name, attributes=attributes,
                   sort_func=sort_func, render=render)
def bidirectional_list(to_node_class,
                       render=False,
                       voteable=False,
                       ):
    """Declare a BIDIRECTIONAL list terminal between nodes of one class."""
    return partial(ListOfNodesTerminal, to_node_class, BIDIRECTIONAL,
                   render=render)
'''
class BaseTerminal():
def __init__(self, to_node_class, direction, origin_node, name, inbound_name, render, terminaltype):
self.activated = False
self.name = inbound_name if inbound_name is not None else name
self.node = origin_node
self.to_node_class = to_node_class
self.terminaltype = terminaltype
self.direction = direction
self._render = render
self._insave = False
if self.direction == INBOUND and inbound_name == None:
raise InvalidTerminalException('inbound_name cannot be none when direction is INBOUND')
def edge_display_name(self):
return '%s:%s' % (self.name, self.to_node_class.COLLECTION_NAME)
def edge_query(self):
raise NotImplementedError()
def get(self):
raise NotImplementedError()
def set(self, value):
raise NotImplementedError()
def delete(self):
raise NotImplementedError()
def render(self):
raise NotImplementedError()
def exists(self):
raise NotImplementedError()
def add_inbound_edge(self):
raise NotImplementedError()
def remove_inbound_edge(self):
raise NotImplementedError()
def remove_outbound_edge(self, edge):
raise NotImplementedError()
'''
class NodeTerminal():
    """Terminal connecting its owner node to at most one other node.

    The connecting Edge document and the connected node are loaded lazily
    and cached on the instance.  direction must be OUTBOUND or INBOUND;
    INBOUND terminals are read-only views kept in sync by the node on the
    other end of the edge.
    """
    def __init__(self, to_node_class, direction, origin_node, name, inbound_name=None, render=False, create_on_request=False ): #, inbound_edges, outbound_edges):
        self.activated = False  # True once the edge has been loaded/known
        self.name = inbound_name if inbound_name is not None else name
        self.original_name = name
        self.node = origin_node
        self.to_node_class = to_node_class
        self.terminaltype = TerminalType.NODE
        self.direction = direction
        self._render = render
        self._insave = False  # re-entrancy guard for save()
        if self.direction == INBOUND and inbound_name == None:
            raise InvalidTerminalException('inbound_name cannot be none when direction is INBOUND')
        self._edge = None     # cached Edge document, None until loaded
        self._to_node = None  # cached connected node, None until loaded
        self.create_on_request = create_on_request
        if self.direction != OUTBOUND and self.direction != INBOUND:
            raise InvalidTerminalException('Node terminals can only be INBOUND or OUTBOUND')

    def __repr__(self):
        return '%s node to %s.%s named %s' % (self.direction, self.to_node_class.__module__, self.to_node_class.__name__, self.name)

    def _get_to_node_id(self):
        """Return the connected node's id from the edge, or None."""
        self.get_edge()
        if self._edge:
            return self._edge.inboundId if self.direction == OUTBOUND else self._edge.outboundId
        return None

    def _get_to_node_from_cache(self):
        ''' without going to the database '''
        if self._to_node is None:
            _id = self._get_to_node_id()
            if _id:
                self._to_node = whiskeycache.RAM.get(_id, None)
        return self._to_node

    def add_inbound_edge(self, edge):
        # called by the outbound side to mirror a new connection here
        assert self.direction == INBOUND, \
            'Terminal [%s] on [%s] is an outbound node, you can\'t add inbound connections to an outbound node' % (self.name, self.node.__class__)
        if self._edge is not None and self._edge != edge:
            self._to_node = None
        if self._to_node is None:
            self.activated = True
            self._edge = edge
            self.get()

    def add_outbound_edge(self, edge):
        # mirror of add_inbound_edge for the outbound side
        self.activated = True
        self._edge = edge
        self._to_node = self.to_node_class.from_id(self._get_to_node_id())

    def delete(self):
        """Disconnect this terminal (outbound only)."""
        #print "DELETE!!! "+str(self._edge)+" : "+self.name+" : "+str(self.node.__class__)
        assert self.direction == OUTBOUND, \
            'Terminal [%s] on [%s] is an inbound node, you can\'t remove connections from an inbound node' % (self.name, self.node.__class__)
        self.set(None)

    def edge_display_name(self):
        return '%s:%s' % (self.name, self.to_node_class.COLLECTION_NAME)

    def edge_query(self):
        """Mongo-style query dict locating this terminal's edge."""
        if self.direction == OUTBOUND:
            return {'outboundId':self.node._id, 'name':self.name}
        else: #if self.direction == INBOUND
            return {'inboundId':self.node._id, 'outboundCollection':self.to_node_class.COLLECTION_NAME, 'name':self.name}

    def exists(self):
        # checks the cache first, then falls back to a db count
        return self._edge != None or Edge.find(self.edge_query()).count() > 0

    def get_self(self):
        return self.get()

    def get(self):
        """Return the connected node, loading (or creating) it lazily."""
        if self._to_node == None:
            self.get_edge()
            if self._edge is None and self.create_on_request:
                # create_on_request: materialise a fresh target node
                self.set(self.to_node_class())
            elif self._edge:
                self._to_node = self.to_node_class.from_id(self._get_to_node_id())
                assert self._to_node is not None, 'to node should not be none ' + str(self)
                # let the other end mirror this connection
                if self.direction == OUTBOUND:
                    self._to_node.add_inbound_edge(self.name, self._edge)
                else:
                    self._to_node.add_outbound_edge(self.name, self._edge)
        return self._to_node

    def get_edge(self):
        """Return the cached Edge, querying the db on first access."""
        if not self.activated or self._edge is None:
            assert self._edge is None, 'edge should be none'
            self._edge = Edge.find_one(self.edge_query())
            assert self.direction == INBOUND or \
                self._edge is None or \
                self._edge.inboundCollection == self.to_node_class.COLLECTION_NAME, \
                'Edge collection doesn not match to_node_class on node named [%s] on class [%s] edge: %s' % (self.name, self.node.__class__, str(self._edge.to_dict()))
            self.activated = True
        return self._edge

    def remove_inbound_edge(self, edge):
        assert self.direction == INBOUND, \
            'Terminal [%s] on [%s] is an outbound node, you can\'t remove inbound connections from an outbound node' % (self.name, self.node.__class__)
        if self.activated:
            if self.get_edge() is not None and self._edge._id == edge._id:
                self._edge = None
                self._to_node = None
        #leaving activated as true, so lazy traversals know that something has changed

    def remove_outbound_edge(self, edge):
        assert self.direction == OUTBOUND
        if self.activated:
            if self.get_edge() is not None and self._edge._id == edge._id:
                self._edge = None
                self._to_node = None
        #leaving activated as true, so lazy traversals know that something has changed

    def render(self, render_terminals=False, *args, **kwargs):
        """Render the connected node (empty dict when unconnected)."""
        self.get()
        if self._to_node:
            return self._to_node.render(render_terminals=render_terminals, *args, **kwargs)
        else:
            return {}

    def render_pretty(self, do_print=True, *args, **kwargs):
        # NOTE(review): pformat is not imported anywhere in this module —
        # needs 'from pprint import pformat'; confirm against the full file.
        ret_val = pformat(self.render(*args, **kwargs))
        if do_print:
            print ret_val
        else:
            return ret_val

    def save(self, *args, **kwargs):
        """Save the connected node then the edge; guarded against re-entry."""
        if not self._insave:
            self._insave = True
            #print "SAVE!!! "+str(self._edge)+" : "+self.name+" : "+str(self.node.__class__)
            if self.activated and self._edge:
                if self._to_node:
                    self._to_node.save(*args, **kwargs)
                self._edge.save(*args, **kwargs)
            self._insave = False

    def set(self, value):
        """Connect this terminal to *value* (or disconnect when None)."""
        assert self.direction == OUTBOUND, \
            'Terminal [%s] on [%s] is an inbound node, you can\'t add connections to an inbound node' % (self.name, self.node.__class__)
        # no-op when the connection would not change
        if value and value._id == self._get_to_node_id():
            return
        if value is None and self._get_to_node_id() is None:
            return
        # tear down the existing connection on both ends
        self._get_to_node_from_cache()
        if self._to_node:
            self._to_node.remove_inbound_edge(self.name, self._edge)
        if self._edge:
            self._edge.remove()
            self._edge = None
        self._to_node = None
        if value is not None:
            if value.COLLECTION_NAME != self.to_node_class.COLLECTION_NAME:
                raise InvalidTerminalException('Terminal [%s] on [%s] takes [%s] not [%s]' % (
                    self.name, self.node.__class__, self.to_node_class, value.__class__))
            #print "SET!!! "+str(self._edge)+" : "+self.name+" : "+str(self.node.__class__)
            self._edge = Edge.from_nodes(self.node, value, self.name, self.terminaltype)
            self._to_node = value
            self._to_node.add_inbound_edge(self.name, self._edge)
        self.activated = True
class ListOfNodesTerminal():
def __init__(self, to_node_class, direction, origin_node, name, inbound_name = None, render=False, **kwargs):
    """Terminal holding a lazily-loaded, cached list of connected nodes."""
    self.activated = False  # True once edges have been loaded/known
    self.name = inbound_name if inbound_name is not None else name
    self.original_name = name
    self.node = origin_node
    self.to_node_class = to_node_class
    self.terminaltype = TerminalType.LIST_OF_NODES
    self.direction = direction
    self._render = render
    self._insave = False
    self._temp_yup_reference = [] #wanted to make appending o(1), so need to save a reference to the node so the whiskey weak reference cache doesn't drop it
    if self.direction == INBOUND and inbound_name == None:
        raise InvalidTerminalException('inbound_name cannot be none when direction is INBOUND')
    self._list = None   # lazily-built list of node objects
    self._edges = None  # lazily-built mapping of connected node id -> Edge
    self._initialized = False
    # hand each list terminal a unique, monotonically increasing debug id
    global IDID
    self._idid = IDID
    IDID += 1
    if self.direction == BIDIRECTIONAL and type(origin_node) != to_node_class:
        raise InvalidTerminalException('Bidirectional lists can only be created between nodes of the same type')
def __repr__(self):
return '%s list to %s.%s named %s' % (self.direction, self.to_node_class.__module__, self.to_node_class.__name__, self.name)
def __len__(self):
return len(self.get_edges())
def __getitem__(self, i):
    """Index into the connected nodes, loading the list lazily."""
    #if self.activated:
    self.get()
    return self._list[i]
def __delitem__(self, i):
    # index-based deletion is deliberately unsupported; use remove()/pop()
    raise NotImplementedError()

#def __contains__(self, node):
#    return node._id in self.get_edges()
def _add_node(self, to_node):
    """Create an edge to *to_node* and register it locally (no db reload)."""
    assert self.direction != INBOUND, \
        '(wrong direction) Terminal [INBOUND:%s] on [%s] is an inbound node, you can\'t add connections to an inbound node' % (self.name, self.node.__class__)
    assert to_node.COLLECTION_NAME == self.to_node_class.COLLECTION_NAME, \
        'Terminal [%s] on [%s] takes [%s] not [%s]' % (self.name, self.node.__class__, self.to_node_class, to_node.__class__)
    if not to_node._id in self.get_edges():
        self._edges[to_node._id] = Edge.from_nodes(self.node, to_node, self.name, self.terminaltype)
        # mirror the connection on the other end
        to_node.add_inbound_edge(self.name, self._edges[to_node._id])
        if self._list is not None:
            self._list.append(to_node)
            self.sort()
        else:
            # list not materialised yet: keep a strong reference so the
            # weak-reference cache doesn't drop the node before we load
            self._temp_yup_reference.append(to_node)
def _remove_node(self, to_node):
    """Drop the edge to *to_node* and unregister it on both ends."""
    assert self.direction != INBOUND, \
        'Terminal [%s] on [%s] is an inbound node, you can\'t remove connections from an inbound node' % (self.name, self.node.__class__)
    if to_node._id in self.get_edges():
        self.get()
        edge = self._edges[to_node._id]
        # notify whichever side of the edge the other node is on
        if edge.inboundId == to_node._id:
            to_node.remove_inbound_edge(self.name, edge)
        else:
            to_node.remove_outbound_edge(self.name, edge)
        edge.remove()
        del self._edges[to_node._id]
        self._list.remove(to_node)
        self.sort()
def add_inbound_edge(self, edge):
    """Mirror a connection created by the outbound side."""
    assert self.direction != OUTBOUND
    #we have to add inbound nodes here so that we know a save will
    #traverse all nodes and make the proper saves
    #self.get()
    if edge.outboundId not in self.get_edges():
        self._edges[edge.outboundId] = edge
        if self._list is not None:
            self._list.append(self.to_node_class.from_id(edge.outboundId))
            self.sort()
def add_outbound_edge(self, edge):
    # intentionally a no-op: outbound edges are created via _add_node
    pass #don't think we need to do anything here
def append(self, node):
    """Connect *node* to this terminal (list-style API)."""
    self._add_node(node)
def count(self):
    ''' counts all items in db and in local cache '''
    # NOTE(review): this only queries the db; unsaved locally-added edges
    # may not be included — confirm against Edge.from_nodes persistence.
    return Edge.find(self.edge_query()).count()
def delete(self):
    """Disconnect every node on this terminal and drop held references."""
    self.set([])
    self._temp_yup_reference = []
def edge_display_name(self):
return '%s:%s' % (self.name, self.to_node_class.COLLECTION_NAME)
def edge_query(self, direction=None): #todo include to_node=None
    """Build the db query that finds this terminal's edges.

    direction defaults to the terminal's own direction; BIDIRECTIONAL
    matches edges pointing either way.
    """
    if direction == None: direction = self.direction
    if direction == INBOUND:
        rv = {'inboundId':self.node._id, 'outboundCollection':self.to_node_class.COLLECTION_NAME, 'name':self.name}
    elif direction == OUTBOUND:
        rv = {'outboundId':self.node._id, 'name':self.name}
    elif direction == BIDIRECTIONAL:
        rv = {
            '$or':[
                {'inboundId':self.node._id, 'outboundCollection':self.to_node_class.COLLECTION_NAME, 'name':self.name},
                {'outboundId':self.node._id, 'name':self.name}
            ]
        }
    else:
        raise NotImplementedError('direction %s is not supported' % direction)
    return rv
    def exists(self):
        """True when at least one edge is connected to this terminal."""
        return len(self.get_edges()) > 0
    def extend(self, nodes):
        """Connect every node in the iterable (list-style extend)."""
        for node in nodes:
            self._add_node(node)
    def get_self(self):
        return self
    def get(self):
        """Lazily build self._list (node cache) from the edge cache."""
        if self._list is None:
            self.get_edges()
            self._list = self.to_node_class.from_ids(self._edges.keys())
            self.sort()
    def get_edge(self, node):
        """Return the edge connecting this terminal to node (KeyError if absent)."""
        #todo run edge_query with to_node
        return self.get_edges()[node._id]
    def get_edges(self):
        """Lazily populate and return the {peer_id: edge} cache.

        Keys are the peer node's id: outboundId for inbound edges,
        inboundId for outbound ones. NOTE(review): the collection
        consistency assert runs only for outbound edges.
        """
        if self.activated == False:
            assert self._edges is None, '_edges should be None'
            self._edges = {}
            self.activated = True
            if self.direction == INBOUND or self.direction == BIDIRECTIONAL:
                for edge in Edge.find(self.edge_query(INBOUND), limit=200): #hack here, if there is an edge filter, skip the cache
                    self._edges[edge.outboundId] = edge
            if self.direction == OUTBOUND or self.direction == BIDIRECTIONAL:
                for edge in Edge.find(self.edge_query(OUTBOUND), limit=200): #hack here, if there is an edge filter, skip the cache
                    self._edges[edge.inboundId] = edge
                    #if self.check_errors
                    assert edge.inboundCollection == self.to_node_class.COLLECTION_NAME, \
                        'On node named [%s] on class [%s] data: %s' % (self.name, self.node.__class__, str(edge.to_dict()))
        return self._edges
    def insert(self, i, node):
        # positional insert is meaningless: ordering is derived by sort()
        raise NotImplementedError()
    def pop(self, index=-1):
        """Disconnect and return the node at index (last by default)."""
        self.get()
        node = self._list[index]
        self._remove_node(node)
        return node
    def remove(self, node):
        """list-style alias for disconnecting a node."""
        self._remove_node(node)
    def remove_inbound_edge(self, edge):
        """Drop a cached inbound edge (called by the peer's _remove_node)."""
        assert self.direction != OUTBOUND
        if self.activated:
            if edge.outboundId in self._edges:
                if self._list is not None:
                    self._list.remove(self.to_node_class.from_id(edge.outboundId))
                del self._edges[edge.outboundId]
                self.sort()
    def remove_outbound_edge(self, edge):
        ''' called when a node we're connected to is removed '''
        # NOTE(review): unlike remove_inbound_edge, no sort() follows here
        if self.activated:
            if edge.inboundId in self._edges:
                del self._edges[edge.inboundId]
                if self._list != None:
                    self._list.remove(self.to_node_class.from_id(edge.inboundId))
    def render(self, render_terminals=False, *args, **kwargs):
        """Render every connected node; forces the node cache first."""
        self.get()
        return[x.render(render_terminals=render_terminals, *args, **kwargs) for x in self._list]
    def render_pretty(self, do_print=True, *args, **kwargs):
        """pformat the rendered list; print it (default) or return it."""
        ret_val = pformat(self.render(*args, **kwargs))
        if do_print:
            print ret_val
        else:
            return ret_val
    def save(self, *args, **kwargs):
        """Persist cached nodes/edges plus pending references; re-entrancy guarded by _insave."""
        if not self._insave:
            self._insave = True
            if self.activated and len(self._edges) > 0:
                if self._list:
                    for node in self._list:
                        node.save(*args, **kwargs) #saves shouldn't call the db if nothing has changed
                for edge in self._edges.values():
                    edge.save(*args, **kwargs) #saves shouldn't call the db if nothing has changed
            for node in self._temp_yup_reference:
                node.save()
            self._temp_yup_reference = []
            self._insave = False
    def set(self, nodes):
        """Replace all connections with the given list of nodes.

        Adds in reverse so that the sort-by-newest-edge order matches the
        caller's list order.
        """
        if type(nodes) != list:
            raise InvalidTerminalException('Terminal [%s] on [%s] should not be set to anything other than a list' % (self.name, self.to_node_class))
        self.get()
        old_nodes = self._list[:]
        for node in old_nodes:
            self._remove_node(node)
        assert len(self) == 0, 'Why didn\'t we clear our list?'
        for node in reversed(nodes):
            self._add_node(node)
    def sort(self, key=None):
        """Order the node cache: by descending edge _id by default, else by key."""
        if self._list != None:
            if key is None:
                edges_for_sort = [(k,v) for k,v in self._edges.items()]
                edges_for_sort.sort(key=lambda x: x[1]._id, reverse=True)
                _ids = [x[0] for x in edges_for_sort]
                self._list.sort(key=lambda x: _ids.index(x._id))
            else:
                self._list.sort(key=key)
class AttributedListOfNodesTerminal(ListOfNodesTerminal):
    """ListOfNodesTerminal whose edges carry a declared set of data attributes.

    Only keys listed in `attributes` may be stored on an edge; anything else
    raises InvalidEdgeDataException.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): 'attributes'/'sort_func' are also forwarded to the
        # base initializer inside kwargs -- confirm the base tolerates them.
        ListOfNodesTerminal.__init__(self, *args, **kwargs)
        self.attributes = kwargs['attributes']  # required keyword
        self.sort_func = kwargs.get('sort_func', None)  # optional post-render ordering
    def __repr__(self):
        return '%s list to %s.%s named %s with %s attributes' % (self.direction, self.to_node_class.__module__, self.to_node_class.__name__, self.name, str(self.attributes))
    def add(self, node, **kwargs):
        """Alias of append(); kwargs become edge attribute values."""
        return self.append(node, **kwargs)
    def append(self, node, **kwargs):
        """Connect node, then store kwargs as attributes on the new edge."""
        ListOfNodesTerminal.append(self, node)
        self.update(node, **kwargs)
    def render(self, render_terminals=False, custom_sort_func=None, *args, **kwargs):
        """Render connected nodes merged with their edge data.

        custom_sort_func (else self.sort_func) may reorder the result.
        """
        self.get()
        self.sort()
        ret_val = [self.render_one(x, render_terminals=render_terminals, *args, **kwargs) for x in self._list]
        if custom_sort_func:
            return custom_sort_func(ret_val)
        elif self.sort_func:
            return self.sort_func(ret_val)
        else:
            return ret_val
    def render_one(self, node, render_terminals=False, *args, **kwargs):
        """Node render dict overlaid on the edge's attribute data."""
        return dict(self.get_edge(node).data, **node.render(render_terminals, *args, **kwargs))
    def update(self, node, **kwargs):
        """Write declared attribute values onto the edge joining us to node.

        Raises InvalidEdgeDataException for keys not in self.attributes.
        """
        changes = {}
        edge = self.get_edge(node)
        for k,v in kwargs.items():
            if k in self.attributes:
                if v != edge.data.get(k):
                    changes[k] = v
                    edge.data[k] = v
            else:
                raise InvalidEdgeDataException('Edge attribute [%s] has not been explicitly defined for terminal [%s] in class [%s]' % (k, self.name, self.node.__class__))
| |
#!/usr/bin/env python3
import sys
import os
import git_wrapper
import path_utils
import string_utils
def change_stash_index(stash_name, index):
    """Return stash_name with the number in its last "{...}" replaced by index.

    e.g. ("stash@{0}", 7) -> "stash@{7}". Returns None when either argument
    is None, or when the braces are missing, at position zero, or reversed.
    """
    if stash_name is None or index is None:
        return None
    open_pos = stash_name.rfind("{")
    close_pos = stash_name.rfind("}")
    bad_positions = (-1, 0)
    if open_pos in bad_positions or close_pos in bad_positions:
        return None
    if open_pos > close_pos:
        return None
    return "%s%d%s" % (stash_name[:open_pos+1], index, stash_name[close_pos:])
def get_stash_name(str_line):
    # presumably extracts the token before the ":" separator from one
    # 'git stash list' line -- confirm string_utils.generic_parse semantics
    return string_utils.generic_parse(str_line, ":")
def get_prev_hash(str_line):
    # presumably extracts the token before the first space from one
    # 'git log --oneline' line -- confirm string_utils.generic_parse semantics
    return string_utils.generic_parse(str_line, " ")
def get_renamed_details(renamed_msg):
    """Split a git "old -> new" rename message into (original, renamed).

    One character (the separating space) is dropped from before "->" and the
    renamed side is left-stripped. Returns None when the message is None or
    "->" is absent or at position 0.
    """
    if renamed_msg is None:
        return None
    arrow_at = renamed_msg.find("->")
    if arrow_at <= 0:
        return None
    left = renamed_msg[:arrow_at-1]
    right = renamed_msg[arrow_at+2:].lstrip()
    return (left, right)
def remove_gitlog_decorations(commitmsg):
    """Extract the bare commit message from one 'git log' entry.

    Strips the four header lines (commit/author/date/blank), any following
    commit entries, the trailing newline, and the 4-character indentation
    prepended to every message line. Returns None if the input has fewer
    than four newlines in the header or no trailing newline.
    """
    res = commitmsg
    # cut out first four lines (commit, author, date, \n)
    nl = -1
    for _ in range(4):
        nl = res.find("\n", nl+1)
        if nl == -1:
            return None
    res = res[nl+1:]
    # keep only the first commit's message if more entries follow
    remaining = res.find("\ncommit")
    if remaining != -1:
        res = res[:remaining]
    # drop the trailing newline (must be present in git log output)
    nl = res.rfind("\n")
    if nl == -1:
        return None
    res = res[:nl]
    # strip the 4-char indentation; join replaces the old quadratic += loop
    return "\n".join(line[4:] for line in res.split("\n"))
def remove_gitstatus_simple_decorations(statusmsg_singleline):
    """Strip the two-char status code plus separator from one 'git status -s' line.

    Returns None for None input, lines shorter than 4 chars, or lines whose
    third character is not the expected space separator.
    """
    if statusmsg_singleline is None:
        return None
    too_short = len(statusmsg_singleline) < 4
    if too_short or statusmsg_singleline[2] != " ":
        return None
    return statusmsg_singleline[3:]
def is_repo_root(path):
    """True if path looks like a git metadata dir (basename ends in .git)."""
    if path is None:
        return None
    path = os.path.abspath(path)
    # NOTE(review): abspath never returns None, so this guard is dead code
    if path is None:
        return False
    if not os.path.exists(path):
        return False
    if path_utils.basename_filtered(path).endswith(".git"):
        return True
    return False
def discover_repo_root(repo):
    """Walk upward from repo until a directory containing .git is found.

    Returns the containing path, or None at the filesystem root.
    """
    if repo is None:
        return None
    repo = os.path.abspath(repo)
    curpath = repo
    while not is_repo_root(path_utils.concat_path(curpath, ".git")):
        curpath = path_utils.backpedal_path(curpath)
        if curpath is None:
            return None
    return curpath
def is_head_clear(repo):
    # is_head_clear is a misnomer, because this will also take into consideration staged, unversioned, etc.
    """Return (True, bool): True when 'git status -s' prints nothing."""
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    v, r = git_wrapper.status_simple(repo)
    if not v:
        return False, r
    return True, (r.strip() == "")
def get_remotes(repo):
    """Return (True, dict) mapping remote name -> operation -> path.

    example:
    { 'offline': {'push': 'local/path1', 'fetch': 'local/path2'},
      'private': {'push': 'git@remote/path.git', 'fetch': 'git@remote/path.git'} }

    Returns (False, error-msg) on failures. (Docstring moved from the middle
    of the function body to its proper position.)
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    # FIX(review): is_repo_working_tree returns a (status, payload) tuple, so
    # the previous "t1 is None / t1 is False" identity checks never fired.
    v, r = is_repo_working_tree(repo)
    if not v:
        return False, "%s does not exist." % repo
    if r is False:
        return False, "%s is not a git work tree." % repo
    v, r = git_wrapper.remote_list(repo)
    if not v:
        return False, "get_remotes failed: %s" % r
    filtered_list = r.split() # removes the trailing newline
    # output comes in triples: (remote_name, remote_path, "(fetch)"/"(push)")
    if len(filtered_list) == 0:
        return True, {} # no remotes
    elif len(filtered_list) % 3 != 0:
        return False, "could not detect remotes"
    elif (len(filtered_list) // 3) % 2 != 0:
        # each remote contributes one fetch and one push triple (integer
        # division: "/" produced a float under Python 3)
        return False, "could not detect remotes"
    ret_dict = {}
    for i in range(0, len(filtered_list), 3):
        remote_name = filtered_list[i]
        remote_path = filtered_list[i+1]
        remote_operation = filtered_list[i+2]
        remote_operation = remote_operation[1:len(remote_operation)-1] # removes the encasing parenthesis
        if remote_name in ret_dict:
            ret_dict[remote_name][remote_operation] = remote_path
        else:
            ret_dict[remote_name] = {remote_operation: remote_path}
    return True, ret_dict
def get_branches(repo):
    """Return (True, branch-name list) with the checked-out branch first.

    Returns (False, error-msg) on failures.
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    # FIX(review): consume is_repo_working_tree's (status, payload) tuple;
    # the old "t1 is None / t1 is False" identity checks never fired.
    v, r = is_repo_working_tree(repo)
    if not v:
        return False, "%s does not exist." % repo
    if r is False:
        return False, "%s is not a git work tree." % repo
    v, r = git_wrapper.branch(repo)
    if not v:
        return False, "get_branches failed: %s" % r
    branch_list = r.split("\n")
    # move the checked out branch ("* name" entry) to the front
    for i in branch_list:
        if i.startswith("*"):
            branch_list.remove(i)
            branch_list = [i[2:]] + branch_list
            break  # stray ";" removed
    # remove blank entries (first occurrence only, as before)
    if "" in branch_list:
        branch_list.remove("")
    # trim branch strings
    return True, [i.strip() for i in branch_list]
def get_current_branch(repo):
    """Return (True, checked-out branch name) or (True, None) when no branches.

    Returns (False, error-msg) on failures.
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    # FIX(review): consume is_repo_working_tree's (status, payload) tuple;
    # the old "t1 is None / t1 is False" identity checks never fired.
    v, r = is_repo_working_tree(repo)
    if not v:
        return False, "%s does not exist." % repo
    if r is False:
        return False, "%s is not a git work tree." % repo
    v, r = get_branches(repo)
    if not v:
        return False, "get_current_branch failed: [%s]" % r
    current_branch = r
    if len(current_branch) == 0:
        return True, None
    # get_branches puts the checked-out branch first
    return True, current_branch[0]
def repo_has_any_not_of_states(repo, states):
    """Return (True, [two-char status codes present that are NOT in states]).

    An empty states list (or a clean status) yields (True, []).
    Returns (False, error-msg) on failures.
    """
    list_unexpected = []
    if states is None:
        return False, "states is unspecified"
    if not isinstance(states, list):
        return False, "states is not a list"
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    # FIX(review): consume is_repo_working_tree's (status, payload) tuple;
    # the old "t1 is None / t1 is False" identity checks never fired.
    v, r = is_repo_working_tree(repo)
    if not v:
        return False, "%s does not exist" % repo
    if r is False:
        return False, "%s is not a git work tree" % repo
    v, r = git_wrapper.status(repo)
    if not v:
        return False, "repo_has_any_not_of_states failed: [%s]" % r
    out = r.rstrip() # removes the trailing newline
    if len(out) == 0:
        return True, []
    if len(states) == 0:
        return True, []
    for l in out.split("\n"):
        cl = l.rstrip()
        if len(cl) < 2:
            continue
        item_status = cl[0:2]
        if item_status not in states:
            list_unexpected.append(item_status)
    return True, list_unexpected
def get_head_files(repo):
    """Aggregate every worktree-changed file across the per-status helpers.

    Returns (True, [paths]) or (False, error-msg) from the first failing helper.
    """
    total_entries = []
    funcs = [get_head_modified_files, get_head_deleted_files, get_head_updated_files, get_head_updated_deleted_files, get_head_deleted_updated_files, get_head_added_added_files, get_head_deleted_deleted_files, get_head_updated_added_files, get_head_added_updated_files, get_head_modified_modified_files, get_head_added_modified_files, get_head_renamed_modified_files]
    for f in funcs:
        v, r = f(repo)
        if not v:
            return False, r
        total_entries += r
    return True, total_entries
# One thin wrapper per two-character git status code (see get_head_files_delegate).
def get_head_modified_files(repo):
    return get_head_files_delegate(repo, " M", "modified")
def get_head_deleted_files(repo):
    return get_head_files_delegate(repo, " D", "deleted")
def get_head_modified_modified_files(repo):
    return get_head_files_delegate(repo, "MM", "modified")
def get_head_added_modified_files(repo):
    return get_head_files_delegate(repo, "AM", "added_modified")
def get_head_updated_files(repo):
    return get_head_files_delegate(repo, "UU", "updated")
def get_head_deleted_deleted_files(repo):
    return get_head_files_delegate(repo, "DD", "deleted_deleted")
def get_head_updated_added_files(repo):
    return get_head_files_delegate(repo, "UA", "updated_added")
def get_head_updated_deleted_files(repo):
    return get_head_files_delegate(repo, "UD", "updated_deleted")
def get_head_deleted_updated_files(repo):
    return get_head_files_delegate(repo, "DU", "deleted_updated")
def get_head_added_added_files(repo):
    return get_head_files_delegate(repo, "AA", "added_added")
def get_head_added_updated_files(repo):
    return get_head_files_delegate(repo, "AU", "added_updated")
def get_head_renamed_modified_files(repo):
    """RM entries parsed into (original, renamed-absolute-path) tuples.

    NOTE(review): only the renamed side is absolutized; the original name is
    kept as parsed -- confirm callers expect that asymmetry.
    """
    v, r = get_head_files_delegate(repo, "RM", "renamed_modified")
    if not v:
        return False, r
    renamed_list = r
    repo_local = os.path.abspath(repo)
    renamed_list_filtered = []
    for rl in renamed_list:
        r = get_renamed_details(rl)
        if r is None:
            return False, "Unable to read out and parse head, renamed files from repo [%s]" % repo_local
        original_fn = r[0]
        renamed_fn = r[1]
        renamed_list_filtered.append( (original_fn, path_utils.concat_path(repo_local, renamed_fn)) )
    return True, renamed_list_filtered
def get_head_files_delegate(repo, status_detect, info_variation):
    """List worktree files whose two-char status equals status_detect.

    info_variation only labels error messages. Returns (True, [absolute
    paths]) or (False, error-msg).
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    # FIX(review): consume is_repo_working_tree's (status, payload) tuple;
    # the old "t1 is None / t1 is False" identity checks never fired.
    v, r = is_repo_working_tree(repo)
    if not v:
        return False, "%s does not exist." % repo
    if r is False:
        return False, "%s is not a git work tree." % repo
    v, r = git_wrapper.status(repo)
    if not v:
        return False, "get_head_%s_files failed: %s" % (info_variation, r)
    out = r.rstrip() # removes the trailing newline
    if len(out) == 0:
        return True, []
    ret = []
    for l in out.split("\n"):
        cl = l.rstrip()
        if len(cl) < 2:
            continue
        if cl[0:2] == status_detect:
            lf = cl[3:]
            fp = path_utils.concat_path(repo, lf)
            ret.append(os.path.abspath(fp))
    return True, ret
def get_staged_files(repo):
    """Aggregate staged files (modified/added/deleted plus rename targets).

    Returns (True, [absolute paths]) or (False, error-msg).
    """
    all_staged_files = []
    # mvtodo: in theory there shold also be a status "C" (Copied)
    funcs = [get_staged_modified_files, get_staged_added_files, get_staged_deleted_files]
    for f in funcs:
        v, r = f(repo)
        if not v:
            return False, r
        all_staged_files += r
    v, r = get_staged_renamed_files(repo)
    if not v:
        return False, r
    for x in r:
        # (original, renamed) tuples: only the rename target counts as staged
        all_staged_files.append(x[1])
    return True, all_staged_files
# One thin wrapper per index-side status character (see get_staged_delegate).
def get_staged_modified_files(repo):
    return get_staged_delegate(repo, ["M"])
def get_staged_added_files(repo):
    return get_staged_delegate(repo, ["A"])
def get_staged_deleted_files(repo):
    return get_staged_delegate(repo, ["D"])
def get_staged_renamed_files(repo):
    """R entries parsed into (original, renamed-absolute-path) tuples.

    NOTE(review): near-duplicate of get_head_renamed_modified_files --
    candidate for a shared helper.
    """
    v, r = get_staged_delegate(repo, ["R"])
    if not v:
        return False, r
    renamed_list = r
    repo_local = os.path.abspath(repo)
    renamed_list_filtered = []
    for rl in renamed_list:
        r = get_renamed_details(rl)
        if r is None:
            return False, "Unable to read out and parse staged, renamed files from repo [%s]" % repo_local
        original_fn = r[0]
        renamed_fn = r[1]
        renamed_list_filtered.append( (original_fn, path_utils.concat_path(repo_local, renamed_fn)) )
    return True, renamed_list_filtered
def get_staged_delegate(repo, check_chars):
    """List files whose index-side (first) status char is in check_chars.

    Returns (True, [absolute paths]) or (False, error-msg).
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    # FIX(review): consume is_repo_working_tree's (status, payload) tuple;
    # the old "t1 is None / t1 is False" identity checks never fired.
    v, r = is_repo_working_tree(repo)
    if not v:
        return False, "%s does not exist." % repo
    if r is False:
        return False, "%s is not a git work tree." % repo
    v, r = git_wrapper.status(repo)
    if not v:
        return False, "get_staged_files failed: %s" % r
    out = r.rstrip() # removes the trailing newline
    if len(out) == 0:
        return True, []
    ret = []
    for l in out.split("\n"):
        cl = l.rstrip()
        if len(cl) < 2:
            continue
        if cl[0] in check_chars:
            lf = cl[3:]
            fp = path_utils.concat_path(repo, lf)
            ret.append(os.path.abspath(fp))
    return True, ret
def get_unversioned_files(repo):
    """List untracked files via git_wrapper.ls_files as absolute paths.

    NOTE(review): splits on os.linesep; git normally emits LF regardless of
    platform -- verify behavior on Windows.
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    v, r = git_wrapper.ls_files(repo)
    if not v:
        return False, r
    unversioned_files = [path_utils.concat_path(repo, x) for x in r.split(os.linesep) if x != ""]
    return True, unversioned_files
def get_unversioned_files_and_folders(repo):
    """List untracked entries ("??" lines of 'git status -s'); may include folders."""
    # this version of "get_unversioned_files" will return a folder, if that entire folder
    # contains unversioned files only. empty folders, alone, will not be returned (as per Git's design).
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    v, r = git_wrapper.status_simple(repo)
    if not v:
        return False, r
    saved_st_msg = r
    status_items = [x for x in r.split(os.linesep) if x != ""]
    unversioned_files = []
    for si in status_items:
        if len(si) < 4:
            return False, "Invalid status message returned. Repo: [%s]. Status msg: [%s]" % (repo, saved_st_msg)
        if si[0:2] == "??":
            si_filtered = remove_gitstatus_simple_decorations(si)
            if si_filtered is None:
                return False, "Invalid status message returned (detected while filtering). Repo: [%s]. Status msg: [%s]" % (repo, saved_st_msg)
            unversioned_files.append(path_utils.concat_path(repo, si_filtered))
    return True, unversioned_files
def get_stash_list(repo):
    """Return (True, [stash names]) parsed from 'git stash list'."""
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    v, r = git_wrapper.stash_list(repo)
    if not v:
        return False, r
    stash_list = [get_stash_name(x) for x in r.split(os.linesep) if x != ""]
    return True, stash_list
def get_previous_hash_list(repo, num_previous = None):
    """Return (True, [commit hashes]) from 'git log --oneline', newest first."""
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    v, r = git_wrapper.log_oneline(repo, num_previous)
    if not v:
        return False, r
    prev_list = [get_prev_hash(x) for x in r.split(os.linesep) if x != ""]
    return True, prev_list
def get_head_hash(repo):
    """Return (True, hash-of-HEAD) via get_previous_hash_list(repo, 1)."""
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    v, r = get_previous_hash_list(repo, 1)
    if not v:
        return False, "git_lib.get_head_hash failed: [%s]" % r
    return True, r[0]
def is_repo_working_tree(repo):
    """Return (True, bool-is-worktree) or (False, error-msg).

    A "not a git repository" error is deliberately mapped to (True, False)
    rather than a failure.
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    v, r = git_wrapper.rev_parse_is_inside_work_tree(repo)
    if not v:
        if "not a git repository" in r:
            return True, False
        else:
            return False, "git_lib.is_repo_working_tree failed: %s" % r
    return True, "true" in r
def is_repo_bare(repo):
    """Return (True, bool): True only at the top level of a bare repo."""
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    if not os.path.exists(repo):
        return False, "Repo [%s] does not exist" % repo
    v, r = git_wrapper.rev_parse_is_bare_repo(repo)
    if not v:
        # query failure is treated as "not bare" rather than an error
        return True, False
    bare_query_result = "true" in r
    if not bare_query_result:
        return True, False
    v, r = git_wrapper.rev_parse_absolute_git_dir(repo)
    if not v:
        return False, "git_lib.is_repo_bare failed: %s" % r
    abs_path_found = r
    if abs_path_found != repo:
        # is a subdirectory of a bare repo
        return True, False
    return True, True
def is_repo_standard(repo):
    """Return (True, bool): repo whose .git entry is a directory."""
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    # NOTE(review): r (the is-worktree bool) is unused after this call
    v, r = is_repo_working_tree(repo)
    if not v:
        return False, "git_lib.is_repo_standard failed: %s" % r
    the_git_obj = path_utils.concat_path(repo, ".git")
    if os.path.isdir(the_git_obj):
        return True, True
    return True, False
def is_repo_submodule(repo):
    """Return (True, bool): repo whose .git entry exists but is a file."""
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    # NOTE(review): r (the is-worktree bool) is unused after this call
    v, r = is_repo_working_tree(repo)
    if not v:
        return False, "git_lib.is_repo_submodule failed: %s" % r
    the_git_obj = path_utils.concat_path(repo, ".git")
    if not os.path.exists( the_git_obj ):
        return True, False
    if not os.path.isdir(the_git_obj):
        return True, True
    return True, False
def patch_as_head(repo, patch_file, override_head_check):
    """Apply patch_file onto the work tree; requires a clear head unless overridden.

    Returns (True, None) on success, (False, error-msg) otherwise.
    """
    if repo is None:
        return False, "No repo specified"
    if patch_file is None:
        return False, "No patch file specified"
    repo = os.path.abspath(repo)
    patch_file = os.path.abspath(patch_file)
    if not override_head_check:
        v, r = is_head_clear(repo)
        if not v:
            return False, r
        if not r:
            return False, "Cannot patch - head is not clear"
    v, r = git_wrapper.apply(repo, patch_file)
    if not v:
        return False, r
    return True, None
def patch_as_staged(repo, patch_file, override_head_check):
    """patch_as_head followed by staging everything.

    Returns (True, None) on success, (False, error-msg) otherwise.
    """
    if repo is None:
        return False, "No repo specified"
    if patch_file is None:
        return False, "No patch file specified"
    repo = os.path.abspath(repo)
    patch_file = os.path.abspath(patch_file)
    v, r = patch_as_head(repo, patch_file, override_head_check)
    if not v:
        return False, r
    v, r = git_wrapper.stage(repo)
    if not v:
        return False, r
    return True, None
def patch_as_stash(repo, patch_file, override_head_check, override_stash_check):
    """patch_as_head followed by stashing; by default requires an empty stash.

    Returns (True, None) on success, (False, error-msg) otherwise.
    """
    if repo is None:
        return False, "No repo specified"
    if patch_file is None:
        return False, "No patch file specified"
    repo = os.path.abspath(repo)
    patch_file = os.path.abspath(patch_file)
    if not override_stash_check:
        v, r = get_stash_list(repo)
        if not v:
            return False, r
        if len(r) != 0:
            return False, "Cannot patch - stash is not empty"
    v, r = patch_as_head(repo, patch_file, override_head_check)
    if not v:
        return False, r
    v, r = git_wrapper.stash(repo)
    if not v:
        return False, r
    return True, None
def soft_reset(repo, file_list=None):
    """Delegate to git_wrapper.reset_head, optionally for specific files.

    NOTE(review): when file_list is None an empty list (not None) is passed
    to reset_head, unlike unstage() below -- confirm both are equivalent.
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    file_list_final = []
    if file_list is not None:
        if not isinstance(file_list, list):
            return False, "file_list must be a list"
        if len(file_list) == 0:
            return False, "file_list can't be empty"
        for f in file_list:
            file_list_final.append(os.path.abspath(f))
    return git_wrapper.reset_head(repo, file_list_final)
def unstage(repo, file_list=None):
    """Unstage everything, or only file_list entries (each must be staged).

    Returns git_wrapper.reset_head's result, or (False, error-msg) on bad input.
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    v, r = get_staged_files(repo)
    if not v:
        return False, "Can't unstage - unable to fetch staged files first: [%s]" % r
    staged_files = r
    file_list_final = None
    if file_list is not None:
        file_list_final = []
        if not isinstance(file_list, list):
            return False, "file_list must be a list"
        if len(file_list) == 0:
            return False, "file_list can't be empty"
        for f in file_list:
            f_abs = os.path.abspath(f)
            if not f_abs in staged_files:
                return False, "File [%s] is not staged" % f_abs
            file_list_final.append(f_abs)
    return git_wrapper.reset_head(repo, file_list_final)
def clone_bare(repo_source, repo_target):
    """Delegate to git_wrapper.clone_bare with absolutized paths."""
    if repo_source is None:
        return False, "No source repo specified"
    if repo_target is None:
        return False, "No target repo specified"
    return git_wrapper.clone_bare(os.path.abspath(repo_source), os.path.abspath(repo_target))
def clone(repo_source, repo_target, remotename=None):
    """Delegate to git_wrapper.clone with absolutized paths."""
    if repo_source is None:
        return False, "No source repo specified"
    if repo_target is None:
        return False, "No target repo specified"
    return git_wrapper.clone(os.path.abspath(repo_source), os.path.abspath(repo_target), remotename)
def clone_ext(repo_source, repo_target, remotename=None):
    """Delegate to git_wrapper.clone_ext with absolutized paths."""
    if repo_source is None:
        return False, "No source repo specified"
    if repo_target is None:
        return False, "No target repo specified"
    return git_wrapper.clone_ext(os.path.abspath(repo_source), os.path.abspath(repo_target), remotename)
def pull_default(repo):
    """Delegate to git_wrapper.pull_default with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.pull_default(os.path.abspath(repo))
def pull(repo, remote, branch):
    """Delegate to git_wrapper.pull with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.pull(os.path.abspath(repo), remote, branch)
def push(repo, remote, branch):
    """Delegate to git_wrapper.push with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.push(os.path.abspath(repo), remote, branch)
def log(repo, limit=None):
    """Delegate to git_wrapper.log with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.log(os.path.abspath(repo), limit)
def fetch_multiple(repo, remotes):
    """Delegate to git_wrapper.fetch_multiple with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.fetch_multiple(os.path.abspath(repo), remotes)
def fetch_all(repo):
    """Delegate to git_wrapper.fetch_all with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.fetch_all(os.path.abspath(repo))
def diff(repo, file_list=None):
    """Delegate to git_wrapper.diff, optionally restricted to file_list.

    file_list, when given, must be a non-empty list; entries are absolutized.
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    final_file_list = None
    if file_list is not None:
        if not isinstance(file_list, list):
            return False, "file_list must be a list"
        if len(file_list) == 0:
            return False, "file_list can't be empty"
        final_file_list = [os.path.abspath(fl) for fl in file_list]
    return git_wrapper.diff(repo, final_file_list)
def diff_indexed(repo, file_list):
    """Delegate to git_wrapper.diff_indexed with a mandatory, non-empty file_list."""
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    if file_list is None:
        return False, "file_list must be specified"
    if not isinstance(file_list, list):
        return False, "file_list must be a list"
    if len(file_list) == 0:
        return False, "file_list can't be empty"
    final_file_list = [os.path.abspath(fl) for fl in file_list]
    return git_wrapper.diff_indexed(repo, final_file_list)
def diff_cached(repo, file_list=None):
    """Delegate to git_wrapper.diff_cached, optionally restricted to file_list.

    file_list, when given, must be a non-empty list; entries are absolutized.
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    final_file_list = None
    if file_list is not None:
        if not isinstance(file_list, list):
            return False, "file_list must be a list"
        if len(file_list) == 0:
            return False, "file_list can't be empty"
        final_file_list = [os.path.abspath(fl) for fl in file_list]
    return git_wrapper.diff_cached(repo, final_file_list)
def diff_cached_indexed(repo, file_list):
    """Delegate to git_wrapper.diff_cached_indexed with a mandatory, non-empty file_list."""
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    if file_list is None:
        return False, "file_list must be specified"
    if not isinstance(file_list, list):
        return False, "file_list must be a list"
    if len(file_list) == 0:
        return False, "file_list can't be empty"
    final_file_list = [os.path.abspath(fl) for fl in file_list]
    return git_wrapper.diff_cached_indexed(repo, final_file_list)
def rev_parse_head(repo):
    """Delegate to git_wrapper.rev_parse_head with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.rev_parse_head(os.path.abspath(repo))
def stash_show(repo, stash_name):
    """Delegate to git_wrapper.stash_show for the named stash."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.stash_show(os.path.abspath(repo), stash_name)
def stash_show_diff(repo, stash_name):
    """Delegate to git_wrapper.stash_show_diff for the named stash."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.stash_show_diff(os.path.abspath(repo), stash_name)
def stash_clear(repo):
    """Delegate to git_wrapper.stash_clear with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.stash_clear(os.path.abspath(repo))
def stash_drop(repo, stash_name = None):
    """Delegate to git_wrapper.stash_drop (stash_name optional)."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.stash_drop(os.path.abspath(repo), stash_name)
def stash_pop(repo):
    """Delegate to git_wrapper.stash_pop with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.stash_pop(os.path.abspath(repo))
def show(repo, commit_id):
    """Delegate to git_wrapper.show for commit_id with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.show(os.path.abspath(repo), commit_id)
def checkout(repo, file_list=None):
    """Delegate to git_wrapper.checkout, optionally restricted to file_list.

    file_list, when given, must be a non-empty list; entries are absolutized.
    """
    if repo is None:
        return False, "No repo specified"
    repo = os.path.abspath(repo)
    final_file_list = None
    if file_list is not None:
        if not isinstance(file_list, list):
            return False, "file_list must be a list"
        if len(file_list) == 0:
            return False, "file_list can't be empty"
        final_file_list = [os.path.abspath(fl) for fl in file_list]
    return git_wrapper.checkout(repo, final_file_list)
def config(key, value, global_cfg=True):
    """Delegate to git_wrapper.config; key is mandatory."""
    if key is None:
        return False, "key unspecified"
    return git_wrapper.config(key, value, global_cfg)
def commit_editor(repo):
    """Delegate to git_wrapper.commit_editor with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.commit_editor(os.path.abspath(repo))
def commit_direct(repo, params):
    """Delegate to git_wrapper.commit_direct with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.commit_direct(os.path.abspath(repo), params)
def remote_change_url(repo, remote, new_url):
    """Delegate to git_wrapper.remote_change_url with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.remote_change_url(os.path.abspath(repo), remote, new_url)
def status_simple(repo):
    """Delegate to git_wrapper.status_simple with an absolutized repo path."""
    if repo is None:
        return False, "No repo specified"
    return git_wrapper.status_simple(os.path.abspath(repo))
def kill_previous(repo, num_previous):
    """Delegate to git_wrapper.reset_hard_head, discarding num_previous commits."""
    if repo is None:
        return False, "No repo specified"
    if num_previous is None:
        return False, "previous quantity unspecified"
    return git_wrapper.reset_hard_head(os.path.abspath(repo), num_previous)
if __name__ == "__main__":
    # smoke-run entry point: just announce the script name
    print("Hello from %s" % path_utils.basename_filtered(__file__))
| |
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run Inception V3 benchmarks.
Tutorials: https://cloud.google.com/tpu/docs/tutorials/inception
Code: https://github.com/tensorflow/tpu/blob/master/models/experimental/inception/inception_v3.py
This benchmark is equivalent to tensorflow_benchmark with the inception3 model
except that this can target TPU.
"""
# TODO(tohaowu): We only measure image processing speed for now, and we will
# measure the other metrics in the future.
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker.linux_benchmarks import mnist_benchmark
from perfkitbenchmarker.linux_benchmarks import resnet_benchmark
from perfkitbenchmarker.linux_packages import cloud_tpu_models
from perfkitbenchmarker.linux_packages import tensorflow
from six.moves import range
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'inception3'
BENCHMARK_CONFIG = """
inception3:
description: Runs Inception V3 Benchmark.
vm_groups:
default:
vm_spec:
GCP:
machine_type: n1-standard-4
zone: us-east1-d
boot_disk_size: 200
AWS:
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
machine_type: Standard_NC6
zone: eastus
"""
flags.DEFINE_float('inception3_learning_rate', 0.165, 'Learning rate.')
flags.DEFINE_integer('inception3_train_epochs', 200,
'Number of epochs use for training.', lower_bound=1)
flags.DEFINE_enum('inception3_use_data', 'real', ['real', 'fake'],
'Whether to use real or fake data. If real, the data is '
'downloaded from imagenet_data_dir. Otherwise, synthetic '
'data is generated.')
flags.DEFINE_enum('inception3_mode', 'train_and_eval',
['train', 'eval', 'train_and_eval'],
'Mode to run: train, eval, train_and_eval')
flags.DEFINE_integer('inception3_epochs_per_eval', 2,
'Number of training epochs to run between evaluations.')
flags.DEFINE_integer('inception3_save_checkpoints_secs', 0, 'Interval (in '
'seconds) at which the model data should be checkpointed. '
'Set to 0 to disable.')
flags.DEFINE_integer('inception3_train_batch_size', 1024,
'Global (not per-shard) batch size for training')
flags.DEFINE_integer('inception3_eval_batch_size', 1024,
'Global (not per-shard) batch size for evaluation')
def GetConfig(user_config):
  """Load the benchmark configuration, merging user-supplied overrides.

  Args:
    user_config: user supplied configuration (flags and config file)

  Returns:
    loaded benchmark configuration
  """
  merged = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  return merged
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
  """Update the benchmark_spec with supplied command line flags.

  Args:
    benchmark_spec: benchmark specification to update
  """
  benchmark_spec.learning_rate = FLAGS.inception3_learning_rate
  benchmark_spec.use_data = FLAGS.inception3_use_data
  benchmark_spec.mode = FLAGS.inception3_mode
  benchmark_spec.save_checkpoints_secs = FLAGS.inception3_save_checkpoints_secs
  benchmark_spec.train_batch_size = FLAGS.inception3_train_batch_size
  benchmark_spec.eval_batch_size = FLAGS.inception3_eval_batch_size
  benchmark_spec.commit = cloud_tpu_models.GetCommit(benchmark_spec.vms[0])
  benchmark_spec.data_dir = FLAGS.imagenet_data_dir
  benchmark_spec.num_train_images = FLAGS.imagenet_num_train_images
  benchmark_spec.num_eval_images = FLAGS.imagenet_num_eval_images
  # derive step counts from epochs: steps = epochs * (train images / batch)
  benchmark_spec.num_examples_per_epoch = (
      float(benchmark_spec.num_train_images) / benchmark_spec.train_batch_size)
  benchmark_spec.train_epochs = FLAGS.inception3_train_epochs
  benchmark_spec.train_steps = int(
      benchmark_spec.train_epochs * benchmark_spec.num_examples_per_epoch)
  benchmark_spec.epochs_per_eval = FLAGS.inception3_epochs_per_eval
  benchmark_spec.steps_per_eval = int(
      benchmark_spec.epochs_per_eval *
      benchmark_spec.num_examples_per_epoch)
def Prepare(benchmark_spec):
  """Install and set up Inception V3 on the target vm.

  Args:
    benchmark_spec: The benchmark specification
  """
  # Reuses the MNIST benchmark's environment/model-repo setup, then applies
  # the inception3-specific flags on top.
  mnist_benchmark.Prepare(benchmark_spec)
  _UpdateBenchmarkSpecWithFlags(benchmark_spec)
def _CreateMetadataDict(benchmark_spec):
  """Create metadata dict to be used in run results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    metadata dict
  """
  metadata = mnist_benchmark.CreateMetadataDict(benchmark_spec)
  # Every reported key mirrors the benchmark_spec attribute of the same name.
  for attr in ('learning_rate', 'use_data', 'mode', 'save_checkpoints_secs',
               'epochs_per_eval', 'steps_per_eval', 'precision',
               'train_batch_size', 'eval_batch_size'):
    metadata[attr] = getattr(benchmark_spec, attr)
  return metadata
def Run(benchmark_spec):
  """Run Inception V3 on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  _UpdateBenchmarkSpecWithFlags(benchmark_spec)
  vm = benchmark_spec.vms[0]
  inception3_benchmark_script = (
      'tpu/models/experimental/inception/inception_v3.py')
  # Base command; --train_steps / --tpu / --mode are appended per interval
  # inside the loop below.
  inception3_benchmark_cmd = (
      '{env_cmd} && python {script} '
      '--learning_rate={learning_rate} '
      '--iterations={iterations} '
      '--use_tpu={use_tpu} '
      '--use_data={use_data} '
      '--train_steps_per_eval={steps_per_eval} '
      '--data_dir={data_dir} '
      '--model_dir={model_dir} '
      '--save_checkpoints_secs={save_checkpoints_secs} '
      '--train_batch_size={train_batch_size} '
      '--eval_batch_size={eval_batch_size} '
      '--precision={precision}'.format(
          env_cmd=benchmark_spec.env_cmd,
          script=inception3_benchmark_script,
          learning_rate=benchmark_spec.learning_rate,
          iterations=benchmark_spec.iterations,
          use_tpu=bool(benchmark_spec.tpus),
          use_data=benchmark_spec.use_data,
          steps_per_eval=benchmark_spec.steps_per_eval,
          data_dir=benchmark_spec.data_dir,
          model_dir=benchmark_spec.model_dir,
          save_checkpoints_secs=benchmark_spec.save_checkpoints_secs,
          train_batch_size=benchmark_spec.train_batch_size,
          eval_batch_size=benchmark_spec.eval_batch_size,
          precision=benchmark_spec.precision))
  if FLAGS.tf_device == 'gpu':
    # On GPU, the TF environment variables must be prefixed onto the command.
    inception3_benchmark_cmd = '{env} {cmd}'.format(
        env=tensorflow.GetEnvironmentVars(vm), cmd=inception3_benchmark_cmd)
  samples = []
  metadata = _CreateMetadataDict(benchmark_spec)
  # Cumulative wall-clock training time across all intervals; only the
  # train phase is timed — eval time is excluded on purpose.
  elapsed_seconds = 0
  steps_per_eval = benchmark_spec.steps_per_eval
  train_steps = benchmark_spec.train_steps
  # Alternate train/eval in steps_per_eval-sized chunks until train_steps.
  for step in range(steps_per_eval, train_steps + steps_per_eval,
                    steps_per_eval):
    # Clamp the final interval so training never runs past train_steps.
    step = min(step, train_steps)
    inception3_benchmark_cmd_step = '{cmd} --train_steps={step}'.format(
        cmd=inception3_benchmark_cmd, step=step)
    if benchmark_spec.mode in ('train', 'train_and_eval'):
      if benchmark_spec.tpus:
        tpu = benchmark_spec.tpu_groups['train'].GetName()
        num_shards = '--num_shards={}'.format(
            benchmark_spec.tpu_groups['train'].GetNumShards())
      else:
        # No TPU: leave both substitutions empty.
        tpu = num_shards = ''
      inception3_benchmark_train_cmd = (
          '{cmd} --tpu={tpu} --mode=train {num_shards}'.format(
              cmd=inception3_benchmark_cmd_step,
              tpu=tpu, num_shards=num_shards))
      start = time.time()
      stdout, stderr = vm.RobustRemoteCommand(inception3_benchmark_train_cmd,
                                              should_log=True)
      elapsed_seconds += (time.time() - start)
      samples.extend(mnist_benchmark.MakeSamplesFromTrainOutput(
          metadata, stdout + stderr, elapsed_seconds, step))
    if benchmark_spec.mode in ('train_and_eval', 'eval'):
      if benchmark_spec.tpus:
        tpu = benchmark_spec.tpu_groups['eval'].GetName()
        num_shards = '--num_shards={}'.format(
            benchmark_spec.tpu_groups['eval'].GetNumShards())
      else:
        tpu = num_shards = ''
      inception3_benchmark_eval_cmd = (
          '{cmd} --tpu={tpu} --mode=eval {num_shards}'.format(
              cmd=inception3_benchmark_cmd_step,
              tpu=tpu, num_shards=num_shards))
      stdout, stderr = vm.RobustRemoteCommand(inception3_benchmark_eval_cmd,
                                              should_log=True)
      # Eval sample parsing is shared with the ResNet benchmark.
      samples.extend(resnet_benchmark.MakeSamplesFromEvalOutput(
          metadata, stdout + stderr, elapsed_seconds))
  return samples
def Cleanup(benchmark_spec):
  """Cleanup Inception V3 on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  # All provisioning is done by the MNIST benchmark, so its cleanup suffices.
  mnist_benchmark.Cleanup(benchmark_spec)
| |
from django.conf import settings
from django.core.checks.messages import Error
from django.core.checks.security import base, csrf, sessions
from django.core.management.utils import get_random_secret_key
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckSessionCookieSecureTest(SimpleTestCase):
    """Tests for the sessions.check_session_cookie_secure deployment check."""

    @override_settings(
        SESSION_COOKIE_SECURE=False,
        INSTALLED_APPS=["django.contrib.sessions"],
        MIDDLEWARE=[],
    )
    def test_session_cookie_secure_with_installed_app(self):
        """
        Warn if SESSION_COOKIE_SECURE is off and "django.contrib.sessions" is
        in INSTALLED_APPS.
        """
        self.assertEqual(sessions.check_session_cookie_secure(None), [sessions.W010])

    @override_settings(
        SESSION_COOKIE_SECURE=False,
        INSTALLED_APPS=[],
        MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
    )
    def test_session_cookie_secure_with_middleware(self):
        """
        Warn if SESSION_COOKIE_SECURE is off and
        "django.contrib.sessions.middleware.SessionMiddleware" is in
        MIDDLEWARE.
        """
        self.assertEqual(sessions.check_session_cookie_secure(None), [sessions.W011])

    @override_settings(
        SESSION_COOKIE_SECURE=False,
        INSTALLED_APPS=["django.contrib.sessions"],
        MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
    )
    def test_session_cookie_secure_both(self):
        """
        If SESSION_COOKIE_SECURE is off and we find both the session app and
        the middleware, provide one common warning.
        """
        self.assertEqual(sessions.check_session_cookie_secure(None), [sessions.W012])

    @override_settings(
        SESSION_COOKIE_SECURE=True,
        INSTALLED_APPS=["django.contrib.sessions"],
        MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
    )
    def test_session_cookie_secure_true(self):
        """
        If SESSION_COOKIE_SECURE is on, there's no warning about it.
        """
        self.assertEqual(sessions.check_session_cookie_secure(None), [])
class CheckSessionCookieHttpOnlyTest(SimpleTestCase):
    """Tests for the sessions.check_session_cookie_httponly deployment check."""

    @override_settings(
        SESSION_COOKIE_HTTPONLY=False,
        INSTALLED_APPS=["django.contrib.sessions"],
        MIDDLEWARE=[],
    )
    def test_session_cookie_httponly_with_installed_app(self):
        """
        Warn if SESSION_COOKIE_HTTPONLY is off and "django.contrib.sessions"
        is in INSTALLED_APPS.
        """
        self.assertEqual(sessions.check_session_cookie_httponly(None), [sessions.W013])

    @override_settings(
        SESSION_COOKIE_HTTPONLY=False,
        INSTALLED_APPS=[],
        MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
    )
    def test_session_cookie_httponly_with_middleware(self):
        """
        Warn if SESSION_COOKIE_HTTPONLY is off and
        "django.contrib.sessions.middleware.SessionMiddleware" is in
        MIDDLEWARE.
        """
        self.assertEqual(sessions.check_session_cookie_httponly(None), [sessions.W014])

    @override_settings(
        SESSION_COOKIE_HTTPONLY=False,
        INSTALLED_APPS=["django.contrib.sessions"],
        MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
    )
    def test_session_cookie_httponly_both(self):
        """
        If SESSION_COOKIE_HTTPONLY is off and we find both the session app and
        the middleware, provide one common warning.
        """
        self.assertEqual(sessions.check_session_cookie_httponly(None), [sessions.W015])

    @override_settings(
        SESSION_COOKIE_HTTPONLY=True,
        INSTALLED_APPS=["django.contrib.sessions"],
        MIDDLEWARE=['django.contrib.sessions.middleware.SessionMiddleware'],
    )
    def test_session_cookie_httponly_true(self):
        """
        If SESSION_COOKIE_HTTPONLY is on, there's no warning about it.
        """
        self.assertEqual(sessions.check_session_cookie_httponly(None), [])
class CheckCSRFMiddlewareTest(SimpleTestCase):
    """Tests for the csrf.check_csrf_middleware deployment check."""

    @override_settings(MIDDLEWARE=[])
    def test_no_csrf_middleware(self):
        """Warn if CsrfViewMiddleware isn't in MIDDLEWARE."""
        result = csrf.check_csrf_middleware(None)
        self.assertEqual(result, [csrf.W003])

    @override_settings(MIDDLEWARE=['django.middleware.csrf.CsrfViewMiddleware'])
    def test_with_csrf_middleware(self):
        """No warning when CsrfViewMiddleware is installed."""
        result = csrf.check_csrf_middleware(None)
        self.assertEqual(result, [])
class CheckCSRFCookieSecureTest(SimpleTestCase):
    """Tests for the csrf.check_csrf_cookie_secure deployment check."""

    @override_settings(
        MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
        CSRF_COOKIE_SECURE=False,
    )
    def test_with_csrf_cookie_secure_false(self):
        """
        Warn if CsrfViewMiddleware is in MIDDLEWARE but
        CSRF_COOKIE_SECURE isn't True.
        """
        self.assertEqual(csrf.check_csrf_cookie_secure(None), [csrf.W016])

    @override_settings(
        MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
        CSRF_USE_SESSIONS=True,
        CSRF_COOKIE_SECURE=False,
    )
    def test_use_sessions_with_csrf_cookie_secure_false(self):
        """
        No warning if CSRF_COOKIE_SECURE isn't True while CSRF_USE_SESSIONS
        is True.
        """
        self.assertEqual(csrf.check_csrf_cookie_secure(None), [])

    @override_settings(MIDDLEWARE=[], CSRF_COOKIE_SECURE=False)
    def test_with_csrf_cookie_secure_false_no_middleware(self):
        """
        No warning if CsrfViewMiddleware isn't in MIDDLEWARE, even if
        CSRF_COOKIE_SECURE is False.
        """
        self.assertEqual(csrf.check_csrf_cookie_secure(None), [])

    @override_settings(
        MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
        CSRF_COOKIE_SECURE=True,
    )
    def test_with_csrf_cookie_secure_true(self):
        """No warning when CSRF_COOKIE_SECURE is True."""
        self.assertEqual(csrf.check_csrf_cookie_secure(None), [])
class CheckSecurityMiddlewareTest(SimpleTestCase):
    """Tests for the base.check_security_middleware deployment check."""

    @override_settings(MIDDLEWARE=[])
    def test_no_security_middleware(self):
        """Warn if SecurityMiddleware isn't in MIDDLEWARE."""
        result = base.check_security_middleware(None)
        self.assertEqual(result, [base.W001])

    @override_settings(MIDDLEWARE=['django.middleware.security.SecurityMiddleware'])
    def test_with_security_middleware(self):
        """No warning when SecurityMiddleware is installed."""
        result = base.check_security_middleware(None)
        self.assertEqual(result, [])
class CheckStrictTransportSecurityTest(SimpleTestCase):
    """Tests for the base.check_sts (HSTS) deployment check."""

    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_HSTS_SECONDS=0,
    )
    def test_no_sts(self):
        """
        Warn if SECURE_HSTS_SECONDS isn't > 0.
        """
        self.assertEqual(base.check_sts(None), [base.W004])

    @override_settings(MIDDLEWARE=[], SECURE_HSTS_SECONDS=0)
    def test_no_sts_no_middleware(self):
        """
        Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware isn't
        installed.
        """
        self.assertEqual(base.check_sts(None), [])

    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_HSTS_SECONDS=3600,
    )
    def test_with_sts(self):
        """No warning when SECURE_HSTS_SECONDS is set to a positive value."""
        self.assertEqual(base.check_sts(None), [])
class CheckStrictTransportSecuritySubdomainsTest(SimpleTestCase):
    """Tests for the base.check_sts_include_subdomains deployment check."""

    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
        SECURE_HSTS_SECONDS=3600,
    )
    def test_no_sts_subdomains(self):
        """
        Warn if SECURE_HSTS_INCLUDE_SUBDOMAINS isn't True.
        """
        self.assertEqual(base.check_sts_include_subdomains(None), [base.W005])

    @override_settings(
        MIDDLEWARE=[],
        SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
        SECURE_HSTS_SECONDS=3600,
    )
    def test_no_sts_subdomains_no_middleware(self):
        """
        Don't warn if SecurityMiddleware isn't installed.
        """
        self.assertEqual(base.check_sts_include_subdomains(None), [])

    # NOTE(review): the SECURE_SSL_REDIRECT override below looks incidental
    # (the check under test only reads HSTS settings) — confirm.
    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_SSL_REDIRECT=False,
        SECURE_HSTS_SECONDS=None,
    )
    def test_no_sts_subdomains_no_seconds(self):
        """
        Don't warn if SECURE_HSTS_SECONDS isn't set.
        """
        self.assertEqual(base.check_sts_include_subdomains(None), [])

    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
        SECURE_HSTS_SECONDS=3600,
    )
    def test_with_sts_subdomains(self):
        """No warning when SECURE_HSTS_INCLUDE_SUBDOMAINS is True."""
        self.assertEqual(base.check_sts_include_subdomains(None), [])
class CheckStrictTransportSecurityPreloadTest(SimpleTestCase):
    """Tests for the base.check_sts_preload deployment check."""

    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_HSTS_PRELOAD=False,
        SECURE_HSTS_SECONDS=3600,
    )
    def test_no_sts_preload(self):
        """
        Warn if SECURE_HSTS_PRELOAD isn't True.
        """
        self.assertEqual(base.check_sts_preload(None), [base.W021])

    @override_settings(MIDDLEWARE=[], SECURE_HSTS_PRELOAD=False, SECURE_HSTS_SECONDS=3600)
    def test_no_sts_preload_no_middleware(self):
        """
        Don't warn if SecurityMiddleware isn't installed.
        """
        self.assertEqual(base.check_sts_preload(None), [])

    # NOTE(review): the SECURE_SSL_REDIRECT override below looks incidental
    # (the check under test only reads HSTS settings) — confirm.
    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_SSL_REDIRECT=False,
        SECURE_HSTS_SECONDS=None,
    )
    def test_no_sts_preload_no_seconds(self):
        """
        Don't warn if SECURE_HSTS_SECONDS isn't set.
        """
        self.assertEqual(base.check_sts_preload(None), [])

    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_HSTS_PRELOAD=True,
        SECURE_HSTS_SECONDS=3600,
    )
    def test_with_sts_preload(self):
        """No warning when SECURE_HSTS_PRELOAD is True."""
        self.assertEqual(base.check_sts_preload(None), [])
class CheckXFrameOptionsMiddlewareTest(SimpleTestCase):
    """Tests for the base.check_xframe_options_middleware deployment check."""

    @override_settings(MIDDLEWARE=[])
    def test_middleware_not_installed(self):
        """Warn if XFrameOptionsMiddleware isn't in MIDDLEWARE."""
        result = base.check_xframe_options_middleware(None)
        self.assertEqual(result, [base.W002])

    @override_settings(MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"])
    def test_middleware_installed(self):
        """No warning when XFrameOptionsMiddleware is installed."""
        result = base.check_xframe_options_middleware(None)
        self.assertEqual(result, [])
class CheckXFrameOptionsDenyTest(SimpleTestCase):
    """Tests for the base.check_xframe_deny deployment check."""

    @override_settings(
        MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
        X_FRAME_OPTIONS='SAMEORIGIN',
    )
    def test_x_frame_options_not_deny(self):
        """
        Warn if XFrameOptionsMiddleware is in MIDDLEWARE but
        X_FRAME_OPTIONS isn't 'DENY'.
        """
        self.assertEqual(base.check_xframe_deny(None), [base.W019])

    @override_settings(MIDDLEWARE=[], X_FRAME_OPTIONS='SAMEORIGIN')
    def test_middleware_not_installed(self):
        """
        No error if XFrameOptionsMiddleware isn't in MIDDLEWARE even if
        X_FRAME_OPTIONS isn't 'DENY'.
        """
        self.assertEqual(base.check_xframe_deny(None), [])

    @override_settings(
        MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
        X_FRAME_OPTIONS='DENY',
    )
    def test_xframe_deny(self):
        """No warning when X_FRAME_OPTIONS is 'DENY'."""
        self.assertEqual(base.check_xframe_deny(None), [])
class CheckContentTypeNosniffTest(SimpleTestCase):
    """Tests for the base.check_content_type_nosniff deployment check."""

    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_CONTENT_TYPE_NOSNIFF=False,
    )
    def test_no_content_type_nosniff(self):
        """
        Warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True.
        """
        self.assertEqual(base.check_content_type_nosniff(None), [base.W006])

    @override_settings(MIDDLEWARE=[], SECURE_CONTENT_TYPE_NOSNIFF=False)
    def test_no_content_type_nosniff_no_middleware(self):
        """
        Don't warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True and
        SecurityMiddleware isn't in MIDDLEWARE.
        """
        self.assertEqual(base.check_content_type_nosniff(None), [])

    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_CONTENT_TYPE_NOSNIFF=True,
    )
    def test_with_content_type_nosniff(self):
        """No warning when SECURE_CONTENT_TYPE_NOSNIFF is True."""
        self.assertEqual(base.check_content_type_nosniff(None), [])
class CheckSSLRedirectTest(SimpleTestCase):
    """Tests for the base.check_ssl_redirect deployment check."""

    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_SSL_REDIRECT=False,
    )
    def test_no_ssl_redirect(self):
        """
        Warn if SECURE_SSL_REDIRECT isn't True.
        """
        self.assertEqual(base.check_ssl_redirect(None), [base.W008])

    @override_settings(MIDDLEWARE=[], SECURE_SSL_REDIRECT=False)
    def test_no_ssl_redirect_no_middleware(self):
        """
        Don't warn if SECURE_SSL_REDIRECT is False and SecurityMiddleware isn't
        installed.
        """
        self.assertEqual(base.check_ssl_redirect(None), [])

    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_SSL_REDIRECT=True,
    )
    def test_with_ssl_redirect(self):
        """No warning when SECURE_SSL_REDIRECT is True."""
        self.assertEqual(base.check_ssl_redirect(None), [])
class CheckSecretKeyTest(SimpleTestCase):
    """Tests for the base.check_secret_key deployment check."""

    @override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'ab')
    def test_okay_secret_key(self):
        """A key at the minimum length with enough unique characters passes."""
        self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
        self.assertGreater(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
        self.assertEqual(base.check_secret_key(None), [])

    @override_settings(SECRET_KEY='')
    def test_empty_secret_key(self):
        """An empty SECRET_KEY triggers W009."""
        self.assertEqual(base.check_secret_key(None), [base.W009])

    @override_settings(SECRET_KEY=None)
    def test_missing_secret_key(self):
        """A SECRET_KEY that was deleted entirely triggers W009."""
        del settings.SECRET_KEY
        self.assertEqual(base.check_secret_key(None), [base.W009])

    @override_settings(SECRET_KEY=None)
    def test_none_secret_key(self):
        """SECRET_KEY=None triggers W009."""
        self.assertEqual(base.check_secret_key(None), [base.W009])

    @override_settings(
        SECRET_KEY=base.SECRET_KEY_INSECURE_PREFIX + get_random_secret_key()
    )
    def test_insecure_secret_key(self):
        """A key carrying the auto-generated insecure prefix triggers W009."""
        self.assertEqual(base.check_secret_key(None), [base.W009])

    @override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'a')
    def test_low_length_secret_key(self):
        """A key one character below the minimum length triggers W009."""
        self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH - 1)
        self.assertEqual(base.check_secret_key(None), [base.W009])

    @override_settings(SECRET_KEY='abcd' * 20)
    def test_low_entropy_secret_key(self):
        """A long key with too few unique characters triggers W009."""
        self.assertGreater(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
        self.assertLess(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
        self.assertEqual(base.check_secret_key(None), [base.W009])
class CheckDebugTest(SimpleTestCase):
    """Tests for the base.check_debug deployment check."""

    @override_settings(DEBUG=True)
    def test_debug_true(self):
        """Warn if DEBUG is True."""
        result = base.check_debug(None)
        self.assertEqual(result, [base.W018])

    @override_settings(DEBUG=False)
    def test_debug_false(self):
        """No warning when DEBUG is off."""
        result = base.check_debug(None)
        self.assertEqual(result, [])
class CheckAllowedHostsTest(SimpleTestCase):
    """Tests for the base.check_allowed_hosts deployment check."""

    @override_settings(ALLOWED_HOSTS=[])
    def test_allowed_hosts_empty(self):
        """Warn when ALLOWED_HOSTS is empty."""
        result = base.check_allowed_hosts(None)
        self.assertEqual(result, [base.W020])

    @override_settings(ALLOWED_HOSTS=['.example.com'])
    def test_allowed_hosts_set(self):
        """No warning when ALLOWED_HOSTS is populated."""
        result = base.check_allowed_hosts(None)
        self.assertEqual(result, [])
class CheckReferrerPolicyTest(SimpleTestCase):
    """Tests for the base.check_referrer_policy deployment check."""

    @override_settings(
        MIDDLEWARE=['django.middleware.security.SecurityMiddleware'],
        SECURE_REFERRER_POLICY=None,
    )
    def test_no_referrer_policy(self):
        """Warn if SECURE_REFERRER_POLICY is None with SecurityMiddleware."""
        self.assertEqual(base.check_referrer_policy(None), [base.W022])

    @override_settings(MIDDLEWARE=[], SECURE_REFERRER_POLICY=None)
    def test_no_referrer_policy_no_middleware(self):
        """
        Don't warn if SECURE_REFERRER_POLICY is None and SecurityMiddleware
        isn't in MIDDLEWARE.
        """
        self.assertEqual(base.check_referrer_policy(None), [])

    @override_settings(MIDDLEWARE=['django.middleware.security.SecurityMiddleware'])
    def test_with_referrer_policy(self):
        """Accept comma-separated strings, lists, and tuples of valid values."""
        tests = (
            'strict-origin',
            'strict-origin,origin',
            'strict-origin, origin',
            ['strict-origin', 'origin'],
            ('strict-origin', 'origin'),
        )
        for value in tests:
            with self.subTest(value=value), override_settings(SECURE_REFERRER_POLICY=value):
                self.assertEqual(base.check_referrer_policy(None), [])

    @override_settings(
        MIDDLEWARE=['django.middleware.security.SecurityMiddleware'],
        SECURE_REFERRER_POLICY='invalid-value',
    )
    def test_with_invalid_referrer_policy(self):
        """An unrecognized policy value is an error (E023), not a warning."""
        self.assertEqual(base.check_referrer_policy(None), [base.E023])
def failure_view_with_invalid_signature():
    """Stub CSRF failure view that deliberately takes no arguments.

    Used to exercise the check that flags failure views whose signature
    doesn't accept the expected parameters.
    """
class CSRFFailureViewTest(SimpleTestCase):
    """Tests for the csrf.check_csrf_failure_view deployment check."""

    @override_settings(CSRF_FAILURE_VIEW='')
    def test_failure_view_import_error(self):
        """An unimportable CSRF_FAILURE_VIEW path yields error E102."""
        self.assertEqual(
            csrf.check_csrf_failure_view(None),
            [
                Error(
                    "The CSRF failure view '' could not be imported.",
                    id='security.E102',
                )
            ],
        )

    @override_settings(
        CSRF_FAILURE_VIEW='check_framework.test_security.failure_view_with_invalid_signature',
    )
    def test_failure_view_invalid_signature(self):
        """A failure view with the wrong argument count yields error E101."""
        msg = (
            "The CSRF failure view "
            "'check_framework.test_security.failure_view_with_invalid_signature' "
            "does not take the correct number of arguments."
        )
        self.assertEqual(
            csrf.check_csrf_failure_view(None),
            [Error(msg, id='security.E101')],
        )
class CheckCrossOriginOpenerPolicyTest(SimpleTestCase):
    """Tests for the base.check_cross_origin_opener_policy deployment check."""

    @override_settings(
        MIDDLEWARE=['django.middleware.security.SecurityMiddleware'],
        SECURE_CROSS_ORIGIN_OPENER_POLICY=None,
    )
    def test_no_coop(self):
        """No message when the COOP setting is explicitly None."""
        self.assertEqual(base.check_cross_origin_opener_policy(None), [])

    @override_settings(MIDDLEWARE=['django.middleware.security.SecurityMiddleware'])
    def test_with_coop(self):
        """All documented COOP values are accepted."""
        tests = ['same-origin', 'same-origin-allow-popups', 'unsafe-none']
        for value in tests:
            with self.subTest(value=value), override_settings(
                SECURE_CROSS_ORIGIN_OPENER_POLICY=value,
            ):
                self.assertEqual(base.check_cross_origin_opener_policy(None), [])

    @override_settings(
        MIDDLEWARE=['django.middleware.security.SecurityMiddleware'],
        SECURE_CROSS_ORIGIN_OPENER_POLICY='invalid-value',
    )
    def test_with_invalid_coop(self):
        """An unrecognized COOP value is an error (E024)."""
        self.assertEqual(base.check_cross_origin_opener_policy(None), [base.E024])
| |
"""Class to hold all media player accessories."""
import logging
from pyhap.const import CATEGORY_SWITCH
from homeassistant.components.media_player import (
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN,
SERVICE_SELECT_SOURCE,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_SELECT_SOURCE,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_STOP,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from .accessories import TYPES, HomeAccessory
from .const import (
ATTR_KEY_NAME,
CHAR_ACTIVE,
CHAR_MUTE,
CHAR_NAME,
CHAR_ON,
CHAR_VOLUME,
CHAR_VOLUME_CONTROL_TYPE,
CHAR_VOLUME_SELECTOR,
CONF_FEATURE_LIST,
EVENT_HOMEKIT_TV_REMOTE_KEY_PRESSED,
FEATURE_ON_OFF,
FEATURE_PLAY_PAUSE,
FEATURE_PLAY_STOP,
FEATURE_TOGGLE_MUTE,
KEY_PLAY_PAUSE,
MAX_NAME_LENGTH,
SERV_SWITCH,
SERV_TELEVISION_SPEAKER,
)
from .type_remotes import REMOTE_KEYS, RemoteInputSelectAccessory
from .util import get_media_player_features
_LOGGER = logging.getLogger(__name__)

# HomeKit display-name suffix per feature switch.
# Names may not contain special characters
# or emoji (/ is a special character for Apple)
MODE_FRIENDLY_NAME = {
    FEATURE_ON_OFF: "Power",
    FEATURE_PLAY_PAUSE: "Play-Pause",
    FEATURE_PLAY_STOP: "Play-Stop",
    FEATURE_TOGGLE_MUTE: "Mute",
}

# Entity states treated as "off" when mapping to HomeKit power state.
MEDIA_PLAYER_OFF_STATES = (
    STATE_OFF,
    STATE_UNKNOWN,
    STATE_STANDBY,
    "None",
)
@TYPES.register("MediaPlayer")
class MediaPlayer(HomeAccessory):
    """Generate a Media Player accessory.

    Each requested media-player feature is exposed to HomeKit as its own
    switch service.
    """

    def __init__(self, *args):
        """Initialize a Switch accessory object."""
        super().__init__(*args, category=CATEGORY_SWITCH)
        state = self.hass.states.get(self.entity_id)
        # One entry per supported feature; a value stays None when that
        # feature is not enabled for this entity.
        self.chars = dict.fromkeys(
            (FEATURE_ON_OFF, FEATURE_PLAY_PAUSE, FEATURE_PLAY_STOP, FEATURE_TOGGLE_MUTE)
        )
        feature_list = self.config.get(
            CONF_FEATURE_LIST, get_media_player_features(state)
        )
        # Create one switch service per enabled feature, in a fixed order.
        feature_setters = (
            (FEATURE_ON_OFF, self.set_on_off),
            (FEATURE_PLAY_PAUSE, self.set_play_pause),
            (FEATURE_PLAY_STOP, self.set_play_stop),
            (FEATURE_TOGGLE_MUTE, self.set_toggle_mute),
        )
        for feature, setter in feature_setters:
            if feature not in feature_list:
                continue
            serv = self.add_preload_service(SERV_SWITCH, CHAR_NAME)
            serv.configure_char(
                CHAR_NAME, value=self.generate_service_name(feature)
            )
            self.chars[feature] = serv.configure_char(
                CHAR_ON, value=False, setter_callback=setter
            )
        self.async_update_state(state)

    def generate_service_name(self, mode):
        """Return the (length-capped) display name for a feature's service."""
        friendly_name = MODE_FRIENDLY_NAME[mode]
        return f"{self.display_name} {friendly_name}"[:MAX_NAME_LENGTH]

    def set_on_off(self, value):
        """Turn the media player on or off from HomeKit."""
        _LOGGER.debug('%s: Set switch state for "on_off" to %s', self.entity_id, value)
        if value:
            service = SERVICE_TURN_ON
        else:
            service = SERVICE_TURN_OFF
        self.async_call_service(DOMAIN, service, {ATTR_ENTITY_ID: self.entity_id})

    def set_play_pause(self, value):
        """Play (True) or pause (False) the media player from HomeKit."""
        _LOGGER.debug(
            '%s: Set switch state for "play_pause" to %s', self.entity_id, value
        )
        if value:
            service = SERVICE_MEDIA_PLAY
        else:
            service = SERVICE_MEDIA_PAUSE
        self.async_call_service(DOMAIN, service, {ATTR_ENTITY_ID: self.entity_id})

    def set_play_stop(self, value):
        """Play (True) or stop (False) the media player from HomeKit."""
        _LOGGER.debug(
            '%s: Set switch state for "play_stop" to %s', self.entity_id, value
        )
        if value:
            service = SERVICE_MEDIA_PLAY
        else:
            service = SERVICE_MEDIA_STOP
        self.async_call_service(DOMAIN, service, {ATTR_ENTITY_ID: self.entity_id})

    def set_toggle_mute(self, value):
        """Mute (True) or unmute (False) the media player from HomeKit."""
        _LOGGER.debug(
            '%s: Set switch state for "toggle_mute" to %s', self.entity_id, value
        )
        self.async_call_service(
            DOMAIN,
            SERVICE_VOLUME_MUTE,
            {ATTR_ENTITY_ID: self.entity_id, ATTR_MEDIA_VOLUME_MUTED: value},
        )

    @callback
    def async_update_state(self, new_state):
        """Push the entity's latest state into the HomeKit switch chars."""
        current_state = new_state.state
        on_off_char = self.chars[FEATURE_ON_OFF]
        if on_off_char:
            hk_state = current_state not in MEDIA_PLAYER_OFF_STATES
            _LOGGER.debug(
                '%s: Set current state for "on_off" to %s', self.entity_id, hk_state
            )
            on_off_char.set_value(hk_state)
        is_playing = current_state == STATE_PLAYING
        play_pause_char = self.chars[FEATURE_PLAY_PAUSE]
        if play_pause_char:
            _LOGGER.debug(
                '%s: Set current state for "play_pause" to %s',
                self.entity_id,
                is_playing,
            )
            play_pause_char.set_value(is_playing)
        play_stop_char = self.chars[FEATURE_PLAY_STOP]
        if play_stop_char:
            _LOGGER.debug(
                '%s: Set current state for "play_stop" to %s',
                self.entity_id,
                is_playing,
            )
            play_stop_char.set_value(is_playing)
        toggle_mute_char = self.chars[FEATURE_TOGGLE_MUTE]
        if toggle_mute_char:
            mute_state = bool(new_state.attributes.get(ATTR_MEDIA_VOLUME_MUTED))
            _LOGGER.debug(
                '%s: Set current state for "toggle_mute" to %s',
                self.entity_id,
                mute_state,
            )
            toggle_mute_char.set_value(mute_state)
@TYPES.register("TelevisionMediaPlayer")
class TelevisionMediaPlayer(RemoteInputSelectAccessory):
    """Generate a Television Media Player accessory."""

    def __init__(self, *args):
        """Initialize a Television Media Player accessory object."""
        super().__init__(
            SUPPORT_SELECT_SOURCE,
            ATTR_INPUT_SOURCE,
            ATTR_INPUT_SOURCE_LIST,
            *args,
        )
        state = self.hass.states.get(self.entity_id)
        features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        self.chars_speaker = []
        # Truthy when the entity supports play and/or pause; consulted by
        # set_remote_key to send play/pause directly instead of a generic event.
        self._supports_play_pause = features & (SUPPORT_PLAY | SUPPORT_PAUSE)
        if features & SUPPORT_VOLUME_MUTE or features & SUPPORT_VOLUME_STEP:
            self.chars_speaker.extend(
                (CHAR_NAME, CHAR_ACTIVE, CHAR_VOLUME_CONTROL_TYPE, CHAR_VOLUME_SELECTOR)
            )
            if features & SUPPORT_VOLUME_SET:
                self.chars_speaker.append(CHAR_VOLUME)
        # Only add a speaker service when the entity supports any volume
        # control (CHAR_VOLUME_SELECTOR is added above exactly in that case).
        if CHAR_VOLUME_SELECTOR in self.chars_speaker:
            serv_speaker = self.add_preload_service(
                SERV_TELEVISION_SPEAKER, self.chars_speaker
            )
            self.serv_tv.add_linked_service(serv_speaker)
            name = f"{self.display_name} Volume"
            serv_speaker.configure_char(CHAR_NAME, value=name)
            serv_speaker.configure_char(CHAR_ACTIVE, value=1)
            self.char_mute = serv_speaker.configure_char(
                CHAR_MUTE, value=False, setter_callback=self.set_mute
            )
            # 1 = absolute volume supported, 2 = relative (step) only.
            volume_control_type = 1 if CHAR_VOLUME in self.chars_speaker else 2
            serv_speaker.configure_char(
                CHAR_VOLUME_CONTROL_TYPE, value=volume_control_type
            )
            self.char_volume_selector = serv_speaker.configure_char(
                CHAR_VOLUME_SELECTOR, setter_callback=self.set_volume_step
            )
            if CHAR_VOLUME in self.chars_speaker:
                self.char_volume = serv_speaker.configure_char(
                    CHAR_VOLUME, setter_callback=self.set_volume
                )
        self.async_update_state(state)

    def set_on_off(self, value):
        """Move switch state to value if call came from HomeKit."""
        _LOGGER.debug('%s: Set switch state for "on_off" to %s', self.entity_id, value)
        service = SERVICE_TURN_ON if value else SERVICE_TURN_OFF
        params = {ATTR_ENTITY_ID: self.entity_id}
        self.async_call_service(DOMAIN, service, params)

    def set_mute(self, value):
        """Move switch state to value if call came from HomeKit."""
        _LOGGER.debug(
            '%s: Set switch state for "toggle_mute" to %s', self.entity_id, value
        )
        params = {ATTR_ENTITY_ID: self.entity_id, ATTR_MEDIA_VOLUME_MUTED: value}
        self.async_call_service(DOMAIN, SERVICE_VOLUME_MUTE, params)

    def set_volume(self, value):
        """Send volume step value if call came from HomeKit."""
        _LOGGER.debug("%s: Set volume to %s", self.entity_id, value)
        params = {ATTR_ENTITY_ID: self.entity_id, ATTR_MEDIA_VOLUME_LEVEL: value}
        self.async_call_service(DOMAIN, SERVICE_VOLUME_SET, params)

    def set_volume_step(self, value):
        """Send volume step value if call came from HomeKit."""
        _LOGGER.debug("%s: Step volume by %s", self.entity_id, value)
        # HomeKit sends a truthy value for "down", falsy for "up".
        service = SERVICE_VOLUME_DOWN if value else SERVICE_VOLUME_UP
        params = {ATTR_ENTITY_ID: self.entity_id}
        self.async_call_service(DOMAIN, service, params)

    def set_input_source(self, value):
        """Send input set value if call came from HomeKit."""
        _LOGGER.debug("%s: Set current input to %s", self.entity_id, value)
        source = self.sources[value]
        params = {ATTR_ENTITY_ID: self.entity_id, ATTR_INPUT_SOURCE: source}
        self.async_call_service(DOMAIN, SERVICE_SELECT_SOURCE, params)

    def set_remote_key(self, value):
        """Send remote key value if call came from HomeKit."""
        _LOGGER.debug("%s: Set remote key to %s", self.entity_id, value)
        key_name = REMOTE_KEYS.get(value)
        if key_name is None:
            _LOGGER.warning("%s: Unhandled key press for %s", self.entity_id, value)
            return
        if key_name == KEY_PLAY_PAUSE and self._supports_play_pause:
            # Handle Play Pause by directly updating the media player entity.
            state = self.hass.states.get(self.entity_id).state
            if state in (STATE_PLAYING, STATE_PAUSED):
                # Toggle relative to the known state rather than relying on
                # the entity's own play/pause toggle.
                service = (
                    SERVICE_MEDIA_PLAY if state == STATE_PAUSED else SERVICE_MEDIA_PAUSE
                )
            else:
                service = SERVICE_MEDIA_PLAY_PAUSE
            params = {ATTR_ENTITY_ID: self.entity_id}
            self.async_call_service(DOMAIN, service, params)
            return
        # Unhandled keys can be handled by listening to the event bus
        self.hass.bus.async_fire(
            EVENT_HOMEKIT_TV_REMOTE_KEY_PRESSED,
            {ATTR_KEY_NAME: key_name, ATTR_ENTITY_ID: self.entity_id},
        )

    @callback
    def async_update_state(self, new_state):
        """Update Television state after state changed."""
        current_state = new_state.state
        # Power state television
        hk_state = 0
        if current_state not in MEDIA_PLAYER_OFF_STATES:
            hk_state = 1
        _LOGGER.debug("%s: Set current active state to %s", self.entity_id, hk_state)
        self.char_active.set_value(hk_state)
        # Set mute state
        if CHAR_VOLUME_SELECTOR in self.chars_speaker:
            current_mute_state = bool(new_state.attributes.get(ATTR_MEDIA_VOLUME_MUTED))
            _LOGGER.debug(
                "%s: Set current mute state to %s",
                self.entity_id,
                current_mute_state,
            )
            self.char_mute.set_value(current_mute_state)
        self._async_update_input_state(hk_state, new_state)
| |
#!/usr/bin/env python
#
# This source file is part of the osgBoostPython library
#
# Copyright (C) 2009-2010 Jean-Sebastien Guay
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# http://www.gnu.org/copyleft/lesser.txt.
#
import sys
import math
import thread
from osgpypp import osg, osgDB, osgGA, osgViewer
# import osg
# import osgDB
# import osgGA
# import osgViewer
# Convenience aliases: the demo uses the generic osg.Matrix / osg.Vec3 names,
# mapped here to the double-precision matrix and single-precision vector
# types exposed by osgpypp.
osg.Matrix = osg.Matrixd
osg.Vec3 = osg.Vec3f
# //
# // A simple demo demonstrating planar reflections using multiple renderings
# // of a subgraph, overriding of state attribures and use of the stencil buffer.
# //
# // The multipass system implemented here is a variation if Mark Kilgard's
# // paper "Improving Shadows and Reflections via the Stencil Buffer" which
# // can be found on the developer parts of the NVidia web site.
# //
# // The variations comes from the fact that the mirrors stencil values
# // are done on the first pass, rather than the second as in Mark's paper.
# // The second pass is now Mark's first pass - drawing the unreflected scene,
# // but also unsets the stencil buffer. This variation stops the unreflected
# // world poking through the mirror to be seen in the final rendering and
# // also obscures the world correctly when on the reverse side of the mirror.
# // Although there is still some unresolved issue with the clip plane needing
# // to be flipped when looking at the reverse side of the mirror. Niether
# // of these issues are mentioned in the Mark's paper, but trip us up when
# // we apply them.
def createMirrorTexturedState(filename):
    """Build the mirror's StateSet: culling disabled and, when *filename*
    can be read as an image, a protected texture on texture unit 0."""
    state_set = osg.StateSet()
    state_set.setMode(osg.GL_CULL_FACE,
                      osg.StateAttribute.OFF | osg.StateAttribute.PROTECTED)
    #// set up the texture.
    # Only attach the texture when the image file loads successfully.
    mirror_image = osgDB.readImageFile(filename)
    if mirror_image:
        mirror_texture = osg.Texture2D()
        mirror_texture.setImage(mirror_image)
        state_set.setTextureAttributeAndModes(
            0, mirror_texture,
            osg.StateAttribute.ON | osg.StateAttribute.PROTECTED)
    return state_set
def createMirrorSurface(xMin, xMax, yMin, yMax, z):
    """Create the mirror quad: a single textured QUAD in the plane Z=z
    covering [xMin, xMax] x [yMin, yMax] with one overall normal/colour."""
    #// set up the drawstate.
    #// set up the Geometry.
    geom = osg.Geometry()
    # Corner vertices, starting at (xMin, yMax) and walking the rectangle.
    verts = osg.Vec3Array()
    for corner_x, corner_y in ((xMin, yMax), (xMin, yMin),
                               (xMax, yMin), (xMax, yMax)):
        verts.append(osg.Vec3f(corner_x, corner_y, z))
    geom.setVertexArray(verts)
    # Single +Z normal shared by the whole quad.
    normals = osg.Vec3Array()
    normals.append(osg.Vec3f(0.0, 0.0, 1.0))
    geom.setNormalArray(normals)
    geom.normalBinding = osg.Geometry.BIND_OVERALL
    # Texture coordinates in the same order as the vertices above.
    tex_coords = osg.Vec2Array()
    for s, t in ((0.0, 1.0), (0.0, 0.0), (1.0, 0.0), (1.0, 1.0)):
        tex_coords.append(osg.Vec2f(s, t))
    geom.setTexCoordArray(0, tex_coords)
    # One opaque white colour for the whole quad.
    colours = osg.Vec4Array()
    colours.append(osg.Vec4f(1.0, 1.0, 1.0, 1.0))
    geom.setColorArray(colours)
    geom.colorBinding = osg.Geometry.BIND_OVERALL
    geom.addPrimitiveSet(osg.DrawArrays(osg.PrimitiveSet.QUADS, 0, 4))
    return geom
def toRad(degrees):
    """Convert an angle given in degrees to radians."""
    as_radians = (degrees * math.pi) / 180.0
    return as_radians
def toDeg(rad):
    """Convert an angle given in radians to degrees."""
    as_degrees = (rad / math.pi) * 180.0
    return as_degrees
def setupBin1(rootNode, mirror,z):
    """Pass 1 (render bin 1): rasterize the mirror into the stencil buffer.

    Writes stencil value 1 wherever the mirror quad covers, with all colour
    writes masked off.  4294967295 is the all-ones (0xFFFFFFFF) stencil mask.
    NOTE(review): z is unused here; kept for signature symmetry with the
    other setupBin* helpers.
    """
    #// set up the stencil ops so that the stencil buffer get set at
    #// the mirror plane
    stencil = osg.Stencil();
    stencil.setFunction(osg.Stencil.ALWAYS,1,4294967295)
    stencil.setOperation(osg.Stencil.KEEP, osg.Stencil.KEEP, osg.Stencil.REPLACE);
    #// switch off the writing to the color bit planes.
    colorMask = osg.ColorMask();
    colorMask.setMask(False,False,False,False);
    statesetBin1 = osg.StateSet()
    statesetBin1.setRenderBinDetails(1,"RenderBin");
    statesetBin1.setMode(osg.GL_CULL_FACE,osg.StateAttribute.OFF);
    statesetBin1.setAttributeAndModes(stencil,osg.StateAttribute.ON);
    statesetBin1.setAttribute(colorMask);
    #// set up the mirror geode.
    geode = osg.Geode();
    geode.addDrawable(mirror);
    geode.setStateSet(statesetBin1);
    rootNode.addChild(geode);
#// bin two - draw scene without mirror or reflection, unset
#// stencil values where scene is in front of mirror and hence
#// occludes the mirror.
def setupBin2(rootNode, model,z):
    """Pass 2 (render bin 2): draw the unreflected scene, clearing stencil.

    Writes stencil value 0 wherever the model renders, so scene geometry in
    front of the mirror "unsets" the mirror's stencil marks and correctly
    occludes the reflection passes that follow.
    NOTE(review): z is unused here; kept for signature symmetry.
    """
    stencil = osg.Stencil();
    stencil.setFunction(osg.Stencil.ALWAYS,0,4294967295);
    stencil.setOperation(osg.Stencil.KEEP, osg.Stencil.KEEP, osg.Stencil.REPLACE);
    statesetBin2 = osg.StateSet();
    statesetBin2.setRenderBinDetails(2,"RenderBin");
    statesetBin2.setAttributeAndModes(stencil,osg.StateAttribute.ON);
    groupBin2 = osg.Group();
    groupBin2.setStateSet(statesetBin2);
    groupBin2.addChild(model);
    rootNode.addChild(groupBin2);
#// bin3 - set up the depth to the furthest depth value
def setupBin3(rootNode, mirror,z):
    """Pass 3 (render bin 3): push the depth buffer to the far plane where
    the mirror's stencil value survived, without touching colour.

    This clears the way for the reflected scene (bin 4) to be drawn "behind"
    the mirror surface.
    NOTE(review): z is unused here; kept for signature symmetry.
    """
    #// set up the stencil ops so that only operator on this mirrors stencil value.
    stencil = osg.Stencil()
    stencil.setFunction(osg.Stencil.EQUAL,1,4294967295);
    stencil.setOperation(osg.Stencil.KEEP, osg.Stencil.KEEP, osg.Stencil.KEEP);
    #// switch off the writing to the color bit planes.
    colorMask = osg.ColorMask();
    colorMask.setMask(False,False,False,False);
    #// set up depth so all writing to depth goes to maximum depth.
    depth = osg.Depth();
    depth.setFunction(osg.Depth.ALWAYS);
    depth.setRange(1.0,1.0);
    statesetBin3 = osg.StateSet();
    statesetBin3.setRenderBinDetails(3,"RenderBin");
    statesetBin3.setMode(osg.GL_CULL_FACE,osg.StateAttribute.OFF);
    statesetBin3.setAttributeAndModes(stencil,osg.StateAttribute.ON);
    statesetBin3.setAttribute(colorMask);
    statesetBin3.setAttribute(depth);
    #// set up the mirror geode.
    geode = osg.Geode();
    geode.addDrawable(mirror);
    geode.setStateSet(statesetBin3);
    rootNode.addChild(geode);
#// bin4 - draw the reflection.
def setupBin4(rootNode,model,z):
    """Pass 4 (render bin 4): draw the mirrored copy of the model.

    The model is flipped about the plane Z=z via a MatrixTransform
    (translate/scale/translate), shared with the unreflected scene graph,
    and clipped by a plane so the reflection cannot poke through the mirror.
    Drawing is restricted to pixels whose stencil value is still 1.
    """
    #// now create the 'reflection' of the loaded model by applying
    #// create a Transform which flips the loaded model about the z axis
    #// relative to the mirror node, the loadedModel is added to the
    #// Transform so now appears twice in the scene, but is shared so there
    #// is negligable memory overhead. Also use an osg.StateSet
    #// attached to the Transform to override the face culling on the subgraph
    #// to prevert an 'inside' out view of the reflected model.
    #// set up the stencil ops so that only operator on this mirrors stencil value.
    #// this clip plane removes any of the scene which when mirror would
    #// poke through the mirror. However, this clip plane should really
    #// flip sides once the eye point goes to the back of the mirror...
    clipplane = osg.ClipPlane();
    clipplane.setClipPlane(0.0,0.0,-1.0,z);
    clipplane.setClipPlaneNum(0);
    clipNode = osg.ClipNode();
    clipNode.addClipPlane(clipplane);
    dstate = clipNode.getOrCreateStateSet();
    dstate.setRenderBinDetails(4,"RenderBin");
    dstate.setMode(osg.GL_CULL_FACE,osg.StateAttribute.OVERRIDE|osg.StateAttribute.OFF);
    stencil = osg.Stencil();
    stencil.setFunction(osg.Stencil.EQUAL,1,4294967295);
    stencil.setOperation(osg.Stencil.KEEP, osg.Stencil.KEEP, osg.Stencil.KEEP);
    dstate.setAttributeAndModes(stencil,osg.StateAttribute.ON);
    # Mirror the subgraph about the Z=z plane: move the plane to the origin,
    # negate Z, then move it back.
    reverseMatrix = osg.MatrixTransform();
    reverseMatrix.stateSet = dstate
    reverseMatrix.preMult(osg.Matrix.translate(0.0,0.0,-z)*
                          osg.Matrix.scale(1.0,1.0,-1.0)*
                          osg.Matrix.translate(0.0,0.0,z));
    reverseMatrix.addChild(model);
    clipNode.addChild(reverseMatrix);
    rootNode.addChild(clipNode);
#// bin5 - draw the textured mirror and blend it with the reflection.
def setupBin5(rootNode, mirror,z):
    """Pass 5 (render bin 5): draw the textured mirror surface and blend it
    additively over the reflection, then zero the stencil marks.

    NOTE(review): z is unused here; kept for signature symmetry.
    """
    #// set up depth so all writing to depth goes to maximum depth.
    depth = osg.Depth();
    depth.setFunction(osg.Depth.ALWAYS);
    # Only where the mirror stencil survived; ZERO resets the stencil so the
    # buffer is clean for the next frame.
    stencil = osg.Stencil();
    stencil.setFunction(osg.Stencil.EQUAL,1,4294967295);
    stencil.setOperation(osg.Stencil.KEEP, osg.Stencil.KEEP, osg.Stencil.ZERO);
    #// set up additive blending.
    trans = osg.BlendFunc();
    trans.setFunction(osg.BlendFunc.ONE,osg.BlendFunc.ONE);
    statesetBin5 = createMirrorTexturedState("Images/tank.rgb");
    statesetBin5.setRenderBinDetails(5,"RenderBin");
    statesetBin5.setMode(osg.GL_CULL_FACE,osg.StateAttribute.OFF);
    statesetBin5.setAttributeAndModes(stencil,osg.StateAttribute.ON);
    statesetBin5.setAttributeAndModes(trans,osg.StateAttribute.ON);
    statesetBin5.setAttribute(depth);
    #// set up the mirror geode.
    geode = osg.Geode();
    geode.addDrawable(mirror);
    geode.setStateSet(statesetBin5);
    rootNode.addChild(geode);
def createMirroredScene(model):
    """Assemble the five-pass stencil-mirror scene around *model*.

    Sizes and places the mirror from the model's bounding sphere, tilts the
    whole subgraph 45 degrees about X, installs a root colour mask and depth
    state for the passes to inherit, then wires up render bins 1-5.
    Returns the root MatrixTransform.
    """
    #// calculate where to place the mirror according to the
    #// loaded models bounding sphere.
    bs = model.getBound();
    width_factor = 1.5;
    height_factor = 0.3;
    xMin = bs._center.x-bs._radius*width_factor;
    xMax = bs._center.x+bs._radius*width_factor;
    yMin = bs._center.y-bs._radius*width_factor;
    yMax = bs._center.y+bs._radius*width_factor;
    # Mirror plane sits slightly below the model's centre.
    z = bs._center.z-bs._radius*height_factor;
    #// create a textured, transparent node at the appropriate place.
    mirror = createMirrorSurface(xMin,xMax,yMin,yMax,z);
    rootNode = osg.MatrixTransform();
    rootNode.setMatrix(osg.Matrix.rotate(toRad(45.0),1.0,0.0,0.0));
    #// make sure that the global color mask exists.
    rootColorMask = osg.ColorMask();
    rootColorMask.setMask(True,True,True,True);
    #// set up depth to be inherited by the rest of the scene unless
    #// overridden. this is overridden in bin 3.
    rootDepth = osg.Depth()
    rootDepth.setFunction(osg.Depth.LESS);
    rootDepth.setRange(0.0,1.0);
    rootStateSet = osg.StateSet();
    rootStateSet.setAttribute(rootColorMask);
    rootStateSet.setAttribute(rootDepth);
    rootNode.setStateSet(rootStateSet)
    #// bin1 - set up the stencil values and depth for mirror.
    setupBin1(rootNode,mirror,z)
    setupBin2(rootNode,model,z)
    setupBin3(rootNode,mirror,z)
    setupBin4(rootNode,model,z)
    setupBin5(rootNode,mirror,z)
    return rootNode;
def main(argv):
    """Load the cessna model, build the mirrored scene and run the viewer.

    NOTE(review): this file is Python 2 (print statements); argv is accepted
    but not currently used — the model filename is hard-coded.
    """
    # use an ArgumentParser object to manage the program arguments.
    viewer = osgViewer.Viewer()
    # viewer.setThreadingModel(osgViewer.Viewer.SingleThreaded)
    # read the scene from the list of file specified commandline args.
    loadedModel = osgDB.readNodeFile("cessna.osg")
    if loadedModel == None:
        raise Exception('Could not load model file (is OSG_FILE_PATH set and correct?)')
    # create a transform to spin the model.
    loadedModelTransform = osg.MatrixTransform()
    loadedModelTransform.addChild(loadedModel)
    print loadedModelTransform.getBound()._center
    #todo: nc = osg.AnimationPathCallback(loadedModelTransform.getBound()._center,osg.Vec3(0.0,0.0,1.0),osg.inDegrees(45.0));
    # loadedModelTransform.setUpdateCallback(nc)
    rootNode = osg.Group()
    rootNode.stateSet.dataVariance = osg.Object.DYNAMIC
    rootNode.addChild(createMirroredScene(loadedModelTransform))
    viewer.addEventHandler(osgViewer.HelpHandler())
    viewer.addEventHandler(osgViewer.StatsHandler())
    viewer.addEventHandler(osgGA.StateSetManipulator(rootNode.stateSet))
    viewer.setSceneData(rootNode)
    print "set scene data"
    #hint to tell viewer to request stencil buffer when setting up windows
    # osg.DisplaySettings().setMinimumNumStencilBits(8)
    osg.DisplaySettings.instance().setMinimumNumStencilBits(8);
    # Dump the assembled graph for offline inspection.
    osgDB.writeNodeFile(rootNode, "test_reflect.osg");
    viewer.run() #we need run, because that sets up a trackballmanipulator and so we have the correct "look" into the scene.
    return 0
# Script entry point: run the demo with the command-line arguments.
if __name__ == "__main__":
    main(sys.argv)
| |
from __future__ import division, print_function, absolute_import
from ...utils.six.moves import xrange
import numpy as np
import nose
from dipy.io.bvectxt import orientation_from_string
from dipy.tracking.utils import (affine_for_trackvis, connectivity_matrix,
density_map, length, move_streamlines,
ndbincount, reduce_labels,
reorder_voxels_affine, seeds_from_mask,
target, _rmi, unique_rows)
import dipy.tracking.metrics as metrix
from dipy.tracking.vox2track import streamline_mapping
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_raises, assert_true
def make_streamlines():
    """Return a list of two short 3D streamlines (4 float points each)."""
    first = np.array([[0, 0, 0],
                      [1, 1, 1],
                      [2, 2, 2],
                      [5, 10, 12]], 'float')
    second = np.array([[1, 2, 3],
                       [3, 2, 0],
                       [5, 20, 33],
                       [40, 80, 120]], 'float')
    return [first, second]
def test_density_map():
    """density_map should count each streamline at most once per voxel and
    honour vol_dims/voxel_size, generators, and an affine mapping."""
    #One streamline diagonal in volume
    streamlines = [np.array([np.arange(10)]*3).T]
    shape = (10, 10, 10)
    x = np.arange(10)
    expected = np.zeros(shape)
    expected[x, x, x] = 1.
    dm = density_map(streamlines, vol_dims=shape, voxel_size=(1, 1, 1))
    assert_array_equal(dm, expected)
    #add streamline, make voxel_size smaller. Each streamline should only be
    #counted once, even if multiple points lie in a voxel
    streamlines.append(np.ones((5, 3)))
    shape = (5, 5, 5)
    x = np.arange(5)
    expected = np.zeros(shape)
    expected[x, x, x] = 1.
    expected[0, 0, 0] += 1
    dm = density_map(streamlines, vol_dims=shape, voxel_size=(2, 2, 2))
    assert_array_equal(dm, expected)
    #should work with a generator
    dm = density_map(iter(streamlines), vol_dims=shape, voxel_size=(2, 2, 2))
    assert_array_equal(dm, expected)
    # Test passing affine
    # Equivalent to voxel_size (2,2,2) with a half-voxel origin shift.
    affine = np.diag([2, 2, 2, 1.])
    affine[:3, 3] = 1.
    dm = density_map(streamlines, shape, affine=affine)
    assert_array_equal(dm, expected)
    # Shift the image by 2 voxels, ie 4mm
    affine[:3, 3] -= 4.
    expected_old = expected
    new_shape = [i + 2 for i in shape]
    expected = np.zeros(new_shape)
    expected[2:, 2:, 2:] = expected_old
    dm = density_map(streamlines, new_shape, affine=affine)
    assert_array_equal(dm, expected)
def test_connectivity_matrix():
    """connectivity_matrix should count label-pair connections, optionally
    symmetrize, return index or streamline mappings, and accept an affine."""
    label_volume = np.array([[[3, 0, 0],
                              [0, 0, 0],
                              [0, 0, 4]]])
    streamlines = [np.array([[0,0,0],[0,0,0],[0,2,2]], 'float'),
                   np.array([[0,0,0],[0,1,1],[0,2,2]], 'float'),
                   np.array([[0,2,2],[0,1,1],[0,0,0]], 'float')]
    # Two streamlines run 3 -> 4, one runs 4 -> 3.
    expected = np.zeros((5, 5), 'int')
    expected[3, 4] = 2
    expected[4, 3] = 1
    # Check basic Case
    matrix = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
                                 symmetric=False)
    assert_array_equal(matrix, expected)
    # Test mapping
    matrix, mapping = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
                                          symmetric=False, return_mapping=True)
    assert_array_equal(matrix, expected)
    assert_equal(mapping[3, 4], [0, 1])
    assert_equal(mapping[4, 3], [2])
    assert_equal(mapping.get((0, 0)), None)
    # Test mapping and symmetric
    matrix, mapping = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
                                          symmetric=True, return_mapping=True)
    assert_equal(mapping[3, 4], [0, 1, 2])
    # When symmetric only (3,4) is a key, not (4, 3)
    assert_equal(mapping.get((4, 3)), None)
    # expected output matrix is symmetric version of expected
    expected = expected + expected.T
    assert_array_equal(matrix, expected)
    # Test mapping_as_streamlines, mapping dict has lists of streamlines
    matrix, mapping = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
                                          symmetric=False,
                                          return_mapping=True,
                                          mapping_as_streamlines=True)
    assert_true(mapping[3, 4][0] is streamlines[0])
    assert_true(mapping[3, 4][1] is streamlines[1])
    assert_true(mapping[4, 3][0] is streamlines[2])
    # Test passing affine to connectivity_matrix
    # NOTE(review): `expected` is stored but not asserted against below;
    # kept for parity with the original test.
    expected = matrix
    affine = np.diag([-1, -1, -1, 1.])
    streamlines = [-i for i in streamlines]
    matrix = connectivity_matrix(streamlines, label_volume, affine=affine)
    # In the symmetrical case, the matrix should be, well, symmetric:
    # (The original compared matrix[4, 3] with itself, which is a tautology.)
    assert_equal(matrix[3, 4], matrix[4, 3])
def test_ndbincount():
    """ndbincount should count N-d index occurrences, honour an explicit
    output shape and per-sample weights, and reject too-small shapes."""
    def check(expected):
        # Verify the four occupied bins of the current `bc` result.
        assert_equal(bc[0, 0], expected[0])
        assert_equal(bc[0, 1], expected[1])
        assert_equal(bc[1, 0], expected[2])
        assert_equal(bc[2, 2], expected[3])
    x = np.array([[0, 0], [0, 0], [0, 1], [0, 1], [1, 0], [2, 2]]).T
    expected = [2, 2, 1, 1]
    #count occurrences in x
    bc = ndbincount(x)
    assert_equal(bc.shape, (3, 3))
    check(expected)
    #pass in shape
    bc = ndbincount(x, shape=(4, 5))
    assert_equal(bc.shape, (4, 5))
    check(expected)
    #pass in weights
    weights = np.arange(6.)
    weights[-1] = 1.23
    # (Fixed variable-name typo: was `expeceted`.)
    expected = [1., 5., 4., 1.23]
    bc = ndbincount(x, weights=weights)
    check(expected)
    #raises an error if shape is too small
    assert_raises(ValueError, ndbincount, x, None, (2, 2))
def test_reduce_labels():
    """reduce_labels should remap arbitrary labels onto 0..N-1 and return a
    lookup table from new labels back to the originals."""
    shape = (4, 5, 6)
    #labels from 100 to 220
    labels = np.arange(100, np.prod(shape)+100).reshape(shape)
    #new labels from 0 to 120, and lookup maps range(0,120) to range(100, 220)
    new_labels, lookup = reduce_labels(labels)
    assert_array_equal(new_labels, labels-100)
    assert_array_equal(lookup, labels.ravel())
def test_move_streamlines():
    """move_streamlines should apply identity, translation, axis-permuting
    and invertible affines, and snapshot the affine at call time."""
    streamlines = make_streamlines()
    # Identity affine: output equals input.
    affine = np.eye(4)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i])
    # Pure translation.
    affine[:3,3] += (4,5,6)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i]+(4, 5, 6))
    # Row permutation swaps the x and z coordinates.
    affine = np.eye(4)
    affine = affine[[2,1,0,3]]
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i][:, [2, 1, 0]])
    # Moving out with `affine` and back with input_space=affine round-trips.
    affine[:3,3] += (4,5,6)
    new_streamlines = move_streamlines(streamlines, affine)
    undo_affine = move_streamlines(new_streamlines, np.eye(4),
                                   input_space=affine)
    for i, test_sl in enumerate(undo_affine):
        assert_array_almost_equal(test_sl, streamlines[i])
    # Test that mutating the affine after the call does NOT affect the
    # already-created streamline generators (they keep their own copy).
    affineA = affine.copy()
    affineB = affine.copy()
    streamlinesA = move_streamlines(streamlines, affineA)
    streamlinesB = move_streamlines(streamlines, affineB)
    affineB[:] = 0
    for (a, b) in zip(streamlinesA, streamlinesB):
        assert_array_equal(a, b)
def test_target():
    """target should include/exclude streamlines by mask membership, raise
    on out-of-volume points, and snapshot mask/affine at call time."""
    streamlines = [np.array([[0., 0., 0.],
                             [1., 0., 0.],
                             [2., 0., 0.]]),
                   np.array([[0., 0., 0],
                             [0, 1., 1.],
                             [0, 2., 2.]])
                   ]
    affine = np.eye(4)
    mask = np.zeros((4, 4, 4), dtype=bool)
    mask[0, 0, 0] = True
    # Both pass though
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 2)
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 0)
    # only first
    mask[:] = False
    mask[1, 0, 0] = True
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])
    # Test that bad points raise a value error
    # (points outside the 4x4x4 mask volume)
    bad_sl = [ np.array([[10., 10., 10.]])]
    new = target(bad_sl, mask, affine=affine)
    assert_raises(ValueError, list, new)
    bad_sl = [-np.array([[10., 10., 10.]])]
    new = target(bad_sl, mask, affine=affine)
    assert_raises(ValueError, list, new)
    # Test smaller voxels
    # Random (but invertible-in-practice) affine with unit bottom row.
    affine = np.random.random((4, 4)) - .5
    affine[3] = [0, 0, 0, 1]
    streamlines = list(move_streamlines(streamlines, affine))
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])
    # Test that changing mask and affine do not break target
    # (the generators must have captured their own copies)
    include = target(streamlines, mask, affine=affine)
    exclude = target(streamlines, mask, affine=affine, include=False)
    affine[:] = np.eye(4)
    mask[:] = False
    include = list(include)
    exclude = list(exclude)
    assert_equal(len(include), 1)
    assert_true(include[0] is streamlines[0])
    assert_equal(len(exclude), 1)
    assert_true(exclude[0] is streamlines[1])
def test_voxel_ornt():
    """reorder_voxels_affine should be identity for same orientations, and
    its forward/backward affines for different orientations should compose
    to identity and transform streamlines as expected."""
    sh = (40, 40, 40)
    sz = (1, 2, 3)
    I4 = np.eye(4)
    ras = orientation_from_string('ras')
    sra = orientation_from_string('sra')
    lpi = orientation_from_string('lpi')
    srp = orientation_from_string('srp')
    # Same input/output orientation -> identity affine.
    affine = reorder_voxels_affine(ras, ras, sh, sz)
    assert_array_equal(affine, I4)
    affine = reorder_voxels_affine(sra, sra, sh, sz)
    assert_array_equal(affine, I4)
    affine = reorder_voxels_affine(lpi, lpi, sh, sz)
    assert_array_equal(affine, I4)
    affine = reorder_voxels_affine(srp, srp, sh, sz)
    assert_array_equal(affine, I4)
    streamlines = make_streamlines()
    box = np.array(sh)*sz
    # ras <-> sra: a pure axis permutation; forward and inverse compose to I.
    sra_affine = reorder_voxels_affine(ras, sra, sh, sz)
    toras_affine = reorder_voxels_affine(sra, ras, sh, sz)
    assert_array_equal(np.dot(toras_affine, sra_affine), I4)
    expected_sl = (sl[:, [2, 0, 1]] for sl in streamlines)
    test_sl = move_streamlines(streamlines, sra_affine)
    for ii in xrange(len(streamlines)):
        assert_array_equal(next(test_sl), next(expected_sl))
    # ras <-> lpi: all three axes flipped about the box.
    lpi_affine = reorder_voxels_affine(ras, lpi, sh, sz)
    toras_affine = reorder_voxels_affine(lpi, ras, sh, sz)
    assert_array_equal(np.dot(toras_affine, lpi_affine), I4)
    expected_sl = (box - sl for sl in streamlines)
    test_sl = move_streamlines(streamlines, lpi_affine)
    for ii in xrange(len(streamlines)):
        assert_array_equal(next(test_sl), next(expected_sl))
    # ras <-> srp: permutation plus one flip; the inverse call uses the
    # permuted voxel sizes (3, 1, 2).
    srp_affine = reorder_voxels_affine(ras, srp, sh, sz)
    toras_affine = reorder_voxels_affine(srp, ras, (40 ,40, 40), (3, 1, 2))
    assert_array_equal(np.dot(toras_affine, srp_affine), I4)
    expected_sl = [sl.copy() for sl in streamlines]
    for sl in expected_sl:
        sl[:, 1] = box[1] - sl[:, 1]
    expected_sl = (sl[:, [2, 0, 1]] for sl in expected_sl)
    test_sl = move_streamlines(streamlines, srp_affine)
    for ii in xrange(len(streamlines)):
        assert_array_equal(next(test_sl), next(expected_sl))
def test_streamline_mapping():
    """streamline_mapping should map voxels to streamline indices (or the
    streamlines themselves) and respect an affine / voxel size."""
    streamlines = [np.array([[0, 0, 0], [0, 0, 0], [0, 2, 2]], 'float'),
                   np.array([[0, 0, 0], [0, 1, 1], [0, 2, 2]], 'float'),
                   np.array([[0, 2, 2], [0, 1, 1], [0, 0, 0]], 'float')]
    mapping = streamline_mapping(streamlines, (1, 1, 1))
    expected = {(0, 0, 0):[0, 1, 2], (0, 2, 2):[0, 1, 2], (0, 1, 1):[1, 2]}
    assert_equal(mapping, expected)
    # mapping_as_streamlines=True returns the arrays instead of indices.
    mapping = streamline_mapping(streamlines, (1, 1, 1),
                                 mapping_as_streamlines=True)
    expected = dict((k, [streamlines[i] for i in indices])
                    for k, indices in expected.items())
    assert_equal(mapping, expected)
    # Test passing affine
    # Half-voxel origin shift: same voxel assignment as above.
    affine = np.eye(4)
    affine[:3, 3] = .5
    mapping = streamline_mapping(streamlines, affine=affine,
                                 mapping_as_streamlines=True)
    assert_equal(mapping, expected)
    # Make the voxel size smaller
    # Halving the voxel size doubles every voxel index.
    affine = np.diag([.5, .5, .5, 1.])
    affine[:3, 3] = .25
    expected = dict((tuple(i*2 for i in key), value)
                    for key, value in expected.items())
    mapping = streamline_mapping(streamlines, affine=affine,
                                 mapping_as_streamlines=True)
    assert_equal(mapping, expected)
def test_rmi():
    """_rmi should behave like numpy's ravel_multi_index: correct values,
    ValueError on out-of-range indices, and no overflow for small dtypes."""
    I1 = _rmi([3, 4], [10, 10])
    assert_equal(I1, 34)
    I1 = _rmi([0, 0], [10, 10])
    assert_equal(I1, 0)
    # Index 10 is out of range for a dimension of size 10.
    assert_raises(ValueError, _rmi, [10, 0], [10, 10])
    # The remaining checks need numpy's reference implementation.
    try:
        from numpy import ravel_multi_index
    except ImportError:
        raise nose.SkipTest()
    # Dtype of random integers is system dependent
    A, B, C, D = np.random.randint(0, 1000, size=[4, 100])
    I1 = _rmi([A, B], dims=[1000, 1000])
    I2 = ravel_multi_index([A, B], dims=[1000, 1000])
    assert_array_equal(I1, I2)
    I1 = _rmi([A, B, C, D], dims=[1000]*4)
    I2 = ravel_multi_index([A, B, C, D], dims=[1000]*4)
    assert_array_equal(I1, I2)
    # Check for overflow with small int types
    indices = np.random.randint(0, 255, size=(2, 100))
    dims = (1000, 1000)
    I1 = _rmi(indices, dims=dims)
    I2 = ravel_multi_index(indices, dims=dims)
    assert_array_equal(I1, I2)
def test_affine_for_trackvis():
    """The trackvis affine should map voxel (0,0,0) to the voxel center,
    i.e. half the voxel size."""
    voxel_size = np.array([1., 2, 3.])
    affine = affine_for_trackvis(voxel_size)
    origin = np.dot(affine, [0, 0, 0, 1])
    assert_array_almost_equal(origin[:3], voxel_size / 2)
def test_length():
    """length() over a bundle should match dipy.tracking.metrics.length on
    each individual streamline."""
    # Generate a simulated bundle of fibers:
    n_streamlines=50
    n_pts=100
    t = np.linspace(-10, 10, n_pts)
    bundle = []
    for i in np.linspace(3, 5, n_streamlines):
        pts = np.vstack((np.cos(2 * t/np.pi), np.zeros(t.shape) + i, t )).T
        bundle.append(pts)
    # Randomly crop and scale each streamline so lengths differ.
    start = np.random.randint(10, 30, n_streamlines)
    end = np.random.randint(60, 100, n_streamlines)
    bundle = [10 * streamline[start[i]:end[i]] for (i, streamline) in
              enumerate(bundle)]
    bundle_lengths = length(bundle)
    for idx, this_length in enumerate(bundle_lengths):
        assert_equal(this_length, metrix.length(bundle[idx]))
def test_seeds_from_mask():
    """seeds_from_mask should produce density-many seeds per True voxel,
    all confined to their voxel."""
    # NOTE(review): np.random.random_integers is deprecated in newer numpy;
    # np.random.randint(0, 2, ...) is the modern equivalent.
    mask = np.random.random_integers(0, 1, size=(10, 10, 10))
    seeds = seeds_from_mask(mask, density=1)
    assert_equal(mask.sum(), len(seeds))
    assert_array_equal(np.argwhere(mask), seeds)
    # One voxel, density 3x4x5 -> 60 seeds, all within that voxel.
    mask[:] = False
    mask[3, 3, 3] = True
    seeds = seeds_from_mask(mask, density=[3, 4, 5])
    assert_equal(len(seeds), 3 * 4 * 5)
    assert_true(np.all((seeds > 2.5) & (seeds < 3.5)))
    # Two voxels -> 120 seeds, split evenly between the voxels.
    mask[4, 4, 4] = True
    seeds = seeds_from_mask(mask, density=[3, 4, 5])
    assert_equal(len(seeds), 2 * 3 * 4 * 5)
    assert_true(np.all((seeds > 2.5) & (seeds < 4.5)))
    in_333 = ((seeds > 2.5) & (seeds < 3.5)).all(1)
    assert_equal(in_333.sum(), 3 * 4 * 5)
    in_444 = ((seeds > 3.5) & (seeds < 4.5)).all(1)
    assert_equal(in_444.sum(), 3 * 4 * 5)
def test_connectivity_matrix_shape():
    """The connectivity matrix shape should cover all label values present
    (here labels 0..2 -> a 3x3 matrix)."""
    # Labels: z-planes have labels 0,1,2
    labels = np.zeros((3, 3, 3), dtype=int)
    labels[:, :, 1] = 1
    labels[:, :, 2] = 2
    # Streamline set, only moves between first two z-planes.
    streamlines = [np.array([[0., 0., 0.],
                             [0., 0., 0.5],
                             [0., 0., 1.]]),
                   np.array([[0., 1., 1.],
                             [0., 1., 0.5],
                             [0., 1., 0.]])]
    matrix = connectivity_matrix(streamlines, labels, affine=np.eye(4))
    assert_equal(matrix.shape, (3, 3))
def test_unique_rows():
    """
    Testing the function unique_rows: duplicates removed, first-seen order
    preserved.
    """
    arr = np.array([[1,2,3],[1,2,3],[2,3,4],[3,4,5]])
    arr_w_unique = np.array([[1,2,3],[2,3,4],[3,4,5]])
    assert_array_equal(unique_rows(arr), arr_w_unique)
    # Should preserve order:
    arr = np.array([[2,3,4],[1,2,3],[1,2,3],[3,4,5]])
    arr_w_unique = np.array([[2,3,4],[1,2,3],[3,4,5]])
    assert_array_equal(unique_rows(arr), arr_w_unique)
    # Should work even with longer arrays:
    arr = np.array([[2,3,4],[1,2,3],[1,2,3],[3,4,5],
                    [6,7,8],[0,1,0],[1,0,1]])
    arr_w_unique = np.array([[2,3,4],[1,2,3],[3,4,5],
                             [6,7,8],[0,1,0],[1,0,1]])
    assert_array_equal(unique_rows(arr), arr_w_unique)
| |
import datetime
import functools
from dateutil.parser import parse as parse_date
from modularodm import (
fields,
Q,
)
from modularodm.exceptions import NoResultsFound
from modularodm.validators import MaxLengthValidator
from framework.auth import Auth
from framework.exceptions import PermissionsError
from framework.mongo import (
ObjectId,
StoredObject,
validators,
)
from website import (
mails,
settings,
tokens,
)
from website.exceptions import (
InvalidSanctionApprovalToken,
InvalidSanctionRejectionToken,
NodeStateError,
)
from website.prereg import utils as prereg_utils
VIEW_PROJECT_URL_TEMPLATE = settings.DOMAIN + '{node_id}/'
class Sanction(StoredObject):
    """Abstract base class for objects that track a multi-admin approval state.

    A Sanction starts UNAPPROVED and moves to APPROVED, REJECTED, or
    (for embargoes) COMPLETED.  Subclasses must implement the approve/reject
    entry points and the _on_reject/_on_complete callbacks.
    """
    # Tell modularodm not to attach backends
    _meta = {
        'abstract': True,
    }
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    # Neither approved nor cancelled
    UNAPPROVED = 'unapproved'
    # Has approval
    APPROVED = 'approved'
    # Rejected by at least one person
    REJECTED = 'rejected'
    # Embargo has been completed
    COMPLETED = 'completed'
    state = fields.StringField(
        default=UNAPPROVED,
        validate=validators.choice_in((
            UNAPPROVED,
            APPROVED,
            REJECTED,
            COMPLETED,
        ))
    )
    DISPLAY_NAME = 'Sanction'
    # SHORT_NAME must correspond with the associated foreign field to query against,
    # e.g. Node.find_one(Q(sanction.SHORT_NAME, 'eq', sanction))
    SHORT_NAME = 'sanction'
    APPROVAL_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to approve this {DISPLAY_NAME}'
    APPROVAL_INVALID_TOKEN_MESSAGE = 'Invalid approval token provided for this {DISPLAY_NAME}.'
    # NOTE: constant name misspelling ("MESSAEGE") is preserved because
    # subclasses reference it by this exact name.
    REJECTION_NOT_AUTHORIZED_MESSAEGE = 'This user is not authorized to reject this {DISPLAY_NAME}'
    REJECTION_INVALID_TOKEN_MESSAGE = 'Invalid rejection token provided for this {DISPLAY_NAME}.'
    # Controls whether or not the Sanction needs unanimous approval or just a single approval
    ANY = 'any'
    UNANIMOUS = 'unanimous'
    mode = UNANIMOUS
    initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    # Expiration date-- Sanctions in the UNAPPROVED state that are older than their end_date
    # are automatically made ACTIVE by a daily cron job
    # Use end_date=None for a non-expiring Sanction
    end_date = fields.DateTimeField(default=None)
    # Sanction subclasses must have an initiated_by field
    # initiated_by = fields.ForeignField('user', backref='initiated')
    # Expanded: Dictionary field mapping admin IDs their approval status and relevant tokens:
    # {
    #   'b3k97': {
    #     'has_approved': False,
    #     'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN',
    #     'rejection_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'}
    # }
    approval_state = fields.DictionaryField()
    def __repr__(self):
        return '<Sanction(end_date={self.end_date!r}) with _id {self._id!r}>'.format(self=self)
    @property
    def is_pending_approval(self):
        """Whether this sanction is still awaiting a decision."""
        return self.state == Sanction.UNAPPROVED
    @property
    def is_approved(self):
        """Whether this sanction has been approved."""
        return self.state == Sanction.APPROVED
    @property
    def is_rejected(self):
        """Whether this sanction has been rejected."""
        return self.state == Sanction.REJECTED
    def approve(self, user):
        """Record *user*'s approval; must be implemented by subclasses."""
        raise NotImplementedError('Sanction subclasses must implement an approve method.')
    def reject(self, user):
        """Record *user*'s rejection; must be implemented by subclasses."""
        # Fixed error message: it previously said "approve method".
        raise NotImplementedError('Sanction subclasses must implement a reject method.')
    def _on_reject(self, user):
        """Callback for rejection of a Sanction
        :param User user:
        """
        raise NotImplementedError('Sanction subclasses must implement an #_on_reject method')
    def _on_complete(self, user):
        """Callback for when a Sanction has approval and enters the ACTIVE state
        :param User user:
        """
        raise NotImplementedError('Sanction subclasses must implement an #_on_complete method')
    def forcibly_reject(self):
        """Move to REJECTED without token checks (administrative override)."""
        self.state = Sanction.REJECTED
class TokenApprovableSanction(Sanction):
    """Sanction whose approvals/rejections are authorized by per-user tokens.

    Each authorizer gets an approval token and a rejection token; approve()
    and reject() verify the presented token before changing state.
    """
    # Tell modularodm not to attach backends
    _meta = {
        'abstract': True,
    }
    def _validate_authorizer(self, user):
        """Subclasses may choose to provide extra restrictions on who can be an authorizer
        :return Boolean: True if user is allowed to be an authorizer else False
        """
        return True
    def add_authorizer(self, user, node, approved=False, save=False):
        """Add an admin user to this Sanction's approval state.
        :param User user: User to add.
        :param Node registration: The pending registration node.
        :param bool approved: Whether `user` has approved.
        :param bool save: Whether to save this object.
        :return bool: True if the user was added, False if invalid/duplicate.
        """
        valid = self._validate_authorizer(user)
        if valid and user._id not in self.approval_state:
            # Encode one approval and one rejection token per authorizer,
            # each bound to this user, this sanction, and the action.
            self.approval_state[user._id] = {
                'has_approved': approved,
                'node_id': node._id,
                'approval_token': tokens.encode(
                    {
                        'user_id': user._id,
                        'sanction_id': self._id,
                        'action': 'approve_{}'.format(self.SHORT_NAME)
                    }
                ),
                'rejection_token': tokens.encode(
                    {
                        'user_id': user._id,
                        'sanction_id': self._id,
                        'action': 'reject_{}'.format(self.SHORT_NAME)
                    }
                ),
            }
            if save:
                self.save()
            return True
        return False
    def remove_authorizer(self, user, save=False):
        """Remove a user as an authorizer
        :param User user:
        :return Boolean: True if user is removed else False
        """
        if user._id not in self.approval_state:
            return False
        del self.approval_state[user._id]
        if save:
            self.save()
        return True
    def _on_approve(self, user, token):
        """Callback for when a single user approves a Sanction. Calls #_on_complete under two conditions:
        - mode is ANY and the Sanction has not already been cancelled
        - mode is UNANIMOUS and all users have given approval
        :param User user:
        :param str token: user's approval token
        """
        if self.mode == self.ANY or all(authorizer['has_approved'] for authorizer in self.approval_state.values()):
            self.state = Sanction.APPROVED
            self._on_complete(user)
    def token_for_user(self, user, method):
        """
        :param str method: 'approval' | 'rejection'
        :raises PermissionsError: if *user* is not an authorizer
        """
        try:
            user_state = self.approval_state[user._id]
        except KeyError:
            raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        return user_state['{0}_token'.format(method)]
    def approve(self, user, token):
        """Add user to approval list if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['approval_token'] != token:
                raise InvalidSanctionApprovalToken(self.APPROVAL_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        except KeyError:
            # user._id not in approval_state -> not an authorizer.
            raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        self.approval_state[user._id]['has_approved'] = True
        self._on_approve(user, token)
    def reject(self, user, token):
        """Cancels sanction if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['rejection_token'] != token:
                raise InvalidSanctionRejectionToken(self.REJECTION_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        except KeyError:
            raise PermissionsError(self.REJECTION_NOT_AUTHORIZED_MESSAEGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        self.state = Sanction.REJECTED
        self._on_reject(user)
    def _notify_authorizer(self, user, node):
        # Hook for subclasses; no-op by default.
        pass
    def _notify_non_authorizer(self, user, node):
        # Hook for subclasses; no-op by default.
        pass
    def ask(self, group):
        """
        :param list group: List of (user, node) tuples containing contributors to notify about the
        sanction.
        """
        for contrib, node in group:
            if contrib._id in self.approval_state:
                self._notify_authorizer(contrib, node)
            else:
                self._notify_non_authorizer(contrib, node)
class EmailApprovableSanction(TokenApprovableSanction):
    """TokenApprovableSanction that notifies contributors via templated email.

    Subclasses supply the mail templates and the context builders used to
    format view/approve/reject URLs into those emails.
    """
    # Tell modularodm not to attach backends
    _meta = {
        'abstract': True,
    }
    # Mail templates for authorizers / other contributors; the notify hooks
    # raise NotImplementedError when the relevant template is unset.
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
    VIEW_URL_TEMPLATE = ''
    APPROVE_URL_TEMPLATE = ''
    REJECT_URL_TEMPLATE = ''
    # A flag to conditionally run a callback on complete
    notify_initiator_on_complete = fields.BooleanField(default=False)
    # Store a persistent copy of urls for use when needed outside of a request context.
    # This field gets automagically updated whenever models approval_state is modified
    # and the model is saved
    # {
    #   'abcde': {
    #     'approve': [APPROVAL_URL],
    #     'reject': [REJECT_URL],
    #   }
    # }
    stashed_urls = fields.DictionaryField(default=dict)
    @staticmethod
    def _format_or_empty(template, context):
        # Format the template only when a context was produced; an empty
        # string signals "no URL available".
        if context:
            return template.format(**context)
        return ''
    def _view_url(self, user_id, node):
        return self._format_or_empty(self.VIEW_URL_TEMPLATE, self._view_url_context(user_id, node))
    def _view_url_context(self, user_id, node):
        # Subclasses override to supply template variables; None -> no URL.
        return None
    def _approval_url(self, user_id):
        return self._format_or_empty(self.APPROVE_URL_TEMPLATE, self._approval_url_context(user_id))
    def _approval_url_context(self, user_id):
        # Subclasses override to supply template variables; None -> no URL.
        return None
    def _rejection_url(self, user_id):
        return self._format_or_empty(self.REJECT_URL_TEMPLATE, self._rejection_url_context(user_id))
    def _rejection_url_context(self, user_id):
        # Subclasses override to supply template variables; None -> no URL.
        return None
    def _send_approval_request_email(self, user, template, context):
        # Deliver the approval-request email to the user's address.
        mails.send_mail(
            user.username,
            template,
            user=user,
            **context
        )
    def _email_template_context(self, user, node, is_authorizer=False):
        # Subclasses override to supply the email's template variables.
        return {}
    def _notify_authorizer(self, authorizer, node):
        context = self._email_template_context(authorizer, node, is_authorizer=True)
        if self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
            self._send_approval_request_email(authorizer, self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
        else:
            raise NotImplementedError
    def _notify_non_authorizer(self, user, node):
        context = self._email_template_context(user, node)
        if self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
            self._send_approval_request_email(user, self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
        else:
            raise NotImplementedError
    def add_authorizer(self, user, node, **kwargs):
        """Add an authorizer and stash their view/approve/reject URLs.

        NOTE(review): the parent's boolean return value is discarded, so the
        URLs are stashed (and the model saved) even when the user was not
        actually added — confirm whether that is intended.
        """
        super(EmailApprovableSanction, self).add_authorizer(user, node, **kwargs)
        self.stashed_urls[user._id] = {
            'view': self._view_url(user._id, node),
            'approve': self._approval_url(user._id),
            'reject': self._rejection_url(user._id)
        }
        self.save()
    def _notify_initiator(self):
        # Subclasses must implement when notify_initiator_on_complete is set.
        raise NotImplementedError
    def _on_complete(self, *args):
        if self.notify_initiator_on_complete:
            self._notify_initiator()
class PreregCallbackMixin(object):
    """Mixin adding Preregistration Challenge email behavior to sanctions."""

    def _notify_initiator(self):
        """Email the draft's initiator when a prereg-schema registration completes."""
        from website.project.model import DraftRegistration

        registration = self._get_registration()
        prereg_schema = prereg_utils.get_prereg_schema()
        draft = DraftRegistration.find_one(
            Q('registered_node', 'eq', registration)
        )
        # Only prereg-challenge registrations trigger the acceptance email.
        if prereg_schema in registration.registered_schema:
            mails.send_mail(
                draft.initiator.username,
                mails.PREREG_CHALLENGE_ACCEPTED,
                user=draft.initiator,
                registration_url=registration.absolute_url,
                mimetype='html'
            )

    def _email_template_context(self, user, node, is_authorizer=False, urls=None):
        """Add a prereg-specific custom message to notification emails, if applicable."""
        registration = self._get_registration()
        prereg_schema = prereg_utils.get_prereg_schema()
        if prereg_schema not in registration.registered_schema:
            return {}
        return {
            'custom_message': ' as part of the Preregistration Challenge (https://cos.io/prereg)'
        }
class Embargo(PreregCallbackMixin, EmailApprovableSanction):
    """Embargo object for registrations waiting to go public."""

    DISPLAY_NAME = 'Embargo'
    SHORT_NAME = 'embargo'

    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    # User who initiated this embargo.
    initiated_by = fields.ForeignField('user', backref='embargoed')
    # True when the embargo was placed on an already-existing registration.
    for_existing_registration = fields.BooleanField(default=False)

    @property
    def is_completed(self):
        """Whether the embargo has run its course and been lifted."""
        return self.state == self.COMPLETED

    @property
    def embargo_end_date(self):
        """Return the end date while the embargo is active, else False."""
        if self.state == self.APPROVED:
            return self.end_date
        return False

    # NOTE(hrybacki): Old, private registrations are grandfathered and do not
    # require to be made public or embargoed. This field differentiates them
    # from new registrations entering into an embargo field which should not
    # show up in any search related fields.
    @property
    def pending_registration(self):
        """True for new (non-grandfathered) registrations still awaiting approval."""
        return not self.for_existing_registration and self.is_pending_approval

    def __repr__(self):
        from website.project.model import Node
        parent_registration = None
        try:
            parent_registration = Node.find_one(Q('embargo', 'eq', self))
        except NoResultsFound:
            pass
        return ('<Embargo(parent_registration={0}, initiated_by={1}, '
                'end_date={2}) with _id {3}>').format(
            parent_registration,
            self.initiated_by,
            self.end_date,
            self._id
        )

    def _get_registration(self):
        """Return the registration Node this embargo is attached to."""
        from website.project.model import Node
        return Node.find_one(Q('embargo', 'eq', self))

    def _view_url_context(self, user_id, node):
        """Format kwargs for VIEW_URL_TEMPLATE."""
        registration = node or self._get_registration()
        return {
            'node_id': registration._id
        }

    def _approval_url_context(self, user_id):
        """Format kwargs for APPROVE_URL_TEMPLATE, or None when the user has no token."""
        user_approval_state = self.approval_state.get(user_id, {})
        approval_token = user_approval_state.get('approval_token')
        if approval_token:
            registration = self._get_registration()
            node_id = user_approval_state.get('node_id', registration._id)
            return {
                'node_id': node_id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        """Format kwargs for REJECT_URL_TEMPLATE, or None when the user has no token."""
        user_approval_state = self.approval_state.get(user_id, {})
        rejection_token = user_approval_state.get('rejection_token')
        if rejection_token:
            from website.project.model import Node
            root_registration = self._get_registration()
            node_id = user_approval_state.get('node_id', root_registration._id)
            registration = Node.load(node_id)
            return {
                # Fix: the URL template expects the id string, not the Node
                # object itself (matches Retraction._rejection_url_context).
                'node_id': registration.registered_from._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, node, is_authorizer=False, urls=None):
        """Build the mail-merge context for the pending-embargo notification email."""
        context = super(Embargo, self)._email_template_context(
            user,
            node,
            is_authorizer=is_authorizer
        )
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id, node))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending time is communicated in hours in the email template.
            approval_time_span = settings.EMBARGO_PENDING_TIME.days * 24
            registration = self._get_registration()
            context.update({
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'approval_link': approval_link,
                'project_name': registration.title,
                'disapproval_link': disapproval_link,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
                'approval_time_span': approval_time_span,
            })
        else:
            context.update({
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
            })
        return context

    def _on_reject(self, user):
        """Log the cancellation and tear down new registrations the embargo created."""
        from website.project.model import NodeLog
        parent_registration = self._get_registration()
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_CANCELLED,
            params={
                'node': parent_registration.registered_from_id,
                'registration': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(user),
        )
        # If the registration was created at the time the embargo was
        # initiated (i.e. not an existing registration), it should not
        # outlive the rejected embargo: remove the backref to the parent
        # project and delete the registration tree. (Previously two separate
        # identical conditionals; merged for clarity.)
        if not self.for_existing_registration:
            parent_registration.delete_registration_tree(save=True)
            parent_registration.registered_from = None
            parent_registration.is_deleted = True
            parent_registration.save()

    def disapprove_embargo(self, user, token):
        """Cancels embargo if user is admin and token verifies."""
        self.reject(user, token)

    def _on_complete(self, user):
        """Log approval once all authorizers have approved; refuse spammy registrations."""
        from website.project.model import NodeLog
        parent_registration = self._get_registration()
        if parent_registration.is_spammy:
            raise NodeStateError('Cannot complete a spammy registration.')
        super(Embargo, self)._on_complete(user)
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_APPROVED,
            params={
                'node': parent_registration.registered_from_id,
                'registration': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(self.initiated_by),
        )
        self.save()

    def approve_embargo(self, user, token):
        """Add user to approval list if user is admin and token verifies."""
        self.approve(user, token)

    def mark_as_completed(self):
        """Mark the embargo as having run its full course."""
        self.state = Sanction.COMPLETED
        self.save()
class Retraction(EmailApprovableSanction):
    """
    Retraction object for public registrations.
    Externally (specifically in user-facing language) retractions should be referred to as "Withdrawals", i.e.
    "Retract Registration" -> "Withdraw Registration", "Retracted" -> "Withdrawn", etc.
    """
    DISPLAY_NAME = 'Retraction'
    SHORT_NAME = 'retraction'

    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    # User who initiated the retraction.
    initiated_by = fields.ForeignField('user', backref='initiated')
    # Optional free-text reason for the withdrawal, capped at 2048 characters.
    justification = fields.StringField(default=None, validate=MaxLengthValidator(2048))

    def __repr__(self):
        from website.project.model import Node
        parent_registration = None
        try:
            parent_registration = Node.find_one(Q('retraction', 'eq', self))
        except NoResultsFound:
            # Repr must not fail even if the retraction is orphaned.
            pass
        return ('<Retraction(parent_registration={0}, initiated_by={1}) '
                'with _id {2}>').format(
            parent_registration,
            self.initiated_by,
            self._id
        )

    def _view_url_context(self, user_id, node):
        """Format kwargs for VIEW_URL_TEMPLATE."""
        from website.project.model import Node
        registration = Node.find_one(Q('retraction', 'eq', self))
        return {
            'node_id': registration._id
        }

    def _approval_url_context(self, user_id):
        """Format kwargs for APPROVE_URL_TEMPLATE, or None when the user has no token."""
        user_approval_state = self.approval_state.get(user_id, {})
        approval_token = user_approval_state.get('approval_token')
        if approval_token:
            from website.project.model import Node
            root_registration = Node.find_one(Q('retraction', 'eq', self))
            node_id = user_approval_state.get('node_id', root_registration._id)
            return {
                'node_id': node_id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        """Format kwargs for REJECT_URL_TEMPLATE, or None when the user has no token."""
        user_approval_state = self.approval_state.get(user_id, {})
        rejection_token = user_approval_state.get('rejection_token')
        if rejection_token:
            from website.project.model import Node
            root_registration = Node.find_one(Q('retraction', 'eq', self))
            node_id = user_approval_state.get('node_id', root_registration._id)
            registration = Node.load(node_id)
            return {
                # Rejection links point back at the original (source) node.
                'node_id': registration.registered_from._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, node, is_authorizer=False, urls=None):
        """Build the mail-merge context for the pending-retraction notification email."""
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id, node))
        if is_authorizer:
            from website.project.model import Node
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending time is communicated in hours in the email template.
            approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24
            registration = Node.find_one(Q('retraction', 'eq', self))
            return {
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'project_name': registration.title,
                'registration_link': registration_link,
                'approval_link': approval_link,
                'disapproval_link': disapproval_link,
                'approval_time_span': approval_time_span,
            }
        else:
            return {
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
            }

    def _on_reject(self, user):
        """Log cancellation of the retraction on the source node."""
        from website.project.model import Node, NodeLog
        parent_registration = Node.find_one(Q('retraction', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.RETRACTION_CANCELLED,
            params={
                'node': parent_registration.registered_from_id,
                'registration': parent_registration._id,
                'retraction_id': self._id,
            },
            auth=Auth(user),
            save=True,
        )

    def _on_complete(self, user):
        """Finalize the retraction: log it, cancel any embargo, and make the tree public."""
        from website.project.model import Node, NodeLog
        parent_registration = Node.find_one(Q('retraction', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.RETRACTION_APPROVED,
            params={
                'node': parent_registration.registered_from_id,
                'retraction_id': self._id,
                'registration': parent_registration._id
            },
            auth=Auth(self.initiated_by),
        )
        # Remove any embargoes associated with the registration
        if parent_registration.embargo_end_date or parent_registration.is_pending_embargo:
            parent_registration.embargo.state = self.REJECTED
            parent_registration.registered_from.add_log(
                action=NodeLog.EMBARGO_CANCELLED,
                params={
                    'node': parent_registration.registered_from_id,
                    'registration': parent_registration._id,
                    'embargo_id': parent_registration.embargo._id,
                },
                auth=Auth(self.initiated_by),
            )
            parent_registration.embargo.save()
        # Ensure retracted registration is public
        # Pass auth=None because the registration initiator may not be
        # an admin on components (component admins had the opportunity
        # to disapprove the retraction by this point)
        for node in parent_registration.node_and_primary_descendants():
            node.set_privacy('public', auth=None, save=True, log=False)
            node.update_search()
        parent_registration.date_modified = datetime.datetime.utcnow()
        parent_registration.save()

    def approve_retraction(self, user, token):
        """Record *user*'s approval of the retraction if *token* verifies."""
        self.approve(user, token)

    def disapprove_retraction(self, user, token):
        """Cancel the retraction if *user* is an admin and *token* verifies."""
        self.reject(user, token)
class RegistrationApproval(PreregCallbackMixin, EmailApprovableSanction):
    """Sanction gating the creation of a (non-embargoed) public registration."""

    DISPLAY_NAME = 'Approval'
    SHORT_NAME = 'registration_approval'

    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    # User who initiated the registration.
    initiated_by = fields.ForeignField('user', backref='registration_approved')

    def _get_registration(self):
        """Return the registration Node gated by this approval."""
        from website.project.model import Node
        return Node.find_one(Q('registration_approval', 'eq', self))

    def _view_url_context(self, user_id, node):
        """Format kwargs for VIEW_URL_TEMPLATE; prefers the node id stashed per authorizer."""
        user_approval_state = self.approval_state.get(user_id, {})
        node_id = user_approval_state.get('node_id', node._id)
        return {
            'node_id': node_id
        }

    def _approval_url_context(self, user_id):
        """Format kwargs for APPROVE_URL_TEMPLATE, or None when the user has no token."""
        user_approval_state = self.approval_state.get(user_id, {})
        approval_token = user_approval_state.get('approval_token')
        if approval_token:
            registration = self._get_registration()
            node_id = user_approval_state.get('node_id', registration._id)
            return {
                'node_id': node_id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        """Format kwargs for REJECT_URL_TEMPLATE, or None when the user has no token."""
        user_approval_state = self.approval_state.get(user_id, {})
        # Reuse the state fetched above (previously a second, redundant
        # self.approval_state.get(user_id, {}) lookup).
        rejection_token = user_approval_state.get('rejection_token')
        if rejection_token:
            from website.project.model import Node
            root_registration = self._get_registration()
            node_id = user_approval_state.get('node_id', root_registration._id)
            registration = Node.load(node_id)
            return {
                # Rejection links point back at the original (source) node.
                'node_id': registration.registered_from._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, node, is_authorizer=False, urls=None):
        """Build the mail-merge context for the pending-registration notification email."""
        context = super(RegistrationApproval, self)._email_template_context(user, node, is_authorizer, urls)
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id, node))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending time is communicated in hours in the email template.
            approval_time_span = settings.REGISTRATION_APPROVAL_TIME.days * 24
            registration = self._get_registration()
            context.update({
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'approval_link': approval_link,
                'disapproval_link': disapproval_link,
                'approval_time_span': approval_time_span,
                'project_name': registration.title,
            })
        else:
            context.update({
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
            })
        return context

    def _add_success_logs(self, node, user):
        """Add a PROJECT_REGISTERED log entry to *node*'s source project."""
        from website.project.model import NodeLog
        src = node.registered_from
        src.add_log(
            action=NodeLog.PROJECT_REGISTERED,
            params={
                'parent_node': src.parent_id,
                'node': src._primary_key,
                'registration': node._primary_key,
            },
            auth=Auth(user),
            save=False
        )
        src.save()

    def _on_complete(self, user):
        """Approve the registration: make the tree public, log, and update search."""
        from website.project.model import NodeLog
        register = self._get_registration()
        if register.is_spammy:
            # Fixed message typo ('a a spammy' -> 'a spammy').
            raise NodeStateError('Cannot approve a spammy registration')
        super(RegistrationApproval, self)._on_complete(user)
        self.state = Sanction.APPROVED
        registered_from = register.registered_from
        # Pass auth=None because the registration initiator may not be
        # an admin on components (component admins had the opportunity
        # to disapprove the registration by this point)
        register.set_privacy('public', auth=None, log=False)
        for child in register.get_descendants_recursive(lambda n: n.primary):
            child.set_privacy('public', auth=None, log=False)
        # Accounts for system actions where no `User` performs the final approval
        auth = Auth(user) if user else None
        registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_APPROVED,
            params={
                'node': registered_from._id,
                'registration': register._id,
                'registration_approval_id': self._id,
            },
            auth=auth,
        )
        for node in register.root.node_and_primary_descendants():
            self._add_success_logs(node, user)
            node.update_search()  # update search if public
        self.save()

    def _on_reject(self, user):
        """Reject the registration: delete the registration tree and log the cancellation."""
        from website.project.model import NodeLog
        register = self._get_registration()
        registered_from = register.registered_from
        register.delete_registration_tree(save=True)
        registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_CANCELLED,
            params={
                'node': registered_from._id,
                'registration': register._id,
                'registration_approval_id': self._id,
            },
            auth=Auth(user),
        )
class DraftRegistrationApproval(Sanction):
    """Sanction for draft registrations that require manual (prereg admin) sign-off."""

    # Any single admin decision resolves the sanction.
    mode = Sanction.ANY

    # Since draft registrations that require approval are not immediately registered,
    # meta stores registration_choice and embargo_end_date (when applicable)
    meta = fields.DictionaryField(default=dict)

    def _send_rejection_email(self, user, draft):
        """Email *user* that *draft* was rejected (prereg schema only, for now)."""
        schema = draft.registration_schema
        prereg_schema = prereg_utils.get_prereg_schema()
        if schema._id == prereg_schema._id:
            mails.send_mail(
                user.username,
                mails.PREREG_CHALLENGE_REJECTED,
                user=user,
                draft_url=draft.absolute_url
            )
        else:
            raise NotImplementedError(
                'TODO: add a generic email template for registration approvals'
            )

    def approve(self, user):
        """Approve the draft; only prereg admins may do so.

        :raises PermissionsError: if *user* lacks the prereg admin tag.
        """
        if settings.PREREG_ADMIN_TAG not in user.system_tags:
            raise PermissionsError('This user does not have permission to approve this draft.')
        self.state = Sanction.APPROVED
        self._on_complete(user)

    def reject(self, user):
        """Reject the draft; only prereg admins may do so.

        :raises PermissionsError: if *user* lacks the prereg admin tag.
        """
        if settings.PREREG_ADMIN_TAG not in user.system_tags:
            # Fixed copy-paste error: message previously said 'approve'.
            raise PermissionsError('This user does not have permission to reject this draft.')
        self.state = Sanction.REJECTED
        self._on_reject(user)

    def _on_complete(self, user):
        """Register the approved draft, then attach the requested follow-up sanction."""
        from website.project.model import DraftRegistration
        draft = DraftRegistration.find_one(
            Q('approval', 'eq', self)
        )
        auth = Auth(draft.initiator)
        registration = draft.register(
            auth=auth,
            save=True
        )
        registration_choice = self.meta['registration_choice']
        if registration_choice == 'immediate':
            sanction = functools.partial(registration.require_approval, draft.initiator)
        elif registration_choice == 'embargo':
            sanction = functools.partial(
                registration.embargo_registration,
                draft.initiator,
                parse_date(self.meta.get('embargo_end_date'), ignoretz=True)
            )
        else:
            raise ValueError("'registration_choice' must be either 'embargo' or 'immediate'")
        sanction(notify_initiator_on_complete=True)

    def _on_reject(self, user, *args, **kwargs):
        """Clear stored registration options and notify the draft's initiator."""
        from website.project.model import DraftRegistration
        # clear out previous registration options
        self.meta = {}
        self.save()
        draft = DraftRegistration.find_one(
            Q('approval', 'eq', self)
        )
        self._send_rejection_email(draft.initiator, draft)
class EmbargoTerminationApproval(EmailApprovableSanction):
    """Sanction for requests to end an embargo early and make the registration public."""

    DISPLAY_NAME = 'Embargo Termination Request'
    SHORT_NAME = 'embargo_termination_approval'

    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_TERMINATION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_TERMINATION_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    # The embargoed registration whose embargo this request would terminate.
    embargoed_registration = fields.ForeignField('node')

    def _get_registration(self):
        """Return the embargoed registration this request targets."""
        return self.embargoed_registration

    def _view_url_context(self, user_id, node):
        """Format kwargs for VIEW_URL_TEMPLATE."""
        registration = node or self._get_registration()
        return {
            'node_id': registration._id
        }

    def _approval_url_context(self, user_id):
        """Format kwargs for APPROVE_URL_TEMPLATE, or None when the user has no token."""
        user_approval_state = self.approval_state.get(user_id, {})
        approval_token = user_approval_state.get('approval_token')
        if approval_token:
            registration = self._get_registration()
            node_id = user_approval_state.get('node_id', registration._id)
            return {
                'node_id': node_id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        """Format kwargs for REJECT_URL_TEMPLATE, or None when the user has no token."""
        user_approval_state = self.approval_state.get(user_id, {})
        rejection_token = user_approval_state.get('rejection_token')
        if rejection_token:
            root_registration = self._get_registration()
            node_id = user_approval_state.get('node_id', root_registration._id)
            return {
                'node_id': node_id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, node, is_authorizer=False, urls=None):
        """Build the mail-merge context for the termination-request notification email."""
        context = super(EmbargoTerminationApproval, self)._email_template_context(
            user,
            node,
            is_authorizer=is_authorizer
        )
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id, node))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending time is communicated in hours in the email template.
            approval_time_span = settings.EMBARGO_TERMINATION_PENDING_TIME.days * 24
            registration = self._get_registration()
            # NOTE(review): self.initiated_by is referenced below but no such
            # field is declared on this class -- presumably defined on a base
            # class or added elsewhere; confirm before relying on these emails.
            context.update({
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'approval_link': approval_link,
                'project_name': registration.title,
                'disapproval_link': disapproval_link,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
                'approval_time_span': approval_time_span,
            })
        else:
            context.update({
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
            })
        return context

    def _on_complete(self, user=None):
        """Terminate the embargo once the request is fully approved."""
        super(EmbargoTerminationApproval, self)._on_complete(user)
        registration = self._get_registration()
        registration.terminate_embargo(Auth(user) if user else None)

    def _on_reject(self, user=None):
        # Just forget this ever happened.
        self.embargoed_registration.embargo_termination_approval = None
        self.embargoed_registration.save()
| |
"""Support for Rflink devices."""
import asyncio
from collections import defaultdict
import logging
import async_timeout
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_COMMAND, CONF_HOST, CONF_PORT,
STATE_ON, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.deprecation import get_deprecated
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.dispatcher import (
async_dispatcher_send, async_dispatcher_connect)
from homeassistant.helpers.restore_state import RestoreEntity
# Python package pulled in by Home Assistant's requirements machinery.
REQUIREMENTS = ['rflink==0.0.37']

_LOGGER = logging.getLogger(__name__)

# Attributes used on fired bus events.
ATTR_EVENT = 'event'
ATTR_STATE = 'state'

# Configuration keys. The *_ALIASSES variants appear to be legacy misspelled
# names kept for backwards compatibility (the deprecation helper is imported
# above) -- confirm against the platform code that reads them.
CONF_ALIASES = 'aliases'
CONF_ALIASSES = 'aliasses'
CONF_GROUP_ALIASES = 'group_aliases'
CONF_GROUP_ALIASSES = 'group_aliasses'
CONF_GROUP = 'group'
CONF_NOGROUP_ALIASES = 'nogroup_aliases'
CONF_NOGROUP_ALIASSES = 'nogroup_aliasses'
CONF_DEVICE_DEFAULTS = 'device_defaults'
CONF_DEVICE_ID = 'device_id'
CONF_DEVICES = 'devices'
CONF_AUTOMATIC_ADD = 'automatic_add'
CONF_FIRE_EVENT = 'fire_event'
CONF_IGNORE_DEVICES = 'ignore_devices'
CONF_RECONNECT_INTERVAL = 'reconnect_interval'
CONF_SIGNAL_REPETITIONS = 'signal_repetitions'
CONF_WAIT_FOR_ACK = 'wait_for_ack'

# Keys under hass.data used to share state between this component and its
# platforms.
DATA_DEVICE_REGISTER = 'rflink_device_register'
DATA_ENTITY_LOOKUP = 'rflink_entity_lookup'
DATA_ENTITY_GROUP_LOOKUP = 'rflink_entity_group_only_lookup'

DEFAULT_RECONNECT_INTERVAL = 10
DEFAULT_SIGNAL_REPETITIONS = 1
# Seconds to wait for the initial serial/tcp connection before retrying.
CONNECTION_TIMEOUT = 10

EVENT_BUTTON_PRESSED = 'button_pressed'

# Keys appearing in parsed Rflink event dictionaries.
EVENT_KEY_COMMAND = 'command'
EVENT_KEY_ID = 'id'
EVENT_KEY_SENSOR = 'sensor'
EVENT_KEY_UNIT = 'unit'

# Commands addressed to a whole group of devices.
RFLINK_GROUP_COMMANDS = ['allon', 'alloff']

DOMAIN = 'rflink'

SERVICE_SEND_COMMAND = 'send_command'

# Dispatcher signal names.
SIGNAL_AVAILABILITY = 'rflink_device_available'
SIGNAL_HANDLE_EVENT = 'rflink_handle_event_{}'

# Placeholder entity id registered before a newly discovered device is created.
TMP_ENTITY = 'tmp.{}'

# Per-device defaults applied by platforms.
DEVICE_DEFAULTS_SCHEMA = vol.Schema({
    vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
    vol.Optional(CONF_SIGNAL_REPETITIONS,
                 default=DEFAULT_SIGNAL_REPETITIONS): vol.Coerce(int),
})

# Component configuration schema (configuration.yaml).
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_PORT): vol.Any(cv.port, cv.string),
        vol.Optional(CONF_HOST): cv.string,
        vol.Optional(CONF_WAIT_FOR_ACK, default=True): cv.boolean,
        vol.Optional(CONF_RECONNECT_INTERVAL,
                     default=DEFAULT_RECONNECT_INTERVAL): int,
        vol.Optional(CONF_IGNORE_DEVICES, default=[]):
            vol.All(cv.ensure_list, [cv.string]),
    }),
}, extra=vol.ALLOW_EXTRA)

# Schema for the rflink.send_command service call.
SEND_COMMAND_SCHEMA = vol.Schema({
    vol.Required(CONF_DEVICE_ID): cv.string,
    vol.Required(CONF_COMMAND): cv.string,
})
def identify_event_type(event):
    """Look at event to determine type of device.

    Async friendly.
    """
    # Command events take precedence over sensor events, matching the
    # order the keys are checked.
    for known_type in (EVENT_KEY_COMMAND, EVENT_KEY_SENSOR):
        if known_type in event:
            return known_type
    return 'unknown'
async def async_setup(hass, config):
    """Set up the Rflink component.

    Creates the serial/tcp connection to the Rflink gateway, registers the
    send_command service, and wires incoming gateway events to entities.
    """
    from rflink.protocol import create_rflink_connection
    import serial

    # Allow entities to register themselves by device_id to be looked up when
    # new rflink events arrive to be handled
    hass.data[DATA_ENTITY_LOOKUP] = {
        EVENT_KEY_COMMAND: defaultdict(list),
        EVENT_KEY_SENSOR: defaultdict(list),
    }
    # Separate lookup for entities that should only react to group commands.
    hass.data[DATA_ENTITY_GROUP_LOOKUP] = {
        EVENT_KEY_COMMAND: defaultdict(list),
    }

    # Allow platform to specify function to register new unknown devices
    hass.data[DATA_DEVICE_REGISTER] = {}

    async def async_send_command(call):
        """Send Rflink command."""
        _LOGGER.debug('Rflink command for %s', str(call.data))
        # send_command returns falsy when the gateway did not acknowledge.
        if not (await RflinkCommand.send_command(
                call.data.get(CONF_DEVICE_ID),
                call.data.get(CONF_COMMAND))):
            _LOGGER.error('Failed Rflink command for %s', str(call.data))

    hass.services.async_register(
        DOMAIN, SERVICE_SEND_COMMAND, async_send_command,
        schema=SEND_COMMAND_SCHEMA)

    @callback
    def event_callback(event):
        """Handle incoming Rflink events.

        Rflink events arrive as dictionaries of varying content
        depending on their type. Identify the events and distribute
        accordingly.
        """
        event_type = identify_event_type(event)
        _LOGGER.debug('event of type %s: %s', event_type, event)

        # Don't propagate non entity events (eg: version string, ack response)
        if event_type not in hass.data[DATA_ENTITY_LOOKUP]:
            _LOGGER.debug('unhandled event of type: %s', event_type)
            return

        # Lookup entities who registered this device id as device id or alias
        event_id = event.get(EVENT_KEY_ID, None)

        # Group commands (allon/alloff) are routed through the group lookup.
        is_group_event = (event_type == EVENT_KEY_COMMAND and
                          event[EVENT_KEY_COMMAND] in RFLINK_GROUP_COMMANDS)
        if is_group_event:
            entity_ids = hass.data[DATA_ENTITY_GROUP_LOOKUP][event_type].get(
                event_id, [])
        else:
            entity_ids = hass.data[DATA_ENTITY_LOOKUP][event_type][event_id]

        _LOGGER.debug('entity_ids: %s', entity_ids)
        if entity_ids:
            # Propagate event to every entity matching the device id
            for entity in entity_ids:
                _LOGGER.debug('passing event to %s', entity)
                async_dispatcher_send(hass,
                                      SIGNAL_HANDLE_EVENT.format(entity),
                                      event)
        elif not is_group_event:
            # If device is not yet known, register with platform (if loaded)
            if event_type in hass.data[DATA_DEVICE_REGISTER]:
                _LOGGER.debug('device_id not known, adding new device')
                # Add bogus event_id first to avoid race if we get another
                # event before the device is created
                # Any additional events received before the device has been
                # created will thus be ignored.
                hass.data[DATA_ENTITY_LOOKUP][event_type][
                    event_id].append(TMP_ENTITY.format(event_id))
                hass.async_create_task(
                    hass.data[DATA_DEVICE_REGISTER][event_type](event))
            else:
                _LOGGER.debug('device_id not known and automatic add disabled')

    # When connecting to tcp host instead of serial port (optional)
    host = config[DOMAIN].get(CONF_HOST)
    # TCP port when host configured, otherwise serial port
    port = config[DOMAIN][CONF_PORT]

    @callback
    def reconnect(exc=None):
        """Schedule reconnect after connection has been unexpectedly lost."""
        # Reset protocol binding before starting reconnect
        RflinkCommand.set_rflink_protocol(None)

        # Mark all entities unavailable while disconnected.
        async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)

        # If HA is not stopping, initiate new connection
        if hass.state != CoreState.stopping:
            _LOGGER.warning('disconnected from Rflink, reconnecting')
            hass.async_create_task(connect())

    async def connect():
        """Set up connection and hook it into HA for reconnect/shutdown."""
        _LOGGER.info('Initiating Rflink connection')

        # Rflink create_rflink_connection decides based on the value of host
        # (string or None) if serial or tcp mode should be used

        # Initiate serial/tcp connection to Rflink gateway
        connection = create_rflink_connection(
            port=port,
            host=host,
            event_callback=event_callback,
            disconnect_callback=reconnect,
            loop=hass.loop,
            ignore=config[DOMAIN][CONF_IGNORE_DEVICES]
        )
        try:
            with async_timeout.timeout(CONNECTION_TIMEOUT,
                                       loop=hass.loop):
                transport, protocol = await connection
        except (serial.serialutil.SerialException, ConnectionRefusedError,
                TimeoutError, OSError, asyncio.TimeoutError) as exc:
            reconnect_interval = config[DOMAIN][CONF_RECONNECT_INTERVAL]
            _LOGGER.exception(
                "Error connecting to Rflink, reconnecting in %s",
                reconnect_interval)
            # Connection to Rflink device is lost, make entities unavailable
            async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)

            hass.loop.call_later(reconnect_interval, reconnect, exc)
            return

        # There is a valid connection to a Rflink device now so
        # mark entities as available
        async_dispatcher_send(hass, SIGNAL_AVAILABILITY, True)

        # Bind protocol to command class to allow entities to send commands
        RflinkCommand.set_rflink_protocol(
            protocol, config[DOMAIN][CONF_WAIT_FOR_ACK])

        # handle shutdown of Rflink asyncio transport
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP,
                                   lambda x: transport.close())

        _LOGGER.info('Connected to Rflink')

    hass.async_create_task(connect())
    return True
class RflinkDevice(Entity):
"""Representation of a Rflink device.
Contains the common logic for Rflink entities.
"""
platform = None
_state = None
_available = True
def __init__(self, device_id, initial_event=None, name=None, aliases=None,
group=True, group_aliases=None, nogroup_aliases=None,
fire_event=False,
signal_repetitions=DEFAULT_SIGNAL_REPETITIONS):
"""Initialize the device."""
# Rflink specific attributes for every component type
self._initial_event = initial_event
self._device_id = device_id
if name:
self._name = name
else:
self._name = device_id
self._aliases = aliases
self._group = group
self._group_aliases = group_aliases
self._nogroup_aliases = nogroup_aliases
self._should_fire_event = fire_event
self._signal_repetitions = signal_repetitions
@callback
def handle_event_callback(self, event):
"""Handle incoming event for device type."""
# Call platform specific event handler
self._handle_event(event)
# Propagate changes through ha
self.async_schedule_update_ha_state()
# Put command onto bus for user to subscribe to
if self._should_fire_event and identify_event_type(
event) == EVENT_KEY_COMMAND:
self.hass.bus.async_fire(EVENT_BUTTON_PRESSED, {
ATTR_ENTITY_ID: self.entity_id,
ATTR_STATE: event[EVENT_KEY_COMMAND],
})
_LOGGER.debug("Fired bus event for %s: %s",
self.entity_id, event[EVENT_KEY_COMMAND])
def _handle_event(self, event):
"""Platform specific event handler."""
raise NotImplementedError()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return a name for the device."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
if self.assumed_state:
return False
return self._state
@property
def assumed_state(self):
"""Assume device state until first device event sets state."""
return self._state is None
@property
def available(self):
"""Return True if entity is available."""
return self._available
@callback
def _availability_callback(self, availability):
"""Update availability state."""
self._available = availability
self.async_schedule_update_ha_state()
    async def async_added_to_hass(self):
        """Register update callback.

        Registers this entity's id (and any aliases) in the shared event
        lookup tables so incoming Rflink events can be routed to it, hooks
        up availability/event dispatcher signals, and finally replays the
        initial event, if any.
        """
        await super().async_added_to_hass()
        # Remove temporary bogus entity_id if added
        tmp_entity = TMP_ENTITY.format(self._device_id)
        if tmp_entity in self.hass.data[DATA_ENTITY_LOOKUP][
                EVENT_KEY_COMMAND][self._device_id]:
            self.hass.data[DATA_ENTITY_LOOKUP][
                EVENT_KEY_COMMAND][self._device_id].remove(tmp_entity)

        # Register id and aliases
        self.hass.data[DATA_ENTITY_LOOKUP][
            EVENT_KEY_COMMAND][self._device_id].append(self.entity_id)
        if self._group:
            self.hass.data[DATA_ENTITY_GROUP_LOOKUP][
                EVENT_KEY_COMMAND][self._device_id].append(self.entity_id)
        # aliases respond to both normal and group commands (allon/alloff)
        if self._aliases:
            for _id in self._aliases:
                self.hass.data[DATA_ENTITY_LOOKUP][
                    EVENT_KEY_COMMAND][_id].append(self.entity_id)
                self.hass.data[DATA_ENTITY_GROUP_LOOKUP][
                    EVENT_KEY_COMMAND][_id].append(self.entity_id)
        # group_aliases only respond to group commands (allon/alloff)
        if self._group_aliases:
            for _id in self._group_aliases:
                self.hass.data[DATA_ENTITY_GROUP_LOOKUP][
                    EVENT_KEY_COMMAND][_id].append(self.entity_id)
        # nogroup_aliases only respond to normal commands
        if self._nogroup_aliases:
            for _id in self._nogroup_aliases:
                self.hass.data[DATA_ENTITY_LOOKUP][
                    EVENT_KEY_COMMAND][_id].append(self.entity_id)
        # Wire up dispatcher signals for availability and targeted events.
        async_dispatcher_connect(self.hass, SIGNAL_AVAILABILITY,
                                 self._availability_callback)
        async_dispatcher_connect(self.hass,
                                 SIGNAL_HANDLE_EVENT.format(self.entity_id),
                                 self.handle_event_callback)

        # Process the initial event now that the entity is created
        if self._initial_event:
            self.handle_event_callback(self._initial_event)
class RflinkCommand(RflinkDevice):
    """Singleton class to make Rflink command interface available to entities.

    This class is to be inherited by every Entity class that is actionable
    (switches/lights). It exposes the Rflink command interface for these
    entities.

    The Rflink interface is managed as a class level and set during setup (and
    reset on reconnect).
    """

    # Keep repetition tasks to cancel if state is changed before repetitions
    # are sent
    _repetition_task = None

    _protocol = None

    # Default for acknowledgement waiting. A class-level default avoids an
    # AttributeError in _async_send_command() when set_rflink_protocol() was
    # called without an explicit wait_ack.
    _wait_ack = None

    @classmethod
    def set_rflink_protocol(cls, protocol, wait_ack=None):
        """Set the Rflink asyncio protocol as a class variable."""
        cls._protocol = protocol
        if wait_ack is not None:
            cls._wait_ack = wait_ack

    @classmethod
    def is_connected(cls):
        """Return connection status."""
        return bool(cls._protocol)

    @classmethod
    async def send_command(cls, device_id, action):
        """Send device command to Rflink and wait for acknowledgement."""
        return await cls._protocol.send_command_ack(device_id, action)

    async def _async_handle_command(self, command, *args):
        """Do bookkeeping for command, send it to rflink and update state.

        Raises:
            ValueError: if ``command`` is not one of the known commands
                (previously this fell through and raised an unbound-local
                NameError on ``cmd``).
        """
        self.cancel_queued_send_commands()

        if command == 'turn_on':
            cmd = 'on'
            self._state = True

        elif command == 'turn_off':
            cmd = 'off'
            self._state = False

        elif command == 'dim':
            # convert brightness to rflink dim level
            cmd = str(int(args[0] / 17))
            self._state = True

        elif command == 'toggle':
            cmd = 'on'
            # if the state is unknown or false, it gets set as true
            # if the state is true, it gets set as false
            self._state = self._state in [None, False]

        # Cover options for RFlink
        elif command == 'close_cover':
            cmd = 'DOWN'
            self._state = False

        elif command == 'open_cover':
            cmd = 'UP'
            self._state = True

        elif command == 'stop_cover':
            cmd = 'STOP'
            self._state = True

        else:
            # Fail fast and explicitly instead of the NameError that an
            # unknown command used to trigger further below.
            raise ValueError('Unknown command: %s' % command)

        # Send initial command and queue repetitions.
        # This allows the entity state to be updated quickly and not having to
        # wait for all repetitions to be sent
        await self._async_send_command(cmd, self._signal_repetitions)

        # Update state of entity
        await self.async_update_ha_state()

    def cancel_queued_send_commands(self):
        """Cancel queued signal repetition commands.

        For example when user changed state while repetitions are still
        queued for broadcast. Or when an incoming Rflink command (remote
        switch) changes the state.
        """
        # cancel any outstanding tasks from the previous state change
        if self._repetition_task:
            self._repetition_task.cancel()

    async def _async_send_command(self, cmd, repetitions):
        """Send a command for device to Rflink gateway.

        Re-schedules itself (as a task) until ``repetitions`` is exhausted.
        """
        _LOGGER.debug(
            "Sending command: %s to Rflink device: %s", cmd, self._device_id)

        if not self.is_connected():
            raise HomeAssistantError('Cannot send command, not connected!')

        if self._wait_ack:
            # Puts command on outgoing buffer then waits for Rflink to confirm
            # the command has been send out in the ether.
            await self._protocol.send_command_ack(self._device_id, cmd)
        else:
            # Puts command on outgoing buffer and returns straight away.
            # Rflink protocol/transport handles asynchronous writing of buffer
            # to serial/tcp device. Does not wait for command send
            # confirmation.
            self._protocol.send_command(self._device_id, cmd)

        if repetitions > 1:
            self._repetition_task = self.hass.async_create_task(
                self._async_send_command(cmd, repetitions - 1))
class SwitchableRflinkDevice(RflinkCommand, RestoreEntity):
    """Rflink entity which can switch on/off (eg: light, switch)."""

    async def async_added_to_hass(self):
        """Restore RFLink device state (ON/OFF)."""
        await super().async_added_to_hass()
        restored = await self.async_get_last_state()
        if restored is not None:
            self._state = restored.state == STATE_ON

    def _handle_event(self, event):
        """Adjust state if Rflink picks up a remote command for this device."""
        self.cancel_queued_send_commands()

        received = event['command']
        if received in ('on', 'allon'):
            self._state = True
        elif received in ('off', 'alloff'):
            self._state = False

    def async_turn_on(self, **kwargs):
        """Turn the device on."""
        return self._async_handle_command("turn_on")

    def async_turn_off(self, **kwargs):
        """Turn the device off."""
        return self._async_handle_command("turn_off")
# Old (misspelled) config option names kept for backwards compatibility.
DEPRECATED_CONFIG_OPTIONS = [
    CONF_ALIASSES,
    CONF_GROUP_ALIASSES,
    CONF_NOGROUP_ALIASSES]
# Correctly spelled replacements; index-aligned with the list above
# (remove_deprecated relies on that pairing).
REPLACEMENT_CONFIG_OPTIONS = [
    CONF_ALIASES,
    CONF_GROUP_ALIASES,
    CONF_NOGROUP_ALIASES]
def remove_deprecated(config):
    """Remove deprecated config options from device config."""
    for deprecated_option, replacement_option in zip(
            DEPRECATED_CONFIG_OPTIONS, REPLACEMENT_CONFIG_OPTIONS):
        if deprecated_option not in config:
            continue
        # generate deprecation warning
        get_deprecated(config, replacement_option, deprecated_option)
        # remove old config value replacing new one
        config[replacement_option] = config.pop(deprecated_option)
| |
'''Deep Dreaming in Keras.
Run the script with:
```
python deep_dream.py path_to_your_base_image.jpg prefix_for_results
```
e.g.:
```
python deep_dream.py img/mypic.jpg results/dream
```
It is preferable to run this script on GPU, for speed.
If running on CPU, prefer the TensorFlow backend (much faster).
Example results: http://i.imgur.com/FX6ROg9.jpg
'''
from __future__ import print_function
from keras.preprocessing.image import load_img, img_to_array
import numpy as np
from scipy.misc import imsave
from scipy.optimize import fmin_l_bfgs_b
import time
import argparse
from keras.applications import vgg16
from keras import backend as K
from keras.layers import Input
# Command-line interface: input image and output filename prefix.
parser = argparse.ArgumentParser(description='Deep Dreams with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
                    help='Path to the image to transform.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
                    help='Prefix for the saved results.')

args = parser.parse_args()
base_image_path = args.base_image_path
result_prefix = args.result_prefix

# dimensions of the generated picture.
img_width = 600
img_height = 600

# path to the model weights file.
# NOTE(review): weights_path appears unused in this script (VGG16 weights
# are fetched through keras.applications below) — confirm before removing.
weights_path = 'vgg16_weights.h5'

# some settings we found interesting
# Each entry: per-layer feature weights plus continuity/L2/jitter strengths.
saved_settings = {
    'bad_trip': {'features': {'block4_conv1': 0.05,
                              'block4_conv2': 0.01,
                              'block4_conv3': 0.01},
                 'continuity': 0.1,
                 'dream_l2': 0.8,
                 'jitter': 5},
    'dreamy': {'features': {'block5_conv1': 0.05,
                            'block5_conv2': 0.02},
               'continuity': 0.1,
               'dream_l2': 0.02,
               'jitter': 0},
}
# the settings we will use in this experiment
settings = saved_settings['dreamy']
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path):
    """Open, resize and format a picture into a batched VGG16 input tensor."""
    raw = load_img(image_path, target_size=(img_width, img_height))
    arr = img_to_array(raw)
    batched = np.expand_dims(arr, axis=0)
    return vgg16.preprocess_input(batched)
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Convert a flat network tensor back into a valid uint8 RGB image."""
    if K.image_dim_ordering() == 'th':
        # channels-first: reshape then move channels last
        x = x.reshape((3, img_width, img_height))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_width, img_height, 3))
    # Remove zero-center by mean pixel (ImageNet per-channel means, BGR order)
    for channel, mean in enumerate((103.939, 116.779, 123.68)):
        x[:, :, channel] += mean
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    return np.clip(x, 0, 255).astype('uint8')
# Image tensor layout depends on the backend: channels-first ('th') vs last.
if K.image_dim_ordering() == 'th':
    img_size = (3, img_width, img_height)
else:
    img_size = (img_width, img_height, 3)
# this will contain our generated image
dream = Input(batch_shape=(1,) + img_size)

# build the VGG16 network with our placeholder
# the model will be loaded with pre-trained ImageNet weights
model = vgg16.VGG16(input_tensor=dream,
                    weights='imagenet', include_top=False)
print('Model loaded.')

# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])
# continuity loss util function
def continuity_loss(x):
    """Total-variation-style penalty on neighbouring pixel differences."""
    assert K.ndim(x) == 4
    # Squared differences between each pixel and its right/down neighbour,
    # with the slicing axis chosen per backend layout.
    if K.image_dim_ordering() == 'th':
        row_diff = K.square(x[:, :, :img_width - 1, :img_height - 1] -
                            x[:, :, 1:, :img_height - 1])
        col_diff = K.square(x[:, :, :img_width - 1, :img_height - 1] -
                            x[:, :, :img_width - 1, 1:])
    else:
        row_diff = K.square(x[:, :img_width - 1, :img_height - 1, :] -
                            x[:, 1:, :img_height - 1, :])
        col_diff = K.square(x[:, :img_width - 1, :img_height - 1, :] -
                            x[:, :img_width - 1, 1:, :])
    return K.sum(K.pow(row_diff + col_diff, 1.25))
# define the loss
loss = K.variable(0.)
for layer_name in settings['features']:
    # add the L2 norm of the features of a layer to the loss
    # (negated: maximizing activations = minimizing this loss)
    assert layer_name in layer_dict.keys(), 'Layer ' + layer_name + ' not found in model.'
    coeff = settings['features'][layer_name]
    x = layer_dict[layer_name].output
    shape = layer_dict[layer_name].output_shape
    # we avoid border artifacts by only involving non-border pixels in the loss
    if K.image_dim_ordering() == 'th':
        loss -= coeff * K.sum(K.square(x[:, :, 2: shape[2] - 2, 2: shape[3] - 2])) / np.prod(shape[1:])
    else:
        loss -= coeff * K.sum(K.square(x[:, 2: shape[1] - 2, 2: shape[2] - 2, :])) / np.prod(shape[1:])

# add continuity loss (gives image local coherence, can result in an artful blur)
loss += settings['continuity'] * continuity_loss(dream) / np.prod(img_size)
# add image L2 norm to loss (prevents pixels from taking very high values, makes image darker)
loss += settings['dream_l2'] * K.sum(K.square(dream)) / np.prod(img_size)

# feel free to further modify the loss as you see fit, to achieve new effects...

# compute the gradients of the dream wrt the loss
grads = K.gradients(loss, dream)

outputs = [loss]
# K.gradients may return a single tensor or a list depending on backend.
if isinstance(grads, (list, tuple)):
    outputs += grads
else:
    outputs.append(grads)
f_outputs = K.function([dream], outputs)
def eval_loss_and_grads(x):
    """Evaluate loss and gradients for a flat image vector in one pass."""
    outs = f_outputs([x.reshape((1,) + img_size)])
    loss_value = outs[0]
    grad_tensors = outs[1:]
    # Gradients may come back as one array or several; flatten either way.
    if len(grad_tensors) == 1:
        grad_values = grad_tensors[0].flatten().astype('float64')
    else:
        grad_values = np.array(grad_tensors).flatten().astype('float64')
    return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
    """Cache loss and gradients from a single network evaluation.

    scipy.optimize wants separate callables for loss and gradients, but
    computing them separately would run the network twice. ``loss``
    computes both and stashes the gradients; ``grads`` hands them out and
    resets the cache.
    """

    def __init__(self):
        self.loss_value = None
        self.grad_values = None

    def loss(self, x):
        assert self.loss_value is None
        self.loss_value, self.grad_values = eval_loss_and_grads(x)
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        cached = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return cached
evaluator = Evaluator()

# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the loss
x = preprocess_image(base_image_path)
for i in range(5):
    print('Start of iteration', i)
    start_time = time.time()

    # add a random jitter to the initial image. This will be reverted at decoding time
    random_jitter = (settings['jitter'] * 2) * (np.random.random(img_size) - 0.5)
    x += random_jitter

    # run L-BFGS for 7 steps
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                     fprime=evaluator.grads, maxfun=7)
    print('Current loss value:', min_val)

    # decode the dream and save it
    x = x.reshape(img_size)
    x -= random_jitter
    img = deprocess_image(np.copy(x))
    fname = result_prefix + '_at_iteration_%d.png' % i
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from desktop.lib.django_util import JsonResponse
from desktop.lib.i18n import force_unicode
from desktop.models import Document2
from notebook.models import Notebook
from metadata.optimizer_client import OptimizerApi
from metadata.conf import OPTIMIZER
# Module-level logger used by error_handler below.
LOG = logging.getLogger(__name__)
def error_handler(view_fn):
def decorator(*args, **kwargs):
try:
return view_fn(*args, **kwargs)
except Http404, e:
raise e
except Exception, e:
LOG.exception(e)
response = {
'status': -1,
'message': force_unicode(str(e))
}
return JsonResponse(response, status=500)
return decorator
@require_POST
@error_handler
def top_tables(request):
  """Return the most popular tables of a database.

  POST parameters:
    database: database name (defaults to 'default')
    len: maximum number of tables to return (defaults to 1000)
  """
  response = {'status': -1}

  database = request.POST.get('database', 'default')
  # POST values arrive as strings, so coerce explicitly; the original code
  # sliced with the raw string and also shadowed the `len` builtin.
  limit = int(request.POST.get('len', 1000))

  if OPTIMIZER.MOCKING.get():
    from beeswax.server import dbms
    db = dbms.get(request.user)
    tables = [
      {'name': table, 'popularity': random.randint(1, 100), 'column_count': random.randint(1, 100), 'is_fact': bool(random.getrandbits(1))}
      for table in db.get_tables(database=database)
    ][:limit]
  else:
    """
    Get back:
    # u'details': [{u'columnCount': 28, u'name': u'date_dim', u'patternCount': 136, u'workloadPercent': 89, u'total': 92, u'type': u'Dimension', u'eid': u'19'},
    """
    api = OptimizerApi()
    data = api.top_tables()
    tables = [{
        'eid': table['eid'],
        'name': table['name'],
        'popularity': table['workloadPercent'],
        'column_count': table['columnCount'],
        'patternCount': table['patternCount'],
        'total': table['total'],
        'is_fact': table['type'] != 'Dimension'
      } for table in data['details']
    ]

  response['top_tables'] = tables
  response['status'] = 0

  return JsonResponse(response)
@require_POST
@error_handler
def table_details(request):
  """Fetch details for one table from the optimizer service."""
  response = {'status': -1}

  table_name = request.POST.get('tableName')

  data = OptimizerApi().table_details(table_name=table_name)

  if data['status'] != 'success':
    response['message'] = 'Optimizer: %s' % data['details']
  else:
    response['status'] = 0
    response['details'] = data['details']

  return JsonResponse(response)
@require_POST
@error_handler
def query_compatibility(request):
  """Check a query's compatibility between two SQL platforms."""
  response = {'status': -1}

  source_platform = request.POST.get('sourcePlatform')
  target_platform = request.POST.get('targetPlatform')
  query = request.POST.get('query')

  data = OptimizerApi().query_compatibility(source_platform=source_platform, target_platform=target_platform, query=query)

  if data['status'] != 'success':
    response['message'] = 'Optimizer: %s' % data['details']
  else:
    response['status'] = 0
    response['query_compatibility'] = json.loads(data['details'])

  return JsonResponse(response)
# Mocked
@require_POST
@error_handler
def query_complexity(request):
  """Return a mocked complexity assessment for the posted query snippet."""
  response = {'status': -1}

  snippet = json.loads(request.POST.get('snippet'))
  # Lower-case once instead of on every comparison.
  statement = snippet['statement'].lower()

  if 'select * from tsqc_date t join atd_au_dtl a on (t.date = a.date)' in statement:
    comment = 'Large join is happening'
  elif 'large' in statement:
    comment = 'Previously failed 5 times in a row'
  elif 'partition' in statement:
    comment = 'Has 50k partitions'
  else:
    comment = ''

  response['query_complexity'] = {
    'level': random.choice(['LOW', 'MEDIUM', 'HIGH']),
    'comment': comment
  }
  response['status'] = 0

  return JsonResponse(response)
@require_POST
@error_handler
def similar_queries(request):
  """Look up queries similar to the posted one via the optimizer service."""
  response = {'status': -1}

  source_platform = request.POST.get('sourcePlatform')
  query = request.POST.get('query')

  data = OptimizerApi().similar_queries(source_platform=source_platform, query=query)

  if data['status'] != 'success':
    response['message'] = 'Optimizer: %s' % data['details']
  else:
    response['status'] = 0
    response['similar_queries'] = json.loads(data['details']['similarQueries'])

  return JsonResponse(response)
@require_POST
@error_handler
def popular_values(request):
  """Return popular filter values for a table (optionally a single column).

  When OPTIMIZER.MOCKING is enabled, a canned date_dim sample is returned;
  otherwise the values come from the optimizer service.
  """
  response = {'status': -1}

  table_name = request.POST.get('tableName')
  column_name = request.POST.get('columnName')

  if OPTIMIZER.MOCKING.get():
    if column_name:
      values = [
        {
          "values": [
            "1",
            "(6,0)"
          ],
          "columnName": "d_dow",
          "tableName": "date_dim"
        }
      ]
    else:
      values = [
        {
          "values": [
            "('2001q1','2001q2','2001q3')",
            "'2001q1'"
          ],
          "columnName": "d_quarter_name",
          "tableName": "date_dim"
        },
        {
          "values": [
            "1",
            "2",
            "4"
          ],
          "columnName": "d_qoy",
          "tableName": "date_dim"
        },
        {
          "values": [
            "Subquery"
          ],
          "columnName": "d_week_seq",
          "tableName": "date_dim"
        },
        {
          "values": [
            "(cast('1998-08-14' as date) + interval '30' day)",
            "(cast ('1998-03-08' as date) + interval '30' day)",
            "d1.d_date + 5",
            "cast('1998-08-14' as date)",
            "cast('1999-04-26' as date)",
            "'2002-4-01'",
            "(cast('2000-02-02' as date) + interval '90' day)",
            "(cast('2002-4-01' as date) + interval '60' day)",
            "(cast('2002-01-18' as date) + 60 + interval '60' day)",
            "('1999-04-17','1999-10-04','1999-11-10')",
            "(cast('1999-04-26' as date) + 30 + interval '30' day)",
            "(cast('1999-06-03' as date) + interval '30' day)",
            "cast('1998-01-06' as date)",
            "(cast('2000-2-01' as date) + interval '60' day)",
            "(cast('2002-04-01' as date) + interval '30' day)",
            "( cast('2000-03-22' as date ) + interval '90' day )",
            "cast('2001-08-21' as date)",
            "(cast ('1998-03-08' as date) - interval '30' day)",
            "'2000-03-22'",
            "(cast('2001-08-21' as date) + interval '14' day)",
            "( cast('1999-08-25' as date) + interval '30' day )",
            "Subquery",
            "'2000-3-01'",
            "cast('2002-01-18' as date)",
            "(cast ('2001-03-14' as date) - interval '30' day)",
            "'2000-02-02'",
            "cast('2002-04-01' as date)",
            "'2002-03-09'",
            "(cast('2000-3-01' as date) + interval '60' day)",
            "cast('1999-06-03' as date)",
            "cast('1999-08-25' as date)",
            "(cast ('2001-03-14' as date) + interval '30' day)",
            "'2000-2-01'",
            "(cast('1998-01-06' as date) + interval '60' day)"
          ],
          "columnName": "d_date",
          "tableName": "date_dim"
        },
        {
          "values": [
            "1223",
            "1200",
            "1202",
            "1214+11",
            "(select distinct date_dim.d_month_seq+1 from date_dim where date_dim.d_year = 2001 and date_dim.d_moy = 5)",
            "1181+11",
            "1199",
            "1191",
            "(1206,1206+1,1206+2,1206+3,1206+4,1206+5,1206+6,1206+7,1206+8,1206+9,1206+10,1206+11)",
            "1211 + 11",
            "1199 + 11",
            "1212",
            "(select distinct date_dim.d_month_seq+3 from date_dim where date_dim.d_year = 2001 and date_dim.d_moy = 5)",
            "1211",
            "1214",
            "Subquery",
            "(1195,1195+1,1195+2,1195+3,1195+4,1195+5,1195+6,1195+7,1195+8,1195+9,1195+10,1195+11)",
            "1200+11",
            "1212 + 11",
            "1223+11",
            "1183 + 11",
            "1183",
            "1181",
            "1191 + 11",
            "1202 + 11"
          ],
          "columnName": "d_month_seq",
          "tableName": "date_dim"
        },
        {
          "values": [
            "11",
            "4 + 3",
            "12",
            "3+2",
            "2+3",
            "1",
            "3",
            "2",
            "5",
            "4",
            "6",
            "8",
            "10"
          ],
          "columnName": "d_moy",
          "tableName": "date_dim"
        },
        {
          "values": [
            "25",
            "16",
            "28",
            "1",
            "3",
            "2"
          ],
          "columnName": "d_dom",
          "tableName": "date_dim"
        },
        {
          "values": [
            "(1998,1998+1)",
            "2000 + 1",
            "2000 + 2",
            "(2000,2000+1,2000+2)",
            "(1999,1999+1,1999+2)",
            "2000-1",
            "2001+1",
            "1999 + 2",
            "2000+1",
            "2000+2",
            "1999+1",
            "(2002)",
            "( 1999, 1999 + 1, 1999 + 2, 1999 + 3 )",
            "1999-1",
            "( 1998, 1998 + 1, 1998 + 2 )",
            "1999",
            "1998",
            "(1998,1998+1,1998+2)",
            "2002",
            "2000",
            "2001",
            "2004"
          ],
          "columnName": "d_year",
          "tableName": "date_dim"
        },
        {
          "values": [
            "1",
            "(6,0)"
          ],
          "columnName": "d_dow",
          "tableName": "date_dim"
        }
      ]
    # Bug fix: the mocked values were built but never attached to the
    # response, so mocking mode always returned status -1 with no data.
    response['values'] = values
    response['status'] = 0
  else:
    api = OptimizerApi()

    data = api.popular_filter_values(table_name=table_name, column_name=column_name)

    if data['status'] == 'success':
      if 'status' in data['details']:
        response['values'] = [] # Bug in Opt API
      else:
        response['values'] = data['details']
      response['status'] = 0
    else:
      response['message'] = 'Optimizer: %s' % data['details']

  return JsonResponse(response)
@require_POST
@error_handler
def upload_history(request):
  """Upload the user's 25 most recent Hive query statements to the optimizer."""
  response = {'status': -1}

  query_type = 'hive'

  history = Document2.objects.get_history(doc_type='query-%s' % query_type, user=request.user)[:25]
  queries = []
  for doc in history:
    statement = Notebook(document=doc).get_data()['snippets'][0]['statement']
    queries.append((doc.uuid, 1000, statement))

  response['upload_history'] = OptimizerApi().upload(queries=queries, source_platform=query_type)
  response['status'] = 0

  return JsonResponse(response)
| |
#!/usr/bin/env python
import os, sys, time, datetime, codecs, shutil, subprocess, re, math, base64
from stat import *
from tiapp import *
from xml.dom.minidom import parseString
this_dir = os.path.dirname(os.path.abspath(__file__))
scripts_root_dir = os.path.dirname(this_dir)
tools_root_dir = os.path.dirname(scripts_root_dir)
sys.path.append(os.path.join(scripts_root_dir, "common"))
sys.path.append(os.path.join(tools_root_dir, "thirdparty"))
import mako.template
from mako import runtime
import simplejson
from csspacker import CSSPacker
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store']
ignoreDirs = ['.git','.svn','_svn','CVS']
year = datetime.datetime.now().year
HTML_HEADER = """<!--
WARNING: this is generated code and will be lost if changes are made.
This generated source code is Copyright (c) 2010-%d by Appcelerator, Inc. All Rights Reserved.
-->""" % year
HEADER = """/**
* WARNING: this is generated code and will be lost if changes are made.
* This generated source code is Copyright (c) 2010-%d by Appcelerator, Inc. All Rights Reserved.
*/
""" % year
def compare_versions(version1, version2):
	"""Compare two dotted version strings (at most 3 components).

	Trailing zero components are ignored, so '1.0' == '1.0.0'.

	Returns:
		-1 if version1 < version2, 0 if equal, 1 if version1 > version2.
	"""
	def normalize(v):
		v = '.'.join(v.split('.')[:3])
		return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
	n1 = normalize(version1)
	n2 = normalize(version2)
	# Portable replacement for the Python-2-only cmp(): same -1/0/1 result,
	# also works on Python 3.
	return (n1 > n2) - (n1 < n2)
class AppcTemplate(mako.template.Template):
	"""Mako template that always renders to unicode."""
	def render(self, *args, **data):
		# NOTE(review): relies on mako's private runtime._render API —
		# verify this still exists when upgrading mako.
		return runtime._render(self, self.callable_, args, data, as_unicode=True)
class Compiler(object):
def __init__(self, project_path, deploytype, ti_sdk_dir):
start_time = time.time()
self.minify = deploytype == "production"
self.packages = []
self.ti_sdk_dir = ti_sdk_dir
self.project_dependencies = [] # modules that the project uses
self.modules_map = {} # all modules including deps => individual module deps
self.modules_to_cache = [] # all modules to be baked into require.cache()
self.modules_to_load = [] # all modules to be required at load time
self.tiplus_modules_to_load = [] # all modules to be required at load time
# initialize paths
self.baseapp_dir = os.path.join(tools_root_dir, "templates", "baseapp", "mobileweb")
self.sdk_path = os.path.join(self.ti_sdk_dir, "mobileweb")
self.sdk_src_path = os.path.join(self.sdk_path, 'src')
self.themes_path = os.path.join(self.sdk_path, 'themes')
self.ti_package_path = os.path.join(self.sdk_path, 'titanium')
self.modules_path = os.path.abspath(os.path.join(self.sdk_path, '..', '..', '..', '..', 'modules'))
self.project_path = project_path
self.build_path = os.path.join(project_path, 'build', 'mobileweb')
self.resources_path = os.path.join(project_path, 'Resources')
self.i18n_path = os.path.join(project_path, 'i18n')
self.ti_js_file = os.path.join(self.build_path, 'titanium.js')
sdk_version = os.path.basename(self.ti_sdk_dir)
print '[INFO] Titanium Mobile Web Compiler v%s' % sdk_version
if not os.path.exists(self.project_path):
print '[ERROR] Invalid project "%s"' % self.project_path
sys.exit(1)
# read the package.json
self.load_package_json()
# register the titanium package
self.packages.append({
'name': self.package_json['name'],
'location': './titanium',
'main': self.package_json['main']
})
# read the tiapp.xml
tiapp_xml = TiAppXML(os.path.join(self.project_path, 'tiapp.xml'), deploytype)
print '[INFO] Compiling Mobile Web project "%s" [%s]' % (tiapp_xml['name'], deploytype)
# create the build directory
if os.path.exists(self.build_path):
shutil.rmtree(self.build_path, True)
try:
os.makedirs(self.build_path)
except:
pass
# copy all of the project's resources to the build directory
self.copy(self.themes_path, os.path.join(self.build_path, 'themes'))
self.copy(self.resources_path, self.build_path, ['android', 'iphone'])
self.copy(os.path.join(self.resources_path, 'mobileweb'), self.build_path, ['apple_startup_images', 'splash'])
self.copy(os.path.join(self.resources_path, 'mobileweb', 'apple_startup_images', 'Default.jpg'), self.build_path)
self.copy(os.path.join(self.resources_path, 'mobileweb', 'apple_startup_images', 'Default-Portrait.jpg'), self.build_path)
self.copy(os.path.join(self.resources_path, 'mobileweb', 'apple_startup_images', 'Default-Landscape.jpg'), self.build_path)
self.copy(self.ti_package_path, os.path.join(self.build_path, 'titanium'))
# scan project for dependencies
self.find_project_dependencies()
# scan all dependencies for distinct list of modules
self.find_modules_to_cache()
self.modules_to_cache.append('Ti/_/image')
self.modules_to_cache.append('Ti/_/include')
if len(tiapp_xml['precache']['requires']):
for req in tiapp_xml['precache']['requires']:
self.modules_to_cache.append('commonjs:' + req)
if len(tiapp_xml['precache']['includes']):
for inc in tiapp_xml['precache']['includes']:
self.modules_to_cache.append('url:' + inc)
# find only the top most modules to be required
areDeps = {}
for module in self.modules_to_cache:
# check if module is a dependent of another module
for m in self.modules_map:
deps = self.modules_map[m]
if module in deps:
areDeps[module] = 1
for module in self.modules_map:
if not module in areDeps:
self.modules_to_load.append(module)
# determine theme
theme = tiapp_xml['mobileweb']['theme']
if not os.path.exists(os.path.join(self.themes_path, theme)):
print '[ERROR] Theme "%s" does not exist' % theme
sys.exit(1)
# check what we need to precache
precache_images = []
if 'Ti/UI/TableViewRow' in self.modules_map:
precache_images.append('/themes/' + theme + '/UI/TableViewRow/child.png')
if len(tiapp_xml['precache']['images']):
for img in tiapp_xml['precache']['images']:
precache_images.append(img)
# detect Ti+ modules
if len(tiapp_xml['modules']):
print '[INFO] Locating Ti+ modules...'
for module in tiapp_xml['modules']:
if module['platform'] == '' or module['platform'] == 'mobileweb':
if 'version' in module and module['version']:
# search <project dir>/modules/mobileweb/<module>/<version>/
module_dir = os.path.join(self.project_path, 'modules', 'mobileweb', module['id'], module['version'])
if not os.path.exists(module_dir):
# search <project dir>/modules/commonjs/<module>/<version>/
module_dir = os.path.join(self.project_path, 'modules', 'commonjs', module['id'], module['version'])
if not os.path.exists(module_dir):
# search <global module dir>/<module>/<version>/
module_dir = os.path.join(self.modules_path, 'mobileweb', module['id'], module['version'])
if not os.path.exists(module_dir):
# search <global commonjs dir>/<module>/<version>/
module_dir = os.path.join(self.modules_path, 'commonjs', module['id'], module['version'])
if not os.path.exists(module_dir):
print '[ERROR] Unable to find Ti+ module "%s", v%s' % (module['id'], module['version'])
sys.exit(1)
else:
# no version number, gotta do it the hard way
# search <project dir>/modules/mobileweb/<module>/
module_dir = self.locate_module(os.path.join(self.project_path, 'modules', 'mobileweb', module['id']))
if module_dir is None:
# search <project dir>/modules/commonjs/<module>/<version>/
module_dir = self.locate_module(os.path.join(self.project_path, 'modules', 'commonjs', module['id']))
if module_dir is None:
# search <global module dir>/<module>/<version>/
module_dir = self.locate_module(os.path.join(self.modules_path, 'mobileweb', module['id']))
if module_dir is None:
# search <global commonjs dir>/<module>/<version>/
module_dir = self.locate_module(os.path.join(self.modules_path, 'commonjs', module['id']))
if module_dir is None:
print '[ERROR] Unable to find Ti+ module "%s"' % module['id']
sys.exit(1)
module_package_json_file = os.path.join(module_dir, 'package.json')
if not os.path.exists(module_package_json_file):
print '[ERROR] Ti+ module "%s" is invalid: missing package.json' % module['id']
sys.exit(1)
module_manifest_file = os.path.join(module_dir, 'manifest')
if not os.path.exists(module_manifest_file):
print '[ERROR] Ti+ module "%s" is invalid: missing manifest' % module['id']
sys.exit(1)
manifest = {}
for line in open(module_manifest_file).readlines():
line = line.strip()
if line[0:1] == '#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()] = value.strip()
if 'minsdk' in manifest and compare_versions(manifest['minsdk'], sdk_version):
print '[ERROR] Ti+ module "%s" requires a minimum SDK version of %s: current version %s' % (module['id'], manifest['minsdk'], sdk_version)
sys.exit(1)
module_package_json = simplejson.load(codecs.open(module_package_json_file, 'r', 'utf-8'))
main_file = module_package_json['main']
if main_file.endswith('.js'):
main_file = main_file[:-3]
lib = ''
if 'directories' in module_package_json and 'lib' in module_package_json['directories']:
lib = module_package_json['directories']['lib']
if lib.startswith('/'):
lib = lib[1:]
main_file_path = os.path.join(module_dir, lib, main_file + '.js')
if not os.path.exists(main_file_path):
print '[ERROR] Ti+ module "%s" is invalid: missing main "%s"' % (module['id'], main_file_path)
sys.exit(1)
print '[INFO] Bundling Ti+ module "%s"' % module['id']
self.project_dependencies.append(main_file)
self.modules_to_cache.append(module['id'] + '/' + main_file)
self.tiplus_modules_to_load.append(module['id'])
if len(lib):
lib = '/' + lib
self.packages.append({
'name': module['id'],
'location': './modules/' + module['id'] + lib,
'main': main_file
})
# TODO: need to combine ALL Ti+ module .js files into the titanium.js, not just the main file
# TODO: need to combine ALL Ti+ module .css files into the titanium.css
# copy entire module directory to build directory
shutil.copytree(module_dir, os.path.join(self.build_path, 'modules', module['id']))
# detect circular dependencies
for module in self.modules_to_cache:
if module in self.modules_map:
mydeps = self.modules_map[module]
for dep in mydeps:
if dep in self.modules_map and module in self.modules_map[dep]:
print '[WARN] Circular dependency detected: %s dependent on %s' % (module, dep)
print '[INFO] Found %s dependenc%s, %s package%s, %s module%s' % (
len(self.project_dependencies), 'y' if len(self.project_dependencies) == 1 else 'ies',
len(self.packages), '' if len(self.packages) == 1 else 's',
len(self.modules_to_cache), '' if len(self.project_dependencies) == 1 else 's')
# TODO: break up the dependencies into layers
# TODO: minify the project's code first
app_names = {}
locales = []
if os.path.exists(self.i18n_path):
print '[INFO] Processing i18n strings...'
for dir in os.listdir(self.i18n_path):
app = self.load_i18n(os.path.join(self.i18n_path, dir, 'app.xml'))
if app is not None and 'appname' in app:
app_names[dir] = app['appname']
strings = self.load_i18n(os.path.join(self.i18n_path, dir, 'strings.xml'))
if strings is not None:
locales.append(dir)
locale_path = os.path.join(self.build_path, 'titanium', 'Ti', 'Locale', dir)
try:
os.makedirs(locale_path)
except:
pass
i18n_file = codecs.open(os.path.join(locale_path, 'i18n.js'), 'w', 'utf-8')
i18n_file.write('define(%s);' % simplejson.dumps(strings))
i18n_file.close()
if dir in tiapp_xml['precache']['locales']:
self.modules_to_cache.append('Ti/Locale/%s/i18n' % dir)
# build the titanium.js
print '[INFO] Assembling titanium.js...'
ti_js = codecs.open(self.ti_js_file, 'w', 'utf-8')
ti_js.write(HEADER + '\n')
# 1) read in the config.js and fill in the template
ti_js.write(AppcTemplate(codecs.open(os.path.join(self.sdk_src_path, 'config.js'), 'r', 'utf-8').read(), input_encoding='utf-8', output_encoding='utf-8').render(
app_analytics = tiapp_xml['analytics'],
app_copyright = tiapp_xml['copyright'],
app_description = tiapp_xml['description'],
app_guid = tiapp_xml['guid'],
app_id = tiapp_xml['id'],
app_name = tiapp_xml['name'],
app_names = simplejson.dumps(app_names),
app_publisher = tiapp_xml['publisher'],
app_url = tiapp_xml['url'],
app_version = tiapp_xml['version'],
deploy_type = deploytype,
locales = simplejson.dumps(locales),
packages = simplejson.dumps(self.packages, sort_keys=True),
project_id = tiapp_xml['id'],
project_name = tiapp_xml['name'],
ti_fs_registry = tiapp_xml['mobileweb']['filesystem']['registry'],
ti_theme = theme,
ti_githash = self.package_json['titanium']['githash'],
ti_timestamp = self.package_json['titanium']['timestamp'],
ti_version = sdk_version,
has_analytics_use_xhr = tiapp_xml['mobileweb']['analytics']['use-xhr'],
has_show_errors = 'false' if deploytype == 'production' or tiapp_xml['mobileweb']['disable-error-screen'] == 'true' else 'true',
jsQuoteEscapeFilter = lambda str: str.replace("\\\"","\\\\\\\"")
))
# 2) copy in the loader
ti_js.write(codecs.open(os.path.join(self.sdk_src_path, 'loader.js'), 'r', 'utf-8').read())
# 3) cache the dependencies
ti_js.write('require.cache({\n');
first = True
for x in self.modules_to_cache:
is_cjs = False
if x.startswith('commonjs:'):
is_cjs = True
x = x[9:]
dep = self.resolve(x, None)
if not len(dep):
continue
if not first:
ti_js.write(',\n')
first = False
filename = dep[1]
if not filename.endswith('.js'):
filename += '.js'
file_path = os.path.join(dep[0], filename)
if x.startswith('url:'):
source = file_path + '.uncompressed.js'
if self.minify:
os.rename(file_path, source)
print '[INFO] Minifying include %s' % file_path
p = subprocess.Popen('java -Xms256m -Xmx256m -jar "%s" --compilation_level SIMPLE_OPTIMIZATIONS --js "%s" --js_output_file "%s"' % (os.path.join(this_dir, 'closureCompiler', 'compiler.jar'), source, file_path), shell=True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
print '[ERROR] Failed to minify "%s"' % file_path
for line in stderr.split('\n'):
if len(line):
print '[ERROR] %s' % line
print '[WARN] Leaving %s un-minified' % file_path
os.remove(file_path)
shutil.copy(source, file_path)
ti_js.write('"%s":"%s"' % (x, codecs.open(file_path, 'r', 'utf-8').read().strip().replace('\\', '\\\\').replace('\n', '\\n\\\n').replace('\"', '\\\"')))
elif is_cjs:
ti_js.write('"%s":function(){\n/* %s */\ndefine(function(require, exports, module){\n%s\n});\n}' % (x, file_path.replace(self.build_path, ''), codecs.open(file_path, 'r', 'utf-8').read()))
else:
ti_js.write('"%s":function(){\n/* %s */\n\n%s\n}' % (x, file_path.replace(self.build_path, ''), codecs.open(file_path, 'r', 'utf-8').read()))
image_mime_types = {
'.png': 'image/png',
'.gif': 'image/gif',
'.jpg': 'image/jpg',
'.jpeg': 'image/jpg'
}
for x in precache_images:
x = x.replace('\\', '/')
y = x
if y.startswith(os.sep):
y = '.' + y
img = os.path.join(self.resources_path, os.sep.join(y.split('/')))
if os.path.exists(img):
fname, ext = os.path.splitext(img.lower())
if ext in image_mime_types:
if not first:
ti_js.write(',\n')
first = False
ti_js.write('"url:%s":"data:%s;base64,%s"' % (x, image_mime_types[ext], base64.b64encode(open(img,'rb').read())))
ti_js.write('});\n')
# 4) write the ti.app.properties
def addProp(prop, val):
tiapp_xml['properties'][prop] = {
'type': 'string',
'value': val
}
addProp('ti.fs.backend', tiapp_xml['mobileweb']['filesystem']['backend'])
addProp('ti.map.backend', tiapp_xml['mobileweb']['map']['backend'])
addProp('ti.map.apikey', tiapp_xml['mobileweb']['map']['apikey'])
s = ''
for name in tiapp_xml['properties']:
prop = tiapp_xml['properties'][name]
if prop['type'] == 'bool':
s += 'p.setBool("' + name + '",' + prop['value'] + ');\n'
elif prop['type'] == 'int':
s += 'p.setInt("' + name + '",' + prop['value'] + ');\n'
elif prop['type'] == 'double':
s += 'p.setDouble("' + name + '",' + prop['value'] + ');\n'
else:
s += 'p.setString("' + name + '","' + str(prop['value']).replace('"', '\\"') + '");\n'
ti_js.write('require("Ti/App/Properties", function(p) {\n%s});\n' % s)
# 5) write require() to load all Ti modules
self.modules_to_load.sort()
self.modules_to_load += self.tiplus_modules_to_load
ti_js.write('require(%s);' % simplejson.dumps(self.modules_to_load))
# 6) close the titanium.js
ti_js.close()
# build the splash screen
splash_html = ''
splash_css = ''
if tiapp_xml['mobileweb']['splash']['enabled'] == 'true':
print '[INFO] Processing splash screen...'
splash_path = os.path.join(self.project_path, 'Resources', 'mobileweb', 'splash')
splash_root_path = os.path.join(self.project_path, 'Resources')
if not os.path.exists(splash_path):
splash_path = os.path.join(self.baseapp_dir, 'resources', 'splash')
splash_root_path = splash_path
splash_html_file = os.path.join(splash_path, 'splash.html')
splash_css_file = os.path.join(splash_path, 'splash.css')
if os.path.exists(splash_html_file):
splash_html = codecs.open(splash_html_file, 'r', 'utf-8').read()
if os.path.exists(splash_css_file):
splash_css = codecs.open(splash_css_file, 'r', 'utf-8').read()
if tiapp_xml['mobileweb']['splash']['inline-css-images'] == 'true':
parts = splash_css.split('url(')
for i in range(1, len(parts)):
j = parts[i].find(')')
if j != -1:
img = parts[i][:j].replace('"', '').replace('\'', '').strip()
if img.find('data:') == -1:
if img[1] == '/':
img_path = os.path.join(splash_root_path, img[1:])
else:
img_path = os.path.join(splash_path, img)
if os.path.exists(img_path):
fname, ext = os.path.splitext(img_path.lower())
if ext in image_mime_types:
parts[i] = 'data:%s;base64,%s%s' % (image_mime_types[ext], base64.b64encode(open(img_path,'rb').read()), parts[i][j:])
splash_css = 'url('.join(parts)
# build the titanium.css file
print '[INFO] Assembling titanium.css...'
ti_css = HEADER + '\n' + splash_css + '\n' + codecs.open(os.path.join(self.themes_path, 'common.css'), 'r', 'utf-8').read()
# TODO: need to rewrite absolute paths for urls
if len(theme):
theme_path = os.path.join(self.resources_path, 'themes', theme)
if not os.path.exists(theme_path):
theme_path = os.path.join(self.resources_path, theme)
if not os.path.exists(theme_path):
theme_path = os.path.join(self.themes_path, theme)
if not os.path.exists(theme_path):
print '[ERROR] Unable to locate theme "%s"' % theme
else:
for dirname, dirnames, filenames in os.walk(theme_path):
for filename in filenames:
fname, ext = os.path.splitext(filename.lower())
if ext == '.css':
ti_css += codecs.open(os.path.join(dirname, filename), 'r', 'utf-8').read()
# detect any fonts and add font face rules to the css file
fonts = {}
for dirname, dirnames, filenames in os.walk(self.resources_path):
for filename in filenames:
fname, ext = os.path.splitext(filename.lower())
if ext == '.otf' or ext == '.woff':
if not fname in fonts:
fonts[fname] = []
fonts[fname].append(os.path.join(dirname, filename)[len(self.resources_path):])
for font in fonts:
ti_css += '@font-face{font-family:%s;src:url(%s);}\n' % (font, '),url('.join(fonts[font]))
# minify the css
if self.minify:
ti_css = CSSPacker(ti_css).pack()
# write the titanium.css
ti_css_file = codecs.open(os.path.join(self.build_path, 'titanium.css'), 'w', 'utf-8')
ti_css_file.write(ti_css)
ti_css_file.close()
# minify all javascript, html, and css files
if self.minify:
# TODO: only minify non-project code (i.e. Titanium and Ti+ modules)
subprocess.call('java -Xms256m -Xmx256m -cp "%s%s%s" -Djava.awt.headless=true minify "%s"' % (os.path.join(this_dir, 'minify'), os.pathsep, os.path.join(this_dir, 'closureCompiler', 'compiler.jar'), self.build_path), shell=True)
# elif ext == '.json':
# TODO: minify json
# elif ext == '.css':
# TODO: minify css
# elif ext == '.html':
# TODO: minify html
# create the favicon and apple touch icons
icon_file = os.path.join(self.resources_path, tiapp_xml['icon'])
fname, ext = os.path.splitext(icon_file.lower())
if os.path.exists(icon_file) and (ext == '.png' or ext == '.jpg' or ext == '.gif'):
self.build_icons(icon_file)
else:
icon_file = os.path.join(self.resources_path, 'mobileweb', 'appicon.png')
if os.path.exists(icon_file):
self.build_icons(icon_file)
# create the filesystem registry
print '[INFO] Building filesystem registry...'
filesystem_registry = 'ts\t' + str(int(os.path.getctime(self.build_path)) * 1000) + '\n' + self.walk_fs(self.build_path, 0)
filesystem_registry_file = codecs.open(os.path.join(self.build_path, 'titanium', 'filesystem.registry'), 'w', 'utf-8')
filesystem_registry_file.write(filesystem_registry)
filesystem_registry_file.close()
# if we're preloading the filesystem registry, write it to the require cache
if tiapp_xml['mobileweb']['filesystem']['registry'] == 'preload':
ti_js = codecs.open(self.ti_js_file, 'a', 'utf-8')
ti_js.write('require.cache({"url:/titanium/filesystem.registry":"' + filesystem_registry.strip().replace('\n', '|') + '"});')
ti_js.close()
# get status bar style
status_bar_style = 'default'
if 'statusbar-style' in tiapp_xml:
status_bar_style = tiapp_xml['statusbar-style']
if status_bar_style == 'opaque_black' or status_bar_style == 'opaque':
status_bar_style = 'black'
elif status_bar_style == 'translucent_black' or status_bar_style == 'transparent' or status_bar_style == 'translucent':
status_bar_style = 'black-translucent'
else:
status_bar_style = 'default'
# populate index.html
index_html_file = codecs.open(os.path.join(self.build_path, 'index.html'), 'w', 'utf-8')
index_html_file.write(AppcTemplate(codecs.open(os.path.join(self.sdk_src_path, 'index.html'), 'r', 'utf-8').read().strip(), input_encoding='utf-8', output_encoding='utf-8').render(
ti_header = HTML_HEADER,
project_name = tiapp_xml['name'] or '',
app_description = tiapp_xml['description'] or '',
app_publisher = tiapp_xml['publisher'] or '',
splash_screen = splash_html,
ti_generator = 'Appcelerator Titanium Mobile ' + sdk_version,
ti_statusbar_style = status_bar_style,
ti_css = ti_css,
ti_js = codecs.open(self.ti_js_file, 'r', 'utf-8').read()
))
index_html_file.close()
total_time = round(time.time() - start_time)
total_minutes = math.floor(total_time / 60)
total_seconds = total_time % 60
if total_minutes > 0:
print '[INFO] Finished in %s minutes %s seconds' % (int(total_minutes), int(total_seconds))
else:
print '[INFO] Finished in %s seconds' % int(total_time)
def load_i18n(self, xml_file):
if not os.path.exists(xml_file):
return None
strings = {}
dom = parseString(codecs.open(xml_file, 'r', 'utf-8', 'replace').read().encode('utf-8'))
root = dom.documentElement
for node in root.childNodes:
if node.nodeType == 1 and node.nodeName == 'string':
name = node.getAttribute('name')
if name is not '':
val = ''
for inner in node.childNodes:
if inner.nodeType == node.TEXT_NODE:
val = val + inner.data
strings[name] = val.encode('utf-8').decode('string-escape').strip()
return strings
def walk_fs(self, path, depth):
s = ''
listing = os.listdir(path)
listing.sort()
for file in listing:
p = os.path.join(path, file)
# TODO: screen out specific file/folder patterns (i.e. uncompressed js files)
if os.path.isdir(p):
s += ('\t' * depth) + file + '\n' + self.walk_fs(p, depth + 1)
else:
s += ('\t' * depth) + file + '\t' + str(os.path.getsize(p)) + '\n'
return s
	def resolve(self, it, ref):
		# Resolve a module id `it` (possibly carrying a "plugin!resource"
		# prefix and/or a "url:" marker) to a [package_directory, module_path]
		# pair, or [] when the id is not a local file (e.g. an http: URL).
		# `ref` is the directory of the referencing module, used for
		# relative ids.
		parts = it.split('!')
		# only the part after the last '!' names the actual resource
		it = parts[-1]
		if it.startswith('url:'):
			it = it[4:]
			if it.startswith('/'):
				it = '.' + it
			parts = it.split('/')
			# a url resource may live inside a registered package
			for p in self.packages:
				if p['name'] == parts[0]:
					return [self.compact_path(os.path.join(self.build_path, p['location'])), it]
			return [self.build_path, it]
		if it.find(':') != -1:
			# still contains a scheme (http:, etc.) -- not resolvable locally
			return []
		if it.startswith('/') or (len(parts) == 1 and it.endswith('.js')):
			return [self.build_path, it]
		if it.startswith('.') and ref is not None:
			# relative id: resolve against the referencing module's directory
			it = self.compact_path(ref + it)
		parts = it.split('/')
		for p in self.packages:
			if p['name'] == parts[0]:
				if p['name'] != 'Ti':
					# strip the package-name prefix from the module path
					it = it.replace(p['name'] + '/', '')
				return [self.compact_path(os.path.join(self.build_path, p['location'])), it]
		return [self.build_path, it]
def copy(self, src_path, dest_path, ignore=None):
if os.path.exists(src_path):
print '[INFO] Copying %s...' % src_path
if os.path.isdir(src_path):
for root, dirs, files in os.walk(src_path):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name)
if ignore is not None and root == src_path:
for name in ignore:
if name in dirs:
dirs.remove(name)
for file in files:
if file in ignoreFiles or file.startswith('._'):
continue
source = os.path.join(root, file)
dest = os.path.expanduser(source.replace(src_path, dest_path, 1))
dest_dir = os.path.expanduser(os.path.split(dest)[0])
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copy(source, dest)
else:
shutil.copy(src_path, dest_path)
def compact_path(self, path):
result = []
path = path.replace('\\', '/').split('/');
while len(path):
segment = path[0]
path = path[1:]
if segment == '..' and len(result) and lastSegment != '..':
result.pop()
lastSegment = result[-1]
elif segment != '.':
lastSegment = segment
result.append(segment)
return '/'.join(result);
def build_icons(self, src):
print '[INFO] Generating app icons...'
favicon = os.path.join(self.build_path, 'favicon.png')
s = 'java -Xms256m -Xmx256m -cp "%s%s%s" -Dquiet=true -Djava.awt.headless=true resize "%s"' % (os.path.join(this_dir, 'imageResizer'), os.pathsep, os.path.join(this_dir, 'imageResizer', 'imgscalr-lib-4.2.jar'), src)
s += ' "%s" %d %d' % (favicon, 16, 16)
s += ' "%s" %d %d' % (os.path.join(self.build_path, 'apple-touch-icon-precomposed.png'), 57, 57)
s += ' "%s" %d %d' % (os.path.join(self.build_path, 'apple-touch-icon-57x57-precomposed.png'), 57, 57)
s += ' "%s" %d %d' % (os.path.join(self.build_path, 'apple-touch-icon-72x72-precomposed.png'), 72, 72)
s += ' "%s" %d %d' % (os.path.join(self.build_path, 'apple-touch-icon-114x114-precomposed.png'), 114, 114)
subprocess.call(s, shell=True)
os.rename(favicon, os.path.join(self.build_path, 'favicon.ico'))
def load_package_json(self):
package_json_file = os.path.join(self.ti_package_path, 'package.json')
if not os.path.exists(package_json_file):
print '[ERROR] Unable to open titanium package manifest "%s"' % package_json_file
sys.exit(1)
self.package_json = simplejson.load(codecs.open(package_json_file, 'r', 'utf-8'))
def locate_module(self, path):
module_dir = None
module['version'] = '0.0.0'
for dir in os.listdir(path):
if compare_versions(module['version'], dir) == -1:
module['version'] = dir
module_dir = os.path.join(path, dir)
return module_dir
def find_project_dependencies(self):
print '[INFO] Scanning project for dependencies...'
# TODO: using an AST, scan the entire project's source and identify all dependencies
self.project_dependencies += [
'Ti',
'Ti/Accelerometer',
'Ti/Analytics',
'Ti/API',
'Ti/App',
'Ti/App/Properties',
'Ti/Blob',
'Ti/Buffer',
'Ti/Codec',
'Ti/Facebook',
'Ti/Facebook/LoginButton',
'Ti/Filesystem',
'Ti/Filesystem/File',
'Ti/Filesystem/FileStream',
'Ti/IOStream',
'Ti/Locale',
'Ti/Media',
'Ti/Media/VideoPlayer',
'Ti/Network',
'Ti/Network/HTTPClient',
'Ti/Platform',
'Ti/Platform/DisplayCaps',
'Ti/Gesture',
'Ti/Geolocation',
'Ti/XML',
'Ti/UI/View',
'Ti/Map',
'Ti/Map/View',
'Ti/Map/Annotation',
'Ti/Media/VideoPlayer',
'Ti/UI',
'Ti/UI/Clipboard',
'Ti/UI/MobileWeb',
'Ti/UI/TableViewRow',
'Ti/UI/Tab',
'Ti/UI/TabGroup',
'Ti/UI/Window',
'Ti/UI/2DMatrix',
'Ti/UI/ActivityIndicator',
'Ti/UI/AlertDialog',
'Ti/UI/Animation',
'Ti/UI/Button',
'Ti/UI/EmailDialog',
'Ti/UI/ImageView',
'Ti/UI/Label',
'Ti/UI/OptionDialog',
'Ti/UI/Picker',
'Ti/UI/PickerColumn',
'Ti/UI/PickerRow',
'Ti/UI/ProgressBar',
'Ti/UI/ScrollableView',
'Ti/UI/ScrollView',
'Ti/UI/Slider',
'Ti/UI/Switch',
'Ti/UI/TableViewSection',
'Ti/UI/TableView',
'Ti/UI/TextArea',
'Ti/UI/TextField',
'Ti/UI/WebView',
'Ti/UI/MobileWeb/NavigationGroup',
'Ti/Utils',
'Ti/Yahoo'
]
def parse_deps(self, deps):
found = []
if len(deps) > 2:
deps = deps[1:-1]
deps = deps.split(',')
for dep in deps:
dep = dep.strip().split(' ')[0].strip()
if dep.startswith('\'') or dep.startswith('"'):
found.append(simplejson.loads(dep))
return found
def find_modules_to_cache(self):
print '[INFO] Searching for all required modules...'
self.require_cache = {}
for module in self.project_dependencies:
self.parse_module(module, None)
self.modules_to_cache = []
for module in self.require_cache:
self.modules_to_cache.append(module)
	def parse_module(self, module, ref):
		# Recursively discover the dependencies of `module` (an AMD module id,
		# optionally "plugin!resource"), populating self.require_cache and
		# self.modules_map. `ref` is the referencing module's directory for
		# resolving relative ids. Already-seen modules and the pseudo-module
		# 'require' are skipped.
		if module in self.require_cache or module == 'require':
			return
		parts = module.split('!')
		if len(parts) == 1:
			# plain module id (no plugin): normalize relative ids and cache it
			if module.startswith('.') and ref is not None:
				module = self.compact_path(ref + module)
			self.require_cache[module] = 1
		dep = self.resolve(module, ref)
		if not len(dep):
			# not resolvable to a local file (e.g. remote URL) -- nothing to scan
			return
		if len(parts) > 1:
			# plugin usage: cache the plugin's resource as a url entry
			self.require_cache['url:' + parts[1]] = 1
		filename = dep[1]
		if not filename.endswith('.js'):
			filename += '.js'
		source = codecs.open(os.path.join(dep[0], filename), 'r', 'utf-8').read()
		# match "define([id,] [deps,] function|{" and capture the deps array
		pattern = re.compile('define\(\s*([\'\"][^\'\"]*[\'\"]\s*)?,?\s*(\[[^\]]+\])\s*?,?\s*(function|\{)')
		results = pattern.search(source)
		if results is None:
			# no define() with a dependency array -- leaf module
			self.modules_map[module] = []
		else:
			groups = results.groups()
			if groups is not None and len(groups):
				if groups[1] is None:
					self.modules_map[module] = []
				else:
					deps = self.parse_deps(groups[1])
					for i in range(0, len(deps)):
						dep = deps[i]
						parts = dep.split('!')
						# ref becomes this module's directory (with trailing /)
						ref = module.split('/')
						ref.pop()
						ref = '/'.join(ref) + '/'
						if dep.startswith('.'):
							# record the normalized id in the deps list
							deps[i] = self.compact_path(ref + dep)
						if len(parts) == 1:
							# plain dependency: recurse, resolving ./ ids against
							# this module's directory
							if dep.startswith('./'):
								parts = module.split('/')
								parts.pop()
								parts.append(dep)
								self.parse_module(self.compact_path('/'.join(parts)), ref)
							else:
								self.parse_module(dep, ref)
						else:
							# plugin dependency: map resource -> plugin and scan
							# the plugin module itself
							self.modules_map[dep] = parts[0]
							self.parse_module(parts[0], module)
							if parts[0] == 'Ti/_/text':
								# text! resources are themselves files to scan
								if dep.startswith('./'):
									parts = module.split('/')
									parts.pop()
									parts.append(dep)
									self.parse_module(self.compact_path('/'.join(parts)), ref)
								else:
									self.parse_module(dep, ref)
					self.modules_map[module] = deps
| |
import logging
import time
from teuthology.misc import get_testdir, reconnect
from teuthology.orchestra import run
from teuthology.orchestra.remote import Remote
from teuthology.task import install as install_task
log = logging.getLogger(__name__)
def clear_firewall(ctx):
    """
    Strip all teuthology-created iptables rules from every remote. Rules are
    recognized by a 'teuthology' comment in them; any other firewall rules
    are left untouched.
    """
    log.info("Clearing teuthology firewall rules...")
    filter_cmd = "iptables-save | grep -v teuthology | iptables-restore"
    ctx.cluster.run(args=["sudo", "sh", "-c", filter_cmd])
    log.info("Cleared teuthology firewall rules.")
def shutdown_daemons(ctx):
    """Stop ceph services, unmount any ceph-fuse/rbd-fuse mounts listed in
    /etc/mtab, and kill any remaining ceph-related processes on all remotes."""
    log.info('Unmounting ceph-fuse and killing daemons...')
    # try upstart, sysvinit and systemd stop commands in turn; whichever one
    # the host supports will succeed
    ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
                    'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
                    'sudo', 'systemctl', 'stop', 'ceph.target'],
                    check_status=False, timeout=180)
    ctx.cluster.run(
        args=[
            # unmount every ceph-fuse mount point found in /etc/mtab
            'if', 'grep', '-q', 'ceph-fuse', '/etc/mtab', run.Raw(';'),
            'then',
            'grep', 'ceph-fuse', '/etc/mtab', run.Raw('|'),
            'grep', '-o', " /.* fuse", run.Raw('|'),
            'grep', '-o', "/.* ", run.Raw('|'),
            'xargs', '-n', '1', 'sudo', 'fusermount', '-u', run.Raw(';'),
            'fi',
            run.Raw(';'),
            # likewise for rbd-fuse mounts
            'if', 'grep', '-q', 'rbd-fuse', '/etc/mtab', run.Raw(';'),
            'then',
            'grep', 'rbd-fuse', '/etc/mtab', run.Raw('|'),
            'grep', '-o', " /.* fuse", run.Raw('|'),
            'grep', '-o', "/.* ", run.Raw('|'),
            'xargs', '-n', '1', 'sudo', 'fusermount', '-u', run.Raw(';'),
            'fi',
            run.Raw(';'),
            # kill anything ceph-related that survived the service stop
            'sudo',
            'killall',
            '--quiet',
            'ceph-mon',
            'ceph-osd',
            'ceph-mds',
            'ceph-mgr',
            'ceph-fuse',
            'ceph-disk',
            'radosgw',
            'ceph_test_rados',
            'rados',
            'rbd-fuse',
            'apache2',
            run.Raw('||'),
            'true', # ignore errors from ceph binaries not being found
        ],
        timeout=120,
    )
    log.info('All daemons killed.')
def kill_hadoop(ctx):
    """Forcibly kill any java processes whose command line matches Hadoop."""
    log.info("Terminating Hadoop services...")
    ctx.cluster.run(
        args=["pkill", "-f", "-KILL", "java.*hadoop"],
        check_status=False,
        timeout=60,
    )
def kill_valgrind(ctx):
    """Kill stray valgrind processes left behind by earlier runs.

    See http://tracker.ceph.com/issues/17084
    """
    kill_args = ['sudo', 'pkill', '-f', '-9', 'valgrind.bin']
    ctx.cluster.run(args=kill_args, check_status=False, timeout=20)
def remove_osd_mounts(ctx):
    """
    unmount any osd data mounts (scratch disks)
    """
    log.info('Unmount any osd data directories...')
    # find /var/lib/ceph/osd/ mounts in /etc/mtab, take the mount-point
    # column, and lazily (-l) unmount each one; the trailing 'true' keeps the
    # command's exit status clean when nothing matched
    ctx.cluster.run(
        args=[
            'grep',
            '/var/lib/ceph/osd/',
            '/etc/mtab',
            run.Raw('|'),
            'awk', '{print $2}', run.Raw('|'),
            'xargs', '-r',
            'sudo', 'umount', '-l', run.Raw(';'),
            'true'
        ],
        timeout=120
    )
def remove_osd_tmpfs(ctx):
    """
    Unmount any tmpfs filesystems mounted under /mnt (scratch OSD data dirs).
    """
    log.info('Unmount any osd tmpfs dirs...')
    ctx.cluster.run(
        args=[
            # raw string: '\s' is an invalid escape sequence in a normal
            # string literal (SyntaxWarning on modern Python, future error);
            # the runtime value is unchanged
            'egrep', r'tmpfs\s+/mnt', '/etc/mtab', run.Raw('|'),
            'awk', '{print $2}', run.Raw('|'),
            'xargs', '-r',
            'sudo', 'umount', run.Raw(';'),
            # keep the command's exit status clean when nothing matched
            'true'
        ],
        timeout=120
    )
def stale_kernel_mount(remote):
    # Return True when /sys/kernel/debug/ceph still contains per-mount debug
    # directories (ignoring the meta/client_features nodes), meaning a kernel
    # client mount survived cleanup. The '| read' makes the pipeline succeed
    # only if find produced at least one line of output.
    proc = remote.run(
        args=[
            'sudo', 'find',
            '/sys/kernel/debug/ceph',
            '-mindepth', '1',
            run.Raw('!'),
            '-path', '/sys/kernel/debug/ceph/meta',
            run.Raw('!'),
            '-path', '/sys/kernel/debug/ceph/meta/client_features',
            '-type', 'd',
            run.Raw('|'),
            'read'
        ],
        check_status=False
    )
    return proc.exitstatus == 0
def reboot(ctx, remotes):
    """
    Reboot the given remotes and wait for them all to come back. Hosts with a
    stale kernel ceph mount are rebooted the hard way via sysrq (sync,
    remount read-only, reboot) since a clean reboot would hang on the mount.
    """
    for remote in remotes:
        if stale_kernel_mount(remote):
            log.warning('Stale kernel mount on %s!', remote.name)
            log.info('force/no-sync rebooting %s', remote.name)
            # -n is ignored in systemd versions through v229, which means this
            # only works on trusty -- on 7.3 (v219) and xenial (v229) reboot -n
            # still calls sync().
            # args = ['sync', run.Raw('&'),
            #         'sleep', '5', run.Raw(';'),
            #         'sudo', 'reboot', '-f', '-n']
            reboot_args = ['for', 'sysrq', 'in', 's', 'u', 'b', run.Raw(';'),
                           'do', 'echo', run.Raw('$sysrq'), run.Raw('|'),
                           'sudo', 'tee', '/proc/sysrq-trigger', run.Raw(';'),
                           'done']
        else:
            log.info('rebooting %s', remote.name)
            reboot_args = ['sudo', 'reboot']
        try:
            remote.run(args=reboot_args, wait=False)
        except Exception:
            log.exception('ignoring exception during reboot command')
        # the spawned procs are intentionally abandoned: reboot -f doesn't
        # actually send anything back to the ssh client!
    if remotes:
        log.info('waiting for nodes to reboot')
        time.sleep(8)  # if we try and reconnect too quickly, it succeeds!
        reconnect(ctx, 480)  # allow 8 minutes for the reboots
def reset_syslog_dir(ctx):
    """
    Remove the teuthology rsyslog drop-in config (80-cephtest.conf) from each
    remote, restarting rsyslog where it was present, and wait for every node
    to finish.
    """
    log.info('Resetting syslog output locations...')
    nodes = {}
    for remote in ctx.cluster.remotes.keys():
        proc = remote.run(
            args=[
                'if', 'test', '-e', '/etc/rsyslog.d/80-cephtest.conf',
                run.Raw(';'),
                'then',
                'sudo', 'rm', '-f', '--', '/etc/rsyslog.d/80-cephtest.conf',
                run.Raw('&&'),
                'sudo', 'service', 'rsyslog', 'restart',
                run.Raw(';'),
                'fi',
                run.Raw(';'),
            ],
            timeout=60,
        )
        nodes[remote.name] = proc
    # wait on every collected proc before returning
    for name, proc in nodes.items():
        log.info('Waiting for %s to restart syslog...', name)
        proc.wait()
def dpkg_configure(ctx):
    """
    On every deb-based remote, finish any interrupted package operations:
    run 'dpkg --configure -a' followed by 'apt-get -f install'. Failures are
    ignored so cleanup can proceed.
    """
    for remote in ctx.cluster.remotes.keys():
        if remote.os.package_type != 'deb':
            continue
        log.info(
            'Waiting for dpkg --configure -a and apt-get -f install...')
        remote.run(
            args=[
                'sudo', 'dpkg', '--configure', '-a',
                run.Raw(';'),
                'sudo', 'DEBIAN_FRONTEND=noninteractive',
                'apt-get', '-y', '--force-yes', '-f', 'install',
                run.Raw('||'),
                # ':' is a no-op -- never fail even if apt could not recover
                ':',
            ],
            timeout=180,
            check_status=False,
        )
def remove_yum_timedhosts(ctx):
    """
    Delete yum's cached 'timedhosts' files on all rpm-based remotes.

    Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1233329
    """
    log.info("Removing yum timedhosts files...")
    for remote in ctx.cluster.remotes.keys():
        if remote.os.package_type != 'rpm':
            continue
        remote.run(
            # raw string: '\;' is an invalid escape sequence in a normal
            # string literal (SyntaxWarning on modern Python, future error);
            # find needs the escaped semicolon to terminate -exec, and the
            # runtime value is unchanged
            args=r"sudo find /var/cache/yum -name 'timedhosts' -exec rm {} \;",
            check_status=False, timeout=180
        )
def remove_ceph_packages(ctx):
    """
    remove ceph and ceph dependent packages by force
    force is needed since the node's repo might have changed and
    in many cases autocorrect will not work due to missing packages
    due to repo changes
    """
    log.info("Force remove ceph packages")
    ceph_packages_to_remove = ['ceph-common', 'ceph-mon', 'ceph-osd',
                               'libcephfs1', 'libcephfs2',
                               'librados2', 'librgw2', 'librbd1', 'python-rgw',
                               'ceph-selinux', 'python-cephfs', 'ceph-base',
                               'python-rbd', 'python-rados', 'ceph-mds',
                               'ceph-mgr', 'libcephfs-java', 'libcephfs-jni',
                               'ceph-deploy', 'libapache2-mod-fastcgi'
                               ]
    pkgs = str.join(' ', ceph_packages_to_remove)
    for remote in ctx.cluster.remotes.keys():
        if remote.os.package_type == 'rpm':
            # drop any repo files a previous run may have left broken
            log.info("Remove any broken repos")
            dist_release = remote.os.name
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*ceph*")],
                check_status=False
            )
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*fcgi*")],
                check_status=False,
            )
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*samba*")],
                check_status=False,
            )
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*nfs-ganesha*")],
                check_status=False,
            )
            remote.run(
                args=['sudo', 'rpm', '--rebuilddb']
            )
            if dist_release in ['opensuse', 'sle']:
                remote.sh('sudo zypper clean')
                log.info('Remove any ceph packages')
                # bug fix: the package list was never passed to zypper, so
                # the command removed nothing
                remote.sh('sudo zypper remove --non-interactive %s' % pkgs,
                          check_status=False
                          )
            else:
                remote.sh('sudo yum clean all')
                log.info('Remove any ceph packages')
                # bug fix: the package list was never passed to yum, so the
                # command removed nothing
                remote.sh('sudo yum remove -y %s' % pkgs, check_status=False)
        else:
            log.info("Remove any broken repos")
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/apt/sources.list.d/*ceph*")],
                check_status=False,
            )
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/apt/sources.list.d/*samba*")],
                check_status=False,
            )
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/apt/sources.list.d/*nfs-ganesha*")],
                check_status=False,
            )
            log.info("Autoclean")
            remote.run(
                args=['sudo', 'apt-get', 'autoclean'],
                check_status=False,
            )
            log.info('Remove any ceph packages')
            remote.run(
                args=[
                    'sudo', 'dpkg', '--remove', '--force-remove-reinstreq',
                    run.Raw(pkgs)
                ],
                check_status=False
            )
            log.info("Autoclean")
            remote.run(
                args=['sudo', 'apt-get', 'autoclean']
            )
def remove_installed_packages(ctx):
    """
    Remove every ceph package (including debuginfo) plus a few related
    stragglers, then remove the package sources that installed them. Broken
    deb installs are repaired first so removal can proceed.
    """
    dpkg_configure(ctx)
    conf = dict(project='ceph', debuginfo='true')
    packages = install_task.get_package_list(ctx, conf)
    extra_debs = ['salt-common', 'salt-minion', 'calamari-server',
                  'python-rados', 'multipath-tools']
    extra_rpms = ['salt-common', 'salt-minion', 'calamari-server',
                  'multipath-tools', 'device-mapper-multipath']
    install_task.remove_packages(
        ctx,
        conf,
        dict(
            deb=packages['deb'] + extra_debs,
            rpm=packages['rpm'] + extra_rpms,
        )
    )
    install_task.remove_sources(ctx, conf)
def remove_ceph_data(ctx):
    """Delete leftover ceph configuration and runtime state on all remotes."""
    log.info("Removing any stale ceph data...")
    cleanup_args = ['sudo', 'rm', '-rf', '/etc/ceph',
                    run.Raw('/var/run/ceph*')]
    ctx.cluster.run(args=cleanup_args)
def remove_testing_tree(ctx):
    """Wipe the test directory plus the legacy cephtest locations everywhere."""
    log.info('Clearing filesystem of test data...')
    ctx.cluster.run(
        args=['sudo', 'rm', '-rf', get_testdir(ctx),
              # just for old time's sake
              run.Raw('&&'),
              'sudo', 'rm', '-rf', '/tmp/cephtest',
              run.Raw('&&'),
              'sudo', 'rm', '-rf', '/home/ubuntu/cephtest'],
    )
def remove_configuration_files(ctx):
    """
    Delete commonly used testing configuration files that must not leak into
    later runs. For example, ceph-deploy may be configured via
    ``~/.cephdeploy.conf`` to alter how it handles installation by specifying
    a default section in its config with custom locations.
    """
    ctx.cluster.run(
        args=['rm', '-f', '/home/ubuntu/.cephdeploy.conf'],
        timeout=30,
    )
def undo_multipath(ctx):
    """
    Flush any multipath device mappings created during the run so they do not
    come back unless a test specifically requests them again.
    """
    log.info('Removing any multipath config/pkgs...')
    flush_args = ['sudo', 'multipath', '-F']
    for remote in ctx.cluster.remotes.keys():
        remote.run(args=flush_args,
                   check_status=False,
                   timeout=60)
def synch_clocks(remotes):
    """
    Force a clock sync on each remote: stop whichever time daemon is present
    (ntp/ntpd/chronyd), step the clock, write it to the hardware clock, and
    start the daemon again. Errors are tolerated throughout.
    """
    log.info('Synchronizing clocks...')
    for remote in remotes:
        remote.run(
            args=[
                # stop whichever time service exists on this host
                'sudo', 'systemctl', 'stop', 'ntp.service', run.Raw('||'),
                'sudo', 'systemctl', 'stop', 'ntpd.service', run.Raw('||'),
                'sudo', 'systemctl', 'stop', 'chronyd.service',
                run.Raw('&&'),
                # step the clock with whatever tool is available
                'sudo', 'ntpdate-debian', run.Raw('||'),
                'sudo', 'ntp', '-gq', run.Raw('||'),
                'sudo', 'ntpd', '-gq', run.Raw('||'),
                'sudo', 'chronyc', 'sources',
                run.Raw('&&'),
                # persist the stepped time to the hardware clock
                'sudo', 'hwclock', '--systohc', '--utc',
                run.Raw('&&'),
                # restart whichever time service exists
                'sudo', 'systemctl', 'start', 'ntp.service', run.Raw('||'),
                'sudo', 'systemctl', 'start', 'ntpd.service', run.Raw('||'),
                'sudo', 'systemctl', 'start', 'chronyd.service',
                run.Raw('||'),
                'true',    # ignore errors; we may be racing with ntpd startup
            ],
            timeout=60,
        )
def unlock_firmware_repo(ctx):
    """Remove any stale git index lock left in the firmware repository."""
    log.info('Making sure firmware.git is not locked...')
    lock_path = '/lib/firmware/updates/.git/index.lock'
    ctx.cluster.run(args=['sudo', 'rm', '-f', lock_path, ])
def check_console(hostname):
    """
    Verify the IPMI console of `hostname` is reachable; if not, power-cycle
    (or power on) the node and re-check with a timeout. Hosts without a
    console are skipped silently.
    """
    remote = Remote(hostname)
    shortname = remote.shortname
    console = remote.console
    if not console:
        return
    cname = '{host}.{domain}'.format(
        host=shortname,
        domain=console.ipmidomain,
    )
    log.info('checking console status of %s' % cname)
    if console.check_status():
        log.info('console ready on %s' % cname)
        return
    # console not responding: power-cycle if the node is on, otherwise
    # power it on, then give it time to come up
    if console.check_power('on'):
        log.info('attempting to reboot %s' % cname)
        console.power_cycle()
    else:
        log.info('attempting to power on %s' % cname)
        console.power_on()
    timeout = 100
    log.info('checking console status of %s with timeout %s' %
             (cname, timeout))
    if console.check_status(timeout=timeout):
        log.info('console ready on %s' % cname)
    else:
        # bug fix: the message previously ended with a dangling ', ';
        # also use lazy logging args instead of eager %-formatting
        log.error("Failed to get console status for %s", cname)
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
    # expose PY2's implicit names under PY3: alias the builtins module to
    # __builtin__ and map the removed `long` type onto int
    import builtins as __builtin__

    long = int
elif six.PY2:
    import __builtin__
class config(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/route-preference/config. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: This container defines route preference configuration.
    """
    # NOTE: pyangbind auto-generated container -- regenerate from the YANG
    # model rather than editing by hand.
    # __slots__ suppresses the per-instance __dict__; the double-underscore
    # leaf names are name-mangled to _config__<leaf> on instances.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__external_route_preference",
        "__internal_route_preference",
    )
    _yang_name = "config"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Leaf "external-route-preference": uint8 further restricted to
        # 1..255 ("1..max" layered over the 0..255 int base type).
        self.__external_route_preference = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["1..max"]},
            ),
            is_leaf=True,
            yang_name="external-route-preference",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )
        # Leaf "internal-route-preference": same uint8 1..255 restriction.
        self.__internal_route_preference = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["1..max"]},
            ),
            is_leaf=True,
            yang_name="internal-route-preference",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        # Optional copy-construction: a single positional source object must
        # expose every pyangbind element; only changed values are copied
        # across via the generated _set_* methods.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Path of this container within the instantiated tree; falls back to
        # the absolute schema path when no parent is attached.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "route-preference",
                "config",
            ]
    def _get_external_route_preference(self):
        """
        Getter method for external_route_preference, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/route_preference/config/external_route_preference (uint8)
        YANG Description: Administrative Distance(preference) for external ISIS routes.
        """
        return self.__external_route_preference
    def _set_external_route_preference(self, v, load=False):
        """
        Setter method for external_route_preference, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/route_preference/config/external_route_preference (uint8)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_external_route_preference is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_external_route_preference() directly.
        YANG Description: Administrative Distance(preference) for external ISIS routes.
        """
        # NOTE(review): values carrying _utype appear to be unwrapped back to
        # the underlying type before revalidation -- confirm against pyangbind.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=int,
                        restriction_dict={"range": ["0..255"]},
                        int_size=8,
                    ),
                    restriction_dict={"range": ["1..max"]},
                ),
                is_leaf=True,
                yang_name="external-route-preference",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint8",
                is_config=True,
            )
        except (TypeError, ValueError):
            # Re-raise with a structured payload describing the expected type.
            raise ValueError(
                {
                    "error-string": """external_route_preference must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..max']}), is_leaf=True, yang_name="external-route-preference", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=True)""",
                }
            )
        self.__external_route_preference = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_external_route_preference(self):
        # Reset the leaf to a freshly constructed default instance.
        self.__external_route_preference = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["1..max"]},
            ),
            is_leaf=True,
            yang_name="external-route-preference",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )
    def _get_internal_route_preference(self):
        """
        Getter method for internal_route_preference, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/route_preference/config/internal_route_preference (uint8)
        YANG Description: Administrative Distance(preference) for internal ISIS routes.
        """
        return self.__internal_route_preference
    def _set_internal_route_preference(self, v, load=False):
        """
        Setter method for internal_route_preference, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/route_preference/config/internal_route_preference (uint8)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_internal_route_preference is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_internal_route_preference() directly.
        YANG Description: Administrative Distance(preference) for internal ISIS routes.
        """
        # See _set_external_route_preference for the validation pattern.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=int,
                        restriction_dict={"range": ["0..255"]},
                        int_size=8,
                    ),
                    restriction_dict={"range": ["1..max"]},
                ),
                is_leaf=True,
                yang_name="internal-route-preference",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint8",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """internal_route_preference must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..max']}), is_leaf=True, yang_name="internal-route-preference", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=True)""",
                }
            )
        self.__internal_route_preference = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_internal_route_preference(self):
        # Reset the leaf to a freshly constructed default instance.
        self.__internal_route_preference = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["1..max"]},
            ),
            is_leaf=True,
            yang_name="internal-route-preference",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )
    # Public properties (the __builtin__ alias is set at module import time
    # for PY2/PY3 compatibility).
    external_route_preference = __builtin__.property(
        _get_external_route_preference, _set_external_route_preference
    )
    internal_route_preference = __builtin__.property(
        _get_internal_route_preference, _set_internal_route_preference
    )
    # Ordered registry of the container's leaves; consumed by __init__'s
    # copy-construction path.
    _pyangbind_elements = OrderedDict(
        [
            ("external_route_preference", external_route_preference),
            ("internal_route_preference", internal_route_preference),
        ]
    )
class config(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/route-preference/config. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: This container defines route preference configuration.
    """
    # NOTE: pyangbind auto-generated container (from the -l2 module variant).
    # NOTE(review): this rebinds the module-level name `config`, shadowing
    # the identical class generated just above from openconfig-network-instance.
    # __slots__ suppresses the per-instance __dict__; the double-underscore
    # leaf names are name-mangled to _config__<leaf> on instances.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__external_route_preference",
        "__internal_route_preference",
    )
    _yang_name = "config"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Leaf "external-route-preference": uint8 further restricted to
        # 1..255 ("1..max" layered over the 0..255 int base type).
        self.__external_route_preference = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["1..max"]},
            ),
            is_leaf=True,
            yang_name="external-route-preference",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )
        # Leaf "internal-route-preference": same uint8 1..255 restriction.
        self.__internal_route_preference = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["1..max"]},
            ),
            is_leaf=True,
            yang_name="internal-route-preference",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        # Optional copy-construction: a single positional source object must
        # expose every pyangbind element; only changed values are copied.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Path of this container within the instantiated tree; falls back to
        # the absolute schema path when no parent is attached.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "route-preference",
                "config",
            ]
    def _get_external_route_preference(self):
        """
        Getter method for external_route_preference, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/route_preference/config/external_route_preference (uint8)
        YANG Description: Administrative Distance(preference) for external ISIS routes.
        """
        return self.__external_route_preference
    def _set_external_route_preference(self, v, load=False):
        """
        Setter method for external_route_preference, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/route_preference/config/external_route_preference (uint8)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_external_route_preference is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_external_route_preference() directly.
        YANG Description: Administrative Distance(preference) for external ISIS routes.
        """
        # NOTE(review): values carrying _utype appear to be unwrapped back to
        # the underlying type before revalidation -- confirm against pyangbind.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=int,
                        restriction_dict={"range": ["0..255"]},
                        int_size=8,
                    ),
                    restriction_dict={"range": ["1..max"]},
                ),
                is_leaf=True,
                yang_name="external-route-preference",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint8",
                is_config=True,
            )
        except (TypeError, ValueError):
            # Re-raise with a structured payload describing the expected type.
            raise ValueError(
                {
                    "error-string": """external_route_preference must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..max']}), is_leaf=True, yang_name="external-route-preference", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=True)""",
                }
            )
        self.__external_route_preference = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_external_route_preference(self):
        # Reset the leaf to a freshly constructed default instance.
        self.__external_route_preference = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["1..max"]},
            ),
            is_leaf=True,
            yang_name="external-route-preference",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )
    def _get_internal_route_preference(self):
        """
        Getter method for internal_route_preference, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/route_preference/config/internal_route_preference (uint8)
        YANG Description: Administrative Distance(preference) for internal ISIS routes.
        """
        return self.__internal_route_preference
    def _set_internal_route_preference(self, v, load=False):
        """
        Setter method for internal_route_preference, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/route_preference/config/internal_route_preference (uint8)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_internal_route_preference is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_internal_route_preference() directly.
        YANG Description: Administrative Distance(preference) for internal ISIS routes.
        """
        # See _set_external_route_preference for the validation pattern.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=int,
                        restriction_dict={"range": ["0..255"]},
                        int_size=8,
                    ),
                    restriction_dict={"range": ["1..max"]},
                ),
                is_leaf=True,
                yang_name="internal-route-preference",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint8",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """internal_route_preference must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..max']}), is_leaf=True, yang_name="internal-route-preference", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=True)""",
                }
            )
        self.__internal_route_preference = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_internal_route_preference(self):
        # Reset the leaf to a freshly constructed default instance.
        self.__internal_route_preference = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["1..max"]},
            ),
            is_leaf=True,
            yang_name="internal-route-preference",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )
    # Public properties (the __builtin__ alias is set at module import time
    # for PY2/PY3 compatibility).
    external_route_preference = __builtin__.property(
        _get_external_route_preference, _set_external_route_preference
    )
    internal_route_preference = __builtin__.property(
        _get_internal_route_preference, _set_internal_route_preference
    )
    # Ordered registry of the container's leaves; consumed by __init__'s
    # copy-construction path.
    _pyangbind_elements = OrderedDict(
        [
            ("external_route_preference", external_route_preference),
            ("internal_route_preference", internal_route_preference),
        ]
    )
| |
from collections import OrderedDict
from music21 import roman, stream, chord, harmony, key, interval
# Chromatic pitch-class spellings in ascending order starting at C (flats
# only).  NOTE(review): not referenced in this chunk -- presumably used by
# other modules; confirm before removing.
letter_ordering = ['c', 'db', 'd', 'eb', 'e', 'f', 'gb', 'g', 'ab', 'a', 'bb', 'b']
# from resource.py
# Co-occurring enharmonic respellings: each entry lists [old, new] pairs that
# are applied together, and only when every `old` spelling appears in the
# letter name (see roman2letter_transpose).
LETTER_PARTIAL_CO_RELABELS = [[['C##', 'D'], ['A#', 'Bb']],
                              [['C##', 'D'], ['E#', 'F']],
                              [['F##', 'G'], ['D#', 'Eb']],
                              [['F##', 'G'], ['D##', 'E']],
                              [['F##', 'G'], ['A#', 'Bb']],
                              [['G##', 'A'], ['B#', 'C']],
                              [['G##', 'A'], ['E#', 'F']],
                              [['G#', 'Ab'], ['B#', 'C']],
                              [['E#', 'F'], ['G#', 'Ab']],
                              [['E#', 'F'], ['D#', 'Eb']]]
# Single substring substitutions applied to user-facing letter names
# ('-' is music21's flat sign; the rest are enharmonic simplifications).
LETTER_PARTIAL_RELABELS_FOR_USER = {'-': 'b',
                                    'C##': 'D',
                                    'E#': 'F',
                                    'B#': 'C'}
# One-off whole-symbol renames between roman and letter representations.
ROMAN2LETTER_RELABELS = {'bVII7[maj7]': 'B-maj7'}
LETTER2ROMAN_RELABELS = {'B-maj7': 'bVII7[maj7]'}
# Figured-bass slash inversions collapsed to compact figures.
ROMAN_PARTIAL_RELABELS = {'6/5': '65', '4/3': '43', '6/4': '64'}
# LETTER_PARTIAL_RELABELS_FOR_USER = {'-': 'b'}
# NOTE(review): not referenced in this chunk -- presumably special-cased when
# rendering notes elsewhere; confirm.
MAKE_NOTE_EXCEPTIONS = ['Vs4', 'V7s4']
# TODO: It6 treated as bVI7 as a quick hack
# Canonical roman-numeral renames applied before conversion
# (see roman2letter_transpose).
ROMAN_RELABELS = {'bVII7[maj7]': 'bVII7',
                  'It6': 'bVI7'}
def root_interval(sym, sym_next):
    """Return the signed chromatic distance, in semitones, between the
    roots of two letter chord symbols."""
    first_root = harmony.ChordSymbol(sym).findRoot()
    second_root = harmony.ChordSymbol(sym_next).findRoot()
    return interval.Interval(first_root, second_root).chromatic.semitones
def is_fifth_root_movement_from_sym_pair(sym, sym_next):
    """True when root motion between two chord symbols is a descending
    fifth (equivalently, an ascending fourth)."""
    return root_interval(sym, sym_next) in (-7, 5)
def is_fifth_root_movement(root, root_next):
    """True when the interval between two root pitches is a fifth down
    (or, equivalently, a fourth up)."""
    semitone_count = interval.Interval(root, root_next).chromatic.semitones
    return semitone_count in (-7, 5)
def get_roots_and_chord_qualities(seq):
    """Return parallel lists (roots, qualities) for a chord-symbol sequence.

    The quality flag is 'M' for a major triad, 'd' for a dominant
    seventh, and '' for anything else.
    """
    roots = []
    qualities = []
    for symbol in seq:
        parsed = harmony.ChordSymbol(symbol)
        roots.append(parsed.findRoot())
        if parsed.isMajorTriad():
            qualities.append('M')
        elif parsed.isDominantSeventh():
            qualities.append('d')
        else:
            qualities.append('')
    return roots, qualities
def get_targets(seq):
    """Estimate likely tonicization targets of a chord-symbol sequence.

    A chord counts as a target when it ends a run of descending-fifth
    root motion and the chord preceding it could act as a V (major triad
    or dominant seventh).  Earlier arrivals receive larger weights
    (1 / position).
    """
    # TODO: could use the quality of the chord to constrain this more
    roots, qualities = get_roots_and_chord_qualities(seq)
    fifth_flags = [is_fifth_root_movement(roots[i], roots[i + 1])
                   for i in range(len(seq) - 1)]
    targets = {}
    # Degenerate two-chord case: take the final chord as the target.
    if len(fifth_flags) == 1:
        targets[seq[-1]] = 1
        return targets
    for i, is_fifth in enumerate(fifth_flags):
        next_is_fifth = i < len(fifth_flags) - 1 and fifth_flags[i + 1]
        ends_fifth_run = is_fifth and not next_is_fifth
        could_be_V = qualities[i] in ('M', 'd')
        if ends_fifth_run and could_be_V:
            arrival = seq[i + 1]
            # weight by how close the arrival is to the start
            targets[arrival] = targets.get(arrival, 0.0) + 1.0 / (i + 1)
    return targets
def letter2music21(sym):
    """Normalise a letter chord symbol into music21-compatible spelling."""
    normalised = preprocess_letters_before_sym2chord(sym)
    return normalised
def get_chordSymbol(sym):
    """Build a music21 ChordSymbol from a letter symbol.

    The spelling is normalised first, and suspension / half-diminished
    shorthand is rewritten into music21 postfix form by check().
    """
    normalised = letter2music21(sym)
    letter, postfix = check(normalised)
    if postfix is not None:
        normalised = letter + postfix
    return harmony.ChordSymbol(normalised)
def is_roman_numeral(sym):
    """Heuristically decide whether *sym* is a roman-numeral chord label.

    Roman markers (i/I/v/V/It) vote yes; 'dim' vetoes, since it only
    appears in letter-name symbols yet contains an 'i'.
    """
    if 'dim' in sym:
        return False
    return any(marker in sym for marker in ('i', 'I', 'v', 'V', 'It'))
def sym2roman(sym):
    """Parse *sym* as a music21 RomanNumeral; return None when invalid.

    Invalid symbols are reported with a warning rather than raised so
    callers can keep processing a sequence.
    """
    try:
        chord_sym = roman.RomanNumeral(sym)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.  print(...) with a single argument is
        # valid in both Python 2 and 3.
        print("WARNING: chord symbol not valid %s" % sym)
        chord_sym = None
    return chord_sym
def replace_flat_dash_primary(sym, part_ind):
    """Rewrite flat signs: music21 spells flats as '-' rather than 'b'.

    A 'b' that is really the pitch name B -- the root of a 'bb...'
    symbol, or a lone bass note after '/' -- is preserved or uppercased
    instead of being treated as an accidental.
    """
    flat_after_letter = len(sym) > 1 and sym[1] == 'b' and sym[0] != 'b'
    if flat_after_letter:
        return sym.replace('b', '-')
    if sym[:2] == 'bb':
        # leading pitch-name 'b' followed by a flat (e.g. 'bb7' -> 'b-7')
        return sym[0] + '-' + sym[2:] if len(sym) > 2 else sym[0] + '-'
    if part_ind == 1 and sym == 'b':
        # a lone 'b' bass note stands for the pitch B
        return 'B'
    return sym
def add_min_to_lowercase(sym, part_ind):
    """Append 'm' to a lowercase chord symbol so it parses as minor.

    Only the primary part (index 0, before '/') is touched, and symbols
    carrying dominant/diminished figures ('x', 'o', 'h', 'd') are left
    alone.
    """
    # TODO (from original): symbols with extensions (e.g. 'a7') get the
    # 'm' appended in the wrong place -- it should follow the letter name.
    has_non_minor_figure = any(figure in sym for figure in 'xohd')
    if part_ind == 0 and sym.islower() and not has_non_minor_figure:
        return sym + 'm'
    return sym
def replace_chord_with_dominant_seventh(sym):
    """Rewrite the 'x7' dominant-seventh shorthand as 'b7'."""
    rewritten = sym.replace('x7', 'b7')
    return rewritten
def replace_d_with_dim(sym):
    """Expand a bare 'd' diminished marker (after the root letter) to 'dim'.

    Symbols already containing 'dim' are returned unchanged, and a
    leading 'd' is a pitch name, never a diminished marker.
    """
    if 'dim' in sym:
        return sym
    # The original guard `sym.index('d', 1) != 0` was always true (index
    # with start=1 can never return 0) and has been dropped.
    if len(sym) > 1 and 'd' in sym[1:]:
        ind = sym.index('d', 1)
        sym = sym[:ind] + 'dim' + sym[ind + 1:]
    return sym
def preprocess_letters_before_sym2chord(sym):
    """Run the letter-symbol normalisation pipeline on each '/'-part."""
    # TODO (from original): add_min_to_lowercase stays disabled because it
    # appends 'm' in the wrong position for symbols with extensions (a7).
    parts = sym.split('/')
    parts = [replace_flat_dash_primary(part, i) for i, part in enumerate(parts)]
    parts = [replace_chord_with_dominant_seventh(part) for part in parts]
    parts = [replace_d_with_dim(part) for part in parts]
    return '/'.join(parts)
def sym2chord(sym, transpose=0):
    # Build a music21 chord.Chord from either a roman-numeral or a letter
    # chord symbol, transposed by `transpose` semitones.  Returns None when
    # the symbol cannot be parsed (a warning is printed instead).
    ch = None
    if is_roman_numeral(sym):
        try:
            chord_sym = roman.RomanNumeral(sym)
            # transpose
            chord_sym.transpose(transpose, inPlace=True)
            pches = chord_sym.pitches
            # NOTE(review): secondary numerals are shifted down an octave
            # while letter symbols (below) are shifted up one -- presumably
            # to align registers between the two paths; confirm.
            if chord_sym.secondaryRomanNumeral is not None:
                ch = chord.Chord(pches).transpose(-12)
            else:
                ch = chord.Chord(pches)
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt /
            # SystemExit; consider narrowing to `except Exception`.
            print 'WARNING: symbol not found', sym
    else:
        try:
            sym = preprocess_letters_before_sym2chord(sym)
            chord_sym = harmony.ChordSymbol(sym)
            ch = chord.Chord(chord_sym.pitches).transpose(12)
        except:
            print 'WARNING: symbol not found', sym
    return ch
def roman2letter(sym):
    """Convert a roman-numeral symbol to a letter name via its pitches.

    NOTE(review): this definition is shadowed by the later
    ``roman2letter`` defined further down this module, so it is
    effectively dead code.
    """
    parsed = sym2roman(sym)
    if parsed is None:
        return None
    as_chord = chord.Chord(parsed.pitches)
    lettername, _ch_type = harmony.chordSymbolFigureFromChord(as_chord, True)
    return lettername
def tokenize_transpose(sym, transpose):
    """Build the '<sym>_<transpose>' cache key used by
    roman2letter_transpose."""
    return '{0}_{1}'.format(sym, transpose)
def roman2letter_transpose(sym, transpose=0,
                           translation_dict={},
                           return_dict=False):
    """Convert a roman-numeral symbol into a letter name after transposing.

    NOTE(review): the mutable default ``translation_dict={}`` acts as a
    cross-call memo cache (results are written back into it); this looks
    deliberate -- it is also handed back when ``return_dict`` is True --
    but confirm before relying on it.
    NOTE(review): ``.iteritems()`` below makes this function Python-2-only.
    """
    # print 'roman2letter', sym
    # Canonicalise known relabels before any parsing.
    if sym in ROMAN_RELABELS.keys():
        sym = ROMAN_RELABELS[sym]
    key = tokenize_transpose(sym, transpose)
    # Cache hit: reuse the previously computed letter name.
    if key in translation_dict:
        return translation_dict[key]
    rn = roman.RomanNumeral(sym)
    rn.transpose(transpose, inPlace=True)
    pitches = rn.pitches
    # pitches = [pch.midi+transpose for pch in rn.pitches]
    ch = chord.Chord(pitches)
    # print ch
    # ch = sym2chord(sym, transpose=transpose)
    if ch is None:
        # return sym, False
        return None
    lettername, ch_type = harmony.chordSymbolFigureFromChord(ch, True)
    # somehow music21 is not able to cope with
    ## ch = roman2letter_transpose('V', transpose=-1)
    # if lettername == 'Chord Symbol Cannot Be Identified':
    #     rn.transpose(transpose, inPlace=True)
    #     ch = chord.Chord(rn.pitches)
    #     lettername, ch_type = harmony.chordSymbolFigureFromChord(ch, True)
    # from resource.py
    # Apply co-occurring enharmonic respellings: every `old` in the group
    # must be present before any replacement in the group is made.
    for co_replacements in LETTER_PARTIAL_CO_RELABELS:
        all_in = True
        for replacements in co_replacements:
            if replacements[0] not in lettername:
                all_in = False
        if all_in:
            for replacements in co_replacements:
                lettername = lettername.replace(replacements[0],
                                                replacements[1])
    # Then the unconditional single substitutions (flat sign etc.).
    for k, v in LETTER_PARTIAL_RELABELS_FOR_USER.iteritems():
        if k in lettername:
            lettername = lettername.replace(k, v)
    translation_dict[key] = lettername
    # print lettername#, ch_type
    if return_dict:
        return lettername, translation_dict
    return lettername
def letter2roman(sym):
    # Convert a letter chord symbol into a roman-numeral figure, assuming
    # the key of C.  Returns None (after a warning) for unparseable symbols.
    # for now assume in key of C
    print '--- letter2roman', sym,
    try:
        chord_sym = harmony.ChordSymbol(sym)
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt /
        # SystemExit; consider narrowing to `except Exception`.
        print 'WARNING: chord symbol does not exist'
        return None
    ch = chord.Chord(chord_sym.pitches)
    rn = roman.romanNumeralFromChord(ch, key.Key('C'))
    print rn.figure
    return rn.figure
def syms2score(syms):
    """Render a list of chord symbols as a music21 Stream of chords."""
    score = stream.Stream()
    for symbol in syms:
        score.append(sym2chord(symbol))
    return score
# def syms2score(syms):
# score = stream.Stream()
# for sym in syms:
# rn = roman.RomanNumeral(sym)
# rn.lyric = sym
# score.append(rn)
# return score
def roman2letter_subroutine(sym):
    """Map a roman numeral to a letter-name figure via its pitch content."""
    numeral = roman.RomanNumeral(sym)
    as_chord = chord.Chord(numeral.pitches)
    return harmony.chordSymbolFromChord(as_chord).figure
def check(sym):
    """Split suspension / half-diminished shorthand off a chord symbol.

    Returns ``(partial_sym, postfix)``: when one of the shorthand figures
    ('7s4', 's4', 'h') occurs (and the symbol is not already spelled with
    'sus4'), the prefix before the figure is uppercased and the music21
    postfix ('sus4' or 'm7b5') is returned; otherwise the symbol (with
    'x' rewritten to 'o') and None.
    """
    fixes = OrderedDict()
    fixes['7s4'] = 'sus4'
    fixes['s4'] = 'sus4'
    fixes['h'] = 'm7b5'
    postfix = None
    # 'x' is an alternative spelling of the diminished 'o'.
    sym = sym.replace('x', 'o')
    partial_sym = sym
    # .items() instead of the Python-2-only .iteritems(); insertion order
    # matters ('7s4' must be tried before its suffix 's4').
    for k, v in fixes.items():
        if k in sym and 'sus4' not in sym:
            ind = sym.index(k)
            partial_sym = sym[:ind]
            partial_sym = partial_sym.upper()
            postfix = v
            break
    return partial_sym, postfix
def roman2letter(sym):
    """Convert a roman-numeral symbol to a letter symbol, re-attaching any
    suspension / half-diminished shorthand as a postfix."""
    partial_sym, postfix = check(sym)
    letter = roman2letter_subroutine(partial_sym)
    if postfix is not None:
        letter += postfix
    return letter
if __name__ == '__main__':
    # Ad-hoc smoke tests (Python 2 print statements).
    # lettername = roman2letter('ii/o6/5')
    # lettername = roman2letter('It6')
    # print lettername
    # ch = roman2letter_transpose('V', transpose=-1)
    # print ch
    # ch = roman2letter_transpose('VII6', transpose=-1)
    # print ch
    # lettername = roman2letter('iih7')
    print replace_d_with_dim('ddim')
    print '-------'
    print get_chordSymbol('a7')
    print get_chordSymbol('e-9')
| |
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
import collections
import datetime
import threading
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import sql
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.db import api
from ironic.db.sqlalchemy import models
CONF = cfg.CONF
# heartbeat_timeout is registered by the conductor manager; imported here so
# this module can rely on the option being present.
CONF.import_opt('heartbeat_timeout',
                'ironic.conductor.manager',
                group='conductor')
LOG = log.getLogger(__name__)
# Thread-local storage handed to enginefacade to scope DB sessions
# (see _session_for_read/_session_for_write below).
_CONTEXT = threading.local()
def get_backend():
    """Return the SQLAlchemy backend implementation (this module's
    Connection)."""
    backend = Connection()
    return backend
def _session_for_read():
    """Return a context manager yielding a reader session bound to the
    thread-local context."""
    reader_ctx = enginefacade.reader.using(_CONTEXT)
    return reader_ctx
def _session_for_write():
    """Return a context manager yielding a writer session bound to the
    thread-local context."""
    writer_ctx = enginefacade.writer.using(_CONTEXT)
    return writer_ctx
def model_query(model, *args, **kwargs):
    """Query helper for simpler session usage.

    :param session: if present, the session to use
    """
    # NOTE(review): **kwargs -- including the documented `session` and the
    # `base_model` passed by get_nodeinfo_list -- is accepted but silently
    # ignored here; confirm whether it should be honoured.
    with _session_for_read() as session:
        query = session.query(model, *args)
        return query
def add_identity_filter(query, value):
    """Add an identity filter to a query.

    An integer-like value filters by ID; anything else is treated as a
    UUID.

    :param query: Initial query to add filter to.
    :param value: Value for filtering results by.
    :return: Modified query.
    :raises: InvalidIdentity when the value is neither.
    """
    if strutils.is_int_like(value):
        return query.filter_by(id=value)
    if uuidutils.is_uuid_like(value):
        return query.filter_by(uuid=value)
    raise exception.InvalidIdentity(identity=value)
def add_port_filter(query, value):
    """Add a port-specific filter to a query.

    A valid MAC address filters by address; anything else falls back to
    the generic identity filter (integer ID or UUID).

    :param query: Initial query to add filter to.
    :param value: Value for filtering results by.
    :return: Modified query.
    """
    if utils.is_valid_mac(value):
        return query.filter_by(address=value)
    return add_identity_filter(query, value)
def add_port_filter_by_node(query, value):
    """Filter ports by node: integer IDs directly, UUIDs via a Node join."""
    if strutils.is_int_like(value):
        return query.filter_by(node_id=value)
    joined = query.join(models.Node, models.Port.node_id == models.Node.id)
    return joined.filter(models.Node.uuid == value)
def add_node_filter_by_chassis(query, value):
    """Filter nodes by chassis: integer IDs directly, UUIDs via a join."""
    if strutils.is_int_like(value):
        return query.filter_by(chassis_id=value)
    joined = query.join(models.Chassis,
                        models.Node.chassis_id == models.Chassis.id)
    return joined.filter(models.Chassis.uuid == value)
def _paginate_query(model, limit=None, marker=None, sort_key=None,
                    sort_dir=None, query=None):
    """Apply sorting and marker-based pagination, then fetch the rows.

    'id' is always kept as the final sort key so ordering is total; an
    unknown sort_key is reported as InvalidParameterValue.
    """
    if not query:
        query = model_query(model)
    sort_keys = ['id']
    if sort_key and sort_key not in sort_keys:
        sort_keys.insert(0, sort_key)
    try:
        paginated = db_utils.paginate_query(query, model, limit, sort_keys,
                                            marker=marker, sort_dir=sort_dir)
    except db_exc.InvalidSortKey:
        raise exception.InvalidParameterValue(
            _('The sort_key value "%(key)s" is an invalid field for sorting')
            % {'key': sort_key})
    return paginated.all()
class Connection(api.Connection):
"""SqlAlchemy connection."""
    def __init__(self):
        # No per-connection state: sessions are managed by enginefacade.
        pass
def _add_nodes_filters(self, query, filters):
if filters is None:
filters = []
if 'chassis_uuid' in filters:
# get_chassis_by_uuid() to raise an exception if the chassis
# is not found
chassis_obj = self.get_chassis_by_uuid(filters['chassis_uuid'])
query = query.filter_by(chassis_id=chassis_obj.id)
if 'associated' in filters:
if filters['associated']:
query = query.filter(models.Node.instance_uuid != sql.null())
else:
query = query.filter(models.Node.instance_uuid == sql.null())
if 'reserved' in filters:
if filters['reserved']:
query = query.filter(models.Node.reservation != sql.null())
else:
query = query.filter(models.Node.reservation == sql.null())
if 'reserved_by_any_of' in filters:
query = query.filter(models.Node.reservation.in_(
filters['reserved_by_any_of']))
if 'maintenance' in filters:
query = query.filter_by(maintenance=filters['maintenance'])
if 'driver' in filters:
query = query.filter_by(driver=filters['driver'])
if 'provision_state' in filters:
query = query.filter_by(provision_state=filters['provision_state'])
if 'provisioned_before' in filters:
limit = (timeutils.utcnow() -
datetime.timedelta(seconds=filters['provisioned_before']))
query = query.filter(models.Node.provision_updated_at < limit)
if 'inspection_started_before' in filters:
limit = ((timeutils.utcnow()) -
(datetime.timedelta(
seconds=filters['inspection_started_before'])))
query = query.filter(models.Node.inspection_started_at < limit)
return query
    def get_nodeinfo_list(self, columns=None, filters=None, limit=None,
                          marker=None, sort_key=None, sort_dir=None):
        # Return a paginated list of the requested Node columns
        # (defaulting to just the id), with the standard node filters applied.
        # list-ify columns default values because it is bad form
        # to include a mutable list in function definitions.
        if columns is None:
            columns = [models.Node.id]
        else:
            columns = [getattr(models.Node, c) for c in columns]
        # NOTE(review): model_query() ignores **kwargs, so base_model is
        # silently dropped here -- confirm against the oslo_db-style
        # model_query semantics this call appears to target.
        query = model_query(*columns, base_model=models.Node)
        query = self._add_nodes_filters(query, filters)
        return _paginate_query(models.Node, limit, marker,
                               sort_key, sort_dir, query)
def get_node_list(self, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Node)
query = self._add_nodes_filters(query, filters)
return _paginate_query(models.Node, limit, marker,
sort_key, sort_dir, query)
def reserve_node(self, tag, node_id):
    """Atomically take a reservation on a node.

    :param tag: identifier (conductor hostname) taking the reservation.
    :param node_id: node ID or UUID.
    :returns: the reserved node.
    :raises: NodeLocked if the node is already reserved by someone else.
    :raises: NodeNotFound if no node matches ``node_id``.
    """
    with _session_for_write():
        query = model_query(models.Node)
        query = add_identity_filter(query, node_id)
        # be optimistic and assume we usually create a reservation
        count = query.filter_by(reservation=None).update(
            {'reservation': tag}, synchronize_session=False)
        try:
            node = query.one()
            if count != 1:
                # Nothing updated and node exists. Must already be
                # locked.
                raise exception.NodeLocked(node=node_id,
                                           host=node['reservation'])
            return node
        except NoResultFound:
            # Keyword form for consistency with the other NodeNotFound
            # call sites in this module (positional overrides the
            # exception's message template).
            raise exception.NodeNotFound(node=node_id)
def release_node(self, tag, node_id):
    """Release the reservation held on a node by ``tag``.

    :param tag: identifier that currently holds the reservation.
    :param node_id: node ID or UUID.
    :raises: NodeNotLocked if the node has no reservation.
    :raises: NodeLocked if the node is reserved by a different tag.
    :raises: NodeNotFound if no node matches ``node_id``.
    """
    with _session_for_write():
        query = model_query(models.Node)
        query = add_identity_filter(query, node_id)
        # be optimistic and assume we usually release a reservation
        count = query.filter_by(reservation=tag).update(
            {'reservation': None}, synchronize_session=False)
        try:
            if count != 1:
                node = query.one()
                if node['reservation'] is None:
                    raise exception.NodeNotLocked(node=node_id)
                else:
                    raise exception.NodeLocked(node=node_id,
                                               host=node['reservation'])
        except NoResultFound:
            # Keyword form for consistency with the other NodeNotFound
            # call sites in this module.
            raise exception.NodeNotFound(node=node_id)
def create_node(self, values):
    """Create a new node row.

    Missing 'uuid', 'power_state' and 'provision_state' keys in
    ``values`` are populated with defaults before insertion.

    :param values: dict of column values for the new node.
    :returns: the created Node model.
    :raises: DuplicateName, InstanceAssociated or NodeAlreadyExists
        depending on which unique constraint was violated.
    """
    # ensure defaults are present for new nodes
    if 'uuid' not in values:
        values['uuid'] = uuidutils.generate_uuid()
    if 'power_state' not in values:
        values['power_state'] = states.NOSTATE
    if 'provision_state' not in values:
        values['provision_state'] = states.ENROLL

    node = models.Node()
    node.update(values)
    with _session_for_write() as session:
        try:
            session.add(node)
            session.flush()
        except db_exc.DBDuplicateEntry as exc:
            # Map the violated unique column to a specific exception;
            # anything else falls through to NodeAlreadyExists (uuid).
            if 'name' in exc.columns:
                raise exception.DuplicateName(name=values['name'])
            elif 'instance_uuid' in exc.columns:
                raise exception.InstanceAssociated(
                    instance_uuid=values['instance_uuid'],
                    node=values['uuid'])
            raise exception.NodeAlreadyExists(uuid=values['uuid'])
        return node
def get_node_by_id(self, node_id):
    """Look a node up by its integer ID; raise NodeNotFound if absent."""
    try:
        return model_query(models.Node).filter_by(id=node_id).one()
    except NoResultFound:
        raise exception.NodeNotFound(node=node_id)
def get_node_by_uuid(self, node_uuid):
    """Look a node up by UUID; raise NodeNotFound if absent."""
    try:
        return model_query(models.Node).filter_by(uuid=node_uuid).one()
    except NoResultFound:
        raise exception.NodeNotFound(node=node_uuid)
def get_node_by_name(self, node_name):
    """Look a node up by its logical name; raise NodeNotFound if absent."""
    try:
        return model_query(models.Node).filter_by(name=node_name).one()
    except NoResultFound:
        raise exception.NodeNotFound(node=node_name)
def get_node_by_instance(self, instance):
    """Return the node associated with the given instance UUID.

    :raises: InvalidUUID if ``instance`` is not UUID-like.
    :raises: InstanceNotFound if no node is associated with it.
    """
    if not uuidutils.is_uuid_like(instance):
        raise exception.InvalidUUID(uuid=instance)

    try:
        return (model_query(models.Node)
                .filter_by(instance_uuid=instance)
                .one())
    except NoResultFound:
        raise exception.InstanceNotFound(instance=instance)
def destroy_node(self, node_id):
    """Delete a node and all ports attached to it.

    :param node_id: node ID or UUID.
    :raises: NodeNotFound if the node does not exist.
    """
    with _session_for_write():
        query = model_query(models.Node)
        query = add_identity_filter(query, node_id)

        try:
            node_ref = query.one()
        except NoResultFound:
            raise exception.NodeNotFound(node=node_id)

        # Get node ID, if an UUID was supplied. The ID is
        # required for deleting all ports, attached to the node.
        if uuidutils.is_uuid_like(node_id):
            node_id = node_ref['id']

        # delete the ports first so no orphans are left behind
        port_query = model_query(models.Port)
        port_query = add_port_filter_by_node(port_query, node_id)
        port_query.delete()

        query.delete()
def update_node(self, node_id, values):
    """Update a node, translating duplicate-entry DB errors.

    :raises: InvalidParameterValue if 'uuid' is being changed.
    :raises: DuplicateName, NodeAlreadyExists, InstanceAssociated
    """
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Node.")
        raise exception.InvalidParameterValue(err=msg)

    try:
        return self._do_update_node(node_id, values)
    except db_exc.DBDuplicateEntry as e:
        if 'name' in e.columns:
            raise exception.DuplicateName(name=values['name'])
        if 'uuid' in e.columns:
            raise exception.NodeAlreadyExists(uuid=values['uuid'])
        if 'instance_uuid' in e.columns:
            raise exception.InstanceAssociated(
                instance_uuid=values['instance_uuid'],
                node=node_id)
        raise
def _do_update_node(self, node_id, values):
    """Apply ``values`` to a node while holding a row lock.

    Maintains provision/inspection timestamp bookkeeping as the
    provision state changes.

    :raises: NodeNotFound if the node does not exist.
    :raises: NodeAssociated if trying to set instance_uuid on a node
        that already has one.
    """
    with _session_for_write():
        query = model_query(models.Node)
        query = add_identity_filter(query, node_id)
        try:
            # NOTE(review): with_lockmode() is a deprecated SQLAlchemy
            # API (superseded by with_for_update()) -- confirm against
            # the SQLAlchemy version this project pins.
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.NodeNotFound(node=node_id)

        # Prevent instance_uuid overwriting
        if values.get("instance_uuid") and ref.instance_uuid:
            raise exception.NodeAssociated(
                node=node_id, instance=ref.instance_uuid)

        if 'provision_state' in values:
            values['provision_updated_at'] = timeutils.utcnow()
            if values['provision_state'] == states.INSPECTING:
                # inspection starting: stamp the start time, clear any
                # stale finish time
                values['inspection_started_at'] = timeutils.utcnow()
                values['inspection_finished_at'] = None
            elif (ref.provision_state == states.INSPECTING and
                  values['provision_state'] == states.MANAGEABLE):
                # inspection finished successfully
                values['inspection_finished_at'] = timeutils.utcnow()
                values['inspection_started_at'] = None
            elif (ref.provision_state == states.INSPECTING and
                  values['provision_state'] == states.INSPECTFAIL):
                # inspection failed: only the start time is cleared
                values['inspection_started_at'] = None

        ref.update(values)
        return ref
def get_port_by_id(self, port_id):
    """Look a port up by its integer ID; raise PortNotFound if absent."""
    try:
        return model_query(models.Port).filter_by(id=port_id).one()
    except NoResultFound:
        raise exception.PortNotFound(port=port_id)
def get_port_by_uuid(self, port_uuid):
    """Look a port up by UUID; raise PortNotFound if absent."""
    try:
        return model_query(models.Port).filter_by(uuid=port_uuid).one()
    except NoResultFound:
        raise exception.PortNotFound(port=port_uuid)
def get_port_by_address(self, address):
    """Look a port up by MAC address; raise PortNotFound if absent."""
    try:
        return model_query(models.Port).filter_by(address=address).one()
    except NoResultFound:
        raise exception.PortNotFound(port=address)
def get_port_list(self, limit=None, marker=None,
                  sort_key=None, sort_dir=None):
    """Return a paginated list of all ports."""
    return _paginate_query(models.Port, limit, marker, sort_key, sort_dir)
def get_ports_by_node_id(self, node_id, limit=None, marker=None,
                         sort_key=None, sort_dir=None):
    """Return a paginated list of the ports belonging to a node."""
    node_ports = model_query(models.Port).filter_by(node_id=node_id)
    return _paginate_query(models.Port, limit, marker,
                           sort_key, sort_dir, node_ports)
def create_port(self, values):
    """Create a port, generating a UUID when none is supplied.

    :raises: MACAlreadyExists or PortAlreadyExists on unique-constraint
        violations.
    """
    if not values.get('uuid'):
        values['uuid'] = uuidutils.generate_uuid()

    port = models.Port()
    port.update(values)
    with _session_for_write() as session:
        try:
            session.add(port)
            session.flush()
        except db_exc.DBDuplicateEntry as exc:
            # only two unique constraints exist: address and uuid
            if 'address' in exc.columns:
                raise exception.MACAlreadyExists(mac=values['address'])
            raise exception.PortAlreadyExists(uuid=values['uuid'])
        return port
def update_port(self, port_id, values):
    """Update a port.

    :raises: InvalidParameterValue if 'uuid' is being changed.
    :raises: PortNotFound if the port does not exist.
    :raises: MACAlreadyExists if the new address collides.
    """
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Port.")
        raise exception.InvalidParameterValue(err=msg)

    try:
        with _session_for_write() as session:
            query = model_query(models.Port)
            query = add_port_filter(query, port_id)
            ref = query.one()
            ref.update(values)
            # flush inside the session so constraint violations are
            # raised here and can be translated below
            session.flush()
    except NoResultFound:
        raise exception.PortNotFound(port=port_id)
    except db_exc.DBDuplicateEntry:
        # address is the unique column that can collide on update
        raise exception.MACAlreadyExists(mac=values['address'])
    return ref
def destroy_port(self, port_id):
    """Delete a port; raise PortNotFound when nothing was deleted."""
    with _session_for_write():
        query = add_port_filter(model_query(models.Port), port_id)
        if not query.delete():
            raise exception.PortNotFound(port=port_id)
def get_chassis_by_id(self, chassis_id):
    """Look a chassis up by integer ID; raise ChassisNotFound if absent."""
    try:
        return model_query(models.Chassis).filter_by(id=chassis_id).one()
    except NoResultFound:
        raise exception.ChassisNotFound(chassis=chassis_id)
def get_chassis_by_uuid(self, chassis_uuid):
    """Look a chassis up by UUID; raise ChassisNotFound if absent."""
    try:
        return (model_query(models.Chassis)
                .filter_by(uuid=chassis_uuid)
                .one())
    except NoResultFound:
        raise exception.ChassisNotFound(chassis=chassis_uuid)
def get_chassis_list(self, limit=None, marker=None,
                     sort_key=None, sort_dir=None):
    """Return a paginated list of all chassis."""
    return _paginate_query(models.Chassis, limit, marker,
                           sort_key, sort_dir)
def create_chassis(self, values):
    """Create a chassis, generating a UUID when none is supplied.

    :raises: ChassisAlreadyExists on a duplicate UUID.
    """
    if not values.get('uuid'):
        values['uuid'] = uuidutils.generate_uuid()

    chassis = models.Chassis()
    chassis.update(values)
    with _session_for_write() as session:
        try:
            session.add(chassis)
            session.flush()
        except db_exc.DBDuplicateEntry:
            raise exception.ChassisAlreadyExists(uuid=values['uuid'])
        return chassis
def update_chassis(self, chassis_id, values):
    """Update a chassis.

    :raises: InvalidParameterValue if 'uuid' is being changed.
    :raises: ChassisNotFound if the update matched no row.
    """
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Chassis.")
        raise exception.InvalidParameterValue(err=msg)

    with _session_for_write():
        query = model_query(models.Chassis)
        query = add_identity_filter(query, chassis_id)

        # bulk UPDATE first; re-fetch only when it actually hit a row
        count = query.update(values)
        if count != 1:
            raise exception.ChassisNotFound(chassis=chassis_id)
        ref = query.one()
    return ref
def destroy_chassis(self, chassis_id):
    """Delete a chassis, refusing if any nodes still reference it.

    :raises: ChassisNotEmpty if nodes are attached to the chassis.
    :raises: ChassisNotFound if the chassis does not exist.
    """
    def chassis_not_empty():
        """Checks whether the chassis does not have nodes."""
        query = model_query(models.Node)
        query = add_node_filter_by_chassis(query, chassis_id)
        return query.count() != 0

    with _session_for_write():
        # the emptiness check runs inside the write session so a
        # concurrent node add/delete is less likely to race the delete
        if chassis_not_empty():
            raise exception.ChassisNotEmpty(chassis=chassis_id)

        query = model_query(models.Chassis)
        query = add_identity_filter(query, chassis_id)

        count = query.delete()
        if count != 1:
            raise exception.ChassisNotFound(chassis=chassis_id)
def register_conductor(self, values, update_existing=False):
    """Register (or re-register) a conductor.

    :param values: dict with at least a 'hostname' key.
    :param update_existing: when True, an already-online registration
        is overwritten instead of rejected.
    :returns: the Conductor model row.
    :raises: ConductorAlreadyRegistered if the conductor is online and
        ``update_existing`` is False.
    """
    with _session_for_write() as session:
        query = (model_query(models.Conductor)
                 .filter_by(hostname=values['hostname']))
        try:
            # EAFP upsert: try to load an existing row first
            ref = query.one()
            if ref.online is True and not update_existing:
                raise exception.ConductorAlreadyRegistered(
                    conductor=values['hostname'])
        except NoResultFound:
            ref = models.Conductor()
            session.add(ref)
        ref.update(values)
        # always set online and updated_at fields when registering
        # a conductor, especially when updating an existing one
        ref.update({'updated_at': timeutils.utcnow(),
                    'online': True})
        return ref
def get_conductor(self, hostname):
    """Return the online conductor registered for ``hostname``.

    :raises: ConductorNotFound if no online conductor matches.
    """
    query = model_query(models.Conductor).filter_by(hostname=hostname,
                                                    online=True)
    try:
        return query.one()
    except NoResultFound:
        raise exception.ConductorNotFound(conductor=hostname)
def unregister_conductor(self, hostname):
    """Mark the conductor for ``hostname`` as offline.

    :raises: ConductorNotFound if no online conductor matches.
    """
    with _session_for_write():
        count = (model_query(models.Conductor)
                 .filter_by(hostname=hostname, online=True)
                 .update({'online': False}))
        if not count:
            raise exception.ConductorNotFound(conductor=hostname)
def touch_conductor(self, hostname):
    """Heartbeat a conductor: bump updated_at and force online=True.

    :raises: ConductorNotFound if the conductor is not registered.
    """
    with _session_for_write():
        query = model_query(models.Conductor).filter_by(hostname=hostname)
        # since we're not changing any other field, manually set updated_at
        # and since we're heartbeating, make sure that online=True
        updated = query.update({'updated_at': timeutils.utcnow(),
                                'online': True})
        if not updated:
            raise exception.ConductorNotFound(conductor=hostname)
def clear_node_reservations_for_conductor(self, hostname):
    """Release every node reservation held by a conductor.

    Intended for cleanup when a conductor goes away; logs the UUIDs of
    any nodes whose reservations were cleared.

    :param hostname: hostname of the conductor.
    """
    nodes = []
    with _session_for_write():
        query = (model_query(models.Node)
                 .filter_by(reservation=hostname))
        nodes = [node['uuid'] for node in query]
        query.update({'reservation': None})

    if nodes:
        nodes = ', '.join(nodes)
        # Logger.warn is a deprecated alias of Logger.warning
        LOG.warning(_LW('Cleared reservations held by %(hostname)s: '
                        '%(nodes)s'), {'hostname': hostname, 'nodes': nodes})
def get_active_driver_dict(self, interval=None):
    """Map each driver name to the set of live conductor hostnames.

    :param interval: liveness window in seconds; defaults to the
        configured conductor heartbeat timeout.
    :returns: dict of driver name -> set of hostnames.
    """
    if interval is None:
        interval = CONF.conductor.heartbeat_timeout

    cutoff = timeutils.utcnow() - datetime.timedelta(seconds=interval)
    rows = (model_query(models.Conductor)
            .filter_by(online=True)
            .filter(models.Conductor.updated_at >= cutoff)
            .all())

    # build mapping of drivers to the set of hosts which support them
    mapping = collections.defaultdict(set)
    for conductor in rows:
        for driver in conductor['drivers']:
            mapping[driver].add(conductor['hostname'])
    return mapping
def get_offline_conductors(self):
    """Return hostnames of conductors whose heartbeat has expired."""
    timeout = CONF.conductor.heartbeat_timeout
    cutoff = timeutils.utcnow() - datetime.timedelta(seconds=timeout)
    stale = (model_query(models.Conductor).filter_by()
             .filter(models.Conductor.updated_at < cutoff)
             .all())
    return [conductor['hostname'] for conductor in stale]
def touch_node_provisioning(self, node_id):
    """Bump a node's provision_updated_at timestamp.

    :param node_id: node ID or UUID.
    :raises: NodeNotFound if no node matches ``node_id``.
    """
    with _session_for_write():
        query = model_query(models.Node)
        query = add_identity_filter(query, node_id)
        count = query.update({'provision_updated_at': timeutils.utcnow()})
        if count == 0:
            # keyword form for consistency with the other NodeNotFound
            # call sites in this module
            raise exception.NodeNotFound(node=node_id)
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Database code for Swift """
from contextlib import contextmanager, closing
import base64
import json
import logging
import os
from uuid import uuid4
import sys
import time
import errno
import six
import six.moves.cPickle as pickle
from swift import gettext_ as _
from tempfile import mkstemp
from eventlet import sleep, Timeout
import sqlite3
from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE, \
check_utf8
from swift.common.utils import Timestamp, renamer, \
mkdirs, lock_parent_directory, fallocate, md5
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPBadRequest
#: Whether calls will be made to preallocate disk space for database files.
DB_PREALLOCATION = False
#: Whether calls will be made to log queries (py3 only)
QUERY_LOGGING = False
#: Timeout for trying to connect to a DB
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max size of .pending file in bytes. When this is exceeded, the pending
# records will be merged.
PENDING_CAP = 131072

# Maximum number of '?' placeholders used per SQLite statement (SQLite's
# default bound-parameter limit).
SQLITE_ARG_LIMIT = 999
# Number of tombstone rows examined per batch by TombstoneReclaimer.
RECLAIM_PAGE_SIZE = 10000
def utf8encode(*args):
    """UTF-8 encode any text-type arguments; pass everything else through."""
    encoded = []
    for arg in args:
        if isinstance(arg, six.text_type):
            arg = arg.encode('utf8')
        encoded.append(arg)
    return encoded
def native_str_keys_and_values(metadata):
    """Normalize a metadata dict **in place** so keys are native strings.

    On py2 text keys become UTF-8 bytes; on py3 byte keys become str.
    The first element of each value list is converted the same way.
    Returns None (mutates ``metadata``).
    """
    if six.PY2:
        uni_keys = [k for k in metadata if isinstance(k, six.text_type)]
        for k in uni_keys:
            sv = metadata[k]
            # re-insert under the encoded key
            del metadata[k]
            metadata[k.encode('utf-8')] = [
                x.encode('utf-8') if isinstance(x, six.text_type) else x
                for x in sv]
    else:
        bin_keys = [k for k in metadata if isinstance(k, six.binary_type)]
        for k in bin_keys:
            sv = metadata[k]
            # re-insert under the decoded key
            del metadata[k]
            metadata[k.decode('utf-8')] = [
                x.decode('utf-8') if isinstance(x, six.binary_type) else x
                for x in sv]
# Historical producers wrote several different "no rows" markers; for
# backwards compatibility readers must treat all of them as zero.
ZERO_LIKE_VALUES = {None, '', 0, '0'}


def zero_like(count):
    """Return True if ``count`` is any legacy representation of zero."""
    return count in ZERO_LIKE_VALUES
def _db_timeout(timeout, db_file, call):
    """Run ``call``, retrying sqlite 'locked' errors until ``timeout``.

    Sleeps with exponential backoff (capped at 50ms) between attempts so
    other greenthreads can run while the database is locked.

    :param timeout: seconds before LockTimeout is raised.
    :param db_file: path used to identify the lock in the timeout.
    :param call: zero-argument callable performing the DB operation.
    :returns: whatever ``call`` returns.
    :raises LockTimeout: when ``timeout`` elapses.
    """
    with LockTimeout(timeout, db_file):
        retry_wait = 0.001
        while True:
            try:
                return call()
            except sqlite3.OperationalError as e:
                # only retry lock contention; re-raise everything else
                if 'locked' not in str(e):
                    raise
            sleep(retry_wait)
            retry_wait = min(retry_wait * 2, 0.05)
class DatabaseConnectionError(sqlite3.DatabaseError):
    """A sqlite3.DatabaseError carrying the DB path, timeout and reason."""

    def __init__(self, path, msg, timeout=0):
        self.timeout = timeout
        self.msg = msg
        self.path = path

    def __str__(self):
        detail = (self.path, self.timeout, self.msg)
        return 'DB connection error (%s, %s):\n%s' % detail
class DatabaseAlreadyExists(sqlite3.DatabaseError):
    """Raised when creating a DB whose file already exists on disk."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return 'DB %s already exists' % self.path
class GreenDBConnection(sqlite3.Connection):
    """SQLite DB Connection handler that plays well with eventlet."""

    def __init__(self, database, timeout=None, *args, **kwargs):
        if timeout is None:
            timeout = BROKER_TIMEOUT
        self.timeout = timeout
        self.db_file = database
        # pass a zero native sqlite timeout: lock waits are handled by
        # _db_timeout's green-sleep retry loop instead of blocking in C
        super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs)

    def cursor(self, cls=None):
        # default to GreenDBCursor so execute() also yields to eventlet
        if cls is None:
            cls = GreenDBCursor
        return sqlite3.Connection.cursor(self, cls)

    def commit(self):
        # wrap commit in the lock-retry helper
        return _db_timeout(
            self.timeout, self.db_file,
            lambda: sqlite3.Connection.commit(self))
class GreenDBCursor(sqlite3.Cursor):
    """SQLite Cursor handler that plays well with eventlet."""

    def __init__(self, *args, **kwargs):
        # args[0] is the owning GreenDBConnection; borrow its settings
        self.timeout = args[0].timeout
        self.db_file = args[0].db_file
        super(GreenDBCursor, self).__init__(*args, **kwargs)

    def execute(self, *args, **kwargs):
        # wrap execute in the lock-retry helper
        return _db_timeout(
            self.timeout, self.db_file, lambda: sqlite3.Cursor.execute(
                self, *args, **kwargs))
def dict_factory(crs, row):
    """
    This should only be used when you need a real dict,
    i.e. when you're going to serialize the results.
    """
    return {col[0]: row[idx] for idx, col in enumerate(crs.description)}
def chexor(old, name, timestamp):
    """
    Each entry in the account and container databases is XORed by the 128-bit
    hash on insert or delete. This serves as a rolling, order-independent hash
    of the contents. (check + XOR)

    :param old: hex representation of the current DB hash
    :param name: name of the object or container being inserted
    :param timestamp: internalized timestamp of the new record
    :returns: a hex representation of the new hash value
    """
    if name is None:
        raise Exception('name is None!')
    entry = ('%s-%s' % (name, timestamp)).encode('utf8')
    new = md5(entry, usedforsecurity=False).hexdigest()
    return '%032x' % (int(old, 16) ^ int(new, 16))
def get_db_connection(path, timeout=30, logger=None, okay_to_create=False):
    """
    Returns a properly configured SQLite database connection.

    :param path: path to DB
    :param timeout: timeout for connection
    :param logger: logger whose debug method receives SQL traces when
        QUERY_LOGGING is enabled (py3 only)
    :param okay_to_create: if True, create the DB if it doesn't exist
    :returns: DB connection object
    :raises DatabaseConnectionError: if connect implicitly created the
        file, or on any underlying sqlite3.DatabaseError
    """
    try:
        connect_time = time.time()
        conn = sqlite3.connect(path, check_same_thread=False,
                               factory=GreenDBConnection, timeout=timeout)
        if QUERY_LOGGING and logger and not six.PY2:
            conn.set_trace_callback(logger.debug)
        if path != ':memory:' and not okay_to_create:
            # attempt to detect and fail when connect creates the db file
            # (an empty file whose ctime is at/after connect started)
            stat = os.stat(path)
            if stat.st_size == 0 and stat.st_ctime >= connect_time:
                os.unlink(path)
                raise DatabaseConnectionError(path,
                                              'DB file created by connect?')
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        with closing(conn.cursor()) as cur:
            cur.execute('PRAGMA synchronous = NORMAL')
            cur.execute('PRAGMA count_changes = OFF')
            cur.execute('PRAGMA temp_store = MEMORY')
            cur.execute('PRAGMA journal_mode = DELETE')
        conn.create_function('chexor', 3, chexor)
    except sqlite3.DatabaseError:
        import traceback
        # wrap with the full traceback as the message for diagnostics
        raise DatabaseConnectionError(path, traceback.format_exc(),
                                      timeout=timeout)
    return conn
class TombstoneReclaimer(object):
    """Encapsulates reclamation of deleted rows in a database."""

    def __init__(self, broker, age_timestamp):
        """
        Encapsulates reclamation of deleted rows in a database.

        :param broker: an instance of :class:`~swift.common.db.DatabaseBroker`.
        :param age_timestamp: a float timestamp: tombstones older than this
            time will be deleted.
        """
        self.broker = broker
        self.age_timestamp = age_timestamp
        # name-ordered batching cursor; '' sorts before every name
        self.marker = ''
        self.remaining_tombstones = self.reclaimed = 0
        self.finished = False
        # limit 1 offset N gives back the N+1th matching row; that row is used
        # as an exclusive end_marker for a batch of deletes, so a batch
        # comprises rows satisfying self.marker <= name < end_marker.
        self.batch_query = '''
            SELECT name FROM %s WHERE deleted = 1
            AND name >= ?
            ORDER BY NAME LIMIT 1 OFFSET ?
        ''' % self.broker.db_contains_type
        self.clean_batch_query = '''
            DELETE FROM %s WHERE deleted = 1
            AND name >= ? AND %s < %s
        ''' % (self.broker.db_contains_type, self.broker.db_reclaim_timestamp,
               self.age_timestamp)

    def _reclaim(self, conn):
        """Delete one batch of old tombstones; advance or finish."""
        curs = conn.execute(self.batch_query, (self.marker, RECLAIM_PAGE_SIZE))
        row = curs.fetchone()
        end_marker = row[0] if row else ''
        if end_marker:
            # do a single book-ended DELETE and bounce out
            curs = conn.execute(self.clean_batch_query + ' AND name < ?',
                                (self.marker, end_marker))
            self.marker = end_marker
            self.reclaimed += curs.rowcount
            # rows in the page that were NOT deleted are still tombstones
            self.remaining_tombstones += RECLAIM_PAGE_SIZE - curs.rowcount
        else:
            # delete off the end
            curs = conn.execute(self.clean_batch_query, (self.marker,))
            self.finished = True
            self.reclaimed += curs.rowcount

    def reclaim(self):
        """
        Perform reclaim of deleted rows older than ``age_timestamp``.
        """
        # one transaction per batch keeps lock hold times short
        while not self.finished:
            with self.broker.get() as conn:
                self._reclaim(conn)
                conn.commit()

    def get_tombstone_count(self):
        """
        Return the number of remaining tombstones newer than ``age_timestamp``.

        Executes the ``reclaim`` method if it has not already been called on
        this instance.

        :return: The number of tombstones in the ``broker`` that are newer than
            ``age_timestamp``.
        """
        if not self.finished:
            self.reclaim()
        with self.broker.get() as conn:
            curs = conn.execute('''
                SELECT COUNT(*) FROM %s WHERE deleted = 1
                AND name >= ?
            ''' % (self.broker.db_contains_type,), (self.marker,))
            tombstones = curs.fetchone()[0]
            self.remaining_tombstones += tombstones
        return self.remaining_tombstones
class DatabaseBroker(object):
"""Encapsulates working with a database."""
delete_meta_whitelist = []
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
             account=None, container=None, pending_timeout=None,
             stale_reads_ok=False, skip_commits=False):
    """Encapsulates working with a database.

    :param db_file: path to a database file.
    :param timeout: timeout used for database operations.
    :param logger: a logger instance.
    :param account: name of account.
    :param container: name of container.
    :param pending_timeout: timeout used when attempting to take a lock to
        write to pending file.
    :param stale_reads_ok: if True then no error is raised if pending
        commits cannot be committed before the database is read, otherwise
        an error is raised.
    :param skip_commits: if True then this broker instance will never
        commit records from the pending file to the database;
        :meth:`~swift.common.db.DatabaseBroker.put_record` should not
        called on brokers with skip_commits True.
    """
    # conn is lazily opened by get()/lock(); None means not connected
    self.conn = None
    self._db_file = db_file
    # deferred writes accumulate in a sidecar '.pending' file
    self.pending_file = self._db_file + '.pending'
    self.pending_timeout = pending_timeout or 10
    self.stale_reads_ok = stale_reads_ok
    self.db_dir = os.path.dirname(db_file)
    self.timeout = timeout
    self.logger = logger or logging.getLogger()
    self.account = account
    self.container = container
    # -1 = schema version not yet probed
    self._db_version = -1
    self.skip_commits = skip_commits
def __str__(self):
    """Identify this broker to a human: the database's full pathname.

    Vital for useful diagnostics.
    """
    return self.db_file
def initialize(self, put_timestamp=None, storage_policy_index=None):
    """
    Create the DB

    The storage_policy_index is passed through to the subclass's
    ``_initialize`` method.  It is ignored by ``AccountBroker``.

    :param put_timestamp: internalized timestamp of initial PUT request
    :param storage_policy_index: only required for containers
    :raises DatabaseAlreadyExists: if the DB file appeared while we were
        building it in a temp file.
    """
    if self._db_file == ':memory:':
        tmp_db_file = None
        conn = get_db_connection(self._db_file, self.timeout, self.logger)
    else:
        # build the schema in a temp file, fsync, then rename into place
        mkdirs(self.db_dir)
        fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
        os.close(fd)
        conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
                               factory=GreenDBConnection, timeout=0)
        if QUERY_LOGGING and not six.PY2:
            conn.set_trace_callback(self.logger.debug)
    # creating dbs implicitly does a lot of transactions, so we
    # pick fast, unsafe options here and do a big fsync at the end.
    with closing(conn.cursor()) as cur:
        cur.execute('PRAGMA synchronous = OFF')
        cur.execute('PRAGMA temp_store = MEMORY')
        cur.execute('PRAGMA journal_mode = MEMORY')
    conn.create_function('chexor', 3, chexor)
    conn.row_factory = sqlite3.Row
    conn.text_factory = str
    # common sync bookkeeping tables; triggers keep updated_at current
    conn.executescript("""
        CREATE TABLE outgoing_sync (
            remote_id TEXT UNIQUE,
            sync_point INTEGER,
            updated_at TEXT DEFAULT 0
        );
        CREATE TABLE incoming_sync (
            remote_id TEXT UNIQUE,
            sync_point INTEGER,
            updated_at TEXT DEFAULT 0
        );
        CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
        BEGIN
            UPDATE outgoing_sync
            SET updated_at = STRFTIME('%s', 'NOW')
            WHERE ROWID = new.ROWID;
        END;
        CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
        BEGIN
            UPDATE outgoing_sync
            SET updated_at = STRFTIME('%s', 'NOW')
            WHERE ROWID = new.ROWID;
        END;
        CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
        BEGIN
            UPDATE incoming_sync
            SET updated_at = STRFTIME('%s', 'NOW')
            WHERE ROWID = new.ROWID;
        END;
        CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
        BEGIN
            UPDATE incoming_sync
            SET updated_at = STRFTIME('%s', 'NOW')
            WHERE ROWID = new.ROWID;
        END;
    """)
    if not put_timestamp:
        put_timestamp = Timestamp(0).internal
    # subclass creates its own tables/stat rows
    self._initialize(conn, put_timestamp,
                     storage_policy_index=storage_policy_index)
    conn.commit()
    if tmp_db_file:
        conn.close()
        # durably flush the temp file before renaming it into place
        with open(tmp_db_file, 'r+b') as fp:
            os.fsync(fp.fileno())
        with lock_parent_directory(self.db_file, self.pending_timeout):
            if os.path.exists(self.db_file):
                # It's as if there was a "condition" where different parts
                # of the system were "racing" each other.
                raise DatabaseAlreadyExists(self.db_file)
            renamer(tmp_db_file, self.db_file)
        self.conn = get_db_connection(self.db_file, self.timeout,
                                      self.logger)
    else:
        self.conn = conn
def delete_db(self, timestamp):
    """
    Mark the DB as deleted

    :param timestamp: internalized delete timestamp
    """
    # first, clear the metadata
    cleared_meta = {}
    for k in self.metadata:
        # entries on the whitelist survive deletion
        if k.lower() in self.delete_meta_whitelist:
            continue
        cleared_meta[k] = ('', timestamp)
    self.update_metadata(cleared_meta)
    # then mark the db as deleted
    with self.get() as conn:
        # only move delete_timestamp forward, never backward
        conn.execute(
            """
            UPDATE %s_stat
            SET delete_timestamp = ?,
                status = 'DELETED',
                status_changed_at = ?
            WHERE delete_timestamp < ? """ % self.db_type,
            (timestamp, timestamp, timestamp))
        conn.commit()
@property
def db_file(self):
    """Path to the database file (may be ':memory:')."""
    return self._db_file
def get_device_path(self):
    """Return the device root: four path components above the DB dir."""
    # strips, in turn: the hash dir, suffix, partition and dbs dirs --
    # presumably <device>/<dbs>/<partition>/<suffix>/<hash>; verify
    # against the on-disk layout if this changes.
    path = self.db_dir
    for _ in range(4):
        path = os.path.dirname(path)
    return path
def quarantine(self, reason):
    """
    The database will be quarantined and a
    sqlite3.DatabaseError will be raised indicating the action taken.

    :param reason: human-readable cause, included in the log and the
        raised exception's message.
    """
    device_path = self.get_device_path()
    quar_path = os.path.join(device_path, 'quarantined',
                             self.db_type + 's',
                             os.path.basename(self.db_dir))
    try:
        renamer(self.db_dir, quar_path, fsync=False)
    except OSError as e:
        if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
        # destination already used: make the target unique and retry
        quar_path = "%s-%s" % (quar_path, uuid4().hex)
        renamer(self.db_dir, quar_path, fsync=False)
    detail = _('Quarantined %(db_dir)s to %(quar_path)s due to '
               '%(reason)s') % {'db_dir': self.db_dir,
                                'quar_path': quar_path,
                                'reason': reason}
    self.logger.error(detail)
    raise sqlite3.DatabaseError(detail)
def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
    """
    Checks the exception info to see if it indicates a quarantine situation
    (malformed or corrupted database). If not, the original exception will
    be reraised. If so, the database will be quarantined and a new
    sqlite3.DatabaseError will be raised indicating the action taken.

    :param exc_type: exception class (as from ``sys.exc_info()``).
    :param exc_value: exception instance.
    :param exc_traceback: traceback object.
    """
    # classify by matching known sqlite error message substrings
    if 'database disk image is malformed' in str(exc_value):
        exc_hint = 'malformed database'
    elif 'malformed database schema' in str(exc_value):
        exc_hint = 'malformed database'
    elif ' is not a database' in str(exc_value):
        # older versions said 'file is not a database'
        # now 'file is encrypted or is not a database'
        exc_hint = 'corrupted database'
    elif 'disk I/O error' in str(exc_value):
        exc_hint = 'disk error while accessing database'
    else:
        # not a quarantine case: propagate the original exception
        six.reraise(exc_type, exc_value, exc_traceback)

    self.quarantine(exc_hint)
@contextmanager
def updated_timeout(self, new_timeout):
    """Use with "with" statement; updates ``timeout`` within the block.

    Also updates the open connection's timeout, if any; both are
    restored on exit.  Yields the previous timeout value.
    """
    old_timeout = self.timeout
    try:
        self.timeout = new_timeout
        if self.conn:
            self.conn.timeout = new_timeout
        yield old_timeout
    finally:
        self.timeout = old_timeout
        if self.conn:
            self.conn.timeout = old_timeout
@contextmanager
def maybe_get(self, conn):
    """Yield ``conn`` if given, otherwise a fresh connection via get()."""
    if conn:
        yield conn
    else:
        with self.get() as conn:
            yield conn
@contextmanager
def get(self):
    """Use with the "with" statement; returns a database connection.

    Pools a single connection on the broker: it is checked out for the
    duration of the block and returned (after a rollback) on clean exit.
    DatabaseErrors inside the block may quarantine the DB.
    """
    if not self.conn:
        if self.db_file != ':memory:' and os.path.exists(self.db_file):
            try:
                self.conn = get_db_connection(self.db_file, self.timeout,
                                              self.logger)
            except (sqlite3.DatabaseError, DatabaseConnectionError):
                self.possibly_quarantine(*sys.exc_info())
        else:
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
    # check the connection out so concurrent users open their own
    conn = self.conn
    self.conn = None
    try:
        yield conn
        # discard any uncommitted work before re-pooling
        conn.rollback()
        self.conn = conn
    except sqlite3.DatabaseError:
        try:
            conn.close()
        except Exception:
            pass
        self.possibly_quarantine(*sys.exc_info())
    except (Exception, Timeout):
        # connection state unknown: drop it rather than re-pool it
        conn.close()
        raise
@contextmanager
def lock(self):
    """Use with the "with" statement; locks a database.

    Takes an exclusive sqlite lock (BEGIN IMMEDIATE) for the duration
    of the block; all work is rolled back on exit.
    """
    if not self.conn:
        if self.db_file != ':memory:' and os.path.exists(self.db_file):
            self.conn = get_db_connection(self.db_file, self.timeout,
                                          self.logger)
        else:
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
    # check the connection out, as in get()
    conn = self.conn
    self.conn = None
    # autocommit mode is needed for explicit transaction control
    orig_isolation_level = conn.isolation_level
    conn.isolation_level = None
    conn.execute('BEGIN IMMEDIATE')
    try:
        yield True
    except (Exception, Timeout):
        # errors inside the block are deliberately swallowed; the
        # rollback below undoes any partial work
        pass
    try:
        conn.execute('ROLLBACK')
        conn.isolation_level = orig_isolation_level
        self.conn = conn
    except (Exception, Timeout):
        logging.exception(
            _('Broker error trying to rollback locked connection'))
        conn.close()
def newid(self, remote_id):
    """
    Re-id the database.  This should be called after an rsync.

    :param remote_id: the ID of the remote database being rsynced in
    """
    with self.get() as conn:
        # give this replica a brand-new unique id
        row = conn.execute('''
            UPDATE %s_stat SET id=?
        ''' % self.db_type, (str(uuid4()),))
        # record the highest ROWID as the sync point for the sender
        row = conn.execute('''
            SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1
        ''' % self.db_contains_type).fetchone()
        sync_point = row['ROWID'] if row else -1
        conn.execute('''
            INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id)
            VALUES (?, ?)
        ''', (sync_point, remote_id))
        self._newid(conn)
        conn.commit()
def _newid(self, conn):
    """Subclass hook: extra work after a database is re-id'd."""
    # Override for additional work when receiving an rsynced db.
    pass
def _is_deleted(self, conn):
    """
    Check if the database is considered deleted

    :param conn: database conn
    :returns: True if the DB is considered to be deleted, False otherwise
    :raises NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError()
def is_deleted(self):
    """
    Check if the DB is considered to be deleted.

    :returns: True if the DB is considered to be deleted, False otherwise
    """
    missing = (self.db_file != ':memory:'
               and not os.path.exists(self.db_file))
    if missing:
        return True
    self._commit_puts_stale_ok()
    with self.get() as conn:
        return self._is_deleted(conn)
def empty(self):
    """
    Check if the broker abstraction contains any undeleted records.

    :raises NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError()
def is_reclaimable(self, now, reclaim_age):
    """
    Check if the broker abstraction is empty, and has been marked deleted
    for at least a reclaim age.
    """
    info = self.get_replication_info()
    if not zero_like(info['count']):
        return False
    # deleted strictly after the last PUT, and old enough to reclaim
    deleted_at = Timestamp(info['delete_timestamp'])
    return (Timestamp(now - reclaim_age) > deleted_at
            > Timestamp(info['put_timestamp']))
def merge_timestamps(self, created_at, put_timestamp, delete_timestamp):
    """
    Used in replication to handle updating timestamps.

    Keeps the earliest created_at and the latest put/delete timestamps;
    if the merge flips the deleted status, status_changed_at is bumped.

    :param created_at: create timestamp
    :param put_timestamp: put timestamp
    :param delete_timestamp: delete timestamp
    """
    with self.get() as conn:
        old_status = self._is_deleted(conn)
        conn.execute('''
            UPDATE %s_stat SET created_at=MIN(?, created_at),
                               put_timestamp=MAX(?, put_timestamp),
                               delete_timestamp=MAX(?, delete_timestamp)
        ''' % self.db_type, (created_at, put_timestamp, delete_timestamp))
        if old_status != self._is_deleted(conn):
            timestamp = Timestamp.now()
            self._update_status_changed_at(conn, timestamp.internal)

        conn.commit()
def get_items_since(self, start, count):
    """
    Get a list of objects in the database between start and end.

    :param start: start ROWID
    :param count: number to get
    :returns: list of objects between start and end
    """
    self._commit_puts_stale_ok()
    with self.get() as conn:
        curs = conn.execute('''
            SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
        ''' % self.db_contains_type, (start, count))
        curs.row_factory = dict_factory
        return list(curs)
def get_sync(self, id, incoming=True):
    """
    Gets the most recent sync point for a server from the sync table.

    :param id: remote ID to get the sync_point for
    :param incoming: if True, get the last incoming sync, otherwise get
                     the last outgoing sync
    :returns: the sync point, or -1 if the id doesn't exist.
    """
    table = 'incoming' if incoming else 'outgoing'
    with self.get() as conn:
        row = conn.execute(
            "SELECT sync_point FROM %s_sync WHERE remote_id=?"
            % table, (id,)).fetchone()
        return row['sync_point'] if row else -1
def get_syncs(self, incoming=True):
"""
Get a serialized copy of the sync table.
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
:returns: list of {'remote_id', 'sync_point'}
"""
with self.get() as conn:
curs = conn.execute('''
SELECT remote_id, sync_point FROM %s_sync
''' % ('incoming' if incoming else 'outgoing'))
result = []
for row in curs:
result.append({'remote_id': row[0], 'sync_point': row[1]})
return result
    def get_max_row(self, table=None):
        """
        Get the largest ROWID ever assigned in ``table``.

        Reads SQLITE_SEQUENCE, the internal table SQLite uses to track the
        last AUTOINCREMENT value handed out per table, so the answer is
        correct even if the highest rows have since been deleted.

        :param table: table name to query; defaults to
                      ``self.db_contains_type``
        :returns: the max assigned row id, or -1 if the table has no entry
                  in SQLITE_SEQUENCE (i.e. no row was ever inserted)
        """
        if not table:
            table = self.db_contains_type
        query = '''
            SELECT SQLITE_SEQUENCE.seq
            FROM SQLITE_SEQUENCE
            WHERE SQLITE_SEQUENCE.name == '%s'
            LIMIT 1
        ''' % (table, )
        with self.get() as conn:
            row = conn.execute(query).fetchone()
            return row[0] if row else -1
def get_replication_info(self):
"""
Get information about the DB required for replication.
:returns: dict containing keys from get_info plus max_row and metadata
Note:: get_info's <db_contains_type>_count is translated to just
"count" and metadata is the raw string.
"""
info = self.get_info()
info['count'] = info.pop('%s_count' % self.db_contains_type)
info['metadata'] = self.get_raw_metadata()
info['max_row'] = self.get_max_row()
return info
    def get_info(self):
        """
        Get global data for the DB.

        :returns: the single row of the <db_type>_stat table as a dict,
                  with column names as keys
        """
        # Fold in any pending records first (best effort for stale readers).
        self._commit_puts_stale_ok()
        with self.get() as conn:
            curs = conn.execute('SELECT * from %s_stat' % self.db_type)
            curs.row_factory = dict_factory
            return curs.fetchone()
    def put_record(self, record):
        """
        Put a record into the DB. If the DB has an associated pending file with
        space then the record is appended to that file and a commit to the DB
        is deferred. If the DB is in-memory or its pending file is full then
        the record will be committed immediately.

        :param record: a record to be added to the DB.
        :raises DatabaseConnectionError: if the DB file does not exist or if
            ``skip_commits`` is True.
        :raises LockTimeout: if a timeout occurs while waiting to take a lock
            to write to the pending file.
        """
        if self._db_file == ':memory:':
            # In-memory DBs have no pending file; merge straight away.
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        if self.skip_commits:
            raise DatabaseConnectionError(self.db_file,
                                          'commits not accepted')
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            pending_size = 0
            try:
                pending_size = os.path.getsize(self.pending_file)
            except OSError as err:
                # A missing pending file just means nothing is queued yet.
                if err.errno != errno.ENOENT:
                    raise
            if pending_size > PENDING_CAP:
                # Pending file is full: flush it, including this record.
                self._commit_puts([record])
            else:
                # Defer the commit: append this record to the pending file
                # as a colon-prefixed, base64-encoded pickle.
                with open(self.pending_file, 'a+b') as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(b':')
                    fp.write(base64.b64encode(pickle.dumps(
                        self.make_tuple_for_pickle(record),
                        protocol=PICKLE_PROTOCOL)))
                    fp.flush()
def _skip_commit_puts(self):
return (self._db_file == ':memory:' or self.skip_commits or not
os.path.exists(self.pending_file))
    def _commit_puts(self, item_list=None):
        """
        Scan for .pending files and commit the found records by feeding them
        to merge_items(). Assume that lock_parent_directory has already been
        called.

        :param item_list: A list of items to commit in addition to .pending
        :raises DatabaseConnectionError: if this broker cannot accept commits
            but ``item_list`` is non-empty
        """
        if self._skip_commit_puts():
            if item_list:
                # this broker instance should not be used to commit records,
                # but if it is then raise an error rather than quietly
                # discarding the records in item_list.
                raise DatabaseConnectionError(self.db_file,
                                              'commits not accepted')
            return
        if item_list is None:
            item_list = []
        self._preallocate()
        if not os.path.getsize(self.pending_file):
            # Nothing queued on disk; merge only the explicitly passed items.
            if item_list:
                self.merge_items(item_list)
            return
        with open(self.pending_file, 'r+b') as fp:
            # Entries are colon-delimited, base64-encoded pickles; see
            # put_record() for the writer side of this format.
            for entry in fp.read().split(b':'):
                if entry:
                    try:
                        if six.PY2:
                            data = pickle.loads(base64.b64decode(entry))
                        else:
                            data = pickle.loads(base64.b64decode(entry),
                                                encoding='utf8')
                        self._commit_puts_load(item_list, data)
                    except Exception:
                        # A corrupt entry is logged and skipped rather than
                        # aborting the whole commit.
                        self.logger.exception(
                            _('Invalid pending entry %(file)s: %(entry)s'),
                            {'file': self.pending_file, 'entry': entry})
            if item_list:
                self.merge_items(item_list)
            try:
                # Records committed; empty the pending file in place.
                os.ftruncate(fp.fileno(), 0)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
    def _commit_puts_stale_ok(self):
        """
        Catch failures of _commit_puts() if broker is intended for
        reading of stats, and thus does not care for pending updates.
        """
        if self._skip_commit_puts():
            return
        try:
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                self._commit_puts()
        except (LockTimeout, sqlite3.OperationalError):
            # Readers that tolerate slightly stale stats may proceed even
            # when the pending records cannot be flushed right now.
            if not self.stale_reads_ok:
                raise
def _commit_puts_load(self, item_list, entry):
"""
Unmarshall the :param:entry tuple and append it to :param:item_list.
This is implemented by a particular broker to be compatible
with its :func:`merge_items`.
"""
raise NotImplementedError
def merge_items(self, item_list, source=None):
"""
Save :param:item_list to the database.
"""
raise NotImplementedError
def make_tuple_for_pickle(self, record):
"""
Turn this db record dict into the format this service uses for
pending pickles.
"""
raise NotImplementedError
    def merge_syncs(self, sync_points, incoming=True):
        """
        Merge a list of sync points with the incoming sync table.

        :param sync_points: list of sync points where a sync point is a dict of
                            {'sync_point', 'remote_id'}
        :param incoming: if True, get the last incoming sync, otherwise get
                         the last outgoing sync
        """
        with self.get() as conn:
            for rec in sync_points:
                try:
                    # EAFP upsert: try inserting a fresh row first...
                    conn.execute('''
                        INSERT INTO %s_sync (sync_point, remote_id)
                        VALUES (?, ?)
                    ''' % ('incoming' if incoming else 'outgoing'),
                        (rec['sync_point'], rec['remote_id']))
                except sqlite3.IntegrityError:
                    # ...and on a remote_id conflict only ever advance the
                    # stored sync_point, never move it backwards.
                    conn.execute('''
                        UPDATE %s_sync SET sync_point=max(?, sync_point)
                        WHERE remote_id=?
                    ''' % ('incoming' if incoming else 'outgoing'),
                        (rec['sync_point'], rec['remote_id']))
            conn.commit()
def _preallocate(self):
"""
The idea is to allocate space in front of an expanding db. If it gets
within 512k of a boundary, it allocates to the next boundary.
Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m after.
"""
if not DB_PREALLOCATION or self._db_file == ':memory:':
return
MB = (1024 * 1024)
def prealloc_points():
for pm in (1, 2, 5, 10, 25, 50):
yield pm * MB
while True:
pm += 50
yield pm * MB
stat = os.stat(self.db_file)
file_size = stat.st_size
allocated_size = stat.st_blocks * 512
for point in prealloc_points():
if file_size <= point - MB / 2:
prealloc_size = point
break
if allocated_size < prealloc_size:
with open(self.db_file, 'rb+') as fp:
fallocate(fp.fileno(), int(prealloc_size))
    def get_raw_metadata(self):
        """
        Fetch the raw serialized metadata string from the <db_type>_stat
        table, without deserializing it.

        :returns: the metadata column value, or '' if the column does not
                  exist yet (old schema without a metadata column)
        """
        with self.get() as conn:
            try:
                row = conn.execute('SELECT metadata FROM %s_stat' %
                                   self.db_type).fetchone()
                if not row:
                    # NOTE(review): quarantine presumably raises; otherwise
                    # row[0] below would fail on None — confirm.
                    self.quarantine("missing row in %s_stat table" %
                                    self.db_type)
                metadata = row[0]
            except sqlite3.OperationalError as err:
                # Old DBs may predate the metadata column entirely.
                if 'no such column: metadata' not in str(err):
                    raise
                metadata = ''
        return metadata
    @property
    def metadata(self):
        """
        Returns the metadata dict for the database. The metadata dict values
        are tuples of (value, timestamp) where the timestamp indicates when
        that key was set to that value.
        """
        metadata = self.get_raw_metadata()
        if metadata:
            metadata = json.loads(metadata)
            # Presumably normalizes keys/values to native str in place
            # (py2/py3 unicode differences) — verify against utils.
            native_str_keys_and_values(metadata)
        else:
            metadata = {}
        return metadata
@staticmethod
def validate_metadata(metadata):
"""
Validates that metadata falls within acceptable limits.
:param metadata: to be validated
:raises HTTPBadRequest: if MAX_META_COUNT or MAX_META_OVERALL_SIZE
is exceeded, or if metadata contains non-UTF-8 data
"""
meta_count = 0
meta_size = 0
for key, (value, timestamp) in metadata.items():
if key and not check_utf8(key):
raise HTTPBadRequest('Metadata must be valid UTF-8')
if value and not check_utf8(value):
raise HTTPBadRequest('Metadata must be valid UTF-8')
key = key.lower()
if value and key.startswith(('x-account-meta-',
'x-container-meta-')):
prefix = 'x-account-meta-'
if key.startswith('x-container-meta-'):
prefix = 'x-container-meta-'
key = key[len(prefix):]
meta_count = meta_count + 1
meta_size = meta_size + len(key) + len(value)
if meta_count > MAX_META_COUNT:
raise HTTPBadRequest('Too many metadata items; max %d'
% MAX_META_COUNT)
if meta_size > MAX_META_OVERALL_SIZE:
raise HTTPBadRequest('Total metadata too large; max %d'
% MAX_META_OVERALL_SIZE)
    def update_metadata(self, metadata_updates, validate_metadata=False):
        """
        Updates the metadata dict for the database. The metadata dict values
        are tuples of (value, timestamp) where the timestamp indicates when
        that key was set to that value. Key/values will only be overwritten if
        the timestamp is newer. To delete a key, set its value to ('',
        timestamp). These empty keys will eventually be removed by
        :func:`reclaim`
        """
        old_metadata = self.metadata
        # Fast path: if every updated key already exists and none of the
        # updates carries a newer timestamp, there is nothing to write.
        if set(metadata_updates).issubset(set(old_metadata)):
            for key, (value, timestamp) in metadata_updates.items():
                if timestamp > old_metadata[key][1]:
                    break
            else:
                return
        with self.get() as conn:
            try:
                row = conn.execute('SELECT metadata FROM %s_stat' %
                                   self.db_type).fetchone()
                if not row:
                    self.quarantine("missing row in %s_stat table" %
                                    self.db_type)
                md = row[0]
                md = json.loads(md) if md else {}
                native_str_keys_and_values(md)
            except sqlite3.OperationalError as err:
                if 'no such column: metadata' not in str(err):
                    raise
                # Old schema: add the metadata column on first use.
                conn.execute("""
                    ALTER TABLE %s_stat
                    ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type)
                md = {}
            # Merge, keeping whichever (value, timestamp) is newest per key.
            for key, value_timestamp in metadata_updates.items():
                value, timestamp = value_timestamp
                if key not in md or timestamp > md[key][1]:
                    md[key] = value_timestamp
            if validate_metadata:
                DatabaseBroker.validate_metadata(md)
            conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type,
                         (json.dumps(md),))
            conn.commit()
    def reclaim(self, age_timestamp, sync_timestamp):
        """
        Delete reclaimable rows and metadata from the db.

        By default this method will delete rows from the db_contains_type table
        that are marked deleted and whose created_at timestamp is <
        age_timestamp, and deletes rows from incoming_sync and outgoing_sync
        where the updated_at timestamp is < sync_timestamp. In addition, this
        calls the :meth:`_reclaim_metadata` method.

        Subclasses may reclaim other items by overriding :meth:`_reclaim`.

        :param age_timestamp: max created_at timestamp of object rows to delete
        :param sync_timestamp: max update_at timestamp of sync rows to delete
        :returns: the :class:`TombstoneReclaimer` instance used to reclaim
                  the tombstone rows
        """
        if not self._skip_commit_puts():
            # Flush pending records first so they are subject to reclaim too.
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                self._commit_puts()
        tombstone_reclaimer = TombstoneReclaimer(self, age_timestamp)
        tombstone_reclaimer.reclaim()
        with self.get() as conn:
            self._reclaim_other_stuff(conn, age_timestamp, sync_timestamp)
            conn.commit()
        return tombstone_reclaimer
    def _reclaim_other_stuff(self, conn, age_timestamp, sync_timestamp):
        """
        This is only called once at the end of reclaim after tombstone reclaim
        has been completed.

        :param conn: database connection to use; not committed here
        :param age_timestamp: cutoff passed to :meth:`_reclaim_metadata`
        :param sync_timestamp: cutoff passed to :meth:`_reclaim_sync`
        """
        self._reclaim_sync(conn, sync_timestamp)
        self._reclaim_metadata(conn, age_timestamp)
def _reclaim_sync(self, conn, sync_timestamp):
try:
conn.execute('''
DELETE FROM outgoing_sync WHERE updated_at < ?
''', (sync_timestamp,))
conn.execute('''
DELETE FROM incoming_sync WHERE updated_at < ?
''', (sync_timestamp,))
except sqlite3.OperationalError as err:
# Old dbs didn't have updated_at in the _sync tables.
if 'no such column: updated_at' not in str(err):
raise
    def _reclaim_metadata(self, conn, timestamp):
        """
        Removes any empty metadata values older than the timestamp using the
        given database connection. This function will not call commit on the
        conn, but will instead return True if the database needs committing.
        This function was created as a worker to limit transactions and commits
        from other related functions.

        :param conn: Database connection to reclaim metadata within.
        :param timestamp: Empty metadata items last updated before this
                          timestamp will be removed.
        :returns: True if conn.commit() should be called
        """
        timestamp = Timestamp(timestamp)
        try:
            row = conn.execute('SELECT metadata FROM %s_stat' %
                               self.db_type).fetchone()
            if not row:
                self.quarantine("missing row in %s_stat table" %
                                self.db_type)
            md = row[0]
            if md:
                md = json.loads(md)
                # An empty value marks a deleted key (see update_metadata);
                # drop those whose tombstone timestamp is old enough.
                keys_to_delete = []
                for key, (value, value_timestamp) in md.items():
                    if value == '' and Timestamp(value_timestamp) < timestamp:
                        keys_to_delete.append(key)
                if keys_to_delete:
                    for key in keys_to_delete:
                        del md[key]
                    conn.execute('UPDATE %s_stat SET metadata = ?' %
                                 self.db_type, (json.dumps(md),))
                    return True
        except sqlite3.OperationalError as err:
            # Old DBs may not have a metadata column at all.
            if 'no such column: metadata' not in str(err):
                raise
        return False
def update_put_timestamp(self, timestamp):
"""
Update the put_timestamp. Only modifies it if it is greater than
the current timestamp.
:param timestamp: internalized put timestamp
"""
with self.get() as conn:
conn.execute(
'UPDATE %s_stat SET put_timestamp = ?'
' WHERE put_timestamp < ?' % self.db_type,
(timestamp, timestamp))
conn.commit()
    def update_status_changed_at(self, timestamp):
        """
        Update the status_changed_at field in the stat table. Only
        modifies status_changed_at if the timestamp is greater than the
        current status_changed_at timestamp.

        :param timestamp: internalized timestamp
        """
        with self.get() as conn:
            # The conditional UPDATE is shared with merge_timestamps via
            # this helper; committed here because we own the transaction.
            self._update_status_changed_at(conn, timestamp)
            conn.commit()
def _update_status_changed_at(self, conn, timestamp):
conn.execute(
'UPDATE %s_stat SET status_changed_at = ?'
' WHERE status_changed_at < ?' % self.db_type,
(timestamp, timestamp))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.