gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import routes
import webob
import webtest
from quantum.api import extensions
from quantum.common import config
from quantum.db import db_base_plugin_v2
from quantum.openstack.common import jsonutils
from quantum.openstack.common import log as logging
from quantum.plugins.common import constants
from quantum.tests import base
from quantum.tests.unit import extension_stubs as ext_stubs
import quantum.tests.unit.extensions
from quantum.tests.unit import testlib_api
from quantum import wsgi
# Module-level logger for these extension tests.
LOG = logging.getLogger(__name__)

# ROOTDIR is two levels above this file; the test etc/ directory with the
# paste/config fixtures lives directly under it.
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')


def etcdir(*p):
    """Return the path of a file inside the test etc/ directory."""
    return os.path.join(ETCDIR, *p)

# Colon-separated search path pointing at the stub extensions that ship
# with the unit tests; fed to PluginAwareExtensionManager below.
extensions_path = ':'.join(quantum.tests.unit.extensions.__path__)
class ExtensionsTestApp(wsgi.Router):
    """Minimal WSGI router exposing a single stubbed 'dummy_resource'.

    Used as the base application onto which extension middleware is layered
    by the tests in this module.
    """

    def __init__(self, options=None):
        # FIX: 'options' previously used a mutable default argument ({}),
        # which is shared across all calls — a classic Python pitfall.
        # The argument is accepted for paste app-factory compatibility but
        # is otherwise unused, so a None sentinel is fully compatible.
        mapper = routes.Mapper()
        controller = ext_stubs.StubBaseAppController()
        mapper.resource("dummy_resource", "/dummy_resources",
                        controller=controller)
        super(ExtensionsTestApp, self).__init__(mapper)
class FakePluginWithExtension(db_base_plugin_v2.QuantumDbPluginV2):
    """A fake plugin used only for extension testing in this file."""

    # Advertises the FOXNSOX stub extension so that the
    # PluginAwareExtensionManager built in setup_extensions_middleware()
    # considers it supported and loads it.
    supported_extension_aliases = ["FOXNSOX"]

    def method_to_support_foxnsox_extension(self, context):
        # Interface method expected by the FOXNSOX stub extension;
        # simply logs that it was invoked.
        self._log("method_to_support_foxnsox_extension", context)
class ResourceExtensionTest(base.BaseTestCase):
    """Tests for extensions that register new top-level API resources."""

    class ResourceExtensionController(wsgi.Controller):
        """Stub controller providing index/show plus custom actions."""

        def index(self, request):
            return "resource index"

        def show(self, request, id):
            return {'data': {'id': id}}

        def notimplemented_function(self, request, id):
            # Wraps NotImplementedError in an HTTP client error so the
            # WSGI stack returns an error response instead of raising.
            return webob.exc.HTTPClientError(NotImplementedError())

        def custom_member_action(self, request, id):
            return {'member_action': 'value'}

        def custom_collection_action(self, request, **kwargs):
            return {'collection': 'value'}

    class DummySvcPlugin(wsgi.Controller):
        """Stub service plugin whose resources are mounted under a prefix."""

        def get_plugin_type(self):
            return constants.DUMMY

        def index(self, request, **kwargs):
            return "resource index"

        def custom_member_action(self, request, **kwargs):
            return {'member_action': 'value'}

        def collection_action(self, request, **kwargs):
            return {'collection': 'value'}

        def show(self, request, id):
            return {'data': {'id': id}}

    def test_exceptions_notimplemented(self):
        controller = self.ResourceExtensionController()
        member = {'notimplemented_function': "GET"}
        res_ext = extensions.ResourceExtension('tweedles', controller,
                                               member_actions=member)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        # Ideally we would check for a 501 code here but webtest doesn't take
        # anything that is below 200 or above 400 so we can't actually check
        # it.  It throws webtest.AppError instead.
        #
        # FIX: use assertRaises instead of the original try/except around
        # assertTrue(False), which was harder to read and produced an
        # unhelpful failure message when the request unexpectedly succeeded.
        self.assertRaises(webtest.AppError, test_app.get,
                          "/tweedles/some_id/notimplemented_function")

    def test_resource_can_be_added_as_extension(self):
        res_ext = extensions.ResourceExtension(
            'tweedles', self.ResourceExtensionController())
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        index_response = test_app.get("/tweedles")
        self.assertEqual(200, index_response.status_int)
        self.assertEqual("resource index", index_response.body)

        show_response = test_app.get("/tweedles/25266")
        self.assertEqual({'data': {'id': "25266"}}, show_response.json)

    def test_resource_gets_prefix_of_plugin(self):

        class DummySvcPlugin(wsgi.Controller):
            def index(self, request):
                return ""

            def get_plugin_type(self):
                return constants.DUMMY

        res_ext = extensions.ResourceExtension(
            'tweedles', DummySvcPlugin(), path_prefix="/dummy_svc")
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
        index_response = test_app.get("/dummy_svc/tweedles")
        self.assertEqual(200, index_response.status_int)

    def test_resource_extension_with_custom_member_action(self):
        controller = self.ResourceExtensionController()
        member = {'custom_member_action': "GET"}
        res_ext = extensions.ResourceExtension('tweedles', controller,
                                               member_actions=member)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        response = test_app.get("/tweedles/some_id/custom_member_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['member_action'],
                         "value")

    def test_resource_ext_with_custom_member_action_gets_plugin_prefix(self):
        controller = self.DummySvcPlugin()
        member = {'custom_member_action': "GET"}
        collections = {'collection_action': "GET"}
        res_ext = extensions.ResourceExtension('tweedles', controller,
                                               path_prefix="/dummy_svc",
                                               member_actions=member,
                                               collection_actions=collections)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        response = test_app.get("/dummy_svc/tweedles/1/custom_member_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['member_action'],
                         "value")

        response = test_app.get("/dummy_svc/tweedles/collection_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['collection'],
                         "value")

    def test_plugin_prefix_with_parent_resource(self):
        controller = self.DummySvcPlugin()
        parent = dict(member_name="tenant",
                      collection_name="tenants")
        member = {'custom_member_action': "GET"}
        collections = {'collection_action': "GET"}
        res_ext = extensions.ResourceExtension('tweedles', controller, parent,
                                               path_prefix="/dummy_svc",
                                               member_actions=member,
                                               collection_actions=collections)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        index_response = test_app.get("/dummy_svc/tenants/1/tweedles")
        self.assertEqual(200, index_response.status_int)

        response = test_app.get("/dummy_svc/tenants/1/"
                                "tweedles/1/custom_member_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['member_action'],
                         "value")

        response = test_app.get("/dummy_svc/tenants/2/"
                                "tweedles/collection_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['collection'],
                         "value")

    def test_resource_extension_for_get_custom_collection_action(self):
        controller = self.ResourceExtensionController()
        collections = {'custom_collection_action': "GET"}
        res_ext = extensions.ResourceExtension('tweedles', controller,
                                               collection_actions=collections)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        response = test_app.get("/tweedles/custom_collection_action")
        self.assertEqual(200, response.status_int)
        LOG.debug(jsonutils.loads(response.body))
        self.assertEqual(jsonutils.loads(response.body)['collection'], "value")

    def test_resource_extension_for_put_custom_collection_action(self):
        controller = self.ResourceExtensionController()
        collections = {'custom_collection_action': "PUT"}
        res_ext = extensions.ResourceExtension('tweedles', controller,
                                               collection_actions=collections)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        response = test_app.put("/tweedles/custom_collection_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')

    def test_resource_extension_for_post_custom_collection_action(self):
        controller = self.ResourceExtensionController()
        collections = {'custom_collection_action': "POST"}
        res_ext = extensions.ResourceExtension('tweedles', controller,
                                               collection_actions=collections)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        response = test_app.post("/tweedles/custom_collection_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')

    def test_resource_extension_for_delete_custom_collection_action(self):
        controller = self.ResourceExtensionController()
        collections = {'custom_collection_action': "DELETE"}
        res_ext = extensions.ResourceExtension('tweedles', controller,
                                               collection_actions=collections)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        response = test_app.delete("/tweedles/custom_collection_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')

    def test_resource_ext_for_formatted_req_on_custom_collection_action(self):
        controller = self.ResourceExtensionController()
        collections = {'custom_collection_action': "GET"}
        res_ext = extensions.ResourceExtension('tweedles', controller,
                                               collection_actions=collections)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        # Format suffix (.json) must be accepted on collection actions.
        response = test_app.get("/tweedles/custom_collection_action.json")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['collection'], "value")

    def test_resource_ext_for_nested_resource_custom_collection_action(self):
        controller = self.ResourceExtensionController()
        collections = {'custom_collection_action': "GET"}
        parent = dict(collection_name='beetles', member_name='beetle')
        res_ext = extensions.ResourceExtension('tweedles', controller,
                                               collection_actions=collections,
                                               parent=parent)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        response = test_app.get("/beetles/beetle_id"
                                "/tweedles/custom_collection_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['collection'], "value")

    def test_resource_extension_with_custom_member_action_and_attr_map(self):
        controller = self.ResourceExtensionController()
        member = {'custom_member_action': "GET"}
        # Attribute map describing the extended resource's fields.
        params = {
            'tweedles': {
                'id': {'allow_post': False, 'allow_put': False,
                       'validate': {'type:uuid': None},
                       'is_visible': True},
                'name': {'allow_post': True, 'allow_put': True,
                         'validate': {'type:string': None},
                         'default': '', 'is_visible': True},
            }
        }
        res_ext = extensions.ResourceExtension('tweedles', controller,
                                               member_actions=member,
                                               attr_map=params)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        response = test_app.get("/tweedles/some_id/custom_member_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual(jsonutils.loads(response.body)['member_action'],
                         "value")

    def test_returns_404_for_non_existent_extension(self):
        test_app = _setup_extensions_test_app(SimpleExtensionManager(None))

        response = test_app.get("/non_extistant_extension", status='*')
        self.assertEqual(404, response.status_int)
class ActionExtensionTest(base.BaseTestCase):
    """Exercises extensions that handle POST <resource>/<id>/action."""

    def setUp(self):
        super(ActionExtensionTest, self).setUp()
        self.extension_app = _setup_extensions_test_app()

    def _post_action(self, url, action, params, **kwargs):
        """POST a JSON body of the form {action: params} to url."""
        payload = jsonutils.dumps({action: params})
        return self.extension_app.post(url, payload,
                                       content_type='application/json',
                                       **kwargs)

    def test_extended_action_for_adding_extra_data(self):
        reply = self._post_action('/dummy_resources/1/action',
                                  'FOXNSOX:add_tweedle',
                                  dict(name='Beetle'))
        self.assertEqual("Tweedle Beetle Added.", reply.body)

    def test_extended_action_for_deleting_extra_data(self):
        reply = self._post_action("/dummy_resources/1/action",
                                  'FOXNSOX:delete_tweedle',
                                  dict(name='Bailey'))
        self.assertEqual("Tweedle Bailey Deleted.", reply.body)

    def test_returns_404_for_non_existent_action(self):
        reply = self._post_action("/dummy_resources/1/action",
                                  'blah_action',
                                  dict(name="test"),
                                  status='*')
        self.assertEqual(404, reply.status_int)

    def test_returns_404_for_non_existent_resource(self):
        reply = self._post_action("/asdf/1/action",
                                  'add_tweedle',
                                  dict(name='Beetle'),
                                  status='*')
        self.assertEqual(404, reply.status_int)
class RequestExtensionTest(base.BaseTestCase):
    """Tests for extensions that hook request/response processing."""

    def test_headers_can_be_extended(self):
        def extend_headers(req, res):
            # The handler sees the inbound request headers and may add
            # headers to the response before it is returned to the client.
            assert req.headers['X-NEW-REQUEST-HEADER'] == "sox"
            res.headers['X-NEW-RESPONSE-HEADER'] = "response_header_data"
            return res

        app = self._setup_app_with_request_handler(extend_headers, 'GET')
        response = app.get("/dummy_resources/1",
                           headers={'X-NEW-REQUEST-HEADER': "sox"})

        self.assertEqual(response.headers['X-NEW-RESPONSE-HEADER'],
                         "response_header_data")

    def test_extend_get_resource_response(self):
        def extend_response_data(req, res):
            # Inject a query-string value into the JSON response body.
            data = jsonutils.loads(res.body)
            data['FOXNSOX:extended_key'] = req.GET.get('extended_key')
            res.body = jsonutils.dumps(data)
            return res

        app = self._setup_app_with_request_handler(extend_response_data, 'GET')
        response = app.get("/dummy_resources/1?extended_key=extended_data")

        self.assertEqual(200, response.status_int)
        response_data = jsonutils.loads(response.body)
        self.assertEqual('extended_data',
                         response_data['FOXNSOX:extended_key'])
        # 'fort' comes from the base stub controller's response.
        self.assertEqual('knox', response_data['fort'])

    def test_get_resources(self):
        # Relies on the FOXNSOX stub extension (loaded by default via
        # _setup_extensions_test_app) extending the GET response.
        app = _setup_extensions_test_app()

        response = app.get("/dummy_resources/1?chewing=newblue")

        response_data = jsonutils.loads(response.body)
        self.assertEqual('newblue', response_data['FOXNSOX:googoose'])
        self.assertEqual("Pig Bands!", response_data['FOXNSOX:big_bands'])

    def test_edit_previously_uneditable_field(self):
        def _update_handler(req, res):
            # Copy the client-supplied value over the base app's response.
            data = jsonutils.loads(res.body)
            data['uneditable'] = req.params['uneditable']
            res.body = jsonutils.dumps(data)
            return res

        # Without the extension, the field cannot be changed via PUT.
        base_app = webtest.TestApp(setup_base_app())
        response = base_app.put("/dummy_resources/1",
                                {'uneditable': "new_value"})
        self.assertEqual(response.json['uneditable'], "original_value")

        # With the PUT request extension installed, the edit takes effect.
        ext_app = self._setup_app_with_request_handler(_update_handler,
                                                       'PUT')
        ext_response = ext_app.put("/dummy_resources/1",
                                   {'uneditable': "new_value"})
        self.assertEqual(ext_response.json['uneditable'], "new_value")

    def _setup_app_with_request_handler(self, handler, verb):
        """Build a test app with a single request extension for `verb`."""
        req_ext = extensions.RequestExtension(verb,
                                              '/dummy_resources/:(id)',
                                              handler)
        manager = SimpleExtensionManager(None, None, req_ext)
        return _setup_extensions_test_app(manager)
class ExtensionManagerTest(base.BaseTestCase):
    """Tests basic ExtensionManager registration behavior."""

    def test_invalid_extensions_are_not_registered(self):

        class InvalidExtension(object):
            """Invalid extension.

            This Extension doesn't implement extension methods :
            get_name, get_description, get_namespace and get_updated
            """

            def get_alias(self):
                return "invalid_extension"

        ext_mgr = extensions.ExtensionManager('')
        ext_mgr.add_extension(InvalidExtension())
        ext_mgr.add_extension(ext_stubs.StubExtension("valid_extension"))

        # IMPROVEMENT: assertIn/assertNotIn give clearer failure messages
        # than assertTrue/assertFalse on a containment expression.
        self.assertIn('valid_extension', ext_mgr.extensions)
        self.assertNotIn('invalid_extension', ext_mgr.extensions)
class PluginAwareExtensionManagerTest(base.BaseTestCase):
    """Tests extension-loading decisions based on plugin capabilities."""

    def test_unsupported_extensions_are_not_loaded(self):
        stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1", "e3"])
        plugin_info = {constants.CORE: stub_plugin}
        ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)

        ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
        ext_mgr.add_extension(ext_stubs.StubExtension("e2"))
        ext_mgr.add_extension(ext_stubs.StubExtension("e3"))

        # IMPROVEMENT: assertIn/assertNotIn give clearer failure messages
        # than assertTrue/assertFalse on containment expressions.
        self.assertIn("e1", ext_mgr.extensions)
        self.assertNotIn("e2", ext_mgr.extensions)
        self.assertIn("e3", ext_mgr.extensions)

    def test_extensions_are_not_loaded_for_plugins_unaware_of_extensions(self):

        class ExtensionUnawarePlugin(object):
            """This plugin does not implement supports_extension method.

            Extensions will not be loaded when this plugin is used.
            """
            pass

        plugin_info = {constants.CORE: ExtensionUnawarePlugin()}
        ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
        ext_mgr.add_extension(ext_stubs.StubExtension("e1"))

        self.assertNotIn("e1", ext_mgr.extensions)

    def test_extensions_not_loaded_for_plugin_without_expected_interface(self):

        class PluginWithoutExpectedIface(object):
            """Does not implement get_foo method as expected by extension."""
            supported_extension_aliases = ["supported_extension"]

        plugin_info = {constants.CORE: PluginWithoutExpectedIface()}
        ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
        ext_mgr.add_extension(
            ext_stubs.ExtensionExpectingPluginInterface("supported_extension"))

        # BUG FIX: the original asserted that "e1" was absent, but nothing
        # aliased "e1" is ever added in this test, so the check passed
        # vacuously.  Assert on the alias actually registered above.
        self.assertNotIn("supported_extension", ext_mgr.extensions)

    def test_extensions_are_loaded_for_plugin_with_expected_interface(self):

        class PluginWithExpectedInterface(object):
            """Implements get_foo method as expected by extension."""
            supported_extension_aliases = ["supported_extension"]

            def get_foo(self, bar=None):
                pass

        plugin_info = {constants.CORE: PluginWithExpectedInterface()}
        ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
        ext_mgr.add_extension(
            ext_stubs.ExtensionExpectingPluginInterface("supported_extension"))

        self.assertIn("supported_extension", ext_mgr.extensions)

    def test_extensions_expecting_quantum_plugin_interface_are_loaded(self):

        class ExtensionForQuamtumPluginInterface(ext_stubs.StubExtension):
            """This Extension does not implement get_plugin_interface method.

            This will work with any plugin implementing QuantumPluginBase
            """
            pass

        stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
        plugin_info = {constants.CORE: stub_plugin}
        ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
        ext_mgr.add_extension(ExtensionForQuamtumPluginInterface("e1"))

        self.assertIn("e1", ext_mgr.extensions)

    def test_extensions_without_need_for__plugin_interface_are_loaded(self):

        class ExtensionWithNoNeedForPluginInterface(ext_stubs.StubExtension):
            """This Extension does not need any plugin interface.

            This will work with any plugin implementing QuantumPluginBase
            """
            def get_plugin_interface(self):
                return None

        stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
        plugin_info = {constants.CORE: stub_plugin}
        ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
        ext_mgr.add_extension(ExtensionWithNoNeedForPluginInterface("e1"))

        self.assertIn("e1", ext_mgr.extensions)

    def test_extension_loaded_for_non_core_plugin(self):

        class NonCorePluginExtenstion(ext_stubs.StubExtension):
            def get_plugin_interface(self):
                return None

        stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
        plugin_info = {constants.DUMMY: stub_plugin}
        ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
        ext_mgr.add_extension(NonCorePluginExtenstion("e1"))

        self.assertIn("e1", ext_mgr.extensions)
class ExtensionControllerTest(testlib_api.WebTestCase):
    """Tests the /extensions index and show endpoints."""

    def setUp(self):
        super(ExtensionControllerTest, self).setUp()
        self.test_app = _setup_extensions_test_app()

    def test_index_gets_all_registerd_extensions(self):
        res = self.test_app.get("/extensions." + self.fmt)
        extension_list = self.deserialize(res)["extensions"]
        first_ext = extension_list[0]

        self.assertEqual(first_ext["alias"], "FOXNSOX")
        self.assertEqual(first_ext["namespace"],
                         "http://www.fox.in.socks/api/ext/pie/v1.0")

    def test_extension_can_be_accessed_by_alias(self):
        res = self.test_app.get("/extensions/FOXNSOX." + self.fmt)
        ext = self.deserialize(res)['extension']

        self.assertEqual(ext["alias"], "FOXNSOX")
        self.assertEqual(ext["namespace"],
                         "http://www.fox.in.socks/api/ext/pie/v1.0")

    def test_show_returns_not_found_for_non_existent_extension(self):
        res = self.test_app.get("/extensions/non_existent" + self.fmt,
                                status="*")

        self.assertEqual(res.status_int, 404)
class ExtensionControllerTestXML(ExtensionControllerTest):
    # Re-runs every ExtensionControllerTest case with XML serialization
    # instead of the default format.
    fmt = 'xml'
def app_factory(global_conf, **local_conf):
    """Paste app factory: build the test app from merged configuration."""
    merged_conf = dict(global_conf)
    merged_conf.update(local_conf)
    return ExtensionsTestApp(merged_conf)
def setup_base_app():
    """Load the plain (extension-free) test application via paste config."""
    config_file = 'quantum.conf.test'
    args = ['--config-file', etcdir(config_file)]
    config.parse(args=args)
    app = config.load_paste_app('extensions_test_app')
    return app
def setup_extensions_middleware(extension_manager=None):
    """Wrap the paste test app in ExtensionMiddleware.

    When no extension manager is supplied, a PluginAwareExtensionManager is
    built against the stub extensions path with FakePluginWithExtension as
    the core plugin.
    """
    extension_manager = (extension_manager or
                         extensions.PluginAwareExtensionManager(
                             extensions_path,
                             {constants.CORE: FakePluginWithExtension()}))
    config_file = 'quantum.conf.test'
    args = ['--config-file', etcdir(config_file)]
    config.parse(args=args)
    app = config.load_paste_app('extensions_test_app')
    return extensions.ExtensionMiddleware(app, ext_mgr=extension_manager)
def _setup_extensions_test_app(extension_manager=None):
    """Return a webtest.TestApp wrapping the extension middleware."""
    return webtest.TestApp(setup_extensions_middleware(extension_manager))
class SimpleExtensionManager(object):
    """Trivial manager holding at most one extension of each kind."""

    def __init__(self, resource_ext=None, action_ext=None, request_ext=None):
        self.resource_ext = resource_ext
        self.action_ext = action_ext
        self.request_ext = request_ext

    def get_resources(self):
        # A single-element list when set, otherwise empty.
        return [self.resource_ext] if self.resource_ext else []

    def get_actions(self):
        return [self.action_ext] if self.action_ext else []

    def get_request_extensions(self):
        return [self.request_ext] if self.request_ext else []
| |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for rm command."""
from __future__ import absolute_import
import re
import gslib.tests.testcase as testcase
from gslib.tests.testcase.base import MAX_BUCKET_LENGTH
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import GenerationFromURI as urigen
from gslib.tests.util import ObjectToURI as suri
from gslib.util import Retry
class TestRm(testcase.GsUtilIntegrationTestCase):
"""Integration tests for rm command."""
def _RunRemoveCommandAndCheck(self, command_and_args, objects_to_remove=None,
                              buckets_to_remove=None, stdin=None):
    """Tests a remove command in the presence of eventual listing consistency.

    Eventual listing consistency means that a remove command may not see all
    of the objects to be removed at once. When removing multiple objects
    (or buckets via -r), some calls may return no matches and multiple calls
    to the rm command may be necessary to reach the desired state. This
    function retries the rm command, incrementally tracking what has been
    removed and ensuring that the exact set of objects/buckets are removed
    across all retried calls.

    The caller is responsible for confirming the existence of buckets/objects
    prior to calling this function.

    Args:
      command_and_args: List of strings representing the rm command+args to
          run.
      objects_to_remove: List of object URL strings (optionally including
          generation) that should be removed by the command, if any.
      buckets_to_remove: List of bucket URL strings that should be removed by
          the command, if any.
      stdin: String of data to pipe to the process as standard input (for
          testing -I option).
    """
    # Accumulates every "Removing ..." line seen across all retries.
    cumulative_stderr_lines = set()
    bucket_strings = []
    for bucket_to_remove in buckets_to_remove or []:
        bucket_strings.append('Removing %s/...' % bucket_to_remove)
    object_strings = []
    for object_to_remove in objects_to_remove or []:
        object_strings.append('Removing %s...' % object_to_remove)
    expected_stderr_lines = set(object_strings + bucket_strings)

    @Retry(AssertionError, tries=5, timeout_secs=1)
    def _RunRmCommandAndCheck():
        """Runs the command with retries, updating+checking cumulative output."""
        stderr = self.RunGsUtil(command_and_args, return_stderr=True,
                                expected_status=None, stdin=stdin)
        update_lines = True
        # Retry 404's and 409's due to eventual listing consistency, but
        # don't add the output to the set.
        if ('No URLs matched' in stderr or
            '409 BucketNotEmpty' in stderr or
            '409 VersionedBucketNotEmpty' in stderr):
            update_lines = False
        # For recursive deletes of buckets, it is possible that the bucket
        # is deleted before the objects are all present in the listing, in
        # which case we will never see all of the expected
        # "Removing object..." messages.  Since this is still a successful
        # outcome, just return successfully.
        if '-r' in command_and_args and 'bucket does not exist' in stderr:
            for bucket_to_remove in buckets_to_remove:
                matching_bucket = re.match(
                    r'.*404\s+%s\s+bucket does not exist' %
                    re.escape(bucket_to_remove), stderr)
                if matching_bucket:
                    # Accept success if the bucket removal was already seen
                    # in a previous retry or in this run's output.
                    for line in cumulative_stderr_lines:
                        if 'Removing %s/...' % bucket_to_remove in line:
                            return
                    if 'Removing %s/...' % bucket_to_remove in stderr:
                        return
        if update_lines:
            cumulative_stderr_lines.update(set(stderr.splitlines()))
        # Ensure all of the expected strings are present.
        self.assertEqual(cumulative_stderr_lines, expected_stderr_lines)

    _RunRmCommandAndCheck()
def test_all_versions_current(self):
    """Test that 'rm -a' for an object with a current version works."""
    bucket_uri = self.CreateVersionedBucket()
    key_uri = bucket_uri.clone_replace_name('foo')
    # Write the object twice so two generations exist.
    key_uri.set_contents_from_string('bar')
    g1 = urigen(key_uri)
    key_uri.set_contents_from_string('baz')
    g2 = urigen(key_uri)
    self.AssertNObjectsInBucket(bucket_uri, 2, versioned=True)

    # Use @Retry as hedge against bucket listing eventual consistency.
    @Retry(AssertionError, tries=3, timeout_secs=1)
    def _Check1(stderr_lines):
        stderr = self.RunGsUtil(['-m', 'rm', '-a', suri(key_uri)],
                                return_stderr=True)
        # Accumulate output across retries so earlier removals still count.
        stderr_lines.update(set(stderr.splitlines()))
        stderr = '\n'.join(stderr_lines)
        self.assertEqual(stderr.count('Removing %s://' % self.default_provider),
                         2)
        self.assertIn('Removing %s#%s...' % (suri(key_uri), g1), stderr)
        self.assertIn('Removing %s#%s...' % (suri(key_uri), g2), stderr)

    all_stderr_lines = set()
    _Check1(all_stderr_lines)
    self.AssertNObjectsInBucket(bucket_uri, 0, versioned=True)
def test_all_versions_no_current(self):
    """Test that 'rm -a' for an object without a current version works."""
    bucket_uri = self.CreateVersionedBucket()
    key_uri = bucket_uri.clone_replace_name('foo')
    # Two writes produce two generations of the same object.
    key_uri.set_contents_from_string('bar')
    g1 = urigen(key_uri)
    key_uri.set_contents_from_string('baz')
    g2 = urigen(key_uri)
    self._RunRemoveCommandAndCheck(
        ['-m', 'rm', '-a', suri(key_uri)],
        objects_to_remove=['%s#%s' % (suri(key_uri), g1),
                           '%s#%s' % (suri(key_uri), g2)])
    self.AssertNObjectsInBucket(bucket_uri, 0, versioned=True)
def test_fails_for_missing_obj(self):
    """'rm -a' on a nonexistent object must fail with a clear message."""
    buri = self.CreateVersionedBucket()
    err_output = self.RunGsUtil(['rm', '-a', '%s' % suri(buri, 'foo')],
                                return_stderr=True, expected_status=1)
    self.assertIn('No URLs matched', err_output)
def test_remove_all_versions_recursive_on_bucket(self):
    """Test that 'rm -r' works on bucket."""
    bucket_uri = self.CreateVersionedBucket()
    k1_uri = bucket_uri.clone_replace_name('foo')
    k2_uri = bucket_uri.clone_replace_name('foo2')
    # Two objects, two generations each => four versioned objects total.
    k1_uri.set_contents_from_string('bar')
    k2_uri.set_contents_from_string('bar2')
    k1g1 = urigen(k1_uri)
    k2g1 = urigen(k2_uri)
    k1_uri.set_contents_from_string('baz')
    k2_uri.set_contents_from_string('baz2')
    k1g2 = urigen(k1_uri)
    k2g2 = urigen(k2_uri)

    self.AssertNObjectsInBucket(bucket_uri, 4, versioned=True)
    self._RunRemoveCommandAndCheck(
        ['rm', '-r', suri(bucket_uri)],
        objects_to_remove=['%s#%s' % (suri(k1_uri), k1g1),
                           '%s#%s' % (suri(k1_uri), k1g2),
                           '%s#%s' % (suri(k2_uri), k2g1),
                           '%s#%s' % (suri(k2_uri), k2g2)],
        buckets_to_remove=[suri(bucket_uri)])

    # Use @Retry as hedge against bucket listing eventual consistency.
    @Retry(AssertionError, tries=3, timeout_secs=1)
    def _Check():
        # Bucket should no longer exist.
        stderr = self.RunGsUtil(['ls', '-a', suri(bucket_uri)],
                                return_stderr=True, expected_status=1)
        self.assertIn('bucket does not exist', stderr)
    _Check()
def test_remove_all_versions_recursive_on_subdir(self):
    """Test that 'rm -r' works on subdir."""
    bucket_uri = self.CreateVersionedBucket()
    k1_uri = bucket_uri.clone_replace_name('dir/foo')
    k2_uri = bucket_uri.clone_replace_name('dir/foo2')
    # Two objects under dir/, two generations each.
    k1_uri.set_contents_from_string('bar')
    k2_uri.set_contents_from_string('bar2')
    k1g1 = urigen(k1_uri)
    k2g1 = urigen(k2_uri)
    k1_uri.set_contents_from_string('baz')
    k2_uri.set_contents_from_string('baz2')
    k1g2 = urigen(k1_uri)
    k2g2 = urigen(k2_uri)

    self.AssertNObjectsInBucket(bucket_uri, 4, versioned=True)
    # Removing the subdir removes all versions of both objects, but the
    # bucket itself must survive.
    self._RunRemoveCommandAndCheck(
        ['rm', '-r', '%s' % suri(bucket_uri, 'dir')],
        objects_to_remove=['%s#%s' % (suri(k1_uri), k1g1),
                           '%s#%s' % (suri(k1_uri), k1g2),
                           '%s#%s' % (suri(k2_uri), k2g1),
                           '%s#%s' % (suri(k2_uri), k2g2)])
    self.AssertNObjectsInBucket(bucket_uri, 0, versioned=True)
def test_missing_first_force(self):
    """'rm -f' keeps going past a missing first URL and removes the rest."""
    buri = self.CreateBucket()
    ouri = self.CreateObject(bucket_uri=buri, object_name='present',
                             contents='foo')
    self.AssertNObjectsInBucket(buri, 1)

    # Without -f, a missing URL makes the command fail.
    self.RunGsUtil(['rm', '%s' % suri(buri, 'missing'),
                    suri(ouri)], expected_status=1)

    # With -f the exit status is still 1, but the present object is
    # removed anyway.
    err = self.RunGsUtil(
        ['rm', '-f', '%s' % suri(buri, 'missing'), suri(ouri)],
        return_stderr=True, expected_status=1)
    self.assertEqual(err.count('Removing %s://' % self.default_provider), 1)
    self.RunGsUtil(['stat', suri(ouri)], expected_status=1)
def test_some_missing(self):
    """Test that 'rm -a' fails when some but not all uris don't exist."""
    bucket_uri = self.CreateVersionedBucket()
    key_uri = bucket_uri.clone_replace_name('foo')
    key_uri.set_contents_from_string('bar')
    self.AssertNObjectsInBucket(bucket_uri, 1, versioned=True)
    stderr = self.RunGsUtil(['rm', '-a', suri(key_uri),
                             '%s' % suri(bucket_uri, 'missing')],
                            return_stderr=True, expected_status=1)
    # The existing object is removed...
    self.assertEqual(stderr.count('Removing %s://' % self.default_provider), 1)
    # ...but the missing URL is still reported as an error.
    self.assertIn('No URLs matched', stderr)
def test_some_missing_force(self):
    """Test that 'rm -af' succeeds despite hidden first uri."""
    bucket_uri = self.CreateVersionedBucket()
    key_uri = bucket_uri.clone_replace_name('foo')
    key_uri.set_contents_from_string('bar')
    self.AssertNObjectsInBucket(bucket_uri, 1, versioned=True)
    # Exit status is still 1 for the missing URL, but -f lets the
    # removal of the existing object proceed.
    stderr = self.RunGsUtil(
        ['rm', '-af', suri(key_uri), '%s' % suri(bucket_uri, 'missing')],
        return_stderr=True, expected_status=1)
    self.assertEqual(stderr.count('Removing %s://' % self.default_provider), 1)
    self.AssertNObjectsInBucket(bucket_uri, 0)
def test_folder_objects_deleted(self):
    """Test for 'rm -r' of a folder with a dir_$folder$ marker."""
    bucket_uri = self.CreateVersionedBucket()
    key_uri = bucket_uri.clone_replace_name('abc/o1')
    key_uri.set_contents_from_string('foobar')
    # Create a legacy "folder marker" object alongside the real object.
    folder_uri = bucket_uri.clone_replace_name('abc_$folder$')
    folder_uri.set_contents_from_string('')
    self.AssertNObjectsInBucket(bucket_uri, 2, versioned=True)

    # This could fail due to eventual listing consistency, so use retry and
    # expected_status=None to guard against No URLs matched exceptions.
    @Retry(AssertionError, tries=3, timeout_secs=1)
    def _RemoveAndCheck():
        self.RunGsUtil(['rm', '-r', '%s' % suri(bucket_uri, 'abc')],
                       expected_status=None)
        self.AssertNObjectsInBucket(bucket_uri, 0, versioned=True)
    _RemoveAndCheck()

    # Bucket should not be deleted (Should not get ServiceException).
    bucket_uri.get_location(validate=False)
def test_folder_objects_deleted_with_wildcard(self):
    """Test for 'rm -r' of a folder with a dir_$folder$ marker."""
    bucket_uri = self.CreateVersionedBucket()
    key_uri = bucket_uri.clone_replace_name('abc/o1')
    key_uri.set_contents_from_string('foobar')
    # Create a legacy "folder marker" object alongside the real object.
    folder_uri = bucket_uri.clone_replace_name('abc_$folder$')
    folder_uri.set_contents_from_string('')

    self.AssertNObjectsInBucket(bucket_uri, 2, versioned=True)
    # The ** wildcard should match both the object and the folder marker.
    self._RunRemoveCommandAndCheck(
        ['rm', '-r', '%s' % suri(bucket_uri, '**')],
        objects_to_remove=['%s#%s' % (suri(key_uri), urigen(key_uri)),
                           '%s#%s' % (suri(folder_uri), urigen(folder_uri))])
    self.AssertNObjectsInBucket(bucket_uri, 0, versioned=True)

    # Bucket should not be deleted (Should not get ServiceException).
    bucket_uri.get_location(validate=False)
@SkipForS3('Listing/removing S3 DeleteMarkers is not supported')
def test_recursive_bucket_rm(self):
  """Test for 'rm -r' of a bucket."""
  bucket_uri = self.CreateBucket()
  object_uri = self.CreateObject(bucket_uri, contents='foo')
  self.AssertNObjectsInBucket(bucket_uri, 1)
  # rm -r on the bucket URL removes the object and then the bucket itself.
  self._RunRemoveCommandAndCheck(
      ['rm', '-r', suri(bucket_uri)],
      objects_to_remove=['%s#%s' % (suri(object_uri), urigen(object_uri))],
      buckets_to_remove=[suri(bucket_uri)])
  # Use @Retry as hedge against bucket listing eventual consistency.
  @Retry(AssertionError, tries=3, timeout_secs=1)
  def _Check1():
    # Bucket should be deleted.
    stderr = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)],
                            return_stderr=True, expected_status=1)
    self.assertIn('bucket does not exist', stderr)
  _Check1()
  # Now try same thing, but for a versioned bucket with multiple versions of
  # an object present.
  bucket_uri = self.CreateVersionedBucket()
  self.CreateObject(bucket_uri, 'obj', 'z')
  self.CreateObject(bucket_uri, 'obj', 'z')
  final_uri = self.CreateObject(bucket_uri, 'obj', 'z')
  self.AssertNObjectsInBucket(bucket_uri, 3, versioned=True)
  # Non-recursive rm with ** - only the final (live) version is expected in
  # the removal output, so older versions keep the bucket non-empty.
  self._RunRemoveCommandAndCheck(['rm', suri(bucket_uri, '**')],
                                 objects_to_remove=['%s' % final_uri])
  stderr = self.RunGsUtil(['rb', suri(bucket_uri)],
                          return_stderr=True, expected_status=1)
  self.assertIn('Bucket is not empty', stderr)
  # Now try with rm -r.
  @Retry(AssertionError, tries=3, timeout_secs=1)
  def _Check2():
    self.RunGsUtil(['rm', '-r', suri(bucket_uri)])
    # Bucket should be deleted.
    stderr = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)],
                            return_stderr=True, expected_status=1)
    self.assertIn('bucket does not exist', stderr)
  _Check2()
def test_recursive_bucket_rm_with_wildcarding(self):
  """Tests removing all objects and buckets matching a bucket wildcard."""
  buri_base = 'gsutil-test-%s' % self.GetTestMethodName()
  # Truncate so the random suffix below still fits within the bucket name
  # length limit.
  buri_base = buri_base[:MAX_BUCKET_LENGTH-20]
  buri_base = '%s-%s' % (buri_base, self.MakeRandomTestString())
  buri1 = self.CreateBucket(bucket_name='%s-tbuck1' % buri_base)
  buri2 = self.CreateBucket(bucket_name='%s-tbuck2' % buri_base)
  # buri3 ('-tb3') does not match the '-tbu*' wildcard and must survive.
  buri3 = self.CreateBucket(bucket_name='%s-tb3' % buri_base)
  ouri1 = self.CreateObject(bucket_uri=buri1, object_name='o1', contents='z')
  ouri2 = self.CreateObject(bucket_uri=buri2, object_name='o2', contents='z')
  self.CreateObject(bucket_uri=buri3, object_name='o3', contents='z')
  self.AssertNObjectsInBucket(buri1, 1)
  self.AssertNObjectsInBucket(buri2, 1)
  self.AssertNObjectsInBucket(buri3, 1)
  self._RunRemoveCommandAndCheck(
      ['rm', '-r', '%s://%s-tbu*' % (self.default_provider, buri_base)],
      objects_to_remove=['%s#%s' % (suri(ouri1), urigen(ouri1)),
                         '%s#%s' % (suri(ouri2), urigen(ouri2))],
      buckets_to_remove=[suri(buri1), suri(buri2)])
  self.AssertNObjectsInBucket(buri3, 1)
def test_rm_quiet(self):
  """Test that 'rm -q' outputs no progress indications."""
  bucket_uri = self.CreateBucket()
  object_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo')
  self.AssertNObjectsInBucket(bucket_uri, 1)
  # Quiet mode: no per-object removal lines are expected, hence the
  # empty objects_to_remove list.
  self._RunRemoveCommandAndCheck(['-q', 'rm', suri(object_uri)], [])
  self.AssertNObjectsInBucket(bucket_uri, 0)
def test_rm_object_with_slash(self):
  """Tests removing a bucket that has an object with a slash in it."""
  bucket_uri = self.CreateVersionedBucket()
  slash_uri = self.CreateObject(bucket_uri=bucket_uri,
                                object_name='/dirwithslash/foo', contents='z')
  plain_uri1 = self.CreateObject(bucket_uri=bucket_uri,
                                 object_name='dirnoslash/foo', contents='z')
  plain_uri2 = self.CreateObject(bucket_uri=bucket_uri,
                                 object_name='dirnoslash/foo2', contents='z')
  self.AssertNObjectsInBucket(bucket_uri, 3, versioned=True)
  # Version-specific URLs for all three objects, slash-prefixed or not.
  expected_removals = ['%s#%s' % (suri(u), urigen(u))
                       for u in (slash_uri, plain_uri1, plain_uri2)]
  self._RunRemoveCommandAndCheck(
      ['rm', '-r', suri(bucket_uri)],
      objects_to_remove=expected_removals,
      buckets_to_remove=[suri(bucket_uri)])
def test_slasher_horror_film(self):
  """Tests removing a bucket with objects that are filled with slashes."""
  bucket_uri = self.CreateVersionedBucket()
  # (object_name, contents) pairs; creation order matters for versioning.
  film_objects = [
      ('h/e/l//lo', 'Halloween'),
      ('/h/e/l/l/o', 'A Nightmare on Elm Street'),
      ('//h//e/l//l/o', 'Friday the 13th'),
      ('//h//e//l//l//o', 'I Know What You Did Last Summer'),
      ('/', 'Scream'),
      ('//', 'Child\'s Play'),
      ('///', 'The Prowler'),
      ('////', 'Black Christmas'),
      ('everything/is/better/with/slashes///////', 'Maniac'),
  ]
  created_uris = []
  for object_name, contents in film_objects:
    created_uris.append(self.CreateObject(bucket_uri=bucket_uri,
                                          object_name=object_name,
                                          contents=contents))
  self.AssertNObjectsInBucket(bucket_uri, 9, versioned=True)
  # We add a slash to URIs with a trailing slash,
  # because ObjectToURI (suri) removes one trailing slash.
  objects_to_remove = []
  for (object_name, _), uri in zip(film_objects, created_uris):
    suffix = '/' if object_name.endswith('/') else ''
    objects_to_remove.append('%s#%s' % (suri(uri) + suffix, urigen(uri)))
  self._RunRemoveCommandAndCheck(['-m', 'rm', '-r', suri(bucket_uri)],
                                 objects_to_remove=objects_to_remove,
                                 buckets_to_remove=[suri(bucket_uri)])
@SkipForS3('GCS versioning headers not supported by S3')
def test_rm_failing_precondition(self):
  """Test for '-h x-goog-if-generation-match:value rm' of an object."""
  bucket_uri = self.CreateBucket()
  obj_uri = self.CreateObject(bucket_uri, contents='foo')
  # A generation precondition that cannot match must fail the rm with 412.
  removal_args = ['-h', 'x-goog-if-generation-match:12345', 'rm',
                  suri(obj_uri)]
  stderr = self.RunGsUtil(removal_args, return_stderr=True,
                          expected_status=1)
  self.assertRegexpMatches(
      stderr, r'PreconditionException: 412 Precondition\s*Failed')
def test_stdin_args(self):
  """Tests rm with the -I option."""
  buri1 = self.CreateVersionedBucket()
  ouri1 = self.CreateObject(bucket_uri=buri1,
                            object_name='foo',
                            contents='foocontents')
  # 'bar' is deliberately NOT fed to stdin below and must survive the rm.
  self.CreateObject(bucket_uri=buri1, object_name='bar',
                    contents='barcontents')
  ouri3 = self.CreateObject(bucket_uri=buri1,
                            object_name='baz',
                            contents='bazcontents')
  buri2 = self.CreateVersionedBucket()
  ouri4 = self.CreateObject(bucket_uri=buri2,
                            object_name='moo',
                            contents='moocontents')
  self.AssertNObjectsInBucket(buri1, 3, versioned=True)
  self.AssertNObjectsInBucket(buri2, 1, versioned=True)
  # Version-specific URLs, one per line, piped to 'rm -I' on stdin.
  objects_to_remove = ['%s#%s' % (suri(ouri1), urigen(ouri1)),
                       '%s#%s' % (suri(ouri3), urigen(ouri3)),
                       '%s#%s' % (suri(ouri4), urigen(ouri4))]
  stdin = '\n'.join(objects_to_remove)
  self._RunRemoveCommandAndCheck(['rm', '-I'],
                                 objects_to_remove=objects_to_remove,
                                 stdin=stdin)
  self.AssertNObjectsInBucket(buri1, 1, versioned=True)
  self.AssertNObjectsInBucket(buri2, 0, versioned=True)
def test_rm_nonexistent_bucket_recursive(self):
  """Tests that 'rm -rf' on a missing bucket reports the missing bucket."""
  missing_bucket_url = '%s://%s' % (self.default_provider,
                                    self.nonexistent_bucket_name)
  stderr = self.RunGsUtil(['rm', '-rf', missing_bucket_url],
                          return_stderr=True, expected_status=1)
  self.assertIn('Encountered non-existent bucket', stderr)
def test_rm_multiple_nonexistent_objects(self):
  """Tests 'rm -rf' of two distinct nonexistent objects in a real bucket."""
  bucket_uri = self.CreateBucket()
  nonexistent_object1 = suri(bucket_uri, 'nonexistent1')
  # Fixed: this previously also pointed at 'nonexistent1', so the
  # "2 files/objects" assertion never exercised two distinct objects.
  nonexistent_object2 = suri(bucket_uri, 'nonexistent2')
  stderr = self.RunGsUtil(
      ['rm', '-rf', nonexistent_object1, nonexistent_object2],
      return_stderr=True, expected_status=1)
  self.assertIn('2 files/objects could not be removed.', stderr)
| |
from a10sdk.common.A10BaseClass import A10BaseClass
class PasscodeCfg(A10BaseClass):
    """Passcode-text settings for the default logon page.

    This class does not support CRUD operations; use the parent class.

    Keyword parameters (see the AXAPI v3 schema for full constraints):
        passcode: flag (number) - configure passcode text in default logon page.
        passcode_text: string (1-63 chars) - passcode text (default: Passcode).
        passcode_font: flag (number) - specify font.
        passcode_face: enum (Arial, Courier_New, Georgia, Times_New_Roman,
            Verdana); mutually exclusive with passcode_font_custom.
        passcode_font_custom: string (1-63 chars) - custom font; mutually
            exclusive with passcode_face.
        passcode_size: number (1-7) - font size.
        passcode_color: flag (number) - specify font color.
        passcode_color_value: 6-digit HEX color value; mutually exclusive
            with passcode_color_name.
        passcode_color_name: color-name enum (aqua ... yellow); mutually
            exclusive with passcode_color_value.
        DeviceProxy: device proxy for REST operations and session handling;
            refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "passcode-cfg"
        self.DeviceProxy = ""
        # All schema fields start out as empty strings, as in the generated
        # SDK; keyword arguments then overwrite any of them.
        for attr in ('passcode_font_custom', 'passcode_color',
                     'passcode_face', 'passcode_font', 'passcode_text',
                     'passcode_color_value', 'passcode_color_name',
                     'passcode_size', 'passcode'):
            setattr(self, attr, "")
        for key, value in kwargs.items():
            setattr(self, key, value)
class Background(A10BaseClass):
    """Background image/color settings for the default logon page.

    This class does not support CRUD operations; use the parent class.

    Keyword parameters (see the AXAPI v3 schema for full constraints):
        bgfile: string (1-63 chars) - background image filename; mutually
            exclusive with bgcolor.
        bgstyle: enum - 'tile', 'stretch' or 'fit'.
        bgcolor: flag (number) - specify background color; mutually
            exclusive with bgfile.
        bgcolor_value: 6-digit HEX color value; mutually exclusive with
            bgcolor_name.
        bgcolor_name: color-name enum (aqua ... yellow); mutually exclusive
            with bgcolor_value.
        DeviceProxy: device proxy for REST operations and session handling;
            refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "background"
        self.DeviceProxy = ""
        # All schema fields start out as empty strings, as in the generated
        # SDK; keyword arguments then overwrite any of them.
        for attr in ('bgfile', 'bgstyle', 'bgcolor_value', 'bgcolor_name',
                     'bgcolor'):
            setattr(self, attr, "")
        for key, value in kwargs.items():
            setattr(self, key, value)
class FailMsgCfg(A10BaseClass):
    """Login-failure message settings for the default logon page.

    This class does not support CRUD operations; use the parent class.

    Keyword parameters (see the AXAPI v3 schema for full constraints):
        fail_msg: flag (number) - configure login failure message.
        fail_text: string (1-63 chars) - failure message (default: Invalid
            username or password. Please try again.).
        fail_font: flag (number) - specify font.
        fail_face: font enum; mutually exclusive with fail_font_custom.
        fail_font_custom: custom font name; mutually exclusive with
            fail_face.
        fail_size: number (1-7) - font size.
        fail_color: flag (number) - specify font color.
        fail_color_value: 6-digit HEX color value; mutually exclusive with
            fail_color_name.
        fail_color_name: color-name enum (aqua ... yellow); mutually
            exclusive with fail_color_value.
        DeviceProxy: device proxy for REST operations and session handling;
            refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "fail-msg-cfg"
        self.DeviceProxy = ""
        # All schema fields start out as empty strings, as in the generated
        # SDK; keyword arguments then overwrite any of them.
        for attr in ('fail_font_custom', 'fail_color', 'fail_size',
                     'fail_msg', 'fail_text', 'fail_color_value',
                     'fail_font', 'fail_color_name', 'fail_face'):
            setattr(self, attr, "")
        for key, value in kwargs.items():
            setattr(self, key, value)
class PasswordCfg(A10BaseClass):
    """Password-text settings for the default logon page.

    This class does not support CRUD operations; use the parent class.

    Keyword parameters (see the AXAPI v3 schema for full constraints):
        password: flag (number) - configure password text in default logon
            page.
        pass_text: string (1-63 chars) - password text (default: Password).
        pass_font: flag (number) - specify font.
        pass_face: font enum; mutually exclusive with pass_font_custom.
        pass_font_custom: custom font name; mutually exclusive with
            pass_face.
        pass_size: number (1-7) - font size.
        pass_color: flag (number) - specify font color.
        pass_color_value: 6-digit HEX color value; mutually exclusive with
            pass_color_name.
        pass_color_name: color-name enum (aqua ... yellow); mutually
            exclusive with pass_color_value.
        DeviceProxy: device proxy for REST operations and session handling;
            refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "password-cfg"
        self.DeviceProxy = ""
        # All schema fields start out as empty strings, as in the generated
        # SDK; keyword arguments then overwrite any of them.
        for attr in ('pass_color_value', 'pass_color', 'pass_color_name',
                     'pass_face', 'pass_font_custom', 'pass_size',
                     'pass_text', 'pass_font', 'password'):
            setattr(self, attr, "")
        for key, value in kwargs.items():
            setattr(self, key, value)
class UsernameCfg(A10BaseClass):
    """Username-text settings for the default logon page.

    This class does not support CRUD operations; use the parent class.

    Keyword parameters (see the AXAPI v3 schema for full constraints):
        username: flag (number) - configure username text in default logon
            page.
        user_text: string (1-63 chars) - username text (default: User Name).
        user_font: flag (number) - specify font.
        user_face: font enum; mutually exclusive with user_font_custom.
        user_font_custom: custom font name; mutually exclusive with
            user_face.
        user_size: number (1-7) - font size.
        user_color: flag (number) - specify font color.
        user_color_value: 6-digit HEX color value; mutually exclusive with
            user_color_name.
        user_color_name: color-name enum (aqua ... yellow); mutually
            exclusive with user_color_value.
        DeviceProxy: device proxy for REST operations and session handling;
            refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "username-cfg"
        self.DeviceProxy = ""
        # All schema fields start out as empty strings, as in the generated
        # SDK; keyword arguments then overwrite any of them.
        for attr in ('username', 'user_font', 'user_text', 'user_size',
                     'user_color_value', 'user_font_custom', 'user_color',
                     'user_face', 'user_color_name'):
            setattr(self, attr, "")
        for key, value in kwargs.items():
            setattr(self, key, value)
class Logon(A10BaseClass):
    """Logon page configuration.

    Class logon supports CRUD operations and inherits from
    `common/A10BaseClass`. This class is the "PARENT" class for this module.

    Keyword parameters (all optional; see the AXAPI v3 schema):
        action_url: form action URL in default logon page (default:
            /logon.fo), 1-63 chars.
        username_var: username variable name (default: user), 1-63 chars.
        password_var: password variable name (default: pwd), 1-63 chars.
        passcode_var: passcode variable name (default: passcode), 1-63 chars.
        enable_passcode: flag (number) - enable passcode field in default
            logon page.
        submit_text: submit button text (default: Log In), 1-63 chars.
        uuid: uuid of the object (1-64 chars, modify not allowed).
        DeviceProxy: device proxy for REST operations and session handling;
            refer to `common/device_proxy.py`.

    URL for this object::
        `https://<Hostname|Ip address>//axapi/v3/aam/authentication/portal/{name}/logon`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "logon"
        self.a10_url = "/axapi/v3/aam/authentication/portal/{name}/logon"
        self.DeviceProxy = ""
        # Scalar fields default to empty strings; nested config groups
        # (passcode-cfg, background, ...) default to empty dicts.
        for attr in ('action_url', 'password_var', 'enable_passcode',
                     'username_var', 'submit_text', 'passcode_var', 'uuid'):
            setattr(self, attr, "")
        for attr in ('passcode_cfg', 'background', 'fail_msg_cfg',
                     'password_cfg', 'username_cfg'):
            setattr(self, attr, {})
        for key, value in kwargs.items():
            setattr(self, key, value)
| |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as optimize
from scipy.interpolate import interp1d
import collections
from eos_mod import MGD_PowerLaw,debye_fun,Ptot_powerlaw,press_vinet
def set_dic(a):
    """Load MGD/Vinet EOS parameters from vector *a* into the global param_d.

    Order: [V0, K0, K0p, ln(theta0), ln(gamma0), q]. theta0 and gamma0 are
    passed in log space and exponentiated here.
    """
    param_d['V0'] = a[0]
    param_d['K0'] = a[1]
    param_d['K0p'] = a[2]
    # The last entries of *a* are logs; store the physical values.
    param_d['theta0'] = np.exp(a[3])
    param_d['gamma0'] = np.exp(a[4])
    param_d['q'] = a[5]
def set_const():
    """Populate physical constants in the global param_d['const'] dict."""
    const = param_d['const']
    const['Natom'] = 4  # atoms per Pt unit cell
    const['kB'] = 8.6173324e-5  # eV per K
    const['P_factor'] = 160.217657  # GPa in 1 eV/Ang^3
    const['R'] = 8.314462 / 1.6021764e-19  # eV/K per mol
    # 1 eV/Ang^3 = 160.217657 GPa, 1 eV = 1.6021764e-19 J, 1 Ang^3 = 1e-30 m^3
    const['C_DP'] = 3 * const['R']  # Dulong-Petit limit for Cv
# ---------------------------------------------------------------------------
# Read Pt shock data (particle velocity Up vs shock velocity Us) and derive
# the Hugoniot states: density rho2, pressure p2 and internal energy E2.
# ---------------------------------------------------------------------------
#read data from file
dat = np.loadtxt(fname='UpUs-Pt.md', delimiter='|', skiprows=3)
print dat
Up = dat[:,0]
Us = dat[:,1]
plt.plot(Up,Us,'rx')
plt.xlabel('Up[km/s]')
plt.ylabel('Us[km/s]')
plt.show()
print Up,Us
#set Pt density
rho1original = 21.472 #unit grams/cm3
atompermol = 6.022*(10**23) #at/mol
unitcell = 4 #at/unitcell
v_unitcell = 60.38*10**(-24) #unit cm3
rho1 = rho1original/10**24 #grams/ang^3
#compute pho2 based on conservation of Mass
rho2 = rho1*Us/(Us-Up) #unit grams/Ang^3
print "rho2:",rho2
#Atmospheric pressure is
p1 = 101325*10**(-9) #unit GPa
# Rankine-Hugoniot momentum conservation; the 10**24 factor is the author's
# unit conversion (see the unit note below).
p2 = rho1*Us*Up*10**(24) + p1 #unit GPa, and this is properly unit analyzed
#edit units here: km and m
print "p2: ",p2
#let the initial internal energy E1 to be 0.
m_cell = 195*4/atompermol # (g/unitcell)
f_conv_E = m_cell/160.217657 # (g/cell)*(eV/(GPa*Ang^3))
# Rankine-Hugoniot energy conservation: E2 - E1 = 0.5*(p1+p2)*(V1 - V2).
E2 = 0.5*(p1+p2) * f_conv_E *(1/rho1-1/rho2)#unit eV/unitcell
print "E2: ", E2
param_d = collections.OrderedDict()
param_d['const'] = collections.OrderedDict()
set_const()
V_a = 195*param_d['const']['Natom']/atompermol/rho2 #unit Ang^3/unitcell
print "V_a" , V_a
# 1eV/ang^3 = 160.217657GPa, 1eV = 1.6021764e-19Joules, 1Ang3 = e-30m^3
# Fei-reported MGD parameters for Pt: [V0, K0, K0p, ln(theta0), ln(gamma0), q]
# (theta0 and gamma0 are passed in log space; see set_dic).
fei_report = [60.38, 277,5.08,np.log(230),np.log(2.72),0.5]
set_dic(fei_report)
#compute Vinet energy
def energy_vinet( V_a, param_d ):
    """Vinet cold-compression energy at volume(s) V_a.

    Units follow param_d: V in Ang^3, K0 in GPa; the P_factor entry converts
    the result to eV (per unit cell).
    """
    V0 = param_d['V0']
    K0 = param_d['K0']
    K0p = param_d['K0p']
    P_factor = param_d['const']['P_factor']
    x = (V_a / V0) ** (1.0 / 3)
    eta = 1.5 * (K0p - 1)  # same as 3*(K0p-1)/2
    prefactor = 9.0 * V0 * K0 / (P_factor * eta ** 2.0)
    return prefactor * (1 + (eta * (1 - x) - 1) * np.exp(eta * (1 - x)))
#compute thermal part energy
def energy_mgd_powlaw( V_a, T_a, param_d ):
    """Mie-Grueneisen-Debye thermal energy, relative to the 300 K isotherm.

    gamma follows the power law gamma0*(V/V0)**q and theta scales with gamma
    accordingly. Result is per unit cell (C_DP is per mol, divided by
    Avogadro's number and scaled by Natom).

    Fixed: the original assigned locals theta0 and P_factor and then never
    used them (theta was recomputed from param_d, P_factor was dead code).
    """
    theta0 = param_d['theta0']
    gamma0 = param_d['gamma0']
    q = param_d['q']
    C_DP = param_d['const']['C_DP']
    gamma = gamma0 * (V_a / param_d['V0']) ** q
    theta = theta0 * np.exp((-1) * (gamma - gamma0) / q)
    T_ref_a = 300  # here we use isothermal reference compression curve
    # NOTE(review): relies on the module-level `atompermol` constant.
    energy_therm_a = C_DP / atompermol * param_d['const']['Natom'] * (
        T_a * debye_fun(theta / T_a) - T_ref_a * debye_fun(theta / T_ref_a))
    return energy_therm_a
#return model total enerty
def energy_mod_total(T_a,V_a,param_d):
    """Total model energy: Vinet cold curve plus MGD thermal contribution."""
    cold = energy_vinet(V_a, param_d)
    thermal = energy_mgd_powlaw(V_a, T_a, param_d)
    return cold + thermal
def findroot(T_a,V_a,param_d,E):
    """Energy residual (model minus target E); zero at the Hugoniot T."""
    residual = energy_mod_total(T_a, V_a, param_d) - E
    return residual
#find the temperature
# For each Hugoniot state, solve energy_mod_total(T, V) = E2 for T with
# Brent's method, bracketed on [300, 3000] K.
result = []
for ind in range(len(V_a)):
    T_root = optimize.brentq(findroot, a=300,b=3000,args = (V_a[ind],param_d,E2[ind]),full_output = False)
    result.append(T_root)
print "result: ", result
#edit the dictionary
#plot energy from 300K to 3000K, compare with E2,
#eos_mod.py & eval_Pt_eos.py
# Pressure residual of the MGD model against the Hugoniot pressures.
# NOTE(review): this passes T_root (the last loop value only), not the full
# `result` list - confirm whether element-wise temperatures were intended.
P_resi = MGD_PowerLaw(V_a, T_root, param_d) - p2
print P_resi
plt.figure()
plt.plot(p2,P_resi,'x')
plt.xlabel('absolute P')
plt.ylabel('resi_P')
plt.show()
plt.plot(result,P_resi,'rx')
plt.xlabel('T')
plt.ylabel('resi_P')
plt.show()
# Thermal pressure relative to the 300 K reference.
Pthermal = Ptot_powerlaw(V_a,result,param_d,300)
plt.plot(Pthermal,P_resi+Pthermal,'rx')
plt.xlabel('thermal')
plt.ylabel('resi_P')
plt.show()
##conversion from PVT to vs vp.
def mass_conserv(us,up,rho1, rho2):
    """Rankine-Hugoniot mass-conservation residual: rho1*us - rho2*(us-up)."""
    compressed_flux = rho2 * (us - up)
    return rho1 * us - compressed_flux
def momentum_conserv(us,up,rho1,p1,p2):
    """Rankine-Hugoniot momentum-conservation residual in GPa.

    Per the Hugoniot construction above, p2 = rho1*us*up*10**24 + p1, so the
    residual is p2 - rho1*us*up*10**24 - p1 (zero on the Hugoniot).

    Fixes two defects: the passed p1 was shadowed by a hard-coded
    atmospheric value, and p1 was added instead of subtracted, leaving the
    residual off by 2*p1.
    """
    return p2 - rho1*us*up*10**(24) - p1
def energy_conserv(us,up,p1,p2,rho1,rho2,E2):
    """Rankine-Hugoniot energy-conservation residual in eV/unitcell.

    E2 is compared against 0.5*(p1+p2)*(1/rho1 - 1/rho2) after converting
    from GPa*cm^3/g units to eV per unit cell. `us` and `up` are unused but
    kept so all three conservation equations share a uniform signature.
    """
    m_cell = 195 * 4 / atompermol  # (g/unitcell)
    gpa_to_ev = m_cell / 160.217657  # (g/cell)*(eV/(GPa*Ang^3))
    hugoniot_energy = 0.5 * (p1 + p2) * gpa_to_ev * (1 / rho1 - 1 / rho2)
    return E2 - hugoniot_energy
# Normalization scales so the three conservation residuals in fitfunc are
# comparable in magnitude.
mass_range = max(rho2*(Us-Up))
momentum_range = max(p2-p1)
energy_range = max(E2)
print "testhere: ",mass_range, momentum_range,energy_range
def fitfunc(u,rho1,rho2,p1,p2,E2):
    """Sum of squared, range-normalized conservation residuals for u=[us,up].

    Uses the module-level mass_range/momentum_range/energy_range scales so
    the three residuals contribute comparably to the objective.
    """
    us = u[0]
    up = u[1]
    mass_term = (mass_conserv(us, up, rho1, rho2) / mass_range) ** 2
    momentum_term = (momentum_conserv(us, up, rho1, p1, p2) / momentum_range) ** 2
    energy_term = (energy_conserv(us, up, p1, p2, rho1, rho2, E2) / energy_range) ** 2
    return np.array(mass_term + momentum_term + energy_term, dtype='f8')
# Initial [us, up] guess for the (currently disabled) inversion below.
guess = [0.3,4]
#set other parameters
#print "test three functions: ", mass_conserv(Us,Up,rho1,rho2), momentum_conserv(Us,Up,rho1,p1,p2), energy_conserv(Us,Up,p1,p2,rho1,rho2,E2)
#rho1new = rho1 * np.ones(len(rho2))
#p1new = p1* np.ones(len(p2))
#for i in range(len(rho2)):
#    popt = optimize.minimize(fitfunc,guess[:],args=(rho1new[i],rho2[i],p1new[i],p2[i],E2[i]),full_output=1)
#    print popt
#print"done"
##ployfit rho and T
#print "temperaure",result #unit K
#print "density and its corresponding volume", rho2, V_a# unit grams/Ang^3
##get Pmod and Emod
#Emod = energy_mod_total(result,V_a,param_d)
#Pmod = MGD_PowerLaw(V_a, result, param_d)
#print Emod, Pmod
#for i in range(len(rho2)):
#    popt = optimize.leastsq(fitfunc,guess[:],args=(rho1new[i],rho2[i],p1new[i],Pmod[i],Emod[i]),full_output=1)
#print popt
#modfit = np.polyfit(result,rho2,3)
#print modfit
#p = np.poly1d(modfit)
#tem_range = np.array([300,500,700,900,1100,1300,1500,1700,1900,2100,2300,2500,2700,2900,3000])
#print "using polyfit: ", p(tem_range),tem_range
#print "computed density and its corresponding temperature: ", rho2, result# unit grams/Ang^3
#####Compute Jamieson's Temperature
# Override the MGD parameters with Jamieson's Pt values
# (theta0 in K, gamma0 dimensionless, q = 1).
param_d['theta0'] = 200
param_d['gamma0'] = 2.40
param_d['q'] = 1
#compute thermal part energy
def energy_Jam_mgd_powlaw( V_a, T_a, param_d ):
    """Thermal energy using Jamieson's heat-capacity coefficient (0.12786)
    instead of the Dulong-Petit C_DP, referenced to a 298 K isotherm.
    """
    gamma0 = param_d['gamma0']
    q = param_d['q']
    gamma = gamma0 * (V_a / param_d['V0']) ** q
    theta = param_d['theta0'] * np.exp(-(gamma - gamma0) / q)
    T_ref_a = 298  # here we use isothermal reference compression curve
    return 0.12786 * (T_a * debye_fun(theta / T_a) -
                      T_ref_a * debye_fun(theta / T_ref_a))
def Jam_fit(T_a,P,param_d):
    """Residual of P against Jamieson's thermal-pressure model at the
    reference volume (density 21.449 g/cm^3, gamma = 2.40)."""
    v_ref = 195 * param_d['const']['Natom'] / atompermol / 21.449
    return P - (21.449 * 2.40) * energy_Jam_mgd_powlaw(v_ref, T_a, param_d)
# Solve Jam_fit(T) = 0 on [300, 3000] K for every Hugoniot pressure.
r = []
for ind in range(len(p2)):
    T_root = optimize.brentq(Jam_fit, a=300,b=3000,args = (p2[ind],param_d),full_output = False)
    r.append(T_root)
print "r: ", r
####
# Rebuild densities from Jamieson's reference density.
# NOTE(review): 21.4449 here vs 21.449 in Jam_fit - confirm which value is
# intended.
rho0 = 21.4449/10**24 #grams/Ang^3
rho2 = rho0*Us/(Us-Up) #unit grams/Ang^3
V_0 = 195*param_d['const']['Natom']/atompermol/rho0 #unit Ang^3/unitcell
#read Jam's data here and fit the vinet model for 300K
dat = np.loadtxt("Fig.txt", delimiter = ",", skiprows = 1)
# Column 2 is compression (1 - V/V0); column 1 is pressure.
V300_a = (1 - dat[:,2]) * V_0
P300_a = dat[:,1]
def V_fit(param_a, P_a=P300_a, V_a=V300_a):
    """Pressure residuals of a Vinet model [V0, K0, K0p] against (V_a, P_a).

    Defaults bind Jamieson's 300 K digitized data at definition time, so
    leastsq can call this with just the parameter vector.
    """
    trial_d = {'V0': param_a[0], 'K0': param_a[1], 'K0p': param_a[2]}
    model_P_a = press_vinet(V_a, trial_d)
    return P_a - model_P_a
# Least-squares fit of the Vinet parameters to Jamieson's 300 K data.
param0_a = np.array([V_0, 100.0, 4.0])
print V_fit(param0_a)
paramfit_a = optimize.leastsq( V_fit, param0_a )
print "%%%%%%%%%%%%"
print "paramfit_a"
print paramfit_a
paramtrue_a = paramfit_a[0]
print "true params: ", paramtrue_a
#set true dictionary for Jam's vinet model
paramtrue = dict()
paramtrue = {'V0':paramtrue_a[0],'K0':paramtrue_a[1],'K0p':paramtrue_a[2]}
#using computed V_a to find the corresponding P_vinet
V_a = 195*param_d['const']['Natom']/atompermol/rho2 #unit Ang^3/unitcell
print "V_a: " , V_a
P300 = press_vinet(V_a, paramtrue)
print "P300 is: ", P300
#get the thermal pressure
Pth = p2-P300
print "pth ", Pth
#plt.plot(p2,V_a)
#plt.show()
# Only states above the cold curve have positive thermal pressure; the root
# finder below is only applied where this mask is True.
mask_a = p2 > P300
print "now p2 is: ", p2
def findT(T, Pth,rho2):
    """Residual of thermal pressure Pth (GPa) against Jamieson's MGD model.

    A root in T gives the temperature for density rho2 (g/cm^3). Relies on
    the module-level param_d for Natom.
    """
    atompermol = 6.022*(10**23) #at/mol
    rho0 = 21.4449 #unit g/cm^3
    V_0 = 195*param_d['const']['Natom']/atompermol/rho0*1e24 #unit Ang^3/unitcell
    g0 = 2.40  # Grueneisen parameter at rho0
    thetaD = 200 #unit K
    # Debye temperature at this density, assuming q = 1:
    # theta(V) = thetaD * exp(g0 * (1 - rho0/rho2)).
    thetaV = thetaD * np.exp(g0*(1-rho0/rho2))
    T0 = 300  # reference temperature, K
    fcv = 0.12786/0.12664 #the ratio of Cvmax/DP
    kT0 = 1.0/40 #unit eV
    unit_conv = 160.217657  # eV/Ang^3 -> GPa
    return Pth - g0/V_0 * 3 * fcv * kT0 * (T/T0 * debye_fun(thetaV/T) - debye_fun(thetaV/T0)) * unit_conv
#from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# Sanity check of the residual at both bracket ends (densities in g/cm^3).
print "findT: ", findT(300,Pth,rho2*1e24), findT(3000,Pth,rho2*1e24)
thetaV = 200 * np.exp(2.4*(1-rho0/(rho2*1e24)))
print "thetaV", thetaV
# Solve for the temperature at each state with positive thermal pressure;
# entries stay NaN where the root is not bracketed (mask_a False).
Tfit = np.nan*np.ones(p2.shape)
for ind in range(len(Pth)):
    if mask_a[ind]:
        T_root = optimize.brentq(findT, a=300,b=10000,args = (Pth[ind],rho2[ind]*1e24),full_output = False)
        Tfit[ind] = T_root
#print "Tfit",Tfit
#print "eta",(V_0-V_a)/V_0
#print "p2:",p2
# Pressure / compression / temperature table.
pvt_a = np.vstack((p2,(V_0-V_a)/V_0,Tfit))
print pvt_a.T
plt.plot(Tfit/1000,p2,'rx')
plt.xlim(0,2)
plt.ylim(0,100)
plt.xlabel("temperature [kK]")
plt.ylabel("P [GPa]")
plt.show()
# Compare the Hugoniot (p2, V_a) points against the fitted 300 K Vinet curve.
a = V_0*np.linspace(1,0.5,num = 100)
p = press_vinet(a,paramtrue)
plt.plot(p2,V_a,'rx', p,a,'b')
plt.xlim(0,200)
plt.show()
| |
from TOSSIM import *
import sys
from tinyos.tossim.TossimApp import *
EVENTS = 5000  # default simulator event budget

# Build the TOSSIM environment from the compiled app's variable metadata.
n = NescApp("Unknown App", "app.xml")
vars = n.variables.variables()
t = Tossim(vars)
r = t.radio()

nodes = {}  # node id -> node info for the currently loaded topology
last_noise_filename = None  # remembered so the noise model can be reloaded

# Debug channels that may be attached to stdout.
# Fixed: 'Start' appeared twice in this dict literal; the duplicate key
# silently discarded the first entry.
debug_channels = {
    'Boot': True,
    'Start': False,
    'Debug': True,
    'Messages': False,
    'Timeout': False,
    'Sensors': False,
    'GPS': False,
    'Fire': False,
    'MessagesReceived': False,
    'MessagesSent': False,
}

t.addChannel("Boot", sys.stdout)
t.addChannel("Debug", sys.stdout)
def load_noise(filename):
    """Feed every noise-trace reading in *filename* to all loaded nodes,
    then (re)create each node's noise model. Blank lines are skipped.
    """
    global last_noise_filename
    last_noise_filename = filename
    noise_file = open(filename, "r")
    for raw_line in noise_file:
        stripped = raw_line.strip()
        if not stripped:
            continue
        reading = int(stripped)
        for node_id in nodes:
            t.getNode(node_id).addNoiseTraceReading(reading)
    for node_id in nodes:
        t.getNode(node_id).createNoiseModel()
def boot_node(nodeid):
    """Schedule node *nodeid* to boot at a time staggered by its id."""
    boot_time = (31 + t.ticksPerSecond() / 10) * nodeid + 1
    print("Booting node " + str(nodeid) + " in time " + str(boot_time))
    t.getNode(nodeid).bootAtTime(boot_time)
"""
Commands
"""
def run(args):
    """Run the next <N> simulator events; args must be [event_count]."""
    if len(nodes) == 0:
        print("You need to load a topology first")
        return
    if len(args) != 1:
        print('You have to specify how many events you want to run')
        print("Example: 'run 3' will run 3 events")
        return
    try:
        event_count = int(args[0])
        print("--------------------------")
        print("Running " + str(event_count) + " events")
        print("--------------------------")
        for _ in range(event_count):
            t.runNextEvent()
    except ValueError:
        print("ERROR: run argument must be a number!")
def help(args):
    """Print the list of interactive commands, one per line."""
    usage_lines = (
        "----------------------------",
        "Available commands",
        "----------------------------",
        "load topology <filename> : Load a topology from a file",
        "load noise <filename> : Load a noise model from a file",
        "boot : Boot all nodes in the network",
        "run <events> : run the next <events> in the node network",
        "print topology : Prints the entire topology of the network",
        "print state : Print the state for each node in the network",
        "print routing : For each sensor node prints the routing node that it is connected to",
        "print server : Prints the server log file",
        "print rank : Prints, for each routing node, its rank in the network",
        "on <node id> Turn on node with id <node id>",
        "off <node id> Turn off node with id <node id>",
        "var <node id> <variable name> : Print the variable value of the node with <node id>",
        "add <src_id> <dest_id> <gain> Add a link in the network topology from node <src_id> to <dest_id> with gain <gain>",
        "script <file> : Loads commands from a file with name <file>",
        "debug : Print all available debug channels",
        "debug enable <Channel> : Enables the debug channel <Channel>",
        "debug disable <Channel> : Disables the debug channel <Channel>",
        "stop : Stops the simulator and continue when the user presses Enter",
        "exit : Exit from the simulator",
    )
    for text in usage_lines:
        print(text)
def exit(args):
    """Announce termination and stop the interpreter via SystemExit."""
    print("Exiting...")
    sys.exit()
def load(args):
    """Load either a topology or a noise model.

    Usage: load topology|noise <filename>.  Files are searched in the
    current directory first, then in sibling ../topologies|../noise and
    local topologies/|noise/ directories.
    """
    if len(args) != 2:
        print("Usage: load topology|noise <filename>")
        return
    kind = args[0]          # renamed from 'type' (shadowed the builtin)
    file_name = args[1]
    if kind == 'topology':
        f = None
        for candidate in (file_name,
                          '../topologies/' + file_name,
                          'topologies/' + file_name):
            try:
                f = open(candidate, 'r')
                break
            except IOError:
                continue
        if f is None:
            print("Cannot find topology file " + file_name)
            return
        global t
        global r
        # BUGFIX: the original only cleared 'nodes' when the file was found in
        # the current directory; now the old topology is always discarded once
        # a topology file has actually been opened.
        nodes.clear()
        t = Tossim(vars)
        r = t.radio()
        for line in f:
            s = line.split()
            if s:
                # Same output as the original Python 2 print statement.
                print("  %s   %s   %s" % (s[0], s[1], s[2]))
                node1 = int(s[0])
                node2 = int(s[1])
                gain = float(s[2])
                r.add(node1, node2, gain)
                m1 = t.getNode(node1)
                m2 = t.getNode(node2)
                if node1 not in nodes:
                    nodes[node1] = m1
                # BUGFIX: node2 was registered with m1 (node1's object).
                if node2 not in nodes:
                    nodes[node2] = m2
        f.close()
    elif kind == 'noise':
        loaded = False
        for candidate in (file_name,
                          '../noise/' + file_name,
                          'noise/' + file_name):
            try:
                load_noise(candidate)
                loaded = True
                break
            except IOError:
                continue
        if loaded:
            print("Noise model from " + file_name + " loaded")
        else:
            print('Cannot find noise model file ' + file_name)
    else:
        print("Usage: load topology|noise <filename>")
def boot(args):
    """Boot every node currently present in the topology."""
    if not nodes:
        print("You need to load a topology and a noise model first")
        print("Try load command")
        print("Type 'help' for more info")
        return
    for node_id in nodes:
        boot_node(node_id)
def print_info(args):
    """Dispatch the 'print' command: topology, state, routing, rank, server."""
    if len(args) == 0:
        print("Missing argument of print command")
        return
    arg = args[0]
    if arg == 'topology':
        print("------------------------")
        print("Network topology")
        print("------------------------")
        # Directed links: every (src, dest) pair the radio knows about.
        for src in nodes:
            for dest in nodes:
                if r.connected(src, dest):
                    print(str(src) + " --> " + str(dest))
    elif arg == 'state':
        print("------------------------")
        print("Nodes' states")
        print("------------------------")
        for i in nodes:
            node = t.getNode(i)
            if node.isOn():
                state = 'ON'
            else:
                state = 'OFF'
            print("Node " + str(i) + ": " + state)
    elif arg == 'routing':
        print("------------------------")
        print(" Sensor node -> Routing node")
        print("------------------------")
        # Convention in this app: ids > 99 are sensor nodes.
        for i in nodes:
            if i > 99:
                m = t.getNode(i)
                var = m.getVariable('NodeP.routeNodeAddr')
                value = var.getData()
                print(str(i) + " --> " + str(value))
    elif arg == 'rank':
        print("------------------------")
        print(" Routing node : Rank")
        print("------------------------")
        # Convention in this app: ids 1..99 are routing nodes.
        for i in nodes:
            if i > 0 and i < 100:
                m = t.getNode(i)
                var = m.getVariable('NodeP.rank')
                value = var.getData()
                print(str(i) + " : " + str(value))
    elif arg == 'server':
        print("------------------------")
        print(" Server Log file")
        print("------------------------")
        # BUGFIX: a missing log.txt used to crash the shell with an
        # unhandled IOError and the handle leaked on any error.
        try:
            log_file = open('log.txt', 'r')
        except IOError:
            print("Cannot open log.txt")
        else:
            try:
                for line in log_file:
                    print(line)
            finally:
                log_file.close()
        print("------------------------")
        print(" End of Server Log File")
        print("------------------------")
    else:
        print("Wrong argument for print command")
        print("Usage: print topology")
def off(args):
    """Turn off the node whose id is given as the single argument."""
    if len(args) != 1:
        print("Wrong argument for off command")
        print("Usage: off <node id>")
        return
    try:
        node_id = int(args[0])
    except ValueError:
        print("Argument of 'off' command must be the id of the node")
        return
    t.getNode(node_id).turnOff()
    print("Node " + str(node_id) + " turned off")
def on(args):
    """Turn on the node whose id is given as the single argument."""
    if len(args) != 1:
        print("Wrong argument for on command")
        print("Usage: on <node id>")
        return
    try:
        node_id = int(args[0])
    except ValueError:
        print("Argument of 'on' command must be the id of the node")
        return
    t.getNode(node_id).turnOn()
    print("Node " + str(node_id) + " turned on")
def var(args):
    """Print the value of a TOSSIM variable on a given node."""
    if len(args) != 2:
        print("Wrong arguments for var command")
        print("Usage: var <node id> <variable>")
        return
    try:
        node_id = int(args[0])
    except ValueError:
        print("Node id for 'var' command must be a number")
        return
    var_name = args[1]
    value = t.getNode(node_id).getVariable(var_name).getData()
    print("Node " + str(node_id) + " variable: " + var_name + " value: " + str(value))
def add(args):
    """Add a radio link src -> dest with the given gain.

    New node ids are registered, booted, and the last-loaded noise model
    is re-applied so every node has a noise trace.
    """
    if len(args) != 3:
        print("Wrong usage of command add")
        print("Usage: add <src_id> <dest_id> <gain>")
        return
    try:
        src_id = int(args[0])
        dest_id = int(args[1])
        # Generalized: gain is parsed as float, consistent with 'load topology'
        # (integer input still works).
        gain = float(args[2])
    except ValueError:
        print("Arguments of 'add' command should be numbers")
        return
    r.add(src_id, dest_id, gain)
    m1 = t.getNode(src_id)
    m2 = t.getNode(dest_id)
    if src_id not in nodes:
        nodes[src_id] = m1
        print("New node in the network. id: " + str(src_id))
        boot_node(src_id)
    if dest_id not in nodes:
        # BUGFIX: dest_id was registered with m1 (src node's object).
        nodes[dest_id] = m2
        print("New node in the network. id: " + str(dest_id))
        boot_node(dest_id)
    # BUGFIX: guard against 'add' before any noise model was ever loaded
    # (load_noise(None) used to raise TypeError).
    if last_noise_filename:
        load_noise(last_noise_filename)
def script(args):
    """Replay simulator commands from a script file, one command per line."""
    if len(args) == 0:
        print("Command script takes one argument which is the script file name")
        return
    filename = args[0]
    # BUGFIX: 'file' was referenced before assignment when every open failed,
    # raising NameError and making the "Cannot find" branch unreachable.
    handle = None
    # Try the name as given, then the usual script directories.
    for candidate in (filename, '../scripts/' + filename, 'scripts/' + filename):
        try:
            handle = open(candidate, 'r')
            break
        except IOError:
            continue
    if handle:
        for line in handle:
            process_input(line)
        handle.close()
    else:
        print("Cannot find file " + filename + " in:")
        print(filename)
        print("scripts/")
def print_channels():
    """List every debug channel together with its current state."""
    print("Available channels")
    for channel_name in debug_channels:
        enabled = debug_channels[channel_name]
        state_str = 'ENABLED' if enabled else 'DISABLED'
        print(str(channel_name) + ": " + state_str)
def debug(args):
    """Enable or disable one of the known debug channels on stdout.

    Usage: debug enable|disable <Channel>.
    """
    if len(args) != 2:
        print("Command debug takes 2 arguments")
        print("Usage: debug enable|disable <Channel>")
        print_channels()
        return
    action = args[0]
    channel = args[1]
    # Replaced Python-2-only dict.has_key() with the 'in' operator.
    if channel not in debug_channels:
        print("Channel " + str(channel) + " is not available")
        print_channels()
        return
    if action == 'enable':
        t.addChannel(channel, sys.stdout)
        debug_channels[channel] = True
    elif action == 'disable':
        t.removeChannel(channel, sys.stdout)
        debug_channels[channel] = False
    else:
        print("Wrong argument for command debug")
def stop(args):
print("---------------------")
print("Simulation stopped now")
print("Press enter to continue...")
print("---------------------")
raw_input()
# Dispatch table: command word typed by the user -> handler function.
# Every handler takes one argument: the list of tokens after the command.
options = {
    'help': help,
    'run': run,
    'exit': exit,
    'load': load,
    'boot': boot,
    'print': print_info,
    'off': off,
    'on': on,
    'var': var,
    'add': add,
    'script': script,
    'debug': debug,
    'stop': stop
}
def get_command(array):
    """Return the command word, i.e. the first token of the input line."""
    command_word = array[0]
    return command_word
def get_args(array):
    """Return every token after the command word (may be empty)."""
    return array[1:]
def process_input(user_input):
    """Tokenize one line of user input and dispatch it to its handler.

    Blank lines are ignored; unknown commands print a hint.
    """
    tokens = user_input.split()
    if not tokens:
        return
    command = get_command(tokens)
    args = get_args(tokens)
    # Replaced Python-2-only dict.has_key() with the 'in' operator.
    if command in options:
        options[command](args)
    else:
        print("You need to type a valid command")
        print("Type 'help' to see a list of all available commands")
        print("----------------------------------")
print("----------------------------------")
if len(sys.argv) >= 2:
script_file = open(sys.argv[1], "r")
for line in script_file:
process_input(line)
print("-------------------------------------------")
print(" Fire Detection Sensor Network Simulator")
print("-------------------------------------------")
# Main loop
while True:
print("> Type a command or just 'help'")
user_input = raw_input()
process_input(user_input)
| |
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
import os
import re
import ast
import glob
import hashlib
from pathlib import Path
from typing import Union
import pandas
import numpy as np
import datetime
import subprocess
from ast import literal_eval as make_tuple
import sibispy
from sibispy import sibislogger as slog
from sibispy import utils as sutils
from sibispy import config_file_parser as cfg_parser
from sibispy.cluster_util import SlurmScheduler
class redcap_to_casesdir(object):
    """Export REDCap form data into per-subject/visit 'cases' directories.

    Workflow: call configure() once with a sibispy session and the REDCap
    metadata (data dictionary), then use the create_*_datadict and
    export_subject_* methods.  Form definitions are read from text files in
    <operations_dir>/redcap_to_casesdir (first line: REDCap form name,
    remaining lines: field names, optionally with "[rename]" suffixes).
    """
    def __init__(self):
        # export name -> REDCap form the fields are imported from
        self.__import_forms = dict()
        # export name -> list of field names written to the cases dir
        self.__export_forms = dict()
        # export name -> {redcap field name: renamed output column}
        self.__export_rename = dict()
        # Make lookup dicts for mapping radio/dropdown codes to labels
        self.__code_to_label_dict = dict()
        # field name -> (type, validation, label, min, max, choices)
        self.__metadata_dict = dict()
        # event label -> (arm code, visit code)
        self.__event_dict = dict()
        # events for which no demographics file should be generated
        self.__demographic_event_skips = list()
        self.__forms_dir = None
        self.__sibis_defs = None
        self.__scanner_dict = None
    def configure(self, sessionObj, redcap_metadata):
        """Read config + form files and digest REDCap metadata.

        Returns True on success, False otherwise (errors go to slog).
        """
        # Make sure it was set up correctly
        if not sessionObj.get_ordered_config_load() :
            slog.info('recap_to_cases_dir.configure',"ERROR: session has to be configured with ordered_config_load set to True")
            return False
        # reading script specific settings
        (cfgParser,err_msg) = sessionObj.get_config_sys_parser()
        if err_msg:
            slog.info('recap_to_cases_dir.configure',str(err_msg))
            return False
        self.__sibis_defs = cfgParser.get_category('redcap_to_casesdir')
        # scanner_dict values are comma-separated "<manufacturer>,<model>" strings
        self.__scanner_dict = self.__sibis_defs['scanner_dict']
        for TYPE in list(self.__scanner_dict.keys()) :
            self.__scanner_dict[TYPE] = self.__scanner_dict[TYPE].split(",")
        # Reading in events
        self.__event_dict = self.__transform_dict_string_into_tuple__('event_dictionary')
        if not self.__event_dict:
            return False
        # Reading in which events to skip demographics generation for (i.e. midyears)
        self.__demographic_event_skips = self.__sibis_defs['skip_demographics_for']
        # reading in all forms and variables that should be exported to cases_dir
        self.__forms_dir = os.path.join(sessionObj.get_operations_dir(),'redcap_to_casesdir')
        if not os.path.exists(self.__forms_dir) :
            slog.info('redcap_to_casesdir.configure','ERROR: ' + str(self.__forms_dir) + " does not exist!")
            return False
        exports_files = glob.glob(os.path.join(self.__forms_dir, '*.txt'))
        for f in exports_files:
            file = open(f, 'r')
            contents = [line.strip() for line in file.readlines()]
            file.close()
            # export name = file name without .txt; first line = REDCap form
            export_name = re.sub('\.txt$', '', os.path.basename(f))
            import_form = re.sub('\n', '', contents[0])
            self.__import_forms[export_name] = import_form
            # strip "[rename]" suffixes and append the form's _complete field
            self.__export_forms[export_name] = [re.sub('\[.*\]', '', field) for field in contents[1:]] + ['%s_complete' % import_form]
            self.__export_rename[export_name] = dict()
            for field in contents[1:]:
                match = re.match('^(.+)\[(.+)\]$', field)
                if match:
                    self.__export_rename[export_name][match.group(1)] = match.group(2)
        return self.__organize_metadata__(redcap_metadata)
    def __transform_dict_string_into_tuple__(self,dict_name):
        """Parse a config dict of "a,b" strings into a dict of tuples.

        Returns None (and logs) when the config entry is empty/missing.
        """
        dict_str = self.__sibis_defs[dict_name]
        dict_keys = list(dict_str.keys())
        if not len(dict_keys):
            slog.info('redcap_to_casesdir.configure',"ERROR: Cannot find '" + dict_name + "'' in config file!")
            return None
        dict_tup = dict()
        for key in dict_keys:
            # turn string into tuple
            dict_tup[key] = make_tuple("(" + dict_str[key] +")")
        return dict_tup
    # Organize REDCap metadata (data dictionary)
    def __organize_metadata__(self,redcap_metadata):
        """Digest REDCap metadata into __metadata_dict and label lookups."""
        # turn metadata into easily digested dict
        for field in redcap_metadata:
            field_tuple = (field['field_type'],
                           field['text_validation_type_or_show_slider_number'],
                           field['field_label'],
                           field['text_validation_min'],
                           field['text_validation_max'],
                           field['select_choices_or_calculations'])
            self.__metadata_dict[field['field_name']] = field_tuple
        # merge in fields defined only in the sibis config (not in REDCap)
        meta_data_dict = self.__transform_dict_string_into_tuple__('general_datadict')
        if not meta_data_dict :
            return False
        self.__metadata_dict.update(meta_data_dict)
        if not self.__check_all_forms__():
            return False
        self.__make_code_label_dict__(redcap_metadata)
        return True
    # Filter confidential fields from all forms
    def __check_all_forms__(self):
        """Validate every export form; log and fail on suspect fields."""
        # Filter each form
        text_list = list()
        non_redcap_list = list()
        for export_name in list(self.__export_forms.keys()):
            (form_text_list, form_non_redcap_list) = self.__check_form__(export_name)
            if form_text_list :
                text_list += form_text_list
            if form_non_redcap_list:
                non_redcap_list += form_non_redcap_list
        if text_list:
            slog.info('redcap_to_casesdir.__check_all_forms__.' + hashlib.sha1(str(text_list).encode()).hexdigest()[0:6], "ERROR: The txt file(s) in '" + str(self.__forms_dir) + "' list non-numeric redcap variable names!",
                      form_variable_list = str(text_list),
                      info = "Remove it from form file or modify definition in REDCap")
        if non_redcap_list :
            slog.info('redcap_to_casesdir.__check_all_forms__.' + hashlib.sha1(str(text_list).encode()).hexdigest()[0:6], "ERROR: The txt file(s) in '" + str(self.__forms_dir) + "' list variables that do not exist in redcap!",
                      form_variable_list = str(non_redcap_list),
                      info = "Remove it from form or modify definition REDCap")
        if non_redcap_list or text_list:
            return False
        return True
    # Filter potentially confidential fields out of given list, based on project
    # metadata
    def __check_form__(self, export_name):
        """Return ([free-text fields], [fields unknown to REDCap]) of a form."""
        text_list = list()
        non_redcap_list = list()
        for field_name in self.__export_forms[export_name]:
            try:
                # strip checkbox suffix ('___N') before the metadata lookup
                (field_type, field_validation, field_label, text_val_min,
                 text_val_max, choices) = self.__metadata_dict[re.sub('___.*', '', field_name)]
                # free-text fields without numeric/time validation may leak PHI
                if (field_type != 'text' and field_type != 'notes') or (field_validation in ['number', 'integer', 'time']):
                    pass
                else:
                    text_list.append([export_name,field_name, field_type, field_validation])
            except:
                # '_complete' fields are synthesized, not in the metadata
                if '_complete' not in field_name:
                    non_redcap_list.append([export_name,field_name])
        return (text_list,non_redcap_list)
    def __make_code_label_dict__(self,redcap_metadata):
        """Build code -> label lookups for every radio/dropdown field."""
        # First turn metadata into easily digested dict
        for field in redcap_metadata:
            if field['field_type'] in ['radio', 'dropdown']:
                field_dict = {'': ''}
                choices = field['select_choices_or_calculations']
                # choices look like "1, Label one | 2, Label two"
                for choice in choices.split('|'):
                    code_label = [c.strip() for c in choice.split(',')]
                    field_dict[code_label[0]] = ', '.join(code_label[1:])
                self.__code_to_label_dict[field['field_name']] = field_dict
    # used to be get_export_form_names
    def get_export_names_of_forms(self):
        """Return the list of export (file) names of all configured forms."""
        return list(self.__export_forms.keys())
    def create_datadict(self, export_name, datadict_dir):
        """Write the data dictionary CSV for one export form; return its path."""
        if export_name not in self.__export_forms.keys() :
            slog.info('redcap_to_casesdir.create_datadict',"ERROR: could not create data dictionary for form " + export_name)
            return None
        export_form_entry_list = self.__export_forms[export_name]
        size_entry_list = len(export_form_entry_list)
        export_form_list = [export_name] * size_entry_list
        return self.__create_datadicts_general__(datadict_dir, export_name, export_form_list,export_form_entry_list)
    # defining entry_list only makes sense if export_forms_list only consists of one
    # entry !
    def create_all_datadicts(self, datadict_dir):
        """Write data dictionary CSVs for every form plus demographics."""
        for export_name in self.get_export_names_of_forms():
            self.create_datadict(export_name,datadict_dir)
        self.create_demographic_datadict(datadict_dir)
    # Create custom form for demographics
    def create_demographic_datadict(self, datadict_dir):
        """Write the data dictionary CSV for the synthetic demographics form."""
        meta_data_dict = self.__transform_dict_string_into_tuple__('demographic_datadict')
        if not meta_data_dict:
            return False
        self.__metadata_dict.update(meta_data_dict)
        dict_str = self.__sibis_defs['demographic_datadict']
        export_entry_list = list(dict_str.keys())
        export_form_list = ['demographics'] * len(export_entry_list)
        return self.__create_datadicts_general__(datadict_dir, 'demographics', export_form_list,export_entry_list)
    # for each entry in the form list you have to define a variable
    def __create_datadicts_general__(self,datadict_dir, datadict_base_file,export_forms_list, variable_list):
        """Assemble a REDCap-style data dictionary and write it as CSV.

        Returns the CSV path, or None when writing failed.
        NOTE(review): mutates the caller's export_forms_list/variable_list
        (insert) -- confirm callers do not reuse those lists.
        """
        redcap_datadict_columns = ["Variable / Field Name", "Form Name",
                                   "Section Header", "Field Type", "Field Label",
                                   "Choices, Calculations, OR Slider Labels",
                                   "Field Note",
                                   "Text Validation Type OR Show Slider Number",
                                   "Text Validation Min", "Text Validation Max",
                                   "Identifier?",
                                   "Branching Logic (Show field only if...)",
                                   "Required Field?", "Custom Alignment",
                                   "Question Number (surveys only)",
                                   "Matrix Group Name", "Matrix Ranking?"]
        # Insert standard set of data elements into each datadict.
        for i in range(3):
            elements = ['subject', 'arm', 'visit']
            export_forms_list.insert(i, export_forms_list[0])
            variable_list.insert(i, elements[i])
        if not os.path.exists(datadict_dir):
            os.makedirs(datadict_dir)
        ddict = pandas.DataFrame(index=variable_list,columns=redcap_datadict_columns)
        for name_of_form, var in zip(export_forms_list, variable_list):
            field_name = re.sub('___.*', '', var)
            ddict["Variable / Field Name"][var] = field_name
            ddict["Form Name"][var] = name_of_form
            # Check if var is in data dict ('FORM_complete' fields are NOT)
            if field_name in list(self.__metadata_dict.keys()):
                ddict["Field Type"][var] = self.__metadata_dict[field_name][0]
                # need to transfer to utf-8 code otherwise can create problems when
                # writing dictionary to file it just is a text field so it should not matter
                # .encode('utf-8')
                # Not needed in Python 3 anymore
                ddict["Field Label"][var] = self.__metadata_dict[field_name][2]
                ddict["Text Validation Type OR Show Slider Number"][var] = self.__metadata_dict[field_name][1]
                ddict["Text Validation Min"][var] = self.__metadata_dict[field_name][3]
                ddict["Text Validation Max"][var] = self.__metadata_dict[field_name][4]
                #.encode('utf-8')
                ddict["Choices, Calculations, OR Slider Labels"][var] = self.__metadata_dict[field_name][5]
        # Finally, write the data dictionary to a CSV file
        dicFileName = os.path.join(datadict_dir,datadict_base_file + '_datadict.csv')
        try:
            sutils.safe_dataframe_to_csv(ddict,dicFileName)
            return dicFileName
        except Exception as err_msg:
            slog.info('redcap_to_casesdir.__create_datadicts_general__',"ERROR: could not export dictionary" + dicFileName,
                      err_msg = str(err_msg))
            return None
    # Truncate age to 2 digits for increased identity protection
    def __truncate_age__(self, age_in):
        """Round an age-like value to 2 decimals; pass through non-numbers."""
        matched = re.match('([0-9]*\.[0-9]*)', str(age_in))
        if matched:
            return round(float(matched.group(1)), 2)
        else:
            return age_in
    def __get_scanner_mfg_and_model__(self, mri_scanner, expid):
        """Map a free-text scanner string to [manufacturer, model].

        Returns ["",""] (logging for unknown scanners) when not resolvable.
        """
        if mri_scanner == 'nan' :
            return ["",""]
        mri_scanner= mri_scanner.upper()
        # substring match against the configured scanner keys
        for TYPE in list(self.__scanner_dict.keys()) :
            if TYPE in mri_scanner :
                return self.__scanner_dict[TYPE]
        slog.info(expid, "Error: Do not know scanner type", script='redcap_to_casesdir.py', mri_scanner = mri_scanner)
        return ["",""]
    # NCANDA SPECIFIC - Generalize later
    # Create "demographics" file "by hand" - this includes some text fields
    def export_subject_demographics(
            self,
            subject,
            subject_code,
            arm_code,
            visit_code,
            site,
            visit_age,
            subject_data,
            visit_data,
            exceeds_criteria_baseline,
            siblings_enrolled_yn_corrected,
            siblings_id_first_corrected,
            measures_dir,
            conditional=False,
            verbose=False,
    ):
        """Write <measures_dir>/demographics.csv for one subject visit.

        When conditional is True an existing file is kept and visit_age is
        blanked.  Returns the safe_dataframe_to_csv result, or None when the
        conditional file already exists.
        """
        # Latino and race coding arrives here as floating point numbers; make
        # int strings from that (cannot use "int()" because it would fail for
        # missing data
        hispanic_code = re.sub('(.0)|(nan)', '', str(subject_data['hispanic']))
        race_code = re.sub('(.0)|(nan)', '', str(subject_data['race']))
        # scanner manufacturer map
        scanner_mfg, scanner_model = self.__get_scanner_mfg_and_model__(str(visit_data['mri_scanner']), subject + "-" + visit_code)
        # Definig enroll_exception_drinking_2
        if exceeds_criteria_baseline < 0 :
            exceeds_criteria_baseline=int(subject_data['enroll_exception___drinking'])
        if siblings_enrolled_yn_corrected < 0:
            siblings_enrolled_yn_corrected=subject_data['siblings_enrolled___true']
        # No sibling than by default it is the subject itself
        if siblings_enrolled_yn_corrected == 0 :
            siblings_id_first_corrected = subject_code
        elif siblings_id_first_corrected == None :
            # if there is a sibling and if not a special case, then use default
            siblings_id_first_corrected=subject_data['siblings_id1']
            # unless not defined either -> then it must be the first subject
            if type(siblings_id_first_corrected) is not str or siblings_id_first_corrected == "" :
                siblings_id_first_corrected = subject_code
        # if you add a line pe
        if race_code == '6':
            # if other race is specified, mark race label with manually curated
            # race code
            race_label=subject_data['race_other_code']
        else :
            race_label=self.__code_to_label_dict['race'][race_code]
        if pandas.isnull(subject_data['family_id']):
            family_id = ""
        else:
            family_id = str(int(subject_data['family_id']))
        if conditional:
            visit_age = ''
        else:
            visit_age = self.__truncate_age__(visit_age)
        demographics = [
            ['subject', subject_code],
            ['arm', arm_code],
            ['visit', visit_code],
            ['site', site],
            # NOTE(review): assumes character 9 of the participant id encodes sex
            ['sex', subject[8]],
            ['visit_age', visit_age],
            ['mri_structural_age', self.__truncate_age__(visit_data['mri_t1_age'])],
            ['mri_diffusion_age', self.__truncate_age__(visit_data['mri_dti_age'])],
            ['mri_restingstate_age', self.__truncate_age__(visit_data['mri_rsfmri_age'])],
            ['exceeds_bl_drinking',
             'NY'[int(subject_data['enroll_exception___drinking'])]],
            ['exceeds_bl_drinking_2',exceeds_criteria_baseline],
            ['siblings_enrolled_yn',
             'NY'[int(subject_data['siblings_enrolled___true'])]],
            ['siblings_id_first', subject_data['siblings_id1']],
            ['siblings_enrolled_yn_2',
             'NY'[int(siblings_enrolled_yn_corrected)]],
            # ['siblings_id_first_2', siblings_id_first_corrected],
            ['family_id', family_id],
            ['hispanic', self.__code_to_label_dict['hispanic'][hispanic_code][0:1]],
            ['race', race_code],
            ['race_label', race_label],
            ['participant_id', subject],
            ['scanner', scanner_mfg],
            ['scanner_model', scanner_model],
        ]
        series = pandas.Series()
        for (key, value) in demographics:
            series.at[key] = value
        target_path = os.path.join(measures_dir, 'demographics.csv')
        if conditional and os.path.exists(target_path):
            pass
        else:
            return sutils.safe_dataframe_to_csv(pandas.DataFrame(series).T,
                                                target_path,
                                                verbose=verbose)
    def export_subject_form(self, export_name, subject, subject_code, arm_code, visit_code, all_records, measures_dir,verbose = False):
        """Write one form of one visit to <measures_dir>/<export_name>.csv.

        Adds subject/arm/visit index columns, truncates *_age fields, appends
        '_label' columns for coded fields, and applies configured renames.
        """
        # Remove the complete field from the list of forms
        complete = '{}_complete'.format(self.__import_forms.get(export_name))
        fields = [column for column in self.__export_forms.get(export_name)
                  if column != complete]
        # Select data for this form - "reindex" is necessary to put
        # fields in listed order - REDCap returns them lexicographically sorted
        fields = [i for i in fields if i not in ['subject', 'arm', 'visit']]
        record = all_records[fields].reindex(fields, axis=1)
        # if I read it correctly then this statement is not possible
        if len(record) > 1:
            slog.info(subject + "-" + visit_code, "ERROR: muliple records for that visit found for form '" + export_name + "'!" )
            return None
        # Nothing to do
        if not len(record):
            if verbose :
                slog.info(subject + "-" + visit_code, "Info: visit data did not contain records of form '" + export_name + "'!" )
            return None
        # First, add the three index columns
        record.insert(0, 'subject', subject_code)
        record.insert(1, 'arm', arm_code)
        record.insert(2, 'visit', visit_code)
        field_idx = 0
        output_fields = []
        for field in record.columns:
            # Rename field for output if necessary
            if field in list(self.__export_rename[export_name].keys()):
                output_field = self.__export_rename[export_name][field]
            else:
                output_field = field
            output_fields.append(output_field)
            # If this is an "age" field, truncate to 2 digits for privacy
            if re.match('.*_age$', field):
                record[field] = record[field].apply(self.__truncate_age__)
            # If this is a radio or dropdown field
            # (except "FORM_[missing_]why"), add a separate column for the
            # coded label
            if field in list(self.__code_to_label_dict.keys()) and not re.match('.*_why$', field):
                code = str(record[field].iloc[0])
                label = ''
                if code in list(self.__code_to_label_dict[field].keys()):
                    label = self.__code_to_label_dict[field][code]
                field_idx += 1
                record.insert(field_idx, output_field + '_label', label)
                output_fields.append(output_field + '_label')
            field_idx += 1
        # Apply renaming to columns
        record.columns = output_fields
        # Figure out path for CSV file and export this record
        return sutils.safe_dataframe_to_csv(record,os.path.join(measures_dir, export_name + '.csv'),verbose=verbose)
    # First get data for all fields across all forms in this event - this
    # speeds up transfers over getting each form separately
    def get_subject_specific_form_data(self,subject,event,forms_this_event, redcap_project,select_exports=None):
        """Fetch all exportable fields of one subject/event in one request.

        Returns (records dataframe, list of export names present).
        """
        # define fields and forms to export
        all_fields = ['study_id']
        forbidden_export_fields = ['subject', 'visit', 'arm']
        export_list = []
        for export_name in list(self.__export_forms.keys()):
            if export_name in forbidden_export_fields:
                continue
            if (self.__import_forms[export_name] in forms_this_event):
                if (not select_exports or export_name in select_exports):
                    all_fields += [re.sub('___.*', '', field_name) for field_name in self.__export_forms[export_name]]
                    export_list.append(export_name)
        # Remove the fields we are forbidden to export from REDCap
        all_fields = np.setdiff1d(all_fields, forbidden_export_fields).tolist()
        # Get data
        all_records = redcap_project.export_records(fields=all_fields,records=[subject], events=[event],format='df')
        # return results
        return (all_records,export_list)
    # Export selected REDCap data to cases dir
    def export_subject_all_forms(self,redcap_project, site, subject, event, subject_data, visit_age, visit_data, arm_code, visit_code, subject_code, subject_datadir,forms_this_event, exceeds_criteria_baseline, siblings_enrolled_yn_corrected,siblings_id_first_corrected, select_exports=None, verbose=False):
        """Export demographics plus every selected form of one visit.

        Creates <subject_datadir>/measures and writes one CSV per form.
        """
        # Do not really use this feature later
        # Mark subjects/visits that have QA completed by creating a hidden marker file
        # qafile_path = os.path.join(subject_datadir, '.qacomplete')
        # if visit_data['mri_qa_completed'] == '1':
        #     try:
        #         if not os.path.exists(qafile_path):
        #             qafile = open(qafile_path, 'w')
        #             qafile.close()
        #     except IOError as error:
        #         slog.info(subject_code,"ERROR: unable to open QA marker file in {}. {}".format(subject_datadir, error))
        # else:
        #     try:
        #         if os.path.exists(qafile_path):
        #             os.remove(qafile_path)
        #     except OSError as error:
        #         print("ERROR: unable to remove QA marker file {}. {}".format(qafile_path, error))
        # Check if the "measures" subdirectory already exists - this is where all
        # the csv files go. Create it if necessary.
        measures_dir = os.path.join(subject_datadir, 'measures')
        if not os.path.exists(measures_dir):
            os.makedirs(measures_dir)
        # Export demographics (if selected)
        conditional = event in self.__demographic_event_skips
        if not select_exports or 'demographics' in select_exports:
            self.export_subject_demographics(
                subject=subject,
                subject_code=subject_code,
                arm_code=arm_code,
                visit_code=visit_code,
                site=site,
                visit_age=visit_age,
                subject_data=subject_data,
                visit_data=visit_data,
                exceeds_criteria_baseline=exceeds_criteria_baseline,
                siblings_enrolled_yn_corrected=siblings_enrolled_yn_corrected,
                siblings_id_first_corrected=siblings_id_first_corrected,
                measures_dir=measures_dir,
                conditional=conditional,
                verbose=verbose)
        (all_records,export_list) = self.get_subject_specific_form_data(subject,event,forms_this_event, redcap_project, select_exports)
        # Now go form by form and export data
        for export_name in export_list:
            self.export_subject_form(export_name, subject, subject_code, arm_code, visit_code, all_records, measures_dir, verbose)
    # What Arm and Visit of the study is this event?
    def translate_subject_and_event( self, subject_code, event_label):
        """Map an event label to (arm_code, visit_code, relative pipeline dir).

        Returns (None, None, None) for unknown events.
        """
        if event_label in list(self.__event_dict.keys()):
            (arm_code,visit_code) = self.__event_dict[event_label]
        else:
            slog.info(str(subject_code),"ERROR: Cannot determine study Arm and Visit from event %s" % event_label )
            return (None,None,None)
        pipeline_workdir_rel = os.path.join( subject_code, arm_code, visit_code )
        return (arm_code,visit_code,pipeline_workdir_rel)
    def days_between_dates( self, date_from_str, date_to_str, date_format_ymd=sutils.date_format_ymd):
        """Return the signed number of days from date_from_str to date_to_str."""
        return (datetime.datetime.strptime( date_to_str, date_format_ymd ) - datetime.datetime.strptime( date_from_str, date_format_ymd ) ).days
    def get_event_dictionary(self):
        """Return the event label -> (arm, visit) dictionary."""
        return self.__event_dict
    def schedule_cluster_job(self, job_script: str, job_title: str, submit_log: Union[str, Path] = None,
                             job_log: str = '/dev/null', verbose: bool = False) -> bool:
        """Submit a job via the configured Slurm scheduler; return success."""
        slurm_config = self.__sibis_defs['cluster_config']
        slurm = SlurmScheduler(slurm_config)
        return slurm.schedule_job(job_script, job_title, submit_log, job_log, verbose)
    def schedule_old_cluster_job(self,job_script, job_title,submit_log=None, job_log=None, verbose=False):
        """Submit a job to the legacy SGE cluster via qsub; return success.

        The job script is piped to qsub's stdin; stderr output from qsub is
        treated as failure.
        """
        qsub_cmd= '/opt/sge/bin/lx-amd64/qsub'
        if not os.path.exists(qsub_cmd):
            slog.info(job_title + "-" +hashlib.sha1(str(job_script).encode('utf-8')).hexdigest()[0:6],"ERROR: Failed to schedule job as '" + qsub_cmd + "' cannot be found!", job_script = str(job_script))
            return False
        sge_env = os.environ.copy()
        sge_env['SGE_ROOT'] = '/opt/sge'
        sge_param = self.__sibis_defs['old_cluster_parameters'].split(',')
        if job_log :
            sge_param += ['-o', job_log]
        else :
            sge_param += ['-o','/dev/null']
        qsub_args= [ qsub_cmd ] + sge_param + ['-N', '%s' % (job_title) ]
        #stderr=subprocess.STDOUT
        qsub_process = subprocess.Popen( qsub_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr= subprocess.PIPE, env=sge_env)
        (stdoutdata, stderrdata) = qsub_process.communicate(str(job_script).encode('utf-8'))
        cmd_str='echo "%s" | %s\n' % (job_script," ".join(qsub_args))
        if stderrdata :
            slog.info(job_title + "-" + hashlib.sha1(str(stderrdata).encode('utf-8')).hexdigest()[0:6],"ERROR: Failed to schedule job !", cmd = cmd_str, err_msg = str(stderrdata))
            return False
        if verbose:
            print(cmd_str)
            if stdoutdata:
                print(stdoutdata.decode('utf-8'))
        if submit_log:
            with open(submit_log, "a") as myfile:
                myfile.write(cmd_str)
                myfile.write(stdoutdata.decode('utf-8'))
        return True
| |
#
# The Python Imaging Library.
# $Id$
#
# standard channel operations
#
# History:
# 1996-03-24 fl Created
# 1996-08-13 fl Added logical operations (for "1" images)
# 2000-10-12 fl Added offset method (from Image.py)
#
# Copyright (c) 1997-2000 by Secret Labs AB
# Copyright (c) 1996-2000 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
def constant(image, value):
    """Create a single-band ("L") image of the same size, filled with *value*.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    width_height = image.size
    return Image.new("L", width_height, value)
def duplicate(image):
    """Return a copy of the channel (alias for :py:meth:`PIL.Image.Image.copy`).

    :rtype: :py:class:`~PIL.Image.Image`
    """
    copied = image.copy()
    return copied
def invert(image):
    """
    Invert an image (channel): ``out = MAX - image``.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    image.load()
    inverted = image.im.chop_invert()
    return image._new(inverted)
def lighter(image1, image2):
    """
    Pixel-by-pixel maximum of the two images: ``out = max(image1, image2)``.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for channel in (image1, image2):
        channel.load()
    return image1._new(image1.im.chop_lighter(image2.im))
def darker(image1, image2):
    """
    Pixel-by-pixel minimum of the two images: ``out = min(image1, image2)``.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for channel in (image1, image2):
        channel.load()
    return image1._new(image1.im.chop_darker(image2.im))
def difference(image1, image2):
    """
    Absolute pixel-by-pixel difference: ``out = abs(image1 - image2)``.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for channel in (image1, image2):
        channel.load()
    return image1._new(image1.im.chop_difference(image2.im))
def multiply(image1, image2):
    """Superimpose two images by multiplying them.

    Multiplying by a solid black image yields black; multiplying by a
    solid white image leaves the other image unchanged.

    .. code-block:: python

        out = image1 * image2 / MAX

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    product = image1.im.chop_multiply(image2.im)
    return image1._new(product)
def screen(image1, image2):
    """Superimpose the two images after inverting both of them.

    .. code-block:: python

        out = MAX - ((MAX - image1) * (MAX - image2) / MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_screen(image2.im))
def add(image1, image2, scale=1.0, offset=0):
    """Add two images, dividing the result by *scale* and adding *offset*.

    If omitted, *scale* defaults to 1.0 and *offset* to 0.

    .. code-block:: python

        out = ((image1 + image2) / scale + offset)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    summed = image1.im.chop_add(image2.im, scale, offset)
    return image1._new(summed)
def subtract(image1, image2, scale=1.0, offset=0):
    """Subtract two images, dividing the result by *scale* and adding *offset*.

    If omitted, *scale* defaults to 1.0 and *offset* to 0.

    .. code-block:: python

        out = ((image1 - image2) / scale + offset)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    diff = image1.im.chop_subtract(image2.im, scale, offset)
    return image1._new(diff)
def add_modulo(image1, image2):
    """Add two images, without clipping the result.

    .. code-block:: python

        out = ((image1 + image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_add_modulo(image2.im))
def subtract_modulo(image1, image2):
    """Subtract two images, without clipping the result.

    .. code-block:: python

        out = ((image1 - image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_subtract_modulo(image2.im))
def logical_and(image1, image2):
    """Logical AND between two images.

    .. code-block:: python

        out = ((image1 and image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_and(image2.im))
def logical_or(image1, image2):
    """Logical OR between two images.

    .. code-block:: python

        out = ((image1 or image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_or(image2.im))
def logical_xor(image1, image2):
    """Logical XOR between two images.

    .. code-block:: python

        out = ((bool(image1) != bool(image2)) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_xor(image2.im))
def blend(image1, image2, alpha):
    """Blend the two images using a constant transparency weight.

    Alias for :py:meth:`PIL.Image.Image.blend`.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    return Image.blend(image1, image2, alpha)
def composite(image1, image2, mask):
    """Create a composite image using a transparency mask.

    Alias for :py:meth:`PIL.Image.Image.composite`.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    return Image.composite(image1, image2, mask)
def offset(image, xoffset, yoffset=None):
    """Return a copy of the image with its data shifted by the given
    distances, wrapping around the edges.

    :param xoffset: The horizontal distance.
    :param yoffset: The vertical distance. If omitted, both
                    distances are set to the same value.
    :rtype: :py:class:`~PIL.Image.Image`
    """
    yoffset = xoffset if yoffset is None else yoffset
    image.load()
    shifted = image.im.offset(xoffset, yoffset)
    return image._new(shifted)
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface-ref/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Operational state for interface-ref
    """
    # __slots__ keeps instances lightweight: only the pyangbind bookkeeping
    # attributes and the two leaf members may be stored.
    __slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
    _yang_name = "state"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Both leaves are read-only (is_config=False) leafrefs modelled as
        # plain text values.
        self.__interface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
        self.__subinterface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="subinterface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
        load = kwargs.pop("load", None)
        # A single positional argument is treated as a source object to
        # copy-construct from; it must expose every pyangbind element, and
        # only elements marked as changed are copied over.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Absolute YANG path of this container; falls back to the static path
        # when the container has not been attached to a parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "interfaces",
                "interface",
                "interface-ref",
                "state",
            ]
    def _get_interface(self):
        """
        Getter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state/interface (leafref)
        YANG Description: Reference to a base interface. If a reference to a
        subinterface is required, this leaf must be specified
        to indicate the base interface.
        """
        return self.__interface
    def _set_interface(self, v, load=False):
        """
        Setter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state/interface (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_interface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_interface() directly.
        YANG Description: Reference to a base interface. If a reference to a
        subinterface is required, this leaf must be specified
        to indicate the base interface.
        """
        # Unwrap a previously-wrapped dynamic value back to its underlying type
        # before re-validating it.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="interface",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="leafref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """interface must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
                }
            )
        self.__interface = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_interface(self):
        # Reset the leaf to a fresh, unset dynamic value.
        self.__interface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
    def _get_subinterface(self):
        """
        Getter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state/subinterface (leafref)
        YANG Description: Reference to a subinterface -- this requires the base
        interface to be specified using the interface leaf in
        this container. If only a reference to a base interface
        is required, this leaf should not be set.
        """
        return self.__subinterface
    def _set_subinterface(self, v, load=False):
        """
        Setter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state/subinterface (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_subinterface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_subinterface() directly.
        YANG Description: Reference to a subinterface -- this requires the base
        interface to be specified using the interface leaf in
        this container. If only a reference to a base interface
        is required, this leaf should not be set.
        """
        # Unwrap a previously-wrapped dynamic value back to its underlying type
        # before re-validating it.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="subinterface",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="leafref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """subinterface must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
                }
            )
        self.__subinterface = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_subinterface(self):
        # Reset the leaf to a fresh, unset dynamic value.
        self.__subinterface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="subinterface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
    # Public read-only accessors (the leaves are config false, so no setter
    # properties are generated).
    interface = __builtin__.property(_get_interface)
    subinterface = __builtin__.property(_get_subinterface)
    _pyangbind_elements = OrderedDict(
        [("interface", interface), ("subinterface", subinterface)]
    )
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface-ref/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Operational state for interface-ref
    """
    # __slots__ keeps instances lightweight: only the pyangbind bookkeeping
    # attributes and the two leaf members may be stored.
    __slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
    _yang_name = "state"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Both leaves are read-only (is_config=False) leafrefs modelled as
        # plain text values.
        self.__interface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
        self.__subinterface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="subinterface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
        load = kwargs.pop("load", None)
        # A single positional argument is treated as a source object to
        # copy-construct from; it must expose every pyangbind element, and
        # only elements marked as changed are copied over.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Absolute YANG path of this container; falls back to the static path
        # when the container has not been attached to a parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "interfaces",
                "interface",
                "interface-ref",
                "state",
            ]
    def _get_interface(self):
        """
        Getter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state/interface (leafref)
        YANG Description: Reference to a base interface. If a reference to a
        subinterface is required, this leaf must be specified
        to indicate the base interface.
        """
        return self.__interface
    def _set_interface(self, v, load=False):
        """
        Setter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state/interface (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_interface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_interface() directly.
        YANG Description: Reference to a base interface. If a reference to a
        subinterface is required, this leaf must be specified
        to indicate the base interface.
        """
        # Unwrap a previously-wrapped dynamic value back to its underlying type
        # before re-validating it.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="interface",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="leafref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """interface must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
                }
            )
        self.__interface = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_interface(self):
        # Reset the leaf to a fresh, unset dynamic value.
        self.__interface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
    def _get_subinterface(self):
        """
        Getter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state/subinterface (leafref)
        YANG Description: Reference to a subinterface -- this requires the base
        interface to be specified using the interface leaf in
        this container. If only a reference to a base interface
        is required, this leaf should not be set.
        """
        return self.__subinterface
    def _set_subinterface(self, v, load=False):
        """
        Setter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state/subinterface (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_subinterface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_subinterface() directly.
        YANG Description: Reference to a subinterface -- this requires the base
        interface to be specified using the interface leaf in
        this container. If only a reference to a base interface
        is required, this leaf should not be set.
        """
        # Unwrap a previously-wrapped dynamic value back to its underlying type
        # before re-validating it.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="subinterface",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="leafref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """subinterface must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
                }
            )
        self.__subinterface = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_subinterface(self):
        # Reset the leaf to a fresh, unset dynamic value.
        self.__subinterface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="subinterface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
    # Public read-only accessors (the leaves are config false, so no setter
    # properties are generated).
    interface = __builtin__.property(_get_interface)
    subinterface = __builtin__.property(_get_subinterface)
    _pyangbind_elements = OrderedDict(
        [("interface", interface), ("subinterface", subinterface)]
    )
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import functools
import os
from collections import defaultdict
from multiprocessing import cpu_count
from pants.backend.jvm.subsystems.java import Java
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.javac_plugin import JavacPlugin
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.jvm_compile.compile_context import CompileContext, DependencyContext
from pants.backend.jvm.tasks.jvm_compile.execution_graph import (ExecutionFailure, ExecutionGraph,
Job)
from pants.backend.jvm.tasks.jvm_dependency_analyzer import JvmDependencyAnalyzer
from pants.backend.jvm.tasks.nailgun_task import NailgunTaskBase
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.fingerprint_strategy import TaskIdentityFingerprintStrategy
from pants.base.worker_pool import WorkerPool
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.resources import Resources
from pants.build_graph.target_scopes import Scopes
from pants.goal.products import MultipleRootedProducts
from pants.reporting.reporting_utils import items_to_report_element
from pants.util.dirutil import fast_relpath, safe_delete, safe_mkdir, safe_walk
from pants.util.fileutil import create_size_estimators
from pants.util.memo import memoized_property
class ResolvedJarAwareTaskIdentityFingerprintStrategy(TaskIdentityFingerprintStrategy):
  """Task fingerprint strategy that also mixes in the resolved coordinates of dependent jars."""

  def __init__(self, task, classpath_products):
    super(ResolvedJarAwareTaskIdentityFingerprintStrategy, self).__init__(task)
    self._classpath_products = classpath_products

  def compute_fingerprint(self, target):
    if isinstance(target, Resources):
      # Resources cannot influence the compiled result, so they contribute nothing to the hash.
      return None
    digest = self._build_hasher(target)
    if isinstance(target, JarLibrary):
      # NB: Hashes the coordinates of this jar_library's resolved jars so that both the requested
      # and the resolved versions feed the fingerprint. Without this, a dependency whose new
      # version is source compatible but binary incompatible could leave a stale artifact on the
      # classpath and surface only as a runtime error.
      entries = self._classpath_products.get_artifact_classpath_entries_for_targets([target])
      for _, entry in entries:
        digest.update(str(entry.coordinate))
    return digest.hexdigest()

  def __hash__(self):
    return hash((type(self), self._task.fingerprint))

  def __eq__(self, other):
    return (isinstance(other, ResolvedJarAwareTaskIdentityFingerprintStrategy) and
            super(ResolvedJarAwareTaskIdentityFingerprintStrategy, self).__eq__(other))
class JvmCompile(NailgunTaskBase):
"""A common framework for JVM compilation.
To subclass for a specific JVM language, implement the static values and methods
mentioned below under "Subclasses must implement".
"""
size_estimators = create_size_estimators()
@classmethod
def size_estimator_by_name(cls, estimation_strategy_name):
return cls.size_estimators[estimation_strategy_name]
@staticmethod
def _analysis_for_target(analysis_dir, target):
return os.path.join(analysis_dir, target.id + '.analysis')
@staticmethod
def _portable_analysis_for_target(analysis_dir, target):
return JvmCompile._analysis_for_target(analysis_dir, target) + '.portable'
@classmethod
def register_options(cls, register):
super(JvmCompile, cls).register_options(register)
register('--args', advanced=True, type=list,
default=list(cls.get_args_default(register.bootstrap)), fingerprint=True,
help='Pass these extra args to the compiler.')
register('--confs', advanced=True, type=list, default=['default'],
help='Compile for these Ivy confs.')
# TODO: Stale analysis should be automatically ignored via Task identities:
# https://github.com/pantsbuild/pants/issues/1351
register('--clear-invalid-analysis', advanced=True, type=bool,
help='When set, any invalid/incompatible analysis files will be deleted '
'automatically. When unset, an error is raised instead.')
register('--warnings', default=True, type=bool, fingerprint=True,
help='Compile with all configured warnings enabled.')
register('--warning-args', advanced=True, type=list, fingerprint=True,
default=list(cls.get_warning_args_default()),
help='Extra compiler args to use when warnings are enabled.')
register('--no-warning-args', advanced=True, type=list, fingerprint=True,
default=list(cls.get_no_warning_args_default()),
help='Extra compiler args to use when warnings are disabled.')
register('--fatal-warnings-enabled-args', advanced=True, type=list, fingerprint=True,
default=list(cls.get_fatal_warnings_enabled_args_default()),
help='Extra compiler args to use when fatal warnings are enabled.')
register('--fatal-warnings-disabled-args', advanced=True, type=list, fingerprint=True,
default=list(cls.get_fatal_warnings_disabled_args_default()),
help='Extra compiler args to use when fatal warnings are disabled.')
register('--debug-symbols', type=bool, fingerprint=True,
help='Compile with debug symbol enabled.')
register('--debug-symbol-args', advanced=True, type=list, fingerprint=True,
default=['-C-g:lines,source,vars'],
help='Extra args to enable debug symbol.')
register('--delete-scratch', advanced=True, default=True, type=bool,
help='Leave intermediate scratch files around, for debugging build problems.')
register('--worker-count', advanced=True, type=int, default=cpu_count(),
help='The number of concurrent workers to use when '
'compiling with {task}. Defaults to the '
'current machine\'s CPU count.'.format(task=cls._name))
register('--size-estimator', advanced=True,
choices=list(cls.size_estimators.keys()), default='filesize',
help='The method of target size estimation. The size estimator estimates the size '
'of targets in order to build the largest targets first (subject to dependency '
'constraints). Choose \'random\' to choose random sizes for each target, which '
'may be useful for distributed builds.')
register('--capture-log', advanced=True, type=bool,
fingerprint=True,
help='Capture compilation output to per-target logs.')
register('--capture-classpath', advanced=True, type=bool, default=True,
fingerprint=True,
help='Capture classpath to per-target newline-delimited text files. These files will '
'be packaged into any jar artifacts that are created from the jvm targets.')
register('--unused-deps', choices=['ignore', 'warn', 'fatal'], default='warn',
fingerprint=True,
help='Controls whether unused deps are checked, and whether they cause warnings or '
'errors.')
register('--use-classpath-jars', advanced=True, type=bool, fingerprint=True,
help='Use jar files on the compile_classpath. Note: Using this option degrades '
'incremental compile between targets.')
@classmethod
def prepare(cls, options, round_manager):
super(JvmCompile, cls).prepare(options, round_manager)
round_manager.require_data('compile_classpath')
# Require codegen we care about
# TODO(John Sirois): roll this up in Task - if the list of labels we care about for a target
# predicate to filter the full build graph is exposed, the requirement can be made automatic
# and in turn codegen tasks could denote the labels they produce automating wiring of the
# produce side
round_manager.require_data('java')
round_manager.require_data('scala')
# Allow the deferred_sources_mapping to take place first
round_manager.require_data('deferred_sources')
# Subclasses must implement.
# --------------------------
_name = None
_supports_concurrent_execution = None
@classmethod
def subsystem_dependencies(cls):
return super(JvmCompile, cls).subsystem_dependencies() + (Java, JvmPlatform, ScalaPlatform)
@classmethod
def name(cls):
return cls._name
@classmethod
def compiler_plugin_types(cls):
"""A tuple of target types which are compiler plugins."""
return ()
@classmethod
def get_args_default(cls, bootstrap_option_values):
"""Override to set default for --args option.
:param bootstrap_option_values: The values of the "bootstrap options" (e.g., pants_workdir).
Implementations can use these when generating the default.
See src/python/pants/options/options_bootstrapper.py for
details.
"""
return ()
@classmethod
def get_warning_args_default(cls):
"""Override to set default for --warning-args option."""
return ()
@classmethod
def get_no_warning_args_default(cls):
"""Override to set default for --no-warning-args option."""
return ()
@classmethod
def get_fatal_warnings_enabled_args_default(cls):
"""Override to set default for --fatal-warnings-enabled-args option."""
return ()
@classmethod
def get_fatal_warnings_disabled_args_default(cls):
"""Override to set default for --fatal-warnings-disabled-args option."""
return ()
@property
def cache_target_dirs(self):
return True
def select(self, target):
raise NotImplementedError()
def select_source(self, source_file_path):
raise NotImplementedError()
def create_analysis_tools(self):
"""Returns an AnalysisTools implementation.
Subclasses must implement.
"""
raise NotImplementedError()
def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file,
log_file, settings, fatal_warnings, javac_plugins_to_exclude):
"""Invoke the compiler.
Must raise TaskError on compile failure.
Subclasses must implement.
:param list args: Arguments to the compiler (such as jmake or zinc).
:param list classpath: List of classpath entries.
:param list sources: Source files.
:param str classes_output_dir: Where to put the compiled output.
:param upstream_analysis:
:param analysis_file: Where to write the compile analysis.
:param log_file: Where to write logs.
:param JvmPlatformSettings settings: platform settings determining the -source, -target, etc for
javac to use.
:param fatal_warnings: whether to convert compilation warnings to errors.
:param javac_plugins_to_exclude: A list of names of javac plugins that mustn't be used in
this compilation, even if requested (typically because
this compilation is building those same plugins).
"""
raise NotImplementedError()
# Subclasses may override.
# ------------------------
def extra_compile_time_classpath_elements(self):
"""Extra classpath elements common to all compiler invocations.
E.g., jars for compiler plugins.
These are added at the end of the classpath, after any dependencies, so that if they
overlap with any explicit dependencies, the compiler sees those first. This makes
missing dependency accounting much simpler.
"""
return []
def write_extra_resources(self, compile_context):
"""Writes any extra, out-of-band resources for a target to its classes directory.
E.g., targets that produce scala compiler plugins or annotation processor files
produce an info file. The resources will be added to the runtime_classpath.
Returns a list of pairs (root, [absolute paths of files under root]).
"""
pass
def __init__(self, *args, **kwargs):
super(JvmCompile, self).__init__(*args, **kwargs)
self._targets_to_compile_settings = None
# JVM options for running the compiler.
self._jvm_options = self.get_options().jvm_options
self._args = list(self.get_options().args)
if self.get_options().warnings:
self._args.extend(self.get_options().warning_args)
else:
self._args.extend(self.get_options().no_warning_args)
if self.get_options().debug_symbols:
self._args.extend(self.get_options().debug_symbol_args)
# The ivy confs for which we're building.
self._confs = self.get_options().confs
# Determines which sources are relevant to this target.
self._sources_predicate = self.select_source
self._capture_log = self.get_options().capture_log
self._delete_scratch = self.get_options().delete_scratch
self._clear_invalid_analysis = self.get_options().clear_invalid_analysis
try:
worker_count = self.get_options().worker_count
except AttributeError:
# tasks that don't support concurrent execution have no worker_count registered
worker_count = 1
self._worker_count = worker_count
self._size_estimator = self.size_estimator_by_name(self.get_options().size_estimator)
self._analysis_tools = self.create_analysis_tools()
self._dep_context = DependencyContext(self.compiler_plugin_types(),
dict(include_scopes=Scopes.JVM_COMPILE_SCOPES,
respect_intransitive=True))
@property
def _unused_deps_check_enabled(self):
return self.get_options().unused_deps != 'ignore'
@memoized_property
def _dep_analyzer(self):
return JvmDependencyAnalyzer(get_buildroot(),
self.context.products.get_data('runtime_classpath'),
self.context.products.get_data('product_deps_by_src'))
@property
def _analysis_parser(self):
return self._analysis_tools.parser
def _fingerprint_strategy(self, classpath_products):
return ResolvedJarAwareTaskIdentityFingerprintStrategy(self, classpath_products)
def _compile_context(self, target, target_workdir):
    """Assemble the CompileContext for `target`, rooted at `target_workdir`.

    All per-target compile outputs (classes dir, output jar, analysis files,
    debug log) live under the target workdir.
    """
    strict_deps = self._compute_language_property(target, lambda x: x.strict_deps)
    return CompileContext(target,
                          JvmCompile._analysis_for_target(target_workdir, target),
                          JvmCompile._portable_analysis_for_target(target_workdir, target),
                          os.path.join(target_workdir, 'classes'),
                          os.path.join(target_workdir, 'z.jar'),
                          os.path.join(target_workdir, 'debug.log'),
                          self._compute_sources_for_target(target),
                          strict_deps)
def execute(self):
    """Task entrypoint: compile all relevant JVM targets, reusing valid results."""
    # In case we have no relevant targets and return early create the requested product maps.
    self._create_empty_products()
    relevant_targets = list(self.context.targets(predicate=self.select))
    if not relevant_targets:
        return
    # Clone the compile_classpath to the runtime_classpath.
    compile_classpath = self.context.products.get_data('compile_classpath')
    classpath_product = self.context.products.get_data('runtime_classpath', compile_classpath.copy)

    def classpath_for_context(context):
        # Classpath entry is the jar'd output or the loose classes dir,
        # depending on the use_classpath_jars option.
        if self.get_options().use_classpath_jars:
            return context.jar_file
        return context.classes_dir
    fingerprint_strategy = self._fingerprint_strategy(classpath_product)
    # Note, JVM targets are validated (`vts.update()`) as they succeed. As a result,
    # we begin writing artifacts out to the cache immediately instead of waiting for
    # all targets to finish.
    with self.invalidated(relevant_targets,
                          invalidate_dependents=True,
                          fingerprint_strategy=fingerprint_strategy,
                          topological_order=True) as invalidation_check:
        # Initialize the classpath for all targets.
        compile_contexts = {vt.target: self._compile_context(vt.target, vt.results_dir)
                            for vt in invalidation_check.all_vts}
        for cc in compile_contexts.values():
            classpath_product.add_for_target(cc.target, [(conf, classpath_for_context(cc))
                                                         for conf in self._confs])
        # Register products for valid targets.
        valid_targets = [vt.target for vt in invalidation_check.all_vts if vt.valid]
        self._register_vts([compile_contexts[t] for t in valid_targets])
        # Build any invalid targets (which will register products in the background).
        if invalidation_check.invalid_vts:
            invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
            self.do_compile(
                invalidation_check,
                compile_contexts,
                invalid_targets,
                self.extra_compile_time_classpath_elements(),
            )
        if not self.get_options().use_classpath_jars:
            # Once compilation has completed, replace the classpath entry for each target with
            # its jar'd representation.
            for cc in compile_contexts.values():
                for conf in self._confs:
                    classpath_product.remove_for_target(cc.target, [(conf, cc.classes_dir)])
                    classpath_product.add_for_target(cc.target, [(conf, cc.jar_file)])
def do_compile(self,
               invalidation_check,
               compile_contexts,
               invalid_targets,
               extra_compile_time_classpath_elements):
    """Executes compilations for the invalid targets contained in a single chunk.

    Builds an ExecutionGraph of per-target compile jobs and runs it on a
    WorkerPool; any job failure aborts the build with a TaskError.
    """
    assert invalid_targets, "compile_chunk should only be invoked if there are invalid targets."
    # This ensures the workunit for the worker pool is set before attempting to compile.
    with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self._name)) as workunit:
        # This uses workunit.parent as the WorkerPool's parent so that child workunits
        # of different pools will show up in order in the html output. This way the current running
        # workunit is on the bottom of the page rather than possibly in the middle.
        worker_pool = WorkerPool(workunit.parent,
                                 self.context.run_tracker,
                                 self._worker_count)
    # Prepare the output directory for each invalid target, and confirm that analysis is valid.
    for target in invalid_targets:
        cc = compile_contexts[target]
        safe_mkdir(cc.classes_dir)
        self.validate_analysis(cc.analysis_file)
    # Get the classpath generated by upstream JVM tasks and our own prepare_compile().
    classpath_products = self.context.products.get_data('runtime_classpath')
    extra_compile_time_classpath = self._compute_extra_classpath(
        extra_compile_time_classpath_elements)
    # Now create compile jobs for each invalid target one by one.
    jobs = self._create_compile_jobs(classpath_products,
                                     compile_contexts,
                                     extra_compile_time_classpath,
                                     invalid_targets,
                                     invalidation_check.invalid_vts)
    exec_graph = ExecutionGraph(jobs)
    try:
        exec_graph.execute(worker_pool, self.context.log)
    except ExecutionFailure as e:
        raise TaskError("Compilation failure: {}".format(e))
def _record_compile_classpath(self, classpath, targets, outdir):
    """Write the compile classpath to `<outdir>/compile_classpath/<target.id>.txt`.

    One file per target, all containing the same newline-joined classpath.
    """
    text = '\n'.join(classpath)
    for target in targets:
        path = os.path.join(outdir, 'compile_classpath', '{}.txt'.format(target.id))
        safe_mkdir(os.path.dirname(path), clean=False)
        # Open in binary mode: we write utf-8 encoded bytes, and a text-mode
        # file object rejects bytes under Python 3 (TypeError).
        with open(path, 'wb') as f:
            f.write(text.encode('utf-8'))
def _compile_vts(self, vts, sources, analysis_file, upstream_analysis, classpath, outdir,
                 log_file, progress_message, settings, fatal_warnings, counter):
    """Compiles sources for the given vts into the given output dir.

    vts - versioned target set
    sources - sources for this target set
    analysis_file - the analysis file to manipulate
    classpath - a list of classpath entries
    outdir - the output dir to send classes to

    May be invoked concurrently on independent target sets.

    Postcondition: The individual targets in vts are up-to-date, as if each were
    compiled individually.
    """
    if not sources:
        self.context.log.warn('Skipping {} compile for targets with no sources:\n {}'
                              .format(self.name(), vts.targets))
    else:
        # Right-align the job number so progress prefixes line up.
        # NOTE(review): the bytes fill char b' ' raises TypeError on a str
        # receiver under Python 3 (str.rjust needs a 1-char str) — confirm
        # the target interpreter version.
        counter_val = str(counter()).rjust(counter.format_length(), b' ')
        counter_str = '[{}/{}] '.format(counter_val, counter.size)
        # Do some reporting.
        self.context.log.info(
            counter_str,
            'Compiling ',
            items_to_report_element(sources, '{} source'.format(self.name())),
            ' in ',
            items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
            ' (',
            progress_message,
            ').')
        with self.context.new_workunit('compile', labels=[WorkUnitLabel.COMPILER]):
            # The compiler may delete classfiles, then later exit on a compilation error. Then if the
            # change triggering the error is reverted, we won't rebuild to restore the missing
            # classfiles. So we force-invalidate here, to be on the safe side.
            vts.force_invalidate()
            if self.get_options().capture_classpath:
                self._record_compile_classpath(classpath, vts.targets, outdir)
            # If compiling a plugin, don't try to use it on itself.
            javac_plugins_to_exclude = (t.plugin for t in vts.targets if isinstance(t, JavacPlugin))
            self.compile(self._args, classpath, sources, outdir, upstream_analysis, analysis_file,
                         log_file, settings, fatal_warnings, javac_plugins_to_exclude)
def check_artifact_cache(self, vts):
    """Localizes the fetched analysis for targets we found in the cache."""
    def post_process(cached_vts):
        # Cached artifacts carry a machine-independent ("portable") analysis
        # file; rewrite it to local absolute paths before use.
        for vt in cached_vts:
            cc = self._compile_context(vt.target, vt.results_dir)
            safe_delete(cc.analysis_file)
            self._analysis_tools.localize(cc.portable_analysis_file, cc.analysis_file)
    return self.do_check_artifact_cache(vts, post_process_cached_vts=post_process)
def _create_empty_products(self):
    """Eagerly create requested product maps so they exist even if no targets compile."""
    products = self.context.products
    if products.is_required_data('classes_by_source'):
        def make_products():
            return defaultdict(MultipleRootedProducts)
        products.safe_create_data('classes_by_source', make_products)
    if products.is_required_data('product_deps_by_src') \
            or self._unused_deps_check_enabled:
        products.safe_create_data('product_deps_by_src', dict)
def compute_classes_by_source(self, compile_contexts):
    """Compute a map of (context->(src->classes)) for the given compile_contexts.

    It's possible (although unfortunate) for multiple targets to own the same sources, hence
    the top level division. Srcs are relative to buildroot. Classes are absolute paths.

    Returning classes with 'None' as their src indicates that the compiler analysis indicated
    that they were un-owned. This case is triggered when annotation processors generate
    classes (or due to bugs in classfile tracking in zinc/jmake.)
    """
    buildroot = get_buildroot()
    # Build a mapping of srcs to classes for each context.
    classes_by_src_by_context = defaultdict(dict)
    for compile_context in compile_contexts:
        # Walk the context's jar to build a set of unclaimed classfiles.
        unclaimed_classes = set()
        with compile_context.open_jar(mode='r') as jar:
            for name in jar.namelist():
                # Skip directory entries; record files as absolute paths.
                if not name.endswith('/'):
                    unclaimed_classes.add(os.path.join(compile_context.classes_dir, name))
        # Grab the analysis' view of which classfiles were generated.
        classes_by_src = classes_by_src_by_context[compile_context]
        if os.path.exists(compile_context.analysis_file):
            products = self._analysis_parser.parse_products_from_path(compile_context.analysis_file,
                                                                      compile_context.classes_dir)
            for src, classes in products.items():
                relsrc = os.path.relpath(src, buildroot)
                classes_by_src[relsrc] = classes
                unclaimed_classes.difference_update(classes)
        # Any remaining classfiles were unclaimed by sources/analysis.
        classes_by_src[None] = list(unclaimed_classes)
    return classes_by_src_by_context
def classname_for_classfile(self, compile_context, class_file_name):
    """Map an absolute classfile path under the context's classes dir to a class name."""
    classes_dir = compile_context.classes_dir
    assert class_file_name.startswith(classes_dir)
    # Strip the classes dir plus the path separator, leaving a relative path.
    rel_classfile = class_file_name[len(classes_dir) + 1:]
    return ClasspathUtil.classname_for_rel_classfile(rel_classfile)
def _register_vts(self, compile_contexts):
    """Registers requested products (classes_by_source, product_deps_by_src) for the contexts."""
    classes_by_source = self.context.products.get_data('classes_by_source')
    product_deps_by_src = self.context.products.get_data('product_deps_by_src')
    # Register a mapping between sources and classfiles (if requested).
    if classes_by_source is not None:
        ccbsbc = self.compute_classes_by_source(compile_contexts).items()
        for compile_context, computed_classes_by_source in ccbsbc:
            classes_dir = compile_context.classes_dir
            for source in compile_context.sources:
                classes = computed_classes_by_source.get(source, [])
                classes_by_source[source].add_abs_paths(classes_dir, classes)
    # Register classfile product dependencies (if requested).
    if product_deps_by_src is not None:
        for compile_context in compile_contexts:
            product_deps_by_src[compile_context.target] = \
                self._analysis_parser.parse_deps_from_path(compile_context.analysis_file)
def _check_unused_deps(self, compile_context):
    """Uses `product_deps_by_src` to check unused deps and warn or error."""
    with self.context.new_workunit('unused-check', labels=[WorkUnitLabel.COMPILER]):
        # Compute replacement deps.
        replacement_deps = self._dep_analyzer.compute_unused_deps(compile_context.target)
        if not replacement_deps:
            return
        # Warn or error for unused.
        def joined_dep_msg(deps):
            # One quoted spec per line, ready to paste into a BUILD file.
            return '\n '.join('\'{}\','.format(dep.address.spec) for dep in sorted(deps))
        flat_replacements = set(r for replacements in replacement_deps.values() for r in replacements)
        replacements_msg = ''
        if flat_replacements:
            replacements_msg = 'Suggested replacements:\n {}\n'.format(joined_dep_msg(flat_replacements))
        unused_msg = (
            'unused dependencies:\n {}\n{}'
            '(If you\'re seeing this message in error, you might need to '
            'change the `scope` of the dependencies.)'.format(
                joined_dep_msg(replacement_deps.keys()),
                replacements_msg,
            )
        )
        # 'fatal' fails the build; anything else just warns.
        if self.get_options().unused_deps == 'fatal':
            raise TaskError(unused_msg)
        else:
            self.context.log.warn('Target {} had {}\n'.format(
                compile_context.target.address.spec, unused_msg))
def _upstream_analysis(self, compile_contexts, classpath_entries):
    """Yields (classes_dir, analysis_file) pairs for classpath entries we compiled.

    Jar entries are skipped; directory entries without a known compile context
    are logged and skipped.
    """
    contexts_by_directory = {cc.classes_dir: cc for cc in compile_contexts.values()}
    for entry in classpath_entries:
        if entry.endswith('.jar'):
            continue
        cc = contexts_by_directory.get(entry)
        if not cc:
            self.context.log.debug('Missing upstream analysis for {}'.format(entry))
        else:
            yield cc.classes_dir, cc.analysis_file
def exec_graph_key_for_target(self, compile_target):
    """Unique ExecutionGraph node key for the given target's compile job."""
    spec = compile_target.address.spec
    return 'compile({})'.format(spec)
def _create_compile_jobs(self, classpath_products, compile_contexts, extra_compile_time_classpath,
                         invalid_targets, invalid_vts):
    """Creates one ExecutionGraph Job per invalid target.

    Each job double-checks the artifact cache, computes the target's compile
    classpath, runs the compiler, and registers products. Jobs depend on the
    jobs of their invalid dependency targets, so the graph runs in order.

    :param classpath_products: the runtime_classpath product to read entries from.
    :param compile_contexts: map of target -> CompileContext.
    :param extra_compile_time_classpath: extra (conf, path) classpath entries.
    :param invalid_targets: the targets needing compilation.
    :param invalid_vts: VersionedTargetSets for those targets (one target each).
    """
    class Counter(object):
        # Shared progress counter used for the '[n/total]' log prefix.
        def __init__(self, size, initial=0):
            self.size = size
            self.count = initial

        def __call__(self):
            self.count += 1
            return self.count

        def format_length(self):
            return len(str(self.size))
    counter = Counter(len(invalid_vts))

    def check_cache(vts):
        """Manually checks the artifact cache (usually immediately before compilation.)

        Returns true if the cache was hit successfully, indicating that no compilation is necessary.
        """
        if not self.artifact_cache_reads_enabled():
            return False
        cached_vts, _, _ = self.check_artifact_cache([vts])
        if not cached_vts:
            self.context.log.debug('Missed cache during double check for {}'
                                   .format(vts.target.address.spec))
            return False
        assert cached_vts == [vts], (
            'Cache returned unexpected target: {} vs {}'.format(cached_vts, [vts])
        )
        self.context.log.info('Hit cache during double check for {}'.format(vts.target.address.spec))
        counter()
        return True

    def should_compile_incrementally(vts, ctx):
        """Check to see if the compile should try to re-use the existing analysis.

        Returns true if we should try to compile the target incrementally.

        Bugfix: `ctx` is passed explicitly. The previous version read the
        enclosing loop variable `compile_context`, which by the time jobs run
        on the worker pool always refers to the LAST target's context
        (late-binding closure), so the check consulted the wrong analysis file.
        """
        if not vts.is_incremental:
            return False
        if not self._clear_invalid_analysis:
            return True
        return os.path.exists(ctx.analysis_file)

    def work_for_vts(vts, ctx):
        progress_message = ctx.target.address.spec
        # Capture a compilation log if requested.
        log_file = ctx.log_file if self._capture_log else None
        # Double check the cache before beginning compilation
        hit_cache = check_cache(vts)
        if not hit_cache:
            # Compute the compile classpath for this target.
            # Bugfix: use this job's own context (`ctx`), not the loop
            # variable `compile_context` (late-binding closure - see above).
            cp_entries = [ctx.classes_dir]
            cp_entries.extend(ClasspathUtil.compute_classpath(ctx.dependencies(self._dep_context),
                                                              classpath_products,
                                                              extra_compile_time_classpath,
                                                              self._confs))
            # TODO: always provide transitive analysis, but not always all classpath entries?
            upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))
            # Write analysis to a temporary file, and move it to the final location on success.
            tmp_analysis_file = "{}.tmp".format(ctx.analysis_file)
            if should_compile_incrementally(vts, ctx):
                # If this is an incremental compile, rebase the analysis to our new classes directory.
                self._analysis_tools.rebase_from_path(ctx.analysis_file,
                                                      tmp_analysis_file,
                                                      vts.previous_results_dir,
                                                      vts.results_dir)
            else:
                # Otherwise, simply ensure that it is empty.
                safe_delete(tmp_analysis_file)
            tgt, = vts.targets
            fatal_warnings = self._compute_language_property(tgt, lambda x: x.fatal_warnings)
            self._compile_vts(vts,
                              ctx.sources,
                              tmp_analysis_file,
                              upstream_analysis,
                              cp_entries,
                              ctx.classes_dir,
                              log_file,
                              progress_message,
                              tgt.platform,
                              fatal_warnings,
                              counter)
            os.rename(tmp_analysis_file, ctx.analysis_file)
            self._analysis_tools.relativize(ctx.analysis_file, ctx.portable_analysis_file)
            # Write any additional resources for this target to the target workdir.
            self.write_extra_resources(ctx)
            # Jar the compiled output.
            self._create_context_jar(ctx)
        # Update the products with the latest classes.
        self._register_vts([ctx])
        # Once products are registered, check for unused dependencies (if enabled).
        if not hit_cache and self._unused_deps_check_enabled:
            self._check_unused_deps(ctx)
    jobs = []
    invalid_target_set = set(invalid_targets)
    for ivts in invalid_vts:
        # Invalidated targets are a subset of relevant targets: get the context for this one.
        compile_target = ivts.targets[0]
        compile_context = compile_contexts[compile_target]
        compile_target_closure = compile_target.closure()
        # Dependencies of the current target which are invalid for this chunk.
        # Subtract a set (not a list): builtin sets don't support `set - list`.
        invalid_dependencies = (compile_target_closure & invalid_target_set) - {compile_target}
        jobs.append(Job(self.exec_graph_key_for_target(compile_target),
                        functools.partial(work_for_vts, ivts, compile_context),
                        [self.exec_graph_key_for_target(target) for target in invalid_dependencies],
                        self._size_estimator(compile_context.sources),
                        # If compilation and analysis work succeeds, validate the vts.
                        # Otherwise, fail it.
                        on_success=ivts.update,
                        on_failure=ivts.force_invalidate))
    return jobs
def _create_context_jar(self, compile_context):
    """Jar up the compile_context to its output jar location.

    TODO(stuhood): In the medium term, we hope to add compiler support for this step, which would
    allow the jars to be used as compile _inputs_ as well. Currently using jar'd compile outputs as
    compile inputs would make the compiler's analysis useless.
      see https://github.com/twitter-forks/sbt/tree/stuhood/output-jars
    """
    root = compile_context.classes_dir
    with compile_context.open_jar(mode='w') as jar:
        for dirpath, dirnames, filenames in safe_walk(root):
            for entry_name in dirnames + filenames:
                abs_path = os.path.join(dirpath, entry_name)
                jar.write(abs_path, fast_relpath(abs_path, root))
def validate_analysis(self, path):
    """Throws a TaskError for invalid analysis files (or deletes them if configured)."""
    try:
        self._analysis_parser.validate_analysis(path)
    except Exception as err:
        if not self._clear_invalid_analysis:
            raise TaskError("An internal build directory contains invalid/mismatched analysis: please "
                            "run `clean-all` if your tools versions changed recently:\n{}".format(err))
        # Best-effort recovery: drop the bad analysis and carry on.
        self.context.log.warn("Invalid analysis detected at path {} ... pants will remove these "
                              "automatically, but\nyou may experience spurious warnings until "
                              "clean-all is executed.\n{}".format(path, err))
        safe_delete(path)
def _compute_sources_for_target(self, target):
    """Computes and returns the sources (relative to buildroot) for the given target."""
    selected = [s for s in target.sources_relative_to_buildroot() if self._sources_predicate(s)]
    # TODO: Make this less hacky. Ideally target.java_sources will point to sources, not targets.
    if getattr(target, 'java_sources', None):
        selected.extend(source
                        for tgt in target.java_sources if tgt.has_sources()
                        for source in tgt.sources_relative_to_buildroot())
    return selected
def _compute_language_property(self, target, selector):
    """Computes a language property setting for the given target sources.

    :param target The target whose language property will be calculated.
    :param selector A function that takes a target or platform and returns the boolean value of the
                    property for that target or platform, or None if that target or platform does
                    not directly define the property.

    If the target does not override the language property, returns true iff the property
    is true for any of the matched languages for the target.
    """
    explicit_setting = selector(target)
    if explicit_setting is not None:
        return explicit_setting
    # Fall back to the platform defaults for each language the target contains.
    matched = False
    if target.has_sources('.java'):
        matched |= selector(Java.global_instance())
    if target.has_sources('.scala'):
        matched |= selector(ScalaPlatform.global_instance())
    return matched
def _compute_extra_classpath(self, extra_compile_time_classpath_elements):
    """Compute any extra compile-time-only classpath elements.

    TODO(benjy): Model compile-time vs. runtime classpaths more explicitly.
    """
    # Cross each conf with each extra element, preserving iteration order.
    return [(conf, jar)
            for conf in self._confs
            for jar in extra_compile_time_classpath_elements]
| |
from __future__ import absolute_import, division, print_function
import fnmatch
from toolz import compose, identity
from datashape.predicates import isscalar
from ..expr import (
Expr, ElemWise, SimpleSelection, Sort, Apply, Distinct, Join, By, Label,
Summary, by, ReLabel, Like, Reduction, Head
)
from .python import (
compute, rrowfunc, rowfunc, pair_assemble, reduce_by_funcs, binops
)
from ..expr.broadcast import broadcast_collect
from ..expr.optimize import simple_selections
from ..compatibility import builtins, unicode
from ..expr import reductions
from ..dispatch import dispatch
from .core import compute_up
import py4j
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
__all__ = ['RDD', 'pyspark', 'SparkContext']
# PySpark adds a SIGCHLD signal handler, but that breaks other packages, so we
# remove it
# See https://issues.apache.org/jira/browse/SPARK-1394
try:
    import signal
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
except Exception:
    # Best-effort: SIGCHLD may not exist (Windows -> AttributeError) or we may
    # not be in the main thread (ValueError). Narrowed from a bare `except:`,
    # which also swallowed SystemExit/KeyboardInterrupt at import time.
    pass
@dispatch(Expr, RDD)
def optimize(expr, seq):
    """Collapse broadcastable subtrees, then simplify selections, before compute."""
    collected = broadcast_collect(expr)
    return simple_selections(collected)
@dispatch(ElemWise, RDD)
def compute_up(t, rdd, **kwargs):
    """Map the compiled elementwise row function over the RDD."""
    return rdd.map(rowfunc(t))
@dispatch(SimpleSelection, RDD)
def compute_up(t, rdd, **kwargs):
    """Filter the RDD rows with the selection's (optimized) predicate."""
    row_predicate = rrowfunc(optimize(t.predicate, rdd), t._child)
    return rdd.filter(row_predicate)
# Maps each blaze reduction expression type to the RDD method implementing it.
rdd_reductions = {
    reductions.sum: RDD.sum,
    reductions.min: RDD.min,
    reductions.max: RDD.max,
    reductions.count: RDD.count,
    reductions.mean: RDD.mean,
    reductions.var: RDD.variance,
    reductions.std: RDD.stdev,
    # nunique has no direct RDD method: count the distinct elements.
    reductions.nunique: compose(RDD.count, RDD.distinct)
}
@dispatch(tuple(rdd_reductions), RDD)
def compute_up(t, rdd, **kwargs):
    # Look up the RDD method matching this reduction's type and apply it.
    return rdd_reductions[type(t)](rdd)
def istruthy(x):
    """Coerce ``x`` to its boolean truth value."""
    return bool(x)
@dispatch(reductions.any, RDD)
def compute_up(t, rdd, **kwargs):
    # take(1) on the truthy-filtered RDD yields a non-empty list iff any
    # element is truthy; avoids scanning the whole RDD.
    return istruthy(rdd.filter(identity).take(1))
@dispatch(reductions.all, RDD)
def compute_up(t, rdd, **kwargs):
    # True iff no falsy element exists (empty take(1) result is falsy).
    return not rdd.filter(lambda x: not x).take(1)
@dispatch(Head, RDD)
def compute_up(t, rdd, **kwargs):
    # take() materializes the first n rows on the driver as a list.
    return rdd.take(t.n)
@dispatch(Apply, RDD)
def compute_up(t, rdd, **kwargs):
    """Apply a user function partition-wise over the RDD.

    Only splittable functions are supported, since the function is mapped
    over each partition independently.
    """
    if t._splittable:
        return rdd.mapPartitions(t.func)
    else:
        # Fixed: the original message concatenation was missing a space after
        # "functions.", rendering as "...functions.To apply...".
        raise NotImplementedError("Can only apply splittable functions. "
                                  "To apply function to each partition add "
                                  "splittable=True kwarg to call to apply. "
                                  "t.apply(func, dshape, splittable=True)")
@dispatch(Sort, RDD)
def compute_up(t, rdd, **kwargs):
    # A string/tuple/list key names column(s) of the child; any other key is
    # an expression that must itself be optimized and compiled to a row func.
    if isinstance(t.key, (str, unicode, tuple, list)):
        key = rowfunc(t._child[t.key])
    else:
        key = optimize(t.key, rdd)
        key = rrowfunc(key, t._child)
    # Key each row, sort by key, then drop the key again.
    return (rdd.keyBy(key)
            .sortByKey(ascending=t.ascending)
            .map(lambda x: x[1]))
@dispatch(Distinct, RDD)
def compute_up(t, rdd, **kwargs):
    """Distinct rows of the RDD; distinct-on specific columns is unsupported."""
    if not t.on:
        return rdd.distinct()
    raise NotImplementedError(
        'spark backend cannot specify what columns to distinct on'
    )
def jgetattr(data, attr, default=None):
    """Fetch ``attr`` from a Py4J-backed object, tolerating Java-side failures.

    Spark's API doesn't properly implement the ``getattr`` interface: the
    lookup itself can raise ``Py4JJavaError`` even when a default is given,
    so we catch it and fall back to the default.
    """
    try:
        value = getattr(data, attr, default)
    except py4j.protocol.Py4JJavaError:
        value = default
    return value
@compute_up.register(Join, RDD, RDD)
def spark_join(t, lhs, rhs, **kwargs):
    """Join two RDDs by keying each side on its join columns."""
    keyed_lhs = lhs.keyBy(rowfunc(t.lhs[t.on_left]))
    keyed_rhs = rhs.keyBy(rowfunc(t.rhs[t.on_right]))
    # Select the RDD join method matching the requested join type.
    joiner_names = {
        'inner': 'join',
        'left': 'leftOuterJoin',
        'right': 'rightOuterJoin',
        'outer': 'fullOuterJoin',
    }
    how = t.how
    if how not in joiner_names:
        raise ValueError("Invalid join type %r, must be one of "
                         "{'inner', 'left', 'right', 'outer'}" % how)
    joined = getattr(keyed_lhs, joiner_names[how])(keyed_rhs)
    # Drop the join key and reassemble each (left, right) pair into a row.
    assemble = pair_assemble(t)
    return joined.map(lambda x: assemble(x[1]))
@dispatch(By, RDD)
def compute_up(t, rdd, **kwargs):
    # Rebuild the By expression from its optimized grouper and apply parts.
    grouper = optimize(t.grouper, rdd)
    apply = optimize(t.apply, rdd)
    t = by(grouper, apply)
    # Only reductions with known binary operators (or summaries composed of
    # them) can be evaluated via a streaming combineByKey.
    if ((isinstance(t.apply, Reduction) and type(t.apply) in binops) or
        (isinstance(t.apply, Summary) and
        builtins.all(type(val) in binops for val in t.apply.values))):
        grouper, binop, combiner, initial = reduce_by_funcs(t)
        # Normalize scalar keys/values to 1-tuples so unpack can concatenate.
        if isscalar(t.grouper.dshape.measure):
            keyfunc = lambda x: (x,)
        else:
            keyfunc = identity
        if isscalar(t.apply.dshape.measure):
            valfunc = lambda x: (x,)
        else:
            valfunc = identity
        unpack = lambda kv: keyfunc(kv[0]) + valfunc(kv[1])
        create = lambda v: binop(initial, v)
        # keyBy on the grouper, combine per key, then flatten to output rows.
        return (rdd.keyBy(grouper)
                .combineByKey(create, binop, combiner)
                .map(unpack))
    else:
        raise NotImplementedError("By only implemented for common reductions."
                                  "\nGot %s" % type(t.apply))
@dispatch((Label, ReLabel), RDD)
def compute_up(t, rdd, **kwargs):
    # Labels only change metadata, not the data, so pass the RDD through.
    return rdd
@dispatch(Summary, RDD)
def compute_up(t, rdd, **kwargs):
    # Cache the RDD: each summary value triggers its own spark computation.
    rdd = rdd.cache()
    return tuple(compute(value, {t._child: rdd}) for value in t.values)
@dispatch(Like, RDD)
def compute_up(t, rdd, **kwargs):
    # Bind the pattern as a default argument so the closure pickles cleanly
    # when shipped to spark workers.
    # NOTE(review): this maps each value to the fnmatch boolean rather than
    # filtering rows - presumably Like is an elementwise predicate here;
    # confirm against the blaze Like semantics.
    def func(value, pattern=t.pattern):
        return fnmatch.fnmatch(value, pattern)
    return rdd.map(func)
| |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.xmlrpc import client as xmlrpc_client
from future.moves.urllib.parse import urlparse
from future.utils import native_str
import logging
import os
import socket
import re
from time import sleep
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.config_schema import one_or_more
from flexget.utils.bittorrent import Torrent, is_torrent_file
log = logging.getLogger('rtorrent')
class _Method(object):
    """Binds an XML-RPC method name to an RPC sender callable.

    Supports "nested" method names (e.g. ``examples.getStateName``) by
    growing the dotted name on attribute access; calling the proxy forwards
    the accumulated name plus the argument tuple to the sender.
    """

    def __init__(self, send, name):
        self.__send_fn = send
        self.__method_name = name

    def __getattr__(self, name):
        # Extend the dotted method path lazily.
        return _Method(self.__send_fn, '%s.%s' % (self.__method_name, name))

    def __call__(self, *args):
        return self.__send_fn(self.__method_name, args)
class SCGITransport(xmlrpc_client.Transport):
    """ Used to override the default xmlrpclib transport to support SCGI.

    rTorrent exposes XML-RPC over SCGI (TCP host:port or a unix socket), so
    requests are framed with SCGI netstring headers and responses carry SCGI
    headers that must be stripped before the XML-RPC payload is parsed.
    """

    def __init__(self, *args, **kwargs):
        self.verbose = 0
        xmlrpc_client.Transport.__init__(self, *args, **kwargs)

    def request(self, host, handler, request_body, verbose=False):
        # SCGI opens a fresh connection per call; delegate straight through.
        return self.single_request(host, handler, request_body, verbose)

    def single_request(self, host, handler, request_body, verbose=0):
        # Add SCGI headers to the request.
        # NOTE(review): CONTENT_LENGTH is computed on the unencoded string;
        # for non-ASCII payloads the encoded byte length differs - confirm
        # payloads are ASCII-safe.
        headers = [('CONTENT_LENGTH', native_str(len(request_body))), ('SCGI', '1')]
        header = '\x00'.join(['%s\x00%s' % (key, value) for key, value in headers]) + '\x00'
        header = '%d:%s' % (len(header), header)
        request_body = '%s,%s' % (header, request_body)
        sock = None
        try:
            if host:
                # host given: TCP connection to host:port.
                parsed_host = urlparse(host)
                host = parsed_host.hostname
                port = parsed_host.port
                addr_info = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
                sock = socket.socket(*addr_info[0][:3])
                sock.connect(addr_info[0][4])
            else:
                # No host: the handler is a unix domain socket path.
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.connect(handler)
            self.verbose = verbose
            # Fixed: use sendall() - send() may transmit only part of the
            # request, silently truncating large payloads (e.g. torrent data).
            sock.sendall(request_body.encode())
            return self.parse_response(sock.makefile())
        finally:
            if sock:
                sock.close()

    def parse_response(self, response):
        """Strip SCGI response headers and feed the XML-RPC body to the parser.

        :param response: a file-like object wrapping the raw SCGI response.
        :return: the unmarshalled XML-RPC result tuple.
        """
        p, u = self.getparser()
        response_body = ''
        while True:
            data = response.read(1024)
            if not data:
                break
            response_body += data
        if self.verbose:
            log.info('body: %s', repr(response_body))
        # Remove SCGI headers from the response.
        response_header, response_body = re.split(r'\n\s*?\n', response_body, maxsplit=1)
        p.feed(response_body)
        p.close()
        return u.close()
class SCGIServerProxy(object):
    """ Enable connection to SCGI proxy.

    Mirrors ``xmlrpclib.ServerProxy`` but routes calls through an SCGI
    transport. The uri may carry a scheme (``scgi://host:port/path``) or be a
    bare unix-socket path (no scheme, host is then None).
    """

    def __init__(self, uri, transport=None, encoding=None,
                 verbose=False, allow_none=False, use_datetime=False):
        parsed_url = urlparse(uri)
        # A scheme-less uri is a local socket path: no host, handler = path.
        self.__host = uri if parsed_url.scheme else None
        self.__handler = parsed_url.path or '/'
        if not transport:
            transport = SCGITransport(use_datetime=use_datetime)
        self.__transport = transport
        self.__encoding = encoding or 'utf-8'
        self.__verbose = verbose
        self.__allow_none = allow_none

    def __close(self):
        self.__transport.close()

    def __request(self, method_name, params):
        # Marshal the call, send it over the transport, unmarshal the reply.
        request = xmlrpc_client.dumps(params, method_name, encoding=self.__encoding,
                                      allow_none=self.__allow_none).encode(self.__encoding)
        response = self.__transport.request(
            self.__host,
            self.__handler,
            request.decode('utf-8'),
            verbose=self.__verbose
        )
        # Unwrap single-value responses for convenience.
        if len(response) == 1:
            response = response[0]
        return response

    def __repr__(self):
        return "<ServerProxy for %s%s>" % (self.__host, self.__handler)

    __str__ = __repr__

    def __getattr__(self, name):
        # Magic method dispatcher: proxy.some.method -> _Method proxy.
        return _Method(self.__request, name)

    # note: to call a remote object with an non-standard name, use
    # result getattr(server, "strange-python-name")(args)

    def __call__(self, attr):
        """A workaround to get special attributes on the ServerProxy
        without interfering with the magic __getattr__
        """
        if attr == "close":
            return self.__close
        elif attr == "transport":
            return self.__transport
        raise AttributeError("Attribute %r not found" % (attr,))
class RTorrent(object):
    """ rTorrent API client """

    # Fields requested by default; underscore names are translated to
    # rTorrent's dotted form by _clean_fields().
    # NOTE(review): 'down_rate' and 'down.rate' both appear here, which yields
    # a duplicate 'down.rate' after translation - confirm whether intentional.
    default_fields = (
        'hash',
        'name',
        'up_total', 'down_total', 'down_rate',
        'is_open', 'is_active',
        'custom1', 'custom2', 'custom3', 'custom4', 'custom5',
        'state', 'complete',
        'bytes_done', 'down.rate', 'left_bytes',
        'ratio',
        'base_path',
    )

    # Fields always fetched because other methods rely on them.
    required_fields = (
        'hash',
        'name',
        'base_path'
    )

    def __init__(self, uri, username=None, password=None):
        """
        New connection to rTorrent

        :param uri: RTorrent URL. Supports both http(s) and scgi
        :param username: Username for basic auth over http(s)
        :param password: Password for basic auth over http(s)
        """
        self.uri = uri
        self.username = username
        self.password = password
        self._version = None
        parsed_uri = urlparse(uri)
        # Reformat uri with username and password for HTTP(s) Auth
        if self.username and self.password:
            if parsed_uri.scheme not in ['http', 'https']:
                raise IOError('Username and password only supported on http(s)')
            data = {
                'scheme': parsed_uri.scheme,
                'hostname': parsed_uri.hostname,
                'port': parsed_uri.port,
                'path': parsed_uri.path,
                'query': parsed_uri.query,
                'username': self.username,
                'password': self.password,
            }
            self.uri = '%(scheme)s://%(username)s:%(password)s@%(hostname)s%(path)s%(query)s' % data
        # Determine the proxy server
        if parsed_uri.scheme in ['http', 'https']:
            sp = xmlrpc_client.ServerProxy
        elif parsed_uri.scheme == 'scgi':
            sp = SCGIServerProxy
        elif parsed_uri.scheme == '' and parsed_uri.path:
            # Scheme-less uri with a path: treat as a local unix socket.
            self.uri = parsed_uri.path
            sp = SCGIServerProxy
        else:
            raise IOError('Unsupported scheme %s for uri %s' % (parsed_uri.scheme, self.uri))
        self._server = sp(self.uri)

    def _clean_fields(self, fields, reverse=False):
        # Translate between flexget-style underscore field names and
        # rTorrent's dotted method names. With reverse=True, map the dotted
        # rate/total fields back to underscores for result-dict keys.
        if not fields:
            fields = list(self.default_fields)
        if reverse:
            for field in ['up.total', 'down.total', 'down.rate']:
                if field in fields:
                    fields[fields.index(field)] = native_str(field.replace('.', '_'))
            return fields
        # Ensure required fields are always present (prepended).
        for required_field in self.required_fields:
            if required_field not in fields:
                fields.insert(0, required_field)
        for field in ['up_total', 'down_total', 'down_rate']:
            if field in fields:
                fields[fields.index(field)] = native_str(field.replace('_', '.'))
        return fields

    @property
    def version(self):
        # Client version as a list of ints, e.g. [0, 9, 6].
        return [int(v) for v in self._server.system.client_version().split('.')]

    def load(self, raw_torrent, fields=None, start=False, mkdir=True):
        # Add a torrent from raw torrent-file data, optionally setting d.*
        # fields, creating the target directory, and starting the download.
        if fields is None:
            fields = {}
        # First param is empty 'target'
        params = ['', xmlrpc_client.Binary(raw_torrent)]
        # Additional fields to set
        for key, val in fields.items():
            # Values must be escaped if within params
            params.append('d.%s.set=%s' % (key, re.escape(native_str(val))))
        if mkdir and 'directory' in fields:
            result = self._server.execute.throw('', 'mkdir', '-p', fields['directory'])
            if result != 0:
                raise xmlrpc_client.Error('Failed creating directory %s' % fields['directory'])
        # by default rtorrent won't allow calls over 512kb in size.
        xmlrpc_size = len(xmlrpc_client.dumps(tuple(params), 'raw_start')) + 71680  # Add 70kb for buffer
        if xmlrpc_size > 524288:
            # Temporarily raise the server's size limit for this call.
            prev_size = self._server.network.xmlrpc.size_limit()
            self._server.network.xmlrpc.size_limit.set('', xmlrpc_size)
        # Call load method and return the response
        if start:
            result = self._server.load.raw_start(*params)
        else:
            result = self._server.load.raw(*params)
        if xmlrpc_size > 524288:
            # Restore the previous limit (prev_size was bound above under the
            # same size condition).
            self._server.network.xmlrpc.size_limit.set('', prev_size)
        return result

    def torrent(self, info_hash, fields=None):
        """ Get the details of a torrent """
        info_hash = native_str(info_hash)
        if not fields:
            fields = list(self.default_fields)
        fields = self._clean_fields(fields)
        # Batch one d.<field> call per field into a single MultiCall.
        multi_call = xmlrpc_client.MultiCall(self._server)
        for field in fields:
            method_name = 'd.%s' % field
            getattr(multi_call, method_name)(info_hash)
        resp = multi_call()
        # TODO: Maybe we should return a named tuple or a Torrent class?
        return dict(list(zip(self._clean_fields(fields, reverse=True), [val for val in resp])))

    def torrents(self, view='main', fields=None):
        # List all torrents in `view`, one field dict per torrent.
        if not fields:
            fields = list(self.default_fields)
        fields = self._clean_fields(fields)
        params = ['d.%s=' % field for field in fields]
        params.insert(0, view)
        resp = self._server.d.multicall(params)
        # Response is formatted as a list of lists, with just the values
        return [dict(list(zip(self._clean_fields(fields, reverse=True), val))) for val in resp]

    def update(self, info_hash, fields):
        # Set d.<key> values on an existing torrent via a single MultiCall.
        multi_call = xmlrpc_client.MultiCall(self._server)
        for key, val in fields.items():
            method_name = 'd.%s.set' % key
            getattr(multi_call, method_name)(native_str(info_hash), native_str(val))
        return multi_call()[0]

    def delete(self, info_hash):
        # Remove the torrent from the client (does not delete its data).
        return self._server.d.erase(native_str(info_hash))

    def stop(self, info_hash):
        # Stop, then close, the torrent.
        self._server.d.stop(info_hash)
        return self._server.d.close(native_str(info_hash))

    def start(self, info_hash):
        return self._server.d.start(native_str(info_hash))

    def move(self, info_hash, dst_path):
        # Move a torrent's data to dst_path on the rTorrent host: stop it,
        # create the destination, move the files, repoint rTorrent, restart.
        info_hash = native_str(info_hash)
        self.stop(info_hash)
        torrent = self.torrent(info_hash, fields=['base_path'])
        try:
            log.verbose('Creating destination directory `%s`' % dst_path)
            self._server.execute.throw('', 'mkdir', '-p', dst_path)
        except xmlrpc_client.Error:
            raise xmlrpc_client.Error("unable to create folder %s" % dst_path)
        self._server.execute.throw('', 'mv', '-u', torrent['base_path'], dst_path)
        self._server.d.set_directory(info_hash, dst_path)
        self.start(info_hash)
class RTorrentPluginBase(object):
    """Shared behaviour for the rTorrent input/output plugins."""

    # Human-readable priority names mapped to rTorrent's numeric values.
    priority_map = {
        'high': 3,
        'medium': 2,
        'low': 1,
        'off': 0,
    }

    def _build_options(self, config, entry, entry_first=True):
        """Collect rTorrent download options from config and entry.

        Values do not merge: for each key exactly one source is used.
        ``entry_first`` makes the entry win over config; otherwise config
        wins.

        :param config: plugin configuration dict
        :param entry: task entry (also used to render values)
        :param entry_first: precedence switch, see above
        :return: dict of options ready to pass to RTorrent
        :raises RenderError: when a value fails to render
        """
        options = {}
        for opt_key in ('path', 'message', 'priority',
                        'custom1', 'custom2', 'custom3', 'custom4', 'custom5'):
            entry_value = entry.get(opt_key)
            config_value = config.get(opt_key)
            # Pick exactly one source per key by precedence.
            if entry_first:
                value = entry_value or config_value
            else:
                value = config_value or entry_value
            if value:
                options[opt_key] = entry.render(value)
        # Convert priority from string to rTorrent's numeric form.
        priority = options.get('priority')
        if priority in self.priority_map:
            options['priority'] = self.priority_map[priority]
        # Flexget's 'path' is called 'directory' in rTorrent.
        if options.get('path'):
            options['directory'] = options.pop('path')
        if 'directory' in options:
            options['directory'] = pathscrub(options['directory'])
        return options

    def on_task_start(self, task, config):
        """Abort the task early when rTorrent is unreachable or too old."""
        try:
            client = RTorrent(config['uri'], username=config.get('username'),
                              password=config.get('password'))
            # client.version performs an XMLRPC round trip per access; the
            # original queried it three times and formatted the same message
            # twice — do both exactly once.
            version = client.version
            if version < [0, 9, 2]:
                msg = 'rtorrent version >=0.9.2 required, found {0}'.format(
                    '.'.join(map(str, version)))
                log.error(msg)
                task.abort(msg)
        except (IOError, xmlrpc_client.Error) as e:
            raise plugin.PluginError("Couldn't connect to rTorrent: %s" % str(e))
class RTorrentOutputPlugin(RTorrentPluginBase):
    """Add, update or delete torrents in rTorrent from accepted entries."""

    schema = {
        'type': 'object',
        'properties': {
            # connection info
            'uri': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'start': {'type': 'boolean', 'default': True},
            'mkdir': {'type': 'boolean', 'default': True},
            # BUG FIX: keyword was misspelled 'emun', so 'action' was never
            # validated against this list of allowed values.
            'action': {'type': 'string', 'enum': ['update', 'delete', 'add'], 'default': 'add'},
            # properties to set on rtorrent download object
            'message': {'type': 'string'},
            'priority': {'type': 'string'},
            'path': {'type': 'string'},
            'custom1': {'type': 'string'},
            'custom2': {'type': 'string'},
            'custom3': {'type': 'string'},
            'custom4': {'type': 'string'},
            'custom5': {'type': 'string'},
        },
        'required': ['uri'],
        'additionalProperties': False,
    }

    def _verify_load(self, client, info_hash):
        """Poll until a freshly loaded torrent is queryable.

        Tries five times, half a second apart, and re-raises the last
        communication error if the torrent never shows up.
        """
        last_error = None
        for _ in range(5):
            try:
                return client.torrent(info_hash, fields=['hash'])
            except (IOError, xmlrpc_client.Error) as e:
                last_error = e
                sleep(0.5)
        # A bare `raise` here sat outside any active except block and would
        # itself error out (RuntimeError in Python 3); re-raise the captured
        # exception instead.
        raise last_error

    def on_task_download(self, task, config):
        """Fetch temp .torrent files when we are going to add them."""
        # If the download plugin is not enabled, we need to call it to get
        # our temp .torrent files
        if config['action'] == 'add' and 'download' not in task.config:
            download = plugin.get_plugin_by_name('download')
            download.instance.get_temp_files(task, handle_magnets=True, fail_html=True)

    def on_task_output(self, task, config):
        """Dispatch each accepted entry to add/update/delete."""
        client = RTorrent(config['uri'], username=config.get('username'),
                          password=config.get('password'))
        for entry in task.accepted:
            if task.options.test:
                log.info('Would add %s to rTorrent' % entry['url'])
                continue
            if config['action'] == 'add':
                try:
                    options = self._build_options(config, entry)
                except RenderError as e:
                    entry.fail("failed to render properties %s" % str(e))
                    continue
                self.add_entry(client, entry, options, start=config['start'], mkdir=config['mkdir'])
            # delete/update need an info_hash; add_entry already fails
            # entries lacking one, so this mainly guards those actions.
            info_hash = entry.get('torrent_info_hash')
            if not info_hash:
                entry.fail('Failed to %s as no info_hash found' % config['action'])
                continue
            if config['action'] == 'delete':
                self.delete_entry(client, entry)
            if config['action'] == 'update':
                self.update_entry(client, entry, config)

    def delete_entry(self, client, entry):
        """Erase the entry's torrent from rTorrent; fail the entry on error."""
        try:
            client.delete(entry['torrent_info_hash'])
            log.verbose('Deleted %s (%s) in rtorrent ' % (entry['title'], entry['torrent_info_hash']))
        except (IOError, xmlrpc_client.Error) as e:
            entry.fail('Failed to delete: %s' % str(e))
            return

    def update_entry(self, client, entry, config):
        """Update fields on an existing torrent, moving its data if needed."""
        info_hash = entry['torrent_info_hash']
        # First check if it already exists
        try:
            existing = client.torrent(info_hash, fields=['base_path'])
        except IOError as e:
            entry.fail("Error updating torrent %s" % str(e))
            return
        except xmlrpc_client.Error:
            # Torrent not known to the client; nothing to move.
            existing = False
        # Build options but make config values override entry values
        try:
            options = self._build_options(config, entry, entry_first=False)
        except RenderError as e:
            entry.fail("failed to render properties %s" % str(e))
            return
        if existing and 'directory' in options:
            # Check if changing to another directory which requires a move
            if options['directory'] != existing['base_path'] \
                    and options['directory'] != os.path.dirname(existing['base_path']):
                try:
                    log.verbose("Path is changing, moving files from '%s' to '%s'"
                                % (existing['base_path'], options['directory']))
                    client.move(info_hash, options['directory'])
                except (IOError, xmlrpc_client.Error) as e:
                    entry.fail('Failed moving torrent: %s' % str(e))
                    return
        # Remove directory from update otherwise rTorrent will append the title to the directory path
        if 'directory' in options:
            del options['directory']
        try:
            client.update(info_hash, options)
            log.verbose('Updated %s (%s) in rtorrent ' % (entry['title'], info_hash))
        except (IOError, xmlrpc_client.Error) as e:
            entry.fail('Failed to update: %s' % str(e))
            return

    def add_entry(self, client, entry, options, start=True, mkdir=False):
        """Load the entry's torrent (file or magnet) into rTorrent."""
        if 'torrent_info_hash' not in entry:
            entry.fail('missing torrent_info_hash')
            return
        if entry['url'].startswith('magnet:'):
            # Synthesize a minimal bencoded wrapper around the magnet URI.
            torrent_raw = 'd10:magnet-uri%d:%se' % (len(entry['url']), entry['url'])
        else:
            # Check that file is downloaded
            if 'file' not in entry:
                entry.fail('file missing?')
                return
            # Verify the temp file exists
            if not os.path.exists(entry['file']):
                entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
                return
            # Verify valid torrent file
            if not is_torrent_file(entry['file']):
                entry.fail("Downloaded temp file '%s' is not a torrent file" % entry['file'])
                return
            try:
                with open(entry['file'], 'rb') as f:
                    torrent_raw = f.read()
            except IOError as e:
                entry.fail('Failed to add to rTorrent %s' % str(e))
                return
            try:
                Torrent(torrent_raw)
            except SyntaxError as e:
                entry.fail('Strange, unable to decode torrent, raise a BUG: %s' % str(e))
                return
        # First check if it already exists
        try:
            if client.torrent(entry['torrent_info_hash']):
                log.warning("Torrent %s already exists, won't add" % entry['title'])
                return
        except IOError as e:
            entry.fail("Error checking if torrent already exists %s" % str(e))
        except xmlrpc_client.Error:
            # No existing found
            pass
        try:
            resp = client.load(torrent_raw, fields=options, start=start, mkdir=mkdir)
            if resp != 0:
                entry.fail('Failed to add to rTorrent invalid return value %s' % resp)
        except (IOError, xmlrpc_client.Error) as e:
            log.exception(e)
            entry.fail('Failed to add to rTorrent %s' % str(e))
            return
        # Verify the torrent loaded
        try:
            self._verify_load(client, entry['torrent_info_hash'])
            log.info('%s added to rtorrent' % entry['title'])
        except (IOError, xmlrpc_client.Error) as e:
            entry.fail('Failed to verify torrent loaded: %s' % str(e))

    def on_task_exit(self, task, config):
        """Make sure all temp files are cleaned up when the task exits."""
        # If download plugin is enabled, it will handle cleanup.
        if 'download' not in task.config:
            download = plugin.get_plugin_by_name('download')
            download.instance.cleanup_temp_files(task)

    on_task_abort = on_task_exit
class RTorrentInputPlugin(RTorrentPluginBase):
    """Input plugin: create entries from torrents in an rTorrent view."""

    schema = {
        'type': 'object',
        'properties': {
            'uri': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'view': {'type': 'string', 'default': 'main'},
            'fields': one_or_more({'type': 'string', 'enum': list(RTorrent.default_fields)}),
        },
        'required': ['uri'],
        'additionalProperties': False
    }

    def on_task_input(self, task, config):
        """Return one Entry per torrent in the configured rTorrent view."""
        client = RTorrent(config['uri'], username=config.get('username'),
                          password=config.get('password'))
        requested_fields = config.get('fields')
        try:
            torrents = client.torrents(config['view'], fields=requested_fields)
        except (IOError, xmlrpc_client.Error) as e:
            task.abort('Could not get torrents (%s): %s' % (config['view'], e))
            return
        entries = []
        for torrent in torrents:
            entry = Entry(
                title=torrent['name'],
                url='%s/%s' % (config['uri'], torrent['hash']),
                path=torrent['base_path'],
                torrent_info_hash=torrent['hash'],
            )
            # Expose every returned rTorrent field on the entry as well.
            for attr, value in torrent.items():
                entry[attr] = value
            entries.append(entry)
        return entries
@event('plugin.register')
def register_plugin():
    """Register the rTorrent output and input plugins with Flexget."""
    plugin.register(RTorrentOutputPlugin, 'rtorrent', api_ver=2)
    plugin.register(RTorrentInputPlugin, 'from_rtorrent', api_ver=2)
# ---- end of rtorrent plugin module ----
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from oslo_config import cfg
import six
import webob.exc
import heat.api.middleware.fault as fault
import heat.api.openstack.v1.stacks as stacks
from heat.common import exception as heat_exc
from heat.common import identifier
from heat.common import policy
from heat.common import template_format
from heat.common import urlfetch
from heat.rpc import api as rpc_api
from heat.rpc import client as rpc_client
from heat.tests.api.openstack_v1 import tools
from heat.tests import common
class InstantiationDataTest(common.HeatTestCase):
    """Tests for stacks.InstantiationData's parsing of create/update bodies."""

    def test_parse_error_success(self):
        """parse_error_check is a no-op when the body raises nothing."""
        with stacks.InstantiationData.parse_error_check('Garbage'):
            pass

    def test_parse_error(self):
        """Errors inside parse_error_check become HTTPBadRequest."""
        def generate_error():
            with stacks.InstantiationData.parse_error_check('Garbage'):
                raise ValueError
        self.assertRaises(webob.exc.HTTPBadRequest, generate_error)

    def test_parse_error_message(self):
        # make sure the parser error gets through to the caller.
        bad_temp = '''
heat_template_version: '2013-05-23'
parameters:
KeyName:
type: string
description: bla
'''
        def generate_error():
            with stacks.InstantiationData.parse_error_check('foo'):
                template_format.parse(bad_temp)
        parse_ex = self.assertRaises(webob.exc.HTTPBadRequest, generate_error)
        self.assertIn('foo', six.text_type(parse_ex))

    def test_stack_name(self):
        """stack_name() returns the value supplied in the body."""
        body = {'stack_name': 'wibble'}
        data = stacks.InstantiationData(body)
        self.assertEqual('wibble', data.stack_name())

    def test_stack_name_missing(self):
        """A missing stack_name raises HTTPBadRequest."""
        body = {'not the stack_name': 'wibble'}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.stack_name)

    def test_template_inline(self):
        """An inline template dict is returned as-is."""
        template = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'template': template}
        data = stacks.InstantiationData(body)
        self.assertEqual(template, data.template())

    def test_template_string_json(self):
        """A JSON string template is parsed into a dict."""
        template = ('{"heat_template_version": "2013-05-23",'
                    '"foo": "bar", "blarg": "wibble"}')
        body = {'template': template}
        data = stacks.InstantiationData(body)
        self.assertEqual(json.loads(template), data.template())

    def test_template_string_yaml(self):
        """A YAML string template is parsed into a dict."""
        template = '''HeatTemplateFormatVersion: 2012-12-12
foo: bar
blarg: wibble
'''
        parsed = {u'HeatTemplateFormatVersion': u'2012-12-12',
                  u'blarg': u'wibble',
                  u'foo': u'bar'}
        body = {'template': template}
        data = stacks.InstantiationData(body)
        self.assertEqual(parsed, data.template())

    def test_template_url(self):
        """A template_url is fetched with urlfetch and parsed."""
        template = {'heat_template_version': '2013-05-23',
                    'foo': 'bar',
                    'blarg': 'wibble'}
        url = 'http://example.com/template'
        body = {'template_url': url}
        data = stacks.InstantiationData(body)
        self.m.StubOutWithMock(urlfetch, 'get')
        urlfetch.get(url).AndReturn(json.dumps(template))
        self.m.ReplayAll()
        self.assertEqual(template, data.template())
        self.m.VerifyAll()

    def test_template_priority(self):
        """An inline template wins over template_url (urlfetch not called)."""
        template = {'foo': 'bar', 'blarg': 'wibble'}
        url = 'http://example.com/template'
        body = {'template': template, 'template_url': url}
        data = stacks.InstantiationData(body)
        self.m.StubOutWithMock(urlfetch, 'get')
        self.m.ReplayAll()
        self.assertEqual(template, data.template())
        self.m.VerifyAll()

    def test_template_missing(self):
        """No template key at all raises HTTPBadRequest."""
        template = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'not the template': template}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.template)

    def test_parameters(self):
        # NOTE: despite the name, this exercises environment() with a body
        # that already contains every environment section.
        params = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'parameters': params,
                'encrypted_param_names': [],
                'parameter_defaults': {},
                'resource_registry': {}}
        data = stacks.InstantiationData(body)
        self.assertEqual(body, data.environment())

    def test_environment_only_params(self):
        """A plain environment dict is passed through by environment()."""
        env = {'parameters': {'foo': 'bar', 'blarg': 'wibble'}}
        body = {'environment': env}
        data = stacks.InstantiationData(body)
        self.assertEqual(env, data.environment())

    def test_environment_and_parameters(self):
        """Top-level parameters are merged into the environment."""
        body = {'parameters': {'foo': 'bar'},
                'environment': {'parameters': {'blarg': 'wibble'}}}
        expect = {'parameters': {'blarg': 'wibble',
                                 'foo': 'bar'},
                  'encrypted_param_names': [],
                  'parameter_defaults': {},
                  'resource_registry': {}}
        data = stacks.InstantiationData(body)
        self.assertEqual(expect, data.environment())

    def test_parameters_override_environment(self):
        # This tests that the cli parameters will override
        # any parameters in the environment.
        body = {'parameters': {'foo': 'bar',
                               'tester': 'Yes'},
                'environment': {'parameters': {'blarg': 'wibble',
                                               'tester': 'fail'}}}
        expect = {'parameters': {'blarg': 'wibble',
                                 'foo': 'bar',
                                 'tester': 'Yes'},
                  'encrypted_param_names': [],
                  'parameter_defaults': {},
                  'resource_registry': {}}
        data = stacks.InstantiationData(body)
        self.assertEqual(expect, data.environment())

    def test_environment_bad_format(self):
        """A JSON-string environment with unknown sections is rejected."""
        env = {'somethingnotsupported': {'blarg': 'wibble'}}
        body = {'environment': json.dumps(env)}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.environment)

    def test_environment_missing(self):
        """Without an environment, empty default sections are returned."""
        env = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'not the environment': env}
        data = stacks.InstantiationData(body)
        self.assertEqual({'parameters': {}, 'encrypted_param_names': [],
                          'parameter_defaults': {}, 'resource_registry': {}},
                         data.environment())

    def test_args(self):
        """args() strips the well-known top-level keys, leaving extras."""
        body = {
            'parameters': {},
            'environment': {},
            'stack_name': 'foo',
            'template': {},
            'template_url': 'http://example.com/',
            'timeout_mins': 60,
        }
        data = stacks.InstantiationData(body)
        self.assertEqual({'timeout_mins': 60}, data.args())
@mock.patch.object(policy.Enforcer, 'enforce')
class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'''
Tests the API class which acts as the WSGI controller,
the endpoint processing API requests after they are routed
'''
    def setUp(self):
        """Build a StackController instance with minimal dummy options."""
        super(StackControllerTest, self).setUp()
        # Create WSGI controller instance
        class DummyConfig(object):
            bind_port = 8004
        cfgopts = DummyConfig()
        self.controller = stacks.StackController(options=cfgopts)
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_index(self, mock_call, mock_enforce):
        """index translates the engine's stack list into the API summary."""
        self._mock_enforce_setup(mock_enforce, 'index', True)
        req = self._get('/stacks')
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        engine_resp = [
            {
                u'stack_identity': dict(identity),
                u'updated_time': u'2012-07-09T09:13:11Z',
                u'template_description': u'blah',
                u'description': u'blah',
                u'stack_status_reason': u'Stack successfully created',
                u'creation_time': u'2012-07-09T09:12:45Z',
                u'stack_name': identity.stack_name,
                u'stack_action': u'CREATE',
                u'stack_status': u'COMPLETE',
                u'parameters': {},
                u'outputs': [],
                u'notification_topics': [],
                u'capabilities': [],
                u'disable_rollback': True,
                u'timeout_mins': 60,
            }
        ]
        mock_call.return_value = engine_resp
        result = self.controller.index(req, tenant_id=identity.tenant)
        # index returns a reduced view: action+status collapse into one field.
        expected = {
            'stacks': [
                {
                    'links': [{"href": self._url(identity),
                               "rel": "self"}],
                    'id': '1',
                    u'updated_time': u'2012-07-09T09:13:11Z',
                    u'description': u'blah',
                    u'stack_status_reason': u'Stack successfully created',
                    u'creation_time': u'2012-07-09T09:12:45Z',
                    u'stack_name': u'wordpress',
                    u'stack_status': u'CREATE_COMPLETE'
                }
            ]
        }
        self.assertEqual(expected, result)
        default_args = {'limit': None, 'sort_keys': None, 'marker': None,
                        'sort_dir': None, 'filters': None, 'tenant_safe': True,
                        'show_deleted': False, 'show_nested': False,
                        'show_hidden': False, 'tags': None,
                        'tags_any': None, 'not_tags': None,
                        'not_tags_any': None}
        mock_call.assert_called_once_with(
            req.context, ('list_stacks', default_args), version='1.8')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_index_whitelists_pagination_params(self, mock_call, mock_enforce):
        """Only whitelisted pagination params reach the engine call."""
        self._mock_enforce_setup(mock_enforce, 'index', True)
        params = {
            'limit': 10,
            'sort_keys': 'fake sort keys',
            'marker': 'fake marker',
            'sort_dir': 'fake sort dir',
            'balrog': 'you shall not pass!'
        }
        req = self._get('/stacks', params=params)
        mock_call.return_value = []
        self.controller.index(req, tenant_id=self.tenant)
        rpc_call_args, _ = mock_call.call_args
        engine_args = rpc_call_args[1][1]
        self.assertEqual(13, len(engine_args))
        self.assertIn('limit', engine_args)
        self.assertIn('sort_keys', engine_args)
        self.assertIn('marker', engine_args)
        self.assertIn('sort_dir', engine_args)
        self.assertIn('filters', engine_args)
        self.assertIn('tenant_safe', engine_args)
        self.assertNotIn('balrog', engine_args)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_limit_not_int(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'limit': 'not-an-int'}
req = self._get('/stacks', params=params)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req,
tenant_id=self.tenant)
self.assertEqual("Only integer is acceptable by 'limit'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_index_whitelist_filter_params(self, mock_call, mock_enforce):
        """Only whitelisted filter params are forwarded in 'filters'."""
        self._mock_enforce_setup(mock_enforce, 'index', True)
        params = {
            'id': 'fake id',
            'status': 'fake status',
            'name': 'fake name',
            'action': 'fake action',
            'username': 'fake username',
            'tenant': 'fake tenant',
            'owner_id': 'fake owner-id',
            'balrog': 'you shall not pass!'
        }
        req = self._get('/stacks', params=params)
        mock_call.return_value = []
        self.controller.index(req, tenant_id=self.tenant)
        rpc_call_args, _ = mock_call.call_args
        engine_args = rpc_call_args[1][1]
        self.assertIn('filters', engine_args)
        filters = engine_args['filters']
        self.assertEqual(7, len(filters))
        self.assertIn('id', filters)
        self.assertIn('status', filters)
        self.assertIn('name', filters)
        self.assertIn('action', filters)
        self.assertIn('username', filters)
        self.assertIn('tenant', filters)
        self.assertIn('owner_id', filters)
        self.assertNotIn('balrog', filters)
def test_index_returns_stack_count_if_with_count_is_true(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'True'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
engine.count_stacks = mock.Mock(return_value=0)
result = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(0, result['count'])
def test_index_doesnt_return_stack_count_if_with_count_is_false(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'false'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
engine.count_stacks = mock.Mock()
result = self.controller.index(req, tenant_id=self.tenant)
self.assertNotIn('count', result)
assert not engine.count_stacks.called
def test_index_with_count_is_invalid(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'invalid_value'}
req = self._get('/stacks', params=params)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req, tenant_id=self.tenant)
excepted = ('Unrecognized value "invalid_value" for "with_count", '
'acceptable values are: true, false')
self.assertIn(excepted, six.text_type(exc))
    @mock.patch.object(rpc_client.EngineClient, 'count_stacks')
    def test_index_doesnt_break_with_old_engine(self, mock_count_stacks,
                                                mock_enforce):
        """If the engine lacks count_stacks, index omits 'count' gracefully."""
        self._mock_enforce_setup(mock_enforce, 'index', True)
        params = {'with_count': 'True'}
        req = self._get('/stacks', params=params)
        engine = self.controller.rpc_client
        engine.list_stacks = mock.Mock(return_value=[])
        # Simulate an older engine without the count_stacks RPC.
        mock_count_stacks.side_effect = AttributeError("Should not exist")
        result = self.controller.index(req, tenant_id=self.tenant)
        self.assertNotIn('count', result)
def test_index_enforces_global_index_if_global_tenant(self, mock_enforce):
params = {'global_tenant': 'True'}
req = self._get('/stacks', params=params)
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
self.controller.index(req, tenant_id=self.tenant)
mock_enforce.assert_called_with(action='global_index',
scope=self.controller.REQUEST_SCOPE,
context=self.context)
def test_global_index_sets_tenant_safe_to_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'global_tenant': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=False)
def test_global_index_show_deleted_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'False'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=False)
def test_global_index_show_deleted_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True)
def test_global_index_show_nested_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_nested': 'False'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_nested=False)
def test_global_index_show_nested_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_nested': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_nested=True)
    def test_index_show_deleted_True_with_count_True(self, mock_enforce):
        """show_deleted and with_count combine on both RPC calls."""
        rpc_client = self.controller.rpc_client
        rpc_client.list_stacks = mock.Mock(return_value=[])
        rpc_client.count_stacks = mock.Mock(return_value=0)
        params = {'show_deleted': 'True',
                  'with_count': 'True'}
        req = self._get('/stacks', params=params)
        result = self.controller.index(req, tenant_id=self.tenant)
        self.assertEqual(0, result['count'])
        rpc_client.list_stacks.assert_called_once_with(mock.ANY,
                                                       filters=mock.ANY,
                                                       tenant_safe=True,
                                                       show_deleted=True)
        rpc_client.count_stacks.assert_called_once_with(mock.ANY,
                                                        filters=mock.ANY,
                                                        tenant_safe=True,
                                                        show_deleted=True,
                                                        show_nested=False,
                                                        show_hidden=False,
                                                        tags=None,
                                                        tags_any=None,
                                                        not_tags=None,
                                                        not_tags_any=None)
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_detail(self, mock_call, mock_enforce):
        """detail returns the full stack record, unlike index's summary."""
        self._mock_enforce_setup(mock_enforce, 'detail', True)
        req = self._get('/stacks/detail')
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        engine_resp = [
            {
                u'stack_identity': dict(identity),
                u'updated_time': u'2012-07-09T09:13:11Z',
                u'template_description': u'blah',
                u'description': u'blah',
                u'stack_status_reason': u'Stack successfully created',
                u'creation_time': u'2012-07-09T09:12:45Z',
                u'stack_name': identity.stack_name,
                u'stack_action': u'CREATE',
                u'stack_status': u'COMPLETE',
                u'parameters': {'foo': 'bar'},
                u'outputs': ['key', 'value'],
                u'notification_topics': [],
                u'capabilities': [],
                u'disable_rollback': True,
                u'timeout_mins': 60,
            }
        ]
        mock_call.return_value = engine_resp
        result = self.controller.detail(req, tenant_id=identity.tenant)
        expected = {
            'stacks': [
                {
                    'links': [{"href": self._url(identity),
                               "rel": "self"}],
                    'id': '1',
                    u'updated_time': u'2012-07-09T09:13:11Z',
                    u'template_description': u'blah',
                    u'description': u'blah',
                    u'stack_status_reason': u'Stack successfully created',
                    u'creation_time': u'2012-07-09T09:12:45Z',
                    u'stack_name': identity.stack_name,
                    u'stack_status': u'CREATE_COMPLETE',
                    u'parameters': {'foo': 'bar'},
                    u'outputs': ['key', 'value'],
                    u'notification_topics': [],
                    u'capabilities': [],
                    u'disable_rollback': True,
                    u'timeout_mins': 60,
                }
            ]
        }
        self.assertEqual(expected, result)
        default_args = {'limit': None, 'sort_keys': None, 'marker': None,
                        'sort_dir': None, 'filters': None, 'tenant_safe': True,
                        'show_deleted': False, 'show_nested': False,
                        'show_hidden': False, 'tags': None,
                        'tags_any': None, 'not_tags': None,
                        'not_tags_any': None}
        mock_call.assert_called_once_with(
            req.context, ('list_stacks', default_args), version='1.8')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_index_rmt_aterr(self, mock_call, mock_enforce):
        """A remote AttributeError maps to a 400 fault response."""
        self._mock_enforce_setup(mock_enforce, 'index', True)
        req = self._get('/stacks')
        mock_call.side_effect = tools.to_remote_error(AttributeError())
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.index,
                                             req, tenant_id=self.tenant)
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('AttributeError', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context, ('list_stacks', mock.ANY), version='1.8')
def test_index_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', False)
req = self._get('/stacks')
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_index_rmt_interr(self, mock_call, mock_enforce):
        """A generic remote Exception maps to a 500 fault response."""
        self._mock_enforce_setup(mock_enforce, 'index', True)
        req = self._get('/stacks')
        mock_call.side_effect = tools.to_remote_error(Exception())
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.index,
                                             req, tenant_id=self.tenant)
        self.assertEqual(500, resp.json['code'])
        self.assertEqual('Exception', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context, ('list_stacks', mock.ANY), version='1.8')
    def test_create(self, mock_enforce):
        """A valid create body triggers a create_stack RPC and returns links."""
        self._mock_enforce_setup(mock_enforce, 'create', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': identity.stack_name,
                'parameters': parameters,
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('create_stack',
             {'stack_name': identity.stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None}),
            version='1.8'
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        response = self.controller.create(req,
                                          tenant_id=identity.tenant,
                                          body=body)
        expected = {'stack':
                    {'id': '1',
                     'links': [{'href': self._url(identity), 'rel': 'self'}]}}
        self.assertEqual(expected, response)
        self.m.VerifyAll()
    def test_create_with_tags(self, mock_enforce):
        """A comma-separated 'tags' string is split into a list for the RPC."""
        self._mock_enforce_setup(mock_enforce, 'create', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': identity.stack_name,
                'parameters': parameters,
                'tags': 'tag1,tag2',
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('create_stack',
             {'stack_name': identity.stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None}),
            version='1.8'
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        response = self.controller.create(req,
                                          tenant_id=identity.tenant,
                                          body=body)
        expected = {'stack':
                    {'id': '1',
                     'links': [{'href': self._url(identity), 'rel': 'self'}]}}
        self.assertEqual(expected, response)
        self.m.VerifyAll()
    def test_adopt(self, mock_enforce):
        """adopt_stack_data is passed through to create_stack's args."""
        self._mock_enforce_setup(mock_enforce, 'create', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        template = {
            "heat_template_version": "2013-05-23",
            "parameters": {"app_dbx": {"type": "string"}},
            "resources": {"res1": {"type": "GenericResourceType"}}}
        parameters = {"app_dbx": "test"}
        adopt_data = {
            "status": "COMPLETE",
            "name": "rtrove1",
            "parameters": parameters,
            "template": template,
            "action": "CREATE",
            "id": "8532f0d3-ea84-444e-b2bb-2543bb1496a4",
            "resources": {"res1": {
                "status": "COMPLETE",
                "name": "database_password",
                "resource_id": "yBpuUROjfGQ2gKOD",
                "action": "CREATE",
                "type": "GenericResourceType",
                "metadata": {}}}}
        body = {'template': None,
                'stack_name': identity.stack_name,
                'parameters': parameters,
                'timeout_mins': 30,
                'adopt_stack_data': str(adopt_data)}
        req = self._post('/stacks', json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('create_stack',
             {'stack_name': identity.stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30,
                       'adopt_stack_data': str(adopt_data)},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None}),
            version='1.8'
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        response = self.controller.create(req,
                                          tenant_id=identity.tenant,
                                          body=body)
        expected = {'stack':
                    {'id': '1',
                     'links': [{'href': self._url(identity), 'rel': 'self'}]}}
        self.assertEqual(expected, response)
        self.m.VerifyAll()
def test_adopt_timeout_not_int(self, mock_enforce):
    """A non-integer timeout_mins on adopt is rejected before any RPC."""
    self._mock_enforce_setup(mock_enforce, 'create', True)
    stack_identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
    request_body = {'template': None,
                    'stack_name': stack_identity.stack_name,
                    'parameters': {},
                    'timeout_mins': 'not-an-int',
                    'adopt_stack_data': 'does not matter'}
    request = self._post('/stacks', json.dumps(request_body))
    rpc_mock = self.patchobject(rpc_client.EngineClient, 'call')
    raised = self.assertRaises(webob.exc.HTTPBadRequest,
                               self.controller.create, request,
                               tenant_id=self.tenant,
                               body=request_body)
    self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
                     six.text_type(raised))
    # The request must be rejected client-side; the engine is never called.
    self.assertFalse(rpc_mock.called)
def test_adopt_error(self, mock_enforce):
    """Malformed adopt_stack_data (a list, not a dict) yields 400."""
    self._mock_enforce_setup(mock_enforce, 'create', True)
    stack_identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
    stack_params = {"app_dbx": "test"}
    # A list is not valid adopt data; the API must reject it.
    bad_adopt_data = ["Test"]
    request_body = {'template': None,
                    'stack_name': stack_identity.stack_name,
                    'parameters': stack_params,
                    'timeout_mins': 30,
                    'adopt_stack_data': str(bad_adopt_data)}
    request = self._post('/stacks', json.dumps(request_body))
    self.m.ReplayAll()
    result = tools.request_with_middleware(fault.FaultWrapper,
                                           self.controller.create,
                                           request,
                                           tenant_id=self.tenant,
                                           body=request_body)
    self.assertEqual(400, result.status_code)
    self.assertEqual('400 Bad Request', result.status)
    self.assertIn('Invalid adopt data', result.text)
    self.m.VerifyAll()
def test_create_with_files(self, mock_enforce):
    """Create: the 'files' map from the request body reaches the engine."""
    self._mock_enforce_setup(mock_enforce, 'create', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    body = {'template': template,
            'stack_name': identity.stack_name,
            'parameters': parameters,
            'files': {'my.yaml': 'This is the file contents.'},
            'timeout_mins': 30}
    req = self._post('/stacks', json.dumps(body))
    # Record the engine RPC, including the files dict verbatim.
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('create_stack',
         {'stack_name': identity.stack_name,
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {'my.yaml': 'This is the file contents.'},
          'args': {'timeout_mins': 30},
          'owner_id': None,
          'nested_depth': 0,
          'user_creds_id': None,
          'parent_resource_name': None,
          'stack_user_project_id': None}),
        version='1.8'
    ).AndReturn(dict(identity))
    self.m.ReplayAll()
    result = self.controller.create(req,
                                    tenant_id=identity.tenant,
                                    body=body)
    expected = {'stack':
                {'id': '1',
                 'links': [{'href': self._url(identity), 'rel': 'self'}]}}
    self.assertEqual(expected, result)
    self.m.VerifyAll()
def test_create_err_rpcerr(self, mock_enforce):
    """Create: remote engine errors map to HTTP 400 with the error type.

    Three ordered RPC expectations are recorded, each raising a different
    remote error (AttributeError, UnknownUserParameter,
    UserParameterMissing); three requests then consume them in turn.
    """
    # Policy enforced three times, once per request below.
    self._mock_enforce_setup(mock_enforce, 'create', True, 3)
    stack_name = "wordpress"
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    body = {'template': template,
            'stack_name': stack_name,
            'parameters': parameters,
            'timeout_mins': 30}
    req = self._post('/stacks', json.dumps(body))
    unknown_parameter = heat_exc.UnknownUserParameter(key='a')
    missing_parameter = heat_exc.UserParameterMissing(key='a')
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    # 1st call: generic remote AttributeError.
    rpc_client.EngineClient.call(
        req.context,
        ('create_stack',
         {'stack_name': stack_name,
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {'timeout_mins': 30},
          'owner_id': None,
          'nested_depth': 0,
          'user_creds_id': None,
          'parent_resource_name': None,
          'stack_user_project_id': None}),
        version='1.8'
    ).AndRaise(tools.to_remote_error(AttributeError()))
    # 2nd call: unknown user parameter.
    rpc_client.EngineClient.call(
        req.context,
        ('create_stack',
         {'stack_name': stack_name,
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {'timeout_mins': 30},
          'owner_id': None,
          'nested_depth': 0,
          'user_creds_id': None,
          'parent_resource_name': None,
          'stack_user_project_id': None}),
        version='1.8'
    ).AndRaise(tools.to_remote_error(unknown_parameter))
    # 3rd call: missing user parameter.
    rpc_client.EngineClient.call(
        req.context,
        ('create_stack',
         {'stack_name': stack_name,
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {'timeout_mins': 30},
          'owner_id': None,
          'nested_depth': 0,
          'user_creds_id': None,
          'parent_resource_name': None,
          'stack_user_project_id': None}),
        version='1.8'
    ).AndRaise(tools.to_remote_error(missing_parameter))
    self.m.ReplayAll()
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.create,
                                         req, tenant_id=self.tenant,
                                         body=body)
    self.assertEqual(400, resp.json['code'])
    self.assertEqual('AttributeError', resp.json['error']['type'])
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.create,
                                         req, tenant_id=self.tenant,
                                         body=body)
    self.assertEqual(400, resp.json['code'])
    self.assertEqual('UnknownUserParameter', resp.json['error']['type'])
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.create,
                                         req, tenant_id=self.tenant,
                                         body=body)
    self.assertEqual(400, resp.json['code'])
    self.assertEqual('UserParameterMissing', resp.json['error']['type'])
    self.m.VerifyAll()
def test_create_err_existing(self, mock_enforce):
    """Create: a remote StackExists error maps to HTTP 409 Conflict."""
    self._mock_enforce_setup(mock_enforce, 'create', True)
    stack_name = "wordpress"
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    body = {'template': template,
            'stack_name': stack_name,
            'parameters': parameters,
            'timeout_mins': 30}
    req = self._post('/stacks', json.dumps(body))
    error = heat_exc.StackExists(stack_name='s')
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('create_stack',
         {'stack_name': stack_name,
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {'timeout_mins': 30},
          'owner_id': None,
          'nested_depth': 0,
          'user_creds_id': None,
          'parent_resource_name': None,
          'stack_user_project_id': None}),
        version='1.8'
    ).AndRaise(tools.to_remote_error(error))
    self.m.ReplayAll()
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.create,
                                         req, tenant_id=self.tenant,
                                         body=body)
    self.assertEqual(409, resp.json['code'])
    self.assertEqual('StackExists', resp.json['error']['type'])
    self.m.VerifyAll()
def test_create_timeout_not_int(self, mock_enforce):
    """A non-integer timeout_mins on create is rejected before any RPC."""
    self._mock_enforce_setup(mock_enforce, 'create', True)
    name = "wordpress"
    tmpl = {u'Foo': u'bar'}
    params = {u'InstanceType': u'm1.xlarge'}
    payload = {'template': tmpl,
               'stack_name': name,
               'parameters': params,
               'timeout_mins': 'not-an-int'}
    request = self._post('/stacks', json.dumps(payload))
    engine_call = self.patchobject(rpc_client.EngineClient, 'call')
    err = self.assertRaises(webob.exc.HTTPBadRequest,
                            self.controller.create, request,
                            tenant_id=self.tenant, body=payload)
    self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
                     six.text_type(err))
    # Validation failed client-side, so no engine RPC was issued.
    self.assertFalse(engine_call.called)
def test_create_err_denied_policy(self, mock_enforce):
    """Create denied by policy returns 403 Forbidden."""
    self._mock_enforce_setup(mock_enforce, 'create', False)
    payload = {'template': {u'Foo': u'bar'},
               'stack_name': "wordpress",
               'parameters': {u'InstanceType': u'm1.xlarge'},
               'timeout_mins': 30}
    request = self._post('/stacks', json.dumps(payload))
    result = tools.request_with_middleware(fault.FaultWrapper,
                                           self.controller.create,
                                           request,
                                           tenant_id=self.tenant,
                                           body=payload)
    self.assertEqual(403, result.status_int)
    self.assertIn('403 Forbidden', six.text_type(result))
def test_create_err_engine(self, mock_enforce):
    """Create: a remote StackValidationFailed maps to HTTP 400."""
    self._mock_enforce_setup(mock_enforce, 'create', True)
    stack_name = "wordpress"
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    body = {'template': template,
            'stack_name': stack_name,
            'parameters': parameters,
            'timeout_mins': 30}
    req = self._post('/stacks', json.dumps(body))
    error = heat_exc.StackValidationFailed(message='')
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('create_stack',
         {'stack_name': stack_name,
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {'timeout_mins': 30},
          'owner_id': None,
          'nested_depth': 0,
          'user_creds_id': None,
          'parent_resource_name': None,
          'stack_user_project_id': None}),
        version='1.8'
    ).AndRaise(tools.to_remote_error(error))
    self.m.ReplayAll()
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.create,
                                         req, tenant_id=self.tenant,
                                         body=body)
    self.assertEqual(400, resp.json['code'])
    self.assertEqual('StackValidationFailed', resp.json['error']['type'])
    self.m.VerifyAll()
def test_create_err_stack_bad_reqest(self, mock_enforce):
    """Disguised HTTP exceptions are unwrapped by the fault middleware.

    NOTE(review): method name has a typo ("reqest") but renaming would
    change the test identifier, so it is left as-is.
    """
    # Debug mode so the fault response includes a traceback.
    cfg.CONF.set_override('debug', True)
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    body = {'template': template,
            'parameters': parameters,
            'timeout_mins': 30}
    req = self._post('/stacks', json.dumps(body))
    error = heat_exc.HTTPExceptionDisguise(webob.exc.HTTPBadRequest())
    self.controller.create = mock.MagicMock(side_effect=error)
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.create, req, body)
    # When HTTP disguised exceptions reach the fault app, they are
    # converted into regular responses, just like non-HTTP exceptions
    self.assertEqual(400, resp.json['code'])
    self.assertEqual('HTTPBadRequest', resp.json['error']['type'])
    self.assertIsNotNone(resp.json['error']['traceback'])
@mock.patch.object(rpc_client.EngineClient, 'call')
@mock.patch.object(stacks.stacks_view, 'format_stack')
def test_preview_stack(self, mock_format, mock_call, mock_enforce):
    """Preview returns the formatted stack from the view layer."""
    self._mock_enforce_setup(mock_enforce, 'preview', True)
    payload = {'stack_name': 'foo', 'template': {}, 'parameters': {}}
    request = self._post('/stacks/preview', json.dumps(payload))
    mock_call.return_value = {}
    mock_format.return_value = 'formatted_stack'
    preview = self.controller.preview(req=request,
                                      tenant_id=self.tenant,
                                      body=payload)
    self.assertEqual({'stack': 'formatted_stack'}, preview)
@mock.patch.object(rpc_client.EngineClient, 'call')
@mock.patch.object(stacks.stacks_view, 'format_stack')
def test_preview_with_tags_timeout(self, mock_format, mock_call,
                                   mock_enforce):
    """Preview forwards tags (split on commas) and timeout to the engine."""
    self._mock_enforce_setup(mock_enforce, 'preview', True)
    stack_identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
    tmpl = {u'Foo': u'bar'}
    stack_params = {u'InstanceType': u'm1.xlarge'}
    payload = {'template': tmpl,
               'stack_name': stack_identity.stack_name,
               'parameters': stack_params,
               'tags': 'tag1,tag2',
               'timeout_mins': 30}
    request = self._post('/stacks/preview', json.dumps(payload))
    mock_call.return_value = {}
    mock_format.return_value = 'formatted_stack_preview'
    preview = self.controller.preview(request,
                                      tenant_id=stack_identity.tenant,
                                      body=payload)
    # mock_call IS the patched EngineClient.call attribute, so asserting
    # on it directly is equivalent to the class-attribute lookup.
    mock_call.assert_called_once_with(
        request.context,
        ('preview_stack',
         {'stack_name': stack_identity.stack_name,
          'template': tmpl,
          'params': {'parameters': stack_params,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']}})
    )
    self.assertEqual({'stack': 'formatted_stack_preview'}, preview)
def test_preview_update_stack(self, mock_enforce):
    """PUT .../preview returns the engine's resource-change summary."""
    self._mock_enforce_setup(mock_enforce, 'preview_update', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    body = {'template': template,
            'parameters': parameters,
            'files': {},
            'timeout_mins': 30}
    req = self._put('/stacks/%(stack_name)s/%(stack_id)s/preview' %
                    identity, json.dumps(body))
    # Canned engine reply: no resources change in any category.
    resource_changes = {'updated': [],
                        'deleted': [],
                        'unchanged': [],
                        'added': [],
                        'replaced': []}
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('preview_update_stack',
         {'stack_identity': dict(identity),
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {'timeout_mins': 30}}),
        version='1.15'
    ).AndReturn(resource_changes)
    self.m.ReplayAll()
    result = self.controller.preview_update(req, tenant_id=identity.tenant,
                                            stack_name=identity.stack_name,
                                            stack_id=identity.stack_id,
                                            body=body)
    self.assertEqual({'resource_changes': resource_changes}, result)
    self.m.VerifyAll()
def test_lookup(self, mock_enforce):
    """Lookup by name redirects (302 Found) to the stack's full URL."""
    self._mock_enforce_setup(mock_enforce, 'lookup', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
    req = self._get('/stacks/%(stack_name)s' % identity)
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('identify_stack', {'stack_name': identity.stack_name})
    ).AndReturn(identity)
    self.m.ReplayAll()
    # lookup signals the redirect by raising HTTPFound.
    found = self.assertRaises(
        webob.exc.HTTPFound, self.controller.lookup, req,
        tenant_id=identity.tenant, stack_name=identity.stack_name)
    self.assertEqual(self._url(identity), found.location)
    self.m.VerifyAll()
def test_lookup_arn(self, mock_enforce):
    """Looking up by ARN redirects without asking the engine."""
    self._mock_enforce_setup(mock_enforce, 'lookup', True)
    stack_identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
    request = self._get('/stacks%s' % stack_identity.arn_url_path())
    self.m.ReplayAll()
    # The ARN already identifies the stack, so no identify_stack RPC is
    # recorded; the redirect is raised directly.
    redirect = self.assertRaises(
        webob.exc.HTTPFound, self.controller.lookup,
        request, tenant_id=stack_identity.tenant,
        stack_name=stack_identity.arn())
    self.assertEqual(self._url(stack_identity), redirect.location)
    self.m.VerifyAll()
def test_lookup_nonexistent(self, mock_enforce):
    """Lookup of an unknown stack maps StackNotFound to HTTP 404."""
    self._mock_enforce_setup(mock_enforce, 'lookup', True)
    stack_name = 'wibble'
    req = self._get('/stacks/%(stack_name)s' % {
        'stack_name': stack_name})
    error = heat_exc.StackNotFound(stack_name='a')
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('identify_stack', {'stack_name': stack_name})
    ).AndRaise(tools.to_remote_error(error))
    self.m.ReplayAll()
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.lookup,
                                         req, tenant_id=self.tenant,
                                         stack_name=stack_name)
    self.assertEqual(404, resp.json['code'])
    self.assertEqual('StackNotFound', resp.json['error']['type'])
    self.m.VerifyAll()
def test_lookup_err_policy(self, mock_enforce):
    """Lookup denied by policy returns 403 Forbidden."""
    self._mock_enforce_setup(mock_enforce, 'lookup', False)
    name = 'wibble'
    request = self._get('/stacks/%(stack_name)s' % {'stack_name': name})
    result = tools.request_with_middleware(fault.FaultWrapper,
                                           self.controller.lookup,
                                           request,
                                           tenant_id=self.tenant,
                                           stack_name=name)
    self.assertEqual(403, result.status_int)
    self.assertIn('403 Forbidden', six.text_type(result))
def test_lookup_resource(self, mock_enforce):
    """Lookup with a sub-path redirects to that path under the stack URL."""
    self._mock_enforce_setup(mock_enforce, 'lookup', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
    req = self._get('/stacks/%(stack_name)s/resources' % identity)
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('identify_stack', {'stack_name': identity.stack_name})
    ).AndReturn(identity)
    self.m.ReplayAll()
    found = self.assertRaises(
        webob.exc.HTTPFound, self.controller.lookup, req,
        tenant_id=identity.tenant, stack_name=identity.stack_name,
        path='resources')
    # The 'resources' path segment is appended to the canonical URL.
    self.assertEqual(self._url(identity) + '/resources',
                     found.location)
    self.m.VerifyAll()
def test_lookup_resource_nonexistent(self, mock_enforce):
    """Sub-path lookup of an unknown stack maps StackNotFound to 404."""
    self._mock_enforce_setup(mock_enforce, 'lookup', True)
    stack_name = 'wibble'
    req = self._get('/stacks/%(stack_name)s/resources' % {
        'stack_name': stack_name})
    error = heat_exc.StackNotFound(stack_name='a')
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('identify_stack', {'stack_name': stack_name})
    ).AndRaise(tools.to_remote_error(error))
    self.m.ReplayAll()
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.lookup,
                                         req, tenant_id=self.tenant,
                                         stack_name=stack_name,
                                         path='resources')
    self.assertEqual(404, resp.json['code'])
    self.assertEqual('StackNotFound', resp.json['error']['type'])
    self.m.VerifyAll()
def test_lookup_resource_err_denied_policy(self, mock_enforce):
    """Sub-path lookup denied by policy returns 403 Forbidden."""
    self._mock_enforce_setup(mock_enforce, 'lookup', False)
    name = 'wibble'
    request = self._get('/stacks/%(stack_name)s/resources' % {
        'stack_name': name})
    result = tools.request_with_middleware(fault.FaultWrapper,
                                           self.controller.lookup,
                                           request,
                                           tenant_id=self.tenant,
                                           stack_name=name,
                                           path='resources')
    self.assertEqual(403, result.status_int)
    self.assertIn('403 Forbidden', six.text_type(result))
def test_show(self, mock_enforce):
    """Show: the engine's stack record is reformatted for the API.

    Notably 'stack_action' + 'stack_status' are combined into the single
    'stack_status' field, and the identity becomes 'id' plus 'links'.
    """
    self._mock_enforce_setup(mock_enforce, 'show', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
    parameters = {u'DBUsername': u'admin',
                  u'LinuxDistribution': u'F17',
                  u'InstanceType': u'm1.large',
                  u'DBRootPassword': u'admin',
                  u'DBPassword': u'admin',
                  u'DBName': u'wordpress'}
    outputs = [{u'output_key': u'WebsiteURL',
                u'description': u'URL for Wordpress wiki',
                u'output_value': u'http://10.0.0.8/wordpress'}]
    # Canned engine response for show_stack (a one-element list).
    engine_resp = [
        {
            u'stack_identity': dict(identity),
            u'updated_time': u'2012-07-09T09:13:11Z',
            u'parameters': parameters,
            u'outputs': outputs,
            u'stack_status_reason': u'Stack successfully created',
            u'creation_time': u'2012-07-09T09:12:45Z',
            u'stack_name': identity.stack_name,
            u'notification_topics': [],
            u'stack_action': u'CREATE',
            u'stack_status': u'COMPLETE',
            u'description': u'blah',
            u'disable_rollback': True,
            u'timeout_mins': 60,
            u'capabilities': [],
        }
    ]
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('show_stack', {'stack_identity': dict(identity)})
    ).AndReturn(engine_resp)
    self.m.ReplayAll()
    response = self.controller.show(req,
                                    tenant_id=identity.tenant,
                                    stack_name=identity.stack_name,
                                    stack_id=identity.stack_id)
    expected = {
        'stack': {
            'links': [{"href": self._url(identity),
                       "rel": "self"}],
            'id': '6',
            u'updated_time': u'2012-07-09T09:13:11Z',
            u'parameters': parameters,
            u'outputs': outputs,
            u'description': u'blah',
            u'stack_status_reason': u'Stack successfully created',
            u'creation_time': u'2012-07-09T09:12:45Z',
            u'stack_name': identity.stack_name,
            # action + status merged into a single status string.
            u'stack_status': u'CREATE_COMPLETE',
            u'capabilities': [],
            u'notification_topics': [],
            u'disable_rollback': True,
            u'timeout_mins': 60,
        }
    }
    self.assertEqual(expected, response)
    self.m.VerifyAll()
def test_show_notfound(self, mock_enforce):
    """Show of an unknown stack maps StackNotFound to HTTP 404."""
    self._mock_enforce_setup(mock_enforce, 'show', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
    req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
    error = heat_exc.StackNotFound(stack_name='a')
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('show_stack', {'stack_identity': dict(identity)})
    ).AndRaise(tools.to_remote_error(error))
    self.m.ReplayAll()
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.show,
                                         req, tenant_id=identity.tenant,
                                         stack_name=identity.stack_name,
                                         stack_id=identity.stack_id)
    self.assertEqual(404, resp.json['code'])
    self.assertEqual('StackNotFound', resp.json['error']['type'])
    self.m.VerifyAll()
def test_show_invalidtenant(self, mock_enforce):
    """Show against another tenant's stack is rejected with 403."""
    # Identity belongs to tenant 'wibble', not the request's tenant.
    foreign_identity = identifier.HeatIdentifier('wibble', 'wordpress', '6')
    request = self._get(
        '/stacks/%(stack_name)s/%(stack_id)s' % foreign_identity)
    self.m.ReplayAll()
    result = tools.request_with_middleware(
        fault.FaultWrapper,
        self.controller.show,
        request,
        tenant_id=foreign_identity.tenant,
        stack_name=foreign_identity.stack_name,
        stack_id=foreign_identity.stack_id)
    self.assertEqual(403, result.status_int)
    self.assertIn('403 Forbidden', six.text_type(result))
    self.m.VerifyAll()
def test_show_err_denied_policy(self, mock_enforce):
    """Show denied by policy returns 403 Forbidden."""
    self._mock_enforce_setup(mock_enforce, 'show', False)
    stack_identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    request = self._get(
        '/stacks/%(stack_name)s/%(stack_id)s' % stack_identity)
    result = tools.request_with_middleware(
        fault.FaultWrapper,
        self.controller.show,
        request,
        tenant_id=stack_identity.tenant,
        stack_name=stack_identity.stack_name,
        stack_id=stack_identity.stack_id)
    self.assertEqual(403, result.status_int)
    self.assertIn('403 Forbidden', six.text_type(result))
def test_get_template(self, mock_enforce):
    """Template: the engine's template is returned unmodified."""
    self._mock_enforce_setup(mock_enforce, 'template', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
    template = {u'Foo': u'bar'}
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('get_template', {'stack_identity': dict(identity)})
    ).AndReturn(template)
    self.m.ReplayAll()
    response = self.controller.template(req, tenant_id=identity.tenant,
                                        stack_name=identity.stack_name,
                                        stack_id=identity.stack_id)
    self.assertEqual(template, response)
    self.m.VerifyAll()
def test_get_template_err_denied_policy(self, mock_enforce):
    """Template fetch denied by policy returns 403 Forbidden."""
    self._mock_enforce_setup(mock_enforce, 'template', False)
    stack_identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    request = self._get('/stacks/%(stack_name)s/%(stack_id)s/template'
                        % stack_identity)
    self.m.ReplayAll()
    result = tools.request_with_middleware(
        fault.FaultWrapper,
        self.controller.template,
        request,
        tenant_id=stack_identity.tenant,
        stack_name=stack_identity.stack_name,
        stack_id=stack_identity.stack_id)
    self.assertEqual(403, result.status_int)
    self.assertIn('403 Forbidden', six.text_type(result))
    self.m.VerifyAll()
def test_get_template_err_notfound(self, mock_enforce):
    """Template fetch for an unknown stack maps StackNotFound to 404."""
    self._mock_enforce_setup(mock_enforce, 'template', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
    error = heat_exc.StackNotFound(stack_name='a')
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('get_template', {'stack_identity': dict(identity)})
    ).AndRaise(tools.to_remote_error(error))
    self.m.ReplayAll()
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.template,
                                         req, tenant_id=identity.tenant,
                                         stack_name=identity.stack_name,
                                         stack_id=identity.stack_id)
    self.assertEqual(404, resp.json['code'])
    self.assertEqual('StackNotFound', resp.json['error']['type'])
    self.m.VerifyAll()
def test_update(self, mock_enforce):
    """Update: a successful update responds with HTTP 202 Accepted."""
    self._mock_enforce_setup(mock_enforce, 'update', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    body = {'template': template,
            'parameters': parameters,
            'files': {},
            'timeout_mins': 30}
    req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                    json.dumps(body))
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('update_stack',
         {'stack_identity': dict(identity),
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {'timeout_mins': 30}})
    ).AndReturn(dict(identity))
    self.m.ReplayAll()
    # The controller signals acceptance by raising HTTPAccepted.
    self.assertRaises(webob.exc.HTTPAccepted,
                      self.controller.update,
                      req, tenant_id=identity.tenant,
                      stack_name=identity.stack_name,
                      stack_id=identity.stack_id,
                      body=body)
    self.m.VerifyAll()
def test_update_with_tags(self, mock_enforce):
    """Update: comma-separated tags are split into a list for the engine."""
    self._mock_enforce_setup(mock_enforce, 'update', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    body = {'template': template,
            'parameters': parameters,
            'files': {},
            'tags': 'tag1,tag2',
            'timeout_mins': 30}
    req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                    json.dumps(body))
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('update_stack',
         {'stack_identity': dict(identity),
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']}})
    ).AndReturn(dict(identity))
    self.m.ReplayAll()
    self.assertRaises(webob.exc.HTTPAccepted,
                      self.controller.update,
                      req, tenant_id=identity.tenant,
                      stack_name=identity.stack_name,
                      stack_id=identity.stack_id,
                      body=body)
    self.m.VerifyAll()
def test_update_bad_name(self, mock_enforce):
    """Update of an unknown stack maps StackNotFound to HTTP 404."""
    self._mock_enforce_setup(mock_enforce, 'update', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    body = {'template': template,
            'parameters': parameters,
            'files': {},
            'timeout_mins': 30}
    req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                    json.dumps(body))
    error = heat_exc.StackNotFound(stack_name='a')
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('update_stack',
         {'stack_identity': dict(identity),
          'template': template,
          'params': {u'parameters': parameters,
                     u'encrypted_param_names': [],
                     u'parameter_defaults': {},
                     u'resource_registry': {}},
          'files': {},
          'args': {'timeout_mins': 30}})
    ).AndRaise(tools.to_remote_error(error))
    self.m.ReplayAll()
    resp = tools.request_with_middleware(fault.FaultWrapper,
                                         self.controller.update,
                                         req, tenant_id=identity.tenant,
                                         stack_name=identity.stack_name,
                                         stack_id=identity.stack_id,
                                         body=body)
    self.assertEqual(404, resp.json['code'])
    self.assertEqual('StackNotFound', resp.json['error']['type'])
    self.m.VerifyAll()
def test_update_timeout_not_int(self, mock_enforce):
    """A non-integer timeout_mins on update is rejected before any RPC."""
    self._mock_enforce_setup(mock_enforce, 'update', True)
    stack_identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
    payload = {'template': {u'Foo': u'bar'},
               'parameters': {u'InstanceType': u'm1.xlarge'},
               'files': {},
               'timeout_mins': 'not-int'}
    request = self._put(
        '/stacks/%(stack_name)s/%(stack_id)s' % stack_identity,
        json.dumps(payload))
    engine_call = self.patchobject(rpc_client.EngineClient, 'call')
    err = self.assertRaises(webob.exc.HTTPBadRequest,
                            self.controller.update, request,
                            tenant_id=stack_identity.tenant,
                            stack_name=stack_identity.stack_name,
                            stack_id=stack_identity.stack_id,
                            body=payload)
    self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
                     six.text_type(err))
    # Validation failed client-side, so no engine RPC was issued.
    self.assertFalse(engine_call.called)
def test_update_err_denied_policy(self, mock_enforce):
    """Update denied by policy returns 403 Forbidden."""
    self._mock_enforce_setup(mock_enforce, 'update', False)
    stack_identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
    payload = {'template': {u'Foo': u'bar'},
               'parameters': {u'InstanceType': u'm1.xlarge'},
               'files': {},
               'timeout_mins': 30}
    request = self._put(
        '/stacks/%(stack_name)s/%(stack_id)s' % stack_identity,
        json.dumps(payload))
    result = tools.request_with_middleware(
        fault.FaultWrapper,
        self.controller.update,
        request,
        tenant_id=stack_identity.tenant,
        stack_name=stack_identity.stack_name,
        stack_id=stack_identity.stack_id,
        body=payload)
    self.assertEqual(403, result.status_int)
    self.assertIn('403 Forbidden', six.text_type(result))
def test_update_with_existing_parameters(self, mock_enforce):
    """PATCH update: the existing-parameters flag is sent to the engine."""
    self._mock_enforce_setup(mock_enforce, 'update_patch', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    template = {u'Foo': u'bar'}
    body = {'template': template,
            'parameters': {},
            'files': {},
            'timeout_mins': 30}
    req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                      json.dumps(body))
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('update_stack',
         {'stack_identity': dict(identity),
          'template': template,
          'params': {'parameters': {},
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          # PATCH semantics: keep parameters from the existing stack.
          'args': {rpc_api.PARAM_EXISTING: True,
                   'timeout_mins': 30}})
    ).AndReturn(dict(identity))
    self.m.ReplayAll()
    self.assertRaises(webob.exc.HTTPAccepted,
                      self.controller.update_patch,
                      req, tenant_id=identity.tenant,
                      stack_name=identity.stack_name,
                      stack_id=identity.stack_id,
                      body=body)
    self.m.VerifyAll()
def test_update_with_existing_parameters_with_tags(self, mock_enforce):
    """PATCH update: existing-parameters flag plus split tag list."""
    self._mock_enforce_setup(mock_enforce, 'update_patch', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    template = {u'Foo': u'bar'}
    body = {'template': template,
            'parameters': {},
            'files': {},
            'tags': 'tag1,tag2',
            'timeout_mins': 30}
    req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                      json.dumps(body))
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('update_stack',
         {'stack_identity': dict(identity),
          'template': template,
          'params': {'parameters': {},
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {rpc_api.PARAM_EXISTING: True,
                   'timeout_mins': 30,
                   'tags': ['tag1', 'tag2']}})
    ).AndReturn(dict(identity))
    self.m.ReplayAll()
    self.assertRaises(webob.exc.HTTPAccepted,
                      self.controller.update_patch,
                      req, tenant_id=identity.tenant,
                      stack_name=identity.stack_name,
                      stack_id=identity.stack_id,
                      body=body)
    self.m.VerifyAll()
def test_update_with_patched_existing_parameters(self, mock_enforce):
    """PATCH update: supplied parameters are merged over existing ones."""
    self._mock_enforce_setup(mock_enforce, 'update_patch', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    body = {'template': template,
            'parameters': parameters,
            'files': {},
            'timeout_mins': 30}
    req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                      json.dumps(body))
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('update_stack',
         {'stack_identity': dict(identity),
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {rpc_api.PARAM_EXISTING: True,
                   'timeout_mins': 30}})
    ).AndReturn(dict(identity))
    self.m.ReplayAll()
    self.assertRaises(webob.exc.HTTPAccepted,
                      self.controller.update_patch,
                      req, tenant_id=identity.tenant,
                      stack_name=identity.stack_name,
                      stack_id=identity.stack_id,
                      body=body)
    self.m.VerifyAll()
def test_update_with_patch_timeout_not_int(self, mock_enforce):
    """A non-integer timeout_mins on PATCH update is rejected pre-RPC."""
    self._mock_enforce_setup(mock_enforce, 'update_patch', True)
    stack_identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    payload = {'template': {u'Foo': u'bar'},
               'parameters': {u'InstanceType': u'm1.xlarge'},
               'files': {},
               'timeout_mins': 'not-int'}
    request = self._patch(
        '/stacks/%(stack_name)s/%(stack_id)s' % stack_identity,
        json.dumps(payload))
    engine_call = self.patchobject(rpc_client.EngineClient, 'call')
    err = self.assertRaises(webob.exc.HTTPBadRequest,
                            self.controller.update_patch, request,
                            tenant_id=stack_identity.tenant,
                            stack_name=stack_identity.stack_name,
                            stack_id=stack_identity.stack_id,
                            body=payload)
    self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
                     six.text_type(err))
    # Validation failed client-side, so no engine RPC was issued.
    self.assertFalse(engine_call.called)
def test_update_with_existing_and_default_parameters(
        self, mock_enforce):
    """PATCH update: clear_parameters resets the named params to defaults."""
    self._mock_enforce_setup(mock_enforce, 'update_patch', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    template = {u'Foo': u'bar'}
    clear_params = [u'DBUsername', u'DBPassword', u'LinuxDistribution']
    body = {'template': template,
            'parameters': {},
            'clear_parameters': clear_params,
            'files': {},
            'timeout_mins': 30}
    req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                      json.dumps(body))
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('update_stack',
         {'stack_identity': dict(identity),
          'template': template,
          'params': {'parameters': {},
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {rpc_api.PARAM_EXISTING: True,
                   'clear_parameters': clear_params,
                   'timeout_mins': 30}})
    ).AndReturn(dict(identity))
    self.m.ReplayAll()
    self.assertRaises(webob.exc.HTTPAccepted,
                      self.controller.update_patch,
                      req, tenant_id=identity.tenant,
                      stack_name=identity.stack_name,
                      stack_id=identity.stack_id,
                      body=body)
    self.m.VerifyAll()
def test_update_with_patched_and_default_parameters(
        self, mock_enforce):
    """PATCH update: new parameters and clear_parameters both forwarded."""
    self._mock_enforce_setup(mock_enforce, 'update_patch', True)
    identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
    template = {u'Foo': u'bar'}
    parameters = {u'InstanceType': u'm1.xlarge'}
    clear_params = [u'DBUsername', u'DBPassword', u'LinuxDistribution']
    body = {'template': template,
            'parameters': parameters,
            'clear_parameters': clear_params,
            'files': {},
            'timeout_mins': 30}
    req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                      json.dumps(body))
    self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
    rpc_client.EngineClient.call(
        req.context,
        ('update_stack',
         {'stack_identity': dict(identity),
          'template': template,
          'params': {'parameters': parameters,
                     'encrypted_param_names': [],
                     'parameter_defaults': {},
                     'resource_registry': {}},
          'files': {},
          'args': {rpc_api.PARAM_EXISTING: True,
                   'clear_parameters': clear_params,
                   'timeout_mins': 30}})
    ).AndReturn(dict(identity))
    self.m.ReplayAll()
    self.assertRaises(webob.exc.HTTPAccepted,
                      self.controller.update_patch,
                      req, tenant_id=identity.tenant,
                      stack_name=identity.stack_name,
                      stack_id=identity.stack_id,
                      body=body)
    self.m.VerifyAll()
    def test_delete(self, mock_enforce):
        # Successful delete: engine returns None and the controller signals
        # success by raising HTTPNoContent (204).
        self._mock_enforce_setup(mock_enforce, 'delete', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        # Engine returns None when delete successful
        rpc_client.EngineClient.call(
            req.context,
            ('delete_stack', {'stack_identity': dict(identity)})
        ).AndReturn(None)
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPNoContent,
                          self.controller.delete,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id)
        self.m.VerifyAll()
def test_delete_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.delete,
req, tenant_id=self.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_abandon(self, mock_enforce):
        # Abandon removes the stack from Heat management; the engine's
        # abandon data (JSON) is returned verbatim to the caller.
        self._mock_enforce_setup(mock_enforce, 'abandon', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._abandon('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        # Engine returns json data on abandon completion
        expected = {"name": "test", "id": "123"}
        rpc_client.EngineClient.call(
            req.context,
            ('abandon_stack', {'stack_identity': dict(identity)})
        ).AndReturn(expected)
        self.m.ReplayAll()
        ret = self.controller.abandon(req,
                                      tenant_id=identity.tenant,
                                      stack_name=identity.stack_name,
                                      stack_id=identity.stack_id)
        self.assertEqual(expected, ret)
        self.m.VerifyAll()
def test_abandon_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'abandon', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._abandon('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.abandon,
req, tenant_id=self.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_delete_bad_name(self, mock_enforce):
        # Deleting an unknown stack: the engine raises StackNotFound, which
        # FaultWrapper maps to a 404 with the error type in the JSON body.
        self._mock_enforce_setup(mock_enforce, 'delete', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
        req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        error = heat_exc.StackNotFound(stack_name='a')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        # Engine returns None when delete successful
        rpc_client.EngineClient.call(
            req.context,
            ('delete_stack', {'stack_identity': dict(identity)})
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.delete,
                                             req, tenant_id=identity.tenant,
                                             stack_name=identity.stack_name,
                                             stack_id=identity.stack_id)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('StackNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
    def test_validate_template(self, mock_enforce):
        # Valid template: the engine's validation result is passed back to
        # the caller unchanged.
        self._mock_enforce_setup(mock_enforce, 'validate_template', True)
        template = {u'Foo': u'bar'}
        body = {'template': template}
        req = self._post('/validate', json.dumps(body))
        engine_response = {
            u'Description': u'blah',
            u'Parameters': [
                {
                    u'NoEcho': u'false',
                    u'ParameterKey': u'InstanceType',
                    u'Description': u'Instance type'
                }
            ]
        }
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        # Record the expected RPC; params carry empty defaults because the
        # request body supplied no parameters.
        rpc_client.EngineClient.call(
            req.context,
            ('validate_template',
             {'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}}})
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        response = self.controller.validate_template(req,
                                                     tenant_id=self.tenant,
                                                     body=body)
        self.assertEqual(engine_response, response)
        self.m.VerifyAll()
    def test_validate_template_error(self, mock_enforce):
        # If the engine's validation result contains an 'Error' key the
        # controller converts it into an HTTPBadRequest.
        self._mock_enforce_setup(mock_enforce, 'validate_template', True)
        template = {u'Foo': u'bar'}
        body = {'template': template}
        req = self._post('/validate', json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('validate_template',
             {'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}}})
        ).AndReturn({'Error': 'fubar'})
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_template,
                          req, tenant_id=self.tenant, body=body)
        self.m.VerifyAll()
def test_validate_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'validate_template', False)
template = {u'Foo': u'bar'}
body = {'template': template}
req = self._post('/validate', json.dumps(body))
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.validate_template,
req, tenant_id=self.tenant, body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_list_resource_types(self, mock_enforce):
        # Listing resource types proxies to the engine (RPC API 1.16 with
        # all filters None) and wraps the list under 'resource_types'.
        self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
        req = self._get('/resource_types')
        engine_response = ['AWS::EC2::Instance',
                           'AWS::EC2::EIP',
                           'AWS::EC2::EIPAssociation']
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('list_resource_types',
             {
                 'support_status': None,
                 'type_name': None,
                 'heat_version': None
             }),
            version="1.16"
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        response = self.controller.list_resource_types(req,
                                                       tenant_id=self.tenant)
        self.assertEqual({'resource_types': engine_response}, response)
        self.m.VerifyAll()
    def test_list_resource_types_error(self, mock_enforce):
        # Engine raising ResourceTypeNotFound is mapped by FaultWrapper to
        # a 404 with the error type in the JSON body.
        self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
        req = self._get('/resource_types')
        error = heat_exc.ResourceTypeNotFound(type_name='')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('list_resource_types',
             {
                 'support_status': None,
                 'type_name': None,
                 'heat_version': None
             }),
            version="1.16"
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(
            fault.FaultWrapper,
            self.controller.list_resource_types,
            req, tenant_id=self.tenant)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
def test_list_resource_types_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_resource_types', False)
req = self._get('/resource_types')
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.list_resource_types,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_list_template_versions(self, mock_enforce):
        # Listing template versions proxies to the engine (RPC API 1.11)
        # and wraps the result under 'template_versions'.
        self._mock_enforce_setup(mock_enforce, 'list_template_versions', True)
        req = self._get('/template_versions')
        engine_response = [
            {'version': 'heat_template_version.2013-05-23', 'type': 'hot'},
            {'version': 'AWSTemplateFormatVersion.2010-09-09', 'type': 'cfn'}]
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context, ('list_template_versions', {}),
            version="1.11"
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        response = self.controller.list_template_versions(
            req, tenant_id=self.tenant)
        self.assertEqual({'template_versions': engine_response}, response)
        self.m.VerifyAll()
    def test_list_template_functions(self, mock_enforce):
        # Listing the intrinsic functions of one template version (RPC API
        # 1.13); result is wrapped under 'template_functions'.
        self._mock_enforce_setup(mock_enforce, 'list_template_functions', True)
        req = self._get('/template_versions/t1/functions')
        engine_response = [
            {'functions': 'func1', 'description': 'desc1'},
        ]
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context, (
                'list_template_functions', {'template_version': 't1'}),
            version="1.13"
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        response = self.controller.list_template_functions(
            req, tenant_id=self.tenant, template_version='t1')
        self.assertEqual({'template_functions': engine_response}, response)
        self.m.VerifyAll()
    def test_resource_schema(self, mock_enforce):
        # Schema lookup for a known resource type: the engine's schema dict
        # (properties/attributes/support_status) is returned unchanged.
        self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
        req = self._get('/resource_types/ResourceWithProps')
        type_name = 'ResourceWithProps'
        engine_response = {
            'resource_type': type_name,
            'properties': {
                'Foo': {'type': 'string', 'required': False},
            },
            'attributes': {
                'foo': {'description': 'A generic attribute'},
                'Foo': {'description': 'Another generic attribute'},
            },
            'support_status': {
                'status': 'SUPPORTED',
                'version': None,
                'message': None,
            },
        }
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('resource_schema', {'type_name': type_name})
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        response = self.controller.resource_schema(req,
                                                   tenant_id=self.tenant,
                                                   type_name=type_name)
        self.assertEqual(engine_response, response)
        self.m.VerifyAll()
    def test_resource_schema_nonexist(self, mock_enforce):
        # Schema lookup for an unknown type: engine raises
        # ResourceTypeNotFound, which FaultWrapper maps to a 404 response.
        self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
        req = self._get('/resource_types/BogusResourceType')
        type_name = 'BogusResourceType'
        error = heat_exc.ResourceTypeNotFound(type_name='BogusResourceType')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('resource_schema', {'type_name': type_name})
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.resource_schema,
                                             req, tenant_id=self.tenant,
                                             type_name=type_name)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
def test_resource_schema_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'resource_schema', False)
req = self._get('/resource_types/BogusResourceType')
type_name = 'BogusResourceType'
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.resource_schema,
req, tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_generate_template(self, mock_enforce):
        # Template generation defaults to the 'cfn' template type and uses
        # RPC API 1.9.
        self._mock_enforce_setup(mock_enforce, 'generate_template', True)
        req = self._get('/resource_types/TEST_TYPE/template')
        engine_response = {'Type': 'TEST_TYPE'}
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('generate_template', {'type_name': 'TEST_TYPE',
                                   'template_type': 'cfn'}),
            version='1.9'
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        self.controller.generate_template(req, tenant_id=self.tenant,
                                          type_name='TEST_TYPE')
        self.m.VerifyAll()
def test_generate_template_invalid_template_type(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', True)
params = {'template_type': 'invalid'}
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
req = self._get('/resource_types/TEST_TYPE/template',
params=params)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.generate_template,
req, tenant_id=self.tenant,
type_name='TEST_TYPE')
self.assertIn('Template type is not supported: Invalid template '
'type "invalid", valid types are: cfn, hot.',
six.text_type(ex))
self.assertFalse(mock_call.called)
    def test_generate_template_not_found(self, mock_enforce):
        # Generating a template for an unknown type: engine raises
        # ResourceTypeNotFound, mapped by FaultWrapper to a 404 response.
        self._mock_enforce_setup(mock_enforce, 'generate_template', True)
        req = self._get('/resource_types/NOT_FOUND/template')
        error = heat_exc.ResourceTypeNotFound(type_name='a')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('generate_template', {'type_name': 'NOT_FOUND',
                                   'template_type': 'cfn'}),
            version='1.9'
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.generate_template,
                                             req, tenant_id=self.tenant,
                                             type_name='NOT_FOUND')
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
def test_generate_template_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', False)
req = self._get('/resource_types/NOT_FOUND/template')
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.generate_template,
req, tenant_id=self.tenant,
type_name='blah')
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
class StackSerializerTest(common.HeatTestCase):
    """Tests for the JSON serializer used by stack create responses."""

    def setUp(self):
        super(StackSerializerTest, self).setUp()
        self.serializer = stacks.StackSerializer()

    def test_serialize_create(self):
        """create() sets 201, the Location header and a JSON content type."""
        stack_body = {
            'stack': {
                'id': '1',
                'links': [{'href': 'location', 'rel': 'self'}],
            },
        }
        response = self.serializer.create(webob.Response(), stack_body)
        self.assertEqual(201, response.status_int)
        self.assertEqual(b'location', response.headers['Location'])
        self.assertEqual('application/json', response.headers['Content-Type'])
| |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Copies the given "win tool" (which the toolchain uses to wrap compiler
# invocations) and the environment blocks for the 32-bit and 64-bit builds on
# Windows to the build directory.
#
# The arguments are the visual studio install location and the location of the
# win tool. The script assumes that the root build directory is the current dir
# and the files will be written to the current directory.
from __future__ import print_function
import errno
import json
import os
import re
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
import gn_helpers
SCRIPT_DIR = os.path.dirname(__file__)
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'cipd_cache_dir', # needed by vpython
'homedrive', # needed by vpython
'homepath', # needed by vpython
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'luci_context', # needed by vpython
'path',
'pathext',
'systemroot',
'temp',
'tmp',
'userprofile', # needed by vpython
'vpython_virtualenv_root' # needed by vpython
)
env = {}
# This occasionally happens and leads to misleading SYSTEMROOT error messages
# if not caught here.
if output_of_set.count('=') == 0:
raise Exception('Invalid output_of_set. Value is:\n%s' % output_of_set)
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules and actions in Chromium rely on python being in the
# path. Add the path to this python here so that if it's not in the
# path when ninja is run later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
if sys.platform in ('win32', 'cygwin'):
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _DetectVisualStudioPath():
  """Return path to the installed Visual Studio."""
  # Use the code in build/vs_toolchain.py to avoid duplicating code.
  build_dir = os.path.abspath(
      os.path.join(SCRIPT_DIR, '..', '..', '..', 'build'))
  sys.path.append(build_dir)
  import vs_toolchain
  return vs_toolchain.DetectVisualStudioPath()
def _LoadEnvFromBat(args):
  """Given a bat command, runs it and returns env vars set by it."""
  # Append '&& set' so the shell dumps its environment after the script ran.
  args = args[:]
  args.extend(('&&', 'set'))
  proc = subprocess.Popen(
      args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  output, _ = proc.communicate()
  if proc.returncode != 0:
    raise Exception('"%s" failed with error %d' % (args, proc.returncode))
  return output.decode(errors='ignore')
def _LoadToolchainEnv(cpu, toolchain_root, sdk_dir, target_store):
  """Returns a dictionary with environment variables that must be set while
  running binaries from the toolchain (e.g. INCLUDE and PATH for cl.exe)."""
  # Check if we are running in the SDK command line environment and use
  # the setup script from the SDK if so. |cpu| should be either
  # 'x86' or 'x64' or 'arm' or 'arm64'.
  assert cpu in ('x86', 'x64', 'arm', 'arm64')
  if bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', 1))) and sdk_dir:
    # Hermetic toolchain: load environment from json file.
    env = os.path.normpath(os.path.join(sdk_dir, 'bin/SetEnv.%s.json' % cpu))
    env = json.load(open(env))['env']
    if env['VSINSTALLDIR'] == [["..", "..\\"]]:
      # Old-style paths were relative to the win_sdk\bin directory.
      json_relative_dir = os.path.join(sdk_dir, 'bin')
    else:
      # New-style paths are relative to the toolchain directory.
      json_relative_dir = toolchain_root
    for k in env:
      # Each value in the json is a list of path components; join them
      # relative to the directory chosen above.
      entries = [os.path.join(*([json_relative_dir] + e)) for e in env[k]]
      # clang-cl wants INCLUDE to be ;-separated even on non-Windows,
      # lld-link wants LIB to be ;-separated even on non-Windows. Path gets :.
      # The separator for INCLUDE here must match the one used in main() below.
      sep = os.pathsep if k == 'PATH' else ';'
      env[k] = sep.join(entries)
    # PATH is a bit of a special case, it's in addition to the current PATH.
    env['PATH'] = env['PATH'] + os.pathsep + os.environ['PATH']
    # Augment with the current env to pick up TEMP and friends.
    for k in os.environ:
      if k not in env:
        env[k] = os.environ[k]
    # Serialize to 'NAME=value' lines for _ExtractImportantEnvironment().
    varlines = []
    for k in sorted(env.keys()):
      varlines.append('%s=%s' % (str(k), str(env[k])))
    variables = '\n'.join(varlines)
    # Check that the json file contained the same environment as the .cmd file.
    if sys.platform in ('win32', 'cygwin'):
      script = os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.cmd'))
      arg = '/' + cpu
      json_env = _ExtractImportantEnvironment(variables)
      cmd_env = _ExtractImportantEnvironment(_LoadEnvFromBat([script, arg]))
      assert _LowercaseDict(json_env) == _LowercaseDict(cmd_env)
  else:
    # Non-hermetic: locate a local Visual Studio install and run vcvarsall.
    if 'GYP_MSVS_OVERRIDE_PATH' not in os.environ:
      os.environ['GYP_MSVS_OVERRIDE_PATH'] = _DetectVisualStudioPath()
    # We only support x64-hosted tools.
    script_path = os.path.normpath(os.path.join(
        os.environ['GYP_MSVS_OVERRIDE_PATH'],
        'VC/vcvarsall.bat'))
    if not os.path.exists(script_path):
      # vcvarsall.bat for VS 2017 fails if run after running vcvarsall.bat from
      # VS 2013 or VS 2015. Fix this by clearing the vsinstalldir environment
      # variable. Since vcvarsall.bat appends to the INCLUDE, LIB, and LIBPATH
      # environment variables we need to clear those to avoid getting double
      # entries when vcvarsall.bat has been run before gn gen. vcvarsall.bat
      # also adds to PATH, but there is no clean way of clearing that and it
      # doesn't seem to cause problems.
      if 'VSINSTALLDIR' in os.environ:
        del os.environ['VSINSTALLDIR']
        # NOTE(review): INCLUDE/LIB/LIBPATH are deleted without their own
        # membership checks; this raises KeyError if VSINSTALLDIR is set but
        # one of them is not — confirm that combination cannot occur here.
        del os.environ['INCLUDE']
        del os.environ['LIB']
        del os.environ['LIBPATH']
      other_path = os.path.normpath(os.path.join(
          os.environ['GYP_MSVS_OVERRIDE_PATH'],
          'VC/Auxiliary/Build/vcvarsall.bat'))
      if not os.path.exists(other_path):
        raise Exception('%s is missing - make sure VC++ tools are installed.' %
                        script_path)
      script_path = other_path
    cpu_arg = "amd64"
    if (cpu != 'x64'):
      # x64 is default target CPU thus any other CPU requires a target set
      cpu_arg += '_' + cpu
    args = [script_path, cpu_arg, ]
    # Store target must come before any SDK version declaration
    if (target_store):
      args.append('store')
    # Explicitly specifying the SDK version to build with to avoid accidentally
    # building with a new and untested SDK. This should stay in sync with the
    # packaged toolchain in build/vs_toolchain.py.
    args.append('10.0.19041.0')
    variables = _LoadEnvFromBat(args)
  return _ExtractImportantEnvironment(variables)
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.items():
block += key + '=' + value + nul
block += nul
return block
def _LowercaseDict(d):
"""Returns a copy of `d` with both key and values lowercased.
Args:
d: dict to lowercase (e.g. {'A': 'BcD'}).
Returns:
A dict with both keys and values lowercased (e.g.: {'a': 'bcd'}).
"""
return {k.lower(): d[k].lower() for k in d}
def FindFileInEnvList(env, env_name, separator, file_name, optional=False):
  """Returns the realpath of the first entry of env[env_name] (split on
  separator) that contains file_name; '' when optional and not found."""
  candidates = env[env_name].split(separator)
  for candidate in candidates:
    if os.path.exists(os.path.join(candidate, file_name)):
      return os.path.realpath(candidate)
  assert optional, "%s is not found in %s:\n%s\nCheck if it is installed." % (
      file_name, env_name, '\n'.join(candidates))
  return ''
def main():
  """Loads the MSVC toolchain environment for the target CPU and prints the
  resulting GN variables (compiler/linker flag strings and tool paths)."""
  if len(sys.argv) != 7:
    print('Usage setup_toolchain.py '
          '<visual studio path> <win sdk path> '
          '<runtime dirs> <target_os> <target_cpu> '
          '<environment block name|none>')
    sys.exit(2)
  # toolchain_root and win_sdk_path are only read if the hermetic Windows
  # toolchain is set, that is if DEPOT_TOOLS_WIN_TOOLCHAIN is not set to 0.
  # With the hermetic Windows toolchain, the visual studio path in argv[1]
  # is the root of the Windows toolchain directory.
  toolchain_root = sys.argv[1]
  win_sdk_path = sys.argv[2]
  runtime_dirs = sys.argv[3]
  target_os = sys.argv[4]
  target_cpu = sys.argv[5]
  environment_block_name = sys.argv[6]
  if (environment_block_name == 'none'):
    environment_block_name = ''
  # 'winuwp' builds target the Windows Store ('store' argument to vcvarsall).
  if (target_os == 'winuwp'):
    target_store = True
  else:
    target_store = False
  cpus = ('x86', 'x64', 'arm', 'arm64')
  assert target_cpu in cpus
  vc_bin_dir = ''
  vc_lib_path = ''
  vc_lib_atlmfc_path = ''
  vc_lib_um_path = ''
  include = ''
  lib = ''
  # TODO(scottmg|goma): Do we need an equivalent of
  # ninja_use_custom_environment_files?
  def relflag(s):  # Make s relative to builddir when cwd and sdk on same drive.
    try:
      return os.path.relpath(s)
    except ValueError:
      return s
  def q(s):  # Quote s if it contains spaces or other weird characters.
    return s if re.match(r'^[a-zA-Z0-9._/\\:-]*$', s) else '"' + s + '"'
  # The assert above guarantees this loop body runs exactly once, binding
  # env/include_I/include_imsvc/libpath_flags used after the loop.
  for cpu in cpus:
    if cpu == target_cpu:
      # Extract environment variables for subprocesses.
      env = _LoadToolchainEnv(cpu, toolchain_root, win_sdk_path, target_store)
      env['PATH'] = runtime_dirs + os.pathsep + env['PATH']
      vc_bin_dir = FindFileInEnvList(env, 'PATH', os.pathsep, 'cl.exe')
      vc_lib_path = FindFileInEnvList(env, 'LIB', ';', 'msvcrt.lib')
      vc_lib_atlmfc_path = FindFileInEnvList(
          env, 'LIB', ';', 'atls.lib', optional=True)
      vc_lib_um_path = FindFileInEnvList(env, 'LIB', ';', 'user32.lib')
      # The separator for INCLUDE here must match the one used in
      # _LoadToolchainEnv() above.
      include = [p.replace('"', r'\"') for p in env['INCLUDE'].split(';') if p]
      include = list(map(relflag, include))
      lib = [p.replace('"', r'\"') for p in env['LIB'].split(';') if p]
      lib = list(map(relflag, lib))
      include_I = ' '.join([q('/I' + i) for i in include])
      include_imsvc = ' '.join([q('-imsvc' + i) for i in include])
      libpath_flags = ' '.join([q('-libpath:' + i) for i in lib])
      # Optionally write the environment block consumed by ninja.
      if (environment_block_name != ''):
        env_block = _FormatAsEnvironmentBlock(env)
        with open(environment_block_name, 'w') as f:
          f.write(env_block)
  print('vc_bin_dir = ' + gn_helpers.ToGNString(vc_bin_dir))
  assert include_I
  print('include_flags_I = ' + gn_helpers.ToGNString(include_I))
  assert include_imsvc
  if bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', 1))) and win_sdk_path:
    print('include_flags_imsvc = ' +
          gn_helpers.ToGNString(q('/winsysroot' + relflag(toolchain_root))))
  else:
    print('include_flags_imsvc = ' + gn_helpers.ToGNString(include_imsvc))
  print('vc_lib_path = ' + gn_helpers.ToGNString(vc_lib_path))
  # Possible atlmfc library path gets introduced in the future for store thus
  # output result if a result exists.
  if (vc_lib_atlmfc_path != ''):
    print('vc_lib_atlmfc_path = ' + gn_helpers.ToGNString(vc_lib_atlmfc_path))
  print('vc_lib_um_path = ' + gn_helpers.ToGNString(vc_lib_um_path))
  print('paths = ' + gn_helpers.ToGNString(env['PATH']))
  assert libpath_flags
  print('libpath_flags = ' + gn_helpers.ToGNString(libpath_flags))


if __name__ == '__main__':
  main()
| |
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Discovery document generator for an Endpoints v1 over webapp2 service."""
import re
import endpoints
from protorpc import message_types
from protorpc import messages
from components import utils
def _normalize_name(n):
"""Splits words by non-alphanumeric characters and PascalCases the rest.
Args:
n: The string to normalize.
Returns:
A normalized version of the given string.
"""
words = []
for word in re.split(r'[^0-9a-zA-Z]', n):
words.append('%s%s' % (word[:1].upper(), word[1:]))
return ''.join(words)
def _normalize_whitespace(s):
"""Replaces consecutive whitespace characters with a single space.
Args:
s: The string to normalize, or None to return an empty string.
Returns:
A normalized version of the given string.
"""
return ' '.join((s or '').split())
def _get_type_format(field):
  """Returns the schema type and format for the given message type.

  Args:
    field: The protorpc.messages.Field to get schema type and format for.

  Returns:
    (type, format) for use in the "schemas" section of a discovery document.
  """
  # The isinstance checks are ordered deliberately; in particular
  # DateTimeField must be recognized here even though callers treat it
  # separately from other MessageFields.
  if isinstance(field, messages.BooleanField):
    return ('boolean', None)

  if isinstance(field, messages.BytesField):
    return ('string', 'byte')

  if isinstance(field, message_types.DateTimeField):
    return ('string', 'date-time')

  if isinstance(field, messages.EnumField):
    return ('string', None)

  if isinstance(field, messages.FloatField):
    if field.variant == messages.Variant.DOUBLE:
      return ('number', 'double')
    return ('number', 'float')

  if isinstance(field, messages.IntegerField):
    if field.variant in (messages.Variant.INT32, messages.Variant.SINT32):
      return ('integer', 'int32')
    if field.variant in (messages.Variant.INT64, messages.Variant.SINT64):
      # If the type requires int64 or uint64, specify string or JavaScript will
      # convert them to 32-bit.
      return ('string', 'int64')
    if field.variant == messages.Variant.UINT32:
      return ('integer', 'uint32')
    if field.variant == messages.Variant.UINT64:
      return ('string', 'uint64')
    # Despite the warning about JavaScript, Endpoints v2's discovery document
    # generator uses integer, int64 as the default here. Follow their choice.
    return ('integer', 'int64')

  if isinstance(field, messages.StringField):
    return ('string', None)

  # Unrecognized field class: no type/format information available.
  return (None, None)
def _get_schemas(types):
  """Returns a schemas document for the given types.

  Args:
    types: The set of protorpc.messages.Messages subclasses to describe.

  Returns:
    A dict which can be written as JSON describing the types.
  """
  schemas = {}

  seen = set(types)
  types = list(types)
  # Messages may reference other messages whose schemas we need to add.
  # Keep a set of types we've already seen (but not necessarily processed) to
  # avoid repeatedly processing or queuing to process the same type.
  # Desired invariant: seen contains types which have ever been in types.
  # This invariant allows us to extend types mid-loop to add more types to
  # process without unnecessarily processing the same type twice. We achieve
  # this invariant by initializing seen to types and adding to seen every time
  # the loop adds to types.
  for message_type in types:
    name = _normalize_name(message_type.definition_name())
    schemas[name] = {
        'id': name,
        'type': 'object',
    }
    desc = _normalize_whitespace(message_type.__doc__)
    if desc:
      schemas[name]['description'] = desc

    properties = {}
    for field in message_type.all_fields():
      items = {}
      field_properties = {}
      schema_type = None  # only set for non-message (scalar) fields below

      # For non-message fields, add the field information to the schema
      # directly. For message fields, add a $ref to elsewhere in the schema
      # and ensure the type is queued to have its schema added. DateTimeField
      # is a message field but is treated as a non-message field.
      if (isinstance(field, messages.MessageField)
          and not isinstance(field, message_types.DateTimeField)):
        field_type = field.type().__class__
        desc = _normalize_whitespace(field_type.__doc__)
        if desc:
          field_properties['description'] = desc
        # Queue new types to have their schema added in a future iteration.
        if field_type not in seen:
          types.append(field_type)
          # Maintain loop invariant.
          seen.add(field_type)
        items['$ref'] = _normalize_name(field_type.definition_name())
      else:
        schema_type, schema_format = _get_type_format(field)
        items['type'] = schema_type
        if schema_format:
          items['format'] = schema_format
        if isinstance(field, messages.EnumField):
          # Endpoints v1 sorts these alphabetically while v2 does not.
          items['enum'] = sorted([enum.name for enum in field.type])
          # Endpoints v1 includes empty descriptions for each enum.
          items['enumDescriptions'] = ['' for _ in field.type]

      if field.default:
        field_properties['default'] = field.default
        # Defaults for types that aren't strings in the code but are strings
        # in the schema must be converted here. For example, EnumField
        # would otherwise have a default that isn't even valid JSON.
        if schema_type == 'string':
          field_properties['default'] = str(field.default)

      if field.required:
        field_properties['required'] = True

      # For repeated fields, most of the field information gets added to items.
      # For non-repeated fields, the field information is added directly to the
      # field properties. However, for parameters, even repeated fields have
      # their field information added directly to the field properties. See
      # _get_parameters below.
      if field.repeated:
        field_properties['items'] = items
        field_properties['type'] = 'array'
      else:
        field_properties.update(items)

      properties[field.name] = field_properties

    if properties:
      schemas[name]['properties'] = properties

  return schemas
def _get_parameters(message, path):
  """Returns a parameters document for the given parameters and path.

  Args:
    message: The protorpc.message.Message class describing the parameters.
    path: The path to the method.

  Returns:
    A dict which can be written as JSON describing the path parameters.
  """
  # Matches {name} placeholders in the method path.
  PARAMETER_REGEX = r'{([a-zA-Z_][a-zA-Z0-9_]*)}'

  # The order is the names of path parameters in the order in which they
  # appear in the path followed by the names of required query strings.
  order = re.findall(PARAMETER_REGEX, path)

  # Reuse the schema machinery to describe the parameter message's fields.
  parameters = _get_schemas([message]).get(
      _normalize_name(message.definition_name()), {}).get('properties', {})
  for parameter, schema in parameters.items():
    # As above, repeated fields for parameters do not have items.
    if schema['type'] == 'array':
      schema.update(schema.pop('items'))
      schema['repeated'] = True
    if parameter in order:
      schema['location'] = 'path'
    else:
      schema['location'] = 'query'
      if schema.get('required'):
        order.append(parameter)
    # Parameters have string defaults because they're part of a URL.
    if schema.get('default'):
      schema['default'] = str(schema['default'])
      # JSON bools are lowercase.
      if schema['type'] == 'boolean':
        schema['default'] = schema['default'].lower()

  document = {}
  if order:
    document['parameterOrder'] = order
  if parameters:
    document['parameters'] = parameters
  return document
def _get_methods(service):
  """Returns methods, resources, and schemas documents for the given service.

  Args:
    service: The protorpc.remote.Service to describe.

  Returns:
    A tuple of three dicts which can be written as JSON describing the methods,
    resources, and types.
  """
  document = {
      'methods': {},
      'resources': {},
  }
  # Message classes referenced by request/response bodies; they are rendered
  # into the "schemas" section at the end.
  types = set()
  for _, method in service.all_remote_methods().items():
    # Only describe methods decorated with @method.
    info = getattr(method, 'method_info', None)
    if info is None:
      continue
    # info.method_id returns <service name>.[<resource name>.]*<method name>.
    # There may be 0 or more resource names.
    method_id = info.method_id(service.api_info)
    parts = method_id.split('.')
    assert len(parts) > 1, method_id
    name = parts[-1]
    resource_parts = parts[1:-1]
    item = {
        'httpMethod': info.http_method,
        'id': method_id,
        'path': info.get_path(service.api_info),
        # NOTE(review): scope list is fixed to the email scope here — confirm
        # that is intentional for all methods.
        'scopes': [
            'https://www.googleapis.com/auth/userinfo.email',
        ],
    }
    desc = _normalize_whitespace(method.remote.method.__doc__)
    if desc:
      item['description'] = desc
    request = method.remote.request_type()
    rc = endpoints.ResourceContainer.get_request_message(method.remote)
    if not isinstance(rc, endpoints.ResourceContainer):
      # Plain message request: the whole message is the body (except for
      # GET/DELETE, which carry no body).
      if not isinstance(request, message_types.VoidMessage):
        if info.http_method not in ('GET', 'DELETE'):
          item['request'] = {
              # $refs refer to the "schemas" section of the discovery doc.
              '$ref': _normalize_name(request.__class__.definition_name()),
              'parameterName': 'resource',
          }
          types.add(request.__class__)
    else:
      # If the request type is a known ResourceContainer, create a schema
      # reference to the body if necessary. Path parameters are handled
      # differently.
      if rc.body_message_class != message_types.VoidMessage:
        if info.http_method not in ('GET', 'DELETE'):
          item['request'] = {
              '$ref': _normalize_name(rc.body_message_class.definition_name()),
              'parameterName': 'resource',
          }
          types.add(rc.body_message_class)
      item.update(_get_parameters(
          rc.parameters_message_class, info.get_path(service.api_info)))
    response = method.remote.response_type()
    if not isinstance(response, message_types.VoidMessage):
      item['response'] = {
          '$ref': _normalize_name(response.__class__.definition_name()),
      }
      types.add(response.__class__)
    # Walk (and lazily create) the nested resource containers, then attach
    # the method under the innermost one.
    pointer = document
    for part in resource_parts:
      pointer = pointer.setdefault('resources', {}).setdefault(part, {})
    pointer.setdefault('methods', {})[name] = item
  return document['methods'], document['resources'], _get_schemas(types)
def generate(classes, host, base_path):
  """Returns a discovery document for the given service.

  Args:
    classes: The non-empty list of protorpc.remote.Service classes to describe.
      All classes must be part of the same service.
    host: The host this request was received by.
    base_path: The base path under which all service paths exist.

  Returns:
    A dict which can be written as JSON describing the service.
  """
  assert classes, classes
  scheme = 'http:' if utils.is_local_dev_server() else 'https:'
  # All classes belong to the same service, so the first one's api_info
  # supplies every service-level attribute.
  api_info = classes[0].api_info
  api_name = api_info.name
  api_version = api_info.version
  document = {
      'discoveryVersion': 'v1',
      'auth': {
          'oauth2': {
              'scopes': {s: {'description': s} for s in api_info.scopes},
          },
      },
      'basePath': '%s/%s/%s' % (base_path, api_name, api_version),
      'baseUrl': '%s//%s%s/%s/%s' % (
          scheme, host, base_path, api_name, api_version),
      'batchPath': 'batch',
      'icons': {
          'x16': 'https://www.google.com/images/icons/product/search-16.gif',
          'x32': 'https://www.google.com/images/icons/product/search-32.gif',
      },
      'id': '%s:%s' % (api_name, api_version),
      'kind': 'discovery#restDescription',
      'name': api_name,
      # Standard system query parameters shared by all methods.
      'parameters': {
          'alt': {
              'default': 'json',
              'description': 'Data format for the response.',
              'enum': ['json'],
              'enumDescriptions': [
                  'Responses with Content-Type of application/json',
              ],
              'location': 'query',
              'type': 'string',
          },
          'fields': {
              'description': (
                  'Selector specifying which fields to include in a partial'
                  ' response.'),
              'location': 'query',
              'type': 'string',
          },
          'key': {
              'description': (
                  'API key. Your API key identifies your project and provides you'
                  ' with API access, quota, and reports. Required unless you provide'
                  ' an OAuth 2.0 token.'),
              'location': 'query',
              'type': 'string',
          },
          'oauth_token': {
              'description': 'OAuth 2.0 token for the current user.',
              'location': 'query',
              'type': 'string',
          },
          'prettyPrint': {
              'default': 'true',
              'description': 'Returns response with indentations and line breaks.',
              'location': 'query',
              'type': 'boolean',
          },
          'quotaUser': {
              'description': (
                  'Available to use for quota purposes for server-side applications.'
                  ' Can be any arbitrary string assigned to a user, but should not'
                  ' exceed 40 characters. Overrides userIp if both are provided.'),
              'location': 'query',
              'type': 'string',
          },
          'userIp': {
              'description': (
                  'IP address of the site where the request originates. Use this if'
                  ' you want to enforce per-user limits.'),
              'location': 'query',
              'type': 'string',
          },
      },
      'protocol': 'rest',
      'rootUrl': '%s//%s%s/' % (scheme, host, base_path),
      'servicePath': '%s/%s/' % (api_name, api_version),
      'version': api_version,
  }
  # Optional service-level fields.
  if api_info.title:
    document['title'] = api_info.title
  desc = _normalize_whitespace(api_info.description or classes[0].__doc__)
  if desc:
    document['description'] = desc
  if api_info.documentation:
    document['documentationLink'] = api_info.documentation
  # Merge the methods, resources, and schemas of every service class.
  methods = {}
  resources = {}
  schemas = {}
  for service in classes:
    m, r, s = _get_methods(service)
    methods.update(m)
    resources.update(r)
    schemas.update(s)
  for key, value in (('methods', methods),
                     ('resources', resources),
                     ('schemas', schemas)):
    if value:
      document[key] = value
  return document
def directory(classes, host, base_path):
  """Returns a directory list for the given services.

  Args:
    classes: The list of protorpc.remote.Service classes to describe.
    host: The host this request was received by.
    base_path: The base path under which all service paths exist.

  Returns:
    A dict which can be written as JSON describing the services.
  """
  scheme = 'http:' if utils.is_local_dev_server() else 'https:'
  # Build one directory entry per service, keyed by its "name:version" id so
  # duplicate ids collapse to a single entry.
  items = {}
  for service in classes:
    info = service.api_info
    entry_id = '%s:%s' % (info.name, info.version)
    entry = {
        'discoveryLink': './apis/%s/%s/rest' % (info.name, info.version),
        'discoveryRestUrl': '%s//%s%s/discovery/v1/apis/%s/%s/rest' % (
            scheme, host, base_path, info.name, info.version),
        'id': entry_id,
        'icons': {
            'x16': 'https://www.google.com/images/icons/product/search-16.gif',
            'x32': 'https://www.google.com/images/icons/product/search-32.gif',
        },
        'kind': 'discovery#directoryItem',
        'name': info.name,
        'preferred': True,
        'version': info.version,
    }
    desc = _normalize_whitespace(info.description or service.__doc__)
    if desc:
      entry['description'] = desc
    if info.documentation:
      entry['documentationLink'] = info.documentation
    items[entry_id] = entry
  document = {
      'discoveryVersion': 'v1',
      'kind': 'discovery#directoryList',
  }
  if items:
    document['items'] = sorted(items.values(), key=lambda i: i['id'])
  return document
| |
"""
Test suite for the table class
"""
from unittest import TestCase
from pynamodb.connection import TableConnection
from pynamodb.constants import DEFAULT_REGION
from pynamodb.expressions.operand import Path
from .data import DESCRIBE_TABLE_DATA, GET_ITEM_DATA
from .response import HttpOK
from unittest.mock import patch
PATCH_METHOD = 'pynamodb.connection.Connection._make_api_call'
class ConnectionTestCase(TestCase):
    """
    Tests for the TableConnection class.

    Each test patches Connection._make_api_call and asserts that the
    parameters handed to botocore match the expected low-level API shape.
    """
    def setUp(self):
        """Common fixtures: table name and region used by every test."""
        self.test_table_name = 'ci-table'
        self.region = DEFAULT_REGION
    def test_create_connection(self):
        """
        TableConnection()
        """
        conn = TableConnection(self.test_table_name)
        self.assertIsNotNone(conn)
    def test_connection_session_set_credentials(self):
        """Explicit key id/secret are propagated to the botocore session."""
        conn = TableConnection(
            self.test_table_name,
            aws_access_key_id='access_key_id',
            aws_secret_access_key='secret_access_key')
        credentials = conn.connection.session.get_credentials()
        self.assertEqual(credentials.access_key, 'access_key_id')
        self.assertEqual(credentials.secret_key, 'secret_access_key')
    def test_connection_session_set_credentials_with_session_token(self):
        """A session token is propagated alongside key id/secret."""
        conn = TableConnection(
            self.test_table_name,
            aws_access_key_id='access_key_id',
            aws_secret_access_key='secret_access_key',
            aws_session_token='session_token')
        credentials = conn.connection.session.get_credentials()
        self.assertEqual(credentials.access_key, 'access_key_id')
        self.assertEqual(credentials.secret_key, 'secret_access_key')
        self.assertEqual(credentials.token, 'session_token')
    def test_create_table(self):
        """
        TableConnection.create_table
        """
        conn = TableConnection(self.test_table_name)
        # Missing attribute_definitions/key_schema must raise before any call.
        kwargs = {
            'read_capacity_units': 1,
            'write_capacity_units': 1,
        }
        self.assertRaises(ValueError, conn.create_table, **kwargs)
        kwargs['attribute_definitions'] = [
            {
                'attribute_name': 'key1',
                'attribute_type': 'S'
            },
            {
                'attribute_name': 'key2',
                'attribute_type': 'S'
            }
        ]
        self.assertRaises(ValueError, conn.create_table, **kwargs)
        kwargs['key_schema'] = [
            {
                'attribute_name': 'key1',
                'key_type': 'hash'
            },
            {
                'attribute_name': 'key2',
                'key_type': 'range'
            }
        ]
        # Expected low-level CreateTable request (note HASH/RANGE upcasing).
        params = {
            'TableName': 'ci-table',
            'ProvisionedThroughput': {
                'WriteCapacityUnits': 1,
                'ReadCapacityUnits': 1
            },
            'AttributeDefinitions': [
                {
                    'AttributeType': 'S',
                    'AttributeName': 'key1'
                },
                {
                    'AttributeType': 'S',
                    'AttributeName': 'key2'
                }
            ],
            'KeySchema': [
                {
                    'KeyType': 'HASH',
                    'AttributeName': 'key1'
                },
                {
                    'KeyType': 'RANGE',
                    'AttributeName': 'key2'
                }
            ]
        }
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.create_table(
                **kwargs
            )
            kwargs = req.call_args[0][1]
            self.assertEqual(kwargs, params)
    def test_create_table_with_tags(self):
        """create_table converts the tags dict into the Tags list form."""
        conn = TableConnection(self.test_table_name)
        kwargs = {
            'read_capacity_units': 1,
            'write_capacity_units': 1,
            'attribute_definitions': [
                {
                    'attribute_name': 'key1',
                    'attribute_type': 'S'
                },
                {
                    'attribute_name': 'key2',
                    'attribute_type': 'S'
                }
            ],
            'key_schema': [
                {
                    'attribute_name': 'key1',
                    'key_type': 'hash'
                },
                {
                    'attribute_name': 'key2',
                    'key_type': 'range'
                }
            ],
            'tags': {
                'tag-key1': 'tag-value1',
                'tag-key2': 'tag-value2',
            }
        }
        params = {
            'TableName': 'ci-table',
            'ProvisionedThroughput': {
                'WriteCapacityUnits': 1,
                'ReadCapacityUnits': 1
            },
            'AttributeDefinitions': [
                {
                    'AttributeType': 'S',
                    'AttributeName': 'key1'
                },
                {
                    'AttributeType': 'S',
                    'AttributeName': 'key2'
                }
            ],
            'KeySchema': [
                {
                    'KeyType': 'HASH',
                    'AttributeName': 'key1'
                },
                {
                    'KeyType': 'RANGE',
                    'AttributeName': 'key2'
                }
            ],
            'Tags': [
                {
                    'Key': 'tag-key1',
                    'Value': 'tag-value1'
                },
                {
                    'Key': 'tag-key2',
                    'Value': 'tag-value2'
                }
            ]
        }
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.create_table(
                **kwargs
            )
            kwargs = req.call_args[0][1]
            self.assertEqual(kwargs, params)
    def test_update_time_to_live(self):
        """
        TableConnection.update_time_to_live
        """
        params = {
            'TableName': 'ci-table',
            'TimeToLiveSpecification': {
                'AttributeName': 'ttl_attr',
                'Enabled': True,
            }
        }
        with patch(PATCH_METHOD) as req:
            req.return_value = HttpOK(), None
            conn = TableConnection(self.test_table_name)
            conn.update_time_to_live('ttl_attr')
            kwargs = req.call_args[0][1]
            self.assertEqual(kwargs, params)
    def test_delete_table(self):
        """
        TableConnection.delete_table
        """
        params = {'TableName': 'ci-table'}
        with patch(PATCH_METHOD) as req:
            req.return_value = HttpOK(), None
            conn = TableConnection(self.test_table_name)
            conn.delete_table()
            kwargs = req.call_args[0][1]
            self.assertEqual(kwargs, params)
    def test_update_table(self):
        """
        TableConnection.update_table
        """
        # Throughput-only update.
        with patch(PATCH_METHOD) as req:
            req.return_value = HttpOK(), None
            conn = TableConnection(self.test_table_name)
            params = {
                'ProvisionedThroughput': {
                    'WriteCapacityUnits': 2,
                    'ReadCapacityUnits': 2
                },
                'TableName': self.test_table_name
            }
            conn.update_table(
                read_capacity_units=2,
                write_capacity_units=2
            )
            self.assertEqual(req.call_args[0][1], params)
        # Update including a global secondary index throughput change.
        with patch(PATCH_METHOD) as req:
            req.return_value = HttpOK(), None
            conn = TableConnection(self.test_table_name)
            global_secondary_index_updates = [
                {
                    "index_name": "foo-index",
                    "read_capacity_units": 2,
                    "write_capacity_units": 2
                }
            ]
            params = {
                'TableName': self.test_table_name,
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 2,
                    'WriteCapacityUnits': 2,
                },
                'GlobalSecondaryIndexUpdates': [
                    {
                        'Update': {
                            'IndexName': 'foo-index',
                            'ProvisionedThroughput': {
                                'ReadCapacityUnits': 2,
                                'WriteCapacityUnits': 2,
                            }
                        }
                    }
                ]
            }
            conn.update_table(
                read_capacity_units=2,
                write_capacity_units=2,
                global_secondary_index_updates=global_secondary_index_updates
            )
            self.assertEqual(req.call_args[0][1], params)
    def test_describe_table(self):
        """
        TableConnection.describe_table
        """
        with patch(PATCH_METHOD) as req:
            req.return_value = DESCRIBE_TABLE_DATA
            conn = TableConnection(self.test_table_name)
            conn.describe_table()
            self.assertEqual(conn.table_name, self.test_table_name)
            self.assertEqual(req.call_args[0][1], {'TableName': 'ci-table'})
    def test_delete_item(self):
        """
        TableConnection.delete_item
        """
        conn = TableConnection(self.test_table_name)
        # describe_table primes the key schema used to build the Key dict.
        with patch(PATCH_METHOD) as req:
            req.return_value = DESCRIBE_TABLE_DATA
            conn.describe_table()
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.delete_item(
                "Amazon DynamoDB",
                "How do I update multiple items?")
            params = {
                'ReturnConsumedCapacity': 'TOTAL',
                'Key': {
                    'ForumName': {
                        'S': 'Amazon DynamoDB'
                    },
                    'Subject': {
                        'S': 'How do I update multiple items?'
                    }
                },
                'TableName': self.test_table_name
            }
            self.assertEqual(req.call_args[0][1], params)
    def test_update_item(self):
        """
        TableConnection.update_item
        """
        conn = TableConnection(self.test_table_name)
        with patch(PATCH_METHOD) as req:
            req.return_value = DESCRIBE_TABLE_DATA
            conn.describe_table()
        # NOTE(review): attr_updates is unused below; the call uses the
        # expression-based `actions` API instead.
        attr_updates = {
            'Subject': {
                'Value': 'foo-subject',
                'Action': 'PUT'
            },
        }
        with patch(PATCH_METHOD) as req:
            req.return_value = HttpOK(), {}
            conn.update_item(
                'foo-key',
                actions=[Path('Subject').set('foo-subject')],
                range_key='foo-range-key',
            )
            params = {
                'Key': {
                    'ForumName': {
                        'S': 'foo-key'
                    },
                    'Subject': {
                        'S': 'foo-range-key'
                    }
                },
                'UpdateExpression': 'SET #0 = :0',
                'ExpressionAttributeNames': {
                    '#0': 'Subject'
                },
                'ExpressionAttributeValues': {
                    ':0': {
                        'S': 'foo-subject'
                    }
                },
                'ReturnConsumedCapacity': 'TOTAL',
                'TableName': 'ci-table'
            }
            self.assertEqual(req.call_args[0][1], params)
    def test_get_item(self):
        """
        TableConnection.get_item
        """
        conn = TableConnection(self.test_table_name)
        with patch(PATCH_METHOD) as req:
            req.return_value = DESCRIBE_TABLE_DATA
            conn.describe_table()
        with patch(PATCH_METHOD) as req:
            req.return_value = GET_ITEM_DATA
            item = conn.get_item("Amazon DynamoDB", "How do I update multiple items?")
            self.assertEqual(item, GET_ITEM_DATA)
    def test_put_item(self):
        """
        TableConnection.put_item
        """
        conn = TableConnection(self.test_table_name)
        with patch(PATCH_METHOD) as req:
            req.return_value = DESCRIBE_TABLE_DATA
            conn.describe_table()
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.put_item(
                'foo-key',
                range_key='foo-range-key',
                attributes={'ForumName': 'foo-value'}
            )
            params = {
                'ReturnConsumedCapacity': 'TOTAL',
                'TableName': self.test_table_name,
                'Item': {'ForumName': {'S': 'foo-value'}, 'Subject': {'S': 'foo-range-key'}}
            }
            self.assertEqual(req.call_args[0][1], params)
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.put_item(
                'foo-key',
                range_key='foo-range-key',
                attributes={'ForumName': 'foo-value'}
            )
            params = {
                'ReturnConsumedCapacity': 'TOTAL',
                'Item': {
                    'ForumName': {
                        'S': 'foo-value'
                    },
                    'Subject': {
                        'S': 'foo-range-key'
                    }
                },
                'TableName': self.test_table_name
            }
            self.assertEqual(req.call_args[0][1], params)
        # Conditional put using a condition expression.
        with patch(PATCH_METHOD) as req:
            req.return_value = HttpOK(), {}
            conn.put_item(
                'foo-key',
                range_key='foo-range-key',
                attributes={'ForumName': 'foo-value'},
                condition=Path('ForumName').does_not_exist()
            )
            params = {
                'ReturnConsumedCapacity': 'TOTAL',
                'Item': {
                    'ForumName': {
                        'S': 'foo-value'
                    },
                    'Subject': {
                        'S': 'foo-range-key'
                    }
                },
                'TableName': self.test_table_name,
                'ConditionExpression': 'attribute_not_exists (#0)',
                'ExpressionAttributeNames': {
                    '#0': 'ForumName'
                }
            }
            self.assertEqual(req.call_args[0][1], params)
    def test_batch_write_item(self):
        """
        TableConnection.batch_write_item
        """
        items = []
        conn = TableConnection(self.test_table_name)
        for i in range(10):
            items.append(
                {"ForumName": "FooForum", "Subject": "thread-{}".format(i)}
            )
        with patch(PATCH_METHOD) as req:
            req.return_value = DESCRIBE_TABLE_DATA
            conn.describe_table()
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.batch_write_item(
                put_items=items
            )
            params = {
                'ReturnConsumedCapacity': 'TOTAL',
                'RequestItems': {
                    self.test_table_name: [
                        {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-0'}}}},
                        {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-1'}}}},
                        {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-2'}}}},
                        {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-3'}}}},
                        {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-4'}}}},
                        {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-5'}}}},
                        {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-6'}}}},
                        {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-7'}}}},
                        {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-8'}}}},
                        {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-9'}}}}
                    ]
                }
            }
            self.assertEqual(req.call_args[0][1], params)
    def test_batch_get_item(self):
        """
        TableConnection.batch_get_item
        """
        items = []
        conn = TableConnection(self.test_table_name)
        for i in range(10):
            items.append(
                {"ForumName": "FooForum", "Subject": "thread-{}".format(i)}
            )
        with patch(PATCH_METHOD) as req:
            req.return_value = DESCRIBE_TABLE_DATA
            conn.describe_table()
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.batch_get_item(
                items
            )
            params = {
                'ReturnConsumedCapacity': 'TOTAL',
                'RequestItems': {
                    self.test_table_name: {
                        'Keys': [
                            {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-0'}},
                            {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-1'}},
                            {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-2'}},
                            {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-3'}},
                            {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-4'}},
                            {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-5'}},
                            {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-6'}},
                            {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-7'}},
                            {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-8'}},
                            {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-9'}}
                        ]
                    }
                }
            }
            self.assertEqual(req.call_args[0][1], params)
    def test_query(self):
        """
        TableConnection.query
        """
        conn = TableConnection(self.test_table_name)
        with patch(PATCH_METHOD) as req:
            req.return_value = DESCRIBE_TABLE_DATA
            conn.describe_table()
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.query(
                "FooForum",
                Path('Subject').startswith('thread')
            )
            params = {
                'ReturnConsumedCapacity': 'TOTAL',
                'KeyConditionExpression': '(#0 = :0 AND begins_with (#1, :1))',
                'ExpressionAttributeNames': {
                    '#0': 'ForumName',
                    '#1': 'Subject'
                },
                'ExpressionAttributeValues': {
                    ':0': {
                        'S': 'FooForum'
                    },
                    ':1': {
                        'S': 'thread'
                    }
                },
                'TableName': self.test_table_name
            }
            self.assertEqual(req.call_args[0][1], params)
    def test_scan(self):
        """
        TableConnection.scan
        """
        conn = TableConnection(self.test_table_name)
        with patch(PATCH_METHOD) as req:
            req.return_value = DESCRIBE_TABLE_DATA
            conn.describe_table()
        with patch(PATCH_METHOD) as req:
            req.return_value = HttpOK(), {}
            conn.scan()
            params = {
                'ReturnConsumedCapacity': 'TOTAL',
                'TableName': self.test_table_name
            }
            self.assertEqual(req.call_args[0][1], params)
| |
# Copyright 2020 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import ssl
from urllib.request import Request, urlopen
from urllib.parse import urlencode
from urllib.error import HTTPError
from . import exceptions
class KeyCloakClient:
    """Client for connecting to Keycloak and using the REST API.

    Client provides a method for issuing requests to the Keycloak REST API and
    a set of methods to simplify Keycloak configuration.

    Context manager examples:
        kc = KeyCloakClient(url)
        with kc.login(username, password):
            kc.method(arguments)

        with KeyCloakClient(url, username, password) as kc:
            kc.method(arguments)
    """
    def __init__(self, url_base, username=None, password=None, client_id='admin-cli', verify_ssl=True):
        """KeyCloakClient constructor

        Args:
            url_base (str) : The base URL to prepend to all request URLs
            username (str) : Default username used by login()
            password (str) : Default password used by login()
            client_id (str) : Default Keycloak client id for login()/logout()
            verify_ssl (bool) : Whether or not to verify HTTPS certs
        """
        self.url_base = url_base
        self.token = None
        self.username = username
        self.password = password
        self.client_id = client_id
        if self.url_base.startswith("https") and not verify_ssl:
            # Build a permissive SSL context so self-signed certs are accepted.
            self.ctx = ssl.create_default_context()
            self.ctx.check_hostname = False
            self.ctx.verify_mode = ssl.CERT_NONE
        else:
            self.ctx = None
    def request(self, url, params=None, headers=None, convert=urlencode, method=None):
        """Make a request to the Keycloak server.

        Args:
            url (str) : REST API URL to query (appended to url_base from constructor)
            params (None|dict) : None or a dict or key values that will be passed
                                 to the convert argument to produce a string
            headers (None|dict) : Dictionary of HTTP headers
            convert : Function to convert params into a string
                      Defaults to urlencode, taking a dict and making a url encoded string
            method (None|string) : HTTP method to use or None for the default method
                                   based on the different arguments

        Returns:
            (dict) : Dictionary containing JSON encoded response (empty dict for
                     an empty response body)

        Raises:
            (KeyCloakError) : if the server responds with an HTTP error status
        """
        # Avoid a shared mutable default argument; build a fresh dict per call.
        if headers is None:
            headers = {}
        request = Request(
            self.url_base + url,
            data=None if params is None else convert(params).encode("utf-8"),
            headers=headers,
            method=method
        )
        # DP TODO: rewrite or merge using the boss-tools/bossutils KeycloakClient
        try:
            response = urlopen(request, context=self.ctx).read().decode("utf-8")
            if len(response) > 0:
                response = json.loads(response)
            else:
                response = {}
            return response
        except HTTPError as e:
            raise exceptions.KeyCloakError(e.code, e.reason)
    def login(self, username=None, password=None, client_id=None):
        """Login to the Keycloak master realm and retrieve an access token.

        WARNING: If the base_url is not using HTTPS the password will be submitted
                 in plain text over the network.

        Note: A user must be logged in before any other method calls will work

        The bearer access token is saved as self.token["access_token"]

        Args:
            username (str) : Keycloak username (defaults to constructor value)
            password (str) : Keycloak password (defaults to constructor value)
            client_id (str) : Keycloak Client ID to authenticate with

        Raises:
            (Exception) : if no username/password/client_id is available
            (KeyCloakLoginError) : if authentication failed
        """
        if username is None:
            username = self.username
        if username is None:
            raise Exception("No username set")
        if password is None:
            password = self.password
        if password is None:
            raise Exception("No password set")
        if client_id is None:
            client_id = self.client_id
        if client_id is None:
            raise Exception("No client_id set")
        # Remember which client the token was issued for so logout() matches.
        self.client_id = client_id
        self.token = self.request(
            "/auth/realms/master/protocol/openid-connect/token",
            params={
                "username": username,
                "password": password,
                "grant_type": "password",
                "client_id": client_id,
            },
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
            }
        )
        if self.token is None:
            raise exceptions.KeyCloakLoginError(self.url_base, username)
        return self  # DP NOTE: So context manager works correctly
    def logout(self):
        """Logout from Keycloak.

        Logout will invalidate the Keycloak session and clean the local token
        (self.token). A no-op when not logged in.
        """
        if self.token is None:
            return
        self.request(  # no response
            "/auth/realms/master/protocol/openid-connect/logout",
            params={
                "refresh_token": self.token["refresh_token"],
                # Use the same client the token was issued for (was hardcoded
                # to "admin-cli", which broke logout for other clients).
                "client_id": self.client_id,
            },
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
            }
        )
        self.token = None
    def __enter__(self):
        """The start of the context manager, which handles automatically calling logout."""
        if self.token is None:
            self.login()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """The end of the context manager. Print any error when trying to logout and
        propagate any exception that happened while the context was active."""
        try:
            self.logout()
        except Exception:
            # Best-effort logout; don't mask the original exception (if any).
            print("Error logging out of Keycloak")
        if exc_type is None:
            return None
        else:
            return False  # don't suppress the exception
    def create_realm(self, realm):
        """Create a new realm based on the JSON based configuration.

        Note: User must be logged into Keycloak first

        Args:
            realm (dict) : JSON dictionary configuration for the new realm
        """
        self.request(
            "/auth/admin/realms",
            params=realm,
            headers={
                "Authorization": "Bearer " + self.token["access_token"],
                "Content-Type": "application/json",
            },
            convert=json.dumps
        )
    def get_client(self, realm_name, client_id):
        """Get the realm's client configuration.

        Note: User must be logged into Keycloak first

        Args:
            realm_name (str) : Name of the realm to look in for the client
            client_id (str) : Client ID of client configuration to retrieve

        Returns:
            (None|dict) : None if the client couldn't be located or the JSON
                          dictionary configuration of the client
        """
        resp = self.request(
            "/auth/admin/realms/{}/clients".format(realm_name),
            headers={
                "Authorization": "Bearer " + self.token["access_token"],
                "Content-Type": "application/x-www-form-urlencoded",
            }
        )
        if resp is None:
            return None
        for client in resp:
            if client['clientId'] == client_id:
                return client
        return None
    def update_client(self, realm_name, client):
        """Update the realm's client configuration.

        Note: User must be logged into Keycloak first

        Args:
            realm_name (str) : Name of the realm
            client (dict) : JSON dictionary configuration for the updated realm client
        """
        self.request(
            "/auth/admin/realms/{}/clients/{}".format(realm_name, client['id']),
            params=client,
            headers={
                "Authorization": "Bearer " + self.token["access_token"],
                "Content-Type": "application/json",
            },
            convert=json.dumps,
            method="PUT"
        )
    def append_list_properties(self, realm_name, client_id, additions):
        """Append a set of key values to a realm's client configuration.

        Download the current realm's client configuration, updated with the given
        key values, and then upload the updated client configuration to the Keycloak
        server.

        Note: User must be logged into Keycloak first

        Args:
            realm_name (str) : Name of the realm
            client_id (str) : Client ID of client configuration to retrieve
            additions (dict) : dictionary of additions, each entry's key should
                               correspond to a client key and that entry's (singular)
                               value will be appended to the client's property.
        """
        client = self.get_client(realm_name, client_id)
        for key, value in additions.items():
            if key not in client:
                client[key] = []
            if value not in client[key]:
                client[key].append(value)
        self.update_client(realm_name, client)
    def add_redirect_uri(self, realm_name, client_id, uri):
        """Add the given uri as a valid redirectUri to a realm's client configuration.

        Note: User must be logged into Keycloak first

        Args:
            realm_name (str) : Name of the realm
            client_id (str) : Client ID of client configuration to retrieve
            uri (str) : URL to add to the client's list of valid redirect URLs
        """
        self.append_list_properties(realm_name, client_id, {"redirectUris": uri})
    def get_client_installation_url(self, realm_name, client_id):
        """Returns information about this client installation (suitable for wget/curl).

        Note: User must be logged into Keycloak first

        Args:
            realm_name (str) : Name of the realm
            client_id (str) : Client ID of client configuration to retrieve

        Returns:
            (dict) : contains keys
                * 'url' for the complete URL to retrieve the client installation json
                * 'headers' for the authorization header populated with the bearer token.
        """
        client = self.get_client(realm_name, client_id)
        installation_endpoint = "{}/auth/admin/realms/{}/clients/{}/installation/providers/keycloak-oidc-keycloak-json"\
            .format(self.url_base, realm_name, client["id"])
        auth_header = "Authorization: Bearer {}".format(self.token["access_token"])
        return {"url": installation_endpoint, "headers": auth_header}
    def get_client_scopes(self, realm_name):
        """Gets all client scopes associated with the realm

        Note: User must be logged into Keycloak first

        Keycloak docs of the return value may not be accurate but included
        for reference

        Args:
            realm_name (str) : Name of the realm

        Returns:
            (array[dict]): An array of client scopes (https://www.keycloak.org/docs-api/11.0/rest-api/index.html#_clientscoperepresentation)
        """
        headers = {
            "Authorization": "Bearer " + self.token["access_token"],
            "Content-Type": "application/json",
        }
        return self.request(f'/auth/admin/realms/{realm_name}/client-scopes', headers=headers)
    def add_default_client_scopes(self, realm_name, client_id, scopes):
        """Add the given scopes the client.

        Args:
            realm_name (str) : Name of the realm
            client_id (str) : Client ID of client configuration to retrieve
            scopes (list[str]) : Scopes to add to the client

        Raises:
            (KeyCloakScopeNotFoundError): if a requested scope not found
        """
        client_dict = self.get_client(realm_name, client_id)
        _id = client_dict['id']
        all_scopes = self.get_client_scopes(realm_name)
        # Need to use id of scope, not its name, so map name to id.
        scope_map = {}
        for scope in all_scopes:
            if scope['name'] in scopes:
                scope_map[scope['name']] = scope['id']
                if len(scope_map) == len(scopes):
                    break
        if len(scope_map) != len(scopes):
            found = set(scope_map.keys())
            want = set(scopes)
            missing = [s for s in want.difference(found)]
            raise exceptions.KeyCloakScopeNotFoundError(f'Could not find these scopes:, {", ".join(missing)}')
        headers = {
            "Authorization": "Bearer " + self.token["access_token"],
            "Content-Type": "application/json",
        }
        for s_id in scope_map.values():
            self.request(f'/auth/admin/realms/{realm_name}/clients/{_id}/default-client-scopes/{s_id}',
                         headers=headers, method='PUT')
| |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.libraries.functions import format
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.default import default
from utils import get_bare_principal
from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.libraries.functions.is_empty import is_empty
import status_params
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import upgrade_summary
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
# server configurations
# Module-level parameters consumed by the Kafka service scripts; all names
# here are read by sibling modules, so they must not be renamed.
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()
stack_name = default("/hostLevelParams/stack_name", None)
retryAble = default("/commandParams/command_retry_enabled", False)
# Version being upgraded/downgraded to
version = default("/commandParams/version", None)
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
upgrade_direction = default("/commandParams/upgrade_direction", None)
# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
# Capability flags derived from the stack version in effect for this command.
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
stack_supports_core_site_for_ranger_plugin = check_stack_feature(StackFeature.CORE_SITE_FOR_RANGER_PLUGINS_SUPPORT, version_for_stack_feature_checks)
# When downgrading the 'version' is pointing to the downgrade-target version
# downgrade_from_version provides the source-version the downgrade is happening from
downgrade_from_version = upgrade_summary.get_downgrade_from_version("KAFKA")
hostname = config['hostname']
# default kafka parameters
# Baseline install locations, overridden below for rolling-upgrade stacks.
kafka_home = '/usr/lib/kafka'
kafka_bin = kafka_home+'/bin/kafka'
conf_dir = "/etc/kafka/conf"
limits_conf_dir = "/etc/security/limits.d"
# Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
zookeeper_connect = default("/configurations/kafka-broker/zookeeper.connect", None)
kafka_user_nofile_limit = default('/configurations/kafka-env/kafka_user_nofile_limit', 128000)
kafka_user_nproc_limit = default('/configurations/kafka-env/kafka_user_nproc_limit', 65536)
# parameters for 2.2+
if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
  # Stack-managed layout: paths live under <stack_root>/current.
  kafka_home = os.path.join(stack_root, "current", "kafka-broker")
  kafka_bin = os.path.join(kafka_home, "bin", "kafka")
  conf_dir = os.path.join(kafka_home, "config")
kafka_user = config['configurations']['kafka-env']['kafka_user']
kafka_log_dir = config['configurations']['kafka-env']['kafka_log_dir']
kafka_pid_dir = status_params.kafka_pid_dir
kafka_pid_file = kafka_pid_dir+"/kafka.pid"
# This is hardcoded on the kafka bash process lifecycle on which we have no control over
kafka_managed_pid_dir = "/var/run/kafka"
kafka_managed_log_dir = "/var/log/kafka"
user_group = config['configurations']['cluster-env']['user_group']
java64_home = config['hostLevelParams']['java_home']
kafka_env_sh_template = config['configurations']['kafka-env']['content']
kafka_jaas_conf_template = default("/configurations/kafka_jaas_conf/content", None)
kafka_client_jaas_conf_template = default("/configurations/kafka_client_jaas_conf/content", None)
# Sorted in place so generated configs are deterministic across hosts.
kafka_hosts = config['clusterHostInfo']['kafka_broker_hosts']
kafka_hosts.sort()
zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_hosts.sort()
secure_acls = default("/configurations/kafka-broker/zookeeper.set.acl", False)
kafka_security_migrator = os.path.join(kafka_home, "bin", "zookeeper-security-migration.sh")
all_hosts = default("/clusterHostInfo/all_hosts", [])
all_racks = default("/clusterHostInfo/all_racks", [])
#Kafka log4j
# Log rotation sizes (MB) and backup counts for broker/controller logs.
kafka_log_maxfilesize = default('/configurations/kafka-log4j/kafka_log_maxfilesize',256)
kafka_log_maxbackupindex = default('/configurations/kafka-log4j/kafka_log_maxbackupindex',20)
controller_log_maxfilesize = default('/configurations/kafka-log4j/controller_log_maxfilesize',256)
controller_log_maxbackupindex = default('/configurations/kafka-log4j/controller_log_maxbackupindex',20)
# log4j template content, when provided by the cluster configuration.
if (('kafka-log4j' in config['configurations']) and ('content' in config['configurations']['kafka-log4j'])):
  log4j_props = config['configurations']['kafka-log4j']['content']
else:
  log4j_props = None
# Ganglia metrics sink (legacy); enabled only when a ganglia server is present.
if 'ganglia_server_host' in config['clusterHostInfo'] and \
    len(config['clusterHostInfo']['ganglia_server_host'])>0:
  ganglia_installed = True
  ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0]
  ganglia_report_interval = 60
else:
  ganglia_installed = False
# Ambari Metrics (AMS) collector discovery: external hosts/port from
# cluster-env win over the cluster-hosted collector.
metric_collector_port = ""
metric_collector_protocol = ""
metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
set_instanceId = "false"
cluster_name = config["clusterName"]
if 'cluster-env' in config['configurations'] and \
    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
  set_instanceId = "true"
else:
  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
has_metric_collector = not len(ams_collector_hosts) == 0
if has_metric_collector:
  if 'cluster-env' in config['configurations'] and \
      'metrics_collector_external_port' in config['configurations']['cluster-env']:
    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
  else:
    # Derive the port from the collector webapp address, default 6188.
    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
    if metric_collector_web_address.find(':') != -1:
      metric_collector_port = metric_collector_web_address.split(':')[1]
    else:
      metric_collector_port = '6188'
  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
    metric_collector_protocol = 'https'
  else:
    metric_collector_protocol = 'http'
  pass
# Security-related params
security_enabled = config['configurations']['cluster-env']['security_enabled']
# Kafka is considered kerberized when the inter-broker protocol is a SASL variant.
kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
                          ((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL") or
                           (config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")))
if security_enabled and stack_version_formatted != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] \
  and check_stack_feature(StackFeature.KAFKA_KERBEROS, stack_version_formatted):
  _hostname_lowercase = config['hostname'].lower()
  _kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
  # Expand the _HOST placeholder to this host's lowercase FQDN.
  kafka_jaas_principal = _kafka_principal_name.replace('_HOST',_hostname_lowercase)
  kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
  kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
  kafka_kerberos_params = "-Djava.security.auth.login.config="+ conf_dir +"/kafka_jaas.conf"
else:
  kafka_kerberos_params = ''
  kafka_jaas_principal = None
  kafka_keytab_path = None
# for curl command in ranger plugin to get db connector
jdk_location = config['hostLevelParams']['jdk_location']
# ranger kafka plugin section start
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature
xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
# ambari-server hostname
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
# ranger kafka plugin enabled property
# NOTE(review): path below lacks the leading '/' used by every other
# default() call in this file; default() appears to tolerate both — confirm.
enable_ranger_kafka = default("configurations/ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled", "No")
enable_ranger_kafka = True if enable_ranger_kafka.lower() == 'yes' else False
# ranger kafka-plugin supported flag, instead of dependending on is_supported_kafka_ranger/kafka-env.xml, using stack feature
is_supported_kafka_ranger = check_stack_feature(StackFeature.KAFKA_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
# ranger kafka properties
if enable_ranger_kafka and is_supported_kafka_ranger:
  # get ranger policy url
  policymgr_mgr_url = config['configurations']['ranger-kafka-security']['ranger.plugin.kafka.policy.rest.url']
  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
  # ranger audit db user
  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
  xa_audit_db_password = ''
  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
  # ranger kafka service/repository name
  repo_name = str(config['clusterName']) + '_kafka'
  repo_name_value = config['configurations']['ranger-kafka-security']['ranger.plugin.kafka.service.name']
  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
    repo_name = repo_name_value
  ranger_env = config['configurations']['ranger-env']
  # create ranger-env config having external ranger credential properties
  if not has_ranger_admin and enable_ranger_kafka:
    external_admin_username = default('/configurations/ranger-kafka-plugin-properties/external_admin_username', 'admin')
    external_admin_password = default('/configurations/ranger-kafka-plugin-properties/external_admin_password', 'admin')
    external_ranger_admin_username = default('/configurations/ranger-kafka-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
    external_ranger_admin_password = default('/configurations/ranger-kafka-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
    # Replace ranger_env entirely with the externally-supplied credentials.
    ranger_env = {}
    ranger_env['admin_username'] = external_admin_username
    ranger_env['admin_password'] = external_admin_password
    ranger_env['ranger_admin_username'] = external_ranger_admin_username
    ranger_env['ranger_admin_password'] = external_ranger_admin_password
  ranger_plugin_properties = config['configurations']['ranger-kafka-plugin-properties']
  ranger_kafka_audit = config['configurations']['ranger-kafka-audit']
  ranger_kafka_audit_attrs = config['configuration_attributes']['ranger-kafka-audit']
  ranger_kafka_security = config['configurations']['ranger-kafka-security']
  ranger_kafka_security_attrs = config['configuration_attributes']['ranger-kafka-security']
  ranger_kafka_policymgr_ssl = config['configurations']['ranger-kafka-policymgr-ssl']
  ranger_kafka_policymgr_ssl_attrs = config['configuration_attributes']['ranger-kafka-policymgr-ssl']
  policy_user = config['configurations']['ranger-kafka-plugin-properties']['policy_user']
  # Repository connection config sent to Ranger admin when creating the repo.
  ranger_plugin_config = {
    'username' : config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
    'password' : config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'],
    'zookeeper.connect' : config['configurations']['ranger-kafka-plugin-properties']['zookeeper.connect'],
    'commonNameForCertificate' : config['configurations']['ranger-kafka-plugin-properties']['common.name.for.certificate']
  }
  kafka_ranger_plugin_repo = {
    'isEnabled': 'true',
    'configs': ranger_plugin_config,
    'description': 'kafka repo',
    'name': repo_name,
    'repositoryType': 'kafka',
    'type': 'kafka',
    'assetType': '1'
  }
  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
  if len(custom_ranger_service_config) > 0:
    ranger_plugin_config.update(custom_ranger_service_config)
  if stack_supports_ranger_kerberos and security_enabled:
    ranger_plugin_config['policy.download.auth.users'] = kafka_user
    ranger_plugin_config['tag.download.auth.users'] = kafka_user
    ranger_plugin_config['ambari.service.check.user'] = policy_user
  # JDBC connector download locations for Ranger audit-to-DB support.
  downloaded_custom_connector = None
  previous_jdbc_jar_name = None
  driver_curl_source = None
  driver_curl_target = None
  previous_jdbc_jar = None
  if has_ranger_admin and stack_supports_ranger_audit_db:
    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
    driver_curl_target = format("{kafka_home}/libs/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
    previous_jdbc_jar = format("{kafka_home}/libs/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
  xa_audit_db_is_enabled = False
  if xml_configurations_supported and stack_supports_ranger_audit_db:
    xa_audit_db_is_enabled = config['configurations']['ranger-kafka-audit']['xasecure.audit.destination.db']
  xa_audit_hdfs_is_enabled = default('/configurations/ranger-kafka-audit/xasecure.audit.destination.hdfs', False)
  ssl_keystore_password = config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
  ssl_truststore_password = config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
  stack_version = get_stack_version('kafka-broker')
  setup_ranger_env_sh_source = format('{stack_root}/{stack_version}/ranger-kafka-plugin/install/conf.templates/enable/kafka-ranger-env.sh')
  setup_ranger_env_sh_target = format("{conf_dir}/kafka-ranger-env.sh")
  # for SQLA explicitly disable audit to DB for Ranger
  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
    xa_audit_db_is_enabled = False
  # need this to capture cluster name from where ranger kafka plugin is enabled
  cluster_name = config['clusterName']
# ranger kafka plugin section end
# HDFS-related params; all None when the cluster has no NameNode.
namenode_hosts = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_hosts) == 0
hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
  HdfsResource,
  user=hdfs_user,
  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
  security_enabled = security_enabled,
  keytab = hdfs_user_keytab,
  kinit_path_local = kinit_path_local,
  hadoop_bin_dir = hadoop_bin_dir,
  hadoop_conf_dir = hadoop_conf_dir,
  principal_name = hdfs_principal_name,
  hdfs_site = hdfs_site,
  default_fs = default_fs,
  immutable_paths = get_not_managed_resources()
)
| |
from proteus import *
from proteus.default_p import *
from tank3D import *
from proteus.mprans import RANS2P
LevelModelType = RANS2P.LevelModel
# Index of the level-set model in the model list; None when only the
# volume-of-fluid equation is solved.
if useOnlyVF:
    LS_model = None
else:
    LS_model = 2
# Turbulence closure model indices; shifted when only VOF is used and
# incremented by one when a moving-domain model precedes them.
if useRANS >= 1:
    Closure_0_model = 5; Closure_1_model=6
    if useOnlyVF:
        Closure_0_model=2; Closure_1_model=3
    if movingDomain:
        Closure_0_model += 1; Closure_1_model += 1
else:
    Closure_0_model = None
    Closure_1_model = None
# Two-phase RANS coefficients. The two branches previously duplicated the
# same 23 keyword arguments; build the shared set once and add the
# porosity/drag-zone parameters only for the sponge/levee configurations.
_rans2p_kwargs = dict(epsFact=epsFact_viscosity,
                      sigma=0.0,
                      rho_0=rho_0,
                      nu_0=nu_0,
                      rho_1=rho_1,
                      nu_1=nu_1,
                      g=g,
                      nd=nd,
                      VF_model=1,
                      LS_model=LS_model,
                      Closure_0_model=Closure_0_model,
                      Closure_1_model=Closure_1_model,
                      epsFact_density=epsFact_density,
                      stokes=False,
                      useVF=useVF,
                      useRBLES=useRBLES,
                      useMetrics=useMetrics,
                      eb_adjoint_sigma=1.0,
                      eb_penalty_constant=weak_bc_penalty_constant,
                      forceStrongDirichlet=ns_forceStrongDirichlet,
                      turbulenceClosureModel=ns_closure,
                      movingDomain=movingDomain)
if spongeLayer or levee or slopingSpongeLayer:
    # Absorption/porous zones need the per-zone porosity and drag fields.
    _rans2p_kwargs.update(porosityTypes=porosityTypes,
                          dragAlphaTypes=dragAlphaTypes,
                          dragBetaTypes=dragBetaTypes,
                          epsFact_solid=epsFact_solidTypes)
coefficients = RANS2P.Coefficients(**_rans2p_kwargs)
def getDBC_p(x,flag):
    """Pressure Dirichlet BCs: zero gauge pressure at the top boundary,
    outflow pressure on the right and back walls; no BC elsewhere."""
    if flag == boundaryTags['top']:
        return lambda x, t: 0.0
    if flag in (boundaryTags['right'], boundaryTags['back']):
        return outflowPressure
def getDBC_u(x,flag):
    """x-velocity Dirichlet BCs: inflow profile on the left wall, ambient
    current on right/back/front walls; no BC elsewhere."""
    if flag == boundaryTags['left']:
        return twpflowVelocity_u
    if flag in (boundaryTags['right'], boundaryTags['back'], boundaryTags['front']):
        return u_current
def getDBC_v(x,flag):
    """y-velocity Dirichlet BCs: inflow profile on the left wall, ambient
    current on right/back/front walls; no BC elsewhere."""
    if flag == boundaryTags['left']:
        return twpflowVelocity_v
    if flag in (boundaryTags['right'], boundaryTags['back'], boundaryTags['front']):
        return v_current
def getDBC_w(x,flag):
    """z-velocity Dirichlet BCs: inflow profile on the left wall, zero on
    right/back/front walls; no BC elsewhere."""
    if flag == boundaryTags['left']:
        return twpflowVelocity_w
    if flag in (boundaryTags['right'], boundaryTags['back'], boundaryTags['front']):
        return lambda x, t: 0.0
# Component -> Dirichlet BC selector (0: pressure, 1: u, 2: v, 3: w).
dirichletConditions = {0:getDBC_p,
                       1:getDBC_u,
                       2:getDBC_v,
                       3:getDBC_w}
def getAFBC_p(x,flag):
    """Advective flux BCs for the continuity equation: prescribed inflow
    mass flux on the left and front walls, no flow through the bottom."""
    if flag == boundaryTags['left']:
        return lambda x, t: -twpflowVelocity_u(x, t)
    if flag == boundaryTags['front']:
        return lambda x, t: -currentVelocity
    if flag == boundaryTags['bottom']:
        return lambda x, t: 0.0
def getAFBC_u(x,flag):
    """No advective x-momentum flux through the bottom wall."""
    return (lambda x, t: 0.0) if flag == boundaryTags['bottom'] else None
def getAFBC_v(x,flag):
    """No advective y-momentum flux through the bottom wall."""
    return (lambda x, t: 0.0) if flag == boundaryTags['bottom'] else None
def getAFBC_w(x,flag):
    """No advective z-momentum flux through the bottom wall."""
    return (lambda x, t: 0.0) if flag == boundaryTags['bottom'] else None
def getDFBC_u(x,flag):
    """Zero diffusive x-momentum flux on bottom, top, and 'empty' faces."""
    if flag in (boundaryTags['bottom'], boundaryTags['top'], boundaryTags['empty']):
        return lambda x, t: 0.0
    return None
def getDFBC_v(x,flag):
    """Zero diffusive y-momentum flux on bottom, top, and 'empty' faces."""
    if flag in (boundaryTags['bottom'], boundaryTags['top'], boundaryTags['empty']):
        return lambda x, t: 0.0
    return None
def getDFBC_w(x,flag):
    """Zero diffusive z-momentum flux on bottom, top, and 'empty' faces."""
    if flag in (boundaryTags['bottom'], boundaryTags['top'], boundaryTags['empty']):
        return lambda x, t: 0.0
    return None
# Advective flux BCs per component (0: pressure/mass, 1-3: momentum).
advectiveFluxBoundaryConditions = {0:getAFBC_p,
                                   1:getAFBC_u,
                                   2:getAFBC_v,
                                   3:getAFBC_w}
# Diffusive flux BCs: momentum components only (pressure has none).
diffusiveFluxBoundaryConditions = {0:{},
                                   1:{1:getDFBC_u},
                                   2:{2:getDFBC_v},
                                   3:{3:getDFBC_w}}
class PerturbedSurface_p:
    """Hydrostatic initial pressure for a free surface at `waterLevel`."""
    def __init__(self,waterLevel):
        self.waterLevel=waterLevel
    def uOfXT(self,x,t):
        # Weight of the air column above the free surface (g[2] < 0).
        air_column = -(L[2] - self.waterLevel)*rho_1*g[2]
        if signedDistance(x) < 0:
            # Below the surface: add the water column between x and the surface.
            return air_column - (self.waterLevel - x[2])*rho_0*g[2]
        return air_column
class AtRest:
    """Initial condition that is identically zero (fluid at rest)."""
    def __init__(self):
        pass
    def uOfXT(self, x, t):
        # Zero regardless of position or time.
        return 0.0
class WaterVelocity_u:
    """Initial x-velocity: ambient current profile sampled at t=0."""
    def __init__(self):
        pass
    def uOfXT(self, x, t):
        # Time-independent: always evaluate the current at t=0.
        return u_current(x, 0)
class WaterVelocity_v:
    """Initial y-velocity: ambient current profile sampled at t=0."""
    def __init__(self):
        pass
    def uOfXT(self, x, t):
        # Time-independent: always evaluate the current at t=0.
        return v_current(x, 0)
# Initial conditions: hydrostatic pressure for the still-water level,
# ambient current for u/v, zero vertical velocity.
initialConditions = {0:PerturbedSurface_p(waterLine_z),
                     1:WaterVelocity_u(),
                     2:WaterVelocity_v(),
                     3:AtRest()}
| |
import torch
from deluca.lung.controllers import PID
from deluca.lung.core import Controller
from deluca.lung.utils import BreathWaveform
import itertools
import numpy as np
import random
from scipy.special import softmax
class DeepPIDResidualClipped(Controller):
    """PID base controller plus a small learned convolutional residual.

    During inhale the control signal is the PID output plus a 1e-3-scaled
    residual produced by a Conv1d network over the recent error history;
    during exhale the waveform's decay value is used directly. The final
    u_in is clamped to [0, clip].
    """
    def __init__(
        self,
        H=100,
        waveform=None,
        bptt=1,
        input_dim=1,
        activation=torch.nn.ReLU,
        history_len=10,
        kernel_size=5,
        normalize=False,
        time_as_feature=False,
        u_scaler=None,
        p_scaler=None,
        clip=100.0,
        pid_K=[3.0, 4.0, 0.0],
        **kwargs
    ):
        # NOTE(review): pid_K is a mutable default argument; it is only
        # read here (passed to PID), but a None-sentinel would be safer.
        super().__init__()
        # Conv over the featurized error history -> single scalar residual.
        self.model = torch.nn.Sequential(
            torch.nn.Conv1d(input_dim, H, kernel_size),
            activation(),
            torch.nn.Flatten(),
            torch.nn.Linear(H * (history_len - kernel_size + 1), 1),
        )
        self.time_as_feature = time_as_feature
        if self.time_as_feature:
            # Small MLP mapping a scalar cycle phase to an additive term.
            self.time_model = torch.nn.Sequential(torch.nn.Linear(1, 10), activation(), torch.nn.Linear(10, 1))
        multiplier = self.tensor(1.0)
        # Learnable residual scale; currently unused in compute_action,
        # which applies a fixed 1e-3 factor instead (see commented line there).
        self.residual_multiplier = torch.nn.Parameter(multiplier)
        # linear feature transform:
        # errs -> [average of last h errs, ..., average of last 2 errs, last err]
        # emulates low-pass filter bank
        self.featurizer = torch.ones((history_len, history_len), requires_grad=False).tril_()
        self.featurizer /= torch.arange(history_len, 0, -1).unsqueeze(0)
        self.history_len = history_len
        self.input_dim = input_dim
        self.waveform = waveform or BreathWaveform()
        self.pid_base = PID(K=pid_K)
        self.normalize = normalize
        self.clip = clip
        if normalize:
            self.u_scaler = u_scaler
            self.p_scaler = p_scaler
        self.reset()
    def reset(self):
        # Clear the error history and re-sync/reset the base PID controller.
        self.errs = [self.tensor(0.0)] * self.history_len
        self.pid_base.waveform = self.waveform
        self.pid_base.reset()
    def update(self, key, state):
        # Append `state` to the named history list attribute (e.g. 'errs').
        getattr(self, key).append(state)
    def compute_action(self, state, t):
        """Return (u_in, u_out) for measured pressure `state` at time `t`."""
        self.pid_base.waveform = self.waveform
        target = self.tensor(self.waveform.at(t))
        if self.normalize:
            target_scaled = self.p_scaler.transform(target).squeeze()
            state_scaled = self.p_scaler.transform(state).squeeze()
            self.errs.append(target_scaled - state_scaled)
        else:
            self.errs.append(target - state)
        decay = self.waveform.decay(t)
        if decay is None:
            # Inhale: PID output plus network residual over the last
            # history_len errors, shaped (1, 1, history_len) for Conv1d.
            trajectory = torch.stack(self.errs[-self.history_len:]).unsqueeze(0).unsqueeze(0)
            u_in_base, _ = self.pid_base.compute_action(state, t)
            u_in_residual = self.model(trajectory @ self.featurizer)
            if self.time_as_feature:
                u_in_residual += self.time_model(self.tensor(self.cycle_phase(t)).unsqueeze(0))
            # u_in = u_in_base + u_in_residual * 0.1 * torch.relu(torch.tanh(5e-2 * self.residual_multiplier))
            u_in = u_in_base + u_in_residual * 1e-3
        else:
            # Exhale: follow the waveform's decay value directly.
            u_in = self.tensor(decay)
        u_in = torch.clamp(u_in, min=0.0, max=self.clip).squeeze()
        return (u_in, self.u_out(t))
    def train(
        self,
        sim,
        pip_feed="parallel",
        duration=3,
        dt=0.03,
        epochs=100,
        use_noise=False,
        optimizer=torch.optim.Adam,
        optimizer_params={"lr": 1e-3, "weight_decay": 1e-4},
        loss_fn=torch.nn.L1Loss,
        loss_fn_params={},
        scheduler=torch.optim.lr_scheduler.ReduceLROnPlateau,
        scheduler_params={"factor": 0.9, "patience": 10},
        use_tqdm=True,
        print_loss=1,
        shuffle=False,
        device="cpu",
    ):
        """Train on one simulator across a fixed set of PIP targets.

        pip_feed="parallel" accumulates one loss over all PIPs per epoch
        and steps once; "sequential" steps the optimizer once per PIP.
        NOTE(review): this shadows torch.nn.Module.train(mode) if
        Controller derives from nn.Module — confirm no caller relies on that.
        """
        optimizer = optimizer(self.parameters(), **optimizer_params)
        scheduler = scheduler(optimizer, **scheduler_params)
        loss_fn = loss_fn(**loss_fn_params)
        tt = torch.linspace(0, duration, int(duration / dt))
        losses = []
        torch.autograd.set_detect_anomaly(True)
        PIPs = [10, 15, 20, 25, 30, 35]
        PEEP = 5
        # TODO: handle device-awareness
        for epoch in range(epochs):
            if pip_feed == "parallel":
                self.zero_grad()
                loss = torch.tensor(0.0, device=device, requires_grad=True)
            # random.sample of all 6 PIPs == a shuffle of the full set.
            for PIP in random.sample(PIPs, 6):
                if pip_feed == "sequential":
                    self.zero_grad()
                    loss = torch.tensor(0.0, device=device, requires_grad=True)
                self.waveform = BreathWaveform((PEEP, PIP))
                self.reset()
                sim.reset()
                for t in tt:
                    # Optional additive observation noise on the simulator.
                    sim.pressure += use_noise * torch.normal(mean=torch.tensor(1.5), std=1.)
                    pressure = sim.pressure
                    u_in, u_out = self(pressure, t)
                    sim(u_in, u_out,
                        t) # potentially add multiplicative noise by * torch.normal(mean=torch.tensor(1.5), std=0.5)
                    # Only penalize tracking error during inhale (u_out == 0).
                    if u_out == 0:
                        loss = loss + loss_fn(torch.tensor(self.waveform.at(t)), pressure)
                if pip_feed == "sequential":
                    loss.backward(retain_graph=True)
                    optimizer.step()
                    scheduler.step(loss)
                    per_step_loss = loss / len(tt)
                    losses.append(per_step_loss)
                    if epoch % print_loss == 0:
                        print(
                            f"Epoch: {epoch}, PIP: {PIP}\tLoss: {per_step_loss:.2f}\tLR: {optimizer.param_groups[0]['lr']}"
                        )
            if pip_feed == "parallel":
                loss.backward(retain_graph=True)
                optimizer.step()
                scheduler.step(loss)
                per_step_loss = loss / len(tt)
                losses.append(per_step_loss)
                if epoch % print_loss == 0:
                    print(
                        f"Epoch: {epoch}\tLoss: {per_step_loss:.2f}\tLR: {optimizer.param_groups[0]['lr']}"
                    )
        return losses
    def train_global(
        self,
        sims,
        pip_feed="parallel",
        duration=3,
        dt=0.03,
        epochs=100,
        use_noise=False,
        optimizer=torch.optim.Adam,
        optimizer_params={"lr": 1e-3, "weight_decay": 1e-4},
        loss_fn=torch.nn.L1Loss,
        loss_fn_params={},
        scheduler=torch.optim.lr_scheduler.ReduceLROnPlateau,
        scheduler_params={"factor": 0.9, "patience": 10},
        use_tqdm=True,
        print_loss=1,
        shuffle=False,
        device="cpu",
        PIPs = [10, 15, 20, 25, 30, 35],
    ):
        """Like train(), but iterates the cross product of PIPs and a list
        of simulators, sharing one controller across all of them."""
        optimizer = optimizer(self.parameters(), **optimizer_params)
        scheduler = scheduler(optimizer, **scheduler_params)
        loss_fn = loss_fn(**loss_fn_params)
        tt = torch.linspace(0, duration, int(duration / dt))
        losses = []
        torch.autograd.set_detect_anomaly(True)
        # PIPs = [10, 15, 20, 25, 30, 35]
        PEEP = 5
        # TODO: handle device-awareness
        for epoch in range(epochs):
            if pip_feed == "parallel":
                self.zero_grad()
                loss = torch.tensor(0.0, device=device, requires_grad=True)
            for PIP, sim in itertools.product(PIPs, sims):
                if pip_feed == "sequential":
                    self.zero_grad()
                    loss = torch.tensor(0.0, device=device, requires_grad=True)
                self.waveform = BreathWaveform((PEEP, PIP))
                self.reset()
                sim.reset()
                for t in tt:
                    # Optional additive observation noise on the simulator.
                    sim.pressure += use_noise * torch.normal(mean=torch.tensor(1.5), std=1.)
                    pressure = sim.pressure
                    u_in, u_out = self(pressure, t)
                    sim(u_in, u_out,
                        t) # potentially add multiplicative noise by * torch.normal(mean=torch.tensor(1.5), std=0.5)
                    # Only penalize tracking error during inhale (u_out == 0).
                    if u_out == 0:
                        loss = loss + loss_fn(torch.tensor(self.waveform.at(t)), pressure)
                if pip_feed == "sequential":
                    loss.backward(retain_graph=True)
                    optimizer.step()
                    scheduler.step(loss)
                    per_step_loss = loss / len(tt)
                    losses.append(per_step_loss)
                    if epoch % print_loss == 0:
                        print(
                            f"Epoch: {epoch}, PIP: {PIP}\tLoss: {per_step_loss:.2f}\tLR: {optimizer.param_groups[0]['lr']}"
                        )
            if pip_feed == "parallel":
                loss.backward(retain_graph=True)
                optimizer.step()
                scheduler.step(loss)
                per_step_loss = loss / len(tt)
                losses.append(per_step_loss)
                if epoch % print_loss == 0:
                    print(
                        f"Epoch: {epoch}\tLoss: {per_step_loss:.2f}\tLR: {optimizer.param_groups[0]['lr']}"
                    )
        return losses
    def train_global_boosted(
        self,
        sim_datas, #Make this a list of (R, C, sim)s
        duration=3,
        dt=0.03,
        epochs=100,
        use_noise=False,
        optimizer=torch.optim.Adam,
        optimizer_params={"lr": 1e-3, "weight_decay": 1e-4},
        loss_fn=torch.nn.L1Loss,
        loss_fn_params={},
        scheduler=torch.optim.lr_scheduler.ReduceLROnPlateau,
        scheduler_params={"factor": 0.9, "patience": 1000},
        alpha=0.5,
        device="cpu",
    ):
        """Boosted multi-simulator training: each (PIP, sim) pair's loss is
        weighted by softmax(alpha * its loss from the previous epoch), so
        harder cases contribute more to the objective."""
        optimizer = optimizer(self.parameters(), **optimizer_params)
        scheduler = scheduler(optimizer, **scheduler_params)
        loss_fn = loss_fn(**loss_fn_params)
        tt = torch.linspace(0, duration, int(duration / dt))
        losses = []
        torch.autograd.set_detect_anomaly(True)
        PIPs = [10, 15, 20, 25, 30, 35]
        PEEP = 5
        PIPSims = list(itertools.product(PIPs, sim_datas))
        # Per-(PIP, sim) running loss used to compute boosting weights.
        loss_by_simpip = np.zeros(len(PIPSims))
        self.zero_grad()
        idxs =range(len(PIPSims))
        for epoch in range(epochs):
            # Boosting weights: higher historical loss -> larger weight.
            p = softmax(alpha * loss_by_simpip)
            loss = torch.tensor(0.0, device=device, requires_grad=True)
            for idx in idxs:
                PIP, (R, C, sim) = PIPSims[idx]
                self.waveform = BreathWaveform((PEEP, PIP))
                self.reset()
                sim.reset()
                loss_idx = 0.0
                for t in tt:
                    # Optional additive observation noise on the simulator.
                    sim.pressure += use_noise * torch.normal(mean=torch.tensor(1.5), std=1.)
                    pressure = sim.pressure
                    u_in, u_out = self(pressure, t)
                    sim(u_in, u_out, t)
                    if u_out == 0:
                        curr_loss = loss_fn(torch.tensor(self.waveform.at(t)), pressure)
                        # Weighted loss drives gradients; unweighted loss_idx
                        # tracks this pair's raw performance for boosting.
                        loss = loss + curr_loss * p[idx]
                        loss_idx += curr_loss
                loss.backward(retain_graph=True)
                per_step_loss = loss_idx / len(tt)
                losses.append(per_step_loss)
                loss_by_simpip[idx] = per_step_loss
                optimizer.step()
                self.zero_grad()
                print(f"Epoch: {epoch}, PIP: {PIP} R: {R} C: {C}\tLoss: {per_step_loss:.2f}\tLR: {optimizer.param_groups[0]['lr']}")
            for i in range(len(PIPSims)):
                PIP,(R,C, _) = PIPSims[i]
                print(f"PIP, {PIP}, R {R}, C {C}, loss {loss_by_simpip[i] :.2f}")
            print(f"loss mean {np.mean(loss_by_simpip):.2f}+-{np.std(loss_by_simpip):.2f}")
        return losses
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import string
import sys
import urllib
import xbmcplugin
import xbmcgui
import common
import database_tv as tv_db
# Handle used for all xbmcplugin directory calls in this module.
pluginhandle = common.pluginHandle
###################### Television
def list_tv_root():
    """Build the root TV directory: 'Favorites' and 'All Shows' entries,
    each with context-menu actions (export / force refresh)."""
    # Refresh the series cache without forcing a full rebuild.
    tv_db.update_series_list(False)
    cm_u = sys.argv[0] + '?mode=tv&sitemode=list_tvshows_favor_filtered_export'
    cm = [('Export Favorites to Library', 'XBMC.RunPlugin(%s)' % cm_u)]
    common.add_directory('Favorites', 'tv', 'list_tvshows_favor_filtered', contextmenu=cm)
    cm = []
    cm_u = sys.argv[0] + '?mode=tv&sitemode=list_tvshows_export&url=""'
    # cm.append(('Export All to Library', 'XBMC.RunPlugin(%s)' % cm_u))
    cm.append(('Force TV Series Refresh', 'XBMC.RunPlugin(%s)' % (sys.argv[0] + '?mode=tv&sitemode=refresh_db')))
    common.add_directory('All Shows', 'tv', 'list_tvshows_az', contextmenu=cm)
    # common.add_directory('Genres', 'tv', 'list_tvshow_types', 'GENRE')
    #common.add_directory('Years', 'tv', 'list_tvshow_types', 'YEARS')
    #common.add_directory('TV Rating', 'tv', 'list_tvshow_types', 'MPAA')
    #common.add_directory('Actors', 'tv', 'list_tvshow_types', 'ACTORS')
    #common.add_directory('Watched', 'tv', 'list_tvshows_watched_filtered')
    xbmcplugin.endOfDirectory(pluginhandle)
def list_tvshows_az():
    """A-Z index: one directory entry per initial ('#' for non-letters)."""
    for letter in '#' + string.uppercase:
        common.add_directory(letter, 'tv', 'list_tvshow_alpha_filtered', letter)
    xbmcplugin.endOfDirectory(pluginhandle)
def list_tvshow_types(type=False):
    """List distinct values of a show attribute as filter directories.

    type: one of 'GENRE', 'YEARS', 'MPAA', 'ACTORS'; falls back to the
    plugin url argument when not given.
    """
    if not type:
        type = common.args.url
    # Attribute selector -> (listing mode for entries, DB column to query).
    dispatch = {
        'GENRE': ('list_tvshows_genre_filtered', 'genres'),
        'YEARS': ('list_tvshows_years_filtered', 'year'),
        'MPAA': ('list_tvshows_mpaa_filtered', 'mpaa'),
        'ACTORS': ('list_tvshows_actors_filtered', 'actors'),
    }
    if type in dispatch:
        mode, column = dispatch[type]
        items = tv_db.get_types(column)
    else:
        # BUG FIX: an unrecognized type previously left `mode`/`items`
        # unbound and raised UnboundLocalError; show an empty listing instead.
        mode, items = None, []
    for item in items:
        common.add_directory(item, 'tv', mode, item)
    xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
    xbmcplugin.endOfDirectory(pluginhandle)
def list_tvshows_genre_filtered():
    # Show only series matching the genre passed in the plugin url.
    list_tvshows(export=False, genrefilter=common.args.url)
def list_tvshows_years_filtered():
    # Show only series matching the year passed in the plugin url.
    list_tvshows(export=False, yearfilter=common.args.url)
def list_tvshows_mpaa_filtered():
    # Show only series matching the MPAA rating passed in the plugin url.
    list_tvshows(export=False, mpaafilter=common.args.url)
def list_tvshows_creators_filtered():
    # Show only series matching the creator passed in the plugin url.
    list_tvshows(export=False, creatorfilter=common.args.url)
def list_tvshows_favor_filtered_export():
    # Export the favorites list to the XBMC library.
    list_tvshows(export=True, favorfilter=True)
def list_tvshows_favor_filtered():
    # Show only series flagged as favorites.
    list_tvshows(export=False, favorfilter=True)
def list_tvshows_export():
    # Export all series to the XBMC library.
    list_tvshows(export=True)
def list_tvshow_alpha_filtered():
    # Show only series whose title starts with the letter in the plugin url.
    letter = common.args.url
    list_tvshows(alphafilter=letter)
def list_tvshows(export=False, mpaafilter=False, genrefilter=False, creatorfilter=False, yearfilter=False,
                 favorfilter=False, alphafilter=False):
    """List series matching the filters, or export them to the XBMC library.

    NOTE(review): only favorfilter/alphafilter are forwarded to
    tv_db.get_series here; the mpaa/genre/creator/year filter arguments are
    accepted but currently unused — confirm against tv_db.get_series.
    """
    if export:
        import xbmclibrary
        added_folders = xbmclibrary.setup_library()
    shows = tv_db.get_series(favorfilter=favorfilter,alphafilter=alphafilter).fetchall()
    total = len(shows)
    for showdata in shows:
        if export:
            xbmclibrary.export_series(showdata)
        else:
            _add_series_item(showdata, total)
    if export:
        xbmclibrary.complete_export(added_folders)
    else:
        xbmcplugin.setContent(pluginhandle, 'tvshows')
        xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE)
        # xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)
        # xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_MPAA_RATING)
        # xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE)
        xbmcplugin.endOfDirectory(pluginhandle)
def _add_series_item(data, total=0):
    """Add one series db row as a folder item with context-menu entries.

    data: mapping with series_id, title, studio, year, directors, genres,
    actors and favor fields. total: total item count hint for XBMC.
    """
    total_seasons = tv_db.get_series_season_count(data['series_id'])
    labels = {
        'title': data['title'],
        'tvshowtitle': data['title'],
        'studio': data['studio'],
        'year': data['year']
    }
    if data['directors']:
        labels['director'] = ' / '.join(data['directors'].split(','))
    if data['genres']:
        labels['genres'] = ' / '.join(data['genres'].split(','))
    if data['actors']:
        labels['cast'] = data['actors'].split(',')
    item = xbmcgui.ListItem(data['title'])
    item.setInfo(type='Video', infoLabels=labels)
    item.setProperty('TotalSeasons', str(total_seasons))
    contextmenu = []
    # Bug fix: the title is being embedded in a plugin URL, so it must be
    # escaped with quote_plus. The previous unquote_plus corrupted titles
    # containing spaces or '+' (compare create_play_link below, and
    # favor_series/unfavor_series which unquote_plus the received title).
    title_arg = urllib.quote_plus(data['title'])
    if data['favor']:
        cm_u = sys.argv[0] + '?url={0}&mode=tv&sitemode=unfavor_series&title={1}'.format(data['series_id'],
                                                                                         title_arg)
        contextmenu.append((common.localise(39006), 'XBMC.RunPlugin(%s)' % cm_u))
    else:
        cm_u = sys.argv[0] + '?url={0}&mode=tv&sitemode=favor_series&title={1}'.format(data['series_id'],
                                                                                       title_arg)
        contextmenu.append((common.localise(39007), 'XBMC.RunPlugin(%s)' % cm_u))
    cm_u = sys.argv[0] + '?url={0}&mode=tv&sitemode=update_series'.format(data['series_id'])
    contextmenu.append(('Force Series Update', 'XBMC.RunPlugin(%s)' % cm_u))
    item.addContextMenuItems(contextmenu)
    u = sys.argv[0] + '?url={0}&mode=tv&sitemode=list_tv_seasons'.format(data['series_id'])
    xbmcplugin.addDirectoryItem(pluginhandle, url=u, listitem=item, isFolder=True, totalItems=total)
def list_tv_seasons():
    """Directory of seasons for the series id held in the url argument."""
    series_id = common.args.url
    tv_db.update_series(tv_db.lookup_series(series_id).fetchone())
    season_rows = tv_db.get_seasons(series_id).fetchall()
    for row in season_rows:
        _add_season_item(row, len(season_rows))
    xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
    xbmcplugin.setContent(pluginhandle, 'tvshows')
    xbmcplugin.endOfDirectory(pluginhandle)
def _add_season_item(data, total=0):
    """Add one season folder item with export/update context-menu entries."""
    item = xbmcgui.ListItem(data['title'])
    item.setInfo(type='Video', infoLabels={
        'title': data['title'],
        'tvshowtitle': data['series_title'],
        'season': data['season_no']
    })
    base = sys.argv[0] + '?url={0}&mode=tv&sitemode={1}'
    menu = [
        ('Export Season to Library',
         'XBMC.RunPlugin(%s)' % base.format(data['season_id'], 'export_season')),
        ('Force Season Update',
         'XBMC.RunPlugin(%s)' % base.format(data['season_id'], 'update_season')),
    ]
    item.addContextMenuItems(menu)
    folder_url = base.format(data['season_id'], 'list_episodes')
    xbmcplugin.addDirectoryItem(pluginhandle, url=folder_url, listitem=item, isFolder=True, totalItems=total)
def list_episodes(export=False):
    """Directory of episodes for the season id in the url argument.

    NOTE(review): the ``export`` flag is accepted but never used here.
    """
    season_id = common.args.url
    tv_db.update_season(tv_db.lookup_season(season_id).fetchone())
    rows = tv_db.get_episodes(season_id).fetchall()
    for row in rows:
        _add_episode_item(row, len(rows))
    xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE)
    xbmcplugin.setContent(pluginhandle, 'Episodes')
    xbmcplugin.endOfDirectory(pluginhandle)
def _add_episode_item(data, total):
    """Add one playable (non-folder) episode entry with an export context item."""
    labels = {
        'title': data['title'],
        'tvshowtitle': data['series_title'],
        'season': data['season_no'],
        'episode': data['episode_no'],
        'playcount': data['play_count']
    }
    air_date = data['air_date']
    if air_date:
        labels['year'] = air_date[:4]
        labels['aired'] = air_date[:10]
    item = xbmcgui.ListItem(data['title'])
    item.setInfo(type='Video', infoLabels=labels)
    # (watched/unwatched toggling was stubbed out here; only export remains)
    cm_u = sys.argv[0] + '?url={0}&season_id={1}&mode=tv&sitemode=export_episode'.format(data['title'], data['season_id'])
    item.addContextMenuItems([('Export Episode', 'XBMC.RunPlugin(%s)' % cm_u)])
    xbmcplugin.addDirectoryItem(pluginhandle, url=create_play_link(data), listitem=item,
                                isFolder=False, totalItems=total)
def create_play_link(episode):
    """Return the plugin URL that resolves and plays *episode*."""
    template = ('?url={0}&mode=tv&sitemode=play_episode&season_id={1}&episode_title={2}'
                '&episode_no={3}&season_no={4}&series_title={5}')
    params = (urllib.quote_plus(episode['season_url']),
              episode['season_id'],
              urllib.quote_plus(episode['title']),
              episode['episode_no'],
              episode['season_no'],
              urllib.quote_plus(episode['series_title']))
    return sys.argv[0] + template.format(*params)
def play_episode():
    """Resolve the selected episode's hosted links and hand one to XBMC."""
    try:
        import urlresolver
    except ImportError:
        # Narrowed from a bare except: only a missing URLResolver add-on
        # should produce this dialog; anything else should propagate.
        xbmcgui.Dialog().ok("Play Error", "Failed to import URLResolver",
                            "A component needed by PFTV is missing on your system")
        xbmcplugin.setResolvedUrl(pluginhandle, False, xbmcgui.ListItem())
        return
    links = tv_db.get_media_urls(urllib.unquote_plus(common.args.url),
                                 urllib.unquote_plus(common.args.episode_title))
    sources = [urlresolver.HostedMediaFile(host=link['host'],
                                           media_id=link['media_id'],
                                           title=link['title'])
               for link in links]
    sources = urlresolver.filter_source_list(sources)
    if len(sources) == 0:
        xbmcgui.Dialog().ok("Play Error", "Sorry, no compatible links are available at this time")
        xbmcplugin.setResolvedUrl(pluginhandle, False, xbmcgui.ListItem())
        return
    source = urlresolver.choose_source(sources)
    if source:
        stream_url = source.resolve()
        xbmcplugin.setResolvedUrl(pluginhandle, True, xbmcgui.ListItem(path=stream_url))
    else:
        xbmcplugin.setResolvedUrl(pluginhandle, False, xbmcgui.ListItem())
##########################################
# Context Menu Links
##########################################
def refresh_db():
    """Force a refresh of the cached series list."""
    tv_db.update_series_list(True)
def export_season():
    """Export every episode of the selected season to the XBMC library."""
    import xbmclibrary
    season_row = tv_db.lookup_season(common.args.url).fetchone()
    tv_db.update_season(season_row)
    folders = xbmclibrary.setup_library()
    xbmclibrary.export_season(season_row)
    xbmclibrary.complete_export(folders)
def export_episode():
    """Export the selected episode (by title + season id) to the XBMC library."""
    import xbmclibrary
    episode_row = tv_db.lookup_episode(common.args.url, common.args.season_id)
    folders = xbmclibrary.setup_library()
    xbmclibrary.export_episode(episode_row)
    xbmclibrary.complete_export(folders)
def favor_series():
    """Mark the selected series as a favorite and refresh the menu."""
    content_id = common.args.url
    if tv_db.favor_series(content_id) > 0:
        common.notification('Added ' + urllib.unquote_plus(common.args.title) + ' to favorites')
        common.refresh_menu()
    else:
        # Fixed copy/paste from the movies module: this handler is for series.
        common.notification('Error adding series to favorites', isError=True)
def unfavor_series():
    """Remove the selected series from favorites and refresh the menu."""
    content_id = common.args.url
    if tv_db.unfavor_series(content_id) > 0:
        common.notification('Removed ' + urllib.unquote_plus(common.args.title) + ' from favorites')
        common.refresh_menu()
    else:
        # Fixed copy/paste from the movies module: this handler is for series.
        common.notification('Error removing series from favorites', isError=True)
def update_series():
    """Force-refresh metadata for one series and notify the user."""
    row = tv_db.lookup_series(common.args.url).fetchone()
    tv_db.update_series(row, True)
    common.notification('{0} Updated'.format(row['title']))
def update_season():
    """Force-refresh metadata for one season and notify the user."""
    row = tv_db.lookup_season(common.args.url).fetchone()
    tv_db.update_season(row, True)
    common.notification('{0} Updated'.format(row['title']))
def watch_episode():
    """Increment the watch count for the selected episode and refresh."""
    if tv_db.watch_episode(common.args.url) > 0:
        common.refresh_menu()
    else:
        common.notification('Could not update watch count', isError=True)
def unwatch_episode():
    """Reset the watch state for the selected episode and refresh.

    NOTE(review): unlike watch_episode, the db return value is not checked.
    """
    tv_db.unwatch_episode(common.args.url)
    common.refresh_menu()
| |
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import optparse
import re
import socket
from swift.common.utils import expand_ipv6
def tiers_for_dev(dev):
    """
    Return the ascending chain of tiers containing *dev*, ordered by length:
    (region,), (region, zone), (region, zone, ip:port) and finally
    (region, zone, ip:port, id).

    :param dev: device dict with 'region', 'zone', 'ip', 'port' and 'id'
    :returns: tuple of four tier tuples
    """
    ip_port = "{ip}:{port}".format(ip=dev.get('ip'), port=dev.get('port'))
    path = (dev['region'], dev['zone'], ip_port, dev['id'])
    return tuple(path[:depth] for depth in range(1, 5))
def build_tier_tree(devices):
    """
    Map every tier to the set of its child tiers, given device dicts.

    A synthetic root tier ``()`` is included so the result forms a single
    tree rather than a forest: ``()`` maps to the region tiers, each region
    tier to its zone tiers, each zone tier to its ip:port tiers, and each
    ip:port tier to the device tiers beneath it, e.g.::

        {(): {(1,)},
         (1,): {(1, 1)},
         (1, 1): {(1, 1, '192.168.101.1:6000')},
         (1, 1, '192.168.101.1:6000'): {(1, 1, '192.168.101.1:6000', 0)}}

    :devices: device dicts from which to generate the tree
    :returns: defaultdict(set) mapping parent tier -> set of child tiers
    """
    tree = defaultdict(set)
    for device in devices:
        for tier in tiers_for_dev(device):
            # tier[:-1] of a length-1 tier is (), the synthetic root
            tree[tier[:-1]].add(tier)
    return tree
def validate_and_normalize_ip(ip):
    """
    Return the normalized form of a valid IP address.

    The address is lowercased; IPv6 addresses are additionally fully
    expanded. Raises ValueError for anything that is not a valid IP.
    """
    lowered = ip.lower()
    if is_valid_ipv6(lowered):
        return expand_ipv6(lowered)
    if is_valid_ipv4(lowered):
        return lowered
    raise ValueError('Invalid ip %s' % ip)
def validate_and_normalize_address(address):
    """
    Return the normalized form of an IP address or hostname.

    Bracketed input ("[...]") is treated strictly as an IP literal, per the
    RFC 1123 section 2.1 discussion of dotted-decimal delimiting (a valid
    hostname can never be entirely numeric, so bracketed forms are
    unambiguous). Otherwise the value is lowercased and accepted as IPv4,
    IPv6 (fully expanded) or hostname.

    :raises ValueError: when the input is neither a valid IP nor hostname
    """
    stripped = address.lstrip('[').rstrip(']')
    if address.startswith('[') and address.endswith(']'):
        return validate_and_normalize_ip(stripped)
    candidate = stripped.lower()
    if is_valid_ipv4(candidate):
        return candidate
    if is_valid_ipv6(candidate):
        return expand_ipv6(candidate)
    if is_valid_hostname(candidate):
        return candidate
    raise ValueError('Invalid address %s' % address)
def is_valid_ip(ip):
    """Return True when *ip* parses as either an IPv4 or IPv6 address."""
    return any(check(ip) for check in (is_valid_ipv4, is_valid_ipv6))
def is_valid_ipv4(ip):
    """Return True when *ip* is a syntactically valid IPv4 address."""
    try:
        socket.inet_pton(socket.AF_INET, ip)
        return True
    except socket.error:
        return False
def is_valid_ipv6(ip):
    """Return True when *ip* is a syntactically valid IPv6 address."""
    try:
        socket.inet_pton(socket.AF_INET6, ip)
        return True
    except socket.error:  # not a valid address
        return False
def is_valid_hostname(hostname):
    """
    Return True when *hostname* is a syntactically valid hostname.

    Each dot-separated label must be 1-63 characters of [A-Za-z0-9-] and
    may not begin or end with a hyphen; total length is capped at 255.
    A single trailing dot (FQDN form) is permitted.
    """
    if len(hostname) < 1 or len(hostname) > 255:
        return False
    if hostname[-1] == ".":
        # strip exactly one dot from the right, if present
        hostname = hostname[:-1]
    # raw string: '\d' in a plain literal is an invalid escape sequence and
    # warns (eventually errors) on modern Pythons
    allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(x) for x in hostname.split("."))
def is_local_device(my_ips, my_port, dev_ip, dev_port):
    """
    Return True when (dev_ip, dev_port) identifies this node, i.e. when
    dev_ip is among my_ips and dev_port equals my_port.

    A hostname dev_ip is first resolved (IPv4 and IPv6 via getaddrinfo)
    before comparison; resolution failure counts as "not local".
    """
    if is_valid_hostname(dev_ip) and not is_valid_ip(dev_ip):
        try:
            # getaddrinfo handles both address families
            for info in socket.getaddrinfo(dev_ip, dev_port):
                family, resolved_ip = info[0], info[4][0]
                if family == socket.AF_INET6:
                    resolved_ip = expand_ipv6(resolved_ip)
                if resolved_ip in my_ips and dev_port == my_port:
                    return True
            return False
        except socket.gaierror:
            return False
    return dev_ip in my_ips and dev_port == my_port
def parse_search_value(search_value):
    """The <search-value> can be of the form::

        d<device_id>r<region>z<zone>-<ip>:<port>R<r_ip>:<r_port>/
         <device_name>_<meta>

    Where <r_ip> and <r_port> are replication ip and port.

    Any part is optional, but you must include at least one part.

    Examples::

        d74              Matches the device id 74
        r4               Matches devices in region 4
        z1               Matches devices in zone 1
        z1-1.2.3.4       Matches devices in zone 1 with the ip 1.2.3.4
        1.2.3.4          Matches devices in any zone with the ip 1.2.3.4
        z1:5678          Matches devices in zone 1 using port 5678
        :5678            Matches devices that use port 5678
        R5.6.7.8         Matches devices that use replication ip 5.6.7.8
        R:5678           Matches devices that use replication port 5678
        1.2.3.4R5.6.7.8  Matches devices that use ip 1.2.3.4 and replication ip
                         5.6.7.8
        /sdb1            Matches devices with the device name sdb1
        _shiny           Matches devices with shiny in the meta data
        _"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
        [::1]            Matches devices in any zone with the ip ::1
        z1-[::1]:5678    Matches devices in zone 1 with ip ::1 and port 5678

    Most specific example::

        d74r4z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"

    Nerd explanation:

        All items require their single character prefix except the ip, in
        which case the - is optional unless the device id or zone is also
        included.

    :param search_value: a string of the form described above
    :returns: dict of the parsed fields; possible keys are id, region,
              zone, ip, port, replication_ip, replication_port, device
              and meta
    :raises ValueError: if any text is left over after parsing
    """
    # Each stanza below consumes its field from the front of search_value;
    # anything remaining at the end means the input was malformed.
    orig_search_value = search_value
    match = {}
    if search_value.startswith('d'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['id'] = int(search_value[1:i])
        search_value = search_value[i:]
    if search_value.startswith('r'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['region'] = int(search_value[1:i])
        search_value = search_value[i:]
    if search_value.startswith('z'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['zone'] = int(search_value[1:i])
        search_value = search_value[i:]
    if search_value.startswith('-'):
        # optional separator before the ip
        search_value = search_value[1:]
    if len(search_value) and search_value[0].isdigit():
        # bare IPv4 address: digits and dots
        i = 1
        while i < len(search_value) and search_value[i] in '0123456789.':
            i += 1
        match['ip'] = search_value[:i]
        search_value = search_value[i:]
    elif len(search_value) and search_value[0] == '[':
        # bracketed IPv6 address: consume through the closing ']'
        i = 1
        while i < len(search_value) and search_value[i] != ']':
            i += 1
        i += 1
        match['ip'] = search_value[:i].lstrip('[').rstrip(']')
        search_value = search_value[i:]
    if 'ip' in match:
        # ipv6 addresses are converted to all lowercase
        # and use the fully expanded representation
        match['ip'] = validate_and_normalize_ip(match['ip'])
    if search_value.startswith(':'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['port'] = int(search_value[1:i])
        search_value = search_value[i:]
    # replication parameters
    if search_value.startswith('R'):
        search_value = search_value[1:]
        if len(search_value) and search_value[0].isdigit():
            i = 1
            while (i < len(search_value) and
                   search_value[i] in '0123456789.'):
                i += 1
            match['replication_ip'] = search_value[:i]
            search_value = search_value[i:]
        elif len(search_value) and search_value[0] == '[':
            i = 1
            while i < len(search_value) and search_value[i] != ']':
                i += 1
            i += 1
            match['replication_ip'] = search_value[:i].lstrip('[').rstrip(']')
            search_value = search_value[i:]
        if 'replication_ip' in match:
            # ipv6 addresses are converted to all lowercase
            # and use the fully expanded representation
            match['replication_ip'] = \
                validate_and_normalize_ip(match['replication_ip'])
        if search_value.startswith(':'):
            i = 1
            while i < len(search_value) and search_value[i].isdigit():
                i += 1
            match['replication_port'] = int(search_value[1:i])
            search_value = search_value[i:]
    if search_value.startswith('/'):
        # device name runs until '_' or end of string
        i = 1
        while i < len(search_value) and search_value[i] != '_':
            i += 1
        match['device'] = search_value[1:i]
        search_value = search_value[i:]
    if search_value.startswith('_'):
        # everything after '_' is free-form meta data
        match['meta'] = search_value[1:]
        search_value = ''
    if search_value:
        raise ValueError('Invalid <search-value>: %s' %
                         repr(orig_search_value))
    return match
def parse_search_values_from_opts(opts):
    """
    Convert optparse style options into a dictionary for searching.

    :param opts: optparse style options
    :returns: a dictionary with search values to filter devices;
              supported keys are id, region, zone, ip, port,
              replication_ip, replication_port, device, weight, meta
    """
    supported = ('id', 'region', 'zone', 'ip', 'port', 'replication_ip',
                 'replication_port', 'device', 'weight', 'meta')
    search_values = {}
    for key in supported:
        value = getattr(opts, key, None)
        if not value:
            continue
        if key in ('ip', 'replication_ip'):
            value = validate_and_normalize_address(value)
        search_values[key] = value
    return search_values
def parse_change_values_from_opts(opts):
    """
    Convert optparse style options into a dictionary for changing.

    :param opts: optparse style options
    :returns: a dictionary with change values to apply to devices;
              supported keys are ip, port, replication_ip,
              replication_port, device, meta
    """
    prefix = 'change_'
    change_values = {}
    for key in ('change_ip', 'change_port', 'change_replication_ip',
                'change_replication_port', 'change_device', 'change_meta'):
        value = getattr(opts, key, None)
        if not value:
            continue
        if key in ('change_ip', 'change_replication_ip'):
            value = validate_and_normalize_address(value)
        change_values[key[len(prefix):]] = value
    return change_values
def validate_args(argvish):
    """
    Parse argvish and report whether the new command-line format is in use.

    :returns: (new_cmd_format, opts, args); new_cmd_format is truthy when
              any search-style option was supplied (it carries the first
              truthy option value, matching the original or-chain)
    """
    opts, args = parse_args(argvish)
    new_cmd_format = False
    for value in (opts.id, opts.region, opts.zone, opts.ip, opts.port,
                  opts.replication_ip, opts.replication_port,
                  opts.device, opts.weight, opts.meta):
        # preserves `a or b or ...` semantics: keeps the first truthy value
        new_cmd_format = new_cmd_format or value
    return (new_cmd_format, opts, args)
def parse_args(argvish):
    """
    Build an OptionParser for the ring CLI options and parse argvish.

    :returns: the (options, arguments) pair produced by optparse
    """
    parser = optparse.OptionParser()
    option_specs = (
        ('-u', '--id', dict(type="int", help="Device ID")),
        ('-r', '--region', dict(type="int", help="Region")),
        ('-z', '--zone', dict(type="int", help="Zone")),
        ('-i', '--ip', dict(type="string", help="IP address")),
        ('-p', '--port', dict(type="int", help="Port number")),
        ('-j', '--replication-ip',
         dict(type="string", help="Replication IP address")),
        ('-q', '--replication-port',
         dict(type="int", help="Replication port number")),
        ('-d', '--device',
         dict(type="string", help="Device name (e.g. md0, sdb1)")),
        ('-w', '--weight', dict(type="float", help="Device weight")),
        ('-m', '--meta',
         dict(type="string", default="",
              help="Extra device info (just a string)")),
        ('-I', '--change-ip',
         dict(type="string", help="IP address for change")),
        ('-P', '--change-port',
         dict(type="int", help="Port number for change")),
        ('-J', '--change-replication-ip',
         dict(type="string", help="Replication IP address for change")),
        ('-Q', '--change-replication-port',
         dict(type="int", help="Replication port number for change")),
        ('-D', '--change-device',
         dict(type="string", help="Device name (e.g. md0, sdb1) for change")),
        ('-M', '--change-meta',
         dict(type="string", default="",
              help="Extra device info (just a string) for change")),
    )
    for short_opt, long_opt, kwargs in option_specs:
        parser.add_option(short_opt, long_opt, **kwargs)
    return parser.parse_args(argvish)
def parse_builder_ring_filename_args(argvish):
    """
    Derive (builder_file, ring_file) from the first CLI argument.

    'x.ring.gz' -> ('x.builder', 'x.ring.gz'); 'x.builder' -> ('x.builder',
    'x.ring.gz'); any other name 'x' -> ('x', 'x.ring.gz').
    """
    first_arg = argvish[1]
    ring_suffix = '.ring.gz'
    builder_suffix = '.builder'
    if first_arg.endswith(ring_suffix):
        return first_arg[:-len(ring_suffix)] + builder_suffix, first_arg
    builder_file = first_arg
    if builder_file.endswith(builder_suffix):
        stem = builder_file[:-len(builder_suffix)]
    else:
        stem = first_arg
    return builder_file, stem + ring_suffix
def build_dev_from_opts(opts):
    """
    Convert optparse style options into a device dictionary.

    Replication ip/port default to the plain ip/port when not given.

    :raises ValueError: when any required option (region, zone, ip, port,
                        device, weight) is missing or falsy
    """
    required = (('region', '-r', '--region'),
                ('zone', '-z', '--zone'),
                ('ip', '-i', '--ip'),
                ('port', '-p', '--port'),
                ('device', '-d', '--device'),
                ('weight', '-w', '--weight'))
    for attribute, shortopt, longopt in required:
        if not getattr(opts, attribute, None):
            raise ValueError('Required argument %s/%s not specified.' %
                             (shortopt, longopt))
    ip = validate_and_normalize_address(opts.ip)
    replication_ip = validate_and_normalize_address(
        opts.replication_ip or opts.ip)
    replication_port = opts.replication_port or opts.port
    return {'region': opts.region, 'zone': opts.zone, 'ip': ip,
            'port': opts.port, 'device': opts.device, 'meta': opts.meta,
            'replication_ip': replication_ip,
            'replication_port': replication_port, 'weight': opts.weight}
def dispersion_report(builder, search_filter=None, verbose=False):
    """
    Compute a dispersion summary for *builder*.

    :param builder: ring builder exposing a _dispersion_graph (built on
                    demand) and _build_max_replicas_by_tier()
    :param search_filter: optional regex; only tiers whose rendered name
                          matches are considered
    :param verbose: when True, 'graph' carries a per-tier report
    :returns: dict with 'max_dispersion', 'worst_tier' and 'graph'
    """
    if not builder._dispersion_graph:
        builder._build_dispersion_graph()
    max_allowed_replicas = builder._build_max_replicas_by_tier()
    worst_tier = None
    max_dispersion = 0.0
    sorted_graph = []
    for tier, replica_counts in sorted(builder._dispersion_graph.items()):
        tier_name = get_tier_name(tier, builder)
        if search_filter and not re.match(search_filter, tier_name):
            continue
        max_replicas = int(max_allowed_replicas[tier])
        # replica_counts[n] counts parts with n replicas in this tier;
        # anything above max_replicas is over-concentrated ("at risk")
        at_risk_parts = sum(replica_counts[max_replicas + 1:])
        placed_parts = sum(replica_counts[1:])
        tier_dispersion = 100.0 * at_risk_parts / placed_parts
        if tier_dispersion > max_dispersion:
            max_dispersion = tier_dispersion
            worst_tier = tier_name
        # (a redundant `max_dispersion = max(...)` recomputation that
        # followed here was dead code and has been removed)
        if not verbose:
            continue
        tier_report = {
            'max_replicas': max_replicas,
            'placed_parts': placed_parts,
            'dispersion': tier_dispersion,
            'replicas': replica_counts,
        }
        sorted_graph.append((tier_name, tier_report))
    return {
        'max_dispersion': max_dispersion,
        'worst_tier': worst_tier,
        'graph': sorted_graph,
    }
def get_tier_name(tier, builder):
    """
    Render a tier tuple as a readable name, e.g. 'r1z2-1.2.3.4:6000/sdb1'.

    For a device tier (length 4), the device name is looked up in
    builder.devs, falling back to 'IDd<id>' when unknown.
    """
    depth = len(tier)
    if depth == 1:
        return "r%s" % (tier[0],)
    if depth == 2:
        return "r%sz%s" % tier[:2]
    if depth == 3:
        return "r%sz%s-%s" % tier[:3]
    if depth == 4:
        device = builder.devs[tier[3]] or {}
        dev_name = device.get('device', 'IDd%s' % tier[3])
        return "r%sz%s-%s/%s" % (tier[0], tier[1], tier[2], dev_name)
| |
import sys
import os
import bisect
import traceback
import time
import numpy as np
import pandas as pd
import pickle as p
from PyQt5 import QtGui, QtWidgets#,# uic
from PyQt5.QtCore import QThread, pyqtSignal, Qt, QRect, QTimer
from scipy import signal, stats
import pyqtgraph as pg
import inspect
import h5py
try:  # line_profiler is optional and only used for ad-hoc speed checking
    from line_profiler import LineProfiler

    def lprofile():
        """Decorator factory: line-profile every call to the wrapped function."""
        def inner(func):
            def profiled_func(*args, **kwargs):
                try:
                    profiler = LineProfiler()
                    profiler.add_function(func)
                    profiler.enable_by_count()
                    return func(*args, **kwargs)
                finally:
                    # always dump stats, even when func raises
                    profiler.print_stats()
            return profiled_func
        return inner
except ImportError:
    # Narrowed from a bare except: only a missing line_profiler should be
    # silently tolerated (lprofile is then simply undefined).
    pass
# Resolve intra-package imports both when this module is imported as part of
# the pyecog package and when it is executed directly as a script (relative
# imports are only valid in the package case).
if __name__ != '__main__':
    from . import check_preds_design, loading_subwindow, convert_ndf_window
    from . import subwindows
    from .context import ndf
else:
    import check_preds_design, loading_subwindow, convert_ndf_window
    import subwindows
    from context import ndf
from ndf.h5loader import H5File
from ndf.datahandler import DataHandler
def throw_error(error_text=None):
    """Pop up a modal message box describing an error.

    When *error_text* is None the current traceback (last frame only) is
    shown instead. Always returns 0.
    """
    if error_text is None:
        detail = str(traceback.format_exc(1))
    else:
        detail = str(error_text)
    box = QtWidgets.QMessageBox()
    box.setText('Error caught! \n' + detail)
    box.exec_()
    return 0
class TreeWidgetItem(QtGui.QTreeWidgetItem):
    """Tree item that sorts numerically when both cells parse as floats.

    NOTE(review): __lt__ compares with '>', inverting the usual sort order
    for the active column -- confirm this inversion is intended.
    """

    def __init__(self, parent=None):
        QtGui.QTreeWidgetItem.__init__(self, parent)

    def __lt__(self, otherItem):
        column = self.treeWidget().sortColumn()
        mine = self.text(column)
        theirs = otherItem.text(column)
        try:
            return float(mine) > float(theirs)
        except ValueError:
            # fall back to lexicographic comparison for non-numeric cells
            return mine > theirs
class MainGui(QtGui.QMainWindow, check_preds_design.Ui_MainWindow):
    def __init__(self, parent=None):
        """Build the main window: pyqtgraph config, state defaults, signal wiring."""
        # white background / black foreground must be set before plots exist
        pg.setConfigOption('background', 'w')
        pg.setConfigOption('foreground', 'k')
        super(MainGui, self).__init__(parent)
        self.setupUi(self)
        self.handler = DataHandler()
        self.scroll_flag = -1
        self.deleted_tree_items = []
        #self.splitter.setSizes([50,20])
        #self.splitter_2.setSizes([50,20])
        #self.splitter_3.setSizes([50,20])
        #self.bottom_splitter.setSizes([200,300])
        #self.full_splitter.setSizes([300,200,150])
        # blink/scroll state mirrors the checkbox (1 = on, -1 = off)
        if self.blink_box.isChecked():
            self.blink = 1
        else:
            self.blink = -1
        self.scroll_sign = 1
        self.timer = QTimer()
        self.timer.timeout.connect(self.simple_scroll)
        # widget -> handler wiring for the scrolling and filter controls
        self.blink_box.stateChanged.connect(self.blink_box_change)
        self.scroll_speed_box.valueChanged.connect(self.scroll_speed_change)
        self.checkBox_scrolling.stateChanged.connect(self.scroll_checkbox_statechange)
        self.xrange_spinBox.valueChanged.connect(self.xrange_change)
        self.tid_spinBox.valueChanged.connect(self.tid_spinBox_change)
        self.checkbox_filter_toggle.stateChanged.connect(self.plot1_display_filter_toggled)
        self.hp_filter_freq.valueChanged.connect(self.hp_filter_settings_changed)
        self.fs = None # change !
        # per-session display state, populated once data is loaded
        self.previously_displayed_tid = None
        self.data_obj = None
        self.predictions_df = None
        self.h5directory = None
        self.tree_items = []
        self.valid_h5_tids = None
        self.hdf5_plot = None
        self.valid_tids_to_indexes = None
        self.indexes_to_valid_tids = None
        self.tid_spinbox_just_changed = False
        self.annotation_change_tid = False
        # restore the "home" folder saved by a previous session, if any
        if os.path.exists('pyecog_temp_file.pickle'):
            with open('pyecog_temp_file.pickle', "rb") as temp_file:
                self.home = p.load(temp_file)
        else:
            self.home = os.getcwd()
        # Hook up the file bar stuff here
        self.substates_timewindow_secs = 6
        self.actionLoad_Predictions.triggered.connect(self.load_predictions_gui)
        self.actionSave_annotations.triggered.connect(self.master_tree_export_csv)
        self.actionLoad_Library.triggered.connect(self.load_seizure_library)
        self.actionLoad_h5_folder.triggered.connect(self.load_h5_folder) # this is still to do in its entirety
        self.actionSet_default_folder.triggered.connect(self.set_home)
        # Hook up analyse menu bar to functions here
        self.actionConvert_ndf_to_h5.triggered.connect(self.convert_ndf_folder_to_h5)
        self.actionLibrary_logistics.triggered.connect(self.load_library_management_subwindow)
        self.actionClassifier_components.triggered.connect(self.load_clf_subwindow)
        self.actionAdd_features_to_h5_folder.triggered.connect(self.load_add_prediction_features_subwindow)
        # main trace plot and the smaller overview plot
        self.plot_1 = self.GraphicsLayoutWidget.addPlot()
        self.plot_overview = self.overview_plot.addPlot()
        #self.tid_box.setValue(6)
        #self.traceSelector.valueChanged.connect(self.plot_traces)
        #self.channel_selector.valueChanged.connect(self.plot_traces)
        self.treeWidget.setSortingEnabled(True)
        self.treeWidget.itemSelectionChanged.connect(self.master_tree_selection)
        # flags describing what the tree widget currently displays
        self.predictions_up = False
        self.library_up = False
        self.file_dir_up = False
        self.substates_up = False
        self.substate_child_selected = False
def hp_filter_settings_changed(self):
if self.hdf5_plot is not None:
self.hdf5_plot.wipe_filtered_data()
self.plot1_display_filter_toggled()
def plot1_display_filter_toggled(self):
# set filter settings on trace
if self.hdf5_plot is not None:
toggle, hp, lp = self.get_plot1_display_filter_settings_from_maingui()
self.hdf5_plot.set_display_filter_settings(toggle , hp, lp)
self.hdf5_plot.updateHDF5Plot()
def get_plot1_display_filter_settings_from_maingui(self):
''' Returns the state, high pass and low pass values from main gui'''
hp = self.hp_filter_freq.value()
lp = self.lp_filter_freq.value()
if hp <= 0:
self.hp_filter_freq.setValue(1.0)
toggle = self.checkbox_filter_toggle.isChecked()
return toggle, hp, lp
def not_done_yet(self):
QtGui.QMessageBox.information(self," ", "Not implemented yet! Jonny has been lazy!")
def load_clf_subwindow(self):
child = subwindows.ClfWindow()
child.show()
child.home = self.home
if child.exec_():
print('exec_() was called')
return 0
def load_add_prediction_features_subwindow(self):
child = subwindows.AddPredictionFeaturesWindow()
child.show()
child.home = self.home
if child.exec_():
print('exec_() was called')
return 0
def load_library_management_subwindow(self):
child = subwindows.LibraryWindow()
child.show()
child.home = self.home
if child.exec_():
print('exec_was called')
return 0
def convert_ndf_folder_to_h5(self):
child = subwindows.ConvertingNDFsWindow()
child.show()
child.home = self.home
if child.exec_():
print('exec_was called')
return 0
def set_home(self):
self.home = QtGui.QFileDialog.getExistingDirectory(self, "Set a default folder to load from", self.home)
with open("pyecog_temp_file.pickle", "wb") as f:
p.dump(self.home, f)
def get_h5_folder_fnames(self):
new_directory = QtGui.QFileDialog.getExistingDirectory(self, "Pick a h5 folder", self.home)
if new_directory == '':
print('No folder selected')
return 0
self.h5directory = new_directory
self.clear_QTreeWidget()
self.build_startswith_to_filename()
fnames = [f for f in os.listdir(self.h5directory) if f.endswith('.h5') if not f.startswith('.') ]
return fnames
def load_h5_folder(self):
fnames = self.get_h5_folder_fnames()
if fnames == 0:
return 0
for i,fname in enumerate(fnames):
try:
tids = eval('['+fname.split(']')[0].split('[')[1]+']')
self.populate_tree_items_list_from_h5_folder(i,fname, tids) # this just populates self.tree_items
except:
print('Failed to add: '+ str(fname))
self.treeWidget.addTopLevelItems(self.tree_items)
self.update_h5_folder_display()
self.predictions_up = False
self.library_up = False
self.file_dir_up = True
self.substates_up = False
    def populate_tree_items_list_from_h5_folder(self,index,fpath,tids):
        '''Append one tree row for a h5 file; start/end/time columns left blank.

        Only fills self.tree_items - the caller adds them to the widget.
        '''
        self.deleted_tree_items = []
        # NOTE(review): 6 columns but 8 header labels supplied - verify intent
        self.treeWidget.setColumnCount(6)
        self.treeWidget.setHeaderLabels(['index', 'start', 'end','duration', 'tids', 'fname', 'real_start','real_end'])
        details_entry = [str(index),
                         '',
                         '',
                         '',
                         str(tids),
                         str(fpath),
                         '',
                         '']
        item = TreeWidgetItem(details_entry)
        item.setFirstColumnSpanned(True)
        self.tree_items.append(item)
def load_seizure_library(self):
try:
self.library = QtGui.QFileDialog.getOpenFileName(self, "Pick a h5 library file", self.home)[0]
if self.library is '':
print('nothing selected')
return 0
print(self.library)
print(type(self.library))
self.clear_QTreeWidget()
with h5py.File(self.library) as f:
group_names = list(f.keys())
groups = [f[key] for key in group_names]
for i, group in enumerate(groups):
for seizure_i in range(group.attrs['precise_annotation'].shape[0]):
real_start = self.handler.get_time_from_seconds_and_filepath(group_names[i],
group.attrs['precise_annotation'][seizure_i, 0],
split_on_underscore=True).round('s')
real_end = self.handler.get_time_from_seconds_and_filepath(group_names[i],
group.attrs['precise_annotation'][seizure_i, 1],
split_on_underscore=True).round('s')
row = {'name' : group_names[i],
'start': group.attrs['precise_annotation'][seizure_i, 0],
'end' : group.attrs['precise_annotation'][seizure_i, 1],
'tid' : group.attrs['tid'],
'index': i,
'chunk_start': group.attrs['chunked_annotation'][seizure_i, 0],
'chunk_end': group.attrs['chunked_annotation'][seizure_i, 1],
'real_start':real_start,
'real_end':real_end
}
self.populate_tree_items_from_library(row)
self.treeWidget.addTopLevelItems(self.tree_items)
self.predictions_up = False
self.library_up = True
self.file_dir_up = False
self.substates_up = True
except:
msgBox = QtWidgets.QMessageBox()
msgBox.setText('Error caught at load_seizure_library() \n'+str(traceback.format_exc(1)))
msgBox.exec_()
    def populate_tree_items_from_library(self, row):
        '''Append one tree row (10 columns) built from a library seizure dict.

        Only fills self.tree_items - the caller adds them to the widget.
        '''
        self.deleted_tree_items = []
        self.treeWidget.setColumnCount(9)
        self.treeWidget.setHeaderLabels(['index','start','end','duration','chunk_start','chunk_end', 'tid','name', 'real_start', 'real_end'])
        details_entry = [str(row['index']),
                         str(row['start']),
                         str(row['end']),
                         str((row['end'] - row['start']) ),
                         str(row['chunk_start']),
                         str(row['chunk_end']),
                         str(row['tid']),
                         str(row['name']),
                         str(row['real_start']),
                         str(row['real_end'])
                         ]
        item = TreeWidgetItem(details_entry)
        item.setFirstColumnSpanned(True)
        self.tree_items.append(item)
def master_tree_export_csv(self):
if self.predictions_up:
self.predictions_tree_export_csv()
elif self.library_up:
self.library_tree_export_csv()
elif self.file_dir_up:
self.file_tree_export_csv()
    def file_tree_export_csv(self):
        '''Export the file-dir tree: identical layout to the predictions tree.'''
        # we just call predicitions tree export as should be same idea
        self.predictions_tree_export_csv()
def library_tree_export_csv(self):
if self.h5directory:
default_dir = os.path.dirname(self.h5directory)
else:
default_dir = ""
save_name = QtGui.QFileDialog.getSaveFileName(self,'Save library details in a .csv file',default_dir)[0]
if save_name is '':
print('nothing selected')
return 0
# now build dataframe from the tree
root = self.treeWidget.invisibleRootItem()
child_count = root.childCount()
index, start, end, tid, fname, duration = [],[],[],[],[], []
for i in range(child_count):
item = root.child(i)
index.append(item.text(0))
start.append(item.text(1))
end.append(item.text(2))
tid.append(item.text(6))
fname.append(item.text(7))
duration.append(item.text(3))
exported_df = pd.DataFrame(data = np.vstack([fname,start,end,duration,tid]).T,columns = ['filename','start','end','duration','transmitter'] )
save_name = save_name.strip('.csv')
try:
exported_df.to_csv(save_name+'.csv')
except PermissionError:
throw_error('Error - permission error! Is the file open somewhere else?')
return 1
def predictions_tree_export_csv(self):
if self.h5directory:
default_dir = os.path.dirname(self.h5directory)
else:
default_dir = ""
save_name = QtGui.QFileDialog.getSaveFileName(self,'Save annotation .csv file',default_dir)[0]
if save_name is '':
print('nothing selected')
return 0
# now build dataframe from the tree
root = self.treeWidget.invisibleRootItem()
child_count = root.childCount()
index, start, end, tid, fname, duration,real_end,real_start = [],[],[],[],[], [],[],[]
for i in range(child_count):
item = root.child(i)
index.append(item.text(0))
start.append(item.text(1))
end.append(item.text(2))
try:
tid_str = int(item.text(4))
except:
# tid is probably a '[x]'
tid_str = eval(item.text(4))
if hasattr(tid_str, '__iter__'):
tid_str = str(tid_str)
tid.append(tid_str)
fname.append(item.text(5))
duration.append(item.text(3))
real_end.append(item.text(6))
real_start.append(item.text(7))
exported_df = pd.DataFrame(data = np.vstack([index,fname,start,end,duration,tid, real_end,real_start]).T,columns = ['old_index','filename','start','end',
'duration','transmitter', 'real_start', 'real_end'] )
save_name = save_name.strip('.csv')
try:
exported_df.to_csv(save_name+'.csv')
except PermissionError:
throw_error('Error - permission error! Is the file open somewhere else?')
return 1
    def master_tree_selection(self):
        '''Dispatch a tree-selection event to the handler for the active view.'''
        if not self.deleteing: # this is a hack as was being called as I was clearing the items
            if self.predictions_up:
                #todo Jonny hacking awway again, this actuall loops back to tree_selections_preductions
                self.tree_selection_file_dir()
                #self.tree_selection_predictions()
            elif self.library_up:
                self.tree_selection_library()
            elif self.file_dir_up:
                self.tree_selection_file_dir()
            elif self.substates_up:
                self.tree_selection_substates()
    def get_next_tree_item(self):
        '''Stub: advancing to the next tree row is not implemented yet.'''
        print('not implememented: try to grab next treewidget item!')
def set_valid_h5_ids(self, tid_list):
self.valid_h5_tids = tid_list
self.valid_tids_to_indexes
self.valid_tids_to_indexes = {tid:i for i, tid in enumerate(self.valid_h5_tids)}
self.indexes_to_valid_tids = {i:tid for i, tid in enumerate(self.valid_h5_tids)}
self.previously_displayed_tid = None # you also want to "wipe the list?"
    def tree_selection_substates(self):
        '''Handle a click in the substates tree: file rows replot, child rows zoom.'''
        item = self.treeWidget.currentItem()
        # file rows are marked by an 'M' prefix in column 2 - TODO confirm convention
        if item.text(2).startswith('M'):
            #print('Filerow')
            self.treeWidget.substate_child_selected = False
            tids = item.text(1)
            # tids column holds a '[..]' style list string
            self.set_valid_h5_ids(eval(tids))
            self.handle_tid_for_file_dir_plotting() # this will automatically call the plotting by changing the v
        else:
            #print('child')
            self.treeWidget.substate_child_selected = True
            i = item.text(0)
            # column 1 is a 'start-end' seconds window; zoom the main plot to it
            chunk = item.text(1)
            t_window = chunk.split('-')
            xmin = int(t_window[0])
            xmax = int(t_window[1])
            self.plot_1.getViewBox().setXRange(min = xmin,
                                               max = xmax, padding=0)
            category = item.text(2)
    def tree_selection_file_dir(self):
        # this method does too much
        '''Handle a click in the file-dir tree: rows with a start time go through
        the predictions handler, bare file rows just load and plot the file.'''
        self.h5directory = self.h5directory # shitty but you had diff variabels?!
        current_item = self.treeWidget.currentItem()
        # column 1 (start) non-empty means this row is an annotation, not a file
        if current_item.text(1) != '':
            try:
                self.tree_selection_predictions()
            except:
                if current_item.text(2) == '':
                    # an end time is missing - warn, then retry anyway
                    msgBox = QtWidgets.QMessageBox()
                    msgBox.setText('Make an end and start line at the same time for best functionality')
                    msgBox.exec_()
                # do something else here
                self.tree_selection_predictions()
                #tids = current_item.text(4)
                #self.set_valid_h5_ids(eval(tids))
                #self.handle_tid_for_file_dir_plotting() # this will automatically call the plotting by changing the v
        else:
            tids = current_item.text(4)
            self.set_valid_h5_ids(eval(tids))
            self.handle_tid_for_file_dir_plotting() # this will automatically call the plotting by changing the v
def load_filedir_h5_file(self,tid):
'''
Splitting tree_selection_file_dir up so changing the tid spinBox can reload easily
'''
import time
start = time.time()
fields = self.treeWidget.currentItem()
path = fields.text(5)
index = float(fields.text(0))
fpath = os.path.join(self.h5directory, path)
h5 = H5File(fpath)
data_dict = h5[tid]
self.fs = eval(h5.attributes['fs_dict'])[tid]
self.add_data_to_plots(data_dict['data'], data_dict['time'])
if self.checkbox_hold_trace_position.isChecked():
xlims = self.plot_1.getViewBox().viewRange()[0]
x_min = xlims[0]
x_max = xlims[1]
else:
xrange = self.xrange_spinBox.value()
x_min = 0
x_max = xrange
self.plot_1.setXRange(x_min, x_max, padding=0)
self.plot_1.setTitle(str(index)+' - '+ fpath+ '\n')
self.plot_overview.setTitle('Overview of file: '+str(index)+' - '+ fpath)
self.updatePlot()
    def handle_tid_for_file_dir_plotting(self):
        '''Plot the selected file, reusing the current spinbox tid when valid.'''
        # this is called when clicking on the tree structure
        # therefore first check if can use the same tid or not...
        current_spinbox_id = self.tid_spinBox.value()
        if current_spinbox_id not in self.valid_h5_tids:
            self.tid_spinBox.setValue(self.valid_h5_tids[0]) # no reason to default to first
            # here you add something to hold id if needed
            #print('File tid changed as previous tid not valid')
            # this will now automatically call the tid_spinBox_change method - as you have tid changed it
        else:
            # can use the same tid so plot
            self.load_filedir_h5_file(current_spinbox_id)
            # as id number will now be the last value next time changed
    def tid_spinBox_change(self):
        ''' Slot for tid_spinBox.valueChanged: filters out programmatic changes
        before delegating to tid_spinBox_handling. '''
        if self.tid_spinBox.value() == self.previously_displayed_tid:
            # catching the loop which occurs if setting the spinbox after finding next tid
            return 0
        elif self.annotation_change_tid == True:
            # spinbox was set by annotation code, not the user - swallow once
            self.annotation_change_tid = False
            return 0
        else:
            self.tid_spinBox_handling()
    def set_tid_spinbox(self, value):
        '''
        Set the tid spinbox from code, flagging the change so the valueChanged
        handler can tell it apart from a user edit.
        '''
        #print('Tid_spinbox has been set by code' )
        self.tid_spinBox.setValue(value)
        self.tid_spinbox_just_changed = True
    def tid_spinBox_handling(self):
        '''Resolve the spinbox value to a valid transmitter id (rolling over at the
        ends of self.valid_h5_tids, stepping through gaps) and replot with it.'''
        #print('tid spin box handling called')
        try:
            # tid_spinbox.valueChanged connects to here
            new_val = self.tid_spinBox.value()
            #print(time.time(), 'New spinbox value is ', new_val)
            set_tid_box = True
            if new_val in self.valid_h5_tids:
                set_tid_box = False # dont need to overwrite box
                new_tid = new_val
            elif new_val < min(self.valid_h5_tids): # this is rolling 0
                new_tid = self.valid_h5_tids[-1]
            elif new_val > max(self.valid_h5_tids): # this is rolling 0
                new_tid = self.valid_h5_tids[0]
            else:
                # value fell in a gap between valid tids
                if self.previously_displayed_tid is not None:
                    # step relative to the last displayed tid's position
                    step = new_val - self.previously_displayed_tid
                    old_index = self.valid_tids_to_indexes[self.previously_displayed_tid]
                    new_index = old_index+step
                    new_tid = self.indexes_to_valid_tids[new_index]
                else:
                    # no history: snap to the nearest valid tid above (with wraparound)
                    i = bisect.bisect_left(self.valid_h5_tids,new_val)
                    new_tid = self.valid_h5_tids[i%len(self.valid_h5_tids)]
            #print('New is:', new_tid)
            #print('Last was:', self.previously_displayed_tid)
            self.previously_displayed_tid = new_tid
            self.load_filedir_h5_file(new_tid)
            if set_tid_box:
                self.set_tid_spinbox(new_tid)
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            print (traceback.print_exception(exc_type, exc_value, exc_traceback))
            print('Error caught at: pyecog_main_gui.tid_spinBox_handling()')
            msgBox = QtWidgets.QMessageBox()
            msgBox.setText('Error caught at tid_spinBox_handling() \n'+str(traceback.format_exc()))
            msgBox.exec_()
    def tree_selection_library(self):
        '''Plot the seizure selected in the library tree, with draggable start/end
        lines, fixed coarse-chunk lines, and the view zoomed to the chunk.'''
        seizure_buffer = 5 # seconds either side of seizure to plot
        current_item = self.treeWidget.currentItem()
        fields = current_item
        # columns: 0 index, 1 start, 2 end, 3 duration, 4/5 chunk, 6 tid, 7 name
        index = int(float(fields.text(0)))
        start = float(fields.text(1))
        end = float(fields.text(2))
        duration = float(fields.text(3))
        chunk_start = float(fields.text(4))
        chunk_end = float(fields.text(5))
        tid = float(fields.text(6))
        key = fields.text(7)
        with h5py.File(self.library) as f:
            dataset = f[key]
            self.fs = dataset.attrs['fs']
            # todo, then assumes you have calculated labels need to be calculated second?
            # i guess you can just add labels before?
            self.plot_1.clear()
            self.bx1 = self.plot_1.getViewBox()
            data = dataset['data'][:]
            time = np.linspace(0, data.shape[0]/self.fs, data.shape[0])
        self.add_data_to_plots(data,time)
        # movable lines mark the precise annotation; grey fixed lines the chunk
        start_pen = pg.mkPen((85, 168, 104), width=3, style= Qt.DashLine)
        end_pen = pg.mkPen((210,88,88), width=3, style= Qt.DashLine)
        coarse_pen = pg.mkPen((210,210,210), width=3, style= Qt.DashLine)
        self.start_line = pg.InfiniteLine(pos=start, pen =start_pen, movable=True,label='{value:0.2f}',
                                          labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,0), 'movable': True})
        self.end_line = pg.InfiniteLine(pos=end, pen = end_pen, movable=True,label='{value:0.2f}',
                                        labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,0), 'movable': True})
        self.start_coarse = pg.InfiniteLine(pos=chunk_start, pen =coarse_pen, movable=False,label='{value:0.2f}',
                                            labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,0), 'movable': True})
        self.end_coarse = pg.InfiniteLine(pos=chunk_end, pen =coarse_pen, movable=False,label='{value:0.2f}',
                                          labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,0), 'movable': True})
        self.plot_1.addItem(self.start_line)
        self.plot_1.addItem(self.end_line)
        self.plot_1.addItem(self.start_coarse)
        self.plot_1.addItem(self.end_coarse)
        # dragging a line writes the new time back into the tree row
        self.start_line.sigPositionChanged.connect(self.update_tree_element_start_time)
        self.end_line.sigPositionChanged.connect(self.update_tree_element_end_time)
        self.plot_1.setXRange(chunk_start-seizure_buffer, chunk_end+seizure_buffer, padding=0)
        self.plot_1.setTitle(str(index)+' - '+ key+ '\n' + str(start)+' - ' +str(end))
        self.plot_overview.setTitle('Overview of file: '+str(index)+' - '+ key)
        self.updatePlot()
        # flag so the spinbox handler ignores this programmatic tid change
        self.annotation_change_tid = True
        self.set_tid_spinbox(tid)
def build_startswith_to_filename(self):
''' split either on the bracket of the .'''
self.startname_to_full = {}
for f in os.listdir(self.h5directory):
self.startname_to_full[f[:11]] = f
    def tree_selection_predictions(self):
        # this method does too much
        '''Plot the annotation selected in the predictions tree with draggable
        start/end lines, zoomed to the annotation plus a small buffer.'''
        seizure_buffer = 5 # seconds either side of seizure to plot
        current_item = self.treeWidget.currentItem()
        fields = current_item
        try:
            tid = int(fields.text(4))
        except:
            # tid is probably a '[x]' style list string; take the first element
            tid = eval(fields.text(4))
            if hasattr(tid, '__iter__'):
                tid = tid[0]
        start = float(fields.text(1))
        try:
            end = float(fields.text(2))
        except:
            # no end time entered yet - default to 1 s after start
            end = start + 1
            print(' caught you not clicking an end, line 651, need to code this better')
        index = float(fields.text(0))
        # duration is fields.text(3)
        try:
            # look the full filename up from its first 11 characters
            correct_file = self.startname_to_full[fields.text(5)[:11]]
        except KeyError:
            throw_error()
            return 0
        fpath = os.path.join(self.h5directory, correct_file)
        h5 = H5File(fpath)
        data_dict = h5[tid]
        # fs_dict is stored as its string repr in the h5 attributes
        self.fs = eval(h5.attributes['fs_dict'])[tid]
        self.add_data_to_plots(data_dict['data'], data_dict['time'])
        start_pen = pg.mkPen((85, 168, 104), width=3, style= Qt.DashLine)
        end_pen = pg.mkPen((210,88,88), width=3, style= Qt.DashLine)
        self.start_line = pg.InfiniteLine(pos=start, pen =start_pen, movable=True,label='{value:0.2f}',
                                          labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,0), 'movable': True})
        self.end_line = pg.InfiniteLine(pos=end, pen = end_pen, movable=True,label='{value:0.2f}',
                                        labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,0), 'movable': True})
        # todo add all lines per file in one go - more than one seizure
        self.plot_1.addItem(self.start_line)
        self.plot_1.addItem(self.end_line)
        # dragging a line writes the new time back into the tree row
        self.start_line.sigPositionChanged.connect(self.update_tree_element_start_time)
        self.end_line.sigPositionChanged.connect(self.update_tree_element_end_time)
        self.plot_1.setXRange(start-seizure_buffer, end+seizure_buffer, padding=0)
        self.plot_1.setTitle(str(index)+' - '+ fpath+ '\n' + str(start)+' - ' +str(end))
        self.plot_overview.setTitle('Overview of file: '+str(index)+' - '+ fpath)
        self.updatePlot()
        # you need to change the spinbox - should be caught if already the same?
        self.annotation_change_tid = True
        self.set_tid_spinbox(tid)
    #@lprofile()
    def add_data_to_plots(self, data, time):
        '''Load a trace into the main plot and the hour-long overview plot, and
        wire up the linked region / mouse-click signals between them.'''
        self.plot_1.clear()
        self.bx1 = self.plot_1.getViewBox()
        self.hdf5_plot = HDF5Plot(parent = self.plot_1, viewbox = self.bx1)
        if self.checkbox_filter_toggle.isChecked():
            toggle, hp, lp = self.get_plot1_display_filter_settings_from_maingui()
            self.hdf5_plot.set_display_filter_settings(toggle,hp,lp)
        self.hdf5_plot.setHDF5(data, time, self.fs)
        self.plot_1.addItem(self.hdf5_plot)
        self.plot_1.setLabel('left', 'Voltage (uV)')
        self.plot_1.setLabel('bottom','Time (s)')
        # hit up the linked view here
        self.plot_overview.clear()
        self.plot_overview.enableAutoRange(False,False)
        self.plot_overview.setXRange(0,3600, padding=0) # hardcoding in the hour here...
        self.plot_overview.setMouseEnabled(x = False, y= True)
        self.bx_overview = self.plot_overview.getViewBox()
        hdf5_plotoverview = HDF5Plot(parent = self.plot_overview, viewbox = self.bx_overview)
        hdf5_plotoverview.setHDF5(data, time, self.fs)
        self.plot_overview.addItem(hdf5_plotoverview)
        # overwrites the hardcoded hour above with the actual trace extent
        self.plot_overview.setXRange(time[0],time[-1], padding=0)
        self.plot_overview.setLabel('left', 'Voltage (uV)')
        self.plot_overview.setLabel('bottom','Time (s)')
        # mousePressEvent,mouseDoubleClickEvent ,sigMouseClicked,sigMouseMoved,wheelEvent
        # should you just be overwriting class methods for this stuff?
        # proxies must be kept on self or they would be garbage collected
        self.proxy2 = pg.SignalProxy(self.plot_1.scene().sigMouseClicked,rateLimit=30,slot=self.mouse_click_on_main)
        self.proxy = pg.SignalProxy(self.plot_overview.scene().sigMouseClicked,rateLimit=30,slot=self.mouse_click_in_overview)
        #print(dir(self.plot_overview.scene()))
        # linear region on the overview mirrors the main plot's x window
        self.lr = pg.LinearRegionItem(self.plot_1.getViewBox().viewRange()[0])
        self.lr.setZValue(-10)
        self.plot_overview.addItem(self.lr)
        # is this good practice?
        self.lr.sigRegionChanged.connect(self.updatePlot)
        self.plot_1.sigXRangeChanged.connect(self.updateRegion) # xlims?
        self.plot_1.sigXRangeChanged.connect(self.xrange_changed_on_plot)
        self.updatePlot()
    # these two methods are for the lr plot connection, refactor names
    def updatePlot(self):
        '''Sync the main plot's x window to the overview's linear region.'''
        self.plot_1.setXRange(*self.lr.getRegion(), padding=0)
    def updateRegion(self):
        '''Sync the overview's linear region to the main plot's x window.'''
        self.lr.setRegion(self.plot_1.getViewBox().viewRange()[0])
    def update_tree_element_start_time(self):
        '''Write the dragged start-line position back into column 1 of the tree row.'''
        tree_row = self.treeWidget.currentItem()
        tree_row.setText(1,'{:.2f}'.format(self.start_line.x()))
        self.update_tree_element_duration()
    def update_tree_element_end_time(self):
        '''Write the dragged end-line position back into column 2 of the tree row.'''
        self.check_for_blank()
        tree_row = self.treeWidget.currentItem()
        tree_row.setText(2,'{:.2f}'.format(self.end_line.x()))
        self.update_tree_element_duration()
def check_for_blank(self):
try:
if int(self.end_line.x()) ==0 and int(self.start_line.x()) ==0:
if self.start_line.x() != 0:
self.start_line.setValue(0)
if self.end_line.x() != 0:
self.end_line.setValue(0)
print('Blank entered, setting to 0')
except:
print('Error when checking for blank')
traceback.print_exception(1)
    def update_tree_element_duration(self):
        '''Recompute column 3 (duration = end - start) and the real timestamps.'''
        try:
            tree_row = self.treeWidget.currentItem()
            duration = float(tree_row.text(2))-float(tree_row.text(1))
            tree_row.setText(3, '{:.2f}'.format(duration))
            self.update_real_times()
        except:
            # start or end column may still be empty ('' -> ValueError)
            print('caught error at 777')
def plot_traces(self, data_dict):
if not self.holdPlot.isChecked():
self.plot_1.clear()
# here you need to add the h5 file class with downsampling
curve1 = HDF5Plot()#parent = self.plot_1, viewbox = bx1)
curve1.setHDF5(data_dict['data'], data_dict['time'], self.fs)
self.plot_1.addItem(hdf5_plot)
#self.plot_1.addItem(pg.PlotCurveItem(data_dict['time'], data_dict['data']))
self.plot_1.setXRange(row['Start'], row['End'], padding=0)
#self.plot_1.ti
    def clear_QTreeWidget(self):
        '''Remove every row from the tree widget and reset self.tree_items.

        self.deleteing guards master_tree_selection from firing mid-clear.
        '''
        # not sure if i need this top bit
        self.deleteing = True
        if self.treeWidget.currentItem():
            current_item = self.treeWidget.currentItem()
            root = self.treeWidget.invisibleRootItem()
            root.removeChild(current_item)
        root = self.treeWidget.invisibleRootItem()
        n_kids = root.childCount()
        # remove from the end so indexes stay valid
        for i in np.arange(n_kids)[::-1]:
            child = self.treeWidget.topLevelItem(i)
            root.removeChild(child)
        self.tree_items = []
        self.deleteing = False
    def load_predictions_gui(self):
        ''' Open a dialog to pick the h5 folder and predictions file, then load them. '''
        # we want something displaying the two files and lets you optionally change the h5 folder.
        print('loading new window!')
        child = subwindows.LoadingSubwindow()
        child.show()
        child.home = self.home # this doesnt overwrite when called when on those windows...
        if child.exec_():
            # dialog accepted: copy its selections and load the predictions file
            print(child.predictions_fname)
            self.h5directory = child.h5directory
            self.predictions_fname = child.predictions_fname
            self.update_h5_folder_display()
            self.update_predictionfile_display()
            self.load_pred_file()
            self.build_startswith_to_filename()
    def update_h5_folder_display(self):
        '''Show the current h5 folder path in its label widget.'''
        self.h5_folder_display.setText(str(self.h5directory))
    def update_predictionfile_display(self):
        '''Show the current predictions filename in its label widget.'''
        self.predictions_file_display.setText(str(self.predictions_fname))
    def load_pred_file(self):
        '''Load the predictions .csv/.xlsx into a dataframe and fill the tree.

        Returns 0 early if the file extension is not recognised.
        '''
        self.clear_QTreeWidget()
        if self.predictions_fname.endswith('.csv'):
            self.predictions_df = pd.read_csv(self.predictions_fname)
        elif self.predictions_fname.endswith('.xlsx'):
            self.predictions_df = pd.read_excel(self.predictions_fname)
        else:
            print('Please select .csv or .xlsx file')
            return 0
        self.update_predictionfile_display()
        # keep the original row order around, and normalise the column names
        self.predictions_df['Index'] = self.predictions_df.index
        self.predictions_df.columns = [colname.lower() for colname in self.predictions_df.columns]
        self.predictions_df.fillna(value = '', inplace=True)
        if self.h5directory is None:
            self.set_h5_folder()
        #print(self.predictions_df)
        for i,row in list(self.predictions_df.iterrows()):
            #todo correct this
            fpath = os.path.join(self.h5directory, row['filename'])
            #TODO : decide what to do with tids, this is not thought out at the moment
            #So not bothering to load the tids here as should be one only per seizure... can either load on demand
            # or use only for the data explorer stuff. Can maybe have dynamic when you click to see the full tree.
            # problem is with files with many false positives, spend time loading for too long!
            # or have a button for this...
            #h5 = H5File(fpath)
            #tids = h5.attributes['t_ids']
            try:
                tids = row['transmitter']
            except:
                # this is legacy from when there was only one
                print('WARNING: DO NOT RELY ON ONE TID PER FILE - TELL JONNY')
                tids = [int(fpath.split(']')[0].split('[')[1])]
            s, e = row['start'], row['end']
            self.populate_tree_items_list_from_predictions(row, tids) # this just populates self.tree_items
        self.treeWidget.addTopLevelItems(self.tree_items)
        # flags recording which of the mutually-exclusive views the tree shows
        self.predictions_up = True
        self.library_up = False
        self.file_dir_up = False
        self.substates_up = False
    def populate_tree_items_list_from_predictions(self, row, tids):
        '''Append one tree row built from a predictions dataframe row.

        Real (wall-clock) start/end are taken from the row when present, else
        derived from the filename timestamp; blank rows get blank real times.
        '''
        # todo refactor this name to annoations etc
        self.deleted_tree_items = []
        self.treeWidget.setColumnCount(7)
        self.treeWidget.setHeaderLabels(['index', 'start', 'end','duration', 'tid', 'fname', 'real_start', 'real_end'])
        filename = row['filename']
        index = row['index']
        start = row['start']
        end = row['end']
        if row['start'] !='' and row['end'] !='':
            try: # if made with, then will have both
                real_start = row['real_start']
                real_end = row['real_end']
            except:
                # fall back to computing wall-clock times from the filename
                real_start = self.handler.get_time_from_seconds_and_filepath(filename,
                                                                             start,
                                                                             split_on_underscore=True).round('s')
                real_end = self.handler.get_time_from_seconds_and_filepath(filename,
                                                                           end,
                                                                           split_on_underscore=True).round('s')
        else:
            real_start, real_end = '',''
        try:
            duration = row['end']-row['start']
        except:
            duration = ''
        fname_entry = [str(filename)]
        details_entry = [str(index),
                         str(start),
                         str(end),
                         str(duration),
                         str(tids), # bad, should make only tid having one explicit - predictions should only have one!
                         str(filename),
                         str(real_start),
                         str(real_end)]
        item = TreeWidgetItem(details_entry)
        item.setFirstColumnSpanned(True)
        self.tree_items.append(item)
def make_new_tree_entry_from_current(self,item,xpos):
''' for adding seizures with start '''
current_tid = [self.tid_spinBox.value()]
details_entry = [str(item.text(0)),
str(''),
str(''),
str(''),
str(current_tid), # bad, should make only tid having one explicit
str(item.text(5)),
str(''),
str('')]
new_item = TreeWidgetItem(details_entry)
return new_item
    def add_new_entry_to_tree_widget(self,xpos):
        ''' Insert a blank annotation row after the current one and select it,
        keeping the main plot's x window unchanged (file-dir view only). '''
        current_item = self.treeWidget.currentItem()
        root = self.treeWidget.invisibleRootItem()
        index= root.indexOfChild(current_item)
        test_item = self.make_new_tree_entry_from_current(current_item,xpos)
        self.tree_items.insert(index+1, test_item)
        self.treeWidget.insertTopLevelItem(index+1, test_item)
        # selecting the new row triggers replotting; restore the x window after
        xlims = self.plot_1.getViewBox().viewRange()[0]
        self.treeWidget.setCurrentItem(test_item)
        self.plot_1.getViewBox().setXRange(min = xlims[0],max = xlims[1], padding=0)
    def add_start_line_to_h5_file(self,xpos):
        '''Start a new annotation: add a tree row and a draggable start line at xpos.'''
        self.add_new_entry_to_tree_widget(xpos) # this makes new slects it, and sets xrange to the same.
        # now make lines and wipe end line
        start_pen = pg.mkPen((85, 168, 104), width=3, style= Qt.DashLine)
        self.start_line = pg.InfiniteLine(pos=xpos, pen =start_pen, movable=True,label='{value:0.2f}',
                                          labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,0), 'movable': True})
        self.plot_1.addItem(self.start_line)
        self.treeWidget.currentItem().setText(1,'{:.2f}'.format(self.start_line.x()))
        self.start_line.sigPositionChanged.connect(self.update_tree_element_start_time)
        # no end yet - add_end_line_to_h5 will create it on the next Alt-click
        self.end_line = None
    def set_end_and_calc_duration(self):
        '''Write the end-line position into column 2 and refresh the duration.'''
        current_item = self.treeWidget.currentItem()
        current_item.setText(2,'{:.2f}'.format(self.end_line.x()))
        self.update_tree_element_duration()
    def update_real_times(self):
        '''Recompute wall-clock start/end (columns 6/7) from the filename
        timestamp plus the seconds in columns 1/2.'''
        # this is called by the update_tree_element_duration
        try:
            tree_row = self.treeWidget.currentItem()
            fname = tree_row.text(5)
            real_start = self.handler.get_time_from_seconds_and_filepath(fname,float(tree_row.text(1)), split_on_underscore = True).round('s')
            real_end = self.handler.get_time_from_seconds_and_filepath(fname,float(tree_row.text(2)), split_on_underscore = True ).round('s')
            tree_row.setText(6, str(real_start))
            tree_row.setText(7, str(real_end))
        except:
            throw_error()
            print('caught error at 777')
    def add_end_line_to_h5(self, xpos):
        '''Place (or move) the draggable end line at xpos and update the tree row.'''
        # do something to move existing end line self.check_end_line_exists()
        if self.end_line is None:
            end_pen = pg.mkPen((210,88,88), width=3, style= Qt.DashLine)
            self.end_line = pg.InfiniteLine(pos=xpos, pen = end_pen, movable=True,label='{value:0.2f}',
                                            labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,0), 'movable': True})
            self.plot_1.addItem(self.end_line)
            self.end_line.sigPositionChanged.connect(self.update_tree_element_end_time)
        else:
            # reuse the existing line - just move it
            self.end_line.setValue(xpos)
        self.set_end_and_calc_duration()
    def mouse_click_on_main(self,evt):
        '''Main-plot click handler: Shift-click adds a start line, Alt-click an
        end line. Disabled while a library is displayed.'''
        pos = evt[0].scenePos()
        if self.plot_1.sceneBoundingRect().contains(pos):
            mousePoint = self.bx1.mapSceneToView(pos) # bx1 is just self.plot_1.getViewBox()
            modifier = evt[0].modifiers()
            if modifier == Qt.ShiftModifier:
                if self.library_up:
                    throw_error('Unfortunately unable to add to library at the moment. You have to edit the annotations csv that was used to make the library, sorry.' )
                    return 0
                self.add_start_line_to_h5_file(mousePoint.x())
            elif modifier == Qt.AltModifier:
                if self.library_up:
                    throw_error('Unfortunately unable to add to library at the moment. You have to edit the annotations csv that was used to make the library, sorry.' )
                    return 0
                self.add_end_line_to_h5(mousePoint.x())
    def mouse_click_in_overview(self,evt):
        '''Overview-plot click handler: centre the main plot's x window (keeping
        its current width) on the clicked position.'''
        # signal for this is coming from self.data,
        # evt[0] should be a pyqtgraph.GraphicsScene.mouseEvents.MouseClickEvent
        pos = evt[0].scenePos()
        if self.plot_overview.sceneBoundingRect().contains(pos):
            mousePoint = self.bx_overview.mapSceneToView(pos)
            x = int(mousePoint.x())
            y = int(mousePoint.y())
            xrange, _ = self.get_main_plot_xrange_and_mid()
            try:
                # sanity check only - proceeds with a warning either way
                assert xrange > 0
            except:
                print('Your view range is messed up')
            self.plot_1.getViewBox().setXRange(min = x - xrange/2.0,
                                               max = x + xrange/2.0, padding=0)
def get_main_plot_xrange_and_mid(self):
xlims = self.plot_1.getViewBox().viewRange()[0]
xrange = xlims[1]-xlims[0]
xmid = xlims[0]+xrange/2.0
return xrange, xmid
    def xrange_change(self):
        '''Slot for xrange_spinBox.valueChanged: resize the main plot's x window
        around its current centre, unless the change originated from the plot.'''
        #self.xrange_spinBox.valueChanged.connect(self.xrange_change)
        xrange = self.xrange_spinBox.value()
        if xrange>0:
            if self.plot_change == False:
                _, xmid = self.get_main_plot_xrange_and_mid()
                self.plot_1.getViewBox().setXRange(min = xmid - xrange/2.0,
                                                   max = xmid + xrange/2.0, padding=0)
            elif self.plot_change == True:
                # changing because plot has already changed - not key or spinbox alteration
                self.plot_change = False
        else:
            pass
    def xrange_changed_on_plot(self):
        '''Slot for plot_1.sigXRangeChanged: mirror the plot's x width into the
        spinbox, flagging plot_change so xrange_change ignores the echo.'''
        xrange, xmid = self.get_main_plot_xrange_and_mid()
        self.plot_change = True
        self.xrange_spinBox.setValue(xrange)
    def undo_tree_deletion(self):
        '''Re-insert the most recently deleted tree row at its index-sorted position.'''
        if len(self.deleted_tree_items) == 0:
            print('Nothing to undo')
            return 0
        # pop the last deleted row off the undo stack
        old_item = self.deleted_tree_items[-1]
        del self.deleted_tree_items[-1]
        original_index = int(old_item.text(0))
        tree_index = self.get_new_index_for_deleted_tree_element(original_index)
        self.treeWidget.insertTopLevelItem(tree_index, old_item)
        self.treeWidget.setCurrentItem(old_item)
def get_new_index_for_deleted_tree_element(self, deleted_element_index):
root = self.treeWidget.invisibleRootItem()
child_count = root.childCount()
index_list = []
for i in range(child_count):
item = root.child(i)
index_list.append(int(item.text(0)))
correct_tree_index = bisect.bisect_left(index_list, int(deleted_element_index))
return correct_tree_index
    def keyPressEvent(self, eventQKeyEvent):
        '''Global key handling: digits set the x window width; arrows zoom/scroll
        (or adjust scroll speed when scrolling); Delete/Backspace removes the
        current row (undoable with Z / Ctrl+Z); B toggles blink; Space toggles
        scrolling.'''
        key_id = eventQKeyEvent.key()
        modifier = eventQKeyEvent.modifiers()
        # map Qt.Key_1..Qt.Key_9 to the integers 1..9
        key_id_to_numbers = {eval('Qt.Key_'+str(i)):i for i in range(1,10)}
        if key_id in list(key_id_to_numbers.keys()):
            self.plot_change = False # disable this as key now entered
            key_val = key_id_to_numbers[key_id]
            if key_val == self.xrange_spinBox.value():
                self.xrange_change() # just call again
            else:
                self.xrange_spinBox.setValue(key_val)
                # connected trigger will call xrange change
        x,y = self.plot_1.getViewBox().viewRange()
        if key_id == Qt.Key_Delete or key_id == Qt.Key_Backspace:
            # store the deleted element so you can undo it
            tree_entry = self.treeWidget.currentItem()
            self.deleted_tree_items.append(tree_entry)
        if key_id == Qt.Key_Z and modifier == Qt.ControlModifier:
            self.undo_tree_deletion()
        if key_id == Qt.Key_Z :
            self.undo_tree_deletion()
        if key_id == Qt.Key_Up:
            if self.scroll_flag==True:
                # scrolling: double the scroll speed
                scroll_rate = self.scroll_speed_box.value()
                new_rate = scroll_rate * 2
                self.scroll_speed_box.setValue(new_rate)
                if self.blink ==True: self.reset_timer()
            else:
                # not scrolling: zoom in on y
                self.plot_1.getViewBox().setYRange(min = y[0]*0.9, max = y[1]*0.9, padding = 0)
        if key_id == Qt.Key_Down:
            if self.scroll_flag==True:
                # scrolling: halve the scroll speed (minimum 1)
                scroll_rate = self.scroll_speed_box.value()
                if scroll_rate > 1:
                    new_rate = int(scroll_rate / 2)
                    self.scroll_speed_box.setValue(new_rate)
                    if self.blink ==True: self.reset_timer()
            else: # just zoom
                self.plot_1.getViewBox().setYRange(min = y[0]*1.1, max = y[1]*1.1,padding = 0)
        if key_id == Qt.Key_Right:
            if self.scroll_flag==True:
                self.scroll_sign = 1
            else:
                #scroll_i = (x[1]-x[0])*0.01*self.scroll_speed_box.value()
                #if scroll_i > x[1]-x[0]: scroll_i = x[1]-x[0]
                #self.plot_1.getViewBox().setXRange(min = x[0]+scroll_i, max = x[1]+scroll_i, padding=0)
                # page right by one full window width
                scroll_i = (x[1]-x[0])*1
                new_min = x[0]+scroll_i
                new_max = x[1]+scroll_i
                self.plot_1.getViewBox().setXRange(min =new_min, max = new_max, padding=0)
        if key_id == Qt.Key_Left:
            if self.scroll_flag==True:
                self.scroll_sign = -1
            else:
                #scroll_i = (x[1]-x[0])*0.01*self.scroll_speed_box.value()
                #if scroll_i > x[1]-x[0]: scroll_i = x[1]-x[0]
                #self.plot_1.getViewBox().setXRange(min = x[0]-scroll_i, max = x[1]-scroll_i, padding=0)
                # page left by one full window width
                scroll_i = (x[1]-x[0])*-1
                new_min = x[0]+scroll_i
                new_max = x[1]+scroll_i
                #if new_max < xmax:
                #self.get_next_tree_item()
                self.plot_1.getViewBox().setXRange(min =new_min, max = new_max, padding=0)
        if key_id == Qt.Key_Backspace or key_id == Qt.Key_Delete:
            # actually remove the row (it was stashed for undo above)
            current_item = self.treeWidget.currentItem()
            root = self.treeWidget.invisibleRootItem()
            root.removeChild(current_item)
        if key_id == Qt.Key_B:
            if self.scroll_flag==True:
                # self.blink is used as +1/-1 toggle
                self.blink *= -1
                if self.blink == True:
                    self.blink_box.setChecked(True)
                else:
                    self.blink_box.setChecked(False)
                self.reset_timer()
        if key_id == Qt.Key_Space:
            self.scroll_sign = 1
            # toggle the scrolling checkbox; its stateChanged slot does the rest
            self.checkBox_scrolling.setChecked([1,0][self.checkBox_scrolling.isChecked()])
            #self.scroll_flag *= -1
    def scroll_checkbox_statechange(self):
        '''Slot for the scrolling checkbox: set the +1/-1 scroll flag and restart
        the scroll timer.'''
        self.scroll_sign = 1
        self.scroll_flag = [-1,1][self.checkBox_scrolling.isChecked()]
        self.reset_timer()
    def blink_box_change(self):
        '''Slot for the blink checkbox: restart the scroll timer with new settings.'''
        self.reset_timer()
        #print('someone changed the blink box')
    def scroll_speed_change(self):
        '''Slot for the scroll-speed spinbox: restart the scroll timer.'''
        self.reset_timer()
    def reset_timer(self):
        '''Restart the scroll timer: blink mode fires at 2000/rate ms, smooth
        scrolling at a fixed 20 ms; stopped entirely when not scrolling.'''
        scroll_rate = self.scroll_speed_box.value()
        if self.scroll_flag==True:
            self.timer.stop()
            if self.blink_box.isChecked():
                rate = int(2000/scroll_rate)
                #print(rate)
                self.timer.start(rate)
            else:
                self.timer.start(20)
        else:
            self.timer.stop()
def simple_scroll(self):
x,y = self.plot_1.getViewBox().viewRange()
scroll_rate = self.scroll_speed_box.value()
xlims = self.plot_overview.getViewBox().viewRange()[0]
xmax = xlims[1]
if self.blink_box.isChecked() != True:
scroll_i = (x[1]-x[0])*(0.001*scroll_rate)*self.scroll_sign
new_min = x[0]+scroll_i
new_max = x[1]+scroll_i
if new_max < xmax-1:
#self.get_next_tree_item()
self.plot_1.getViewBox().setXRange(min =new_min, max = new_max, padding=0)
elif self.blink_box.isChecked():
scroll_i = (x[1]-x[0])*self.scroll_sign
new_min = x[0]+scroll_i
new_max = x[1]+scroll_i
if new_max < xmax:
#self.get_next_tree_item()
self.plot_1.getViewBox().setXRange(min =new_min, max = new_max, padding=0)
    def load_h5_file(self,fname):
        """Load an HDF5 file on a background thread without blocking the GUI.

        Spawns a LoadFileThread for *fname* and wires its result signal to
        catch_data(), which receives the loaded H5File object.
        """
        self.loading_thread = LoadFileThread(fname)
        #self.connect(self.loading_thread, SIGNAL("finished()"), self.done)
        # NOTE(review): old-style connect() normally takes SIGNAL(...), not
        # pyqtSignal(...) - confirm this connection actually delivers.
        self.connect(self.loading_thread, pyqtSignal("catch_data(PyQt_PyObject)"), self.catch_data)
        self.loading_thread.start()
    def catch_data(self, h5obj):
        """Receive the loaded H5File object from the loader thread and plot it."""
        self.h5obj = h5obj
        self.plot_traces()
    def done(self):
        """Show a message box announcing that file loading finished."""
        QtGui.QMessageBox.information(self, "Done!", "Done loading!")
class LoadFileThread(QThread):
    """Background thread that loads an HDF5 file without blocking the GUI.

    Emits ``catch_data(PyQt_PyObject)`` with the loaded H5File object when
    loading completes.
    """
    def __init__(self, filename):
        # filename: path of the HDF5 file to load in run().
        QThread.__init__(self)
        self.filename = filename
    def __del__(self):
        # Block until the thread finishes so it is not destroyed mid-run.
        self.wait()
    def load_file(self, filename):
        """Open *filename* and keep the resulting H5File on the instance."""
        self.h5obj = H5File(filename)
    def run(self):
        print('sup, loading: '+self.filename)
        self.load_file(self.filename)
        # NOTE(review): old-style emit() usually takes SIGNAL(...), not
        # pyqtSignal(...) - confirm the signal is actually delivered.
        self.emit(pyqtSignal('catch_data(PyQt_PyObject)'), self.h5obj)
class HDF5Plot(pg.PlotCurveItem):
    """
    Create a subclass of PlotCurveItem for displaying a very large
    data set from an HDF5 file that does not necessarily fit in memory.

    The basic approach is to override PlotCurveItem.viewRangeChanged such that it
    reads only the portion of the HDF5 data that is necessary to display the visible
    portion of the data. This is further downsampled to reduce the number of samples
    being displayed.

    A more clever implementation of this class would employ some kind of caching
    to avoid re-reading the entire visible waveform at every update.
    """
    def __init__(self, downsample_limit = 20000,viewbox = None, *args, **kwds):
        """Create the curve item.

        downsample_limit: maximum number of samples pushed to the plot.
        viewbox: ViewBox whose visible x-range drives data loading.
        Remaining args/kwds are forwarded to PlotCurveItem.
        """
        self.hdf5 = None                 # raw data array (set via setHDF5)
        self.hdf5_filtered_data = None   # lazily computed high-passed copy
        self.time = None                 # time axis matching self.hdf5
        self.fs = None                   # sampling rate in Hz
        self.vb = viewbox
        self.limit = downsample_limit # maximum number of samples to be plotted, 10000 orginally
        self.display_filter = None       # truthy -> apply high-pass before plotting
        self.hp_cutoff = None            # high-pass cutoff (Hz)
        self.lp_cutoff = None            # low-pass cutoff (Hz) - currently unused here
        pg.PlotCurveItem.__init__(self, *args, **kwds)
        # Pick a pen colour that contrasts with the configured background.
        if pg.CONFIG_OPTIONS['background'] == 'w':
            self.pen = (0,0,0)
        else:
            self.pen = (255,255,255)
    def keyPressEvent(self, event):
        ''' this doesnt work, change key press to correct it.'''
        # NOTE(review): author reports this handler never fires - likely a
        # focus/key-forwarding issue; confirm before relying on Escape-to-close.
        if event.key() == QtCore.Qt.Key_Escape:
            self.close()
        else:
            pass
            #print(event.key())
    def setHDF5(self, data, time, fs):
        """Attach the data array, matching time axis and sampling rate,
        then redraw."""
        self.hdf5 = data
        self.time = time
        self.fs = fs
        #print ( self.hdf5.shape, self.time.shape)
        self.updateHDF5Plot()
    def set_display_filter_settings(self, display_filter, hp_cutoff, lp_cutoff):
        """Store display-filter settings used by updateHDF5Plot.

        display_filter: truthy enables the high-pass filter on display.
        hp_cutoff / lp_cutoff: cutoff frequencies in Hz.
        """
        self.display_filter = display_filter
        self.hp_cutoff = hp_cutoff
        self.lp_cutoff = lp_cutoff
    def highpass_filter(self, data):
        '''
        Implements high pass digital butterworth filter, order 2.

        Uses self.hp_cutoff (Hz) relative to the Nyquist frequency and
        filters forwards and backwards (zero phase) with filtfilt.
        '''
        nyq = 0.5 * self.fs
        cutoff_decimal = self.hp_cutoff/nyq
        b, a = signal.butter(2, cutoff_decimal, 'highpass', analog=False)
        filtered_data = signal.filtfilt(b, a, data)
        return filtered_data
    def wipe_filtered_data(self):
        """Drop the cached filtered copy so it is recomputed on next draw."""
        self.hdf5_filtered_data = None
    def viewRangeChanged(self):
        # Redraw whenever the visible range changes (pan/zoom).
        self.updateHDF5Plot()
    def updateHDF5Plot(self):
        """Reload the visible slice of data, downsample it if needed, and
        push it to the curve."""
        if self.hdf5 is None:
            self.setData([])
            return 0
        if self.display_filter:
            # Filter lazily and cache the result until wipe_filtered_data().
            if self.hdf5_filtered_data is None:
                self.hdf5_filtered_data = hdf5data = self.highpass_filter(self.hdf5)
            hdf5data = self.hdf5_filtered_data
        else:
            hdf5data = self.hdf5
        #vb = self.getViewBox()
        #if vb is None:
        #    return  # no ViewBox yet
        # Determine what data range must be read from HDF5
        # (view range is in seconds; multiply by fs to get sample indices).
        xrange = [i*self.fs for i in self.vb.viewRange()[0]]
        start = max(0,int(xrange[0])-1)
        stop = min(len(hdf5data), int(xrange[1]+2))
        if stop-start < 1:
            print('didnt update')
            return 0
        # Decide by how much we should downsample
        ds = int((stop-start) / self.limit) + 1
        if ds == 1:
            # Small enough to display with no intervention.
            visible_y = hdf5data[start:stop]
            visible_x = self.time[start:stop]
            scale = 1
        else:
            # Here convert data into a down-sampled array suitable for visualizing.
            # Must do this piecewise to limit memory usage.
            samples = 1 + ((stop-start) // ds)
            # Two output samples (min + max) per downsample bin.
            visible_y = np.zeros(samples*2, dtype=hdf5data.dtype)
            visible_x = np.zeros(samples*2, dtype=self.time.dtype)
            sourcePtr = start
            targetPtr = 0
            # read data in chunks of ~1M samples
            chunkSize = (1000000//ds) * ds
            while sourcePtr < stop-1:
                chunk = hdf5data[sourcePtr:min(stop,sourcePtr+chunkSize)]
                chunk_x = self.time[sourcePtr:min(stop,sourcePtr+chunkSize)]
                sourcePtr += len(chunk)
                #print(chunk.shape, chunk_x.shape)
                # reshape chunk to be integral multiple of ds
                chunk = chunk[:(len(chunk)//ds) * ds].reshape(len(chunk)//ds, ds)
                chunk_x = chunk_x[:(len(chunk_x)//ds) * ds].reshape(len(chunk_x)//ds, ds)
                # compute max and min
                #chunkMax = chunk.max(axis=1)
                #chunkMin = chunk.min(axis=1)
                # argmax/argmin are used (rather than max/min) so the x
                # positions of the extremes can be recovered as well.
                mx_inds = np.argmax(chunk, axis=1)
                mi_inds = np.argmin(chunk, axis=1)
                row_inds = np.arange(chunk.shape[0])
                chunkMax = chunk[row_inds, mx_inds]
                chunkMin = chunk[row_inds, mi_inds]
                chunkMax_x = chunk_x[row_inds, mx_inds]
                chunkMin_x = chunk_x[row_inds, mi_inds]
                # interleave min and max into plot data to preserve envelope shape
                visible_y[targetPtr:targetPtr+chunk.shape[0]*2:2] = chunkMin
                visible_y[1+targetPtr:1+targetPtr+chunk.shape[0]*2:2] = chunkMax
                visible_x[targetPtr:targetPtr+chunk_x.shape[0]*2:2] = chunkMin_x
                visible_x[1+targetPtr:1+targetPtr+chunk_x.shape[0]*2:2] = chunkMax_x
                targetPtr += chunk.shape[0]*2
            # Trim the unused tail of the pre-allocated buffers.
            visible_x = visible_x[:targetPtr]
            visible_y = visible_y[:targetPtr]
            #print('**** now downsampling')
            #print(visible_y.shape, visible_x.shape)
            scale = ds * 0.5
        # TODO: setPos, scale, resetTransform methods... scale?
        self.setData(visible_x, visible_y, pen=self.pen)  # update the plot
        #self.setPos(start, 0) # shift to match starting index ### Had comment out to stop it breaking... when limit is >0?!
        self.resetTransform()
        #self.scale(scale, 1)  # scale to match downsampling
def main():
    """Build the Qt application, show the main window and run the event loop."""
    app = QtGui.QApplication(sys.argv)
    maingui = MainGui()
    # cannot get menu bar to show on first launch - need to click of and back
    #maingui.menuBar.raise_()
    #maingui.menuBar.show()
    #maingui.menuBar.activateWindow()
    #maingui.menuBar.focusWidget(True)
    maingui.menuBar.setNativeMenuBar(False) # therefore turn of native
    maingui.raise_()
    maingui.show()
    app.exec_()
# Run the GUI when executed as a script.
if __name__ == '__main__':
    main()
| |
# Adapted from https://github.com/mccutchen/triangulizor
# Copyright (C) 2012 by Will McCutchen and individual contributors.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import aggdraw
import math
from PIL import Image
import itertools
import logging
import util
def pxl(image, tile_size=32):
    """Processes the given image by breaking it down into tiles of the given
    size and applying a triangular effect to each tile. Returns the processed
    image as a PIL Image object.

    The image can be given as anything suitable for passing to `Image.open`
    (ie, the path to an image or as a file-like object containing image data).

    If tile_size is 0, the tile size will be guessed based on the image
    size. It will also be adjusted to be divisible by 2 if it is not already.
    """
    # Make sure we have a usable tile size, by guessing based on image size
    # and making sure it's a multiple of two.
    if tile_size == 0:
        tile_size = guess_tile_size(image)
    if tile_size % 2 != 0:
        # Floor division truncates an odd size down to the nearest even
        # value.  `//` keeps this an int on both Python 2 and 3; plain `/`
        # would yield a float under Python 3 and break later range() calls.
        tile_size = (tile_size // 2) * 2
    logging.info('Input image size: %r', image.size)
    logging.info('Tile size: %r', tile_size)
    # Preprocess image to make sure it's at a size we can handle
    image = prep_image(image, tile_size)
    logging.info('Prepped image size: %r', image.size)
    # Get pixmap (for direct pixel access) and draw objects for the image.
    pix = image.load()
    draw = aggdraw.Draw(image)
    # Process the image, tile by tile
    for x, y in iter_tiles(image, tile_size):
        process_tile(x, y, tile_size, pix, draw, image)
    draw.flush()
    return image
def process_tile(tile_x, tile_y, tile_size, pix, draw, image):
    """Process a tile whose top left corner is at the given x and y
    coordinates.

    Chooses a diagonal split direction based on which pair of quadrant
    colors is "closest", then paints the two resulting triangles with the
    averaged colors of their halves.
    """
    logging.debug('Processing tile (%d, %d)', tile_x, tile_y)
    # Calculate average color for each "triangle" in the given tile
    n, e, s, w = triangle_colors(tile_x, tile_y, tile_size, pix)
    # Calculate distance between triangle pairs
    d_ne = get_color_dist(n, e)
    d_nw = get_color_dist(n, w)
    d_se = get_color_dist(s, e)
    d_sw = get_color_dist(s, w)
    # Figure out which pair is the closest, which will determine the direction
    # we'll split this tile into triangles. A 'right' split runs from top left
    # to bottom right. A 'left' split runs bottom left to top right.
    # min() replaces sorted(...)[0] - same result, no need to sort all four.
    # NOTE(review): the distances are tuples, so this comparison is
    # lexicographic per channel rather than by magnitude - confirm intended.
    closest = min(d_ne, d_nw, d_se, d_sw)
    if closest in (d_ne, d_sw):
        split = 'right'
    else:
        split = 'left'
    # Figure out the average color for each side of the "split"
    if split == 'right':
        top_color = get_average_color([n, e])
        bottom_color = get_average_color([s, w])
    else:
        top_color = get_average_color([n, w])
        bottom_color = get_average_color([s, e])
    draw_triangles(tile_x, tile_y, tile_size, split, top_color, bottom_color,
                   draw)
def triangle_colors(tile_x, tile_y, tile_size, pix):
    """Extracts the average color for each triangle in the given tile. Returns
    a 4-tuple of colors for the triangles in this order: North, East, South,
    West (clockwise).

    ``pix`` must support ``pix[x, y]`` access (e.g. a PIL pixmap).
    range() and // replace the Python-2-only xrange and true-division,
    producing identical iteration on both Python versions.
    """
    quad_size = tile_size // 2  # tile_size is even, so this is exact
    # North triangle: rows shrink inward as y moves toward the middle.
    north = []
    for y in range(tile_y, tile_y + quad_size):
        x_off = y - tile_y
        for x in range(tile_x + x_off, tile_x + tile_size - x_off):
            north.append(pix[x, y])
    # South triangle: mirror of north in the lower half.
    south = []
    for y in range(tile_y + quad_size, tile_y + tile_size):
        x_off = tile_y + tile_size - y
        for x in range(tile_x + x_off, tile_x + tile_size - x_off):
            south.append(pix[x, y])
    # NOTE(review): "east" iterates columns starting at the tile's LEFT
    # edge and "west" at the right - the names look swapped; confirm.
    east = []
    for x in range(tile_x, tile_x + quad_size):
        y_off = x - tile_x
        for y in range(tile_y + y_off, tile_y + tile_size - y_off):
            east.append(pix[x, y])
    west = []
    for x in range(tile_x + quad_size, tile_x + tile_size):
        y_off = tile_x + tile_size - x
        for y in range(tile_y + y_off, tile_y + tile_size - y_off):
            west.append(pix[x, y])
    # List comprehension returns a list on Py3 too (map() would not).
    return [get_average_color(t) for t in (north, east, south, west)]
def draw_triangles(tile_x, tile_y, tile_size, split, top_color, bottom_color,
                   draw):
    """Draws a triangle on each half of the tile with the given coordinates
    and size.

    A 'left' split runs bottom-left to top-right; a 'right' split runs
    top-left to bottom-right.
    """
    assert split in ('right', 'left')
    # The four corners of this tile.
    left, top = tile_x, tile_y
    right = tile_x + tile_size - 1
    bottom = tile_y + tile_size
    nw, ne = (left, top), (right, top)
    sw, se = (left, bottom), (right, bottom)
    if split == 'left':
        # Top-right half, then bottom-left half.
        halves = ((nw, ne, se, top_color), (nw, sw, se, bottom_color))
    else:
        # Top-left half, then bottom-right half.
        halves = ((sw, nw, ne, top_color), (sw, se, ne, bottom_color))
    for a, b, c, color in halves:
        draw_triangle(a, b, c, color, draw)
def draw_triangle(a, b, c, color, draw):
    """Draws a triangle with the given vertices in the given color."""
    # aggdraw takes a flat coordinate sequence: (x1, y1, x2, y2, x3, y3);
    # the same color is used for both outline (pen) and fill (brush).
    coords = a + b + c
    draw.polygon(coords, aggdraw.Pen(color), aggdraw.Brush(color))
def get_average_color(colors):
    """Calculate the average color from the list of colors, where each color
    is a 3-tuple of (r, g, b) values.

    Returns a tuple with the floor-averaged value of each channel.
    """
    total = len(colors)
    # zip(*colors) groups each channel's values together.  Floor division
    # (//) matches the Python 2 integer `/` the original relied on and
    # keeps channel values integral under Python 3; this also drops the
    # Py3-incompatible builtin reduce()/itertools.izip combination.
    return tuple(sum(channel) // total for channel in zip(*colors))
def color_reducer(c1, c2):
    """Helper function used to add two colors together when averaging."""
    # zip() works on both Python 2 and 3; itertools.izip was removed in 3.
    return tuple(v1 + v2 for v1, v2 in zip(c1, c2))
def get_color_dist(c1, c2):
    """Calculates the "distance" between two colors, where the distance is
    another color whose components are the absolute values of the difference
    between each component of the input colors.
    """
    # zip() works on both Python 2 and 3; itertools.izip was removed in 3.
    return tuple(abs(v1 - v2) for v1, v2 in zip(c1, c2))
def prep_image(image, tile_size):
    """Takes an image and a tile size and returns a possibly cropped version
    of the image that is evenly divisible in both dimensions by the tile size.
    """
    w, h = image.size
    # The original comment said "floor division" but used `/`, which is
    # true division under Python 3; `//` makes it actually floor on both.
    x_tiles = w // tile_size
    y_tiles = h // tile_size
    new_w = x_tiles * tile_size
    new_h = y_tiles * tile_size
    if new_w == w and new_h == h:
        # Already evenly divisible - return the image unchanged.
        return image
    # Crop from the top-left corner down to the largest divisible size.
    return image.crop((0, 0, new_w, new_h))
def iter_tiles(image, tile_size):
    """Yields (x, y) coordinate pairs for the top left corner of each tile in
    the given image, based on the given tile size.
    """
    w, h = image.size
    # range() works on both Python 2 and 3 (xrange was removed in 3).
    for y in range(0, h, tile_size):
        for x in range(0, w, tile_size):
            yield x, y
def guess_tile_size(image):
    """Try to pick an appropriate tile size based on the image's size."""
    # Heuristic: 5% of the image's largest dimension, truncated to an int.
    width, height = image.size
    largest = max(width, height)
    return int(largest * 0.05)
# Command-line entry point: pxl INPUT_IMAGE OUTPUT_IMAGE
if __name__ == '__main__':
    image = Image.open(sys.argv[1])
    image = image.convert('RGB')
    tile_size = 32
    # Round the jam dimensions up to the next multiple of the tile size.
    width = util.WIDTH + tile_size - (util.WIDTH % tile_size)
    height = util.HEIGHT + tile_size- (util.HEIGHT % tile_size)
    # image = util.resize_jam_background(image, width, height)
    image = pxl(image)
    image.save(sys.argv[2], quality=90)
| |
"""Support KNX devices."""
from __future__ import annotations
import asyncio
import logging
from typing import Final
import voluptuous as vol
from xknx import XKNX
from xknx.core import XknxConnectionState
from xknx.core.telegram_queue import TelegramQueue
from xknx.dpt import DPTArray, DPTBase, DPTBinary
from xknx.exceptions import XKNXException
from xknx.io import ConnectionConfig, ConnectionType
from xknx.telegram import AddressFilter, Telegram
from xknx.telegram.address import parse_device_group_address
from xknx.telegram.apci import GroupValueRead, GroupValueResponse, GroupValueWrite
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
SERVICE_RELOAD,
)
from homeassistant.core import Event, HomeAssistant, ServiceCall
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import async_get_platforms
from homeassistant.helpers.reload import async_integration_yaml_config
from homeassistant.helpers.service import async_register_admin_service
from homeassistant.helpers.typing import ConfigType
from .const import (
CONF_KNX_EXPOSE,
CONF_KNX_INDIVIDUAL_ADDRESS,
CONF_KNX_ROUTING,
CONF_KNX_TUNNELING,
DOMAIN,
KNX_ADDRESS,
SupportedPlatforms,
)
from .expose import KNXExposeSensor, KNXExposeTime, create_knx_exposure
from .schema import (
BinarySensorSchema,
ClimateSchema,
ConnectionSchema,
CoverSchema,
ExposeSchema,
FanSchema,
LightSchema,
NotifySchema,
NumberSchema,
SceneSchema,
SelectSchema,
SensorSchema,
SwitchSchema,
WeatherSchema,
ga_validator,
sensor_type_validator,
)
# Module-level logger for the KNX integration.
_LOGGER = logging.getLogger(__name__)

# YAML configuration keys.
CONF_KNX_FIRE_EVENT: Final = "fire_event"
CONF_KNX_EVENT_FILTER: Final = "event_filter"

# Service names and service-call attribute keys.
SERVICE_KNX_SEND: Final = "send"
SERVICE_KNX_ATTR_PAYLOAD: Final = "payload"
SERVICE_KNX_ATTR_TYPE: Final = "type"
SERVICE_KNX_ATTR_REMOVE: Final = "remove"
SERVICE_KNX_EVENT_REGISTER: Final = "event_register"
SERVICE_KNX_EXPOSURE_REGISTER: Final = "exposure_register"
SERVICE_KNX_READ: Final = "read"

# Top-level YAML schema: connection options, event filter and one optional
# node per supported platform.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            # deprecated since 2021.4
            cv.deprecated("config_file"),
            # deprecated since 2021.2
            cv.deprecated(CONF_KNX_FIRE_EVENT),
            cv.deprecated("fire_event_filter", replacement_key=CONF_KNX_EVENT_FILTER),
            vol.Schema(
                {
                    **ConnectionSchema.SCHEMA,
                    vol.Optional(CONF_KNX_FIRE_EVENT): cv.boolean,
                    vol.Optional(CONF_KNX_EVENT_FILTER, default=[]): vol.All(
                        cv.ensure_list, [cv.string]
                    ),
                    **ExposeSchema.platform_node(),
                    **BinarySensorSchema.platform_node(),
                    **ClimateSchema.platform_node(),
                    **CoverSchema.platform_node(),
                    **FanSchema.platform_node(),
                    **LightSchema.platform_node(),
                    **NotifySchema.platform_node(),
                    **NumberSchema.platform_node(),
                    **SceneSchema.platform_node(),
                    **SelectSchema.platform_node(),
                    **SensorSchema.platform_node(),
                    **SwitchSchema.platform_node(),
                    **WeatherSchema.platform_node(),
                }
            ),
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# `knx.send`: payload either typed (DPT-encoded) or raw bytes/bits.
SERVICE_KNX_SEND_SCHEMA = vol.Any(
    vol.Schema(
        {
            vol.Required(KNX_ADDRESS): vol.All(
                cv.ensure_list,
                [ga_validator],
            ),
            vol.Required(SERVICE_KNX_ATTR_PAYLOAD): cv.match_all,
            vol.Required(SERVICE_KNX_ATTR_TYPE): sensor_type_validator,
        }
    ),
    vol.Schema(
        # without type given payload is treated as raw bytes
        {
            vol.Required(KNX_ADDRESS): vol.All(
                cv.ensure_list,
                [ga_validator],
            ),
            vol.Required(SERVICE_KNX_ATTR_PAYLOAD): vol.Any(
                cv.positive_int, [cv.positive_int]
            ),
        }
    ),
)

# `knx.read`: one or more group addresses to poll with GroupValueRead.
SERVICE_KNX_READ_SCHEMA = vol.Schema(
    {
        vol.Required(KNX_ADDRESS): vol.All(
            cv.ensure_list,
            [ga_validator],
        )
    }
)

# `knx.event_register`: add or remove addresses from the knx_event filter.
SERVICE_KNX_EVENT_REGISTER_SCHEMA = vol.Schema(
    {
        vol.Required(KNX_ADDRESS): vol.All(
            cv.ensure_list,
            [ga_validator],
        ),
        vol.Optional(SERVICE_KNX_ATTR_REMOVE, default=False): cv.boolean,
    }
)

# `knx.exposure_register`: full exposure config to add, or address+remove.
SERVICE_KNX_EXPOSURE_REGISTER_SCHEMA = vol.Any(
    ExposeSchema.EXPOSE_SENSOR_SCHEMA.extend(
        {
            vol.Optional(SERVICE_KNX_ATTR_REMOVE, default=False): cv.boolean,
        }
    ),
    vol.Schema(
        # for removing only `address` is required
        {
            vol.Required(KNX_ADDRESS): ga_validator,
            vol.Required(SERVICE_KNX_ATTR_REMOVE): vol.All(cv.boolean, True),
        },
        extra=vol.ALLOW_EXTRA,
    ),
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the KNX integration.

    Creates the KNXModule, starts the XKNX connection, registers any
    configured exposures and platforms, and wires up the knx.* services.
    """
    try:
        knx_module = KNXModule(hass, config)
        hass.data[DOMAIN] = knx_module
        await knx_module.start()
    except XKNXException as ex:
        # Setup continues without a working connection so the integration
        # can still be reloaded later.
        # NOTE(review): if KNXModule() itself raises, `knx_module` is
        # unbound and the code below fails with NameError - confirm.
        _LOGGER.warning("Could not connect to KNX interface: %s", ex)
        hass.components.persistent_notification.async_create(
            f"Could not connect to KNX interface: <br><b>{ex}</b>", title="KNX"
        )
    # Register statically configured exposures.
    if CONF_KNX_EXPOSE in config[DOMAIN]:
        for expose_config in config[DOMAIN][CONF_KNX_EXPOSE]:
            knx_module.exposures.append(
                create_knx_exposure(hass, knx_module.xknx, expose_config)
            )
    # Forward each configured platform's config to its platform setup.
    for platform in SupportedPlatforms:
        if platform.value not in config[DOMAIN]:
            continue
        hass.async_create_task(
            discovery.async_load_platform(
                hass,
                platform.value,
                DOMAIN,
                {
                    "platform_config": config[DOMAIN][platform.value],
                },
                config,
            )
        )
    hass.services.async_register(
        DOMAIN,
        SERVICE_KNX_SEND,
        knx_module.service_send_to_knx_bus,
        schema=SERVICE_KNX_SEND_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN,
        SERVICE_KNX_READ,
        knx_module.service_read_to_knx_bus,
        schema=SERVICE_KNX_READ_SCHEMA,
    )
    # Register/unregister services are admin-only.
    async_register_admin_service(
        hass,
        DOMAIN,
        SERVICE_KNX_EVENT_REGISTER,
        knx_module.service_event_register_modify,
        schema=SERVICE_KNX_EVENT_REGISTER_SCHEMA,
    )
    async_register_admin_service(
        hass,
        DOMAIN,
        SERVICE_KNX_EXPOSURE_REGISTER,
        knx_module.service_exposure_register_modify,
        schema=SERVICE_KNX_EXPOSURE_REGISTER_SCHEMA,
    )

    async def reload_service_handler(service_call: ServiceCall) -> None:
        """Remove all KNX components and load new ones from config."""
        # First check for config file. If for some reason it is no longer there
        # or knx is no longer mentioned, stop the reload.
        config = await async_integration_yaml_config(hass, DOMAIN)
        if not config or DOMAIN not in config:
            return
        # Tear down the connection and all platform entities, then re-run
        # setup with the freshly loaded YAML config.
        await knx_module.xknx.stop()
        await asyncio.gather(
            *(platform.async_reset() for platform in async_get_platforms(hass, DOMAIN))
        )
        await async_setup(hass, config)

    async_register_admin_service(
        hass, DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({})
    )
    return True
class KNXModule:
    """Representation of KNX Object.

    Owns the XKNX instance, tracks connection state and implements the
    handlers behind the knx.* services.
    """

    def __init__(self, hass: HomeAssistant, config: ConfigType) -> None:
        """Initialize KNX module."""
        self.hass = hass
        self.config = config
        # Updated by connection_state_changed_cb().
        self.connected = False
        # Exposures from static YAML config...
        self.exposures: list[KNXExposeSensor | KNXExposeTime] = []
        # ...and exposures registered at runtime via the service, keyed by
        # group address.
        self.service_exposures: dict[str, KNXExposeSensor | KNXExposeTime] = {}
        self.init_xknx()
        self._knx_event_callback: TelegramQueue.Callback = self.register_callback()
        self.xknx.connection_manager.register_connection_state_changed_cb(
            self.connection_state_changed_cb
        )

    def init_xknx(self) -> None:
        """Initialize XKNX object."""
        self.xknx = XKNX(
            own_address=self.config[DOMAIN][CONF_KNX_INDIVIDUAL_ADDRESS],
            rate_limit=self.config[DOMAIN][ConnectionSchema.CONF_KNX_RATE_LIMIT],
            multicast_group=self.config[DOMAIN][ConnectionSchema.CONF_KNX_MCAST_GRP],
            multicast_port=self.config[DOMAIN][ConnectionSchema.CONF_KNX_MCAST_PORT],
            connection_config=self.connection_config(),
            state_updater=self.config[DOMAIN][ConnectionSchema.CONF_KNX_STATE_UPDATER],
        )

    async def start(self) -> None:
        """Start XKNX object. Connect to tunneling or Routing device."""
        await self.xknx.start()
        # Disconnect cleanly when Home Assistant shuts down.
        self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self.stop)

    async def stop(self, event: Event) -> None:
        """Stop XKNX object. Disconnect from tunneling or Routing device."""
        await self.xknx.stop()

    def connection_config(self) -> ConnectionConfig:
        """Return the connection_config.

        Tunneling takes precedence over routing; with neither configured
        XKNX falls back to automatic gateway discovery.
        """
        if CONF_KNX_TUNNELING in self.config[DOMAIN]:
            return self.connection_config_tunneling()
        if CONF_KNX_ROUTING in self.config[DOMAIN]:
            return self.connection_config_routing()
        return ConnectionConfig(auto_reconnect=True)

    def connection_config_routing(self) -> ConnectionConfig:
        """Return the connection_config if routing is configured."""
        local_ip = None
        # all configuration values are optional
        if self.config[DOMAIN][CONF_KNX_ROUTING] is not None:
            local_ip = self.config[DOMAIN][CONF_KNX_ROUTING].get(
                ConnectionSchema.CONF_KNX_LOCAL_IP
            )
        return ConnectionConfig(
            connection_type=ConnectionType.ROUTING, local_ip=local_ip
        )

    def connection_config_tunneling(self) -> ConnectionConfig:
        """Return the connection_config if tunneling is configured."""
        gateway_ip = self.config[DOMAIN][CONF_KNX_TUNNELING][CONF_HOST]
        gateway_port = self.config[DOMAIN][CONF_KNX_TUNNELING][CONF_PORT]
        local_ip = self.config[DOMAIN][CONF_KNX_TUNNELING].get(
            ConnectionSchema.CONF_KNX_LOCAL_IP
        )
        route_back = self.config[DOMAIN][CONF_KNX_TUNNELING][
            ConnectionSchema.CONF_KNX_ROUTE_BACK
        ]
        return ConnectionConfig(
            connection_type=ConnectionType.TUNNELING,
            gateway_ip=gateway_ip,
            gateway_port=gateway_port,
            local_ip=local_ip,
            route_back=route_back,
            auto_reconnect=True,
        )

    async def telegram_received_cb(self, telegram: Telegram) -> None:
        """Call invoked after a KNX telegram was received.

        Fires a `knx_event` on the Home Assistant event bus describing the
        telegram.
        """
        data = None
        # Not all telegrams have serializable data.
        if (
            isinstance(telegram.payload, (GroupValueWrite, GroupValueResponse))
            and telegram.payload.value is not None
        ):
            data = telegram.payload.value.value
        self.hass.bus.async_fire(
            "knx_event",
            {
                "data": data,
                "destination": str(telegram.destination_address),
                "direction": telegram.direction.value,
                "source": str(telegram.source_address),
                "telegramtype": telegram.payload.__class__.__name__,
            },
        )

    async def connection_state_changed_cb(self, state: XknxConnectionState) -> None:
        """Call invoked after a KNX connection state change was received."""
        self.connected = state == XknxConnectionState.CONNECTED
        # Let every device push its (possibly stale) state to HA entities.
        if tasks := [device.after_update() for device in self.xknx.devices]:
            await asyncio.gather(*tasks)

    def register_callback(self) -> TelegramQueue.Callback:
        """Register callback within XKNX TelegramQueue."""
        address_filters = list(
            map(AddressFilter, self.config[DOMAIN][CONF_KNX_EVENT_FILTER])
        )
        return self.xknx.telegram_queue.register_telegram_received_cb(
            self.telegram_received_cb,
            address_filters=address_filters,
            group_addresses=[],
            match_for_outgoing=True,
        )

    async def service_event_register_modify(self, call: ServiceCall) -> None:
        """Service for adding or removing a GroupAddress to the knx_event filter."""
        attr_address = call.data[KNX_ADDRESS]
        group_addresses = map(parse_device_group_address, attr_address)
        if call.data.get(SERVICE_KNX_ATTR_REMOVE):
            for group_address in group_addresses:
                try:
                    self._knx_event_callback.group_addresses.remove(group_address)
                except ValueError:
                    # Removing an address that was never registered is only
                    # logged, not raised.
                    _LOGGER.warning(
                        "Service event_register could not remove event for '%s'",
                        str(group_address),
                    )
            return
        for group_address in group_addresses:
            # Skip duplicates so an address is registered at most once.
            if group_address in self._knx_event_callback.group_addresses:
                continue
            self._knx_event_callback.group_addresses.append(group_address)
            _LOGGER.debug(
                "Service event_register registered event for '%s'",
                str(group_address),
            )

    async def service_exposure_register_modify(self, call: ServiceCall) -> None:
        """Service for adding or removing an exposure to KNX bus."""
        group_address = call.data[KNX_ADDRESS]
        if call.data.get(SERVICE_KNX_ATTR_REMOVE):
            try:
                removed_exposure = self.service_exposures.pop(group_address)
            except KeyError as err:
                raise HomeAssistantError(
                    f"Could not find exposure for '{group_address}' to remove."
                ) from err
            else:
                removed_exposure.shutdown()
            return
        if group_address in self.service_exposures:
            # Re-registering an address replaces (and shuts down) the old
            # exposure rather than failing.
            replaced_exposure = self.service_exposures.pop(group_address)
            _LOGGER.warning(
                "Service exposure_register replacing already registered exposure for '%s' - %s",
                group_address,
                replaced_exposure.device.name,
            )
            replaced_exposure.shutdown()
        exposure = create_knx_exposure(self.hass, self.xknx, call.data)  # type: ignore[arg-type]
        self.service_exposures[group_address] = exposure
        _LOGGER.debug(
            "Service exposure_register registered exposure for '%s' - %s",
            group_address,
            exposure.device.name,
        )

    async def service_send_to_knx_bus(self, call: ServiceCall) -> None:
        """Service for sending an arbitrary KNX message to the KNX bus."""
        attr_address = call.data[KNX_ADDRESS]
        attr_payload = call.data[SERVICE_KNX_ATTR_PAYLOAD]
        attr_type = call.data.get(SERVICE_KNX_ATTR_TYPE)

        payload: DPTBinary | DPTArray
        if attr_type is not None:
            # Typed payload: encode via the matching DPT transcoder.
            transcoder = DPTBase.parse_transcoder(attr_type)
            if transcoder is None:
                raise ValueError(f"Invalid type for knx.send service: {attr_type}")
            payload = DPTArray(transcoder.to_knx(attr_payload))
        elif isinstance(attr_payload, int):
            # Raw int -> 6-bit DPTBinary; raw list -> byte array.
            payload = DPTBinary(attr_payload)
        else:
            payload = DPTArray(attr_payload)
        for address in attr_address:
            telegram = Telegram(
                destination_address=parse_device_group_address(address),
                payload=GroupValueWrite(payload),
            )
            await self.xknx.telegrams.put(telegram)

    async def service_read_to_knx_bus(self, call: ServiceCall) -> None:
        """Service for sending a GroupValueRead telegram to the KNX bus."""
        for address in call.data[KNX_ADDRESS]:
            telegram = Telegram(
                destination_address=parse_device_group_address(address),
                payload=GroupValueRead(),
            )
            await self.xknx.telegrams.put(telegram)
| |
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.common import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from quark import allocation_pool
from quark.db import api as db_api
from quark import exceptions as quark_exceptions
from quark import plugin_views as v
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _validate_policy_with_routes(context, policies, subnets):
    """Ensure no route gateway falls inside the excluded policy ranges.

    Builds an allocation pool per subnet with the candidate policies
    applied, then checks every route's gateway against its subnet's pool.
    """
    policy_networks = [netaddr.IPNetwork(p) for p in policies]
    pools = {
        subnet["id"]: allocation_pool.AllocationPools(subnet["cidr"],
                                                      policies=policy_networks)
        for subnet in subnets}
    route_list = db_api.route_find(
        context, subnet_id=[subnet["id"] for subnet in subnets])
    for route in route_list:
        pools[route["subnet_id"]].validate_gateway_excluded(route["gateway"])
def create_ip_policy(context, ip_policy):
    """Create an IP policy bound to either subnets or networks (not both).

    Validates that the excluded CIDRs fit inside the target subnets, adds
    the default network/broadcast exclusions, and persists the policy.
    Raises BadRequest on invalid input, SubnetNotFound/NetworkNotFound on
    missing targets.
    """
    LOG.info("create_ip_policy for tenant %s" % context.tenant_id)

    ipp = ip_policy['ip_policy']

    if not ipp.get("exclude"):
        raise exceptions.BadRequest(resource="ip_policy",
                                    msg="Empty ip_policy.exclude")

    network_ids = ipp.get("network_ids")
    subnet_ids = ipp.get("subnet_ids")

    # Exactly one of subnet_ids / network_ids must be supplied.
    if subnet_ids and network_ids:
        raise exceptions.BadRequest(
            resource="ip_policy",
            msg="network_ids and subnet_ids specified. only one allowed")

    if not subnet_ids and not network_ids:
        raise exceptions.BadRequest(
            resource="ip_policy",
            msg="network_ids or subnet_ids not specified")

    with context.session.begin():
        if subnet_ids:
            subnets = db_api.subnet_find(
                context, id=subnet_ids, scope=db_api.ALL)
            if not subnets:
                raise exceptions.SubnetNotFound(id=subnet_ids)
            _check_for_pre_existing_policies_in(subnets)
            # ensure_default_policy mutates ipp["exclude"] in place.
            ensure_default_policy(ipp["exclude"], subnets)
            _validate_cidrs_fit_into_subnets(ipp["exclude"], subnets)
            # Replace the id list with model objects for the DB layer.
            ipp.pop("subnet_ids")
            ipp["subnets"] = subnets

        if network_ids:
            nets = db_api.network_find(
                context, id=network_ids, scope=db_api.ALL)
            if not nets:
                raise exceptions.NetworkNotFound(net_id=network_ids)
            _check_for_pre_existing_policies_in(nets)
            # Validate against every subnet of every matched network.
            subnets = [subnet for net in nets
                       for subnet in net.get("subnets", [])]
            ensure_default_policy(ipp["exclude"], subnets)
            _validate_cidrs_fit_into_subnets(ipp["exclude"], subnets)
            ipp.pop("network_ids")
            ipp["networks"] = nets

        ip_policy = db_api.ip_policy_create(context, **ipp)
    return v._make_ip_policy_dict(ip_policy)
def _check_for_pre_existing_policies_in(models):
models_with_existing_policies = [model for model in models
if model.get('ip_policy', None)]
if models_with_existing_policies:
first_model = models_with_existing_policies[0]
raise quark_exceptions.IPPolicyAlreadyExists(
id=first_model['ip_policy']['id'],
n_id=first_model['id'])
def get_ip_policy(context, id):
    """Return a single IP policy as a dict, or raise IPPolicyNotFound."""
    LOG.info("get_ip_policy %s for tenant %s" % (id, context.tenant_id))
    policy = db_api.ip_policy_find(context, id=id, scope=db_api.ONE)
    if policy:
        return v._make_ip_policy_dict(policy)
    raise quark_exceptions.IPPolicyNotFound(id=id)
def get_ip_policies(context, **filters):
    """Return all IP policies matching *filters* as a list of dicts."""
    LOG.info("get_ip_policies for tenant %s" % (context.tenant_id))
    found = db_api.ip_policy_find(context, scope=db_api.ALL, **filters)
    return list(map(v._make_ip_policy_dict, found))
def update_ip_policy(context, id, ip_policy):
    """Update an existing IP policy and re-target its subnets/networks.

    Re-validates the excluded CIDRs against the (possibly new) targets and
    against existing routes' gateways before persisting.
    """
    LOG.info("update_ip_policy for tenant %s" % context.tenant_id)

    ipp = ip_policy["ip_policy"]

    with context.session.begin():
        ipp_db = db_api.ip_policy_find(context, id=id, scope=db_api.ONE)
        if not ipp_db:
            raise quark_exceptions.IPPolicyNotFound(id=id)

        ip_policy_cidrs = ipp.get("exclude")
        network_ids = ipp.get("network_ids")
        subnet_ids = ipp.get("subnet_ids")

        # A policy may target subnets or networks, never both.
        if subnet_ids and network_ids:
            raise exceptions.BadRequest(
                resource="ip_policy",
                msg="network_ids and subnet_ids specified. only one allowed")

        models = []
        all_subnets = []
        if subnet_ids:
            # Detach the policy from its current subnets before re-binding.
            for subnet in ipp_db["subnets"]:
                subnet["ip_policy"] = None
            subnets = db_api.subnet_find(
                context, id=subnet_ids, scope=db_api.ALL)
            if len(subnets) != len(subnet_ids):
                raise exceptions.SubnetNotFound(id=subnet_ids)
            if ip_policy_cidrs is not None:
                # ensure_default_policy mutates ip_policy_cidrs in place.
                ensure_default_policy(ip_policy_cidrs, subnets)
                _validate_cidrs_fit_into_subnets(ip_policy_cidrs, subnets)
            all_subnets.extend(subnets)
            models.extend(subnets)

        if network_ids:
            # Detach the policy from its current networks before re-binding.
            for network in ipp_db["networks"]:
                network["ip_policy"] = None
            nets = db_api.network_find(context, id=network_ids,
                                       scope=db_api.ALL)
            if len(nets) != len(network_ids):
                raise exceptions.NetworkNotFound(net_id=network_ids)
            subnets = [subnet for net in nets
                       for subnet in net.get("subnets", [])]
            if ip_policy_cidrs is not None:
                ensure_default_policy(ip_policy_cidrs, subnets)
                _validate_cidrs_fit_into_subnets(ip_policy_cidrs, subnets)
            all_subnets.extend(subnets)
            models.extend(nets)

        if not subnet_ids and not network_ids and ip_policy_cidrs is not None:
            # Targets unchanged: validate against the currently bound subnets.
            ensure_default_policy(ip_policy_cidrs, ipp_db["subnets"])
            _validate_cidrs_fit_into_subnets(
                ip_policy_cidrs, ipp_db["subnets"])

        for model in models:
            # A model detached above has ip_policy None; anything still
            # holding a different policy is a conflict.
            if model["ip_policy"]:
                raise quark_exceptions.IPPolicyAlreadyExists(
                    id=model["ip_policy"]["id"], n_id=model["id"])
            model["ip_policy"] = ipp_db

        if ip_policy_cidrs:
            _validate_policy_with_routes(context, ip_policy_cidrs, all_subnets)
        ipp_db = db_api.ip_policy_update(context, ipp_db, **ipp)
    return v._make_ip_policy_dict(ipp_db)
def delete_ip_policy(context, id):
    """Delete an IP policy that is not attached to any network or subnet.

    :param context: request context carrying the tenant and DB session.
    :param id: ID of the IP policy to delete.
    :raises IPPolicyNotFound: no policy with ``id`` exists.
    :raises IPPolicyInUse: the policy is still associated with a network
        or a subnet and must be detached first.
    """
    # Lazy logging args: the message is only formatted when INFO is enabled.
    LOG.info("delete_ip_policy %s for tenant %s", id, context.tenant_id)
    with context.session.begin():
        ipp = db_api.ip_policy_find(context, id=id, scope=db_api.ONE)
        if not ipp:
            raise quark_exceptions.IPPolicyNotFound(id=id)
        if ipp["networks"] or ipp["subnets"]:
            raise quark_exceptions.IPPolicyInUse(id=id)
        db_api.ip_policy_delete(context, ipp)
def _validate_cidrs_fit_into_subnets(cidrs, subnets):
    """Ensure each CIDR is contained in every same-version subnet CIDR.

    :param cidrs: iterable of CIDR strings to validate.
    :param subnets: iterable of subnet dicts with a "cidr" key.
    :raises BadRequest: a CIDR of the same IP version as a subnet is not
        wholly contained within that subnet's CIDR.
    """
    # Lazy logging args: formatted only when INFO is enabled.
    LOG.info("validate_cidrs_all_fit_into_subnets with CIDRs (%s) "
             "and subnets (%s)", cidrs, subnets)
    # Parse each subnet CIDR once instead of once per policy CIDR.
    subnet_cidrs = [netaddr.IPNetwork(subnet["cidr"]) for subnet in subnets]
    for cidr in cidrs:
        cidr = netaddr.IPNetwork(cidr)
        for subnet_cidr in subnet_cidrs:
            if cidr.version == subnet_cidr.version and cidr not in subnet_cidr:
                raise exceptions.BadRequest(
                    resource="ip_policy",
                    msg="CIDR %s not in subnet CIDR %s"
                    % (cidr, subnet_cidr))
def ensure_default_policy(cidrs, subnets):
    """Append each subnet's network and broadcast address to *cidrs*.

    Mutates *cidrs* in place so the policy always excludes the two
    reserved addresses of every subnet, skipping any address that is
    already covered by the original CIDR set or already listed.
    """
    # Snapshot of the caller-supplied CIDRs; appends below do not widen it.
    covered = netaddr.IPSet(cidrs)
    for subnet in subnets:
        subnet_net = netaddr.IPNetwork(subnet["cidr"])
        host_bits = '32' if subnet_net.version == 4 else '128'
        for reserved_ip in (subnet_net.network, subnet_net.broadcast):
            candidate = "%s/%s" % (reserved_ip, host_bits)
            # Skip anything already covered or already appended.
            if netaddr.IPNetwork(candidate) in covered:
                continue
            if candidate in cidrs:
                continue
            cidrs.append(candidate)
| |
"""Helper for aiohttp webclient stuff."""
import asyncio
import logging
from ssl import SSLContext
import sys
from typing import Any, Awaitable, Optional, Union, cast
import aiohttp
from aiohttp import web
from aiohttp.hdrs import CONTENT_TYPE, USER_AGENT
from aiohttp.web_exceptions import HTTPBadGateway, HTTPGatewayTimeout
import async_timeout
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE, __version__
from homeassistant.core import Event, callback
from homeassistant.helpers.frame import MissingIntegrationFrame, get_integration_frame
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.loader import bind_hass
from homeassistant.util import ssl as ssl_util
_LOGGER = logging.getLogger(__name__)

# hass.data keys for the shared connector / session singletons
# (separate entries for SSL-verified and non-verified variants).
DATA_CONNECTOR = "aiohttp_connector"
DATA_CONNECTOR_NOTVERIFY = "aiohttp_connector_notverify"
DATA_CLIENTSESSION = "aiohttp_clientsession"
DATA_CLIENTSESSION_NOTVERIFY = "aiohttp_clientsession_notverify"
# User-Agent advertised on outgoing requests, e.g.
# "HomeAssistant/<ver> aiohttp/<ver> Python/3.8".
SERVER_SOFTWARE = "HomeAssistant/{0} aiohttp/{1} Python/{2[0]}.{2[1]}".format(
    __version__, aiohttp.__version__, sys.version_info
)
@callback
@bind_hass
def async_get_clientsession(
    hass: HomeAssistantType, verify_ssl: bool = True
) -> aiohttp.ClientSession:
    """Return the shared aiohttp ClientSession, creating it on first use.

    This method must be run in the event loop.
    """
    key = DATA_CLIENTSESSION if verify_ssl else DATA_CLIENTSESSION_NOTVERIFY

    # Lazily create and cache the session the first time it is requested.
    if key not in hass.data:
        hass.data[key] = async_create_clientsession(hass, verify_ssl)

    return cast(aiohttp.ClientSession, hass.data[key])
@callback
@bind_hass
def async_create_clientsession(
    hass: HomeAssistantType,
    verify_ssl: bool = True,
    auto_cleanup: bool = True,
    **kwargs: Any,
) -> aiohttp.ClientSession:
    """Create a new ClientSession with kwargs, i.e. for cookies.

    If auto_cleanup is False, you need to call detach() after the session
    returned is no longer used. Default is True, the session will be
    automatically detached on homeassistant_stop.

    This method must be run in the event loop.
    """
    # Share one connector pool per (hass, verify_ssl) across all sessions.
    connector = _async_get_connector(hass, verify_ssl)

    clientsession = aiohttp.ClientSession(
        connector=connector, headers={USER_AGENT: SERVER_SOFTWARE}, **kwargs,
    )

    async def patched_close() -> None:
        """Mock close to avoid integrations closing our session."""
        try:
            # Inspect the call stack to find which integration called close().
            found_frame, integration, path = get_integration_frame()
        except MissingIntegrationFrame:
            # Did not source from an integration? Hard error.
            raise RuntimeError(
                "Detected closing of the Home Assistant aiohttp session in the Home Assistant core. "
                "Please report this issue."
            )

        index = found_frame.filename.index(path)
        if path == "custom_components/":
            extra = " to the custom component author"
        else:
            extra = ""

        # Only warn — the shared session is deliberately left open.
        _LOGGER.warning(
            "Detected integration that closes the Home Assistant aiohttp session. "
            "Please report issue%s for %s using this method at %s, line %s: %s",
            extra,
            integration,
            found_frame.filename[index:],
            found_frame.lineno,
            found_frame.line.strip(),
        )

    # Replace close() so an integration cannot tear down the shared session.
    clientsession.close = patched_close  # type: ignore

    if auto_cleanup:
        _async_register_clientsession_shutdown(hass, clientsession)

    return clientsession
@bind_hass
async def async_aiohttp_proxy_web(
    hass: HomeAssistantType,
    request: web.BaseRequest,
    web_coro: Awaitable[aiohttp.ClientResponse],
    buffer_size: int = 102400,
    timeout: int = 10,
) -> Optional[web.StreamResponse]:
    """Stream websession request to aiohttp web response."""
    try:
        with async_timeout.timeout(timeout):
            upstream = await web_coro
    except asyncio.CancelledError:
        # The caller cancelled the request; nothing to proxy.
        return None
    except asyncio.TimeoutError as err:
        # Upstream did not answer within the timeout.
        raise HTTPGatewayTimeout() from err
    except aiohttp.ClientError as err:
        # Connection-level failure while contacting upstream.
        raise HTTPBadGateway() from err

    try:
        return await async_aiohttp_proxy_stream(
            hass, request, upstream.content, upstream.headers.get(CONTENT_TYPE)
        )
    finally:
        # Always release the upstream response, even on error.
        upstream.close()
@bind_hass
async def async_aiohttp_proxy_stream(
    hass: HomeAssistantType,
    request: web.BaseRequest,
    stream: aiohttp.StreamReader,
    content_type: str,
    buffer_size: int = 102400,
    timeout: int = 10,
) -> web.StreamResponse:
    """Stream a stream to aiohttp web response."""
    response = web.StreamResponse()
    response.content_type = content_type
    await response.prepare(request)

    try:
        while True:
            # Each chunk read is bounded by the timeout individually.
            with async_timeout.timeout(timeout):
                chunk = await stream.read(buffer_size)
            if not chunk:
                break
            await response.write(chunk)
    except (asyncio.TimeoutError, aiohttp.ClientError):
        # Upstream stalled or dropped; deliver what was already written.
        pass

    return response
@callback
def _async_register_clientsession_shutdown(
    hass: HomeAssistantType, clientsession: aiohttp.ClientSession
) -> None:
    """Register ClientSession close on Home Assistant shutdown.

    This method must be run in the event loop.
    """

    @callback
    def _on_homeassistant_close(event: Event) -> None:
        """Detach the session when Home Assistant shuts down."""
        clientsession.detach()

    hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_CLOSE, _on_homeassistant_close
    )
@callback
def _async_get_connector(
    hass: HomeAssistantType, verify_ssl: bool = True
) -> aiohttp.BaseConnector:
    """Return the shared TCPConnector pool, creating it on first use.

    This method must be run in the event loop.
    """
    key = DATA_CONNECTOR if verify_ssl else DATA_CONNECTOR_NOTVERIFY

    if key in hass.data:
        return cast(aiohttp.BaseConnector, hass.data[key])

    # The SSL context is only built when a new connector is created.
    ssl_context: Union[bool, SSLContext] = (
        ssl_util.client_context() if verify_ssl else False
    )
    connector = aiohttp.TCPConnector(enable_cleanup_closed=True, ssl=ssl_context)
    hass.data[key] = connector

    async def _on_homeassistant_close(event: Event) -> None:
        """Close the connector pool on shutdown."""
        await connector.close()

    hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_CLOSE, _on_homeassistant_close
    )

    return connector
| |
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import logging
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.tests.integrated import tester
LOG = logging.getLogger(__name__)
class RunTest(tester.TestFlowBase):
""" Test case for add flows of Matches
"""
OFP_VERSIONS = [ofproto_v1_2.OFP_VERSION]
    def __init__(self, *args, **kwargs):
        """Initialize the test case and reset the per-test verify spec."""
        super(RunTest, self).__init__(*args, **kwargs)
        # Populated by _set_verify() before each flow-mod is sent.
        self._verify = {}
    def add_matches(self, dp, match):
        """Install a flow entry for *match* with an empty instruction list.

        Cookie, timeouts, priority and table are all zero; buffer_id and
        out_group are OFP_NO_BUFFER/any-group sentinels (0xffffffff).
        """
        m = dp.ofproto_parser.OFPFlowMod(dp, 0, 0, 0,
                                         dp.ofproto.OFPFC_ADD,
                                         0, 0, 0, 0xffffffff,
                                         dp.ofproto.OFPP_ANY,
                                         0xffffffff, 0, match, [])
        dp.send_msg(m)
def _set_verify(self, headers, value, mask=None,
all_bits_masked=False, type_='int'):
self._verify = {}
self._verify['headers'] = headers
self._verify['value'] = value
self._verify['mask'] = mask
self._verify['all_bits_masked'] = all_bits_masked
self._verify['type'] = type_
    def verify_default(self, dp, stats):
        """Check a flow-stats reply against the spec set by _set_verify().

        Returns True on success, or a descriptive string on mismatch.
        """
        type_ = self._verify['type']
        headers = self._verify['headers']
        value = self._verify['value']
        mask = self._verify['mask']
        value_masked = self._masked(type_, value, mask)
        all_bits_masked = self._verify['all_bits_masked']

        # Find the first match field whose OXM header is one we expect.
        field = None
        for s in stats:
            for f in s.match.fields:
                if f.header in headers:
                    field = f
                    break

        if field is None:
            # With an all-zero mask the field is fully wildcarded, so the
            # switch may legitimately omit it from the reply.
            if self._is_all_zero_bit(type_, mask):
                return True
            return 'Field not found.'

        f_value = field.value
        if hasattr(field, 'mask'):
            f_mask = field.mask
        else:
            f_mask = None

        # The switch may echo back either the raw or the masked value,
        # and may drop an all-ones mask entirely (all_bits_masked case).
        if (f_value == value) or (f_value == value_masked):
            if (f_mask == mask) or (all_bits_masked and f_mask is None):
                return True

        return "send: %s/%s, reply: %s/%s" \
            % (self._cnv_to_str(type_, value, mask, f_value, f_mask))
def _masked(self, type_, value, mask):
if mask is None:
v = value
elif type_ == 'int':
v = value & mask
elif type_ == 'mac':
v = self.haddr_masked(value, mask)
elif type_ == 'ipv4':
v = self.ipv4_masked(value, mask)
elif type_ == 'ipv6':
v = self.ipv6_masked(value, mask)
else:
raise 'Unknown type'
return v
def _is_all_zero_bit(self, type_, val):
if type_ == 'int' or type_ == 'ipv4':
return val == 0
elif type_ == 'mac':
for v in val:
if v != '\x00':
return False
return True
elif type_ == 'ipv6':
for v in val:
if v != 0:
return False
return True
else:
raise 'Unknown type'
def _cnv_to_str(self, type_, value, mask, f_value, f_mask):
func = None
if type_ == 'int':
pass
elif type_ == 'mac':
func = self.haddr_to_str
elif type_ == 'ipv4':
func = self.ipv4_to_str
elif type_ == 'ipv6':
func = self.ipv6_to_str
else:
raise 'Unknown type'
if func:
value = func(value)
f_value = func(f_value)
if mask:
mask = func(mask)
if f_mask:
f_mask = func(f_mask)
return value, mask, f_value, f_mask
def test_rule_set_dl_dst(self, dp):
dl_dst = 'e2:7a:09:79:0b:0f'
dl_dst_bin = self.haddr_to_bin(dl_dst)
headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
self._set_verify(headers, dl_dst_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_dst(dl_dst_bin)
self.add_matches(dp, match)
def test_rule_set_dl_dst_masked_ff(self, dp):
dl_dst = 'd0:98:79:b4:75:b5'
dl_dst_bin = self.haddr_to_bin(dl_dst)
mask = 'ff:ff:ff:ff:ff:ff'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
self._set_verify(headers, dl_dst_bin, mask_bin, True, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_dst_masked(dl_dst_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_dst_masked_f0(self, dp):
dl_dst = 'e2:7a:09:79:0b:0f'
dl_dst_bin = self.haddr_to_bin(dl_dst)
mask = 'ff:ff:ff:ff:ff:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
self._set_verify(headers, dl_dst_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_dst_masked(dl_dst_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_dst_masked_00(self, dp):
dl_dst = 'e2:7a:09:79:0b:0f'
dl_dst_bin = self.haddr_to_bin(dl_dst)
mask = '00:00:00:00:00:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
self._set_verify(headers, dl_dst_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_dst_masked(dl_dst_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_src(self, dp):
dl_src = 'e2:7a:09:79:0b:0f'
dl_src_bin = self.haddr_to_bin(dl_src)
headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
self._set_verify(headers, dl_src_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_src(dl_src_bin)
self.add_matches(dp, match)
def test_rule_set_dl_src_masked_ff(self, dp):
dl_src = 'e2:7a:09:79:0b:0f'
dl_src_bin = self.haddr_to_bin(dl_src)
mask = 'ff:ff:ff:ff:ff:ff'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
self._set_verify(headers, dl_src_bin, mask_bin, True, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_src_masked(dl_src_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_src_masked_f0(self, dp):
dl_src = 'e2:7a:09:79:0b:0f'
dl_src_bin = self.haddr_to_bin(dl_src)
mask = 'ff:ff:ff:ff:ff:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
self._set_verify(headers, dl_src_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_src_masked(dl_src_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_src_masked_00(self, dp):
dl_src = 'e2:7a:09:79:0b:0f'
dl_src_bin = self.haddr_to_bin(dl_src)
mask = '00:00:00:00:00:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
self._set_verify(headers, dl_src_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_src_masked(dl_src_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_type_ip(self, dp):
dl_type = ether.ETH_TYPE_IP
headers = [dp.ofproto.OXM_OF_ETH_TYPE]
self._set_verify(headers, dl_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
self.add_matches(dp, match)
def test_rule_set_dl_type_arp(self, dp):
dl_type = ether.ETH_TYPE_ARP
headers = [dp.ofproto.OXM_OF_ETH_TYPE]
self._set_verify(headers, dl_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
self.add_matches(dp, match)
def test_rule_set_dl_type_vlan(self, dp):
dl_type = ether.ETH_TYPE_8021Q
headers = [dp.ofproto.OXM_OF_ETH_TYPE]
self._set_verify(headers, dl_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
self.add_matches(dp, match)
def test_rule_set_dl_type_ipv6(self, dp):
dl_type = ether.ETH_TYPE_IPV6
headers = [dp.ofproto.OXM_OF_ETH_TYPE]
self._set_verify(headers, dl_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
self.add_matches(dp, match)
def test_rule_set_dl_type_lacp(self, dp):
dl_type = ether.ETH_TYPE_SLOW
headers = [dp.ofproto.OXM_OF_ETH_TYPE]
self._set_verify(headers, dl_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
self.add_matches(dp, match)
def test_rule_set_ip_dscp(self, dp):
ip_dscp = 36
dl_type = ether.ETH_TYPE_IP
headers = [dp.ofproto.OXM_OF_IP_DSCP]
self._set_verify(headers, ip_dscp)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_dscp(ip_dscp)
self.add_matches(dp, match)
def test_rule_set_vlan_vid(self, dp):
vlan_vid = 0x4ef
headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
self._set_verify(headers, vlan_vid)
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid(vlan_vid)
self.add_matches(dp, match)
def test_rule_set_vlan_vid_masked_ff(self, dp):
vlan_vid = 0x4ef
mask = 0xfff
headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
self._set_verify(headers, vlan_vid, mask, True)
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid_masked(vlan_vid, mask)
self.add_matches(dp, match)
def test_rule_set_vlan_vid_masked_f0(self, dp):
vlan_vid = 0x4ef
mask = 0xff0
headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
self._set_verify(headers, vlan_vid, mask)
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid_masked(vlan_vid, mask)
self.add_matches(dp, match)
def test_rule_set_vlan_vid_masked_00(self, dp):
vlan_vid = 0x4ef
mask = 0x000
headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
self._set_verify(headers, vlan_vid, mask)
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid_masked(vlan_vid, mask)
self.add_matches(dp, match)
def test_rule_set_vlan_pcp(self, dp):
vlan_vid = 0x4ef
vlan_pcp = 5
headers = [dp.ofproto.OXM_OF_VLAN_PCP]
self._set_verify(headers, vlan_pcp)
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid(vlan_vid)
match.set_vlan_pcp(vlan_pcp)
self.add_matches(dp, match)
def test_rule_set_ip_ecn(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_ecn = 3
headers = [dp.ofproto.OXM_OF_IP_ECN]
self._set_verify(headers, ip_ecn)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_ecn(ip_ecn)
self.add_matches(dp, match)
def test_rule_set_ip_proto_icmp(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_ICMP
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_tcp(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_TCP
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_udp(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_UDP
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_ipv6_route(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_ROUTING
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_ipv6_frag(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_FRAGMENT
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_ipv6_icmp(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_ICMPV6
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_ipv6_none(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_NONE
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_ipv6_dstopts(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_DSTOPTS
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ipv4_src(self, dp):
dl_type = ether.ETH_TYPE_IP
src = '192.168.196.250'
src_int = self.ipv4_to_int(src)
headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
self._set_verify(headers, src_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_src(src_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_src_masked_32(self, dp):
dl_type = ether.ETH_TYPE_IP
src = '192.168.196.250'
src_int = self.ipv4_to_int(src)
mask = '255.255.255.255'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
self._set_verify(headers, src_int, mask_int, True, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_src_masked(src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_src_masked_24(self, dp):
dl_type = ether.ETH_TYPE_IP
src = '192.168.196.250'
src_int = self.ipv4_to_int(src)
mask = '255.255.255.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
self._set_verify(headers, src_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_src_masked(src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_src_masked_0(self, dp):
dl_type = ether.ETH_TYPE_IP
src = '192.168.196.250'
src_int = self.ipv4_to_int(src)
mask = '0.0.0.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
self._set_verify(headers, src_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_src_masked(src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_dst(self, dp):
dl_type = ether.ETH_TYPE_IP
dst = '192.168.54.155'
dst_int = self.ipv4_to_int(dst)
headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
self._set_verify(headers, dst_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_dst(dst_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_dst_masked_32(self, dp):
dl_type = ether.ETH_TYPE_IP
dst = '192.168.54.155'
dst_int = self.ipv4_to_int(dst)
mask = '255.255.255.255'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
self._set_verify(headers, dst_int, mask_int, True, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_dst_masked(dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_dst_masked_24(self, dp):
dl_type = ether.ETH_TYPE_IP
dst = '192.168.54.155'
dst_int = self.ipv4_to_int(dst)
mask = '255.255.255.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
self._set_verify(headers, dst_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_dst_masked(dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_dst_masked_0(self, dp):
dl_type = ether.ETH_TYPE_IP
dst = '192.168.54.155'
dst_int = self.ipv4_to_int(dst)
mask = '0.0.0.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
self._set_verify(headers, dst_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_dst_masked(dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_tcp_src(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_TCP
tp_src = 1103
headers = [dp.ofproto.OXM_OF_TCP_SRC]
self._set_verify(headers, tp_src)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_tcp_src(tp_src)
self.add_matches(dp, match)
def test_rule_set_tcp_dst(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_TCP
tp_dst = 236
headers = [dp.ofproto.OXM_OF_TCP_DST]
self._set_verify(headers, tp_dst)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_tcp_dst(tp_dst)
self.add_matches(dp, match)
def test_rule_set_udp_src(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_UDP
tp_src = 56617
headers = [dp.ofproto.OXM_OF_UDP_SRC]
self._set_verify(headers, tp_src)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_udp_src(tp_src)
self.add_matches(dp, match)
def test_rule_set_udp_dst(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_UDP
tp_dst = 61278
headers = [dp.ofproto.OXM_OF_UDP_DST]
self._set_verify(headers, tp_dst)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_udp_dst(tp_dst)
self.add_matches(dp, match)
def test_rule_set_icmpv4_type(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_ICMP
icmp_type = 8
headers = [dp.ofproto.OXM_OF_ICMPV4_TYPE]
self._set_verify(headers, icmp_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_icmpv4_type(icmp_type)
self.add_matches(dp, match)
def test_rule_set_icmpv4_code(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_ICMP
icmp_type = 9
icmp_code = 16
headers = [dp.ofproto.OXM_OF_ICMPV4_CODE]
self._set_verify(headers, icmp_code)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_icmpv4_type(icmp_type)
match.set_icmpv4_code(icmp_code)
self.add_matches(dp, match)
def test_rule_set_arp_opcode(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_op = 1
headers = [dp.ofproto.OXM_OF_ARP_OP]
self._set_verify(headers, arp_op)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_opcode(arp_op)
self.add_matches(dp, match)
def test_rule_set_arp_spa(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_src = '192.168.222.57'
nw_src_int = self.ipv4_to_int(nw_src)
headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
self._set_verify(headers, nw_src_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_spa(nw_src_int)
self.add_matches(dp, match)
def test_rule_set_arp_spa_masked_32(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_src = '192.168.222.57'
nw_src_int = self.ipv4_to_int(nw_src)
mask = '255.255.255.255'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
self._set_verify(headers, nw_src_int, mask_int, True, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_spa_masked(nw_src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_spa_masked_24(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_src = '192.168.222.57'
nw_src_int = self.ipv4_to_int(nw_src)
mask = '255.255.255.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
self._set_verify(headers, nw_src_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_spa_masked(nw_src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_spa_masked_00(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_src = '192.168.222.57'
nw_src_int = self.ipv4_to_int(nw_src)
mask = '0.0.0.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
self._set_verify(headers, nw_src_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_spa_masked(nw_src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_tpa(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_dst = '192.168.198.233'
nw_dst_int = self.ipv4_to_int(nw_dst)
headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
self._set_verify(headers, nw_dst_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tpa(nw_dst_int)
self.add_matches(dp, match)
def test_rule_set_arp_tpa_masked_32(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_dst = '192.168.198.233'
nw_dst_int = self.ipv4_to_int(nw_dst)
mask = '255.255.255.255'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
self._set_verify(headers, nw_dst_int, mask_int, True, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tpa_masked(nw_dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_tpa_masked_24(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_dst = '192.168.198.233'
nw_dst_int = self.ipv4_to_int(nw_dst)
mask = '255.255.255.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
self._set_verify(headers, nw_dst_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tpa_masked(nw_dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_tpa_masked_00(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_dst = '192.168.198.233'
nw_dst_int = self.ipv4_to_int(nw_dst)
mask = '0.0.0.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
self._set_verify(headers, nw_dst_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tpa_masked(nw_dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_sha(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_sha = '3e:ec:13:9b:f3:0b'
arp_sha_bin = self.haddr_to_bin(arp_sha)
headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
self._set_verify(headers, arp_sha_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_sha(arp_sha_bin)
self.add_matches(dp, match)
def test_rule_set_arp_sha_masked_ff(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_sha = '3e:ec:13:9b:f3:0b'
arp_sha_bin = self.haddr_to_bin(arp_sha)
mask = 'ff:ff:ff:ff:ff:ff'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
self._set_verify(headers, arp_sha_bin, mask_bin, True, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_sha_masked(arp_sha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_arp_sha_masked_f0(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_sha = '3e:ec:13:9b:f3:0b'
arp_sha_bin = self.haddr_to_bin(arp_sha)
mask = 'ff:ff:ff:ff:ff:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
self._set_verify(headers, arp_sha_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_sha_masked(arp_sha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_arp_sha_masked_00(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_sha = '3e:ec:13:9b:f3:0b'
arp_sha_bin = self.haddr_to_bin(arp_sha)
mask = '00:00:00:00:00:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
self._set_verify(headers, arp_sha_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_sha_masked(arp_sha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_arp_tha(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_tha = '83:6c:21:52:49:68'
arp_tha_bin = self.haddr_to_bin(arp_tha)
headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
self._set_verify(headers, arp_tha_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tha(arp_tha_bin)
self.add_matches(dp, match)
def test_rule_set_arp_tha_masked_ff(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_tha = '83:6c:21:52:49:68'
arp_tha_bin = self.haddr_to_bin(arp_tha)
mask = 'ff:ff:ff:ff:ff:ff'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
self._set_verify(headers, arp_tha_bin, mask_bin, True, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tha_masked(arp_tha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_arp_tha_masked_f0(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_tha = '83:6c:21:52:49:68'
arp_tha_bin = self.haddr_to_bin(arp_tha)
mask = 'ff:ff:ff:ff:ff:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
self._set_verify(headers, arp_tha_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tha_masked(arp_tha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_arp_tha_masked_00(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_tha = '83:6c:21:52:49:68'
arp_tha_bin = self.haddr_to_bin(arp_tha)
mask = '00:00:00:00:00:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
self._set_verify(headers, arp_tha_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tha_masked(arp_tha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_ipv6_src(self, dp):
    """Exact match on the IPv6 source address."""
    ofp = dp.ofproto
    src_int = self.ipv6_to_int('2001:db8:bd05:1d2:288a:1fc0:1:10ee')
    self._set_verify([ofp.OXM_OF_IPV6_SRC, ofp.OXM_OF_IPV6_SRC_W],
                     src_int, type_='ipv6')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_src(src_int)
    self.add_matches(dp, match)
def test_rule_set_ipv6_src_masked_ff(self, dp):
    """Match the IPv6 source under a /128 (all-ones) mask."""
    ofp = dp.ofproto
    src_int = self.ipv6_to_int('2001:db8:bd05:1d2:288a:1fc0:1:10ee')
    mask_int = self.ipv6_to_int('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
    self._set_verify([ofp.OXM_OF_IPV6_SRC, ofp.OXM_OF_IPV6_SRC_W],
                     src_int, mask_int, True, type_='ipv6')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_src_masked(src_int, mask_int)
    self.add_matches(dp, match)
def test_rule_set_ipv6_src_masked_f0(self, dp):
    """Match the IPv6 source with the final 16-bit group wildcarded."""
    ofp = dp.ofproto
    src_int = self.ipv6_to_int('2001:db8:bd05:1d2:288a:1fc0:1:10ee')
    mask_int = self.ipv6_to_int('ffff:ffff:ffff:ffff:ffff:ffff:ffff:0')
    self._set_verify([ofp.OXM_OF_IPV6_SRC, ofp.OXM_OF_IPV6_SRC_W],
                     src_int, mask_int, type_='ipv6')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_src_masked(src_int, mask_int)
    self.add_matches(dp, match)
def test_rule_set_ipv6_src_masked_00(self, dp):
    """Match the IPv6 source under an all-zero (fully wild) mask."""
    ofp = dp.ofproto
    src_int = self.ipv6_to_int('2001:db8:bd05:1d2:288a:1fc0:1:10ee')
    mask_int = self.ipv6_to_int('0:0:0:0:0:0:0:0')
    self._set_verify([ofp.OXM_OF_IPV6_SRC, ofp.OXM_OF_IPV6_SRC_W],
                     src_int, mask_int, type_='ipv6')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_src_masked(src_int, mask_int)
    self.add_matches(dp, match)
def test_rule_set_ipv6_dst(self, dp):
    """Exact match on the IPv6 destination address."""
    ofp = dp.ofproto
    dst_int = self.ipv6_to_int('e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038')
    self._set_verify([ofp.OXM_OF_IPV6_DST, ofp.OXM_OF_IPV6_DST_W],
                     dst_int, type_='ipv6')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_dst(dst_int)
    self.add_matches(dp, match)
def test_rule_set_ipv6_dst_masked_ff(self, dp):
    """Match the IPv6 destination under a /128 (all-ones) mask."""
    ofp = dp.ofproto
    dst_int = self.ipv6_to_int('e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038')
    mask_int = self.ipv6_to_int('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
    self._set_verify([ofp.OXM_OF_IPV6_DST, ofp.OXM_OF_IPV6_DST_W],
                     dst_int, mask_int, True, type_='ipv6')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_dst_masked(dst_int, mask_int)
    self.add_matches(dp, match)
def test_rule_set_ipv6_dst_masked_f0(self, dp):
    """Match the IPv6 destination with the final group wildcarded."""
    ofp = dp.ofproto
    dst_int = self.ipv6_to_int('e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038')
    mask_int = self.ipv6_to_int('ffff:ffff:ffff:ffff:ffff:ffff:ffff:0')
    self._set_verify([ofp.OXM_OF_IPV6_DST, ofp.OXM_OF_IPV6_DST_W],
                     dst_int, mask_int, type_='ipv6')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_dst_masked(dst_int, mask_int)
    self.add_matches(dp, match)
def test_rule_set_ipv6_dst_masked_00(self, dp):
    """Match the IPv6 destination under an all-zero (fully wild) mask."""
    ofp = dp.ofproto
    dst_int = self.ipv6_to_int('e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038')
    mask_int = self.ipv6_to_int('0:0:0:0:0:0:0:0')
    self._set_verify([ofp.OXM_OF_IPV6_DST, ofp.OXM_OF_IPV6_DST_W],
                     dst_int, mask_int, type_='ipv6')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_dst_masked(dst_int, mask_int)
    self.add_matches(dp, match)
def test_rule_set_ipv6_flabel(self, dp):
    """Exact match on the IPv6 flow label."""
    flabel = 0xc5384
    self._set_verify([dp.ofproto.OXM_OF_IPV6_FLABEL,
                      dp.ofproto.OXM_OF_IPV6_FLABEL_W],
                     flabel)
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_flabel(flabel)
    self.add_matches(dp, match)
def test_rule_set_ipv6_flabel_masked_ff(self, dp):
    """Match the IPv6 flow label under the full 20-bit mask."""
    flabel = 0xc5384
    flabel_mask = 0xfffff
    self._set_verify([dp.ofproto.OXM_OF_IPV6_FLABEL,
                      dp.ofproto.OXM_OF_IPV6_FLABEL_W],
                     flabel, flabel_mask, True)
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_flabel_masked(flabel, flabel_mask)
    self.add_matches(dp, match)
def test_rule_set_ipv6_flabel_masked_f0(self, dp):
    """Match the IPv6 flow label with the low nibble wildcarded."""
    flabel = 0xc5384
    flabel_mask = 0xffff0
    self._set_verify([dp.ofproto.OXM_OF_IPV6_FLABEL,
                      dp.ofproto.OXM_OF_IPV6_FLABEL_W],
                     flabel, flabel_mask)
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_flabel_masked(flabel, flabel_mask)
    self.add_matches(dp, match)
def test_rule_set_ipv6_flabel_masked_00(self, dp):
    """Match the IPv6 flow label under an all-zero (fully wild) mask."""
    flabel = 0xc5384
    flabel_mask = 0x0
    self._set_verify([dp.ofproto.OXM_OF_IPV6_FLABEL,
                      dp.ofproto.OXM_OF_IPV6_FLABEL_W],
                     flabel, flabel_mask)
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ipv6_flabel_masked(flabel, flabel_mask)
    self.add_matches(dp, match)
def test_rule_set_icmpv6_type(self, dp):
    """Match on the ICMPv6 message type (value 129)."""
    icmp_type = 129
    self._set_verify([dp.ofproto.OXM_OF_ICMPV6_TYPE], icmp_type)
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ip_proto(inet.IPPROTO_ICMPV6)
    match.set_icmpv6_type(icmp_type)
    self.add_matches(dp, match)
def test_rule_set_icmpv6_code(self, dp):
    """Match on the ICMPv6 code (type 138, code 1)."""
    icmp_type = 138
    icmp_code = 1
    self._set_verify([dp.ofproto.OXM_OF_ICMPV6_CODE], icmp_code)
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ip_proto(inet.IPPROTO_ICMPV6)
    match.set_icmpv6_type(icmp_type)
    match.set_icmpv6_code(icmp_code)
    self.add_matches(dp, match)
def test_rule_set_ipv6_nd_target(self, dp):
    """Match on the IPv6 ND target address (ICMPv6 type 135)."""
    target_int = self.ipv6_to_int("5420:db3f:921b:3e33:2791:98f:dd7f:2e19")
    self._set_verify([dp.ofproto.OXM_OF_IPV6_ND_TARGET],
                     target_int, type_='ipv6')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ip_proto(inet.IPPROTO_ICMPV6)
    match.set_icmpv6_type(135)
    match.set_ipv6_nd_target(target_int)
    self.add_matches(dp, match)
def test_rule_set_ipv6_nd_sll(self, dp):
    """Match on the ND source link-layer address (ICMPv6 type 135)."""
    sll_bin = self.haddr_to_bin("93:6d:d0:d4:e8:36")
    self._set_verify([dp.ofproto.OXM_OF_IPV6_ND_SLL],
                     sll_bin, type_='mac')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ip_proto(inet.IPPROTO_ICMPV6)
    match.set_icmpv6_type(135)
    match.set_ipv6_nd_sll(sll_bin)
    self.add_matches(dp, match)
def test_rule_set_ipv6_nd_tll(self, dp):
    """Match on the ND target link-layer address (ICMPv6 type 136)."""
    tll_bin = self.haddr_to_bin("18:f6:66:b6:f1:b3")
    self._set_verify([dp.ofproto.OXM_OF_IPV6_ND_TLL],
                     tll_bin, type_='mac')
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(ether.ETH_TYPE_IPV6)
    match.set_ip_proto(inet.IPPROTO_ICMPV6)
    match.set_icmpv6_type(136)
    match.set_ipv6_nd_tll(tll_bin)
    self.add_matches(dp, match)
def test_rule_set_mpls_label(self, dp):
    """Match on the MPLS label of an MPLS-unicast frame."""
    mpls_label = 2144
    self._set_verify([dp.ofproto.OXM_OF_MPLS_LABEL], mpls_label)
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(0x8847)  # MPLS unicast ethertype
    match.set_mpls_label(mpls_label)
    self.add_matches(dp, match)
def test_rule_set_mpls_tc(self, dp):
    """Match on the MPLS traffic-class bits."""
    mpls_tc = 3
    self._set_verify([dp.ofproto.OXM_OF_MPLS_TC], mpls_tc)
    match = dp.ofproto_parser.OFPMatch()
    match.set_dl_type(0x8847)  # MPLS unicast ethertype
    match.set_mpls_tc(mpls_tc)
    self.add_matches(dp, match)
def is_supported(self, t):
    """Return False when test name *t* contains a known-unsupported case."""
    unsupported = [
        'test_rule_set_mpls_tc',
    ]
    return not any(u in t for u in unsupported)
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import constants
from neutron_lib.plugins.ml2 import api
from oslo_config import cfg
from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
constants as a_const)
from neutron.plugins.ml2.drivers.openvswitch.mech_driver import (
mech_openvswitch)
from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
class OpenvswitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
    """Shared fixture for OVS mechanism-driver tests.

    Defines the agent configurations (good/bad/dead) the scenario mixins
    expect, and builds an initialized OpenvswitchMechanismDriver.
    """
    VIF_TYPE = portbindings.VIF_TYPE_OVS
    # Expected binding details: port filtering + hybrid plug enabled, matching
    # the iptables_hybrid firewall configured in setUp().
    VIF_DETAILS = {portbindings.OVS_DATAPATH_TYPE: 'system',
                   portbindings.CAP_PORT_FILTER: True,
                   portbindings.OVS_HYBRID_PLUG: True}
    AGENT_TYPE = constants.AGENT_TYPE_OVS

    GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'}
    GOOD_TUNNEL_TYPES = ['gre', 'vxlan']
    GOOD_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS,
                    'tunnel_types': GOOD_TUNNEL_TYPES}

    BAD_MAPPINGS = {'wrong_physical_network': 'wrong_bridge'}
    BAD_TUNNEL_TYPES = ['bad_tunnel_type']
    BAD_CONFIGS = {'bridge_mappings': BAD_MAPPINGS,
                   'tunnel_types': BAD_TUNNEL_TYPES}

    AGENTS = [{'alive': True,
               'configurations': GOOD_CONFIGS,
               'host': 'host'}]
    AGENTS_DEAD = [{'alive': False,
                    'configurations': GOOD_CONFIGS,
                    'host': 'dead_host'}]
    AGENTS_BAD = [{'alive': False,
                   'configurations': GOOD_CONFIGS,
                   'host': 'bad_host_1'},
                  {'alive': True,
                   'configurations': BAD_CONFIGS,
                   'host': 'bad_host_2'}]

    def setUp(self):
        super(OpenvswitchMechanismBaseTestCase, self).setUp()
        # Hybrid firewall driver implies hybrid plugging; keep in sync with
        # VIF_DETAILS above.
        cfg.CONF.set_override('firewall_driver', 'iptables_hybrid',
                              'SECURITYGROUP')
        self.driver = mech_openvswitch.OpenvswitchMechanismDriver()
        self.driver.initialize()

    def test__set_bridge_name_notify(self):
        # _set_bridge_name is expected to publish an OVS_BRIDGE_NAME
        # BEFORE_READ event; the subscriber receives a setter callback
        # ('trigger' here) that stores the bridge name into the vif details.

        def fake_callback(resource, event, trigger, **kwargs):
            trigger('fake-br-name')

        registry.subscribe(fake_callback, a_const.OVS_BRIDGE_NAME,
                           events.BEFORE_READ)
        fake_vif_details = {}
        self.driver._set_bridge_name('foo', fake_vif_details)
        self.assertEqual(
            'fake-br-name',
            fake_vif_details.get(portbindings.VIF_DETAILS_BRIDGE_NAME, ''))
class OpenvswitchMechanismSGDisabledBaseTestCase(
    OpenvswitchMechanismBaseTestCase):
    """Variant fixture with security groups disabled.

    With no security groups, the driver should advertise neither port
    filtering nor hybrid plugging in the binding details.
    """
    VIF_DETAILS = {portbindings.OVS_DATAPATH_TYPE: 'system',
                   portbindings.CAP_PORT_FILTER: False,
                   portbindings.OVS_HYBRID_PLUG: False}

    def setUp(self):
        # Disable security groups before the base setUp builds the driver.
        cfg.CONF.set_override('enable_security_group',
                              False,
                              group='SECURITYGROUP')
        super(OpenvswitchMechanismSGDisabledBaseTestCase, self).setUp()
class OpenvswitchMechanismHybridPlugTestCase(OpenvswitchMechanismBaseTestCase):
    """Negotiation of OVS_HYBRID_PLUG between server config and agent."""

    def _make_port_ctx(self, agents):
        # A single local segment keeps the binding outcome driven purely by
        # the agent configuration.
        local_segments = [{api.ID: 'local_segment_id',
                           api.NETWORK_TYPE: 'local'}]
        return base.FakePortContext(self.AGENT_TYPE, agents, local_segments,
                                    vnic_type=self.VNIC_TYPE)

    def test_backward_compat_with_unreporting_agent(self):
        hybrid = portbindings.OVS_HYBRID_PLUG
        # agent didn't report so it should be hybrid based on server config
        port_ctx = self._make_port_ctx(self.AGENTS)
        self.driver.bind_port(port_ctx)
        self.assertTrue(port_ctx._bound_vif_details[hybrid])
        self.driver.vif_details[hybrid] = False
        port_ctx = self._make_port_ctx(self.AGENTS)
        self.driver.bind_port(port_ctx)
        self.assertFalse(port_ctx._bound_vif_details[hybrid])

    def test_hybrid_plug_true_if_agent_requests(self):
        hybrid = portbindings.OVS_HYBRID_PLUG
        # Server-side default is False; the agent's report must win.
        self.driver.vif_details[hybrid] = False
        reporting_agents = [{'alive': True,
                             'configurations': {hybrid: True},
                             'host': 'host'}]
        port_ctx = self._make_port_ctx(reporting_agents)
        self.driver.bind_port(port_ctx)
        self.assertTrue(port_ctx._bound_vif_details[hybrid])

    def test_hybrid_plug_false_if_agent_requests(self):
        hybrid = portbindings.OVS_HYBRID_PLUG
        # Server-side default is True; the agent's report must win.
        self.driver.vif_details[hybrid] = True
        reporting_agents = [{'alive': True,
                             'configurations': {hybrid: False},
                             'host': 'host'}]
        port_ctx = self._make_port_ctx(reporting_agents)
        self.driver.bind_port(port_ctx)
        self.assertFalse(port_ctx._bound_vif_details[hybrid])
class OpenvswitchMechanismGenericTestCase(OpenvswitchMechanismBaseTestCase,
                                          base.AgentMechanismGenericTestCase):
    """Runs the generic agent-mechanism scenarios against the OVS driver."""
    pass
class OpenvswitchMechanismLocalTestCase(OpenvswitchMechanismBaseTestCase,
                                        base.AgentMechanismLocalTestCase):
    """Runs the local-network scenarios against the OVS driver."""
    pass
class OpenvswitchMechanismFlatTestCase(OpenvswitchMechanismBaseTestCase,
                                       base.AgentMechanismFlatTestCase):
    """Runs the flat-network scenarios against the OVS driver."""
    pass
class OpenvswitchMechanismVlanTestCase(OpenvswitchMechanismBaseTestCase,
                                       base.AgentMechanismVlanTestCase):
    """Runs the VLAN-network scenarios against the OVS driver."""
    pass
class OpenvswitchMechanismGreTestCase(OpenvswitchMechanismBaseTestCase,
                                      base.AgentMechanismGreTestCase):
    """Runs the GRE-tunnel scenarios against the OVS driver."""
    pass
class OpenvswitchMechanismSGDisabledLocalTestCase(
    OpenvswitchMechanismSGDisabledBaseTestCase,
    base.AgentMechanismLocalTestCase):
    """Local-network scenarios with security groups disabled."""
    pass
class OpenvswitchMechanismFirewallUndefinedTestCase(
    OpenvswitchMechanismBaseTestCase, base.AgentMechanismLocalTestCase):
    """Backward compatibility: no firewall driver still means hybrid plug."""

    VIF_DETAILS = {portbindings.OVS_DATAPATH_TYPE: 'system',
                   portbindings.CAP_PORT_FILTER: True,
                   portbindings.OVS_HYBRID_PLUG: True}

    def setUp(self):
        # this simple test case just ensures backward compatibility where
        # the server has no firewall driver configured, which should result
        # in hybrid plugging.
        super(OpenvswitchMechanismFirewallUndefinedTestCase, self).setUp()
        cfg.CONF.set_override('firewall_driver', '', 'SECURITYGROUP')
        # Rebuild the driver after clearing the option so it initializes
        # with no firewall driver configured.
        self.driver = mech_openvswitch.OpenvswitchMechanismDriver()
        self.driver.initialize()
class OpenvswitchMechanismDPDKTestCase(OpenvswitchMechanismBaseTestCase):
    """vhost-user mode / VIF type selection for OVS DPDK (netdev) agents."""

    GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'}
    GOOD_TUNNEL_TYPES = ['gre', 'vxlan']

    # netdev datapath advertising the plain vhost-user interface type.
    VHOST_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS,
                     'tunnel_types': GOOD_TUNNEL_TYPES,
                     'datapath_type': a_const.OVS_DATAPATH_NETDEV,
                     'ovs_capabilities': {
                         'iface_types': [a_const.OVS_DPDK_VHOST_USER]}}
    # netdev datapath advertising the vhost-user client interface type.
    VHOST_SERVER_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS,
                            'tunnel_types': GOOD_TUNNEL_TYPES,
                            'datapath_type': a_const.OVS_DATAPATH_NETDEV,
                            'ovs_capabilities': {
                                'iface_types': [a_const.OVS_DPDK_VHOST_USER_CLIENT]}}
    # Kernel (system) datapath: no vhost-user interface types at all.
    SYSTEM_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS,
                      'tunnel_types': GOOD_TUNNEL_TYPES,
                      'datapath_type': a_const.OVS_DATAPATH_SYSTEM,
                      'ovs_capabilities': {'iface_types': []}}

    AGENT = {'alive': True,
             'configurations': VHOST_CONFIGS,
             'host': 'host'}
    AGENT_SERVER = {'alive': True,
                    'configurations': VHOST_SERVER_CONFIGS,
                    'host': 'host'}
    AGENT_SYSTEM = {'alive': True,
                    'configurations': SYSTEM_CONFIGS,
                    'host': 'host'}

    def test_get_vhost_mode(self):
        # No reported iface types: expect client mode.
        ifaces = []
        result = self.driver.get_vhost_mode(ifaces)
        self.assertEqual(portbindings.VHOST_USER_MODE_CLIENT, result)
        # Plain vhost-user iface type: still client mode.
        ifaces = [a_const.OVS_DPDK_VHOST_USER]
        result = self.driver.get_vhost_mode(ifaces)
        self.assertEqual(portbindings.VHOST_USER_MODE_CLIENT, result)
        # vhost-user-client iface type: server mode.
        ifaces = [a_const.OVS_DPDK_VHOST_USER_CLIENT]
        result = self.driver.get_vhost_mode(ifaces)
        self.assertEqual(portbindings.VHOST_USER_MODE_SERVER, result)

    def test_get_vif_type(self):
        # Both vhost-user capable agents should yield VHOST_USER ...
        result = self.driver.get_vif_type(None, self.AGENT, None)
        self.assertEqual(portbindings.VIF_TYPE_VHOST_USER, result)
        result = self.driver.get_vif_type(None, self.AGENT_SERVER, None)
        self.assertEqual(portbindings.VIF_TYPE_VHOST_USER, result)
        # ... while a system-datapath agent falls back to the OVS VIF type.
        result = self.driver.get_vif_type(None, self.AGENT_SYSTEM, None)
        self.assertEqual(portbindings.VIF_TYPE_OVS, result)
class OpenvswitchMechanismSRIOVTestCase(OpenvswitchMechanismBaseTestCase):
    """Direct (SR-IOV) VNIC ports: only switchdev-capable ones are bound."""

    def _make_port_ctx(self, agents, profile=None):
        # Port context with VNIC_DIRECT and an optional binding profile.
        segments = [{api.ID: 'local_segment_id', api.NETWORK_TYPE: 'local'}]
        return base.FakePortContext(self.AGENT_TYPE, agents, segments,
                                    vnic_type=portbindings.VNIC_DIRECT,
                                    profile=profile)

    @mock.patch('neutron.plugins.ml2.drivers.mech_agent.'
                'SimpleAgentMechanismDriverBase.bind_port')
    def test_bind_port_sriov_legacy(self, mocked_bind_port):
        # Legacy SR-IOV (no switchdev capability) must not be bound by OVS.
        context = self._make_port_ctx(self.AGENTS)
        self.driver.bind_port(context)
        mocked_bind_port.assert_not_called()

    @mock.patch('neutron.plugins.ml2.drivers.mech_agent.'
                'SimpleAgentMechanismDriverBase.bind_port')
    def test_bind_port_sriov_switchdev(self, mocked_bind_port):
        # With the 'switchdev' capability the normal binding path is taken.
        profile = {'capabilities': ['switchdev']}
        context = self._make_port_ctx(self.AGENTS, profile=profile)
        self.driver.bind_port(context)
        mocked_bind_port.assert_called()
| |
'''
Created on Apr 26, 2010
@author: jnaous
'''
from expedient.common.rpc4django import rpcmethod
from django.contrib.auth.models import User
from pprint import pprint
from models import CallBackServerProxy, FVServerProxy
from openflow.optin_manager.admin_manager.models import FlowSpaceAutoApproveScript
from openflow.optin_manager.opts.models import Experiment, ExperimentFLowSpace,\
UserOpts, OptsFlowSpace, MatchStruct
from openflow.optin_manager.flowspace.utils import dotted_ip_to_int, mac_to_int,\
int_to_dotted_ip, int_to_mac, parseFVexception
from decorator import decorator
from django.db import transaction
from django.conf import settings
from django.core.mail import send_mail
from openflow.optin_manager.flowspace.utils import int_to_mac, int_to_dotted_ip
from django.contrib.sites.models import Site
from openflow.optin_manager.opts.autofsgranter import auto_fs_granter
import uuid
@decorator
def check_fv_set(func, *arg, **kwargs):
    """Decorator: fail fast unless exactly one FVServerProxy is configured.

    Raises Exception when zero or more than one FlowVisor proxy rows exist;
    otherwise calls through to the wrapped function.
    """
    fv = FVServerProxy.objects.all()
    if len(fv) == 0:
        raise Exception("No flowvisor has been set. Please set Flowvisor\
URL first and then try again")
    elif (len(fv) > 1):
        raise Exception("More than one flowvisor is set in database. Make\
sure you just have one flowvisor")
    return func(*arg, **kwargs)
@decorator
def check_user(func, *args, **kwargs):
    """
    Check that the user is authenticated and known.

    Expects the XML-RPC layer to pass the Django request object in
    kwargs['request']. On success the authenticated user is also exposed to
    the wrapped function as kwargs['user'].

    Raises Exception when the request is missing, the authentication
    middleware is not installed, the user is anonymous, or the user is not
    a clearinghouse account.
    """
    if "request" not in kwargs:
        raise Exception("Request not available for XML-RPC %s" % \
            func.func_name)
    # NOTE: a previously-present unused local (meta = request.META) was
    # removed; nothing in this guard reads the request metadata.
    if not hasattr(kwargs["request"], "user"):
        raise Exception("Authentication Middleware not installed in settings.")
    if not kwargs['request'].user.is_authenticated():
        raise Exception("User not authenticated for XML-RPC %s." % func.func_name)
    else:
        # Make the authenticated user available to the wrapped call.
        kwargs['user'] = kwargs['request'].user
    # Check that the user can actually make the xmlrpc call
    this_user = kwargs['user']
    if not this_user.get_profile().is_clearinghouse_user:
        raise Exception("Remote user %s is not a clearinghouse user" % (
            this_user.username))
    return func(*args, **kwargs)
def _same(val):
return "%s" % val
@check_user
@rpcmethod()
def checkFlowVisor( *arg, **kwargs):
    """XML-RPC sanity check: verify exactly one FlowVisor proxy is set.

    Returns "" on success; raises Exception otherwise.
    """
    fv = FVServerProxy.objects.all()
    if len(fv) == 0:
        raise Exception("No flowvisor has been set. Please set Flowvisor\
URL first and then try again")
    elif (len(fv) > 1):
        # BUG FIX: this Exception was previously constructed but never
        # raised, so the duplicate-flowvisor case silently returned "".
        raise Exception("More than one flowvisor is set in database. Make\
sure you just have one flowvisor")
    return ""
class om_ch_translate(object):
    """Field-name/format translation table for flowspace dimensions.

    Maps each clearinghouse attribute name to a 5-tuple:
    (to-string converter, from-string parser, field width in bits,
     opt-in-manager model field name, OpenFlow field name).
    """
    attr_funcs = {
        # attr_name: (func to turn to str, width)
        "dl_src": (int_to_mac, mac_to_int, 48, "mac_src","dl_src"),
        "dl_dst": (int_to_mac, mac_to_int, 48, "mac_dst","dl_dst"),
        "dl_type": (_same, int, 16, "eth_type","dl_type"),
        "vlan_id": (_same, int, 12, "vlan_id","dl_vlan"),
        "nw_src": (int_to_dotted_ip, dotted_ip_to_int, 32, "ip_src","nw_src"),
        "nw_dst": (int_to_dotted_ip, dotted_ip_to_int, 32, "ip_dst","nw_dst"),
        "nw_proto": (_same, int, 8, "ip_proto","nw_proto"),
        "tp_src": (_same, int, 16, "tp_src","tp_src"),
        "tp_dst": (_same, int, 16, "tp_dst","tp_dst"),
        "port_num": (_same, int, 16, "port_number","in_port"),
    }
def convert_star(fs):
    """Return a copy of flowspace dict *fs* with wildcard bounds expanded.

    Missing or "*" bounds become the full range of the field as strings.
    VLAN bounds may not be wildcarded: an Exception is raised in that case.
    """
    expanded = fs.copy()
    for ch_name, (to_str, from_str, width, om_name, of_name) in \
            om_ch_translate.attr_funcs.items():
        start_key = "%s_start" % ch_name
        end_key = "%s_end" % ch_name
        start_is_wild = start_key not in fs or fs[start_key] == "*"
        end_is_wild = end_key not in fs or fs[end_key] == "*"
        # A concrete VLAN or VLAN range is mandatory for opt-in flowspaces.
        if ch_name == "vlan_id" and (start_is_wild or end_is_wild):
            raise Exception("Opt-in Manager FlowSpaces require the use of VLAN or a VLAN Range")
        if start_is_wild:
            expanded[start_key] = to_str(0)
        if end_is_wild:
            expanded[end_key] = to_str(2 ** width - 1)
    return expanded
def convert_star_int(fs):
    """Return a copy of *fs* with every channel bound converted to an int.

    Missing or "*" bounds become the full numeric range 0 .. 2**width - 1;
    concrete bounds are parsed with the field's from-string converter.
    """
    converted = fs.copy()
    for ch_name, (to_str, from_str, width, om_name, of_name) in \
            om_ch_translate.attr_funcs.items():
        start_key = "%s_start" % ch_name
        end_key = "%s_end" % ch_name
        if start_key in fs and fs[start_key] != "*":
            converted[start_key] = from_str(fs[start_key])
        else:
            converted[start_key] = 0
        if end_key in fs and fs[end_key] != "*":
            converted[end_key] = from_str(fs[end_key])
        else:
            converted[end_key] = 2 ** width - 1
    return converted
def get_direction(direction):
    """Map a direction keyword to its numeric code.

    Unknown values default to 2 (bidirectional).
    """
    codes = {'ingress': 0, 'egress': 1, 'bidirectional': 2}
    return codes.get(direction, 2)
@check_user
@check_fv_set
@rpcmethod(signature=['struct', # return value
                      'string', 'string', 'string',
                      'string', 'string', 'string',
                      'array', 'array', 'struct'])
# XXX: **kwargs not allowed on XMLRPC methods
def create_slice(slice_id, project_name, project_description,
                 slice_name, slice_description, controller_url,
                 owner_email, owner_password,
                 switch_slivers, options={}, **kwargs):
    '''
    Create an OpenFlow slice.
    The C{switch_sliver} list contains a dict for each switch to be added to the
    slice's topology. Each such dict has the following items:
    - C{datapath_id}: the switch's datapath id
    - C{flowspace}: an array of dicts describing the switch's flowspace
    Each such dict has the following keys:
    - C{id}: integer. Per clearinghouse unique identifier for the rule.
    - C{port_num_start}, C{port_num_end}: string. the port range for this
    flowspace
    - C{dl_src_start}, C{dl_src_end}: string. link layer address range in
    "xx:xx:xx:xx:xx:xx" format or '*' for wildcard
    - C{dl_dst_start}, C{dl_dst_end}: string. link layer address range in
    "xx:xx:xx:xx:xx:xx" format or '*' for wildcard
    - C{vlan_id_start}, C{vlan_id_end}: string. vlan id range or
    "*" for wildcard
    - C{nw_src_start}, C{nw_src_end}: string. network address range in
    "x.x.x.x" format or '*' for wildcard
    - C{nw_dst_start}, C{nw_dst_end}: string. network address range in
    "x.x.x.x" format or '*' for wildcard
    - C{nw_proto_start}, C{nw_proto_end}: string. network protocol range or
    "*" for wildcard
    - C{tp_src_start}, C{tp_src_end}: string. transport port range or "*"
    for wildcard
    - C{tp_dst_start}, C{tp_dst_end}: string. transport port range or "*"
    for wildcard
    The call returns a dict with the following items:
    - C{error_msg}: a summary error message or "" if no errors occurred.
    - C{switches}: a list of dicts with the following items:
    - C{datapath_id}: id of the switch that caused the error
    - C{error}: optional error msg for the switch
    - all other fields of the C{switch_sliver} dicts mentioned above
    (port_num, direction, ...). The values for these items are the error
    messages associated with each field.
    @param slice_id: a string that uniquely identifies the slice at the
    clearinghouse.
    @type slice_id: int
    @param project_name: a name for the project under which this slice
    is created
    @type project_name: string
    @param project_description: text describing the project
    @type project_description: string
    @param slice_name: Name for the slice
    @type slice_name: string
    @param slice_description: text describing the slice/experiment
    @type slice_description: string
    @param controller_url: The URL for the slice's OpenFlow controller specified
    as <transport>:<hostname>[:<port>], where:
    - tranport is 'tcp' ('ssl' will be added later)
    - hostname is the controller's hostname
    - port is the port on which the controller listens to openflow
    messages (defaults to 6633).
    @type controller_url: string
    @param owner_email: email of the person responsible for the slice
    @type owner_email: string
    @param owner_password: initial password the user can use to login to the
    FlowVisor Web interface. Will need to be changed on initial login.
    @type owner_password: string
    @param switch_slivers: description of the topology and flowspace for slice
    @type switch_slivers: list of dicts
    @param options: will contain additional useful information for the operation
    @type options: dict
    @param kwargs: will contain additional useful information about the request.
    Of most use are the items in the C{kwargs['request'].META} dict. These
    include 'REMOTE_USER' which is the username of the user connecting or
    if using x509 certs then the domain name. Additionally, kwargs has the
    user using the 'user' key.
    @return: switches and links that have caused errors
    @rtype: dict
    '''
    # Debug dump of the incoming request.
    print "create_slice got the following:"
    print " slice_id: %s" % slice_id
    print " project_name: %s" % project_name
    print " project_desc: %s" % project_description
    print " slice_name: %s" % slice_name
    print " slice_desc: %s" % slice_description
    print " controller: %s" % controller_url
    print " owner_email: %s" % owner_email
    print " owner_pass: %s" % owner_password
    print " switch_slivers"
    pprint(switch_slivers, indent=8)
    print " options: "
    pprint(options, indent=8)
    # print " kwargs: "
    # pprint(kwargs, indent=8)
    # Determine slice style naming: legacy (Opt-in <= 0.7) or newer (FlowVisor >= 1.0)
    is_legacy_slice = True
    remotely_created = False
    # Retrieve information for current Experiment first
    try:
        # Legacy slices with older slice naming (Opt-in <= 0.7)
        e = Experiment.objects.filter(slice_id = options["legacy_slice_id"])
        if not e:
            raise Exception
    except:
        # New slice naming style (for FlowVisor >= 1.0) -> No legacy slice
        try:
            # A parseable UUID marks the newer naming scheme.
            uuid.UUID('{%s}' % str(slice_id))
            is_legacy_slice = False
        except:
            remotely_created = True
            is_legacy_slice = True
        e = Experiment.objects.filter(slice_id = slice_id)
    # If Experiment already existing => this is an update
    if (e.count()>0):
        old_e = e[0]
        # Legacy slices: use combination of name and ID
        if is_legacy_slice:
            old_fv_name = old_e.get_fv_slice_name()
        # Otherwise, use UUID
        else:
            old_fv_name = old_e.slice_id
        update_exp = True
        old_exp_fs = ExperimentFLowSpace.objects.filter(exp=old_e)
    else:
        update_exp = False
    # A fresh Experiment row is always created; on update the old row is
    # removed further below once the FlowVisor delete succeeds.
    e = Experiment()
    e.slice_id = slice_id
    e.project_name = project_name
    e.project_desc = project_description
    e.slice_name = slice_name
    e.slice_desc = slice_description
    e.controller_url = controller_url
    e.owner_email = owner_email
    e.owner_password = owner_password
    e.save()
    # Build the ExperimentFLowSpace rows for every sliver (not saved yet).
    all_efs = []
    for sliver in switch_slivers:
        if "datapath_id" in sliver:
            dpid = sliver['datapath_id']
        else:
            # Fall back to the all-zero dpid "00:00:...:00".
            dpid = "00:" * 8
            dpid = dpid[:-1]
        if len(sliver['flowspace'])==0:
            # HACK:
            efs = ExperimentFLowSpace()
            efs.exp = e
            efs.dpid = dpid
            efs.direction = 2
            all_efs.append(efs)
        else:
            for sfs in sliver['flowspace']:
                efs = ExperimentFLowSpace()
                efs.exp = e
                efs.dpid = dpid
                if "direction" in sfs:
                    efs.direction = get_direction(sfs['direction'])
                else:
                    efs.direction = 2
                try:
                    fs = convert_star(sfs)
                except Exception as exc:
                    # The most probable cause is the fs was requested without VLANs
                    e.delete()
                    raise exc
                # Copy every channel bound onto the model's *_s/*_e fields.
                for attr_name,(to_str, from_str, width, om_name, of_name) in \
                        om_ch_translate.attr_funcs.items():
                    ch_start ="%s_start"%(attr_name)
                    ch_end ="%s_end"%(attr_name)
                    om_start ="%s_s"%(om_name)
                    om_end ="%s_e"%(om_name)
                    setattr(efs,om_start,from_str(fs[ch_start]))
                    setattr(efs,om_end,from_str(fs[ch_end]))
                all_efs.append(efs)
    fv = FVServerProxy.objects.all()[0]
    if (update_exp):
        # Delete previous experiment from FV
        try:
            fv_success = fv.proxy.api.deleteSlice(old_fv_name)
            old_exp_fs.delete()
            old_e.delete()
        except Exception, exc:
            import traceback
            traceback.print_exc()
            if "slice does not exist" in str(exc):
                # Already gone on the FlowVisor: treat as success locally.
                fv_success = True
                old_exp_fs.delete()
                old_e.delete()
            else:
                e.delete()
                print exc
                raise Exception(parseFVexception(exc,"While trying to update experiment, FV raised exception on the delete previous experiment step: "))
        if (not fv_success):
            e.delete()
            raise Exception("While trying to update experiment, FV returned False on the delete previous experiment step")
    # Create the new experiment on FV
    try:
        # # Legacy slices: use combination of name and ID
        if remotely_created:
            new_fv_name = e.get_fv_slice_name()
        # # Otherwise, use UUID
        else:
            new_fv_name = slice_id
        #new_fv_name = slice_id
        fv_success = fv.proxy.api.createSlice(
            "%s" % new_fv_name,
            "%s" % owner_password,
            "%s" % controller_url,
            "%s" % owner_email,
        )
        # Persist the flowspace rows only after the FV slice exists.
        for fs in all_efs:
            fs.save()
        print "Created slice with %s %s %s %s" % (
            new_fv_name, owner_password, controller_url, owner_email)
    except Exception,exc:
        import traceback
        traceback.print_exc()
        e.delete()
        print exc
        if (update_exp):
            raise Exception(parseFVexception(exc,"Could not create slice at the Flowvisor, after deleting old slice. Error was: "))
        else:
            raise Exception(parseFVexception(exc,"Could not create slice at the Flowvisor. Error was: "))
    if not fv_success:
        e.delete()
        if (update_exp):
            raise Exception(
                "Could not create slice at the Flowvisor, after deleting old slice. FV Returned False in createSlice call")
        else:
            raise Exception(
                "Could not create slice at the Flowvisor. FV Returned False in createSlice call")
    if (update_exp):
        from openflow.optin_manager.opts.helper import update_opts_into_exp
        [fv_args,match_list] = update_opts_into_exp(e)
        if len(fv_args) > 0:
            # update previous opt-ins into this updated experiment
            try:
                returned_ids = fv.proxy.api.changeFlowSpace(fv_args)
                for i in range(len(match_list)):
                    match_list[i].fv_id = returned_ids[i]
                    match_list[i].save()
            except Exception, exc:
                from openflow.optin_manager.opts.helper import opt_fses_outof_exp
                import traceback
                traceback.print_exc()
                # Roll back every opt-in tied to this experiment.
                all_opts = UserOpts.objects.filter(experiment=e)
                for opt in all_opts:
                    optfses = OptsFlowSpace.objects.filter(opt = opt)
                    opt_fses_outof_exp(optfses)
                all_opts.delete()
                print exc
                raise Exception(parseFVexception(exc,"Couldn't re-opt into updated experiment. Lost all the opt-ins: "))
    flowspace_correctly_granted = True
    automatic_settings = get_automatic_settings()
    try:
        if automatic_settings["flowspace_auto_approval"]:
            auto_fs_granter(e)
    # FIXME An exception is being raised. Investigate.
    except Exception as exc:
        print "Exception happened when granting flowspace automatically: %s" % str(exc)
        flowspace_correctly_granted = False
    # Best-effort notification e-mail to the island manager; any failure
    # here is deliberately swallowed.
    try:
        # Get project detail URL to send via e-mail
        from openflow.optin_manager.opts import urls
        from django.core.urlresolvers import reverse
        project_detail_url = reverse("opt_in_experiment") or "/"
        # No "https://" check should be needed if settings are OK
        site_domain_url = "https://" + Site.objects.get_current().domain + project_detail_url
        # Tuple with the requested VLAN range
        vlan_range = ""
        try:
            if not isinstance(all_efs,list):
                all_efs = [all_efs]
            # Obtain unique ranges of VLANs
            vlan_range_all_efs = set([ (efs.vlan_id_s, efs.vlan_id_e) for efs in all_efs ])
            # Create list for e-mail
            # add_vlan_range_email = lambda x: "VLAN range: %s" % str(x)
            # vlan_range += map("\n".join, [map(add_vlan_range_email, vlan_range_all_efs)])[0]
            vlan_range += "VLAN ranges: %s" % str(list(vlan_range_all_efs))
        except:
            pass
        if all_efs:
            # Default message: either for manual granting or any failure in automatic granting
            flowspace_subject = settings.EMAIL_SUBJECT_PREFIX + " Flowspace Request: OptinManager '" + str(project_name) + "'"
            flowspace_email = "Hi, Island Manager\n\nA new flowspace was requested:\n\nProject: " + str(project_name) + "\nSlice: " + str(slice_name) + "\n" + str(vlan_range) + "\n\nYou may add a new rule for this request at: %s" % site_domain_url
            if automatic_settings["flowspace_auto_approval"]:
                if flowspace_correctly_granted:
                    flowspace_subject = settings.EMAIL_SUBJECT_PREFIX + " Flowspace Approved: OptinManager '" + str(project_name) + "'"
                    flowspace_email = "Hi, Island Manager\n\nA new flowspace was automatically granted:\n\nProject: " + str(project_name) + "\nSlice: " + str(slice_name) + str(vlan_range) + "\n\nYou may check the rule for this request at: %s" % site_domain_url
            send_mail(flowspace_subject, flowspace_email, from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[settings.ROOT_EMAIL],)
    except:
        pass
    transaction.commit()
    return {
        'error_msg': "",
        'switches': [],
    }
@check_user
@check_fv_set
@rpcmethod(signature=['string', # return value
                      'int', 'struct'])
# XXX: **kwargs not allowed on XMLRPC methods
def delete_slice(slice_id, options={}, **kwargs):
    '''
    Delete the slice with id slice_id.

    @param slice_id: an int that uniquely identifies the slice at the
        clearinghouse.
    @type slice_id: int
    @param options: additional useful information for this operation; may
        carry a "legacy_slice_id" key for slices created by Opt-in <= 0.7.
    @type options: dict
    @param kwargs: additional useful information about the request.
        Of most use are the items in the C{kwargs['request'].META} dict. These
        include 'REMOTE_USER' which is the username of the user connecting or
        if using x509 certs then the domain name.
    @return: error message if there are any errors or "" otherwise.
    '''
    # Determine slice naming style: legacy (Opt-in <= 0.7) or newer
    # (FlowVisor >= 1.0, where slice_id is a UUID).
    is_legacy_slice = True
    # Retrieve information for current Experiment first
    try:
        # Legacy slices with older slice naming (Opt-in <= 0.7)
        single_exp = Experiment.objects.get(slice_id=options["legacy_slice_id"])
        if not single_exp:
            raise Exception
    except Exception:
        # Either "legacy_slice_id" was absent or no such experiment exists;
        # fall back to looking the slice up by slice_id directly.
        try:
            try:
                # If slice_id parses as a UUID this is a new-style slice
                # (FlowVisor >= 1.0); otherwise treat it as legacy naming.
                uuid.UUID('{%s}' % str(slice_id))
                is_legacy_slice = False
                single_exp = Experiment.objects.get(slice_id=slice_id)
            except Exception:
                is_legacy_slice = True
                single_exp = Experiment.objects.get(slice_id=slice_id)
        except Experiment.DoesNotExist:
            return "Experiment does not exist"
    fv = FVServerProxy.objects.all()[0]
    try:
        # Legacy slices: use combination of name and ID; otherwise the UUID.
        if is_legacy_slice:
            old_fv_name = single_exp.get_fv_slice_name()
        else:
            old_fv_name = single_exp.slice_id
        fv.proxy.api.deleteSlice(old_fv_name)
    except Exception as e:
        import traceback
        traceback.print_exc()
        # A slice already gone from the FlowVisor counts as a success.
        if "slice does not exist" not in str(e):
            return "Could not delete slice on Flowvisor: %s" % parseFVexception(e)
    # Get all flowspaces opted into this experiment
    ofs = OptsFlowSpace.objects.filter(opt__experiment=single_exp)
    # Delete all match structs for each flowspace
    for fs in ofs:
        MatchStruct.objects.filter(optfs=fs).delete()
    # Delete all flowspaces opted into this experiment
    ofs.delete()
    UserOpts.objects.filter(experiment=single_exp).delete()
    ExperimentFLowSpace.objects.filter(exp=single_exp).delete()
    single_exp.delete()
    return ""
@check_user
@rpcmethod(signature=['string', 'string', 'array'])
def change_slice_controller(slice_id, controller_url, **kwargs):
    '''
    Change the controller URL of the slice identified by slice_id.

    @param slice_id: unique identifier of the slice.
    @param controller_url: new controller URL, expected as "proto:host:port".
    @return: "" on success; raises Exception on FlowVisor failure.
    '''
    fv = FVServerProxy.objects.all()[0]
    try:
        # controller_url is expected to look like "tcp:host:port"
        params = controller_url.split(':')
        experiment = Experiment.objects.get(slice_id=slice_id)
        slice_name = experiment.get_fv_slice_name()
        fv.proxy.api.changeSlice(slice_name, 'controller_hostname', params[1])
        fv.proxy.api.changeSlice(slice_name, 'controller_port', params[2])
        experiment.controller_url = controller_url
        # NOTE(review): the new controller_url is assigned on the model but
        # never persisted (no experiment.save()) -- confirm whether this is
        # intentional before adding a save here.
    except Exception as exc:
        import traceback
        traceback.print_exc()
        raise Exception(parseFVexception(exc, "FV could not update slice controller URL:"))
    return ""
@check_user
@check_fv_set
@rpcmethod(signature=['array'])
def get_switches(**kwargs):
    '''
    Return the list of switches exactly as reported by the FlowVisor.
    '''
    fv = FVServerProxy.objects.all()[0]
    try:
        switch_list = fv.get_switches()
    except Exception as err:
        import traceback
        traceback.print_exc()
        raise Exception(parseFVexception(err))
    return list(switch_list)
@check_user
@check_fv_set
@rpcmethod(signature=['array'])
def get_links(**kwargs):
    '''
    Return the list of links exactly as reported by the FlowVisor.
    '''
    fv = FVServerProxy.objects.all()[0]
    try:
        link_list = fv.get_links()
    except Exception as err:
        import traceback
        traceback.print_exc()
        raise Exception(parseFVexception(err))
    return list(link_list)
@check_user
@rpcmethod(signature=['string', 'string', 'string'])
def register_topology_callback(url, cookie, **kwargs):
    '''
    Persist the callback URL and cookie used for topology notifications,
    keyed by the connecting user's username.
    '''
    from expedient.common import utils
    utils.create_or_update(
        CallBackServerProxy,
        {'username': kwargs['user'].username},
        {'url': url, 'cookie': cookie},
    )
    return ""
@check_user
@rpcmethod(signature=['string', 'string'])
def change_password(new_password, **kwargs):
    '''
    Change the current password used for the clearinghouse to C{new_password}.

    @param new_password: the new password to use for authentication.
    @type new_password: random string of 1024 characters
    @param kwargs: additional useful information about the request.
        Of most use are the items in the C{kwargs['request'].META} dict, e.g.
        'REMOTE_USER' (the connecting username, or the domain name with x509).
    @return: Error message if there is any.
    @rtype: string
    '''
    account = kwargs['user']
    account.set_password(new_password)
    account.save()
    return ""
@check_user
@rpcmethod(signature=['string', 'string'])
def ping(data, **kwargs):
    '''
    Liveness check: echo C{data} back prefixed with "PONG: ".
    '''
    return "PONG: %s" % data
@check_user
@check_fv_set
@rpcmethod()
def get_granted_flowspace(slice_id, **kwargs):
    '''
    Return FlowVisor Rules for the slice.

    @param slice_id: unique identifier of the slice (domain + slice id).
    @return: list with one element per UserOpts row of the matching
        experiment; each element is the list built by
        parse_granted_flowspaces for that opt's OptsFlowSpace rows.
    '''
    def parse_granted_flowspaces(gfs):
        # Convert OptsFlowSpace rows into dicts; rows whose 'flowspace'
        # part is identical are merged into one entry carrying several
        # 'openflow' (dpid/direction/port) entries.
        gfs_list=[]
        for fs in gfs:
            fs_dict = dict(
                flowspace=dict(),
                openflow=dict()
            )
            fs_dict['openflow']=[]
            # Human-readable flowspace ranges (MACs and IPs converted
            # from their integer storage form).
            fs_dict['flowspace']=dict(
                mac_src_s=int_to_mac(fs.mac_src_s),
                mac_src_e=int_to_mac(fs.mac_src_e),
                mac_dst_s=int_to_mac(fs.mac_dst_s),
                mac_dst_e=int_to_mac(fs.mac_dst_e),
                eth_type_s=fs.eth_type_s,
                eth_type_e=fs.eth_type_e,
                vlan_id_s=fs.vlan_id_s,
                vlan_id_e=fs.vlan_id_e,
                ip_src_s=int_to_dotted_ip(fs.ip_src_s),
                ip_dst_s=int_to_dotted_ip(fs.ip_dst_s),
                ip_src_e=int_to_dotted_ip(fs.ip_src_e),
                ip_dst_e=int_to_dotted_ip(fs.ip_dst_e),
                ip_proto_s=fs.ip_proto_s,
                ip_proto_e=fs.ip_proto_e,
                tp_src_s=fs.tp_src_s,
                tp_dst_s=fs.tp_dst_s,
                tp_src_e=fs.tp_src_e,
                tp_dst_e=fs.tp_dst_e,
            )
            openflow_dict=dict(
                dpid=fs.dpid,
                direction=fs.direction,
                port_number_s=fs.port_number_s,
                port_number_e=fs.port_number_e,
            )
            existing_fs = False
            # Merge into a previously-seen identical flowspace, if any.
            for prev_dict in gfs_list:
                if fs_dict['flowspace'] == prev_dict['flowspace']:
                    if openflow_dict not in prev_dict['openflow']:
                        prev_dict['openflow'].append(openflow_dict)
                    existing_fs = True
                    break
            if not existing_fs:
                fs_dict['openflow'].append(openflow_dict)
                gfs_list.append(fs_dict)
        return gfs_list
    try:
        #TODO: Check 100% that only with slice_id (domain+slice.id) is enough not to crash with some other clearinghouse connected to the optin
        exp = Experiment.objects.filter(slice_id = slice_id)
        gfs = []
        # Only proceed when exactly one experiment matches the slice_id.
        if exp and len(exp) == 1:
            opts = exp[0].useropts_set.all()
            if opts:
                for opt in opts:
                    gfs_temp = opt.optsflowspace_set.all()
                    # NOTE(review): append() nests one list per opt; if
                    # callers expect a flat list this should be extend() --
                    # confirm against consumers of this RPC.
                    gfs.append(parse_granted_flowspaces(gfs_temp))
    except Exception,e:
        import traceback
        traceback.print_exc()
        raise Exception(parseFVexception(e))
    return gfs
#@check_user
@check_fv_set
@rpcmethod()
def get_offered_vlans(set=None):
    # NOTE: the parameter name shadows the builtin ``set``; it is kept
    # unchanged for backward compatibility with existing callers.
    from openflow.optin_manager.opts.vlans.vlanController import vlanController
    return vlanController.offer_vlan_tags(set)
@check_fv_set
@rpcmethod()
def get_automatic_settings(args=None):
    """
    Get status of the automatic granting of VLANs and approval of Flowspaces.

    @return: dict with boolean keys "vlan_auto_assignment" and
        "flowspace_auto_approval"; both default to False when no
        FlowSpaceAutoApproveScript entry exists (e.g. older versions,
        where approval was and stays manual).
    """
    info = dict()
    # Control missing choice here (older versions) => if setting not set,
    # it means it was manual approval and still is.
    try:
        auto_approve_settings = FlowSpaceAutoApproveScript.objects.filter(
            admin=User.objects.filter(is_superuser=True))[0]
    except Exception:
        # No settings row (or no superuser): fall back to defaults below.
        auto_approve_settings = None
    # If "auto_approve_settings" object does not exist in models, default
    # value ("False") will take its place.
    info["vlan_auto_assignment"] = getattr(auto_approve_settings, "vlan_auto_grant", False)
    info["flowspace_auto_approval"] = getattr(auto_approve_settings, "flowspace_auto_approval", False)
    return info
@check_fv_set
@rpcmethod()
def get_used_vlans(range_len=1, direct_output=False):
    """
    Returns a list with the VLANs used within this OpenFlow aggregate
    @param range_len length of the requested VLAN range; NOTE(review): the
        value is immediately overwritten with None below, so the caller's
        argument is ignored -- confirm whether that override is intentional
    @param direct_output defines if only one aggregate is present (True) or more (False)
    """
    # NOTE(review): this makes the range_len parameter dead (see docstring).
    range_len = None
    from openflow.optin_manager.opts.vlans.vlanController import vlanController
    import random
    vlans = vlanController.offer_vlan_tags(range_len)
    # NOTE(review): range_len is always None here, so "range_len > 1" is
    # always False on Python 2 (and would raise TypeError on Python 3).
    if not direct_output or range_len > 1:
        return list(set(range(4096)) - set(vlans))
    else:
        rnd = random.randrange(0, len(vlans))
        # Return random VLAN [from 0 to len(vlans)-1] for all the available to minimise collisions
        return [vlans[rnd]]
@rpcmethod()
def ListResources(args=None):
    """Advertise all devices of this aggregate for external monitoring
    (e.g. FELIX monitoring)."""
    from openflow.optin_manager.geni.v3.configurators.optin import HandlerConfigurator
    rspec_mgr = HandlerConfigurator.get_optin_rspec_manager()
    devices = HandlerConfigurator.get_optin_driver().get_all_devices()
    return rspec_mgr.compose_advertisement(devices)
@check_fv_set
@rpcmethod()
def get_ocf_am_version(args=None):
    """
    Return the software version string for the current aggregate.

    Reads the ".currentVersion" file located one directory above
    C{settings.SRC_DIR}.
    """
    import os
    # Context manager ensures the file handle is closed even if read() fails
    # (the original open/read/close leaked the handle on error).
    with open(os.path.join(settings.SRC_DIR, "..", ".currentVersion"), "r") as sv:
        return sv.read().strip()
@check_fv_set
@rpcmethod()
def get_am_info(args=None):
    """
    Return a set of information about the aggregate.

    @return: dict with "version" plus the automatic-settings flags
        provided by get_automatic_settings().
    """
    # INFO: add as many keys as you wish
    # (a previously fetched FlowSpaceAutoApproveScript object was unused
    # here and could raise IndexError when no entry existed; removed --
    # get_automatic_settings() already handles that lookup safely.)
    info = dict()
    info["version"] = get_ocf_am_version()
    info.update(get_automatic_settings())
    return info
| |
"""
sentry.interfaces
~~~~~~~~~~~~~~~~~
Interfaces provide an abstraction for how structured data should be
validated and rendered.
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import itertools
import urlparse
from django.http import QueryDict
from django.utils.translation import ugettext_lazy as _
from sentry.web.helpers import render_to_string
_Exception = Exception
def unserialize(klass, data):
    """Rebuild an instance of ``klass`` from its serialized ``data``.

    ``__init__`` is deliberately bypassed; state is restored through the
    class's ``__setstate__`` hook.
    """
    instance = object.__new__(klass)
    instance.__setstate__(data)
    return instance
def get_context(lineno, context_line, pre_context=None, post_context=None):
    """Build the ``(line_number, line_text)`` pairs around ``context_line``.

    :param lineno: 1-based line number of ``context_line``.
    :param context_line: the source line located at ``lineno``.
    :param pre_context: optional list of lines immediately before it.
    :param post_context: optional list of lines immediately after it.
    :return: list of ``(line_number, line_text)`` tuples in file order.
    """
    # (The original computed a ``start_lineno`` that was never read; those
    # dead stores have been removed.)
    lineno = int(lineno)
    context = []
    if pre_context:
        at_lineno = lineno - len(pre_context)
        for line in pre_context:
            context.append((at_lineno, line))
            at_lineno += 1
    else:
        at_lineno = lineno
    context.append((at_lineno, context_line))
    at_lineno += 1
    if post_context:
        for line in post_context:
            context.append((at_lineno, line))
            at_lineno += 1
    return context
class Interface(object):
    """
    An interface is a structured representation of data, which may
    render differently than the default ``extra`` metadata in an event.
    """
    score = 0

    def __init__(self, **kwargs):
        self.attrs = kwargs.keys()
        self.__dict__.update(kwargs)

    def __setstate__(self, data):
        state = self.unserialize(data)
        self.attrs = state.keys()
        self.__dict__.update(state)

    def __getstate__(self):
        return self.serialize()

    def unserialize(self, data):
        return data

    def serialize(self):
        return dict((name, self.__dict__[name]) for name in self.attrs)

    def get_hash(self):
        return []

    def to_html(self, event):
        return ''

    def to_string(self, event):
        return ''

    def get_title(self):
        return _(self.__class__.__name__)

    def get_search_context(self, event):
        """
        Describe the data the search engine should index.

        Accepted fields:

        - text: a list of text items to index as part of the generic query
        - filters: a map of fields which are used for precise matching
        """
        return {
            # 'text': ['...'],
            # 'filters': {
            #     'field": ['...'],
            # },
        }
class Message(Interface):
    """Interface for a log message plus optional interpolation params."""

    def __init__(self, message, params=()):
        self.message = message
        self.params = params

    def serialize(self):
        return {
            'message': self.message,
            'params': self.params,
        }

    def get_hash(self):
        # Group purely on the (uninterpolated) message template.
        return [self.message]

    def get_search_context(self, event):
        params = self.params
        if isinstance(params, dict):
            extra = params.values()
        elif isinstance(params, (list, tuple)):
            extra = list(params)
        else:
            extra = ()
        return {
            'text': [self.message] + extra,
        }
class Query(Interface):
    """Interface for a database query and the engine that executed it."""

    def __init__(self, query, engine=None):
        self.query = query
        self.engine = engine

    def serialize(self):
        return {
            'query': self.query,
            'engine': self.engine,
        }

    def get_hash(self):
        return [self.query]

    def get_search_context(self, event):
        return {
            'text': [self.query],
        }
class Stacktrace(Interface):
    """
    Interface for a stack trace: a list of frame dicts.

    Every frame must carry at least 'filename' and 'lineno'; 'module',
    'function', 'context_line', 'pre_context', 'post_context', 'vars'
    and 'abs_path' are optional.
    """
    score = 1000
    def __init__(self, frames):
        self.frames = frames
        for frame in frames:
            # ensure we've got the correct required values
            assert 'filename' in frame
            assert 'lineno' in frame
            # assert 'context_line' in frame
            # assert 'function' in frame
    def _shorten(self, value, depth=1):
        # Recursively truncate a frame variable for display: containers keep
        # at most 100/depth items, strings are capped at 100 chars, and
        # recursion stops at depth 5 (only the type is kept there).
        # NOTE: Python 2 only (iteritems, long); 100 / depth is integer
        # division here.
        if depth > 5:
            return type(value)
        if isinstance(value, dict):
            return dict((k, self._shorten(v, depth + 1)) for k, v in sorted(value.iteritems())[:100 / depth])
        elif isinstance(value, (list, tuple, set, frozenset)):
            return tuple(self._shorten(v, depth + 1) for v in value)[:100 / depth]
        elif isinstance(value, (int, long, float)):
            return value
        elif not value:
            return value
        return value[:100]
    def serialize(self):
        return {
            'frames': self.frames,
        }
    def get_hash(self):
        # Group by (module-or-filename, function-or-lineno) per frame.
        # NOTE(review): the lineno fallback appends an int among strings --
        # presumably consumers stringify hash parts; confirm.
        output = []
        for frame in self.frames:
            if frame.get('module'):
                output.append(frame['module'])
            else:
                output.append(frame['filename'])
            if frame.get('function'):
                output.append(frame['function'])
            else:
                output.append(frame['lineno'])
        return output
    def to_html(self, event):
        # Build per-frame render context (numbered context lines and
        # shortened local variables) for the stacktrace template.
        frames = []
        for frame in self.frames:
            if frame.get('context_line'):
                context = get_context(frame['lineno'], frame['context_line'], frame.get('pre_context'), frame.get('post_context'))
                start_lineno = context[0][0]
            else:
                context = []
                start_lineno = None
            context_vars = []
            if frame.get('vars'):
                context_vars = self._shorten(frame['vars'])
            else:
                context_vars = []
            frames.append({
                'abs_path': frame.get('abs_path'),
                'filename': frame['filename'],
                'function': frame.get('function'),
                'start_lineno': start_lineno,
                'lineno': frame.get('lineno'),
                'context': context,
                'vars': context_vars,
            })
        return render_to_string('sentry/partial/interfaces/stacktrace.html', {
            'event': event,
            'frames': frames,
            'stacktrace': self.get_traceback(event),
        })
    def to_string(self, event):
        return self.get_stacktrace(event)
    def get_stacktrace(self, event):
        # Plain-text rendering mimicking Python's traceback format.
        result = [
            'Stacktrace (most recent call last):', '',
        ]
        for frame in self.frames:
            if 'function' in frame:
                result.append('  File "%(filename)s", line %(lineno)s, in %(function)s' % frame)
            else:
                result.append('  File "%(filename)s", line %(lineno)s' % frame)
            if 'context_line' in frame:
                result.append('    %s' % frame['context_line'].strip())
        result.append('')
        return '\n'.join(result)
    def get_traceback(self, event):
        # Stacktrace text preceded by the event message.
        result = [
            event.message, '',
            self.get_stacktrace(event),
        ]
        return '\n'.join(result)
    def get_search_context(self, event):
        return {
            'text': list(itertools.chain(*[[f.get('filename'), f.get('function'), f.get('context_line')] for f in self.frames])),
        }
class Exception(Interface):
    """Interface describing a caught exception (type, value, module).

    Note: this class intentionally shadows the builtin ``Exception``;
    the module keeps the builtin available as ``_Exception``.
    """
    score = 900

    def __init__(self, value, type=None, module=None):
        # A human readable value for the exception
        self.value = value
        # The exception type name (e.g. TypeError)
        self.type = type
        # Optional module of the exception type (e.g. __builtin__)
        self.module = module

    def serialize(self):
        return {
            'type': self.type,
            'value': self.value,
            'module': self.module,
        }

    def get_hash(self):
        # Group by module+type when present, otherwise by the value text.
        parts = filter(bool, [self.module, self.type])
        if not parts:
            parts = [self.value]
        return parts

    def to_html(self, event):
        stacktrace = event.interfaces.get('sentry.interfaces.Stacktrace')
        last_frame = None
        if stacktrace is not None and stacktrace.frames:
            last_frame = stacktrace.frames[-1]
        return render_to_string('sentry/partial/interfaces/exception.html', {
            'event': event,
            'exception_value': self.value,
            'exception_type': self.type,
            'exception_module': self.module,
            'last_frame': last_frame
        })

    def get_search_context(self, event):
        return {
            'text': [self.value, self.type, self.module]
        }
class Http(Interface):
    """
    Interface for an HTTP request: url, method, body data, query string,
    cookies, headers and server environment.
    """
    score = 100
    # methods as defined by http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html + PATCH
    METHODS = ('GET', 'POST', 'PUT', 'OPTIONS', 'HEAD', 'DELETE', 'TRACE', 'CONNECT', 'PATCH')
    def __init__(self, url, method=None, data=None, query_string=None, cookies=None, headers=None, env=None, **kwargs):
        if data is None:
            data = {}
        # HTTP methods are normalized to upper case.
        if method:
            method = method.upper()
        urlparts = urlparse.urlsplit(url)
        if not query_string:
            # define querystring from url
            query_string = urlparts.query
        elif query_string.startswith('?'):
            # remove '?' prefix
            query_string = query_string[1:]
        # The stored url is normalized to scheme://netloc/path, without
        # its query string (kept separately in self.query_string).
        self.url = '%s://%s%s' % (urlparts.scheme, urlparts.netloc, urlparts.path)
        self.method = method
        self.data = data
        self.query_string = query_string
        if cookies:
            self.cookies = cookies
        else:
            self.cookies = {}
        # if cookies were [also] included in headers we
        # strip them out
        # NOTE: pop() mutates the caller-supplied headers dict.
        if headers and 'Cookie' in headers:
            cookies = headers.pop('Cookie')
            if cookies:
                self.cookies = cookies
        self.headers = headers or {}
        self.env = env or {}
    def serialize(self):
        return {
            'url': self.url,
            'method': self.method,
            'data': self.data,
            'query_string': self.query_string,
            'cookies': self.cookies,
            'headers': self.headers,
            'env': self.env,
        }
    def to_string(self, event):
        # Plain-text rendering: method, full URL (with query string) only.
        return render_to_string('sentry/partial/interfaces/http.txt', {
            'event': event,
            'full_url': '?'.join(filter(bool, [self.url, self.query_string])),
            'url': self.url,
            'method': self.method,
            'query_string': self.query_string,
        })
    def to_html(self, event):
        # Try to present form-encoded bodies and cookie strings as dicts
        # so the template can render key/value tables.
        data = self.data
        data_is_dict = False
        if self.headers.get('Content-Type') == 'application/x-www-form-urlencoded':
            try:
                data = QueryDict(data)
            except _Exception:
                # leave the raw body as-is if it cannot be parsed
                pass
            else:
                data_is_dict = True
        # It's kind of silly we store this twice
        # NOTE: pop() mutates self.headers when cookies live there.
        cookies = self.cookies or self.headers.pop('Cookie', {})
        cookies_is_dict = isinstance(cookies, dict)
        if not cookies_is_dict:
            try:
                cookies = QueryDict(cookies)
            except _Exception:
                pass
            else:
                cookies_is_dict = True
        return render_to_string('sentry/partial/interfaces/http.html', {
            'event': event,
            'full_url': '?'.join(filter(bool, [self.url, self.query_string])),
            'url': self.url,
            'method': self.method,
            'data': data,
            'data_is_dict': data_is_dict,
            'query_string': self.query_string,
            'cookies': cookies,
            'cookies_is_dict': cookies_is_dict,
            'headers': self.headers,
            'env': self.env,
        })
    def get_search_context(self, event):
        return {
            'filters': {
                'url': [self.url],
            }
        }
class Template(Interface):
    """Interface describing a template error location with context lines."""
    score = 1001

    def __init__(self, filename, context_line, lineno, pre_context=None, post_context=None,
                 abs_path=None):
        self.abs_path = abs_path
        self.filename = filename
        self.context_line = context_line
        self.lineno = lineno
        self.pre_context = pre_context
        self.post_context = post_context

    def serialize(self):
        return {
            'abs_path': self.abs_path,
            'filename': self.filename,
            'context_line': self.context_line,
            'lineno': self.lineno,
            'pre_context': self.pre_context,
            'post_context': self.post_context,
        }

    def get_hash(self):
        return [self.filename, self.context_line]

    def to_string(self, event):
        context = get_context(self.lineno, self.context_line, self.pre_context, self.post_context)
        return '\n'.join([
            'Stacktrace (most recent call last):', '',
            self.get_traceback(event, context)
        ])

    def to_html(self, event):
        context = get_context(self.lineno, self.context_line, self.pre_context, self.post_context)
        return render_to_string('sentry/partial/interfaces/template.html', {
            'event': event,
            'abs_path': self.abs_path,
            'filename': self.filename,
            'lineno': self.lineno,
            'start_lineno': context[0][0],
            'context': context,
            'template': self.get_traceback(event, context),
        })

    def get_traceback(self, event, context):
        # Event message, location line, then the raw context lines.
        lines = [
            event.message, '',
            'File "%s", line %s' % (self.filename, self.lineno), '',
        ]
        lines.extend(entry[1].strip('\n') for entry in context)
        return '\n'.join(lines)

    def get_search_context(self, event):
        return {
            'text': [self.abs_path, self.filename, self.context_line],
        }
class User(Interface):
    """Interface describing the acting user (id, username, email)."""

    def __init__(self, is_authenticated, **kwargs):
        self.is_authenticated = is_authenticated
        self.id = kwargs.get('id')
        self.username = kwargs.get('username')
        self.email = kwargs.get('email')

    def serialize(self):
        # Identity details are only exposed for authenticated users.
        if not self.is_authenticated:
            return {
                'is_authenticated': self.is_authenticated
            }
        return {
            'is_authenticated': self.is_authenticated,
            'id': self.id,
            'username': self.username,
            'email': self.email,
        }

    def get_hash(self):
        return []

    def to_html(self, event):
        return render_to_string('sentry/partial/interfaces/user.html', {
            'event': event,
            'user_authenticated': self.is_authenticated,
            'user_id': self.id,
            'user_username': self.username,
            'user_email': self.email
        })

    def get_search_context(self, event):
        if not self.is_authenticated:
            return {}
        return {
            'text': [self.id, self.username, self.email]
        }
| |
import itertools
import random
from sqlalchemy import bindparam
from sqlalchemy import column
from sqlalchemy import exc
from sqlalchemy import exists
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import update
from sqlalchemy import util
from sqlalchemy.dialects import mysql
from sqlalchemy.engine import default
from sqlalchemy.sql import operators
from sqlalchemy.sql.elements import BooleanClauseList
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class _UpdateFromTestBase(object):
@classmethod
def define_tables(cls, metadata):
Table(
"mytable",
metadata,
Column("myid", Integer),
Column("name", String(30)),
Column("description", String(50)),
)
Table(
"myothertable",
metadata,
Column("otherid", Integer),
Column("othername", String(30)),
)
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30), nullable=False),
)
Table(
"addresses",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("name", String(30), nullable=False),
Column("email_address", String(50), nullable=False),
)
Table(
"dingalings",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("address_id", None, ForeignKey("addresses.id")),
Column("data", String(30)),
)
Table(
"update_w_default",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("ycol", Integer, key="y"),
Column("data", String(30), onupdate=lambda: "hi"),
)
@classmethod
def fixtures(cls):
return dict(
users=(
("id", "name"),
(7, "jack"),
(8, "ed"),
(9, "fred"),
(10, "chuck"),
),
addresses=(
("id", "user_id", "name", "email_address"),
(1, 7, "x", "jack@bean.com"),
(2, 8, "x", "ed@wood.com"),
(3, 8, "x", "ed@bettyboop.com"),
(4, 8, "x", "ed@lala.com"),
(5, 9, "x", "fred@fred.com"),
),
dingalings=(
("id", "address_id", "data"),
(1, 2, "ding 1/2"),
(2, 5, "ding 2/5"),
),
)
class UpdateTest(_UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL):
__dialect__ = "default_enhanced"
def test_update_literal_binds(self):
table1 = self.tables.mytable
stmt = (
table1.update().values(name="jack").where(table1.c.name == "jill")
)
self.assert_compile(
stmt,
"UPDATE mytable SET name='jack' WHERE mytable.name = 'jill'",
literal_binds=True,
)
def test_update_custom_key_thing(self):
table1 = self.tables.mytable
class Thing(object):
def __clause_element__(self):
return table1.c.name
stmt = (
table1.update()
.values({Thing(): "jack"})
.where(table1.c.name == "jill")
)
self.assert_compile(
stmt,
"UPDATE mytable SET name='jack' WHERE mytable.name = 'jill'",
literal_binds=True,
)
def test_update_ordered_custom_key_thing(self):
table1 = self.tables.mytable
class Thing(object):
def __clause_element__(self):
return table1.c.name
stmt = (
table1.update()
.ordered_values((Thing(), "jack"))
.where(table1.c.name == "jill")
)
self.assert_compile(
stmt,
"UPDATE mytable SET name='jack' WHERE mytable.name = 'jill'",
literal_binds=True,
)
    def test_update_broken_custom_key_thing(self):
        """A __clause_element__ returning a non-column raises ArgumentError.

        The assertion regex matches the local class name ``Thing``.
        """
        table1 = self.tables.mytable

        class Thing(object):
            def __clause_element__(self):
                return 5

        assert_raises_message(
            exc.ArgumentError,
            "SET/VALUES column expression or string key expected, got .*Thing",
            table1.update().values,
            {Thing(): "jack"},
        )
    def test_update_ordered_broken_custom_key_thing(self):
        """Same as test_update_broken_custom_key_thing but via ordered_values().

        The assertion regex matches the local class name ``Thing``.
        """
        table1 = self.tables.mytable

        class Thing(object):
            def __clause_element__(self):
                return 5

        assert_raises_message(
            exc.ArgumentError,
            "SET/VALUES column expression or string key expected, got .*Thing",
            table1.update().ordered_values,
            (Thing(), "jack"),
        )
def test_correlated_update_one(self):
table1 = self.tables.mytable
# test against a straight text subquery
u = update(table1).values(
{
table1.c.name: text(
"(select name from mytable where id=mytable.id)"
)
}
)
self.assert_compile(
u,
"UPDATE mytable SET name=(select name from mytable "
"where id=mytable.id)",
)
def test_correlated_update_two(self):
table1 = self.tables.mytable
mt = table1.alias()
u = update(table1).values(
{
table1.c.name: select(mt.c.name)
.where(mt.c.myid == table1.c.myid)
.scalar_subquery()
}
)
self.assert_compile(
u,
"UPDATE mytable SET name=(SELECT mytable_1.name FROM "
"mytable AS mytable_1 WHERE "
"mytable_1.myid = mytable.myid)",
)
def test_correlated_update_three(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
# test against a regular constructed subquery
s = (
select(table2)
.where(table2.c.otherid == table1.c.myid)
.scalar_subquery()
)
u = (
update(table1)
.where(table1.c.name == "jack")
.values({table1.c.name: s})
)
self.assert_compile(
u,
"UPDATE mytable SET name=(SELECT myothertable.otherid, "
"myothertable.othername FROM myothertable WHERE "
"myothertable.otherid = mytable.myid) "
"WHERE mytable.name = :name_1",
)
def test_correlated_update_four(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
# test a non-correlated WHERE clause
s = select(table2.c.othername).where(table2.c.otherid == 7)
u = update(table1).where(table1.c.name == s.scalar_subquery())
self.assert_compile(
u,
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description WHERE mytable.name = "
"(SELECT myothertable.othername FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)",
)
def test_correlated_update_five(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
# test one that is actually correlated...
s = select(table2.c.othername).where(table2.c.otherid == table1.c.myid)
u = table1.update().where(table1.c.name == s.scalar_subquery())
self.assert_compile(
u,
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description WHERE mytable.name = "
"(SELECT myothertable.othername FROM myothertable "
"WHERE myothertable.otherid = mytable.myid)",
)
def test_correlated_update_six(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
# test correlated FROM implicit in WHERE and SET clauses
u = (
table1.update()
.values(name=table2.c.othername)
.where(table2.c.otherid == table1.c.myid)
)
self.assert_compile(
u,
"UPDATE mytable SET name=myothertable.othername "
"FROM myothertable WHERE myothertable.otherid = mytable.myid",
)
def test_correlated_update_seven(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
u = (
table1.update()
.values(name="foo")
.where(table2.c.otherid == table1.c.myid)
)
# this is the "default_enhanced" compiler. there's no UPDATE FROM
# in the base compiler.
# See also test/dialect/mssql/test_compiler->test_update_from().
self.assert_compile(
u,
"UPDATE mytable SET name=:name "
"FROM myothertable WHERE myothertable.otherid = mytable.myid",
)
    def test_binds_that_match_columns(self):
        """test bind params named after column names
        replace the normal SET/VALUES generation."""
        t = table("foo", column("x"), column("y"))
        u = t.update().where(t.c.x == bindparam("x"))
        # with no params and no values, the bind named "x" is consumed by
        # the WHERE clause and nothing is left to SET
        assert_raises(exc.CompileError, u.compile)
        self.assert_compile(u, "UPDATE foo SET WHERE foo.x = :x", params={})
        assert_raises(exc.CompileError, u.values(x=7).compile)
        self.assert_compile(
            u.values(y=7), "UPDATE foo SET y=:y WHERE foo.x = :x"
        )
        assert_raises(
            exc.CompileError, u.values(x=7).compile, column_keys=["x", "y"]
        )
        assert_raises(exc.CompileError, u.compile, column_keys=["x", "y"])
        # an expression involving the bind is allowed in SET
        self.assert_compile(
            u.values(x=3 + bindparam("x")),
            "UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x",
        )
        self.assert_compile(
            u.values(x=3 + bindparam("x")),
            "UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x",
            params={"x": 1},
        )
        # an extra param ("y") still contributes its own SET entry
        self.assert_compile(
            u.values(x=3 + bindparam("x")),
            "UPDATE foo SET x=(:param_1 + :x), y=:y WHERE foo.x = :x",
            params={"x": 1, "y": 2},
        )
def test_labels_no_collision(self):
t = table("foo", column("id"), column("foo_id"))
self.assert_compile(
t.update().where(t.c.id == 5),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :id_1",
)
self.assert_compile(
t.update().where(t.c.id == bindparam(key=t.c.id._label)),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :foo_id_1",
)
def test_labels_no_collision_index(self):
"""test for [ticket:4911]"""
t = Table(
"foo",
MetaData(),
Column("id", Integer, index=True),
Column("foo_id", Integer),
)
self.assert_compile(
t.update().where(t.c.id == 5),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :id_1",
)
self.assert_compile(
t.update().where(t.c.id == bindparam(key=t.c.id._label)),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :foo_id_1",
)
    def test_inline_defaults(self):
        """onupdate defaults (SQL function and SELECT) render into SET,
        with identical output whether or not inline() is used."""
        m = MetaData()
        foo = Table("foo", m, Column("id", Integer))
        t = Table(
            "test",
            m,
            Column("col1", Integer, onupdate=func.foo(1)),
            Column(
                "col2",
                Integer,
                onupdate=select(func.coalesce(func.max(foo.c.id))),
            ),
            Column("col3", String(30)),
        )
        self.assert_compile(
            t.update().values({"col3": "foo"}),
            "UPDATE test SET col1=foo(:foo_1), col2=(SELECT "
            "coalesce(max(foo.id)) AS coalesce_1 FROM foo), "
            "col3=:col3",
        )
        self.assert_compile(
            t.update().inline().values({"col3": "foo"}),
            "UPDATE test SET col1=foo(:foo_1), col2=(SELECT "
            "coalesce(max(foo.id)) AS coalesce_1 FROM foo), "
            "col3=:col3",
        )
def test_update_1(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1).where(table1.c.myid == 7),
"UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1",
params={table1.c.name: "fred"},
)
def test_update_2(self):
table1 = self.tables.mytable
self.assert_compile(
table1.update()
.where(table1.c.myid == 7)
.values({table1.c.myid: 5}),
"UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1",
checkparams={"myid": 5, "myid_1": 7},
)
def test_update_3(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1).where(table1.c.myid == 7),
"UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1",
params={"name": "fred"},
)
def test_update_4(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1).values({table1.c.name: table1.c.myid}),
"UPDATE mytable SET name=mytable.myid",
)
def test_update_5(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1)
.where(table1.c.name == bindparam("crit"))
.values(
{table1.c.name: "hi"},
),
"UPDATE mytable SET name=:name WHERE mytable.name = :crit",
params={"crit": "notthere"},
checkparams={"crit": "notthere", "name": "hi"},
)
def test_update_6(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1)
.where(table1.c.myid == 12)
.values(
{table1.c.name: table1.c.myid},
),
"UPDATE mytable "
"SET name=mytable.myid, description=:description "
"WHERE mytable.myid = :myid_1",
params={"description": "test"},
checkparams={"description": "test", "myid_1": 12},
)
def test_update_7(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1)
.where(table1.c.myid == 12)
.values({table1.c.myid: 9}),
"UPDATE mytable "
"SET myid=:myid, description=:description "
"WHERE mytable.myid = :myid_1",
params={"myid_1": 12, "myid": 9, "description": "test"},
)
def test_update_8(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1).where(table1.c.myid == 12),
"UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1",
params={"myid": 18},
checkparams={"myid": 18, "myid_1": 12},
)
def test_update_9(self):
table1 = self.tables.mytable
s = (
table1.update()
.where(table1.c.myid == 12)
.values({table1.c.name: "lala"})
)
c = s.compile(column_keys=["id", "name"])
eq_(str(s), str(c))
    def test_update_10(self):
        """A second values() call merges with, and overrides, the first."""
        table1 = self.tables.mytable
        v1 = {table1.c.name: table1.c.myid}
        v2 = {table1.c.name: table1.c.name + "foo"}
        self.assert_compile(
            update(table1).where(table1.c.myid == 12).values(v1).values(v2),
            "UPDATE mytable "
            "SET "
            "name=(mytable.name || :name_1), "
            "description=:description "
            "WHERE mytable.myid = :myid_1",
            params={"description": "test"},
        )
    def test_update_11(self):
        """Plain-dict values render in table column order, not dict order."""
        table1 = self.tables.mytable
        values = {
            table1.c.name: table1.c.name + "lala",
            table1.c.myid: func.do_stuff(table1.c.myid, literal("hoho")),
        }
        self.assert_compile(
            update(table1)
            .where(
                (table1.c.myid == func.hoho(4))
                & (
                    table1.c.name
                    == literal("foo") + table1.c.name + literal("lala")
                )
            )
            .values(values),
            "UPDATE mytable "
            "SET "
            "myid=do_stuff(mytable.myid, :param_1), "
            "name=(mytable.name || :name_1) "
            "WHERE "
            "mytable.myid = hoho(:hoho_1) AND "
            "mytable.name = :param_2 || mytable.name || :param_3",
        )
def test_unconsumed_names_kwargs(self):
t = table("t", column("x"), column("y"))
assert_raises_message(
exc.CompileError,
"Unconsumed column names: z",
t.update().values(x=5, z=5).compile,
)
    def test_unconsumed_names_values_dict(self):
        """An unknown kwarg raises even when a second table is involved."""
        t = table("t", column("x"), column("y"))
        t2 = table("t2", column("q"), column("z"))
        assert_raises_message(
            exc.CompileError,
            "Unconsumed column names: j",
            t.update()
            .values(x=5, j=7)
            .values({t2.c.z: 5})
            .where(t.c.x == t2.c.q)
            .compile,
        )
    def test_unconsumed_names_kwargs_w_keys(self):
        """column_keys does not legitimize an unknown values() name."""
        t = table("t", column("x"), column("y"))
        assert_raises_message(
            exc.CompileError,
            "Unconsumed column names: j",
            t.update().values(x=5, j=7).compile,
            column_keys=["j"],
        )
    def test_update_ordered_parameters_newstyle_1(self):
        """ordered_values() preserves the given order in the SET clause."""
        table1 = self.tables.mytable
        # Confirm that we can pass values as list value pairs
        # note these are ordered *differently* from table.c
        values = [
            (table1.c.name, table1.c.name + "lala"),
            (table1.c.myid, func.do_stuff(table1.c.myid, literal("hoho"))),
        ]
        self.assert_compile(
            update(table1)
            .where(
                (table1.c.myid == func.hoho(4))
                & (
                    table1.c.name
                    == literal("foo") + table1.c.name + literal("lala")
                )
            )
            .ordered_values(*values),
            "UPDATE mytable "
            "SET "
            "name=(mytable.name || :name_1), "
            "myid=do_stuff(mytable.myid, :param_1) "
            "WHERE "
            "mytable.myid = hoho(:hoho_1) AND "
            "mytable.name = :param_2 || mytable.name || :param_3",
        )
    def test_update_ordered_parameters_newstyle_2(self):
        """ordered_values() accepts string keys mixed with Column keys."""
        table1 = self.tables.mytable
        # Confirm that we can pass values as list value pairs
        # note these are ordered *differently* from table.c
        values = [
            (table1.c.name, table1.c.name + "lala"),
            ("description", "some desc"),
            (table1.c.myid, func.do_stuff(table1.c.myid, literal("hoho"))),
        ]
        self.assert_compile(
            update(table1)
            .where(
                (table1.c.myid == func.hoho(4))
                & (
                    table1.c.name
                    == literal("foo") + table1.c.name + literal("lala")
                ),
            )
            .ordered_values(*values),
            "UPDATE mytable "
            "SET "
            "name=(mytable.name || :name_1), "
            "description=:description, "
            "myid=do_stuff(mytable.myid, :param_1) "
            "WHERE "
            "mytable.myid = hoho(:hoho_1) AND "
            "mytable.name = :param_2 || mytable.name || :param_3",
        )
def test_update_ordered_parameters_multiple(self):
table1 = self.tables.mytable
stmt = update(table1)
stmt = stmt.ordered_values(("name", "somename"))
assert_raises_message(
exc.ArgumentError,
"This statement already has ordered values present",
stmt.ordered_values,
("myid", 10),
)
def test_update_ordered_then_nonordered(self):
table1 = self.tables.mytable
stmt = table1.update().ordered_values(("myid", 1), ("name", "d1"))
assert_raises_message(
exc.InvalidRequestError,
"This statement already has ordered values present",
stmt.values,
{"myid": 2, "name": "d2"},
)
    def test_update_no_multiple_parameters_allowed(self):
        """Passing a list of dicts (executemany style) to values() is rejected."""
        table1 = self.tables.mytable
        stmt = table1.update().values(
            [{"myid": 1, "name": "n1"}, {"myid": 2, "name": "n2"}]
        )
        assert_raises_message(
            exc.InvalidRequestError,
            "UPDATE construct does not support multiple parameter sets.",
            stmt.compile,
        )
    def test_update_ordereddict(self):
        """An OrderedDict gets no special ordering treatment in values()."""
        table1 = self.tables.mytable
        # Confirm that ordered dicts are treated as normal dicts,
        # columns sorted in table order
        values = util.OrderedDict(
            (
                (table1.c.name, table1.c.name + "lala"),
                (table1.c.myid, func.do_stuff(table1.c.myid, literal("hoho"))),
            )
        )
        self.assert_compile(
            update(table1)
            .where(
                (table1.c.myid == func.hoho(4))
                & (
                    table1.c.name
                    == literal("foo") + table1.c.name + literal("lala")
                ),
            )
            .values(values),
            "UPDATE mytable "
            "SET "
            "myid=do_stuff(mytable.myid, :param_1), "
            "name=(mytable.name || :name_1) "
            "WHERE "
            "mytable.myid = hoho(:hoho_1) AND "
            "mytable.name = :param_2 || mytable.name || :param_3",
        )
    def test_where_empty(self):
        """An empty and_() / or_() criterion renders no WHERE clause at all."""
        table1 = self.tables.mytable
        self.assert_compile(
            table1.update().where(
                BooleanClauseList._construct_raw(operators.and_)
            ),
            "UPDATE mytable SET myid=:myid, name=:name, "
            "description=:description",
        )
        self.assert_compile(
            table1.update().where(
                BooleanClauseList._construct_raw(operators.or_)
            ),
            "UPDATE mytable SET myid=:myid, name=:name, "
            "description=:description",
        )
    def test_prefix_with(self):
        """Dialect-specific prefixes render only on the matching dialect."""
        table1 = self.tables.mytable
        stmt = (
            table1.update()
            .prefix_with("A", "B", dialect="mysql")
            .prefix_with("C", "D")
        )
        # default dialect: only the unconditional "C D" prefixes appear
        self.assert_compile(
            stmt,
            "UPDATE C D mytable SET myid=:myid, name=:name, "
            "description=:description",
        )
        # mysql dialect: "A B" is included ahead of "C D"
        self.assert_compile(
            stmt,
            "UPDATE A B C D mytable SET myid=%s, name=%s, description=%s",
            dialect=mysql.dialect(),
        )
    def test_update_to_expression_one(self):
        """test update from an expression.
        this logic is triggered currently by a left side that doesn't
        have a key. The current supported use case is updating the index
        of a PostgreSQL ARRAY type.
        """
        table1 = self.tables.mytable
        expr = func.foo(table1.c.myid)
        # a function expression has no .key, which triggers the
        # "update to expression" code path being tested here
        eq_(expr.key, None)
        self.assert_compile(
            table1.update().values({expr: "bar"}),
            "UPDATE mytable SET foo(myid)=:param_1",
        )
    @testing.fixture
    def randomized_param_order_update(self):
        """Fixture that shuffles UPDATE parameter dictionaries.

        Patches UpdateDMLState so that ordered-values tests cannot pass
        by accident due to dict insertion ordering.
        """
        from sqlalchemy.sql.dml import UpdateDMLState
        super_process_ordered_values = UpdateDMLState._process_ordered_values
        # this fixture is needed for Python 3.6 and above to work around
        # dictionaries being insert-ordered. in python 2.7 the previous
        # logic fails pretty easily without this fixture.
        def _process_ordered_values(self, statement):
            # run the real implementation, then scramble the resulting
            # parameter dictionary
            super_process_ordered_values(self, statement)
            tuples = list(self._dict_parameters.items())
            random.shuffle(tuples)
            self._dict_parameters = dict(tuples)
        dialect = default.StrCompileDialect()
        dialect.paramstyle = "qmark"
        dialect.positional = True
        with mock.patch.object(
            UpdateDMLState, "_process_ordered_values", _process_ordered_values
        ):
            yield
    def random_update_order_parameters():
        """Build a testing.combinations decorator yielding ten randomly
        shuffled orderings of the (column, index, value) triples."""
        from sqlalchemy import ARRAY
        t = table(
            "foo",
            column("data1", ARRAY(Integer)),
            column("data2", ARRAY(Integer)),
            column("data3", ARRAY(Integer)),
            column("data4", ARRAY(Integer)),
        )
        idx_to_value = [
            (t.c.data1, 5, 7),
            (t.c.data2, 10, 18),
            (t.c.data3, 8, 4),
            (t.c.data4, 12, 14),
        ]
        def combinations():
            # endless stream of in-place shuffles; each yield snapshots a copy
            while True:
                random.shuffle(idx_to_value)
                yield list(idx_to_value)
        return testing.combinations(
            *[
                (t, combination)
                for i, combination in zip(range(10), combinations())
            ],
            argnames="t, idx_to_value"
        )
    @random_update_order_parameters()
    def test_update_to_expression_two(
        self, randomized_param_order_update, t, idx_to_value
    ):
        """test update from an expression.
        this logic is triggered currently by a left side that doesn't
        have a key. The current supported use case is updating the index
        of a PostgreSQL ARRAY type.
        """
        # positional qmark dialect so parameter ordering is observable
        dialect = default.StrCompileDialect()
        dialect.paramstyle = "qmark"
        dialect.positional = True
        stmt = t.update().ordered_values(
            *[(col[idx], val) for col, idx, val in idx_to_value]
        )
        self.assert_compile(
            stmt,
            "UPDATE foo SET %s"
            % (
                ", ".join(
                    "%s[?]=?" % col.key for col, idx, val in idx_to_value
                )
            ),
            dialect=dialect,
            # each SET entry consumes two positional params: index, value
            checkpositional=tuple(
                itertools.chain.from_iterable(
                    (idx, val) for col, idx, val in idx_to_value
                )
            ),
        )
    def test_update_to_expression_three(self):
        """UPDATE-as-CTE with column defaults; checks positional param order."""
        # this test is from test_defaults but exercises a particular
        # parameter ordering issue
        metadata = MetaData()
        q = Table(
            "q",
            metadata,
            Column("x", Integer, default=2),
            Column("y", Integer, onupdate=5),
            Column("z", Integer),
        )
        p = Table(
            "p",
            metadata,
            Column("s", Integer),
            Column("t", Integer),
            Column("u", Integer, onupdate=1),
        )
        cte = (
            q.update().where(q.c.z == 1).values(x=7).returning(q.c.z).cte("c")
        )
        stmt = select(p.c.s, cte.c.z).where(p.c.s == cte.c.z)
        dialect = default.StrCompileDialect()
        dialect.paramstyle = "qmark"
        dialect.positional = True
        # positional order: x value (7), y onupdate placeholder, WHERE bind
        self.assert_compile(
            stmt,
            "WITH c AS (UPDATE q SET x=?, y=? WHERE q.z = ? RETURNING q.z) "
            "SELECT p.s, c.z FROM p, c WHERE p.s = c.z",
            checkpositional=(7, None, 1),
            dialect=dialect,
        )
    def test_update_bound_ordering(self):
        """test that bound parameters between the UPDATE and FROM clauses
        order correctly in different SQL compilation scenarios.
        """
        table1 = self.tables.mytable
        table2 = self.tables.myothertable
        sel = select(table2).where(table2.c.otherid == 5).alias()
        upd = (
            table1.update()
            .where(table1.c.name == sel.c.othername)
            .values(name="foo")
        )
        dialect = default.StrCompileDialect()
        dialect.positional = True
        # default dialect: SET precedes FROM, so SET params come first
        self.assert_compile(
            upd,
            "UPDATE mytable SET name=:name FROM (SELECT "
            "myothertable.otherid AS otherid, "
            "myothertable.othername AS othername "
            "FROM myothertable "
            "WHERE myothertable.otherid = :otherid_1) AS anon_1 "
            "WHERE mytable.name = anon_1.othername",
            checkpositional=("foo", 5),
            dialect=dialect,
        )
        # mysql: the FROM-list subquery precedes SET, flipping the order
        self.assert_compile(
            upd,
            "UPDATE mytable, (SELECT myothertable.otherid AS otherid, "
            "myothertable.othername AS othername "
            "FROM myothertable "
            "WHERE myothertable.otherid = %s) AS anon_1 SET mytable.name=%s "
            "WHERE mytable.name = anon_1.othername",
            checkpositional=(5, "foo"),
            dialect=mysql.dialect(),
        )
class UpdateFromCompileTest(
    _UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL
):
    """Compilation-only tests for UPDATE..FROM and MySQL multi-table UPDATE."""

    __dialect__ = "default_enhanced"
    # compile-only: no tables are created or populated
    run_create_tables = run_inserts = run_deletes = None
    def test_alias_one(self):
        """UPDATE fully against an alias, table-bound column in values()."""
        table1 = self.tables.mytable
        talias1 = table1.alias("t1")
        # this case is nonsensical. the UPDATE is entirely
        # against the alias, but we name the table-bound column
        # in values. The behavior here isn't really defined
        self.assert_compile(
            update(talias1)
            .where(talias1.c.myid == 7)
            .values({table1.c.name: "fred"}),
            "UPDATE mytable AS t1 "
            "SET name=:name "
            "WHERE t1.myid = :myid_1",
        )
    def test_alias_two(self):
        """Table-bound criterion against an aliased UPDATE yields UPDATE..FROM."""
        table1 = self.tables.mytable
        talias1 = table1.alias("t1")
        # Here, compared to
        # test_alias_one(), here we actually have UPDATE..FROM,
        # which is causing the "table1.c.name" param to be handled
        # as an "extra table", hence we see the full table name rendered.
        self.assert_compile(
            update(talias1)
            .where(table1.c.myid == 7)
            .values({table1.c.name: "fred"}),
            "UPDATE mytable AS t1 "
            "SET name=:mytable_name "
            "FROM mytable "
            "WHERE mytable.myid = :myid_1",
            checkparams={"mytable_name": "fred", "myid_1": 7},
        )
    def test_alias_two_mysql(self):
        """Same statement as test_alias_two on MySQL's multi-table syntax."""
        table1 = self.tables.mytable
        talias1 = table1.alias("t1")
        self.assert_compile(
            update(talias1)
            .where(table1.c.myid == 7)
            .values({table1.c.name: "fred"}),
            "UPDATE mytable AS t1, mytable SET mytable.name=%s "
            "WHERE mytable.myid = %s",
            checkparams={"mytable_name": "fred", "myid_1": 7},
            dialect="mysql",
        )
    def test_update_from_multitable_same_name_mysql(self):
        """values() naming columns of two tables renders qualified SET names."""
        users, addresses = self.tables.users, self.tables.addresses
        self.assert_compile(
            users.update()
            .values(name="newname")
            .values({addresses.c.name: "new address"})
            .where(users.c.id == addresses.c.user_id),
            "UPDATE users, addresses SET addresses.name=%s, "
            "users.name=%s WHERE users.id = addresses.user_id",
            checkparams={"addresses_name": "new address", "name": "newname"},
            dialect="mysql",
        )
    def test_update_from_join_mysql_whereclause(self):
        """UPDATE against a join() renders INNER JOIN plus WHERE on MySQL."""
        users, addresses = self.tables.users, self.tables.addresses
        j = users.join(addresses)
        self.assert_compile(
            update(j)
            .values(name="newname")
            .where(addresses.c.email_address == "e1"),
            ""
            "UPDATE users "
            "INNER JOIN addresses ON users.id = addresses.user_id "
            "SET users.name=%s "
            "WHERE "
            "addresses.email_address = %s",
            checkparams={"email_address_1": "e1", "name": "newname"},
            dialect=mysql.dialect(),
        )
    def test_update_from_join_mysql_no_whereclause_one(self):
        """UPDATE against a join() without any WHERE clause."""
        users, addresses = self.tables.users, self.tables.addresses
        j = users.join(addresses)
        self.assert_compile(
            update(j).values(name="newname"),
            ""
            "UPDATE users "
            "INNER JOIN addresses ON users.id = addresses.user_id "
            "SET users.name=%s",
            checkparams={"name": "newname"},
            dialect=mysql.dialect(),
        )
    def test_update_from_join_mysql_no_whereclause_two(self):
        """SET from a column of the joined table; no bound parameters."""
        users, addresses = self.tables.users, self.tables.addresses
        j = users.join(addresses)
        self.assert_compile(
            update(j).values({users.c.name: addresses.c.email_address}),
            ""
            "UPDATE users "
            "INNER JOIN addresses ON users.id = addresses.user_id "
            "SET users.name=addresses.email_address",
            checkparams={},
            dialect=mysql.dialect(),
        )
    def test_update_from_join_mysql_no_whereclause_three(self):
        """Three-table join chain; SET from the outermost joined table."""
        users, addresses, dingalings = (
            self.tables.users,
            self.tables.addresses,
            self.tables.dingalings,
        )
        j = users.join(addresses).join(dingalings)
        self.assert_compile(
            update(j).values({users.c.name: dingalings.c.id}),
            ""
            "UPDATE users "
            "INNER JOIN addresses ON users.id = addresses.user_id "
            "INNER JOIN dingalings ON addresses.id = dingalings.address_id "
            "SET users.name=dingalings.id",
            checkparams={},
            dialect=mysql.dialect(),
        )
    def test_update_from_join_mysql_no_whereclause_four(self):
        """Three-table join chain; SET from a bound parameter."""
        users, addresses, dingalings = (
            self.tables.users,
            self.tables.addresses,
            self.tables.dingalings,
        )
        j = users.join(addresses).join(dingalings)
        self.assert_compile(
            update(j).values(name="foo"),
            ""
            "UPDATE users "
            "INNER JOIN addresses ON users.id = addresses.user_id "
            "INNER JOIN dingalings ON addresses.id = dingalings.address_id "
            "SET users.name=%s",
            checkparams={"name": "foo"},
            dialect=mysql.dialect(),
        )
    def test_render_table(self):
        """A WHERE referencing another table produces UPDATE..FROM."""
        users, addresses = self.tables.users, self.tables.addresses
        self.assert_compile(
            users.update()
            .values(name="newname")
            .where(users.c.id == addresses.c.user_id)
            .where(addresses.c.email_address == "e1"),
            "UPDATE users "
            "SET name=:name FROM addresses "
            "WHERE "
            "users.id = addresses.user_id AND "
            "addresses.email_address = :email_address_1",
            checkparams={"email_address_1": "e1", "name": "newname"},
        )
    def test_render_multi_table(self):
        """Multiple extra tables render as a comma-separated FROM list."""
        users = self.tables.users
        addresses = self.tables.addresses
        dingalings = self.tables.dingalings
        checkparams = {"email_address_1": "e1", "id_1": 2, "name": "newname"}
        self.assert_compile(
            users.update()
            .values(name="newname")
            .where(users.c.id == addresses.c.user_id)
            .where(addresses.c.email_address == "e1")
            .where(addresses.c.id == dingalings.c.address_id)
            .where(dingalings.c.id == 2),
            "UPDATE users "
            "SET name=:name "
            "FROM addresses, dingalings "
            "WHERE "
            "users.id = addresses.user_id AND "
            "addresses.email_address = :email_address_1 AND "
            "addresses.id = dingalings.address_id AND "
            "dingalings.id = :id_1",
            checkparams=checkparams,
        )
    def test_render_table_mysql(self):
        """Same as test_render_table on MySQL's comma-list syntax."""
        users, addresses = self.tables.users, self.tables.addresses
        self.assert_compile(
            users.update()
            .values(name="newname")
            .where(users.c.id == addresses.c.user_id)
            .where(addresses.c.email_address == "e1"),
            "UPDATE users, addresses "
            "SET users.name=%s "
            "WHERE "
            "users.id = addresses.user_id AND "
            "addresses.email_address = %s",
            checkparams={"email_address_1": "e1", "name": "newname"},
            dialect=mysql.dialect(),
        )
    def test_render_subquery(self):
        """An aliased subquery in the criteria renders inside FROM."""
        users, addresses = self.tables.users, self.tables.addresses
        checkparams = {"email_address_1": "e1", "id_1": 7, "name": "newname"}
        subq = (
            select(
                addresses.c.id, addresses.c.user_id, addresses.c.email_address
            )
            .where(addresses.c.id == 7)
            .alias()
        )
        self.assert_compile(
            users.update()
            .values(name="newname")
            .where(users.c.id == subq.c.user_id)
            .where(subq.c.email_address == "e1"),
            "UPDATE users "
            "SET name=:name FROM ("
            "SELECT "
            "addresses.id AS id, "
            "addresses.user_id AS user_id, "
            "addresses.email_address AS email_address "
            "FROM addresses "
            "WHERE addresses.id = :id_1"
            ") AS anon_1 "
            "WHERE users.id = anon_1.user_id "
            "AND anon_1.email_address = :email_address_1",
            checkparams=checkparams,
        )
    def test_correlation_to_extra(self):
        """An EXISTS explicitly correlated to the extra FROM table."""
        users, addresses = self.tables.users, self.tables.addresses
        stmt = (
            users.update()
            .values(name="newname")
            .where(users.c.id == addresses.c.user_id)
            .where(
                ~exists()
                .where(addresses.c.user_id == users.c.id)
                .where(addresses.c.email_address == "foo")
                .correlate(addresses)
            )
        )
        self.assert_compile(
            stmt,
            "UPDATE users SET name=:name FROM addresses WHERE "
            "users.id = addresses.user_id AND NOT "
            "(EXISTS (SELECT * FROM users WHERE addresses.user_id = users.id "
            "AND addresses.email_address = :email_address_1))",
        )
    def test_dont_correlate_to_extra(self):
        """correlate() with no arguments disables correlation entirely."""
        users, addresses = self.tables.users, self.tables.addresses
        stmt = (
            users.update()
            .values(name="newname")
            .where(users.c.id == addresses.c.user_id)
            .where(
                ~exists()
                .where(addresses.c.user_id == users.c.id)
                .where(addresses.c.email_address == "foo")
                .correlate()
            )
        )
        self.assert_compile(
            stmt,
            "UPDATE users SET name=:name FROM addresses WHERE "
            "users.id = addresses.user_id AND NOT "
            "(EXISTS (SELECT * FROM addresses, users "
            "WHERE addresses.user_id = users.id "
            "AND addresses.email_address = :email_address_1))",
        )
    def test_autocorrelate_error(self):
        """Implicit auto-correlation that empties the FROM list raises."""
        users, addresses = self.tables.users, self.tables.addresses
        stmt = (
            users.update()
            .values(name="newname")
            .where(users.c.id == addresses.c.user_id)
            .where(
                ~exists()
                .where(addresses.c.user_id == users.c.id)
                .where(addresses.c.email_address == "foo")
            )
        )
        assert_raises_message(
            exc.InvalidRequestError,
            ".*returned no FROM clauses due to auto-correlation.*",
            stmt.compile,
            dialect=default.StrCompileDialect(),
        )
class UpdateFromRoundTripTest(_UpdateFromTestBase, fixtures.TablesTest):
    """Executes UPDATE..FROM / multi-table UPDATE against a live backend."""

    __backend__ = True
    @testing.requires.update_from
    def test_exec_two_table(self, connection):
        """UPDATE addresses FROM users; only rows for user 'ed' change."""
        users, addresses = self.tables.users, self.tables.addresses
        connection.execute(
            addresses.update()
            .values(email_address=users.c.name)
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == "ed")
        )
        expected = [
            (1, 7, "x", "jack@bean.com"),
            (2, 8, "x", "ed"),
            (3, 8, "x", "ed"),
            (4, 8, "x", "ed"),
            (5, 9, "x", "fred@fred.com"),
        ]
        self._assert_addresses(connection, addresses, expected)
    @testing.requires.update_from
    def test_exec_two_table_plus_alias(self, connection):
        """Criteria via an alias joined back to the base table by id."""
        users, addresses = self.tables.users, self.tables.addresses
        a1 = addresses.alias()
        connection.execute(
            addresses.update()
            .values(email_address=users.c.name)
            .where(users.c.id == a1.c.user_id)
            .where(users.c.name == "ed")
            .where(a1.c.id == addresses.c.id)
        )
        expected = [
            (1, 7, "x", "jack@bean.com"),
            (2, 8, "x", "ed"),
            (3, 8, "x", "ed"),
            (4, 8, "x", "ed"),
            (5, 9, "x", "fred@fred.com"),
        ]
        self._assert_addresses(connection, addresses, expected)
    @testing.requires.update_from
    def test_exec_three_table(self, connection):
        """Criteria spanning three tables narrow the update to one row."""
        users = self.tables.users
        addresses = self.tables.addresses
        dingalings = self.tables.dingalings
        connection.execute(
            addresses.update()
            .values(email_address=users.c.name)
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == "ed")
            .where(addresses.c.id == dingalings.c.address_id)
            .where(dingalings.c.id == 1)
        )
        expected = [
            (1, 7, "x", "jack@bean.com"),
            (2, 8, "x", "ed"),
            (3, 8, "x", "ed@bettyboop.com"),
            (4, 8, "x", "ed@lala.com"),
            (5, 9, "x", "fred@fred.com"),
        ]
        self._assert_addresses(connection, addresses, expected)
    @testing.requires.multi_table_update
    def test_exec_multitable(self, connection):
        """values() spanning two tables updates rows in both."""
        users, addresses = self.tables.users, self.tables.addresses
        values = {addresses.c.email_address: "updated", users.c.name: "ed2"}
        connection.execute(
            addresses.update()
            .values(values)
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == "ed")
        )
        expected = [
            (1, 7, "x", "jack@bean.com"),
            (2, 8, "x", "updated"),
            (3, 8, "x", "updated"),
            (4, 8, "x", "updated"),
            (5, 9, "x", "fred@fred.com"),
        ]
        self._assert_addresses(connection, addresses, expected)
        expected = [(7, "jack"), (8, "ed2"), (9, "fred"), (10, "chuck")]
        self._assert_users(connection, users, expected)
    @testing.requires.multi_table_update
    def test_exec_join_multitable(self, connection):
        """Same two-table update expressed as UPDATE against a join()."""
        users, addresses = self.tables.users, self.tables.addresses
        values = {addresses.c.email_address: "updated", users.c.name: "ed2"}
        connection.execute(
            update(users.join(addresses))
            .values(values)
            .where(users.c.name == "ed")
        )
        expected = [
            (1, 7, "x", "jack@bean.com"),
            (2, 8, "x", "updated"),
            (3, 8, "x", "updated"),
            (4, 8, "x", "updated"),
            (5, 9, "x", "fred@fred.com"),
        ]
        self._assert_addresses(connection, addresses, expected)
        expected = [(7, "jack"), (8, "ed2"), (9, "fred"), (10, "chuck")]
        self._assert_users(connection, users, expected)
    @testing.requires.multi_table_update
    def test_exec_multitable_same_name(self, connection):
        """Columns named 'name' in both tables update independently."""
        users, addresses = self.tables.users, self.tables.addresses
        values = {addresses.c.name: "ad_ed2", users.c.name: "ed2"}
        connection.execute(
            addresses.update()
            .values(values)
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == "ed")
        )
        expected = [
            (1, 7, "x", "jack@bean.com"),
            (2, 8, "ad_ed2", "ed@wood.com"),
            (3, 8, "ad_ed2", "ed@bettyboop.com"),
            (4, 8, "ad_ed2", "ed@lala.com"),
            (5, 9, "x", "fred@fred.com"),
        ]
        self._assert_addresses(connection, addresses, expected)
        expected = [(7, "jack"), (8, "ed2"), (9, "fred"), (10, "chuck")]
        self._assert_users(connection, users, expected)
    def _assert_addresses(self, connection, addresses, expected):
        """Assert the full, id-ordered contents of the addresses table."""
        stmt = addresses.select().order_by(addresses.c.id)
        eq_(connection.execute(stmt).fetchall(), expected)
    def _assert_users(self, connection, users, expected):
        """Assert the full, id-ordered contents of the users table."""
        stmt = users.select().order_by(users.c.id)
        eq_(connection.execute(stmt).fetchall(), expected)
class UpdateFromMultiTableUpdateDefaultsTest(
    _UpdateFromTestBase, fixtures.TablesTest
):
    """Checks onupdate defaults fire (or not) per-table in multi-table UPDATE."""

    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        """Three tables; users and foobar both carry an onupdate default."""
        Table(
            "users",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(30), nullable=False),
            Column("some_update", String(30), onupdate="im the update"),
        )
        Table(
            "addresses",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("user_id", None, ForeignKey("users.id")),
            Column("email_address", String(50), nullable=False),
        )
        Table(
            "foobar",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("user_id", None, ForeignKey("users.id")),
            Column("data", String(30)),
            Column("some_update", String(30), onupdate="im the other update"),
        )
    @classmethod
    def fixtures(cls):
        """Seed rows; first tuple in each entry names the columns."""
        return dict(
            users=(
                ("id", "name", "some_update"),
                (8, "ed", "value"),
                (9, "fred", "value"),
            ),
            addresses=(
                ("id", "user_id", "email_address"),
                (2, 8, "ed@wood.com"),
                (3, 8, "ed@bettyboop.com"),
                (4, 9, "fred@fred.com"),
            ),
            foobar=(
                ("id", "user_id", "data"),
                (2, 8, "d1"),
                (3, 8, "d2"),
                (4, 9, "d3"),
            ),
        )
    @testing.requires.multi_table_update
    def test_defaults_second_table(self, connection):
        """users.some_update onupdate fires when users is also updated."""
        users, addresses = self.tables.users, self.tables.addresses
        values = {addresses.c.email_address: "updated", users.c.name: "ed2"}
        ret = connection.execute(
            addresses.update()
            .values(values)
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == "ed")
        )
        eq_(set(ret.prefetch_cols()), set([users.c.some_update]))
        expected = [
            (2, 8, "updated"),
            (3, 8, "updated"),
            (4, 9, "fred@fred.com"),
        ]
        self._assert_addresses(connection, addresses, expected)
        expected = [(8, "ed2", "im the update"), (9, "fred", "value")]
        self._assert_users(connection, users, expected)
    @testing.requires.multi_table_update
    def test_defaults_second_table_same_name(self, connection):
        """Both tables' identically-named some_update defaults fire."""
        users, foobar = self.tables.users, self.tables.foobar
        values = {foobar.c.data: foobar.c.data + "a", users.c.name: "ed2"}
        ret = connection.execute(
            users.update()
            .values(values)
            .where(users.c.id == foobar.c.user_id)
            .where(users.c.name == "ed")
        )
        eq_(
            set(ret.prefetch_cols()),
            set([users.c.some_update, foobar.c.some_update]),
        )
        expected = [
            (2, 8, "d1a", "im the other update"),
            (3, 8, "d2a", "im the other update"),
            (4, 9, "d3", None),
        ]
        self._assert_foobar(connection, foobar, expected)
        expected = [(8, "ed2", "im the update"), (9, "fred", "value")]
        self._assert_users(connection, users, expected)
    @testing.requires.multi_table_update
    def test_no_defaults_second_table(self, connection):
        """No onupdate fires when the second table is criteria-only."""
        users, addresses = self.tables.users, self.tables.addresses
        ret = connection.execute(
            addresses.update()
            .values({"email_address": users.c.name})
            .where(users.c.id == addresses.c.user_id)
            .where(users.c.name == "ed")
        )
        eq_(ret.prefetch_cols(), [])
        expected = [(2, 8, "ed"), (3, 8, "ed"), (4, 9, "fred@fred.com")]
        self._assert_addresses(connection, addresses, expected)
        # users table not actually updated, so no onupdate
        expected = [(8, "ed", "value"), (9, "fred", "value")]
        self._assert_users(connection, users, expected)
    def _assert_foobar(self, connection, foobar, expected):
        """Assert the full, id-ordered contents of the foobar table."""
        stmt = foobar.select().order_by(foobar.c.id)
        eq_(connection.execute(stmt).fetchall(), expected)
    def _assert_addresses(self, connection, addresses, expected):
        """Assert the full, id-ordered contents of the addresses table."""
        stmt = addresses.select().order_by(addresses.c.id)
        eq_(connection.execute(stmt).fetchall(), expected)
    def _assert_users(self, connection, users, expected):
        """Assert the full, id-ordered contents of the users table."""
        stmt = users.select().order_by(users.c.id)
        eq_(connection.execute(stmt).fetchall(), expected)
| |
#!/usr/bin/env python
"""
@package mi.dataset.parser
@file marine-integrations/mi/dataset/parser/phsen_abdcef_sio.py
@author Emily Hahn
@brief Parser for the phsen abcdef sio dataset driver
Release notes:
initial release
"""
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
import re
import binascii
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.dataset_data_particle import DataParticle, DataParticleKey
from mi.core.exceptions import SampleException, RecoverableSampleException, UnexpectedDataException
from mi.dataset.parser.common_regexes import ASCII_HEX_CHAR_REGEX
from mi.dataset.parser.sio_mule_common import SioParser, SIO_HEADER_MATCHER, SIO_BLOCK_END
# match the ascii hex ph records
# the data should be ascii hex, but may have non hex ascii characters, if this happens the
# value will be set to none
# NOTE: ASCII_HEX_CHAR_REGEX is a bytes pattern (it is concatenated with b''
# literals below, e.g. in TIMESTAMP_REGEX), so every fragment joined to it must
# also be bytes. The previously un-prefixed str fragments in DATA_REGEX and
# CONTROL_REGEX raised "TypeError: can't concat str to bytes" at import time
# on Python 3; the b prefixes added here are a no-op on Python 2.
DATA_REGEX = b'(\^0A\r\*)(' + ASCII_HEX_CHAR_REGEX + b'{4}0A)([\x00-\xFF]{8})([\x00-\xFF]{446}' + \
             ASCII_HEX_CHAR_REGEX + b'{4})\r'
DATA_MATCHER = re.compile(DATA_REGEX)
# match the ascii hex control record, there is an optional 2 byte field at the end
# this also allows for non hex ascii characters in the timestamp, flags and number of records
CONTROL_REGEX = b'(\*)(' + ASCII_HEX_CHAR_REGEX + b'{4}[8-9A-Fa-f]' + ASCII_HEX_CHAR_REGEX + \
                b')([\x00-\xFF]{32}' + ASCII_HEX_CHAR_REGEX + b'{0,4})\r'
CONTROL_MATCHER = re.compile(CONTROL_REGEX)
# control messages are hex 80 or greater, so the first ascii char must be greater than 8 hex
CONTROL_ID_REGEX = b'[8-9A-Fa-f]' + ASCII_HEX_CHAR_REGEX
CONTROL_ID_MATCHER = re.compile(CONTROL_ID_REGEX)
TIMESTAMP_REGEX = ASCII_HEX_CHAR_REGEX + b'{8}'
TIMESTAMP_MATCHER = re.compile(TIMESTAMP_REGEX)
HEX_INT_REGEX = ASCII_HEX_CHAR_REGEX + b'{4}'
HEX_INT_MATCHER = re.compile(HEX_INT_REGEX)
# this occurs frequently at the end of ph messages, don't send an exception for this case
PH_EXTRA_END = b'?03\r'
PH_ID = '0A'
# the control message has an optional data or battery field for some control IDs
# NOTE(review): record ids sliced out of bytes matches are bytes on Python 3,
# while these id lists (and PH_ID above) are str; membership tests against them
# may need b'' literals too — confirm against the particle code paths before
# changing.
DATA_CONTROL_IDS = ['BF', 'FF']
BATT_CONTROL_IDS = ['C0', 'C1']
SIO_HEADER_BYTES = 33
NORMAL_CONTROL_LEN = 40
OPTIONAL_CONTROL_LEN = 44
MEASUREMENT_BYTES = 4
class DataParticleType(BaseEnum):
    """Stream names for the two particle types produced by this parser."""
    SAMPLE = 'phsen_abcdef_sio_mule_instrument'
    CONTROL = 'phsen_abcdef_sio_mule_metadata'
class PhsenAbcdefSioCommonDataParticleKey(BaseEnum):
    """Particle keys shared by the instrument and control particles."""
    CONTROLLER_TIMESTAMP = 'sio_controller_timestamp'
    UNIQUE_ID = 'unique_id'
    RECORD_TYPE = 'record_type'
    RECORD_TIME = 'record_time'
    PASSED_CHECKSUM = 'passed_checksum'
class PhsenAbcdefSioDataParticleKey(PhsenAbcdefSioCommonDataParticleKey):
    """Keys specific to the instrument (sample) particle."""
    THERMISTOR_START = 'thermistor_start'
    REFERENCE_LIGHT_MEASUREMENTS = 'reference_light_measurements'
    LIGHT_MEASUREMENTS = 'light_measurements'
    VOLTAGE_BATTERY = 'voltage_battery'
    THERMISTOR_END = 'thermistor_end'
def encode_int_16(val_str):
    """
    Decode an ascii hex string into its integer value.
    @param val_str string containing hex value
    """
    hex_base = 16
    return int(val_str, hex_base)
def encode_timestamp(timestamp_str):
    """
    Decode an 8-char ascii hex timestamp into an int.
    @param timestamp_str string containing hex timestamp value
    @retval integer timestamp, or None if the string is not valid hex
    """
    if TIMESTAMP_MATCHER.match(timestamp_str):
        return int(timestamp_str, 16)
    return None
class PhsenAbcdefSioDataParticle(DataParticle):
    """
    Class for parsing data from the mflm_phsen instrument
    """
    _data_particle_type = DataParticleType.SAMPLE
    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        a particle with the appropriate tag.

        raw_data is a (sio_header_timestamp, DATA_MATCHER match) tuple;
        match group(4) packs thermistor_start (4 chars), 16 reference
        measurements (4 chars each), then 23x4 light measurements.
        @throws SampleException If there is a problem with sample creation
        """
        # raw data comes in as a tuple
        ts = self.raw_data[0]
        data_match = self.raw_data[1]
        # use the timestamp from the sio header as internal timestamp
        sec_since_1970 = int(ts, 16)
        self.set_internal_timestamp(unix_time=sec_since_1970)
        ref_meas = []
        # skip the 4-char thermistor_start field at the head of group(4)
        previous_record_bytes = 4
        # 4 sets of 4 reference light measurements (16 total)
        for i in range(0, 16):
            start_idx = previous_record_bytes + i * MEASUREMENT_BYTES
            # confirm this contains only ascii hex chars
            if HEX_INT_MATCHER.match(data_match.group(4)[start_idx:start_idx + MEASUREMENT_BYTES]):
                this_ref = int(data_match.group(4)[start_idx:start_idx + MEASUREMENT_BYTES], 16)
                ref_meas.append(this_ref)
            else:
                # don't send an exception if a non ascii hex char is in this value
                ref_meas.append(None)
        light_meas = []
        n_outer_sets = 23
        n_inner_sets = 4
        # 68 = 4 thermistor chars + 16 * 4 reference measurement chars
        previous_record_bytes = 68
        # 23 sets of 4 light measurements
        for i in range(0, n_outer_sets):
            for s in range(0, n_inner_sets):
                start_idx = previous_record_bytes + i * n_inner_sets * MEASUREMENT_BYTES + s * MEASUREMENT_BYTES
                # confirm this contains only ascii hex chars
                if HEX_INT_MATCHER.match(data_match.group(4)[start_idx:start_idx + MEASUREMENT_BYTES]):
                    this_meas = int(data_match.group(4)[start_idx:start_idx + MEASUREMENT_BYTES], 16)
                    light_meas.append(this_meas)
                else:
                    # don't send an exception if a non ascii hex char is in this value
                    light_meas.append(None)
        # calculate the checksum and compare with the received checksum
        passed_checksum = True
        try:
            # received checksum: the 2 hex chars just before the trailing \r
            chksum = int(data_match.group(0)[-3:-1], 16)
            sum_bytes = 0
            # sum each hex byte pair over chars 7..466 of the full match,
            # keeping only the low byte of the total
            for i in range(7, 467, 2):
                sum_bytes += int(data_match.group(0)[i:i + 2], 16)
            calc_chksum = sum_bytes & 255
            if calc_chksum != chksum:
                passed_checksum = False
                log.debug('Calculated internal checksum %d does not match received %d', calc_chksum, chksum)
        except Exception as e:
            # any parse failure in the checksum region marks the sample as failed
            log.debug('Error calculating checksums: %s, setting passed checksum to False', e)
            passed_checksum = False
        # tail of the match: battery [-11:-7], thermistor_end [-7:-3],
        # checksum [-3:-1], then the trailing \r
        result = [self._encode_value(PhsenAbcdefSioDataParticleKey.CONTROLLER_TIMESTAMP, ts, encode_int_16),
                  self._encode_value(PhsenAbcdefSioDataParticleKey.UNIQUE_ID, data_match.group(2)[0:2], encode_int_16),
                  self._encode_value(PhsenAbcdefSioDataParticleKey.RECORD_TYPE, data_match.group(2)[4:6],
                                     encode_int_16),
                  self._encode_value(PhsenAbcdefSioDataParticleKey.RECORD_TIME, data_match.group(3), encode_timestamp),
                  self._encode_value(PhsenAbcdefSioDataParticleKey.THERMISTOR_START, data_match.group(4)[0:4],
                                     encode_int_16),
                  self._encode_value(PhsenAbcdefSioDataParticleKey.REFERENCE_LIGHT_MEASUREMENTS, ref_meas, list),
                  self._encode_value(PhsenAbcdefSioDataParticleKey.LIGHT_MEASUREMENTS, light_meas, list),
                  self._encode_value(PhsenAbcdefSioDataParticleKey.VOLTAGE_BATTERY, data_match.group(0)[-11:-7],
                                     encode_int_16),
                  self._encode_value(PhsenAbcdefSioDataParticleKey.THERMISTOR_END, data_match.group(0)[-7:-3],
                                     encode_int_16),
                  self._encode_value(PhsenAbcdefSioDataParticleKey.PASSED_CHECKSUM, passed_checksum, int)]
        return result
class PhsenControlDataParticleKey(PhsenAbcdefSioCommonDataParticleKey):
    """
    Particle value keys for PHSEN control records.
    """
    # The first 16 keys correspond, in order, to bits 0-15 of the 16-bit
    # status flag word decoded in PhsenAbcdefSioControlDataParticle.
    CLOCK_ACTIVE = 'clock_active'
    RECORDING_ACTIVE = 'recording_active'
    RECORD_END_ON_TIME = 'record_end_on_time'
    RECORD_MEMORY_FULL = 'record_memory_full'
    RECORD_END_ON_ERROR = 'record_end_on_error'
    DATA_DOWNLOAD_OK = 'data_download_ok'
    FLASH_MEMORY_OPEN = 'flash_memory_open'
    BATTERY_LOW_PRESTART = 'battery_low_prestart'
    BATTERY_LOW_MEASUREMENT = 'battery_low_measurement'
    BATTERY_LOW_BLANK = 'battery_low_blank'
    BATTERY_LOW_EXTERNAL = 'battery_low_external'
    EXTERNAL_DEVICE1_FAULT = 'external_device1_fault'
    EXTERNAL_DEVICE2_FAULT = 'external_device2_fault'
    EXTERNAL_DEVICE3_FAULT = 'external_device3_fault'
    FLASH_ERASED = 'flash_erased'
    POWER_ON_INVALID = 'power_on_invalid'
    # Counters and battery voltage parsed from the tail of the record.
    NUM_DATA_RECORDS = 'num_data_records'
    NUM_ERROR_RECORDS = 'num_error_records'
    NUM_BYTES_STORED = 'num_bytes_stored'
    VOLTAGE_BATTERY = 'voltage_battery'
class PhsenAbcdefSioControlDataParticle(DataParticle):
    """
    Class for parsing control record data from the mflm_phsen instrument.

    raw_data is a tuple of (sio header timestamp, control record regex match).
    """
    _data_particle_type = DataParticleType.CONTROL

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        a particle with the appropriate tag.
        @throws RecoverableSampleException If the record length does not
                match the expected length for this control record type
        """
        # raw data is a tuple made of timestamp and the data match
        ts = self.raw_data[0]
        data_match = self.raw_data[1]
        # use the timestamp from the sio header as internal timestamp
        sec_since_1970 = int(ts, 16)
        self.set_internal_timestamp(unix_time=sec_since_1970)
        data_len = len(data_match.group(0))
        # record type id is hex-ascii chars 4-6 of the record header group
        control_id = data_match.group(2)[4:6]
        # data/battery control records have an optional (longer) length;
        # all other control records must match the normal length
        if control_id in DATA_CONTROL_IDS or control_id in BATT_CONTROL_IDS:
            if data_len != OPTIONAL_CONTROL_LEN:
                raise RecoverableSampleException("PhsenControlDataParticle: for id %s size %d does not match %d" %
                                                 (control_id, data_len, OPTIONAL_CONTROL_LEN))
        elif data_len != NORMAL_CONTROL_LEN:
            raise RecoverableSampleException("PhsenControlDataParticle: for id %s size %d does not match %d" %
                                             (control_id, data_len, NORMAL_CONTROL_LEN))
        # calculate the checksum and compare with the received checksum
        passed_checksum = True
        try:
            # received checksum is the 2 hex-ascii chars just before the last byte
            chksum = int(data_match.group(0)[-3:-1], 16)
            sum_bytes = 0
            # subtract the 3 bytes for the '*' and unique ID, 2 for the checksum, and 1 for the last \r
            control_len = data_len - 6
            for i in range(3, control_len, 2):
                sum_bytes += int(data_match.group(0)[i:i + 2], 16)
            # calculated checksum keeps only the low byte of the sum
            calc_chksum = sum_bytes & 255
            if calc_chksum != chksum:
                passed_checksum = False
                log.debug('Calculated internal checksum %d does not match received %d', calc_chksum, chksum)
        except Exception as e:
            # any parse failure means the checksum could not be verified
            log.debug('Error calculating checksums: %s, setting passed checksum to False', e)
            passed_checksum = False
        # turn the flag value from a hex-ascii value into a string of binary values
        # (16 characters of '0'/'1', most significant bit first)
        try:
            flags = bin(int(data_match.group(3)[8:12], 16))[2:].zfill(16)
            valid_flags = True
        except ValueError:
            valid_flags = False
        result = [
            self._encode_value(PhsenControlDataParticleKey.CONTROLLER_TIMESTAMP, ts, encode_int_16),
            self._encode_value(PhsenControlDataParticleKey.UNIQUE_ID, data_match.group(2)[0:2], encode_int_16),
            self._encode_value(PhsenControlDataParticleKey.RECORD_TYPE, control_id, encode_int_16),
            self._encode_value(PhsenControlDataParticleKey.RECORD_TIME, data_match.group(3)[0:8], encode_timestamp)]
        # if the flag is valid, fill in the values, otherwise set to None
        if valid_flags:
            result.extend([
                self._encode_value(PhsenControlDataParticleKey.CLOCK_ACTIVE, flags[0], int),
                self._encode_value(PhsenControlDataParticleKey.RECORDING_ACTIVE, flags[1], int),
                self._encode_value(PhsenControlDataParticleKey.RECORD_END_ON_TIME, flags[2], int),
                self._encode_value(PhsenControlDataParticleKey.RECORD_MEMORY_FULL, flags[3], int),
                self._encode_value(PhsenControlDataParticleKey.RECORD_END_ON_ERROR, flags[4], int),
                self._encode_value(PhsenControlDataParticleKey.DATA_DOWNLOAD_OK, flags[5], int),
                self._encode_value(PhsenControlDataParticleKey.FLASH_MEMORY_OPEN, flags[6], int),
                self._encode_value(PhsenControlDataParticleKey.BATTERY_LOW_PRESTART, flags[7], int),
                self._encode_value(PhsenControlDataParticleKey.BATTERY_LOW_MEASUREMENT, flags[8], int),
                self._encode_value(PhsenControlDataParticleKey.BATTERY_LOW_BLANK, flags[9], int),
                self._encode_value(PhsenControlDataParticleKey.BATTERY_LOW_EXTERNAL, flags[10], int),
                self._encode_value(PhsenControlDataParticleKey.EXTERNAL_DEVICE1_FAULT, flags[11], int),
                self._encode_value(PhsenControlDataParticleKey.EXTERNAL_DEVICE2_FAULT, flags[12], int),
                self._encode_value(PhsenControlDataParticleKey.EXTERNAL_DEVICE3_FAULT, flags[13], int),
                self._encode_value(PhsenControlDataParticleKey.FLASH_ERASED, flags[14], int),
                self._encode_value(PhsenControlDataParticleKey.POWER_ON_INVALID, flags[15], int)])
        else:
            result.extend([
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.CLOCK_ACTIVE, DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.RECORDING_ACTIVE, DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.RECORD_END_ON_TIME,
                 DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.RECORD_MEMORY_FULL,
                 DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.RECORD_END_ON_ERROR,
                 DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.DATA_DOWNLOAD_OK, DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.FLASH_MEMORY_OPEN, DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.BATTERY_LOW_PRESTART,
                 DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.BATTERY_LOW_MEASUREMENT,
                 DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.BATTERY_LOW_BLANK, DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.BATTERY_LOW_EXTERNAL,
                 DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.EXTERNAL_DEVICE1_FAULT,
                 DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.EXTERNAL_DEVICE2_FAULT,
                 DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.EXTERNAL_DEVICE3_FAULT,
                 DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.FLASH_ERASED, DataParticleKey.VALUE: None},
                {DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.POWER_ON_INVALID, DataParticleKey.VALUE: None}])
        # these 3 may also have invalid hex values, allow for none when encoding
        # so exceptions are not thrown here
        result.extend([
            self._encode_value(PhsenControlDataParticleKey.NUM_DATA_RECORDS, data_match.group(3)[12:18],
                               PhsenAbcdefSioControlDataParticle.encode_int_16_or_none),
            self._encode_value(PhsenControlDataParticleKey.NUM_ERROR_RECORDS, data_match.group(3)[18:24],
                               PhsenAbcdefSioControlDataParticle.encode_int_16_or_none),
            self._encode_value(PhsenControlDataParticleKey.NUM_BYTES_STORED, data_match.group(3)[24:30],
                               PhsenAbcdefSioControlDataParticle.encode_int_16_or_none)])
        # the battery voltage field is only present for battery control records,
        # and must be valid hex-ascii; otherwise report None
        if control_id in BATT_CONTROL_IDS and HEX_INT_MATCHER.match(data_match.group(3)[30:34]):
            result.append(self._encode_value(PhsenControlDataParticleKey.VOLTAGE_BATTERY,
                                             data_match.group(3)[30:34], encode_int_16))
        else:
            result.append({DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.VOLTAGE_BATTERY,
                           DataParticleKey.VALUE: None})
        result.append(self._encode_value(PhsenControlDataParticleKey.PASSED_CHECKSUM, passed_checksum, int))
        return result

    @staticmethod
    def encode_int_16_or_none(int_val):
        """
        Use to convert from hex-ascii to int when encoding data particle values,
        but it is not an error to not match, return None without failing encoding
        """
        result = None
        try:
            result = int(int_val, 16)
        except ValueError:
            # the result will stay at None if we fail the encoding, and no exception
            pass
        return result
class PhsenAbcdefSioParser(SioParser):
    """
    Parser for PHSEN records carried inside SIO blocks.  Walks each 'PH'
    SIO block, extracting data and control records and turning them into
    PhsenAbcdefSioDataParticle / PhsenAbcdefSioControlDataParticle objects.
    """

    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
        parsing, plus the state. An empty list of nothing was parsed.
        """
        result_particles = []
        # non-data does not need to be handled here because for the single file
        # the data may be corrected and re-written later, it is just ignored until it matches
        (nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
        (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
        self.handle_non_data(non_data, non_end, start)
        while chunk is not None:
            # NOTE(review): chunks are assumed to always begin with a valid SIO
            # header (match is not checked for None) -- confirm with the chunker
            header_match = SIO_HEADER_MATCHER.match(chunk)
            if header_match.group(1) == 'PH':
                # start after the sio header
                index = header_match.end(0)
                last_index = index
                chunk_len = len(chunk)
                while index < chunk_len:
                    data_match = DATA_MATCHER.match(chunk[index:])
                    control_match = CONTROL_MATCHER.match(chunk[index:])
                    # check for any valid match and make sure no extra data was found between valid matches
                    if data_match or control_match or chunk[index] == SIO_BLOCK_END:
                        # if the indices don't match we have data that doesn't match
                        # exclude the expected possible ph end bytes
                        if last_index != index and chunk[last_index:index] != PH_EXTRA_END:
                            # we found bad data, send a sample exception but keep processing the file
                            msg = "unknown data found in chunk %s from %d to %d" % (chunk[1:32], last_index, index)
                            log.warning(msg)
                            self._exception_callback(SampleException(msg))
                            # stop processing this sio block, it is bad
                            break
                    if data_match:
                        log.debug('Found data match in chunk %s at index %d', chunk[1:32], index)
                        # particle-ize the data block received, return the record
                        sample = self._extract_sample(PhsenAbcdefSioDataParticle, None,
                                                      (header_match.group(3), data_match))
                        result_particles.append(sample)
                        index += len(data_match.group(0))
                        last_index = index
                    elif control_match:
                        log.debug('Found control match in chunk %s at index %d', chunk[1:32], index)
                        # particle-ize the data block received, return the record
                        sample = self._extract_sample(PhsenAbcdefSioControlDataParticle, None,
                                                      (header_match.group(3), control_match))
                        result_particles.append(sample)
                        index += len(control_match.group(0))
                        last_index = index
                    elif chunk[index] == SIO_BLOCK_END:
                        # found end of sio block marker, we are done with this chunk
                        break
                    else:
                        # we found extra data, warn on chunks of extra data not each byte
                        index += 1
            else:
                # we found unexpected sio instrument id
                msg = "Unexpected sio instrument header ID %s" % header_match.group(1)
                log.warning(msg)
                self._exception_callback(UnexpectedDataException(msg))
            # non-data does not need to be handled here because for the single file
            # the data may be corrected and re-written later, it is just ignored until it matches
            (nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
            self.handle_non_data(non_data, non_end, start)
        return result_particles

    def handle_non_data(self, non_data, non_end, start):
        """
        Handle any non-data found in the file
        :param non_data: Non data
        :param non_end: end index of non data
        :param start: start index of data
        """
        if non_data is not None and non_end <= start:
            message = "Found %d bytes of unexpected non-data %s" % (len(non_data), binascii.b2a_hex(non_data))
            # use log.warning: log.warn is a deprecated alias, and warning is
            # what the rest of this parser already uses
            log.warning(message)
            self._exception_callback(UnexpectedDataException(message))
| |
"""The tests for Shelly device triggers."""
from unittest.mock import AsyncMock, Mock
import pytest
from homeassistant import setup
from homeassistant.components import automation
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.shelly import ShellyDeviceWrapper
from homeassistant.components.shelly.const import (
ATTR_CHANNEL,
ATTR_CLICK_TYPE,
COAP,
CONF_SUBTYPE,
DATA_CONFIG_ENTRY,
DOMAIN,
EVENT_SHELLY_CLICK,
)
from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
)
async def test_get_triggers(hass, coap_wrapper):
    """Test we get the expected triggers from a shelly."""
    assert coap_wrapper

    # a shelly with a physical switch input exposes single and long presses
    expected_triggers = [
        {
            CONF_PLATFORM: "device",
            CONF_DEVICE_ID: coap_wrapper.device_id,
            CONF_DOMAIN: DOMAIN,
            CONF_TYPE: click_type,
            CONF_SUBTYPE: "button1",
        }
        for click_type in ("single", "long")
    ]

    triggers = await async_get_device_automations(
        hass, "trigger", coap_wrapper.device_id
    )

    assert_lists_same(triggers, expected_triggers)
async def test_get_triggers_button(hass):
    """Test we get the expected triggers from a shelly button."""
    await async_setup_component(hass, "shelly", {})

    entry = MockConfigEntry(
        domain=DOMAIN,
        data={"sleep_period": 43200, "model": "SHBTN-1"},
        unique_id="12345678",
    )
    entry.add_to_hass(hass)

    # a minimal, uninitialized device mock for the wrapper
    mock_device = Mock(
        blocks=None,
        settings=None,
        shelly=None,
        update=AsyncMock(),
        initialized=False,
    )

    hass.data[DOMAIN] = {DATA_CONFIG_ENTRY: {}}
    hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id] = {}
    wrapper = ShellyDeviceWrapper(hass, entry, mock_device)
    hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][COAP] = wrapper
    await wrapper.async_setup()

    # the SHBTN-1 button model supports all four click types
    expected_triggers = [
        {
            CONF_PLATFORM: "device",
            CONF_DEVICE_ID: wrapper.device_id,
            CONF_DOMAIN: DOMAIN,
            CONF_TYPE: click_type,
            CONF_SUBTYPE: "button",
        }
        for click_type in ("single", "double", "triple", "long")
    ]

    triggers = await async_get_device_automations(hass, "trigger", wrapper.device_id)

    assert_lists_same(triggers, expected_triggers)
async def test_get_triggers_for_invalid_device_id(hass, device_reg, coap_wrapper):
    """Test error raised for invalid shelly device_id."""
    assert coap_wrapper

    entry = MockConfigEntry(domain=DOMAIN, data={})
    entry.add_to_hass(hass)

    # register a device that is not a shelly wrapper device
    mac_connection = (device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")
    invalid_device = device_reg.async_get_or_create(
        config_entry_id=entry.entry_id,
        connections={mac_connection},
    )

    with pytest.raises(InvalidDeviceAutomationConfig):
        await async_get_device_automations(hass, "trigger", invalid_device.id)
async def test_if_fires_on_click_event(hass, calls, coap_wrapper):
    """Test for click_event trigger firing."""
    assert coap_wrapper
    await setup.async_setup_component(hass, "persistent_notification", {})

    # automation that fires on a single click of button1
    trigger = {
        CONF_PLATFORM: "device",
        CONF_DOMAIN: DOMAIN,
        CONF_DEVICE_ID: coap_wrapper.device_id,
        CONF_TYPE: "single",
        CONF_SUBTYPE: "button1",
    }
    action = {
        "service": "test.automation",
        "data_template": {"some": "test_trigger_single_click"},
    }
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {automation.DOMAIN: [{"trigger": trigger, "action": action}]},
    )

    # fire the shelly click event and verify the automation ran once
    event_data = {
        CONF_DEVICE_ID: coap_wrapper.device_id,
        ATTR_CLICK_TYPE: "single",
        ATTR_CHANNEL: 1,
    }
    hass.bus.async_fire(EVENT_SHELLY_CLICK, event_data)
    await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].data["some"] == "test_trigger_single_click"
async def test_validate_trigger_config_no_device(hass, calls, coap_wrapper):
    """Test for click_event with no device."""
    assert coap_wrapper
    await setup.async_setup_component(hass, "persistent_notification", {})

    # trigger references a device_id unknown to the registry
    trigger = {
        CONF_PLATFORM: "device",
        CONF_DOMAIN: DOMAIN,
        CONF_DEVICE_ID: "no_device",
        CONF_TYPE: "single",
        CONF_SUBTYPE: "button1",
    }
    action = {
        "service": "test.automation",
        "data_template": {"some": "test_trigger_single_click"},
    }
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {automation.DOMAIN: [{"trigger": trigger, "action": action}]},
    )

    event_data = {
        CONF_DEVICE_ID: "no_device",
        ATTR_CLICK_TYPE: "single",
        ATTR_CHANNEL: 1,
    }
    hass.bus.async_fire(EVENT_SHELLY_CLICK, event_data)
    await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].data["some"] == "test_trigger_single_click"
async def test_validate_trigger_invalid_triggers(hass, coap_wrapper):
    """Test for click_event with invalid triggers."""
    assert coap_wrapper
    notification_calls = async_mock_service(hass, "persistent_notification", "create")

    # "button3" is not a valid subtype, so automation setup should
    # produce a setup-failure persistent notification
    trigger = {
        CONF_PLATFORM: "device",
        CONF_DOMAIN: DOMAIN,
        CONF_DEVICE_ID: coap_wrapper.device_id,
        CONF_TYPE: "single",
        CONF_SUBTYPE: "button3",
    }
    action = {
        "service": "test.automation",
        "data_template": {"some": "test_trigger_single_click"},
    }
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {automation.DOMAIN: [{"trigger": trigger, "action": action}]},
    )

    assert len(notification_calls) == 1
    assert (
        "The following integrations and platforms could not be set up"
        in notification_calls[0].data["message"]
    )
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systembackup(base_resource) :
	""" Configuration for Backup Data for ns backup and restore resource. """
	def __init__(self) :
		# attributes sent to the appliance on create/restore/delete
		self._filename = ""
		self._level = ""
		self._comment = ""
		# read-only attributes populated from appliance responses
		self._size = 0
		self._creationtime = ""
		self._version = ""
		self._createdby = ""
		self._ipaddress = ""
		# record count used by the count()/count_filtered() class methods
		self.___count = 0

	@property
	def filename(self) :
		ur"""Name of the backup file(*.tgz) to be restored.
		"""
		try :
			return self._filename
		except Exception as e:
			raise e

	@filename.setter
	def filename(self, filename) :
		ur"""Name of the backup file(*.tgz) to be restored.
		"""
		try :
			self._filename = filename
		except Exception as e:
			raise e

	@property
	def level(self) :
		ur"""Level of data to be backed up.<br/>Default value: basic<br/>Possible values = basic, full.
		"""
		try :
			return self._level
		except Exception as e:
			raise e

	@level.setter
	def level(self, level) :
		ur"""Level of data to be backed up.<br/>Default value: basic<br/>Possible values = basic, full
		"""
		try :
			self._level = level
		except Exception as e:
			raise e

	@property
	def comment(self) :
		ur"""Comment specified at the time of creation of the backup file(*.tgz).
		"""
		try :
			return self._comment
		except Exception as e:
			raise e

	@comment.setter
	def comment(self, comment) :
		ur"""Comment specified at the time of creation of the backup file(*.tgz).
		"""
		try :
			self._comment = comment
		except Exception as e:
			raise e

	# the remaining properties are read-only (no setters): they are only
	# reported by the appliance, never configured by the client
	@property
	def size(self) :
		ur"""Size of the backup file(*.tgz) in KB.
		"""
		try :
			return self._size
		except Exception as e:
			raise e

	@property
	def creationtime(self) :
		ur"""Creation time of the backup file(*.tgz).
		"""
		try :
			return self._creationtime
		except Exception as e:
			raise e

	@property
	def version(self) :
		ur"""Build version of the backup file(*.tgz).
		"""
		try :
			return self._version
		except Exception as e:
			raise e

	@property
	def createdby(self) :
		ur"""Name of user who created the backup file(*.tgz).
		"""
		try :
			return self._createdby
		except Exception as e:
			raise e

	@property
	def ipaddress(self) :
		ur"""Ip of Netscaler box where the backup file(*.tgz) was created.
		"""
		try :
			return self._ipaddress
		except Exception as e:
			raise e

	def _get_nitro_response(self, service, response) :
		ur""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(systembackup_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# errorcode 444 indicates the session is no longer valid
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.systembackup
		except Exception as e :
			raise e

	def _get_object_name(self) :
		ur""" Returns the value of object identifier argument
		"""
		try :
			if self.filename is not None :
				return str(self.filename)
			return None
		except Exception as e :
			raise e

	@classmethod
	def create(cls, client, resource) :
		ur""" Use this API to create systembackup.
		"""
		try :
			# single resource and list-of-resources forms are both supported
			if type(resource) is not list :
				createresource = systembackup()
				createresource.filename = resource.filename
				createresource.level = resource.level
				createresource.comment = resource.comment
				return createresource.perform_operation(client,"create")
			else :
				if (resource and len(resource) > 0) :
					createresources = [ systembackup() for _ in range(len(resource))]
					for i in range(len(resource)) :
						createresources[i].filename = resource[i].filename
						createresources[i].level = resource[i].level
						createresources[i].comment = resource[i].comment
				result = cls.perform_operation_bulk_request(client, createresources,"create")
			return result
		except Exception as e :
			raise e

	@classmethod
	def restore(cls, client, resource) :
		ur""" Use this API to restore systembackup.
		"""
		try :
			# single resource and list-of-resources forms are both supported
			if type(resource) is not list :
				restoreresource = systembackup()
				restoreresource.filename = resource.filename
				return restoreresource.perform_operation(client,"restore")
			else :
				if (resource and len(resource) > 0) :
					restoreresources = [ systembackup() for _ in range(len(resource))]
					for i in range(len(resource)) :
						restoreresources[i].filename = resource[i].filename
				result = cls.perform_operation_bulk_request(client, restoreresources,"restore")
			return result
		except Exception as e :
			raise e

	@classmethod
	def delete(cls, client, resource) :
		ur""" Use this API to delete systembackup.
		"""
		try :
			# resource may be a filename string, a systembackup object,
			# or a list of either
			if type(resource) is not list :
				deleteresource = systembackup()
				if type(resource) != type(deleteresource):
					deleteresource.filename = resource
				else :
					deleteresource.filename = resource.filename
				return deleteresource.delete_resource(client)
			else :
				if type(resource[0]) != cls :
					if (resource and len(resource) > 0) :
						deleteresources = [ systembackup() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].filename = resource[i]
				else :
					if (resource and len(resource) > 0) :
						deleteresources = [ systembackup() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].filename = resource[i].filename
				result = cls.delete_bulk_request(client, deleteresources)
			return result
		except Exception as e :
			raise e

	@classmethod
	def get(cls, client, name="", option_="") :
		ur""" Use this API to fetch all the systembackup resources that are configured on netscaler.
		"""
		try :
			# no name: fetch all resources; otherwise fetch by filename
			# (a single name or a list of names)
			if not name :
				obj = systembackup()
				response = obj.get_resources(client, option_)
			else :
				if type(name) != cls :
					if type(name) is not list :
						obj = systembackup()
						obj.filename = name
						response = obj.get_resource(client, option_)
					else :
						if name and len(name) > 0 :
							response = [systembackup() for _ in range(len(name))]
							obj = [systembackup() for _ in range(len(name))]
							for i in range(len(name)) :
								obj[i] = systembackup()
								obj[i].filename = name[i]
								response[i] = obj[i].get_resource(client, option_)
			return response
		except Exception as e :
			raise e

	@classmethod
	def get_filtered(cls, client, filter_) :
		ur""" Use this API to fetch filtered set of systembackup resources.
		filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = systembackup()
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(client, option_)
			return response
		except Exception as e :
			raise e

	@classmethod
	def count(cls, client) :
		ur""" Use this API to count the systembackup resources configured on NetScaler.
		"""
		try :
			obj = systembackup()
			option_ = options()
			option_.count = True
			response = obj.get_resources(client, option_)
			# the count comes back in the name-mangled ___count attribute
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e

	@classmethod
	def count_filtered(cls, client, filter_) :
		ur""" Use this API to count filtered the set of systembackup resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = systembackup()
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(client, option_)
			# the count comes back in the name-mangled ___count attribute
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e

	# allowed values for the level property
	class Level:
		basic = "basic"
		full = "full"
class systembackup_response(base_response) :
	""" Nitro API response wrapper for systembackup: holds the decoded
	resource list plus the standard error/session fields. """
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# pre-allocate the expected number of resource objects; the original
		# also assigned an empty list first, which was dead code
		self.systembackup = [systembackup() for _ in range(length)]
| |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import inspect
import logging
from struct import pack, unpack_from, pack_into
from nose.tools import ok_, eq_, raises
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.ipv4 import ipv4
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.packet_utils import checksum
from ryu.lib import addrconv
from ryu.lib.packet.igmp import igmp
from ryu.lib.packet.igmp import igmpv3_query
from ryu.lib.packet.igmp import igmpv3_report
from ryu.lib.packet.igmp import igmpv3_report_group
from ryu.lib.packet.igmp import IGMP_TYPE_QUERY
from ryu.lib.packet.igmp import IGMP_TYPE_REPORT_V3
from ryu.lib.packet.igmp import MODE_IS_INCLUDE
LOG = logging.getLogger(__name__)
class Test_igmp(unittest.TestCase):
    """ Test case for Internet Group Management Protocol
    """

    def setUp(self):
        # reference field values, their packed wire image (self.buf), and
        # the equivalent igmp object (self.g), shared by all tests
        self.msgtype = IGMP_TYPE_QUERY
        self.maxresp = 100
        self.csum = 0
        self.address = '225.0.0.1'
        self.buf = pack(igmp._PACK_STR, self.msgtype, self.maxresp,
                        self.csum,
                        addrconv.ipv4.text_to_bin(self.address))
        self.g = igmp(self.msgtype, self.maxresp, self.csum,
                      self.address)

    def tearDown(self):
        pass

    def find_protocol(self, pkt, name):
        # return the first protocol in pkt with the given name
        # (implicitly None if not present)
        for p in pkt.protocols:
            if p.protocol_name == name:
                return p

    def test_init(self):
        # constructor stores each field unchanged
        eq_(self.msgtype, self.g.msgtype)
        eq_(self.maxresp, self.g.maxresp)
        eq_(self.csum, self.g.csum)
        eq_(self.address, self.g.address)

    def test_parser(self):
        # parser may return (msg, rest) tuple or the message alone
        _res = self.g.parser(self.buf)
        if type(_res) is tuple:
            res = _res[0]
        else:
            res = _res
        eq_(res.msgtype, self.msgtype)
        eq_(res.maxresp, self.maxresp)
        eq_(res.csum, self.csum)
        eq_(res.address, self.address)

    def test_serialize(self):
        data = bytearray()
        prev = None
        buf = self.g.serialize(data, prev)
        # NOTE: buffer() is Python 2 only
        res = unpack_from(igmp._PACK_STR, buffer(buf))
        eq_(res[0], self.msgtype)
        eq_(res[1], self.maxresp)
        # serialize fills in the checksum over the zero-checksum image
        eq_(res[2], checksum(self.buf))
        eq_(res[3], addrconv.ipv4.text_to_bin(self.address))

    def _build_igmp(self):
        # build and serialize an ethernet/ipv4/igmp packet around self.g
        dl_dst = '11:22:33:44:55:66'
        dl_src = 'aa:bb:cc:dd:ee:ff'
        dl_type = ether.ETH_TYPE_IP
        e = ethernet(dl_dst, dl_src, dl_type)
        total_length = 20 + igmp._MIN_LEN
        nw_proto = inet.IPPROTO_IGMP
        nw_dst = '11.22.33.44'
        nw_src = '55.66.77.88'
        i = ipv4(total_length=total_length, src=nw_src, dst=nw_dst,
                 proto=nw_proto)
        p = Packet()
        p.add_protocol(e)
        p.add_protocol(i)
        p.add_protocol(self.g)
        p.serialize()
        return p

    def test_build_igmp(self):
        # every layer of the built packet carries the expected values
        p = self._build_igmp()
        e = self.find_protocol(p, "ethernet")
        ok_(e)
        eq_(e.ethertype, ether.ETH_TYPE_IP)
        i = self.find_protocol(p, "ipv4")
        ok_(i)
        eq_(i.proto, inet.IPPROTO_IGMP)
        g = self.find_protocol(p, "igmp")
        ok_(g)
        eq_(g.msgtype, self.msgtype)
        eq_(g.maxresp, self.maxresp)
        eq_(g.csum, checksum(self.buf))
        eq_(g.address, self.address)

    def test_to_string(self):
        # str()/repr() list every field as name=repr(value)
        igmp_values = {'msgtype': repr(self.msgtype),
                       'maxresp': repr(self.maxresp),
                       'csum': repr(self.csum),
                       'address': repr(self.address)}
        _g_str = ','.join(['%s=%s' % (k, igmp_values[k])
                           for k, v in inspect.getmembers(self.g)
                           if k in igmp_values])
        g_str = '%s(%s)' % (igmp.__name__, _g_str)
        eq_(str(self.g), g_str)
        eq_(repr(self.g), g_str)

    @raises(Exception)
    def test_malformed_igmp(self):
        # a truncated buffer must raise from the parser
        m_short_buf = self.buf[1:igmp._MIN_LEN]
        igmp.parser(m_short_buf)

    def test_default_args(self):
        # igmp() with no args serializes with the documented defaults
        ig = igmp()
        buf = ig.serialize(bytearray(), None)
        res = unpack_from(igmp._PACK_STR, str(buf))
        eq_(res[0], 0x11)
        eq_(res[1], 0)
        eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))

    def test_json(self):
        # round-trip through the jsondict representation
        jsondict = self.g.to_jsondict()
        g = igmp.from_jsondict(jsondict['igmp'])
        eq_(str(self.g), str(g))
class Test_igmpv3_query(unittest.TestCase):
""" Test case for Internet Group Management Protocol v3
Membership Query Message"""
def setUp(self):
self.msgtype = IGMP_TYPE_QUERY
self.maxresp = 100
self.csum = 0
self.address = '225.0.0.1'
self.s_flg = 0
self.qrv = 2
self.qqic = 10
self.num = 0
self.srcs = []
self.s_qrv = self.s_flg << 3 | self.qrv
self.buf = pack(igmpv3_query._PACK_STR, self.msgtype,
self.maxresp, self.csum,
addrconv.ipv4.text_to_bin(self.address),
self.s_qrv, self.qqic, self.num)
self.g = igmpv3_query(
self.msgtype, self.maxresp, self.csum, self.address,
self.s_flg, self.qrv, self.qqic, self.num, self.srcs)
def setUp_with_srcs(self):
self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
self.num = len(self.srcs)
self.buf = pack(igmpv3_query._PACK_STR, self.msgtype,
self.maxresp, self.csum,
addrconv.ipv4.text_to_bin(self.address),
self.s_qrv, self.qqic, self.num)
for src in self.srcs:
self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
self.g = igmpv3_query(
self.msgtype, self.maxresp, self.csum, self.address,
self.s_flg, self.qrv, self.qqic, self.num, self.srcs)
def tearDown(self):
pass
def find_protocol(self, pkt, name):
for p in pkt.protocols:
if p.protocol_name == name:
return p
def test_init(self):
eq_(self.msgtype, self.g.msgtype)
eq_(self.maxresp, self.g.maxresp)
eq_(self.csum, self.g.csum)
eq_(self.address, self.g.address)
eq_(self.s_flg, self.g.s_flg)
eq_(self.qrv, self.g.qrv)
eq_(self.qqic, self.g.qqic)
eq_(self.num, self.g.num)
eq_(self.srcs, self.g.srcs)
def test_init_with_srcs(self):
self.setUp_with_srcs()
self.test_init()
def test_parser(self):
_res = self.g.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(res.msgtype, self.msgtype)
eq_(res.maxresp, self.maxresp)
eq_(res.csum, self.csum)
eq_(res.address, self.address)
eq_(res.s_flg, self.s_flg)
eq_(res.qrv, self.qrv)
eq_(res.qqic, self.qqic)
eq_(res.num, self.num)
eq_(res.srcs, self.srcs)
def test_parser_with_srcs(self):
self.setUp_with_srcs()
self.test_parser()
def test_serialize(self):
data = bytearray()
prev = None
buf = self.g.serialize(data, prev)
res = unpack_from(igmpv3_query._PACK_STR, buffer(buf))
eq_(res[0], self.msgtype)
eq_(res[1], self.maxresp)
eq_(res[2], checksum(self.buf))
eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
eq_(res[4], self.s_qrv)
eq_(res[5], self.qqic)
eq_(res[6], self.num)
    def test_serialize_with_srcs(self):
        """serialize() must append the source addresses after the header."""
        self.setUp_with_srcs()
        data = bytearray()
        prev = None
        buf = self.g.serialize(data, prev)
        res = unpack_from(igmpv3_query._PACK_STR, buffer(buf))
        # Sources start right after the fixed-size header.
        (src1, src2, src3) = unpack_from('4s4s4s', buffer(buf),
                                         igmpv3_query._MIN_LEN)
        eq_(res[0], self.msgtype)
        eq_(res[1], self.maxresp)
        eq_(res[2], checksum(self.buf))
        eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
        eq_(res[4], self.s_qrv)
        eq_(res[5], self.qqic)
        eq_(res[6], self.num)
        eq_(src1, addrconv.ipv4.text_to_bin(self.srcs[0]))
        eq_(src2, addrconv.ipv4.text_to_bin(self.srcs[1]))
        eq_(src3, addrconv.ipv4.text_to_bin(self.srcs[2]))
def _build_igmp(self):
dl_dst = '11:22:33:44:55:66'
dl_src = 'aa:bb:cc:dd:ee:ff'
dl_type = ether.ETH_TYPE_IP
e = ethernet(dl_dst, dl_src, dl_type)
total_length = len(ipv4()) + len(self.g)
nw_proto = inet.IPPROTO_IGMP
nw_dst = '11.22.33.44'
nw_src = '55.66.77.88'
i = ipv4(total_length=total_length, src=nw_src, dst=nw_dst,
proto=nw_proto, ttl=1)
p = Packet()
p.add_protocol(e)
p.add_protocol(i)
p.add_protocol(self.g)
p.serialize()
return p
def test_build_igmp(self):
p = self._build_igmp()
e = self.find_protocol(p, "ethernet")
ok_(e)
eq_(e.ethertype, ether.ETH_TYPE_IP)
i = self.find_protocol(p, "ipv4")
ok_(i)
eq_(i.proto, inet.IPPROTO_IGMP)
g = self.find_protocol(p, "igmpv3_query")
ok_(g)
eq_(g.msgtype, self.msgtype)
eq_(g.maxresp, self.maxresp)
eq_(g.csum, checksum(self.buf))
eq_(g.address, self.address)
eq_(g.s_flg, self.s_flg)
eq_(g.qrv, self.qrv)
eq_(g.qqic, self.qqic)
eq_(g.num, self.num)
eq_(g.srcs, self.srcs)
    def test_build_igmp_with_srcs(self):
        # Same end-to-end packet check, but with source addresses present.
        self.setUp_with_srcs()
        self.test_build_igmp()
def test_to_string(self):
igmp_values = {'msgtype': repr(self.msgtype),
'maxresp': repr(self.maxresp),
'csum': repr(self.csum),
'address': repr(self.address),
's_flg': repr(self.s_flg),
'qrv': repr(self.qrv),
'qqic': repr(self.qqic),
'num': repr(self.num),
'srcs': repr(self.srcs)}
_g_str = ','.join(['%s=%s' % (k, igmp_values[k])
for k, v in inspect.getmembers(self.g)
if k in igmp_values])
g_str = '%s(%s)' % (igmpv3_query.__name__, _g_str)
eq_(str(self.g), g_str)
eq_(repr(self.g), g_str)
    def test_to_string_with_srcs(self):
        # Same stringification check with source addresses present.
        self.setUp_with_srcs()
        self.test_to_string()
    @raises(Exception)
    def test_num_larger_than_srcs(self):
        """Parsing must fail when ``num`` claims more sources than exist."""
        self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        # Deliberately inconsistent: num says 4, only 3 sources in the buffer.
        self.num = len(self.srcs) + 1
        self.buf = pack(igmpv3_query._PACK_STR, self.msgtype,
                        self.maxresp, self.csum,
                        addrconv.ipv4.text_to_bin(self.address),
                        self.s_qrv, self.qqic, self.num)
        for src in self.srcs:
            self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
        self.g = igmpv3_query(
            self.msgtype, self.maxresp, self.csum, self.address,
            self.s_flg, self.qrv, self.qqic, self.num, self.srcs)
        self.test_parser()
    @raises(Exception)
    def test_num_smaller_than_srcs(self):
        """Parsing must fail when ``num`` claims fewer sources than exist."""
        self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        # Deliberately inconsistent: num says 2, but 3 sources follow.
        self.num = len(self.srcs) - 1
        self.buf = pack(igmpv3_query._PACK_STR, self.msgtype,
                        self.maxresp, self.csum,
                        addrconv.ipv4.text_to_bin(self.address),
                        self.s_qrv, self.qqic, self.num)
        for src in self.srcs:
            self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
        self.g = igmpv3_query(
            self.msgtype, self.maxresp, self.csum, self.address,
            self.s_flg, self.qrv, self.qqic, self.num, self.srcs)
        self.test_parser()
    def test_default_args(self):
        """A query built with all-default arguments must serialize sanely."""
        prev = ipv4(proto=inet.IPPROTO_IGMP)
        g = igmpv3_query()
        prev.serialize(g, None)
        buf = g.serialize(bytearray(), prev)
        res = unpack_from(igmpv3_query._PACK_STR, str(buf))
        # Zero out the checksum field so we can recompute it for comparison.
        buf = bytearray(buf)
        pack_into('!H', buf, 2, 0)
        buf = str(buf)
        eq_(res[0], IGMP_TYPE_QUERY)
        eq_(res[1], 100)
        eq_(res[2], checksum(buf))
        eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
        eq_(res[4], 2)
        eq_(res[5], 0)
        eq_(res[6], 0)
        # srcs without num
        prev = ipv4(proto=inet.IPPROTO_IGMP)
        srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        g = igmpv3_query(srcs=srcs)
        prev.serialize(g, None)
        buf = g.serialize(bytearray(), prev)
        res = unpack_from(igmpv3_query._PACK_STR, str(buf))
        buf = bytearray(buf)
        pack_into('!H', buf, 2, 0)
        buf = str(buf)
        eq_(res[0], IGMP_TYPE_QUERY)
        eq_(res[1], 100)
        eq_(res[2], checksum(buf))
        eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
        eq_(res[4], 2)
        eq_(res[5], 0)
        # num must be derived from len(srcs) when not given explicitly.
        eq_(res[6], len(srcs))
        res = unpack_from('4s4s4s', str(buf), igmpv3_query._MIN_LEN)
        eq_(res[0], addrconv.ipv4.text_to_bin(srcs[0]))
        eq_(res[1], addrconv.ipv4.text_to_bin(srcs[1]))
        eq_(res[2], addrconv.ipv4.text_to_bin(srcs[2]))
def test_json(self):
jsondict = self.g.to_jsondict()
g = igmpv3_query.from_jsondict(jsondict['igmpv3_query'])
eq_(str(self.g), str(g))
    def test_json_with_srcs(self):
        # Same JSON round-trip check with source addresses present.
        self.setUp_with_srcs()
        self.test_json()
class Test_igmpv3_report(unittest.TestCase):
    """ Test case for Internet Group Management Protocol v3
    Membership Report Message"""
    def setUp(self):
        # Minimal report: no group records at all.
        self.msgtype = IGMP_TYPE_REPORT_V3
        self.csum = 0
        self.record_num = 0
        self.records = []
        self.buf = pack(igmpv3_report._PACK_STR, self.msgtype,
                        self.csum, self.record_num)
        self.g = igmpv3_report(
            self.msgtype, self.csum, self.record_num, self.records)
    def setUp_with_records(self):
        # Four records covering the srcs/aux combinations:
        # no extras, srcs only, aux only, and both.
        self.record1 = igmpv3_report_group(
            MODE_IS_INCLUDE, 0, 0, '225.0.0.1')
        self.record2 = igmpv3_report_group(
            MODE_IS_INCLUDE, 0, 2, '225.0.0.2',
            ['172.16.10.10', '172.16.10.27'])
        self.record3 = igmpv3_report_group(
            MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], 'abc\x00')
        self.record4 = igmpv3_report_group(
            MODE_IS_INCLUDE, 2, 2, '225.0.0.4',
            ['172.16.10.10', '172.16.10.27'], 'abcde\x00\x00\x00')
        self.records = [self.record1, self.record2, self.record3,
                        self.record4]
        self.record_num = len(self.records)
        self.buf = pack(igmpv3_report._PACK_STR, self.msgtype,
                        self.csum, self.record_num)
        self.buf += self.record1.serialize()
        self.buf += self.record2.serialize()
        self.buf += self.record3.serialize()
        self.buf += self.record4.serialize()
        self.g = igmpv3_report(
            self.msgtype, self.csum, self.record_num, self.records)
    def tearDown(self):
        # No per-test cleanup is needed.
        pass
    def find_protocol(self, pkt, name):
        # Return the first protocol in pkt with the given name (or None).
        for p in pkt.protocols:
            if p.protocol_name == name:
                return p
    def test_init(self):
        eq_(self.msgtype, self.g.msgtype)
        eq_(self.csum, self.g.csum)
        eq_(self.record_num, self.g.record_num)
        eq_(self.records, self.g.records)
    def test_init_with_records(self):
        self.setUp_with_records()
        self.test_init()
    def test_parser(self):
        _res = self.g.parser(str(self.buf))
        # parser() may return (msg, cls, rest); keep only the message.
        if type(_res) is tuple:
            res = _res[0]
        else:
            res = _res
        eq_(res.msgtype, self.msgtype)
        eq_(res.csum, self.csum)
        eq_(res.record_num, self.record_num)
        eq_(repr(res.records), repr(self.records))
    def test_parser_with_records(self):
        self.setUp_with_records()
        self.test_parser()
    def test_serialize(self):
        data = bytearray()
        prev = None
        buf = self.g.serialize(data, prev)
        res = unpack_from(igmpv3_report._PACK_STR, buffer(buf))
        eq_(res[0], self.msgtype)
        # serialize() fills in the checksum field.
        eq_(res[1], checksum(self.buf))
        eq_(res[2], self.record_num)
    def test_serialize_with_records(self):
        self.setUp_with_records()
        data = bytearray()
        prev = None
        buf = self.g.serialize(data, prev)
        res = unpack_from(igmpv3_report._PACK_STR, buffer(buf))
        # Walk the variable-length records one at a time.
        offset = igmpv3_report._MIN_LEN
        rec1 = igmpv3_report_group.parser(buffer(buf[offset:]))
        offset += len(rec1)
        rec2 = igmpv3_report_group.parser(buffer(buf[offset:]))
        offset += len(rec2)
        rec3 = igmpv3_report_group.parser(buffer(buf[offset:]))
        offset += len(rec3)
        rec4 = igmpv3_report_group.parser(buffer(buf[offset:]))
        eq_(res[0], self.msgtype)
        eq_(res[1], checksum(self.buf))
        eq_(res[2], self.record_num)
        eq_(repr(rec1), repr(self.record1))
        eq_(repr(rec2), repr(self.record2))
        eq_(repr(rec3), repr(self.record3))
        eq_(repr(rec4), repr(self.record4))
    def _build_igmp(self):
        # Wrap self.g in ethernet/ipv4 headers and serialize the packet.
        dl_dst = '11:22:33:44:55:66'
        dl_src = 'aa:bb:cc:dd:ee:ff'
        dl_type = ether.ETH_TYPE_IP
        e = ethernet(dl_dst, dl_src, dl_type)
        total_length = len(ipv4()) + len(self.g)
        nw_proto = inet.IPPROTO_IGMP
        nw_dst = '11.22.33.44'
        nw_src = '55.66.77.88'
        i = ipv4(total_length=total_length, src=nw_src, dst=nw_dst,
                 proto=nw_proto, ttl=1)
        p = Packet()
        p.add_protocol(e)
        p.add_protocol(i)
        p.add_protocol(self.g)
        p.serialize()
        return p
    def test_build_igmp(self):
        p = self._build_igmp()
        e = self.find_protocol(p, "ethernet")
        ok_(e)
        eq_(e.ethertype, ether.ETH_TYPE_IP)
        i = self.find_protocol(p, "ipv4")
        ok_(i)
        eq_(i.proto, inet.IPPROTO_IGMP)
        g = self.find_protocol(p, "igmpv3_report")
        ok_(g)
        eq_(g.msgtype, self.msgtype)
        # Packet serialization fills in the checksum.
        eq_(g.csum, checksum(self.buf))
        eq_(g.record_num, self.record_num)
        eq_(g.records, self.records)
    def test_build_igmp_with_records(self):
        self.setUp_with_records()
        self.test_build_igmp()
    def test_to_string(self):
        igmp_values = {'msgtype': repr(self.msgtype),
                       'csum': repr(self.csum),
                       'record_num': repr(self.record_num),
                       'records': repr(self.records)}
        # Field order follows inspect.getmembers (alphabetical).
        _g_str = ','.join(['%s=%s' % (k, igmp_values[k])
                           for k, v in inspect.getmembers(self.g)
                           if k in igmp_values])
        g_str = '%s(%s)' % (igmpv3_report.__name__, _g_str)
        eq_(str(self.g), g_str)
        eq_(repr(self.g), g_str)
    def test_to_string_with_records(self):
        self.setUp_with_records()
        self.test_to_string()
    @raises(Exception)
    def test_record_num_larger_than_records(self):
        # record_num deliberately disagrees with len(records): parsing
        # the resulting buffer must raise.
        self.record1 = igmpv3_report_group(
            MODE_IS_INCLUDE, 0, 0, '225.0.0.1')
        self.record2 = igmpv3_report_group(
            MODE_IS_INCLUDE, 0, 2, '225.0.0.2',
            ['172.16.10.10', '172.16.10.27'])
        self.record3 = igmpv3_report_group(
            MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], 'abc\x00')
        self.record4 = igmpv3_report_group(
            MODE_IS_INCLUDE, 1, 2, '225.0.0.4',
            ['172.16.10.10', '172.16.10.27'], 'abc\x00')
        self.records = [self.record1, self.record2, self.record3,
                        self.record4]
        self.record_num = len(self.records) + 1
        self.buf = pack(igmpv3_report._PACK_STR, self.msgtype,
                        self.csum, self.record_num)
        self.buf += self.record1.serialize()
        self.buf += self.record2.serialize()
        self.buf += self.record3.serialize()
        self.buf += self.record4.serialize()
        self.g = igmpv3_report(
            self.msgtype, self.csum, self.record_num, self.records)
        self.test_parser()
    @raises(Exception)
    def test_record_num_smaller_than_records(self):
        # record_num deliberately undercounts the records: parsing
        # the resulting buffer must raise.
        self.record1 = igmpv3_report_group(
            MODE_IS_INCLUDE, 0, 0, '225.0.0.1')
        self.record2 = igmpv3_report_group(
            MODE_IS_INCLUDE, 0, 2, '225.0.0.2',
            ['172.16.10.10', '172.16.10.27'])
        self.record3 = igmpv3_report_group(
            MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], 'abc\x00')
        self.record4 = igmpv3_report_group(
            MODE_IS_INCLUDE, 1, 2, '225.0.0.4',
            ['172.16.10.10', '172.16.10.27'], 'abc\x00')
        self.records = [self.record1, self.record2, self.record3,
                        self.record4]
        self.record_num = len(self.records) - 1
        self.buf = pack(igmpv3_report._PACK_STR, self.msgtype,
                        self.csum, self.record_num)
        self.buf += self.record1.serialize()
        self.buf += self.record2.serialize()
        self.buf += self.record3.serialize()
        self.buf += self.record4.serialize()
        self.g = igmpv3_report(
            self.msgtype, self.csum, self.record_num, self.records)
        self.test_parser()
    def test_default_args(self):
        prev = ipv4(proto=inet.IPPROTO_IGMP)
        g = igmpv3_report()
        prev.serialize(g, None)
        buf = g.serialize(bytearray(), prev)
        res = unpack_from(igmpv3_report._PACK_STR, str(buf))
        # Zero the checksum field so we can recompute it for comparison.
        buf = bytearray(buf)
        pack_into('!H', buf, 2, 0)
        buf = str(buf)
        eq_(res[0], IGMP_TYPE_REPORT_V3)
        eq_(res[1], checksum(buf))
        eq_(res[2], 0)
        # records without record_num
        prev = ipv4(proto=inet.IPPROTO_IGMP)
        record1 = igmpv3_report_group(
            MODE_IS_INCLUDE, 0, 0, '225.0.0.1')
        record2 = igmpv3_report_group(
            MODE_IS_INCLUDE, 0, 2, '225.0.0.2',
            ['172.16.10.10', '172.16.10.27'])
        record3 = igmpv3_report_group(
            MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], 'abc\x00')
        record4 = igmpv3_report_group(
            MODE_IS_INCLUDE, 1, 2, '225.0.0.4',
            ['172.16.10.10', '172.16.10.27'], 'abc\x00')
        records = [record1, record2, record3, record4]
        g = igmpv3_report(records=records)
        prev.serialize(g, None)
        buf = g.serialize(bytearray(), prev)
        res = unpack_from(igmpv3_report._PACK_STR, str(buf))
        buf = bytearray(buf)
        pack_into('!H', buf, 2, 0)
        buf = str(buf)
        eq_(res[0], IGMP_TYPE_REPORT_V3)
        eq_(res[1], checksum(buf))
        # record_num must be derived from len(records) when omitted.
        eq_(res[2], len(records))
    def test_json(self):
        jsondict = self.g.to_jsondict()
        g = igmpv3_report.from_jsondict(jsondict['igmpv3_report'])
        eq_(str(self.g), str(g))
    def test_json_with_records(self):
        self.setUp_with_records()
        self.test_json()
class Test_igmpv3_report_group(unittest.TestCase):
    """Test case for Group Records of
    Internet Group Management Protocol v3 Membership Report Message"""
    def setUp(self):
        # Minimal record: no source addresses and no auxiliary data.
        self.type_ = MODE_IS_INCLUDE
        self.aux_len = 0
        self.num = 0
        self.address = '225.0.0.1'
        self.srcs = []
        self.aux = None
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
    def setUp_with_srcs(self):
        # Variant fixture: record carrying three source addresses.
        self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        self.num = len(self.srcs)
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        for src in self.srcs:
            self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
    def setUp_with_aux(self):
        # Variant fixture: record carrying 8 bytes of auxiliary data
        # (aux_len is expressed in 32-bit words).
        self.aux = '\x01\x02\x03\x04\x05\x00\x00\x00'
        self.aux_len = len(self.aux) / 4
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        self.buf += self.aux
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
    def setUp_with_srcs_and_aux(self):
        # Variant fixture: both source addresses and auxiliary data.
        self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        self.num = len(self.srcs)
        self.aux = '\x01\x02\x03\x04\x05\x00\x00\x00'
        self.aux_len = len(self.aux) / 4
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        for src in self.srcs:
            self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
        self.buf += self.aux
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
    def tearDown(self):
        # No per-test cleanup is needed.
        pass
    def test_init(self):
        eq_(self.type_, self.g.type_)
        eq_(self.aux_len, self.g.aux_len)
        eq_(self.num, self.g.num)
        eq_(self.address, self.g.address)
        eq_(self.srcs, self.g.srcs)
        eq_(self.aux, self.g.aux)
    def test_init_with_srcs(self):
        self.setUp_with_srcs()
        self.test_init()
    def test_init_with_aux(self):
        self.setUp_with_aux()
        self.test_init()
    def test_init_with_srcs_and_aux(self):
        self.setUp_with_srcs_and_aux()
        self.test_init()
    def test_parser(self):
        _res = self.g.parser(self.buf)
        # parser() may return (msg, cls, rest); keep only the message.
        if type(_res) is tuple:
            res = _res[0]
        else:
            res = _res
        eq_(res.type_, self.type_)
        eq_(res.aux_len, self.aux_len)
        eq_(res.num, self.num)
        eq_(res.address, self.address)
        eq_(res.srcs, self.srcs)
        eq_(res.aux, self.aux)
    def test_parser_with_srcs(self):
        self.setUp_with_srcs()
        self.test_parser()
    def test_parser_with_aux(self):
        self.setUp_with_aux()
        self.test_parser()
    def test_parser_with_srcs_and_aux(self):
        self.setUp_with_srcs_and_aux()
        self.test_parser()
    def test_serialize(self):
        buf = self.g.serialize()
        res = unpack_from(igmpv3_report_group._PACK_STR, buffer(buf))
        eq_(res[0], self.type_)
        eq_(res[1], self.aux_len)
        eq_(res[2], self.num)
        eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
    def test_serialize_with_srcs(self):
        self.setUp_with_srcs()
        buf = self.g.serialize()
        res = unpack_from(igmpv3_report_group._PACK_STR, buffer(buf))
        (src1, src2, src3) = unpack_from('4s4s4s', buffer(buf),
                                         igmpv3_report_group._MIN_LEN)
        eq_(res[0], self.type_)
        eq_(res[1], self.aux_len)
        eq_(res[2], self.num)
        eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
        eq_(src1, addrconv.ipv4.text_to_bin(self.srcs[0]))
        eq_(src2, addrconv.ipv4.text_to_bin(self.srcs[1]))
        eq_(src3, addrconv.ipv4.text_to_bin(self.srcs[2]))
    def test_serialize_with_aux(self):
        self.setUp_with_aux()
        buf = self.g.serialize()
        res = unpack_from(igmpv3_report_group._PACK_STR, buffer(buf))
        (aux, ) = unpack_from('%ds' % (self.aux_len * 4), buffer(buf),
                              igmpv3_report_group._MIN_LEN)
        eq_(res[0], self.type_)
        eq_(res[1], self.aux_len)
        eq_(res[2], self.num)
        eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
        eq_(aux, self.aux)
    def test_serialize_with_srcs_and_aux(self):
        self.setUp_with_srcs_and_aux()
        buf = self.g.serialize()
        res = unpack_from(igmpv3_report_group._PACK_STR, buffer(buf))
        (src1, src2, src3) = unpack_from('4s4s4s', buffer(buf),
                                         igmpv3_report_group._MIN_LEN)
        # aux follows the three 4-byte source addresses.
        (aux, ) = unpack_from('%ds' % (self.aux_len * 4), buffer(buf),
                              igmpv3_report_group._MIN_LEN + 12)
        eq_(res[0], self.type_)
        eq_(res[1], self.aux_len)
        eq_(res[2], self.num)
        eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
        eq_(src1, addrconv.ipv4.text_to_bin(self.srcs[0]))
        eq_(src2, addrconv.ipv4.text_to_bin(self.srcs[1]))
        eq_(src3, addrconv.ipv4.text_to_bin(self.srcs[2]))
        eq_(aux, self.aux)
    def test_to_string(self):
        igmp_values = {'type_': repr(self.type_),
                       'aux_len': repr(self.aux_len),
                       'num': repr(self.num),
                       'address': repr(self.address),
                       'srcs': repr(self.srcs),
                       'aux': repr(self.aux)}
        # Field order follows inspect.getmembers (alphabetical).
        _g_str = ','.join(['%s=%s' % (k, igmp_values[k])
                           for k, v in inspect.getmembers(self.g)
                           if k in igmp_values])
        g_str = '%s(%s)' % (igmpv3_report_group.__name__, _g_str)
        eq_(str(self.g), g_str)
        eq_(repr(self.g), g_str)
    def test_to_string_with_srcs(self):
        self.setUp_with_srcs()
        self.test_to_string()
    def test_to_string_with_aux(self):
        self.setUp_with_aux()
        self.test_to_string()
    def test_to_string_with_srcs_and_aux(self):
        self.setUp_with_srcs_and_aux()
        self.test_to_string()
    def test_len(self):
        eq_(len(self.g), 8)
    def test_len_with_srcs(self):
        # 8-byte header + 3 * 4-byte sources.
        self.setUp_with_srcs()
        eq_(len(self.g), 20)
    def test_len_with_aux(self):
        # 8-byte header + 8 bytes of aux data.
        self.setUp_with_aux()
        eq_(len(self.g), 16)
    def test_len_with_srcs_and_aux(self):
        # 8-byte header + 12 bytes of sources + 8 bytes of aux data.
        self.setUp_with_srcs_and_aux()
        eq_(len(self.g), 28)
    # BUG FIX: these four decorators were bare ``@raises``.  nose's
    # ``raises(*exceptions)`` is a decorator *factory*; applying it bare
    # turns the test function into the inner decorator, so the test never
    # ran as an exception check.  Use ``@raises(Exception)`` to match the
    # sibling query/report test classes.
    @raises(Exception)
    def test_num_larger_than_srcs(self):
        # num deliberately overcounts the sources: parsing must raise.
        self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        self.num = len(self.srcs) + 1
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        for src in self.srcs:
            self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
        self.test_parser()
    @raises(Exception)
    def test_num_smaller_than_srcs(self):
        # num deliberately undercounts the sources: parsing must raise.
        self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        self.num = len(self.srcs) - 1
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        for src in self.srcs:
            self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
        self.test_parser()
    @raises(Exception)
    def test_aux_len_larger_than_aux(self):
        # aux_len deliberately overcounts the aux words: parsing must raise.
        self.aux = '\x01\x02\x03\x04\x05\x00\x00\x00'
        self.aux_len = len(self.aux) / 4 + 1
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        self.buf += self.aux
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
        self.test_parser()
    @raises(Exception)
    def test_aux_len_smaller_than_aux(self):
        # aux_len deliberately undercounts the aux words: parsing must raise.
        self.aux = '\x01\x02\x03\x04\x05\x00\x00\x00'
        self.aux_len = len(self.aux) / 4 - 1
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        self.buf += self.aux
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
        self.test_parser()
    def test_default_args(self):
        rep = igmpv3_report_group()
        buf = rep.serialize()
        res = unpack_from(igmpv3_report_group._PACK_STR, str(buf))
        eq_(res[0], 0)
        eq_(res[1], 0)
        eq_(res[2], 0)
        eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
        # srcs without num
        srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        rep = igmpv3_report_group(srcs=srcs)
        buf = rep.serialize()
        res = unpack_from(igmpv3_report_group._PACK_STR, str(buf))
        eq_(res[0], 0)
        eq_(res[1], 0)
        # num must be derived from len(srcs) when omitted.
        eq_(res[2], len(srcs))
        eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
        res = unpack_from('4s4s4s', str(buf), igmpv3_report_group._MIN_LEN)
        eq_(res[0], addrconv.ipv4.text_to_bin(srcs[0]))
        eq_(res[1], addrconv.ipv4.text_to_bin(srcs[1]))
        eq_(res[2], addrconv.ipv4.text_to_bin(srcs[2]))
        # aux without aux_len
        aux = 'abcde'
        rep = igmpv3_report_group(aux=aux)
        buf = rep.serialize()
        res = unpack_from(igmpv3_report_group._PACK_STR, str(buf))
        eq_(res[0], 0)
        # aux_len must be derived (in 32-bit words, rounded up) and the
        # aux data zero-padded to a word boundary.
        eq_(res[1], 2)
        eq_(res[2], 0)
        eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
        eq_(buf[igmpv3_report_group._MIN_LEN:], 'abcde\x00\x00\x00')
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.jvm_binary_task import JvmBinaryTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.build_graph.target_scopes import Scopes
from pants.fs import archive
from pants.util.dirutil import safe_mkdir
class BundleCreate(JvmBinaryTask):
  """Creates self-contained application bundles for JvmApp/JvmBinary targets.

  :API: public
  """
  # Directory for both internal and external libraries.
  LIBS_DIR = 'libs'
  # Closure arguments shared by execute() and bundle(): runtime-scoped,
  # honoring intransitive deps.
  _target_closure_kwargs = dict(include_scopes=Scopes.JVM_RUNTIME_SCOPES, respect_intransitive=True)
  @classmethod
  def register_options(cls, register):
    super(BundleCreate, cls).register_options(register)
    register('--deployjar', type=bool,
             fingerprint=True,
             help="Pack all 3rdparty and internal jar classfiles into a single deployjar in "
                  "the bundle's root dir. If unset, all jars will go into the bundle's libs "
                  "directory, the root will only contain a synthetic jar with its manifest's "
                  "Class-Path set to those jars.")
    register('--archive', choices=list(archive.TYPE_NAMES),
             fingerprint=True,
             help='Create an archive of this type from the bundle.')
    register('--archive-prefix', type=bool,
             fingerprint=True,
             help='If --archive is specified, prefix archive with target basename or a unique '
                  'identifier as determined by --use-basename-prefix.')
    # `target.id` ensures global uniqueness, this flag is provided primarily for
    # backward compatibility.
    register('--use-basename-prefix', type=bool,
             help='Use target basename to prefix bundle folder or archive; otherwise a unique '
                  'identifier derived from target will be used.')
  @classmethod
  def product_types(cls):
    # Downstream tasks can consume the bundle directories via this product.
    return ['jvm_bundles']
  class App(object):
    """A uniform interface to an app."""
    @staticmethod
    def is_app(target):
      # Only JvmApp and JvmBinary targets can be bundled.
      return isinstance(target, (JvmApp, JvmBinary))
    def __init__(self, target, use_basename_prefix=False):
      assert self.is_app(target), '{} is not a valid app target'.format(target)
      self.address = target.address
      # A JvmBinary is its own binary; a JvmApp points at one.
      self.binary = target if isinstance(target, JvmBinary) else target.binary
      self.bundles = [] if isinstance(target, JvmBinary) else target.payload.bundles
      self.basename = target.basename if use_basename_prefix else target.id
      self.target = target
  @property
  def cache_target_dirs(self):
    return True
  def execute(self):
    """Build a bundle (and optionally an archive) for every app target."""
    archiver_type = self.get_options().archive
    archiver = archive.archiver(archiver_type) if archiver_type else None
    if self.get_options().use_basename_prefix:
      # NB(peiyu) This special casing is confusing especially given we already fail
      # when duplicate basenames are detected. It's added because of the existing
      # user experience. Turns out a `jvm_app` that depends on another `jvm_binary`
      # of the same basename is fairly common. In this case, using just
      # `target_roots` instead of all transitive targets will reduce the chance users
      # see their bundle command fail due to basename conflicts. We should eventually
      # get rid of this special case.
      targets_to_bundle = self.context.target_roots
    else:
      targets_to_bundle = self.context.targets()
    apps = [self.App(target, use_basename_prefix=self.get_options().use_basename_prefix)
            for target in targets_to_bundle if self.App.is_app(target)]
    if self.get_options().use_basename_prefix:
      self.check_basename_conflicts(apps)
    # NB(peiyu): performance hack to convert loose directories in classpath into jars. This is
    # more efficient than loading them as individual files.
    runtime_classpath = self.context.products.get_data('runtime_classpath')
    targets_to_consolidate = self.find_consolidate_classpath_candidates(
      runtime_classpath,
      self.context.targets(**self._target_closure_kwargs),
    )
    self.consolidate_classpath(targets_to_consolidate, runtime_classpath)
    for app in apps:
      basedir = self.bundle(app)
      # NB(Eric Ayers): Note that this product is not housed/controlled under .pants.d/ Since
      # the bundle is re-created every time, this shouldn't cause a problem, but if we ever
      # expect the product to be cached, a user running an 'rm' on the dist/ directory could
      # cause inconsistencies.
      jvm_bundles_product = self.context.products.get('jvm_bundles')
      jvm_bundles_product.add(app.target, os.path.dirname(basedir)).append(os.path.basename(basedir))
      if archiver:
        archivepath = archiver.create(
          basedir,
          self.get_options().pants_distdir,
          app.basename,
          prefix=app.basename if self.get_options().archive_prefix else None
        )
        self.context.log.info('created {}'.format(os.path.relpath(archivepath, get_buildroot())))
  class BasenameConflictError(TaskError):
    """Indicates the same basename is used by two targets."""
  def bundle(self, app):
    """Create a self-contained application bundle.
    The bundle will contain the target classes, dependencies and resources.
    """
    assert(isinstance(app, BundleCreate.App))
    bundle_dir = os.path.join(self.get_options().pants_distdir, '{}-bundle'.format(app.basename))
    self.context.log.info('creating {}'.format(os.path.relpath(bundle_dir, get_buildroot())))
    safe_mkdir(bundle_dir, clean=True)
    classpath = OrderedSet()
    # Create symlinks for both internal and external dependencies under `lib_dir`. This is
    # only needed when not creating a deployjar
    lib_dir = os.path.join(bundle_dir, self.LIBS_DIR)
    if not self.get_options().deployjar:
      os.mkdir(lib_dir)
      runtime_classpath = self.context.products.get_data('runtime_classpath')
      classpath.update(ClasspathUtil.create_canonical_classpath(
        runtime_classpath,
        app.target.closure(bfs=True, **self._target_closure_kwargs),
        lib_dir,
        internal_classpath_only=False,
        excludes=app.binary.deploy_excludes,
      ))
    bundle_jar = os.path.join(bundle_dir, '{}.jar'.format(app.binary.basename))
    with self.monolithic_jar(app.binary, bundle_jar,
                             manifest_classpath=classpath) as jar:
      self.add_main_manifest_entry(jar, app.binary)
      # Make classpath complete by adding the monolithic jar.
      classpath.update([jar.path])
    if app.binary.shading_rules:
      for jar_path in classpath:
        # In case `jar_path` is a symlink, this is still safe, shaded jar will overwrite jar_path,
        # original file `jar_path` linked to remains untouched.
        # TODO run in parallel to speed up
        self.shade_jar(shading_rules=app.binary.shading_rules, jar_path=jar_path)
    for bundle in app.bundles:
      for path, relpath in bundle.filemap.items():
        bundle_path = os.path.join(bundle_dir, relpath)
        if not os.path.exists(path):
          raise TaskError('Given path: {} does not exist in target {}'.format(
            path, app.address.spec))
        safe_mkdir(os.path.dirname(bundle_path))
        os.symlink(path, bundle_path)
    return bundle_dir
  def consolidate_classpath(self, targets, classpath_products):
    """Convert loose directories in classpath_products into jars. """
    with self.invalidated(targets=targets, invalidate_dependents=True) as invalidation:
      for vt in invalidation.all_vts:
        entries = classpath_products.get_internal_classpath_entries_for_targets([vt.target])
        for index, (conf, entry) in enumerate(entries):
          if ClasspathUtil.is_dir(entry.path):
            jarpath = os.path.join(vt.results_dir, 'output-{}.jar'.format(index))
            # regenerate artifact for invalid vts
            if not vt.valid:
              with self.open_jar(jarpath, overwrite=True, compressed=False) as jar:
                jar.write(entry.path)
            # replace directory classpath entry with its jarpath
            classpath_products.remove_for_target(vt.target, [(conf, entry.path)])
            classpath_products.add_for_target(vt.target, [(conf, jarpath)])
  def find_consolidate_classpath_candidates(self, classpath_products, targets):
    """Return the subset of ``targets`` with a directory on their classpath."""
    targets_with_directory_in_classpath = []
    for target in targets:
      entries = classpath_products.get_internal_classpath_entries_for_targets([target])
      for conf, entry in entries:
        if ClasspathUtil.is_dir(entry.path):
          targets_with_directory_in_classpath.append(target)
          break
    return targets_with_directory_in_classpath
  def check_basename_conflicts(self, apps):
    """Apps' basenames are used as bundle directory names. Ensure they are all unique."""
    basename_seen = {}
    for app in apps:
      if app.basename in basename_seen:
        raise self.BasenameConflictError('Basename must be unique, found two targets use '
                                         "the same basename: {}'\n\t{} and \n\t{}"
                                         .format(app.basename,
                                                 basename_seen[app.basename].address.spec,
                                                 app.target.address.spec))
      basename_seen[app.basename] = app.target
| |
#!/usr/bin/env python
"""Chainer example: autoencoder of a solar image.
"""
# c.f.
# http://nonbiri-tereka.hatenablog.com/entry/2015/06/21/220506
# http://qiita.com/kenmatsu4/items/99d4a54d5a57405ecaf8
import argparse
from astropy.io import fits
import numpy as np
import scipy.ndimage.interpolation as intp
import math as M
import operator
import os
import re
import six
import subprocess
import random
import pickle
import chainer
from chainer import computational_graph as c
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
from chainer import optimizers
import matplotlib as mpl
mpl.use('Agg')
import pylab
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
from datetime import datetime
# --- Hyper-parameters and command-line handling (script runs at import). ---
global dlDepth
global global_normalization, work_dir, epoch_per_level, training_mode_string
dlDepth = 10                    # number of encoder/decoder levels
global_normalization = 1e-2     # scale applied to pixel values before training
dl_batch_size = 2               # minibatch size used for the mask tensors below
epoch_per_level = 6000
training_mode_string = 'i'
# NOTE(review): the description string is a leftover from the MNIST example
# this script was derived from; it is user-visible only in --help output.
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--gpu', '-g', default=-1, type=int,
                    help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
global gpu_flag
gpu_flag=(args.gpu >= 0)
if gpu_flag:
    cuda.init(args.gpu)
def system(cmd):
    """Run *cmd* through the shell, ignoring its exit status.

    NOTE(review): ``shell=True`` with string interpolation is shell-injection
    prone; the commands built in this script use locally generated paths only,
    but do not pass untrusted input here.
    """
    subprocess.call(cmd, shell=True)
# --- Create a timestamped output directory and copy this script into it. ---
global work_dir
work_dir='/home/ubuntu/public_html/' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
system('mkdir -p ' + work_dir)
log_train_fn = work_dir + '/log-training.txt'
log_test_fn = work_dir + '/log-test.txt'
# Keep a copy of the running script alongside its outputs for reproducibility.
system('cp {} {} '.format(__file__, work_dir))
def plot_img(img4,fn,title_str):
    """Render a (batch, channel, H, W) image batch's first plane to PNG.

    Draws the solar-disk outline, writes a full-size and a quarter-size
    thumbnail into ``work_dir``.  ``img4`` may live on the GPU.
    """
    global global_normalization, work_dir
    global dlDepth, training_mode_string
    print np.shape(img4)
    if gpu_flag :
        img4 = cuda.to_cpu(img4)
    # Undo the global normalization before plotting physical values.
    img=(1.0/ global_normalization)*img4[0][0]
    dpi=200
    plt.figure(figsize=(8,6),dpi=dpi)
    fig, ax = plt.subplots()
    # Solar disk outline: radius 450 px on the 1024x1024 grid.
    circle1=plt.Circle((512,512),450,edgecolor='black',fill=False)
    cmap = plt.get_cmap('bwr')
    cax = ax.imshow(img,cmap=cmap,extent=(0,1024,0,1024),vmin=-100.0,vmax=100.0)
    cbar=fig.colorbar(cax)
    fig.gca().add_artist(circle1)
    ax.set_title(title_str)
    fig.savefig('{}/{}-{}.png'.format(work_dir,fn,training_mode_string),dpi=dpi)
    fig.savefig('{}/{}-{}-thumb.png'.format(work_dir,fn,training_mode_string),dpi=dpi/4)
    plt.close('all')
# Per-depth lookup tables, filled in by the loop below.
batch_location_supply = dlDepth * [None]
solar_disk_mask= dlDepth * [None]
def nch(d):
    """Number of feature channels at depth ``d``: doubles per level, capped at 4."""
    return 4 if d >= 2 else 2 ** d
# For each depth, precompute per-pixel heliographic location masks and a
# binary solar-disk mask, replicated across the batch (and channels).
for d in range(dlDepth):
    # NB: Python 2 integer division -- grid size halves at each depth.
    n=1024/(2**d)
    location_mask_x = np.float32(np.array(n*[n*[0]]))
    location_mask_y = np.float32(np.array(n*[n*[0]]))
    mask = np.float32(np.array(n*[n*[0]]))
    # Disk center (pixel centers) and radius scaled to this resolution.
    ox = n/2-0.5
    oy = n/2-0.5
    r0 = n*450.0/1024.0
    for iy in range(n):
        for ix in range(n):
            x = (ix - ox) / r0
            y = (iy - oy) / r0
            r = M.sqrt(x**2 + y**2)
            if r < 1:
                # Inside the disk: store heliographic-like angles.
                location_mask_x[iy][ix]=M.asin(x/(M.cos(M.asin(y))))
                location_mask_y[iy][ix]=M.asin(y)
                mask[iy][ix]=1
            else:
                # Outside the disk: sentinel longitude, zero mask.
                location_mask_x[iy][ix]=-4
                location_mask_y[iy][ix]=0
                mask[iy][ix]=0
    batch_location_supply[d] = np.array(dl_batch_size * [[location_mask_x, location_mask_y]])
    batch_location_supply[d] *= global_normalization * 10.0
    solar_disk_mask[d] = np.array(dl_batch_size * [nch(d)*[mask]])
    if gpu_flag:
        batch_location_supply[d]=cuda.to_gpu(batch_location_supply[d])
        solar_disk_mask[d]=cuda.to_gpu(solar_disk_mask[d])
def zoom_x2(batch):
    """Nearest-neighbour x2 upsampling of the trailing (H, W) axes of a
    chainer Variable, built from reshape/concat so it is differentiable."""
    shape = batch.data.shape
    channel_shape = shape[0:-2]
    height, width = shape[-2:]
    volume = reduce(operator.mul,shape,1)
    # Duplicate each element along the width axis...
    b1 = F.reshape(batch,(volume,1))
    b2 = F.concat([b1,b1],1)
    b3 = F.reshape(b2,(volume/width,2*width))
    # ...then duplicate each row along the height axis.
    b4 = F.concat([b3,b3],1)
    return F.reshape(b4, channel_shape + (2*height ,) + (2*width ,))
# Dataset of normalized magnetogram images, (re)filled by fetch_data().
global sun_data
sun_data = []
# One (convA, convB, convV) trio per depth: convA encodes before pooling,
# convB decodes after unpooling, convV mixes in the 2-channel location
# supply and projects back to nch(d) channels.
modelDict = dict()
for d in range(dlDepth):
    # Earlier channel sizing, superseded by nch():
    # modelDict['convA{}'.format(d)] = F.Convolution2D( 2**d, 2**(d+1),3,stride=1,pad=1)
    # modelDict['convB{}'.format(d)] = F.Convolution2D( 2**(d+1), 2**(d+1),3,stride=1,pad=1)
    # modelDict['convV{}'.format(d)] = F.Convolution2D( 2**(d+1)+2, 2**d,3,stride=1,pad=1)
    modelDict['convA{}'.format(d)] = F.Convolution2D( nch(d), nch(d+1),3,stride=1,pad=1)
    modelDict['convB{}'.format(d)] = F.Convolution2D( nch(d+1), nch(d+1),3,stride=1,pad=1)
    modelDict['convV{}'.format(d)] = F.Convolution2D( nch(d+1)+2, nch(d),3,stride=1,pad=1)
model=chainer.FunctionSet(**modelDict)
if gpu_flag:
    model.to_gpu()
def forward_dumb(x_data,train=True,level=1):
    """Trivial baseline 'autoencoder': average-pool *level* times, then
    upsample back.  Returns the de-normalized MSE against the input; used
    as the reference denominator when scoring the learned autoencoder."""
    x = Variable(x_data)
    y = Variable(x_data)
    for d in range(level):
        x = F.average_pooling_2d(x,2)
    for d in range(level):
        x = zoom_x2(x)
    ret = (global_normalization**(-2))*F.mean_squared_error(F.tanh(y),F.tanh(x))
    if(not train):
        plot_img(x.data, 'd{}'.format(level),
                 'Lv {} dumb encoder, msqe={}'.format(level, ret.data))
    return ret
def forward(x_data,train=True,level=1):
    """Run the ladder autoencoder down to depth *level* and back.

    The loss is computed at the deepest level only: ``y`` is the masked
    encoder input at depth level-1 and ``y_pred`` the masked decoder output
    at the same resolution.  Returns the de-normalized MSE as a Variable.
    """
    global dlDepth, training_mode_string
    # Dropout is active while training, except during 'p' (fine-tune) mode.
    do_dropout = train
    if training_mode_string == 'p' :
        do_dropout=False
    x = Variable(x_data, volatile = not train)
    y = Variable(x_data, volatile = not train) * Variable(solar_disk_mask[0], volatile= not train)
    if(not train and level==1):
        plot_img(y.data, 0, 'original magnetic field image')
    h = F.dropout(x, ratio = 0.1, train=do_dropout)
    # Encoder: conv + pool per level; capture the target just before the
    # deepest convA so the loss compares same-resolution tensors.
    for d in range(level):
        if d == level -1 :
            y = h * solar_disk_mask[d]
        h = F.tanh(getattr(model,'convA{}'.format(d))(h))
        if d < level - 1:
            h = F.dropout(h, ratio = 0.1, train=do_dropout)
        h = F.average_pooling_2d(h,2)
    # Decoder: unpool, then mix in the location-supply channels via convV.
    for d in reversed(range(level)):
        h = F.dropout(h, ratio = 0.1, train=do_dropout)
        h = F.tanh(getattr(model,'convB{}'.format(d))(h))
        h = zoom_x2(h)
        sup = Variable(batch_location_supply[d], volatile = not train)
        h = F.concat([h,sup],1)
        h = F.dropout(h, ratio = 0.1, train=do_dropout)
        h = F.tanh(getattr(model,'convV{}'.format(d))(h))
        if d == level -1 :
            y_pred = h * solar_disk_mask[d]
    ret = (global_normalization**(-2))*F.mean_squared_error(F.tanh(y),F.tanh(y_pred))
    if(not train):
        plot_img(h.data, level, 'Lv {} autoencoder, msqe={}'.format(level, ret.data))
    return ret
def reference(x_data,y_data):
    """Print the de-normalized MSE between two images (e.g. consecutive
    frames); gives a scale reference for the autoencoder losses."""
    global global_normalization
    x = Variable(x_data)
    y = Variable(y_data)
    print "rmsqerr_adj: {}".format((global_normalization**(-2))*F.mean_squared_error(x,y).data)
def fetch_data():
    """Refill the global ``sun_data`` list with one random day's worth of
    normalized HMI 720s magnetogram images synced from S3.

    Keeps retrying fresh random dates until the first file
    (work/0000.npz) exists locally; unreadable files are skipped.
    """
    global sun_data, global_normalization
    system('rm work/*')
    while not os.path.exists('work/0000.npz'):
        # Invalid dates (e.g. Feb 31) simply sync nothing and we retry.
        y=random.randrange(2011,2016)
        m=random.randrange(1,13)
        d=random.randrange(1,32)
        cmd='aws s3 sync --quiet s3://sdo/hmi/mag720x1024/{:04}/{:02}/{:02}/ work/'.format(y,m,d)
        system(cmd)
    p=subprocess.Popen('find work/',shell=True, stdout=subprocess.PIPE)
    stdout, _ = p.communicate()
    sun_data = []
    for fn in stdout.split('\n'):
        if not re.search(r'\.npz$',fn) : continue
        try:
            sun_data.append( global_normalization*np.load(fn)['img'])
        except Exception:
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; corrupt files are still skipped.
            continue
# if len(sun_data)==0:
# # where no data is available, add a dummy data for debugging
# for i in range(10):
# x=32*[0.333*i*i]
# xy=32*[x]
# sun_data.append(xy)
# One Adam optimizer per depth, owning only that level's conv trio (used in
# 'i' layer-wise training), plus a global optimizer over the whole model
# for the fine-tuning phases.
optimizer = dict()
for level in range(1,dlDepth+1):
    optimizer[level] = optimizers.Adam() #(alpha=3e-4)
    d=level-1
    model_of_level=dict()
    k='convA{}'.format(d)
    model_of_level[k]=modelDict[k]
    k='convB{}'.format(d)
    model_of_level[k]=modelDict[k]
    k='convV{}'.format(d)
    model_of_level[k]=modelDict[k]
    optimizer[level].setup(chainer.FunctionSet(**model_of_level).collect_parameters())
global_optimizer = optimizers.Adam()
global_optimizer.setup(model.collect_parameters())
# Main training loop: fetch a random day's data, then run 20 epochs on it
# before fetching the next batch of files.
epoch=0
while True:
    fetch_data()
    try:
        # Needs at least two images; on failure just fetch a new day.
        reference(np.array(sun_data[0]), np.array(sun_data[1]))
    except:
        continue
    for t in range(20): # use the same dataset
        epoch+=1
        # Random minibatch of single-channel images.
        batch= []
        for i in range(dl_batch_size):
            start = random.randrange(len(sun_data))
            batch.append([sun_data[start]])
        batch=np.array(batch)
        if gpu_flag :
            batch = cuda.to_gpu(batch)
        # Curriculum: unlock one more depth level every epoch_per_level
        # epochs, and cycle training mode i -> f -> p within each window.
        current_depth = min(dlDepth+1,max(2,2+epoch/epoch_per_level))
        eplm = epoch % epoch_per_level
        if eplm < epoch_per_level/3:
            training_mode_string = 'i'
            if epoch > (dlDepth+2)*epoch_per_level:
                training_mode_string = 'p'
        elif eplm < 2*epoch_per_level/3:
            training_mode_string = 'f'
        else:
            training_mode_string = 'p'
        # NOTE(review): this unconditionally overrides the i/f/p schedule
        # chosen just above -- looks like a leftover debug pin; confirm.
        training_mode_string = 'i'
        starting_depth = 1 if training_mode_string == 'i' else current_depth-1
        for level in range(starting_depth,current_depth):
            if level < current_depth-1:
                # Lower learning rate for already-trained shallow levels.
                optimizer[level].alpha=1e-4/current_depth
            if training_mode_string == 'i':
                # Layer-wise: update only this level's parameters.
                optimizer[level].zero_grads()
                loss = forward(batch, train=True,level=level)
                loss.backward()
                optimizer[level].update()
            else :
                # Fine-tune: update all parameters together.
                global_optimizer.zero_grads()
                loss = forward(batch, train=True,level=level)
                loss.backward()
                global_optimizer.update()
            print ' '*(level-1),epoch,loss.data
            with(open(log_train_fn,'a')) as fp:
                fp.write('{} {} {}\n'.format(level,epoch,loss.data))
            if epoch == 1:
                # Dump the computational graph once for inspection.
                with open("graph{}.dot".format(level), "w") as o:
                    o.write(c.build_computational_graph((loss, )).dump())
                with open("graph{}.wo_split.dot".format(level), "w") as o:
                    g = c.build_computational_graph((loss, ),
                                                    remove_split=True)
                    o.write(g.dump())
                print('graph generated')
            if epoch % 20 == 1:
                # Periodic evaluation against the dumb pooling baseline.
                loss = forward_dumb(batch, train=False,level=level)
                loss_dumb = loss.data
                loss = forward(batch,train=False,level=level)
                loss_auto = loss.data
                print "T",' '*(level-1),epoch,loss_auto, loss_auto/loss_dumb
                with(open(log_test_fn,'a')) as fp:
                    fp.write('{} {} {} {}\n'.format(level, epoch,loss_auto, loss_dumb))
| |
"""
Internationalization support.
"""
from __future__ import unicode_literals
import re
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils import six
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
    """Warning category for problems with translator comments."""
    # NOTE(review): emitted by the translation tooling elsewhere -- confirm.
    pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
    """
    The purpose of this class is to store the actual translation function upon
    receiving the first call to that function. After this is done, changes to
    USE_I18N will have no effect to which function is served upon request. If
    your tests rely on changing USE_I18N, you can delete all the functions
    from _trans.__dict__.
    Note that storing the function with setattr will have a noticeable
    performance effect, as access to the function goes the normal path,
    instead of using __getattr__.
    """
    def __getattr__(self, real_name):
        # Settings are read lazily, on first attribute access, so importing
        # this module never requires Django to be configured.
        from django.conf import settings
        if settings.USE_I18N:
            from django.utils.translation import trans_real as trans
        else:
            from django.utils.translation import trans_null as trans
        # Cache the resolved function on the instance; later lookups bypass
        # __getattr__ entirely.
        setattr(self, real_name, getattr(trans, real_name))
        return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
# Public translation helpers: thin wrappers delegating to the backend
# selected lazily by ``_trans`` (real translations or the null backend).
def gettext_noop(message):
    return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
    return _trans.gettext(message)
def ngettext(singular, plural, number):
    return _trans.ngettext(singular, plural, number)
def ugettext(message):
    return _trans.ugettext(message)
def ungettext(singular, plural, number):
    return _trans.ungettext(singular, plural, number)
def pgettext(context, message):
    return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
    return _trans.npgettext(context, singular, plural, number)
# Lazy variants are evaluated only when the string is actually used.
gettext_lazy = lazy(gettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
def lazy_number(func, resultclass, number=None, **kwargs):
    """Lazy plural-aware proxy around *func* (ngettext and friends).

    With a literal int *number* the plural form is fixed immediately.
    Otherwise *number* names a format key and the plural form is resolved
    at ``%``-formatting time from the right-hand side value.
    """
    if isinstance(number, int):
        kwargs['number'] = number
        proxy = lazy(func, resultclass)(**kwargs)
    else:
        class NumberAwareString(resultclass):
            def __mod__(self, rhs):
                if isinstance(rhs, dict) and number:
                    try:
                        number_value = rhs[number]
                    except KeyError:
                        raise KeyError('Your dictionary lacks key \'%s\'. '
                            'Please provide it, because it is required to '
                            'determine whether string is singular or plural.'
                            % number)
                else:
                    number_value = rhs
                # NOTE(review): mutating the closed-over kwargs makes the
                # proxy stateful across % operations -- confirm acceptable.
                kwargs['number'] = number_value
                translated = func(**kwargs)
                try:
                    translated = translated % rhs
                except TypeError:
                    # String doesn't contain a placeholder for the number
                    pass
                return translated
        # The proxy only needs to carry the subclass; the real work happens
        # in __mod__ above.
        proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
    return proxy
# Lazy plural wrappers (bytes for ngettext_lazy, text for the u*/np*
# variants) and the language activation delegates.
def ngettext_lazy(singular, plural, number=None):
    return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
def ungettext_lazy(singular, plural, number=None):
    return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number)
def npgettext_lazy(context, singular, plural, number=None):
    return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number)
def activate(language):
    return _trans.activate(language)
def deactivate():
    return _trans.deactivate()
class override(object):
    """Context manager that temporarily activates *language* (or disables
    translation entirely when it is None) and restores the previous state
    on exit.

    If *deactivate* is True, translations are deactivated on exit instead
    of restoring the previous language.
    """
    def __init__(self, language, deactivate=False):
        self.language = language
        self.deactivate = deactivate

    def __enter__(self):
        # Capture the current language on entry rather than at construction
        # time, so an instance built ahead of time is not stale and the
        # same instance can be reused.
        self.old_language = get_language()
        if self.language is not None:
            activate(self.language)
        else:
            deactivate_all()

    def __exit__(self, exc_type, exc_value, traceback):
        if self.deactivate:
            deactivate()
        else:
            activate(self.old_language)
# Remaining state/query helpers, all delegated to the active backend.
def get_language():
    return _trans.get_language()
def get_language_bidi():
    return _trans.get_language_bidi()
def check_for_language(lang_code):
    return _trans.check_for_language(lang_code)
def to_locale(language):
    return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
    return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path, supported=None):
    return _trans.get_language_from_path(path, supported=supported)
def templatize(src, origin=None):
    return _trans.templatize(src, origin)
def deactivate_all():
    return _trans.deactivate_all()
def _string_concat(*strings):
    """
    Lazy variant of string concatenation, needed for translations that are
    constructed from multiple parts.
    """
    return ''.join(force_text(s) for s in strings)
# Public lazy wrapper: evaluated only when the result is actually used.
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
    """Return the LANG_INFO entry for *lang_code*, falling back to its
    generic prefix (e.g. 'es' for 'es-ar'); raise KeyError when unknown."""
    from django.conf.locale import LANG_INFO
    if lang_code in LANG_INFO:
        return LANG_INFO[lang_code]
    if '-' not in lang_code:
        raise KeyError("Unknown language code %s." % lang_code)
    generic_lang_code = lang_code.split('-')[0]
    if generic_lang_code in LANG_INFO:
        return LANG_INFO[generic_lang_code]
    raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
# Matches any whitespace run that contains at least one newline.
# Raw string avoids the invalid-escape-sequence pitfall of '\s' in a
# plain string literal (the pattern itself is unchanged).
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
    """Strip *s* and collapse each newline-containing whitespace run into
    a single space."""
    return trim_whitespace_re.sub(' ', s.strip())
| |
from django.http import (
HttpResponse, JsonResponse,
HttpResponseForbidden, HttpResponseBadRequest,
)
from django.template.loader import render_to_string
from django.template.context_processors import csrf
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from bootcamp.decorators import ajax_required
from .models import Feed
# Items per page for feed pagination (despite the 'NUM_PAGES' name, this
# is passed to Paginator as the per-page count).
FEEDS_NUM_PAGES = 10
def feeds(request):
    """Render the first page of the global feed list."""
    first_page = Paginator(Feed.get_feeds(), FEEDS_NUM_PAGES).page(1)
    # Remember the newest visible feed id so AJAX polling knows where to start.
    from_feed = first_page[0].id if first_page else -1
    context = {
        'feeds': first_page,
        'from_feed': from_feed,
        'page': 1,
    }
    return render(request, 'feeds/feeds.html', context)
def feed(request, pk):
    """Display a single feed item, 404ing on an unknown pk."""
    return render(request, 'feeds/feed.html',
                  {'feed': get_object_or_404(Feed, pk=pk)})
@ajax_required
def load(request):
    """Return rendered HTML for one page of feeds (AJAX infinite scroll).

    400 on a non-integer page; an out-of-range page yields empty HTML.
    """
    page = request.GET.get('page')
    from_feed = request.GET.get('from_feed')
    feed_source = request.GET.get('feed_source')
    csrf_token = str(csrf(request)['csrf_token'])
    all_feeds = Feed.get_feeds(from_feed)
    if feed_source != 'all':
        all_feeds = all_feeds.filter(user__id=feed_source)
    paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
    try:
        feeds = paginator.page(page)
    except PageNotAnInteger:
        return HttpResponseBadRequest()
    except EmptyPage:
        feeds = []
    # Collect fragments and join once -- repeated string concatenation in
    # the loop was quadratic.
    fragments = [
        render_to_string('feeds/partial_feed.html', {
            'feed': feed,
            'user': request.user,
            'csrf_token': csrf_token,
        })
        for feed in feeds
    ]
    return HttpResponse(''.join(fragments))
def _html_feeds(last_feed, user, csrf_token, feed_source='all'):
    """Render every feed newer than *last_feed* (optionally restricted to
    one user's feeds) and return the concatenated HTML."""
    feeds = Feed.get_feeds_after(last_feed)
    if feed_source != 'all':
        feeds = feeds.filter(user__id=feed_source)
    # Single join instead of quadratic string concatenation in a loop.
    return ''.join(
        render_to_string('feeds/partial_feed.html',
                         {'feed': feed, 'user': user, 'csrf_token': csrf_token})
        for feed in feeds
    )
@ajax_required
def load_new(request):
    """Return HTML for all feeds newer than ``last_feed`` (all sources)."""
    token = str(csrf(request)['csrf_token'])
    markup = _html_feeds(request.GET.get('last_feed'), request.user, token)
    return HttpResponse(markup)
@ajax_required
def check(request):
    """Return how many feeds are newer than ``last_feed`` for the source."""
    newer = Feed.get_feeds_after(request.GET.get('last_feed'))
    source = request.GET.get('feed_source')
    if source != 'all':
        newer = newer.filter(user__id=source)
    return HttpResponse(newer.count())
@login_required
@ajax_required
def post(request):
    """Create a feed entry from POSTed text (truncated to 255 chars) and
    return the refreshed feed HTML since ``last_feed``."""
    last_feed = request.POST.get('last_feed')
    user = request.user
    csrf_token = str(csrf(request)['csrf_token'])
    text = request.POST['post'].strip()[:255]
    if text:
        Feed.objects.create(post=text, user=user)
    return HttpResponse(_html_feeds(last_feed, user, csrf_token))
@login_required
@ajax_required
def like(request):
    """Register the current user's like via Feed.like and respond with the
    feed's updated like count."""
    liked = Feed.like(request.POST['feed'], request.user)
    return HttpResponse(liked.likes)
@login_required
@ajax_required
def comment(request):
    """POST: add a comment (max 255 chars) to a feed and notify interested
    users; GET (or empty POST text): return the rendered comment list."""
    if request.method == 'POST':
        target = Feed.objects.get(pk=request.POST['feed'])
        text = request.POST['post'].strip()
        if text:
            author = request.user
            target.comment(user=author, post=text[:255])
            author.profile.notify_commented(target)
            author.profile.notify_also_commented(target)
        return render(request, 'feeds/partial_feed_comments.html',
                      {'feed': target})
    target = Feed.objects.get(pk=request.GET.get('feed'))
    return render(request, 'feeds/partial_feed_comments.html', {'feed': target})
@login_required
@ajax_required
def update(request):
    """Return like/comment counts as JSON for feeds whose ids fall in the
    visible [last_feed, first_feed] range."""
    first_feed = request.GET.get('first_feed')
    last_feed = request.GET.get('last_feed')
    feed_source = request.GET.get('feed_source')
    visible = Feed.get_feeds().filter(id__range=(last_feed, first_feed))
    if feed_source != 'all':
        visible = visible.filter(user__id=feed_source)
    dump = {
        item.pk: {'likes': item.likes, 'comments': item.comments}
        for item in visible
    }
    return JsonResponse(dump, safe=False)
@login_required
@ajax_required
def track_comments(request):
    """Return the rendered comment list for one feed."""
    tracked = Feed.objects.get(pk=request.GET.get('feed'))
    return render(request, 'feeds/partial_feed_comments.html',
                  {'feed': tracked})
@login_required
@ajax_required
def remove(request):
    """Delete a feed (and its likes) when the requester owns it or is a
    superuser; recount the parent's comments afterwards.

    Returns 400 for an unknown feed, 403 when not permitted.
    """
    feed = Feed.objects.filter(pk=request.POST.get('feed')).first()
    if not feed:
        # Bug fix: the response class itself was returned before
        # (missing parentheses); instantiate it.
        return HttpResponseBadRequest()
    if feed.user == request.user or request.user.is_superuser:
        parent = feed.parent
        for like in feed.get_likes():
            like.delete()
        feed.delete()
        if parent:
            parent.calculate_comments()
        return HttpResponse()
    return HttpResponseForbidden()
| |
from django.template import RequestContext, loader
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from models import (
StatusCheck, GraphiteStatusCheck, JenkinsStatusCheck, HttpStatusCheck, ICMPStatusCheck,
StatusCheckResult, UserProfile, Service, Instance, Shift, get_duty_officers)
from tasks import run_status_check as _run_status_check
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import (
DetailView, CreateView, UpdateView, ListView, DeleteView, TemplateView, FormView, View)
from django import forms
from .graphite import get_data, get_matching_metrics
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.timezone import utc
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from cabot.cabotapp import alert
from models import AlertPluginUserData
from django.forms.models import (inlineformset_factory, modelformset_factory)
from django import shortcuts
from itertools import groupby, dropwhile, izip_longest
import requests
import json
import re
class LoginRequiredMixin(object):
    """Mixin applying ``login_required`` to every dispatch of a CBV."""
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
@login_required
def subscriptions(request):
    """ Simple list of all checks """
    # Renders every service, every active user, and the current duty
    # officers on one page.
    t = loader.get_template('cabotapp/subscriptions.html')
    services = Service.objects.all()
    users = User.objects.filter(is_active=True)
    c = RequestContext(request, {
        'services': services,
        'users': users,
        'duty_officers': get_duty_officers(),
    })
    return HttpResponse(t.render(c))
@login_required
def run_status_check(request, pk):
    """Runs a specific check"""
    # Runs synchronously (the task function is called directly), then
    # redirects back to the check's detail page.
    _run_status_check(check_or_id=pk)
    return HttpResponseRedirect(reverse('check', kwargs={'pk': pk}))
# NOTE(review): unlike the neighbouring views, none of these duplicate_*
# views are wrapped in @login_required -- confirm whether that is intended.
def duplicate_icmp_check(request, pk):
    """Clone an ICMP check and redirect to the copy's edit form."""
    pc = StatusCheck.objects.get(pk=pk)
    npk = pc.duplicate()
    return HttpResponseRedirect(reverse('update-icmp-check', kwargs={'pk': npk}))
def duplicate_instance(request, pk):
    """Clone an instance and redirect to the copy's edit form."""
    instance = Instance.objects.get(pk=pk)
    new_instance = instance.duplicate()
    # NOTE(review): 'pk' receives ``new_instance`` itself; if duplicate()
    # returns a model instance (as the variable name suggests) this should
    # be ``new_instance.pk`` -- confirm against Instance.duplicate().
    return HttpResponseRedirect(reverse('update-instance', kwargs={'pk': new_instance}))
def duplicate_http_check(request, pk):
    """Clone an HTTP check and redirect to the copy's edit form."""
    pc = StatusCheck.objects.get(pk=pk)
    npk = pc.duplicate()
    return HttpResponseRedirect(reverse('update-http-check', kwargs={'pk': npk}))
def duplicate_graphite_check(request, pk):
    """Clone a Graphite check and redirect to the copy's edit form."""
    pc = StatusCheck.objects.get(pk=pk)
    npk = pc.duplicate()
    return HttpResponseRedirect(reverse('update-graphite-check', kwargs={'pk': npk}))
def duplicate_jenkins_check(request, pk):
    """Clone a Jenkins check and redirect to the copy's edit form."""
    pc = StatusCheck.objects.get(pk=pk)
    npk = pc.duplicate()
    return HttpResponseRedirect(reverse('update-jenkins-check', kwargs={'pk': npk}))
class StatusCheckResultDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a single status-check result."""
    model = StatusCheckResult
    context_object_name = 'result'
class SymmetricalForm(forms.ModelForm):
    """ModelForm that round-trips the reverse M2M relations named in
    ``symmetrical_fields`` (e.g. service_set/instance_set): they are loaded
    as initial data on edit and written back after save."""
    symmetrical_fields = () # Iterable of 2-tuples (field, model)
    def __init__(self, *args, **kwargs):
        super(SymmetricalForm, self).__init__(*args, **kwargs)
        if self.instance and self.instance.pk:
            # Pre-populate reverse-relation fields from the saved instance.
            for field in self.symmetrical_fields:
                self.fields[field].initial = getattr(
                    self.instance, field).all()
    def save(self, commit=True):
        instance = super(SymmetricalForm, self).save(commit=False)
        if commit:
            instance.save()
            if instance.pk:
                # Write the selected reverse relations back, then save M2M.
                for field in self.symmetrical_fields:
                    setattr(instance, field, self.cleaned_data[field])
                self.save_m2m()
        return instance
# Widget overrides shared by every StatusCheck form below.
base_widgets = {
    'name': forms.TextInput(attrs={
        'style': 'width:30%',
    }),
    'importance': forms.RadioSelect(),
}
class StatusCheckForm(SymmetricalForm):
    """Base form for all check types; exposes the reverse service/instance
    links as chosen-style multi-selects."""
    symmetrical_fields = ('service_set', 'instance_set')
    service_set = forms.ModelMultipleChoiceField(
        queryset=Service.objects.all(),
        required=False,
        help_text='Link to service(s).',
        widget=forms.SelectMultiple(
            attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            },
        )
    )
    instance_set = forms.ModelMultipleChoiceField(
        queryset=Instance.objects.all(),
        required=False,
        help_text='Link to instance(s).',
        widget=forms.SelectMultiple(
            attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            },
        )
    )
class GraphiteStatusCheckForm(StatusCheckForm):
    """Form for Graphite metric threshold checks."""
    class Meta:
        model = GraphiteStatusCheck
        fields = (
            'name',
            'metric',
            'check_type',
            'value',
            'frequency',
            'active',
            'importance',
            'expected_num_hosts',
            'expected_num_metrics',
            'debounce',
        )
        widgets = dict(**base_widgets)
        widgets.update({
            'value': forms.TextInput(attrs={
                'style': 'width: 100px',
                'placeholder': 'threshold value',
            }),
            'metric': forms.TextInput(attrs={
                'style': 'width: 100%',
                'placeholder': 'graphite metric key'
            }),
            'check_type': forms.Select(attrs={
                'data-rel': 'chosen',
            })
        })
class ICMPStatusCheckForm(StatusCheckForm):
    """Form for ping (ICMP) checks."""
    class Meta:
        model = ICMPStatusCheck
        fields = (
            'name',
            'frequency',
            'importance',
            'active',
            'debounce',
        )
        widgets = dict(**base_widgets)
class HttpStatusCheckForm(StatusCheckForm):
    """Form for HTTP endpoint checks (status code / body-match / TLS)."""
    class Meta:
        model = HttpStatusCheck
        fields = (
            'name',
            'endpoint',
            'username',
            'password',
            'text_match',
            'status_code',
            'timeout',
            'verify_ssl_certificate',
            'frequency',
            'importance',
            'active',
            'debounce',
        )
        widgets = dict(**base_widgets)
        widgets.update({
            'endpoint': forms.TextInput(attrs={
                'style': 'width: 100%',
                'placeholder': 'https://www.arachnys.com',
            }),
            'username': forms.TextInput(attrs={
                'style': 'width: 30%',
            }),
            'password': forms.TextInput(attrs={
                'style': 'width: 30%',
            }),
            'text_match': forms.TextInput(attrs={
                'style': 'width: 100%',
                'placeholder': '[Aa]rachnys\s+[Rr]ules',
            }),
            'status_code': forms.TextInput(attrs={
                'style': 'width: 20%',
                'placeholder': '200',
            }),
        })
class JenkinsStatusCheckForm(StatusCheckForm):
    """Form for Jenkins job checks (frequency is pinned by the views)."""
    class Meta:
        model = JenkinsStatusCheck
        fields = (
            'name',
            'importance',
            'debounce',
            'max_queued_build_time',
        )
        widgets = dict(**base_widgets)
class InstanceForm(SymmetricalForm):
    """Create/edit form for an Instance, exposing its reverse service
    links and restricting notification targets to active users."""
    symmetrical_fields = ('service_set',)
    service_set = forms.ModelMultipleChoiceField(
        queryset=Service.objects.all(),
        required=False,
        help_text='Link to service(s).',
        widget=forms.SelectMultiple(
            attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            },
        )
    )
    class Meta:
        model = Instance
        # NOTE(review): template_name is not a ModelForm Meta option;
        # presumably informational only -- confirm.
        template_name = 'instance_form.html'
        fields = (
            'name',
            'address',
            'users_to_notify',
            'status_checks',
            'service_set',
        )
        widgets = {
            'name': forms.TextInput(attrs={'style': 'width: 30%;'}),
            'address': forms.TextInput(attrs={'style': 'width: 70%;'}),
            'status_checks': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'service_set': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'alerts': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'users_to_notify': forms.CheckboxSelectMultiple(),
            'hackpad_id': forms.TextInput(attrs={'style': 'width:30%;'}),
        }
    def __init__(self, *args, **kwargs):
        ret = super(InstanceForm, self).__init__(*args, **kwargs)
        # Only active users may be selected as notification targets.
        self.fields['users_to_notify'].queryset = User.objects.filter(
            is_active=True)
        return ret
class ServiceForm(forms.ModelForm):
    """Create/edit form for a Service; validates the recovery-instructions
    (hackpad) link against a settings whitelist."""
    class Meta:
        model = Service
        # NOTE(review): template_name is not a ModelForm Meta option;
        # presumably informational only -- confirm.
        template_name = 'service_form.html'
        fields = (
            'name',
            'url',
            'users_to_notify',
            'status_checks',
            'instances',
            'alerts',
            'alerts_enabled',
            'hackpad_id',
        )
        widgets = {
            'name': forms.TextInput(attrs={'style': 'width: 30%;'}),
            'url': forms.TextInput(attrs={'style': 'width: 70%;'}),
            'status_checks': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'instances': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'alerts': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'users_to_notify': forms.CheckboxSelectMultiple(),
            'hackpad_id': forms.TextInput(attrs={'style': 'width:30%;'}),
        }
    def __init__(self, *args, **kwargs):
        ret = super(ServiceForm, self).__init__(*args, **kwargs)
        # Only active users may be selected as notification targets.
        self.fields['users_to_notify'].queryset = User.objects.filter(
            is_active=True)
        return ret
    def clean_hackpad_id(self):
        """Allow only snippet links matching the configured whitelist."""
        value = self.cleaned_data['hackpad_id']
        if not value:
            return ''
        for pattern in settings.RECOVERY_SNIPPETS_WHITELIST:
            if re.match(pattern, value):
                return value
        raise ValidationError('Please specify a valid JS snippet link')
class StatusCheckReportForm(forms.Form):
    """Date-ranged downtime report over a set of checks for one service."""
    service = forms.ModelChoiceField(
        queryset=Service.objects.all(),
        widget=forms.HiddenInput
    )
    checks = forms.ModelMultipleChoiceField(
        queryset=StatusCheck.objects.all(),
        widget=forms.SelectMultiple(
            attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            },
        )
    )
    date_from = forms.DateField(label='From', widget=forms.DateInput(attrs={'class': 'datepicker'}))
    date_to = forms.DateField(label='To', widget=forms.DateInput(attrs={'class': 'datepicker'}))
    def get_report(self):
        """Annotate each selected check with ``problems`` (failure windows
        as (start, end, duration) tuples; an open failure gets end=None and
        is measured against now) and ``success_rate`` (percentage), then
        return the checks."""
        checks = self.cleaned_data['checks']
        now = timezone.now()
        for check in checks:
            # Group results of the check by status (failed alternating with succeeded),
            # take time of the first one in each group (starting from a failed group),
            # split them into pairs and form the list of problems.
            results = check.statuscheckresult_set.filter(
                time__gte=self.cleaned_data['date_from'],
                time__lt=self.cleaned_data['date_to'] + timedelta(days=1)
            ).order_by('time')
            groups = dropwhile(lambda item: item[0], groupby(results, key=lambda r: r.succeeded))
            times = [next(group).time for succeeded, group in groups]
            # Pair (failure start, recovery) times; izip_longest pads the
            # final open failure with None.
            pairs = izip_longest(*([iter(times)] * 2))
            check.problems = [(start, end, (end or now) - start) for start, end in pairs]
            if results:
                check.success_rate = results.filter(succeeded=True).count() / float(len(results)) * 100
        return checks
class CheckCreateView(LoginRequiredMixin, CreateView):
    """Shared create view for every check type; subclasses bind the model
    and form class."""
    template_name = 'cabotapp/statuscheck_form.html'
    def form_valid(self, form):
        # Record who created the check.
        form.instance.created_by = self.request.user
        return super(CheckCreateView, self).form_valid(form)
    def get_initial(self):
        """Seed the form from ?metric=, ?service= and ?instance= params."""
        if self.initial:
            initial = self.initial
        else:
            initial = {}
        metric = self.request.GET.get('metric')
        if metric:
            initial['metric'] = metric
        service_id = self.request.GET.get('service')
        instance_id = self.request.GET.get('instance')
        if service_id:
            try:
                service = Service.objects.get(id=service_id)
                initial['service_set'] = [service]
            except Service.DoesNotExist:
                pass
        if instance_id:
            try:
                instance = Instance.objects.get(id=instance_id)
                initial['instance_set'] = [instance]
            except Instance.DoesNotExist:
                pass
        return initial
    def get_success_url(self):
        # Return to the service/instance the check was created from.
        if self.request.GET.get('service'):
            return reverse('service', kwargs={'pk': self.request.GET.get('service')})
        if self.request.GET.get('instance'):
            return reverse('instance', kwargs={'pk': self.request.GET.get('instance')})
        return reverse('checks')
class CheckUpdateView(LoginRequiredMixin, UpdateView):
    """Shared edit view for every check type; subclasses bind model/form."""
    template_name = 'cabotapp/statuscheck_form.html'
    def get_success_url(self):
        return reverse('check', kwargs={'pk': self.object.id})
# Concrete create/update views: one pair per check type, binding the
# polymorphic model to its form class.
class ICMPCheckCreateView(CheckCreateView):
    model = ICMPStatusCheck
    form_class = ICMPStatusCheckForm
class ICMPCheckUpdateView(CheckUpdateView):
    model = ICMPStatusCheck
    form_class = ICMPStatusCheckForm
class GraphiteCheckUpdateView(CheckUpdateView):
    model = GraphiteStatusCheck
    form_class = GraphiteStatusCheckForm
class GraphiteCheckCreateView(CheckCreateView):
    model = GraphiteStatusCheck
    form_class = GraphiteStatusCheckForm
class HttpCheckCreateView(CheckCreateView):
    model = HttpStatusCheck
    form_class = HttpStatusCheckForm
class HttpCheckUpdateView(CheckUpdateView):
    model = HttpStatusCheck
    form_class = HttpStatusCheckForm
class JenkinsCheckCreateView(CheckCreateView):
    model = JenkinsStatusCheck
    form_class = JenkinsStatusCheckForm
    def form_valid(self, form):
        # Jenkins checks always run at frequency 1 (the form omits it).
        form.instance.frequency = 1
        return super(JenkinsCheckCreateView, self).form_valid(form)
class JenkinsCheckUpdateView(CheckUpdateView):
    model = JenkinsStatusCheck
    form_class = JenkinsStatusCheckForm
    def form_valid(self, form):
        # Keep frequency pinned to 1 on edits as well.
        form.instance.frequency = 1
        return super(JenkinsCheckUpdateView, self).form_valid(form)
class StatusCheckListView(LoginRequiredMixin, ListView):
    """All checks, sorted by name, with their linked services/instances
    prefetched to avoid N+1 queries."""
    model = StatusCheck
    context_object_name = 'checks'
    def get_queryset(self):
        return StatusCheck.objects.all().order_by('name').prefetch_related('service_set', 'instance_set')
class StatusCheckDeleteView(LoginRequiredMixin, DeleteView):
    """Confirmation page + deletion for a check."""
    model = StatusCheck
    success_url = reverse_lazy('checks')
    context_object_name = 'check'
    template_name = 'cabotapp/statuscheck_confirm_delete.html'
class StatusCheckDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a check, including its 100 most recent results."""
    model = StatusCheck
    context_object_name = 'check'
    template_name = 'cabotapp/statuscheck_detail.html'
    def render_to_response(self, context, *args, **kwargs):
        # PEP 8: compare against None with identity, not equality; also
        # guards against a missing context dict.
        if context is None:
            context = {}
        context['checkresults'] = self.object.statuscheckresult_set.order_by(
            '-time_complete')[:100]
        return super(StatusCheckDetailView, self).render_to_response(context, *args, **kwargs)
class UserProfileUpdateView(LoginRequiredMixin, View):
    """Entry point for a user's alert settings: redirects to the
    'General' tab of the per-plugin settings view."""
    model = AlertPluginUserData
    def get(self, *args, **kwargs):
        return HttpResponseRedirect(reverse('update-alert-user-data', args=(self.kwargs['pk'], u'General')))
class UserProfileUpdateAlert(LoginRequiredMixin, View):
    """Edit one alert-plugin settings tab (or the 'General' user settings)
    for the user identified by ``pk``."""
    template = loader.get_template('cabotapp/alertpluginuserdata_form.html')
    model = AlertPluginUserData
    def get(self, request, pk, alerttype):
        """Render the settings form for *alerttype*."""
        try:
            profile = UserProfile.objects.get(user=pk)
        except UserProfile.DoesNotExist:
            # First visit: lazily create the profile for this user.
            user = User.objects.get(id=pk)
            profile = UserProfile(user=user)
            profile.save()
        profile.user_data()
        if (alerttype == u'General'):
            form = GeneralSettingsForm(initial={
                'first_name': profile.user.first_name,
                'last_name' : profile.user.last_name,
                'email_address' : profile.user.email,
                'enabled' : profile.user.is_active,
            })
        else:
            plugin_userdata = self.model.objects.get(title=alerttype, user=profile)
            form_model = get_object_form(type(plugin_userdata))
            form = form_model(instance=plugin_userdata)
        c = RequestContext(request, {
            'form': form,
            'alert_preferences': profile.user_data(),
        })
        return HttpResponse(self.template.render(c))
    def post(self, request, pk, alerttype):
        """Persist the submitted settings and redirect back to the tab."""
        profile = UserProfile.objects.get(user=pk)
        if (alerttype == u'General'):
            form = GeneralSettingsForm(request.POST)
            if form.is_valid():
                profile.user.first_name = form.cleaned_data['first_name']
                profile.user.last_name = form.cleaned_data['last_name']
                profile.user.is_active = form.cleaned_data['enabled']
                profile.user.email = form.cleaned_data['email_address']
                profile.user.save()
                return HttpResponseRedirect(reverse('update-alert-user-data', args=(self.kwargs['pk'], alerttype)))
        else:
            plugin_userdata = self.model.objects.get(title=alerttype, user=profile)
            form_model = get_object_form(type(plugin_userdata))
            form = form_model(request.POST, instance=plugin_userdata)
            # Bug fix: validate before saving (save() previously ran
            # unconditionally, before is_valid()).
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(reverse('update-alert-user-data', args=(self.kwargs['pk'], alerttype)))
        # Invalid submission: re-render the bound form with its errors
        # (previously this path fell through and returned None -> HTTP 500).
        c = RequestContext(request, {
            'form': form,
            'alert_preferences': profile.user_data(),
        })
        return HttpResponse(self.template.render(c))
def get_object_form(model_type):
    """Build a ModelForm class bound to *model_type* (an AlertPluginUserData
    subclass) for editing plugin-specific settings fields."""
    class AlertPreferencesForm(forms.ModelForm):
        class Meta:
            model = model_type
        def is_valid(self):
            # HACK: plugin settings forms skip validation entirely and
            # accept whatever was submitted.
            return True
    return AlertPreferencesForm
class GeneralSettingsForm(forms.Form):
    """Name/email/enabled settings shown on the 'General' alert tab."""
    first_name = forms.CharField(label='First name', max_length=30, required=False)
    last_name = forms.CharField(label='Last name', max_length=30, required=False)
    # NOTE(review): max_length=30 is tight for email addresses -- confirm.
    email_address = forms.CharField(label='Email Address', max_length=30, required=False)
    enabled = forms.BooleanField(label='Enabled', required=False)
# Index pages, both sorted by name with checks prefetched to avoid N+1.
class InstanceListView(LoginRequiredMixin, ListView):
    model = Instance
    context_object_name = 'instances'
    def get_queryset(self):
        return Instance.objects.all().order_by('name').prefetch_related('status_checks')
class ServiceListView(LoginRequiredMixin, ListView):
    model = Service
    context_object_name = 'services'
    def get_queryset(self):
        return Service.objects.all().order_by('name').prefetch_related('status_checks')
class InstanceDetailView(LoginRequiredMixin, DetailView):
    """Instance detail page, pre-filling the downtime report form with the
    current calendar month."""
    model = Instance
    context_object_name = 'instance'
    def get_context_data(self, **kwargs):
        context = super(InstanceDetailView, self).get_context_data(**kwargs)
        # First day of the current month.
        date_from = date.today() - relativedelta(day=1)
        context['report_form'] = StatusCheckReportForm(initial={
            'checks': self.object.status_checks.all(),
            # NOTE(review): 'service' receives an Instance here -- confirm
            # this is what StatusCheckReportForm expects.
            'service': self.object,
            'date_from': date_from,
            'date_to': date_from + relativedelta(months=1) - relativedelta(days=1)
        })
        return context
class ServiceDetailView(LoginRequiredMixin, DetailView):
    """Detail page for one Service, with a pre-filled report form."""
    model = Service
    context_object_name = 'service'

    def get_context_data(self, **kwargs):
        context = super(ServiceDetailView, self).get_context_data(**kwargs)
        # Default the report range to the current calendar month.
        month_start = date.today() - relativedelta(day=1)
        month_end = month_start + relativedelta(months=1) - relativedelta(days=1)
        context['report_form'] = StatusCheckReportForm(initial={
            'alerts': self.object.alerts.all(),
            'checks': self.object.status_checks.all(),
            'service': self.object,
            'date_from': month_start,
            'date_to': month_end,
        })
        return context
class InstanceCreateView(LoginRequiredMixin, CreateView):
    """Create a new Instance, guaranteeing it has a default ICMP check."""
    model = Instance
    form_class = InstanceForm

    def form_valid(self, form):
        ret = super(InstanceCreateView, self).form_valid(form)
        # .exists() instead of .count() == 0: avoids counting every row
        # just to test emptiness.
        if not self.object.status_checks.filter(
                polymorphic_ctype__model='icmpstatuscheck').exists():
            self.generate_default_ping_check(self.object)
        return ret

    def generate_default_ping_check(self, obj):
        """Create and attach a default ICMP (ping) status check to *obj*."""
        pc = ICMPStatusCheck(
            name="Default Ping Check for %s" % obj.name,
            frequency=5,
            importance=Service.ERROR_STATUS,
            debounce=0,
            created_by=None,
        )
        pc.save()
        obj.status_checks.add(pc)

    def get_success_url(self):
        return reverse('instance', kwargs={'pk': self.object.id})

    def get_initial(self):
        """Pre-select the service given via ?service=<id>, if any.

        BUG FIX: the original returned ``self.initial`` itself and then
        mutated it; ``self.initial`` is shared state on the view, so a
        pre-selected service could leak into unrelated requests. Work on
        a copy instead (this mirrors Django's own FormMixin.get_initial).
        """
        initial = self.initial.copy() if self.initial else {}
        service_id = self.request.GET.get('service')
        if service_id:
            try:
                initial['service_set'] = [Service.objects.get(id=service_id)]
            except Service.DoesNotExist:
                pass
        return initial
class ServiceCreateView(LoginRequiredMixin, CreateView):
    """Create a new Service."""
    model = Service
    form_class = ServiceForm
    # NOTE(review): this call sits in the class body, so it runs exactly once,
    # at import time -- not per request. If the intent was to refresh alert
    # plugins on every service creation it belongs in form_valid(); confirm
    # before moving it, since relocating it changes when side effects occur.
    alert.update_alert_plugins()

    def get_success_url(self):
        return reverse('service', kwargs={'pk': self.object.id})
class InstanceUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an existing Instance and return to its detail page."""
    model = Instance
    form_class = InstanceForm

    def get_success_url(self):
        # Back to the detail page of the instance that was just edited.
        return reverse('instance', kwargs={'pk': self.object.id})
class ServiceUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an existing Service and return to its detail page."""
    model = Service
    form_class = ServiceForm

    def get_success_url(self):
        # Back to the detail page of the service that was just edited.
        return reverse('service', kwargs={'pk': self.object.id})
class ServiceDeleteView(LoginRequiredMixin, DeleteView):
    """Confirm-and-delete page for a Service; redirects to the service list."""
    model = Service
    success_url = reverse_lazy('services')
    context_object_name = 'service'
    template_name = 'cabotapp/service_confirm_delete.html'
class InstanceDeleteView(LoginRequiredMixin, DeleteView):
    """Confirm-and-delete page for an Instance; redirects to the instance list."""
    model = Instance
    success_url = reverse_lazy('instances')
    context_object_name = 'instance'
    template_name = 'cabotapp/instance_confirm_delete.html'
class ShiftListView(LoginRequiredMixin, ListView):
    """List upcoming (not-yet-ended, not deleted) on-call shifts."""
    model = Shift
    context_object_name = 'shifts'

    def get_queryset(self):
        # Only shifts that have not ended yet, soonest first.
        now = datetime.utcnow().replace(tzinfo=utc)
        return Shift.objects.filter(end__gt=now,
                                    deleted=False).order_by('start')
class StatusCheckReportView(LoginRequiredMixin, TemplateView):
    """Render a report of status-check results for a service and date range.

    The form is bound from the query string (GET parameters).
    """
    template_name = 'cabotapp/statuscheck_report.html'

    def get_context_data(self, **kwargs):
        form = StatusCheckReportForm(self.request.GET)
        if form.is_valid():
            return {'checks': form.get_report(), 'service': form.cleaned_data['service']}
        # NOTE(review): an invalid form falls through and returns None,
        # so the template renders with no context -- confirm whether an
        # explicit error response was intended.
# Misc JSON api and other stuff
def checks_run_recently(request):
    """
    Checks whether or not stuff is running by looking to see if checks have run in last 10 mins
    """
    cutoff = datetime.utcnow().replace(tzinfo=utc) - timedelta(minutes=10)
    recent_results = StatusCheckResult.objects.filter(time_complete__gte=cutoff)
    if not recent_results.exists():
        return HttpResponse('Checks not running')
    return HttpResponse('Checks running')
def jsonify(d):
    """Serialize *d* to JSON and wrap it in an application/json response."""
    payload = json.dumps(d)
    return HttpResponse(payload, content_type='application/json')
@login_required
def graphite_api_data(request):
    """JSON endpoint proxying Graphite data for ?metric=<name>.

    Tries to fetch the metric's data; when that fails or returns nothing,
    falls back to a metric-name search. Graphite connection errors during
    the fallback are reported as {'status': 'error', ...}.

    BUG FIX: the original used the Python-2-only ``except X, e`` comma
    syntax (a SyntaxError on Python 3); ``except X as e`` is valid on
    Python 2.6+ and 3. The first handler also bound ``e`` without using it.
    """
    metric = request.GET.get('metric')
    data = None
    matching_metrics = None
    try:
        data = get_data(metric)
    except requests.exceptions.RequestException:
        # Best effort -- fall through to the metric-name search below.
        pass
    if not data:
        try:
            matching_metrics = get_matching_metrics(metric)
        except requests.exceptions.RequestException as e:
            return jsonify({'status': 'error', 'message': str(e)})
        matching_metrics = {'metrics': matching_metrics}
    return jsonify({'status': 'ok', 'data': data, 'matchingMetrics': matching_metrics})
| |
"""
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
html = markdown.markdown(your_text_string)
See <https://pythonhosted.org/Markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007-2013 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE for details).
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from .__version__ import version, version_info # noqa
import codecs
import sys
import logging
import warnings
import importlib
from . import util
from .preprocessors import build_preprocessors
from .blockprocessors import build_block_parser
from .treeprocessors import build_treeprocessors
from .inlinepatterns import build_inlinepatterns
from .postprocessors import build_postprocessors
from .extensions import Extension
from .serializers import to_html_string, to_xhtml_string
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
logger = logging.getLogger('MARKDOWN')
class Markdown(object):
    """Convert Markdown to HTML."""

    doc_tag = "div"  # Element used to wrap document - later removed

    # Defaults for the simple keyword options accepted by __init__().
    option_defaults = {
        'html_replacement_text': '[HTML_REMOVED]',
        'tab_length': 4,
        'enable_attributes': True,
        'smart_emphasis': True,
        'lazy_ol': True,
    }

    # Output-format name -> serializer callable.
    output_formats = {
        'html': to_html_string,
        'html4': to_html_string,
        'html5': to_html_string,
        'xhtml': to_xhtml_string,
        'xhtml1': to_xhtml_string,
        'xhtml5': to_xhtml_string,
    }

    # Characters that may be backslash-escaped in markdown source.
    ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
                     '(', ')', '>', '#', '+', '-', '.', '!']

    def __init__(self, *args, **kwargs):
        """
        Creates a new Markdown instance.

        Keyword arguments:

        * extensions: A list of extensions.
          If they are of type string, the module mdx_name.py will be loaded.
          If they are a subclass of markdown.Extension, they will be used
          as-is.
        * extension_configs: Configuration settings for extensions.
        * output_format: Format of output. Supported formats are:
            * "xhtml1": Outputs XHTML 1.x. Default.
            * "xhtml5": Outputs XHTML style tags of HTML 5
            * "xhtml": Outputs latest supported version of XHTML
              (currently XHTML 1.1).
            * "html4": Outputs HTML 4
            * "html5": Outputs HTML style tags of HTML 5
            * "html": Outputs latest supported version of HTML
              (currently HTML 4).
          Note that it is suggested that the more specific formats ("xhtml1"
          and "html4") be used as "xhtml" or "html" may change in the future
          if it makes sense at that time.
        * safe_mode: Deprecated! Disallow raw html. One of "remove", "replace"
          or "escape".
        * html_replacement_text: Deprecated! Text used when safe_mode is set
          to "replace".
        * tab_length: Length of tabs in the source. Default: 4
        * enable_attributes: Enable the conversion of attributes. Default: True
        * smart_emphasis: Treat `_connected_words_` intelligently Default: True
        * lazy_ol: Ignore number of first item of ordered lists. Default: True

        """
        # For backward compatibility, loop through old positional args
        pos = ['extensions', 'extension_configs', 'safe_mode', 'output_format']
        for c, arg in enumerate(args):
            if pos[c] not in kwargs:
                kwargs[pos[c]] = arg
            if c+1 == len(pos):  # pragma: no cover
                # ignore any additional args
                break
        if len(args):
            warnings.warn('Positional arguments are deprecated in Markdown. '
                          'Use keyword arguments only.',
                          DeprecationWarning)

        # Loop through kwargs and assign defaults
        for option, default in self.option_defaults.items():
            setattr(self, option, kwargs.get(option, default))

        self.safeMode = kwargs.get('safe_mode', False)
        if self.safeMode and 'enable_attributes' not in kwargs:
            # Disable attributes in safeMode when not explicitly set
            self.enable_attributes = False

        if 'safe_mode' in kwargs:
            warnings.warn('"safe_mode" is deprecated in Python-Markdown. '
                          'Use an HTML sanitizer (like '
                          'Bleach https://bleach.readthedocs.io/) '
                          'if you are parsing untrusted markdown text. '
                          'See the 2.6 release notes for more info',
                          DeprecationWarning)

        if 'html_replacement_text' in kwargs:
            warnings.warn('The "html_replacement_text" keyword is '
                          'deprecated along with "safe_mode".',
                          DeprecationWarning)

        self.registeredExtensions = []
        self.docType = ""
        self.stripTopLevelTags = True

        self.build_parser()

        self.references = {}
        self.htmlStash = util.HtmlStash()
        self.registerExtensions(extensions=kwargs.get('extensions', []),
                                configs=kwargs.get('extension_configs', {}))
        self.set_output_format(kwargs.get('output_format', 'xhtml1'))
        self.reset()

    def build_parser(self):
        """ Build the parser from the various parts. """
        # Each builder wires its processors to this Markdown instance.
        self.preprocessors = build_preprocessors(self)
        self.parser = build_block_parser(self)
        self.inlinePatterns = build_inlinepatterns(self)
        self.treeprocessors = build_treeprocessors(self)
        self.postprocessors = build_postprocessors(self)
        return self

    def registerExtensions(self, extensions, configs):
        """
        Register extensions with this instance of Markdown.

        Keyword arguments:

        * extensions: A list of extensions, which can either
          be strings or objects.  See the docstring on Markdown.
        * configs: A dictionary mapping module names to config options.

        """
        for ext in extensions:
            # Strings are resolved (and possibly parsed for inline
            # configs) into Extension instances first.
            if isinstance(ext, util.string_type):
                ext = self.build_extension(ext, configs.get(ext, {}))
            if isinstance(ext, Extension):
                ext.extendMarkdown(self, globals())
                logger.debug(
                    'Successfully loaded extension "%s.%s".'
                    % (ext.__class__.__module__, ext.__class__.__name__)
                )
            elif ext is not None:
                raise TypeError(
                    'Extension "%s.%s" must be of type: "markdown.Extension"'
                    % (ext.__class__.__module__, ext.__class__.__name__))

        return self

    def build_extension(self, ext_name, configs):
        """Build extension by name, then return the module.

        The extension name may contain arguments as part of the string in the
        following format: "extname(key1=value1,key2=value2)"

        """
        configs = dict(configs)

        # Parse extensions config params (ignore the order)
        pos = ext_name.find("(")  # find the first "("
        if pos > 0:
            ext_args = ext_name[pos+1:-1]
            ext_name = ext_name[:pos]
            pairs = [x.split("=") for x in ext_args.split(",")]
            configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
            warnings.warn('Setting configs in the Named Extension string is '
                          'deprecated. It is recommended that you '
                          'pass an instance of the extension class to '
                          'Markdown or use the "extension_configs" keyword. '
                          'The current behavior will raise an error in version 2.7. '
                          'See the Release Notes for Python-Markdown version '
                          '2.6 for more info.', DeprecationWarning)

        # Get class name (if provided): `path.to.module:ClassName`
        ext_name, class_name = ext_name.split(':', 1) \
            if ':' in ext_name else (ext_name, '')

        # Try loading the extension first from one place, then another
        try:
            # Assume string uses dot syntax (`path.to.some.module`)
            module = importlib.import_module(ext_name)
            logger.debug(
                'Successfuly imported extension module "%s".' % ext_name
            )
            # For backward compat (until deprecation)
            # check that this is an extension.
            if ('.' not in ext_name and not (hasattr(module, 'makeExtension') or
               (class_name and hasattr(module, class_name)))):
                # We have a name conflict
                # eg: extensions=['tables'] and PyTables is installed
                raise ImportError
        except ImportError:
            # Prepend `markdown.extensions.` to name
            module_name = '.'.join(['markdown.extensions', ext_name])
            try:
                module = importlib.import_module(module_name)
                logger.debug(
                    'Successfuly imported extension module "%s".' %
                    module_name
                )
                warnings.warn('Using short names for Markdown\'s builtin '
                              'extensions is deprecated. Use the '
                              'full path to the extension with Python\'s dot '
                              'notation (eg: "%s" instead of "%s"). The '
                              'current behavior will raise an error in version '
                              '2.7. See the Release Notes for '
                              'Python-Markdown version 2.6 for more info.' %
                              (module_name, ext_name),
                              DeprecationWarning)
            except ImportError:
                # Prepend `mdx_` to name
                module_name_old_style = '_'.join(['mdx', ext_name])
                try:
                    module = importlib.import_module(module_name_old_style)
                    logger.debug(
                        'Successfuly imported extension module "%s".' %
                        module_name_old_style)
                    warnings.warn('Markdown\'s behavior of prepending "mdx_" '
                                  'to an extension name is deprecated. '
                                  'Use the full path to the '
                                  'extension with Python\'s dot notation '
                                  '(eg: "%s" instead of "%s"). The current '
                                  'behavior will raise an error in version 2.7. '
                                  'See the Release Notes for Python-Markdown '
                                  'version 2.6 for more info.' %
                                  (module_name_old_style, ext_name),
                                  DeprecationWarning)
                except ImportError as e:
                    # All three lookup strategies failed; surface every
                    # name we tried in the error message.
                    message = "Failed loading extension '%s' from '%s', '%s' " \
                        "or '%s'" % (ext_name, ext_name, module_name,
                                     module_name_old_style)
                    e.args = (message,) + e.args[1:]
                    raise

        if class_name:
            # Load given class name from module.
            return getattr(module, class_name)(**configs)
        else:
            # Expect makeExtension() function to return a class.
            try:
                return module.makeExtension(**configs)
            except AttributeError as e:
                message = e.args[0]
                message = "Failed to initiate extension " \
                          "'%s': %s" % (ext_name, message)
                e.args = (message,) + e.args[1:]
                raise

    def registerExtension(self, extension):
        """ This gets called by the extension """
        self.registeredExtensions.append(extension)
        return self

    def reset(self):
        """
        Resets all state variables so that we can start with a new text.
        """
        self.htmlStash.reset()
        self.references.clear()

        # Extensions may keep per-document state of their own.
        for extension in self.registeredExtensions:
            if hasattr(extension, 'reset'):
                extension.reset()

        return self

    def set_output_format(self, format):
        """ Set the output format for the class instance. """
        self.output_format = format.lower()
        try:
            self.serializer = self.output_formats[self.output_format]
        except KeyError as e:
            # Re-raise with a message listing the valid choices.
            valid_formats = list(self.output_formats.keys())
            valid_formats.sort()
            message = 'Invalid Output Format: "%s". Use one of %s.' \
                      % (self.output_format,
                         '"' + '", "'.join(valid_formats) + '"')
            e.args = (message,) + e.args[1:]
            raise
        return self

    def convert(self, source):
        """
        Convert markdown to serialized XHTML or HTML.

        Keyword arguments:

        * source: Source text as a Unicode string.

        Markdown processing takes place in five steps:

        1. A bunch of "preprocessors" munge the input text.
        2. BlockParser() parses the high-level structural elements of the
           pre-processed text into an ElementTree.
        3. A bunch of "treeprocessors" are run against the ElementTree. One
           such treeprocessor runs InlinePatterns against the ElementTree,
           detecting inline markup.
        4. Some post-processors are run against the text after the ElementTree
           has been serialized into text.
        5. The output is written to a string.

        """
        # Fixup the source text
        if not source.strip():
            return ''  # a blank unicode string

        try:
            source = util.text_type(source)
        except UnicodeDecodeError as e:
            # Customise error message while maintaining original traceback
            e.reason += '. -- Note: Markdown only accepts unicode input!'
            raise

        # Split into lines and run the line preprocessors.
        self.lines = source.split("\n")
        for prep in self.preprocessors.values():
            self.lines = prep.run(self.lines)

        # Parse the high-level elements.
        root = self.parser.parseDocument(self.lines).getroot()

        # Run the tree-processors
        for treeprocessor in self.treeprocessors.values():
            newRoot = treeprocessor.run(root)
            if newRoot is not None:
                root = newRoot

        # Serialize _properly_.  Strip top-level tags.
        output = self.serializer(root)
        if self.stripTopLevelTags:
            try:
                start = output.index(
                    '<%s>' % self.doc_tag) + len(self.doc_tag) + 2
                end = output.rindex('</%s>' % self.doc_tag)
                output = output[start:end].strip()
            except ValueError:  # pragma: no cover
                if output.strip().endswith('<%s />' % self.doc_tag):
                    # We have an empty document
                    output = ''
                else:
                    # We have a serious problem
                    raise ValueError('Markdown failed to strip top-level '
                                     'tags. Document=%r' % output.strip())

        # Run the text post-processors
        for pp in self.postprocessors.values():
            output = pp.run(output)

        return output.strip()

    def convertFile(self, input=None, output=None, encoding=None):
        """Converts a Markdown file and returns the HTML as a Unicode string.

        Decodes the file using the provided encoding (defaults to utf-8),
        passes the file content to markdown, and outputs the html to either
        the provided stream or the file with provided name, using the same
        encoding as the source file. The 'xmlcharrefreplace' error handler is
        used when encoding the output.

        **Note:** This is the only place that decoding and encoding of Unicode
        takes place in Python-Markdown.  (All other code is Unicode-in /
        Unicode-out.)

        Keyword arguments:

        * input: File object or path. Reads from stdin if `None`.
        * output: File object or path. Writes to stdout if `None`.
        * encoding: Encoding of input and output files. Defaults to utf-8.

        """
        encoding = encoding or "utf-8"

        # Read the source
        if input:
            if isinstance(input, util.string_type):
                input_file = codecs.open(input, mode="r", encoding=encoding)
            else:
                input_file = codecs.getreader(encoding)(input)
            text = input_file.read()
            input_file.close()
        else:
            text = sys.stdin.read()
            if not isinstance(text, util.text_type):
                text = text.decode(encoding)

        text = text.lstrip('\ufeff')  # remove the byte-order mark

        # Convert
        html = self.convert(text)

        # Write to file or stdout
        if output:
            if isinstance(output, util.string_type):
                output_file = codecs.open(output, "w",
                                          encoding=encoding,
                                          errors="xmlcharrefreplace")
                output_file.write(html)
                output_file.close()
            else:
                writer = codecs.getwriter(encoding)
                output_file = writer(output, errors="xmlcharrefreplace")
                output_file.write(html)
                # Don't close here. User may want to write more.
        else:
            # Encode manually and write bytes to stdout.
            html = html.encode(encoding, "xmlcharrefreplace")
            try:
                # Write bytes directly to buffer (Python 3).
                sys.stdout.buffer.write(html)
            except AttributeError:
                # Probably Python 2, which works with bytes by default.
                sys.stdout.write(html)

        return self
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text, *args, **kwargs):
    """Convert a Markdown string to HTML and return HTML as a Unicode string.

    This is a shortcut function for `Markdown` class to cover the most
    basic use case.  It initializes an instance of Markdown, loads the
    necessary extensions and runs the parser on the given text.

    Keyword arguments:

    * text: Markdown formatted text as Unicode or ASCII string.
    * Any arguments accepted by the Markdown class.

    Returns: An HTML document as a string.

    """
    return Markdown(*args, **kwargs).convert(text)
def markdownFromFile(*args, **kwargs):
    """Read markdown code from a file and write it to a file or a stream.

    This is a shortcut function which initializes an instance of Markdown,
    and calls the convertFile method rather than convert.

    Keyword arguments:

    * input: a file name or readable object.
    * output: a file name or writable object.
    * encoding: Encoding of input and output.
    * Any arguments accepted by the Markdown class.

    """
    # For backward compatibility loop through positional args.
    # Uses the same enumerate-based pattern as Markdown.__init__ for
    # consistency (the manual `c = 0 ... c += 1` counter is gone).
    pos = ['input', 'output', 'extensions', 'encoding']
    for c, arg in enumerate(args):
        if pos[c] not in kwargs:
            kwargs[pos[c]] = arg
        if c + 1 == len(pos):
            # ignore any additional args
            break
    if len(args):
        # BUG FIX: user-facing warning previously misspelled "depreacted".
        warnings.warn('Positional arguments are deprecated in '
                      'Markdown and will raise an error in version 2.7. '
                      'Use keyword arguments only.',
                      DeprecationWarning)

    md = Markdown(**kwargs)
    md.convertFile(kwargs.get('input', None),
                   kwargs.get('output', None),
                   kwargs.get('encoding', None))
| |
"""Utility class for validating scansion patterns: hexameter, hendecasyllables, pentameter.
Allows users to configure the scansion symbols internally via a constructor argument;
a suitable default is provided."""
import logging
from typing import List
from Levenshtein import distance
from cltk.prosody.lat.scansion_constants import ScansionConstants
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
__author__ = ["Todd Cook <todd.g.cook@gmail.com>"]
__license__ = "MIT License"
class MetricalValidator:
    """Currently supports validation for: hexameter, hendecasyllables, pentameter."""

    def is_valid_hexameter(self, scanned_line: str) -> bool:
        """Determine if a scansion pattern is one of the valid hexameter metrical patterns

        :param scanned_line: a line containing a sequence of stressed and unstressed syllables
        :return bool

        >>> print(MetricalValidator().is_valid_hexameter("-UU---UU---UU-U"))
        True
        """
        # Strip foot separators and spaces; only the syllable marks matter.
        line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "")
        line = line.replace(" ", "")
        if len(line) < 12:
            return False
        # The final syllable is anceps: normalize it to the optional-ending
        # mark so it matches the stored templates.
        line = line[:-1] + self.constants.OPTIONAL_ENDING
        return self.VALID_HEXAMETERS.__contains__(line)

    def is_valid_hendecasyllables(self, scanned_line: str) -> bool:
        """Determine if a scansion pattern is one of the valid Hendecasyllables metrical patterns

        Args:
            scanned_line: a line containing a sequence of stressed and unstressed syllables

        >>> print(MetricalValidator().is_valid_hendecasyllables("-U-UU-U-U-U"))
        True
        """
        line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "")
        line = line.replace(" ", "")
        if len(line) < 11:
            return False
        # Final syllable is anceps; see is_valid_hexameter.
        line = line[:-1] + self.constants.OPTIONAL_ENDING
        return self.VALID_HENDECASYLLABLES.__contains__(line)

    def is_valid_pentameter(self, scanned_line: str) -> bool:
        """Determine if a scansion pattern is one of the valid Pentameter metrical patterns

        :param scanned_line: a line containing a sequence of stressed and unstressed syllables
        :return bool: whether or not the scansion is a valid pentameter

        >>> print(MetricalValidator().is_valid_pentameter('-UU-UU--UU-UUX'))
        True
        """
        line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "")
        line = line.replace(" ", "")
        if len(line) < 10:
            return False
        # Final syllable is anceps; see is_valid_hexameter.
        line = line[:-1] + self.constants.OPTIONAL_ENDING
        return self.VALID_PENTAMETERS.__contains__(line)

    def __init__(self, constants=ScansionConstants()):
        # NOTE: the default ScansionConstants() is evaluated once at class
        # definition time and shared by all default-constructed validators.
        self.constants = constants
        # All 32 dactyl/spondee combinations for the first five feet
        # (bin(x)[3:] of 32..63 yields every 5-digit bit pattern).
        self.VALID_HEXAMETERS = [
            self._build_hexameter_template(bin(x)[3:]) for x in range(32, 64)
        ]
        self.VALID_HENDECASYLLABLES = self._build_hendecasyllable_templates()
        self.VALID_PENTAMETERS = self._build_pentameter_templates()

    def hexameter_feet(self, scansion: str) -> List[str]:
        """
        Produces a list of hexameter feet, stressed and unstressed syllables with spaces intact.

        If the scansion line is not entirely correct, it will attempt to corral one or more improper
        patterns into one or more feet.

        :param: scansion: the scanned line
        :return list of strings, representing the feet of the hexameter, or if the scansion is
        wildly incorrect, the function will return an empty list.

        >>> print("|".join(MetricalValidator().hexameter_feet(
        ... "- U U -   -   - -   - -  - U U  - U")).strip() )
        - U U |-   -|   - -|   - -|  - U U|  - U
        >>> print("|".join(MetricalValidator().hexameter_feet(
        ... "- U U -   - U  - -  - -  - U U - U")).strip())
        - U U |-   -| U  -| -  -| -  - U U| - U
        """
        # The line is consumed right-to-left via pops from this stack.
        backwards_scan = list(scansion.rstrip())
        feet = []
        # Two-mark endings that constitute (or end) a legal foot.
        candidates = [
            self.constants.STRESSED + self.constants.OPTIONAL_ENDING,
            self.constants.STRESSED + self.constants.STRESSED,
            self.constants.STRESSED + self.constants.UNSTRESSED,
            self.constants.UNSTRESSED + self.constants.STRESSED,
        ]
        # "UU" needs one more (stressed) mark to complete a dactyl.
        incomplete_foot = self.constants.UNSTRESSED + self.constants.UNSTRESSED
        try:
            while len(backwards_scan) > 0:
                spaces = []
                chunk1 = backwards_scan.pop()
                # Skip leading whitespace, accumulating it into chunk1 so
                # the original spacing is preserved in the output feet.
                while len("".join(chunk1).replace(" ", "")) == 0:
                    if len(backwards_scan) == 0:
                        feet.append(chunk1)
                        return feet[::-1]
                    chunk1 = backwards_scan.pop() + "".join(chunk1)
                chunk2 = backwards_scan.pop()
                while chunk2 == " ":
                    spaces.append(chunk2)
                    if len(backwards_scan) == 0:
                        feet.append(chunk2)
                        return feet[::-1]
                    chunk2 = backwards_scan.pop()
                new_candidate = "".join(chunk2) + "".join(spaces) + "".join(chunk1)
                if new_candidate.replace(" ", "") in candidates:
                    feet.append(new_candidate)
                else:
                    if new_candidate.replace(" ", "") == incomplete_foot:
                        # Pull one more mark to try completing a dactyl (-UU).
                        spaces2 = []
                        previous_mark = backwards_scan.pop()
                        while previous_mark == " ":
                            spaces2.append(previous_mark)
                            previous_mark = backwards_scan.pop()
                        if previous_mark == self.constants.STRESSED:
                            new_candidate = (
                                "".join(previous_mark)
                                + "".join(spaces2)
                                + new_candidate
                            )
                            feet.append(new_candidate)
                        else:
                            feet.append(new_candidate)  # invalid foot
                            spaces3 = []
                            next_mark = backwards_scan.pop()
                            while next_mark == " ":
                                # NOTE(review): this appends previous_mark (a
                                # non-space) once per space consumed; it looks
                                # like spaces3.append(next_mark) was intended
                                # to preserve spacing -- confirm before fixing.
                                spaces3.append(previous_mark)
                                next_mark = backwards_scan.pop()
                            feet.append(
                                "".join(next_mark) + "".join(spaces3) + previous_mark
                            )
        except Exception as ex:
            # Ran off the end of the stack (or similar): scansion is too
            # malformed to corral into feet.
            LOG.error("err at: {}, {}".format(scansion, ex))
            return list()
        return feet[::-1]

    @staticmethod
    def hexameter_known_stresses() -> List[int]:
        """Provide a list of known stress positions for a hexameter.

        :return: a zero based list enumerating which syllables are known to be stressed.
        """
        # Every third syllable position: 0, 3, 6, 9, 12, 15.
        return list(range(17)[::3])

    @staticmethod
    def hexameter_possible_unstresses() -> List[int]:
        """
        Provide a list of possible positions which may be unstressed syllables in a hexameter.

        :return: a zero based list enumerating which syllables are known to be unstressed.
        """
        # The complement of the known-stress positions within 0..16.
        return list(set(range(17)) - set(range(17)[::3]))

    def closest_hexameter_patterns(self, scansion: str) -> List[str]:
        """
        Find the closest group of matching valid hexameter patterns.

        :return: list of the closest valid hexameter patterns; only candidates with a matching
        length/number of syllables are considered.

        >>> print(MetricalValidator().closest_hexameter_patterns('-UUUUU-----UU--'))
        ['-UU-UU-----UU--']
        """
        return self._closest_patterns(self.VALID_HEXAMETERS, scansion)

    @staticmethod
    def pentameter_possible_stresses() -> List[int]:
        """
        Provide a list of possible stress positions for a hexameter.

        :return: a zero based list enumerating which syllables are known to be stressed.
        """
        return list(range(0, 6)) + [8]

    def closest_pentameter_patterns(self, scansion: str) -> List[str]:
        """
        Find the closest group of matching valid pentameter patterns.

        :return: list of the closest valid pentameter patterns; only candidates with a matching
        length/number of syllables are considered.

        >>> print(MetricalValidator().closest_pentameter_patterns('--UUU--UU-UUX'))
        ['---UU--UU-UUX']
        """
        return self._closest_patterns(self.VALID_PENTAMETERS, scansion)

    def closest_hendecasyllable_patterns(self, scansion: str) -> List[str]:
        """
        Find the closest group of matching valid hendecasyllable patterns.

        :return: list of the closest valid hendecasyllable patterns; only candidates with a matching
        length/number of syllables are considered.

        >>> print(MetricalValidator().closest_hendecasyllable_patterns('UU-UU-U-U-X'))
        ['-U-UU-U-U-X', 'U--UU-U-U-X']
        """
        return self._closest_patterns(self.VALID_HENDECASYLLABLES, scansion)

    def _closest_patterns(self, patterns: List[str], scansion: str) -> List[str]:
        """
        Find the closest group of matching valid patterns.

        :patterns: a list of patterns
        :scansion: the scansion pattern thus far
        :return: list of the closest valid patterns; only candidates with a matching
        length/number of syllables are considered.
        """
        pattern = scansion.replace(" ", "")
        pattern = pattern.replace(self.constants.FOOT_SEPARATOR, "")
        # Preserve the caller's final mark; compare with the anceps ending.
        ending = pattern[-1]
        candidate = pattern[: len(pattern) - 1] + self.constants.OPTIONAL_ENDING
        # Levenshtein distance to every same-length template.
        cans = [
            (distance(candidate, x), x) for x in patterns if len(x) == len(candidate)
        ]
        if cans:
            cans = sorted(cans, key=lambda tup: tup[0])
            top = cans[0][0]
            # Return all templates tied at minimal distance, restoring the
            # caller's original final mark.
            return [can[1][:-1] + ending for can in cans if can[0] == top]
        return []

    def _build_hexameter_template(self, stress_positions: str) -> str:
        """
        Build a hexameter scansion template from string of 5 binary numbers;
        NOTE: Traditionally the fifth foot is dactyl and spondee substitution is rare,
        however since it *is* a possible combination, we include it here.

        :param stress_positions: 5 binary integers, indicating whether foot is dactyl or spondee
        :return: a valid hexameter scansion template, a string representing stressed and
        unstresssed syllables with the optional terminal ending.

        >>> print(MetricalValidator()._build_hexameter_template("01010"))
        -UU---UU---UU-X
        """
        hexameter = []
        for binary in stress_positions:
            if binary == "1":
                hexameter.append(self.constants.SPONDEE)
            if binary == "0":
                hexameter.append(self.constants.DACTYL)
        hexameter.append(self.constants.HEXAMETER_ENDING)
        return "".join(hexameter)

    def _build_hendecasyllable_templates(self) -> List[str]:
        # The three legal openings (trochee/spondee/iamb) share a common tail.
        return [
            # -U - U U - U - U - X
            self.constants.TROCHEE
            + self.constants.TROCHEE
            + self.constants.IAMB
            + self.constants.IAMB
            + self.constants.IAMB
            + self.constants.OPTIONAL_ENDING,
            # -- - U U - U - U - X
            self.constants.SPONDEE
            + self.constants.TROCHEE
            + self.constants.IAMB
            + self.constants.IAMB
            + self.constants.IAMB
            + self.constants.OPTIONAL_ENDING,
            # U- - U U - U - U - X
            self.constants.IAMB
            + self.constants.TROCHEE
            + self.constants.IAMB
            + self.constants.IAMB
            + self.constants.IAMB
            + self.constants.OPTIONAL_ENDING,
        ]

    def _build_pentameter_templates(self) -> List[str]:
        """Create pentameter templates."""
        return [  # '-UU|-UU|-|-UU|-UU|X'
            self.constants.DACTYL
            + self.constants.DACTYL
            + self.constants.STRESSED
            + self.constants.DACTYL
            + self.constants.DACTYL
            + self.constants.OPTIONAL_ENDING,
            # '-UU|--|-|-UU|-UU|X'
            self.constants.DACTYL
            + self.constants.SPONDEE
            + self.constants.STRESSED
            + self.constants.DACTYL
            + self.constants.DACTYL
            + self.constants.OPTIONAL_ENDING,
            # '--|-UU|-|-UU|-UU|X'
            self.constants.SPONDEE
            + self.constants.DACTYL
            + self.constants.STRESSED
            + self.constants.DACTYL
            + self.constants.DACTYL
            + self.constants.OPTIONAL_ENDING,
            # '--|--|-|-UU|-UU|X'
            self.constants.SPONDEE
            + self.constants.SPONDEE
            + self.constants.STRESSED
            + self.constants.DACTYL
            + self.constants.DACTYL
            + self.constants.OPTIONAL_ENDING,
        ]
| |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import argparse
from oslo_serialization import jsonutils
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.i18n import _
from neutronclient.neutron import v2_0 as neutronV20
def _format_external_gateway_info(router):
    """Render a router's external_gateway_info as JSON ('' when absent)."""
    try:
        gateway_info = router['external_gateway_info']
        return jsonutils.dumps(gateway_info)
    except (TypeError, KeyError):
        # Missing key or non-serializable/None router: show an empty cell.
        return ''
class ListRouter(neutronV20.ListCommand):
    """List routers that belong to a given tenant."""

    resource = 'router'
    # Render external_gateway_info dicts as JSON text in table output.
    _formatters = {'external_gateway_info': _format_external_gateway_info, }
    list_columns = ['id', 'name', 'external_gateway_info', 'distributed', 'ha']
    pagination_support = True
    sorting_support = True
class ShowRouter(neutronV20.ShowCommand):
    """Show information of a given router."""

    resource = 'router'
class CreateRouter(neutronV20.CreateCommand):
    """Create a router for a given tenant."""

    resource = 'router'
    # Render external_gateway_info dicts as JSON text in output.
    _formatters = {'external_gateway_info': _format_external_gateway_info, }

    def add_known_arguments(self, parser):
        """Register router-specific CLI options on *parser*."""
        # Preferred hyphenated flag.
        parser.add_argument(
            '--admin-state-down',
            dest='admin_state', action='store_false',
            help=_('Set admin state up to false.'))
        # Legacy underscore alias, hidden from --help.
        parser.add_argument(
            '--admin_state_down',
            dest='admin_state', action='store_false',
            help=argparse.SUPPRESS)
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of router to create.'))
        utils.add_boolean_argument(
            parser, '--distributed', dest='distributed',
            help=_('Create a distributed router.'))
        utils.add_boolean_argument(
            parser, '--ha', dest='ha',
            help=_('Create a highly available router.'))

    def args2body(self, parsed_args):
        """Translate parsed CLI arguments into the API request body."""
        router = {'admin_state_up': parsed_args.admin_state}
        neutronV20.update_dict(parsed_args, router,
                               ['name', 'tenant_id', 'distributed', 'ha'])
        return {self.resource: router}
class DeleteRouter(neutronV20.DeleteCommand):
    """Delete a given router."""
    # All behavior is inherited from the generic delete command.
    resource = 'router'
class UpdateRouter(neutronV20.UpdateCommand):
    """Update router's information."""
    # All behavior is inherited from the generic update command.
    resource = 'router'
class RouterInterfaceCommand(neutronV20.NeutronCommand):
    """Base class to add/remove a router interface.

    Subclasses supply the actual API call (add or remove) and the message
    printed on success.
    """
    api = 'network'
    resource = 'router'

    def call_api(self, neutron_client, router_id, body):
        """Perform the add/remove interface call; implemented by subclasses."""
        raise NotImplementedError()

    def success_message(self, router_id, portinfo):
        """Return the message printed on success; implemented by subclasses."""
        raise NotImplementedError()

    def get_parser(self, prog_name):
        parser = super(RouterInterfaceCommand, self).get_parser(prog_name)
        parser.add_argument(
            'router', metavar='ROUTER',
            help=_('ID or name of the router.'))
        parser.add_argument(
            'interface', metavar='INTERFACE',
            help=_('The format is "SUBNET|subnet=SUBNET|port=PORT". '
                   'Either a subnet or port must be specified. '
                   'Both ID and name are accepted as SUBNET or PORT. '
                   'Note that "subnet=" can be omitted when specifying a '
                   'subnet.'))
        return parser

    def run(self, parsed_args):
        # Lazy %-style args avoid formatting when debug logging is disabled.
        self.log.debug('run(%s)', parsed_args)
        neutron_client = self.get_client()
        neutron_client.format = parsed_args.request_format
        if '=' in parsed_args.interface:
            resource, value = parsed_args.interface.split('=', 1)
            if resource not in ['subnet', 'port']:
                # Bug fix: the exception was previously constructed but never
                # raised, so an invalid INTERFACE prefix was silently accepted.
                raise exceptions.CommandError(
                    _('You must specify either subnet or '
                      'port for INTERFACE parameter.'))
        else:
            # Bare value: default to treating it as a subnet.
            resource = 'subnet'
            value = parsed_args.interface
        _router_id = neutronV20.find_resourceid_by_name_or_id(
            neutron_client, self.resource, parsed_args.router)
        _interface_id = neutronV20.find_resourceid_by_name_or_id(
            neutron_client, resource, value)
        body = {'%s_id' % resource: _interface_id}
        portinfo = self.call_api(neutron_client, _router_id, body)
        print(self.success_message(parsed_args.router, portinfo),
              file=self.app.stdout)
class AddInterfaceRouter(RouterInterfaceCommand):
    """Add an internal network interface to a router."""

    def call_api(self, neutron_client, router_id, body):
        # Attach the subnet/port described by ``body`` to the router.
        return neutron_client.add_interface_router(router_id, body)

    def success_message(self, router_id, portinfo):
        details = {'router': router_id, 'port': portinfo['port_id']}
        return _('Added interface %(port)s to router %(router)s.') % details
class RemoveInterfaceRouter(RouterInterfaceCommand):
    """Remove an internal network interface from a router."""

    def call_api(self, neutron_client, router_id, body):
        # Detach the subnet/port described by ``body`` from the router.
        return neutron_client.remove_interface_router(router_id, body)

    def success_message(self, router_id, portinfo):
        # portinfo is unused: router-interface-delete returns None for it.
        return _('Removed interface from router %s.') % router_id
class SetGatewayRouter(neutronV20.NeutronCommand):
    """Set the external network gateway for a router."""
    api = 'network'
    resource = 'router'

    def get_parser(self, prog_name):
        parser = super(SetGatewayRouter, self).get_parser(prog_name)
        parser.add_argument(
            'router', metavar='ROUTER',
            help=_('ID or name of the router.'))
        parser.add_argument(
            'external_network', metavar='EXTERNAL-NETWORK',
            help=_('ID or name of the external network for the gateway.'))
        parser.add_argument(
            '--disable-snat', action='store_true',
            help=_('Disable source NAT on the router gateway.'))
        parser.add_argument(
            '--fixed-ip', action='append',
            help=_('Desired IP and/or subnet on external network: '
                   'subnet_id=<name_or_id>,ip_address=<ip>. '
                   'You can repeat this option.'))
        return parser

    def run(self, parsed_args):
        self.log.debug('run(%s)' % parsed_args)
        client = self.get_client()
        client.format = parsed_args.request_format
        # Resolve the router and the external network (names or IDs accepted).
        router_id = neutronV20.find_resourceid_by_name_or_id(
            client, self.resource, parsed_args.router)
        network_id = neutronV20.find_resourceid_by_name_or_id(
            client, 'network', parsed_args.external_network)
        gateway = {'network_id': network_id}
        if parsed_args.disable_snat:
            gateway['enable_snat'] = False
        if parsed_args.fixed_ip:
            fixed_ips = []
            for ip_spec in parsed_args.fixed_ip:
                ip_dict = utils.str2dict(ip_spec)
                subnet_name_id = ip_dict.get('subnet_id')
                if subnet_name_id:
                    # Accept a subnet name as well as an ID.
                    ip_dict['subnet_id'] = neutronV20.find_resourceid_by_name_or_id(
                        client, 'subnet', subnet_name_id)
                fixed_ips.append(ip_dict)
            gateway['external_fixed_ips'] = fixed_ips
        client.add_gateway_router(router_id, gateway)
        print(_('Set gateway for router %s') % parsed_args.router,
              file=self.app.stdout)
class RemoveGatewayRouter(neutronV20.NeutronCommand):
    """Remove an external network gateway from a router."""
    api = 'network'
    resource = 'router'

    def get_parser(self, prog_name):
        parser = super(RemoveGatewayRouter, self).get_parser(prog_name)
        parser.add_argument(
            'router', metavar='ROUTER',
            help=_('ID or name of the router.'))
        return parser

    def run(self, parsed_args):
        self.log.debug('run(%s)' % parsed_args)
        client = self.get_client()
        client.format = parsed_args.request_format
        # Resolve the router (name or ID accepted) and clear its gateway.
        router_id = neutronV20.find_resourceid_by_name_or_id(
            client, self.resource, parsed_args.router)
        client.remove_gateway_router(router_id)
        print(_('Removed gateway from router %s') % parsed_args.router,
              file=self.app.stdout)
| |
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 Dave Vandenbout.
"""
Functions for finding/displaying parts and footprints.
"""
from __future__ import ( # isort:skip
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import os.path
import re
from builtins import open, super
from future import standard_library
from .logger import active_logger
from .utilities import *
standard_library.install_aliases()
# TODO: Use push-down automata to parse nested parenthetical expression
# of AND/OR clauses for use in advanced part searching.
# https://stackoverflow.com/questions/4284991/parsing-nested-parentheses-in-python-grab-content-by-level
def parse_search_terms(terms):
    """
    Return a regular expression for a sequence of search terms.

    Substitute a zero-width lookahead assertion (?= ) for each term. Thus,
    "abc def" becomes "(?=.*(abc))(?=.*(def))" and matches any string
    containing both "abc" and "def". Or "abc (def|ghi)" becomes
    "(?=.*(abc))((?=.*(def|ghi))" and matches any string containing
    "abc" and "def" or "ghi". Quoted terms can be used for phrases containing
    whitespace.
    """
    # Remove leading/trailing whitespace. (str.strip() already trims both
    # ends; the previous extra rstrip() call was redundant.)
    terms = terms.strip()
    terms = re.sub(r"\s*\|\s*", r"|", terms)  # Remove spaces around OR operator.
    # Place the quote-delimited REs before the RE for sequences of
    # non-white chars to prevent the initial portion of a quoted string from
    # being gathered up as a non-white character sequence.
    terms = re.sub(r"((\".*?\")|(\'.*?\')|(\S+))\s*", r"(?=.*(\1))", terms)
    terms = re.sub(r"[\'\"]", "", terms)  # Remove quotes.
    return terms + ".*"
def search_parts_iter(terms, tool=None):
    """Generate progress markers and part matches for the search terms.

    Yields ("LIB", lib_file, index, total) when a new library file is opened
    (used for progress indicators) and ("PART", lib_file, part, name) for
    every matching part and each of its aliases.
    """
    import skidl

    from .schlib import SchLib

    if tool is None:
        tool = skidl.get_default_tool()

    terms = parse_search_terms(terms)

    def as_list(obj):
        """Coerce obj into a list: [] for falsy, wrap non-sequence scalars."""
        if isinstance(obj, (list, tuple)):
            return obj
        return [obj] if obj else []

    # Collect (directory, filename) pairs for every library file found in
    # the tool's search paths.
    suffixes = tuple(to_list(skidl.lib_suffixes[tool]))
    lib_files = []
    for lib_dir in skidl.lib_search_paths[tool]:
        try:
            entries = os.listdir(lib_dir)
        except (FileNotFoundError, OSError):
            active_logger.warning("Could not open directory '{}'".format(lib_dir))
            entries = []
        lib_files.extend(
            (lib_dir, name) for name in entries if name.endswith(suffixes)
        )

    total = len(lib_files)

    # Scan every library file for parts that match the search terms.
    for position, (lib_dir, lib_file) in enumerate(lib_files, start=1):
        # Progress marker: which file we are on out of how many.
        yield "LIB", lib_file, position, total

        # Parse the library file into a part library.
        lib = SchLib(os.path.join(lib_dir, lib_file), tool=tool)

        # Report every part whose search text matches the terms.
        for part in as_list(lib.get_parts(use_backup_lib=False, search_text=terms)):
            # Instantiate the complete part object (name info only).
            part.parse(get_name_only=True)
            yield "PART", lib_file, part, part.name
            # Aliases of the part match as well.
            for alias in list(part.aliases):
                yield "PART", lib_file, part, alias
def search_parts(terms, tool=None):
    """
    Print parts whose name, alias, description or keywords match the regex terms.
    """
    parts = set()
    for hit in search_parts_iter(terms, tool):
        kind = hit[0]
        if kind == "LIB":
            # Overwrite the status line with the library being searched.
            print(" " * 79, "\rSearching {} ...".format(hit[1]), sep="", end="\r")
        elif kind == "PART":
            parts.add(hit[1:4])
    # Blank out the status line.
    print(" " * 79, end="\r")

    # List each matching part, grouped by the library file containing it.
    for lib_file, part, part_name in sorted(parts, key=lambda p: p[0]):
        descr = getattr(part, "description", "???")
        print("{}: {} ({})".format(lib_file, part_name, descr))
def show_part(lib, part_name, tool=None):
    """
    Print the I/O pins for a given part in a library.

    Args:
        lib: Either a SchLib object or the name of a library.
        part_name: The name of the part in the library.
        tool: The ECAD tool format for the library.

    Returns:
        A Part object, or None if the part could not be instantiated.
    """
    import skidl

    from .part import TEMPLATE, Part

    if tool is None:
        tool = skidl.get_default_tool()

    # Escape the name so regex metacharacters match literally.
    name_re = re.escape(part_name)
    try:
        return Part(lib, name_re, tool=tool, dest=TEMPLATE)
    except Exception:
        # Best-effort lookup: any failure is reported as "not found".
        return None
class FootprintCache(dict):
    """Dict for storing footprints from all directories.

    Maps a library nickname to a dict with:
      "path":    directory holding the library's footprint modules.
      "modules": {module_name: contents} where contents starts as None and
                 is filled in lazily when the module file is read.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.reset()  # Cache starts off empty, hence invalid.

    def reset(self):
        """Empty the cache and mark it as invalid."""
        self.clear()  # Clear out cache.
        self.valid = False  # Cache is empty, hence invalid.

    @staticmethod
    def _get_env_vars(s):
        """Return a list of environment variable names found in a string."""
        # Hoisted out of the load() loop: it was previously redefined on
        # every library iteration.
        env_vars = []
        for env_var_re in (r"\${([^}]*)}", r"\$(\w+)", r"%(\w+)%"):
            env_vars.extend(re.findall(env_var_re, s))
        return env_vars

    def load(self, path):
        """Load cache with footprints from libraries in fp-lib-table file."""
        # Expand any env. vars and/or user in the path.
        path = os.path.expandvars(os.path.expanduser(path))

        # Read contents of footprint library file into a single string.
        try:
            # Look for fp-lib-table file and read its entries into a table
            # of footprint module libs.
            with open(os.path.join(path, "fp-lib-table")) as fp:
                tbl = fp.read()
        except FileNotFoundError:
            # fp-lib-table file was not found, so create a table containing
            # the path directory as a single module lib.
            nickname = os.path.splitext(os.path.basename(path))[0]
            tbl = '(fp_lib_table\n(lib (name {nickname})(type KiCad)(uri {path})(options "")(descr ""))\n)'.format(
                nickname=nickname, path=path
            )

        # Get individual "(lib ...)" entries from the string.
        libs = re.findall(
            r"\(\s*lib\s* .*? \)\)", tbl, flags=re.IGNORECASE | re.VERBOSE | re.DOTALL
        )

        # Add the footprint modules found in each enabled KiCad library.
        for lib in libs:

            # Skip disabled libraries.
            disabled = re.findall(
                r"\(\s*disabled\s*\)", lib, flags=re.IGNORECASE | re.VERBOSE
            )
            if disabled:
                continue

            # Skip non-KiCad libraries (primarily git repos).
            type_ = re.findall(
                r'(?:\(\s*type\s*) ("[^"]*?"|[^)]*?) (?:\s*\))',
                lib,
                flags=re.IGNORECASE | re.VERBOSE,
            )[0]
            if type_.lower() != "kicad":
                continue

            # Get the library directory and nickname.
            uri = re.findall(
                r'(?:\(\s*uri\s*) ("[^"]*?"|[^)]*?) (?:\s*\))',
                lib,
                flags=re.IGNORECASE | re.VERBOSE,
            )[0]
            nickname = re.findall(
                r'(?:\(\s*name\s*) ("[^"]*?"|[^)]*?) (?:\s*\))',
                lib,
                flags=re.IGNORECASE | re.VERBOSE,
            )[0]

            # Remove any quotes around the URI or nickname.
            uri = rmv_quotes(uri)
            nickname = rmv_quotes(nickname)

            # Expand environment variables and ~ in the URI.
            uri = os.path.expandvars(os.path.expanduser(uri))

            # Look for unexpanded env vars and skip this library if found.
            unexpanded_vars = self._get_env_vars(uri)
            if unexpanded_vars:
                active_logger.warning(
                    "There are some undefined environment variables: {}".format(
                        " ".join(unexpanded_vars)
                    )
                )
                continue

            # Get a list of all the footprint module files in the top-level
            # of the library URI.
            filenames = [
                fn
                for fn in os.listdir(uri)
                if os.path.isfile(os.path.join(uri, fn))
                and fn.lower().endswith(".kicad_mod")
            ]

            # Create an entry in the cache for this nickname. (This will
            # overwrite any previous nickname entry, so make sure to scan
            # fp-lib-tables in order of increasing priority.) Module file
            # contents start as None and are read on demand.
            self[nickname] = {
                "path": uri,
                "modules": {os.path.splitext(fn)[0]: None for fn in filenames},
            }
# Cache for storing footprints read from .kicad_mod files.
# Module-level singleton shared by the footprint search functions below.
footprint_cache = FootprintCache()
def search_footprints_iter(terms, tool=None):
    """Generate (lib, footprint) items that match the regex search terms.

    Yields ("LIB", lib_name, index, total) when entering each library (used
    for progress indicators) and ("MODULE", lib_name, module_text, name)
    for every matching footprint module.
    """
    import skidl
    if tool is None:
        tool = skidl.get_default_tool()
    terms = parse_search_terms(terms)
    # If the cache isn't valid, then make it valid by gathering all the
    # footprint files from all the directories in the search paths.
    if not footprint_cache.valid:
        footprint_cache.clear()
        for path in skidl.footprint_search_paths[tool]:
            footprint_cache.load(path)
    # Get the number of footprint libraries to be searched.
    num_fp_libs = len(footprint_cache)
    # Now search through the libraries for footprints that match the search terms.
    for idx, fp_lib in enumerate(footprint_cache):
        # If just entered a new library, yield the name of the lib and
        # where it is within the total number of libs to search.
        # (This is used for progress indicators.)
        yield "LIB", fp_lib, idx + 1, num_fp_libs
        # Get path to library directory and dict of footprint modules.
        path = footprint_cache[fp_lib]["path"]
        modules = footprint_cache[fp_lib]["modules"]
        # Search each module in the library.
        for module_name in modules:
            # If the cache isn't valid, then read each footprint file and store
            # its contents in the cache.
            if not footprint_cache.valid:
                file = os.path.join(path, module_name + ".kicad_mod")
                with open(file, "r") as fp:
                    try:
                        # Remove any linefeeds that would interfere with fullmatch() later on.
                        modules[module_name] = [l.rstrip() for l in fp.readlines()]
                    except UnicodeDecodeError:
                        # NOTE(review): on Python 3 this fallback re-reads an
                        # already-consumed text stream whose lines are str
                        # (no .decode), so it ends up storing "" via the
                        # AttributeError path -- confirm this is intended.
                        try:
                            modules[module_name] = [
                                l.decode("utf-8").rstrip() for l in fp.readlines()
                            ]
                        except AttributeError:
                            modules[module_name] = ""
            # Get the contents of the footprint file from the cache.
            module_text = tuple(modules[module_name])
            # Count the pads so it can be added to the text being searched.
            # Join all the module text lines, search for the number of
            # occurrences of "(pad", and then count them.
            # A set is used so pads with the same num/name are only counted once.
            # Place the pad count before everything else so the space that
            # terminates it won't be stripped off later. This is necessary
            # so (for example) "#pads=20 " won't match "#pads=208".
            num_pads = len(
                set(re.findall(r"\(\s*pad\s+([^\s)]+)", " ".join(module_text)))
            )
            num_pads_str = "#pads={}".format(num_pads)
            # Create a string with the module name, library name, number of pads,
            # description and tags.
            search_text = "\n".join([num_pads_str, fp_lib, module_name])
            for line in module_text:
                if "(descr " in line or "(tags " in line:
                    search_text = "\n".join([search_text, line])
            # Search the string for a match with the search terms.
            # (fullmatch presumably comes from the .utilities star import --
            # verify.)
            if fullmatch(
                terms, search_text, flags=re.IGNORECASE | re.MULTILINE | re.DOTALL
            ):
                yield "MODULE", fp_lib, module_text, module_name
    # At the end, all modules have been scanned and the footprint cache is valid.
    footprint_cache.valid = True
def search_footprints(terms, tool=None):
    """
    Print footprints whose description or tags match the regex terms.
    """
    hits = []
    for fp in search_footprints_iter(terms, tool):
        kind = fp[0]
        if kind == "LIB":
            # Overwrite the status line with the library being searched.
            print(" " * 79, "\rSearching {} ...".format(fp[1]), sep="", end="\r")
        elif kind == "MODULE":
            hits.append(fp[1:4])
    # Blank out the status line.
    print(" " * 79, end="\r")

    # Print each module name sorted by the library where it was found.
    for lib_file, module_text, module_name in sorted(hits, key=lambda f: (f[0], f[2])):
        # Pull the description and tags out of the module text, if present.
        descr = "???"
        tags = "???"
        for line in module_text:
            try:
                descr = line.split("(descr ")[1].rsplit(")", 1)[0]
            except IndexError:
                pass
            try:
                tags = line.split("(tags ")[1].rsplit(")", 1)[0]
            except IndexError:
                pass
        print("{}: {} ({} - {})".format(lib_file, module_name, descr, tags))
def show_footprint(lib, module_name, tool=None):
    """
    Print the pads for a given module in a library.

    Args:
        lib: The name of a library.
        module_name: The name of the footprint in the library.
        tool: The ECAD tool format for the library.

    Returns:
        A Part object.
    """
    import skidl
    if tool is None:
        tool = skidl.get_default_tool()
    # Point the KiCad module search path at the configured footprint dirs.
    os.environ["KISYSMOD"] = os.pathsep.join(skidl.footprint_search_paths[tool])
    # NOTE(review): ``pym`` is not defined or imported anywhere visible in
    # this file; unless it is injected by the .utilities star import, this
    # call raises NameError -- confirm where pym is expected to come from.
    return pym.Module.from_library(lib, module_name)
# Define some shortcuts.
# Convenience aliases for interactive use.
search = search_parts
show = show_part
| |
#!/usr/bin/env python2.7
from __future__ import print_function
import argparse
import cgi
import codecs
import errno
import functools
from multiprocessing import cpu_count
import os.path
import re
import shutil
import sys
from pygments import highlight
from pygments.lexers.c_cpp import CppLexer
from pygments.formatters import HtmlFormatter
import optpmap
import optrecord
# Help text for the argument parser (shown by --help).
# Fixed grammar in the user-visible text: "The tools requires" -> "The tool requires".
desc = '''Generate HTML output to visualize optimization records from the YAML files
generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
The tool requires PyYAML and Pygments Python packages.'''
# This allows passing the global context to the child processes.
class Context:
    """Shared rendering state passed to the worker processes."""

    def __init__(self, caller_loc=None):
        """caller_loc maps function names to the source location where inlining happened."""
        # Avoid a mutable default argument: the previous ``caller_loc=dict()``
        # default was a single dict shared by every Context() created without
        # an argument.
        self.caller_loc = {} if caller_loc is None else caller_loc
# Module-level context; re-bound inside each worker by _render_file().
context = Context()
def suppress(remark):
    """Return True for sil remarks about Swift-internal functions that
    should be hidden from the report."""
    name = remark.Name
    if name == 'sil.Specialized':
        key, prefixes = 'Function', '\"Swift.'
    elif name == 'sil.Inlined':
        key, prefixes = 'Callee', ('\"Swift.', '\"specialized Swift.')
    else:
        return False
    return remark.getArgDict()[key][0].startswith(prefixes)
class SourceFileRenderer:
    """Render a single source file, annotated with its remarks, to HTML."""
    def __init__(self, source_dir, output_dir, filename, no_highlight):
        self.filename = filename
        # Resolve the source file: try the name as given, then under source_dir.
        existing_filename = None
        if os.path.exists(filename):
            existing_filename = filename
        else:
            fn = os.path.join(source_dir, filename)
            if os.path.exists(fn):
                existing_filename = fn
        self.no_highlight = no_highlight
        # Output page name is derived from the source file name by optrecord.
        self.stream = codecs.open(os.path.join(output_dir, optrecord.html_file_name(filename)), 'w', encoding='utf-8')
        if existing_filename:
            self.source_stream = open(existing_filename)
        else:
            # Without the source we can only emit a stub page.
            self.source_stream = None
            print('''
<html>
<h1>Unable to locate file {}</h1>
</html>
'''.format(filename), file=self.stream)
        self.html_formatter = HtmlFormatter(encoding='utf-8')
        self.cpp_lexer = CppLexer(stripnl=False)
    def render_source_lines(self, stream, line_remarks):
        """Emit one table row per source line, each followed by its remarks."""
        file_text = stream.read()
        if self.no_highlight:
            # NOTE(review): on Python 3, file_text from a text-mode stream is
            # str and has no .decode -- this branch appears to assume
            # Python 2 (see the shebang); confirm.
            html_highlighted = file_text.decode('utf-8')
        else:
            html_highlighted = highlight(
                file_text,
                self.cpp_lexer,
                self.html_formatter)
            # Note that the API is different between Python 2 and 3. On
            # Python 3, pygments.highlight() returns a bytes object, so we
            # have to decode. On Python 2, the output is str but since we
            # support unicode characters and the output streams is unicode we
            # decode too.
            html_highlighted = html_highlighted.decode('utf-8')
            # Take off the header and footer, these must be
            # reapplied line-wise, within the page structure
            html_highlighted = html_highlighted.replace('<div class="highlight"><pre>', '')
            html_highlighted = html_highlighted.replace('</pre></div>', '')
        for (linenum, html_line) in enumerate(html_highlighted.split('\n'), start=1):
            print(u'''
<tr>
<td><a name=\"L{linenum}\">{linenum}</a></td>
<td></td>
<td></td>
<td><div class="highlight"><pre>{html_line}</pre></div></td>
</tr>'''.format(**locals()), file=self.stream)
            # Emit every (non-suppressed) remark attached to this line.
            for remark in line_remarks.get(linenum, []):
                if not suppress(remark):
                    self.render_inline_remarks(remark, html_line)
    def render_inline_remarks(self, r, line):
        """Emit one table row for a single remark below its source line."""
        inlining_context = r.DemangledFunctionName
        dl = context.caller_loc.get(r.Function)
        if dl:
            # Link the function name back to the caller location recorded by
            # map_remarks().
            dl_dict = dict(list(dl))
            link = optrecord.make_link(dl_dict['File'], dl_dict['Line'] - 2)
            inlining_context = "<a href={link}>{r.DemangledFunctionName}</a>".format(**locals())
        # Column is the number of characters *including* tabs, keep those and
        # replace everything else with spaces.
        indent = line[:max(r.Column, 1) - 1]
        indent = re.sub('\S', ' ', indent)
        print(u'''
<tr>
<td></td>
<td>{r.RelativeHotness}</td>
<td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
<td><pre style="display:inline">{indent}</pre><span class=\"column-entry-yellow\"> {r.message} </span></td>
<td class=\"column-entry-yellow\">{inlining_context}</td>
</tr>'''.format(**locals()), file=self.stream)
    def render(self, line_remarks):
        """Write the full HTML page for this source file (no-op without source)."""
        if not self.source_stream:
            return
        print('''
<html>
<title>{}</title>
<meta charset="utf-8" />
<head>
<link rel='stylesheet' type='text/css' href='style.css'>
</head>
<body>
<div class="centered">
<table class="source">
<thead>
<tr>
<th style="width: 2%">Line</td>
<th style="width: 3%">Hotness</td>
<th style="width: 10%">Optimization</td>
<th style="width: 70%">Source</td>
<th style="width: 15%">Inline Context</td>
</tr>
</thead>
<tbody>'''.format(os.path.basename(self.filename)), file=self.stream)
        self.render_source_lines(self.source_stream, line_remarks)
        print('''
</tbody>
</table>
</body>
</html>''', file=self.stream)
class IndexRenderer:
    """Render index.html listing all (or the hottest) remarks."""
    def __init__(self, output_dir, should_display_hotness, max_hottest_remarks_on_index):
        self.stream = codecs.open(os.path.join(output_dir, 'index.html'), 'w', encoding='utf-8')
        self.should_display_hotness = should_display_hotness
        self.max_hottest_remarks_on_index = max_hottest_remarks_on_index
    def render_entry(self, r, odd):
        """Emit one table row for a remark; ``odd`` alternates row styling."""
        # NOTE(review): cgi.escape was removed in Python 3.8; this module's
        # shebang targets Python 2.7 where it is still available -- confirm
        # before porting.
        escaped_name = cgi.escape(r.DemangledFunctionName)
        print(u'''
<tr>
<td class=\"column-entry-{odd}\"><a href={r.Link}>{r.DebugLocString}</a></td>
<td class=\"column-entry-{odd}\">{r.RelativeHotness}</td>
<td class=\"column-entry-{odd}\">{escaped_name}</td>
<td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
</tr>'''.format(**locals()), file=self.stream)
    def render(self, all_remarks):
        """Write the index page for the given (pre-sorted) remark list."""
        print('''
<html>
<meta charset="utf-8" />
<head>
<link rel='stylesheet' type='text/css' href='style.css'>
</head>
<body>
<div class="centered">
<table>
<tr>
<td>Source Location</td>
<td>Hotness</td>
<td>Function</td>
<td>Pass</td>
</tr>''', file=self.stream)
        # Cap the table length only when sorting by hotness.
        max_entries = None
        if self.should_display_hotness:
            max_entries = self.max_hottest_remarks_on_index
        for i, remark in enumerate(all_remarks[:max_entries]):
            if not suppress(remark):
                self.render_entry(remark, i % 2)
        print('''
</table>
</body>
</html>''', file=self.stream)
def _render_file(source_dir, output_dir, ctx, no_highlight, entry):
    """Render a single source file's remarks to HTML (multiprocessing worker)."""
    # Re-bind the module-level context inside the child process.
    global context
    context = ctx
    filename, remarks = entry
    renderer = SourceFileRenderer(source_dir, output_dir, filename, no_highlight)
    renderer.render(remarks)
def map_remarks(all_remarks):
    """Record, for every inlined callee, the source location of its caller.

    Populates the module-level context.caller_loc so the renderer can link
    a remark's function back to the site where inlining happened.
    """
    for remark in optrecord.itervalues(all_remarks):
        is_inline_remark = (isinstance(remark, optrecord.Passed)
                            and remark.Pass == "inline"
                            and remark.Name == "Inlined")
        if not is_inline_remark:
            continue
        for arg in remark.Args:
            arg_dict = dict(list(arg))
            caller = arg_dict.get('Caller')
            # Only record entries that carry both a caller and its location.
            if caller and 'DebugLoc' in arg_dict:
                context.caller_loc[caller] = arg_dict['DebugLoc']
def generate_report(all_remarks,
                    file_remarks,
                    source_dir,
                    output_dir,
                    no_highlight,
                    should_display_hotness,
                    max_hottest_remarks_on_index,
                    num_jobs,
                    should_print_progress):
    """Write the index page, the stylesheet and all per-source HTML files."""
    # Create the output directory, tolerating its prior existence.
    try:
        os.makedirs(output_dir)
    except OSError as e:
        if e.errno != errno.EEXIST or not os.path.isdir(output_dir):
            raise

    if should_print_progress:
        print('Rendering index page...')

    # Sort hottest-first when hotness info is available, otherwise by
    # source position.
    remarks = optrecord.itervalues(all_remarks)
    if should_display_hotness:
        sorted_remarks = sorted(
            remarks,
            key=lambda r: (r.Hotness, r.File, r.Line, r.Column,
                           r.PassWithDiffPrefix, r.yaml_tag, r.Function),
            reverse=True)
    else:
        sorted_remarks = sorted(
            remarks,
            key=lambda r: (r.File, r.Line, r.Column, r.PassWithDiffPrefix,
                           r.yaml_tag, r.Function))
    IndexRenderer(output_dir, should_display_hotness,
                  max_hottest_remarks_on_index).render(sorted_remarks)

    # Ship the stylesheet next to the generated pages.
    shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "style.css"), output_dir)

    # Render every source file in parallel worker processes.
    _render_file_bound = functools.partial(
        _render_file, source_dir, output_dir, context, no_highlight)
    if should_print_progress:
        print('Rendering HTML files...')
    optpmap.pmap(_render_file_bound,
                 file_remarks.items(),
                 num_jobs,
                 should_print_progress)
def main():
    """Command-line entry point: parse args, gather remark YAML, emit HTML."""
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument(
        'yaml_dirs_or_files',
        nargs='+',
        help='List of optimization record files or directories searched '
             'for optimization record files.')
    parser.add_argument(
        '--output-dir',
        '-o',
        default='html',
        help='Path to a directory where generated HTML files will be output. '
             'If the directory does not already exist, it will be created. '
             '"%(default)s" by default.')
    parser.add_argument(
        '--jobs',
        '-j',
        default=None,
        type=int,
        help='Max job count (defaults to %(default)s, the current CPU count)')
    parser.add_argument(
        '--source-dir',
        '-s',
        default='',
        help='set source directory')
    parser.add_argument(
        '--no-progress-indicator',
        '-n',
        action='store_true',
        default=False,
        help='Do not display any indicator of how many YAML files were read '
             'or rendered into HTML.')
    parser.add_argument(
        '--max-hottest-remarks-on-index',
        default=1000,
        type=int,
        help='Maximum number of the hottest remarks to appear on the index page')
    parser.add_argument(
        '--no-highlight',
        action='store_true',
        default=False,
        help='Do not use a syntax highlighter when rendering the source code')
    parser.add_argument(
        '--demangler',
        help='Set the demangler to be used (defaults to %s)' % optrecord.Remark.default_demangler)
    # Do not make this a global variable. Values needed to be propagated through
    # to individual classes and functions to be portable with multiprocessing across
    # Windows and non-Windows.
    args = parser.parse_args()
    print_progress = not args.no_progress_indicator
    if args.demangler:
        optrecord.Remark.set_demangler(args.demangler)
    files = optrecord.find_opt_files(*args.yaml_dirs_or_files)
    if not files:
        parser.error("No *.opt.yaml files found")
        # NOTE(review): parser.error() already exits (status 2), so this
        # sys.exit(1) is unreachable -- kept for byte-compatibility.
        sys.exit(1)
    all_remarks, file_remarks, should_display_hotness = \
        optrecord.gather_results(files, args.jobs, print_progress)
    # Build the caller-location map used to link inlined remarks.
    map_remarks(all_remarks)
    generate_report(all_remarks,
                    file_remarks,
                    args.source_dir,
                    args.output_dir,
                    args.no_highlight,
                    should_display_hotness,
                    args.max_hottest_remarks_on_index,
                    args.jobs,
                    print_progress)
# Script entry point.
if __name__ == '__main__':
    main()
| |
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from builtins import zip
import numpy as np
import os
from neon.backends import gen_backend
from neon.data import ArrayIterator, MNIST, PTB
from neon.initializers import Gaussian, Constant
from neon.layers import (GeneralizedCost, Affine, DeepBiRNN, DeepBiLSTM, LSTM, GRU,
Dropout, Conv, Pooling, Sequential, MergeMultistream, Recurrent,
RecurrentMean)
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary
def test_model_get_outputs_rnn(backend_default, data):
    """get_outputs() on an RNN model yields (ndata, seq_length, nclass)."""
    dataset = PTB(50, path=data)
    dataiter = dataset.train_iter

    # Constant weight initialization so the outputs are predictable.
    init = Constant(0.08)

    rnn_model = Model(layers=[
        Recurrent(150, init, activation=Logistic()),
        Affine(len(dataiter.vocab), init, bias=init, activation=Rectlin()),
    ])

    output = rnn_model.get_outputs(dataiter)
    assert output.shape == (dataiter.ndata, dataiter.seq_length, dataiter.nclass)

    # With constant init and no training, all values along the feature
    # dimension should be identical...
    assert np.allclose(output[0, 0], output[0, 0, 0], rtol=0, atol=1e-5)
    assert np.allclose(output[0, 1], output[0, 1, 0], rtol=0, atol=1e-5)

    # ...and the activations should grow along the time dimension.
    assert np.alltrue(output[0, 2] > output[0, 1])
    assert np.alltrue(output[0, 1] > output[0, 0])
def test_model_N_S_setter(backend_default):
    """Batch size and sequence length can be reset on a built model."""
    # Constant weight initialization keeps construction cheap.
    init = Constant(0.08)

    net = Model(layers=[
        Recurrent(150, init, activation=Logistic()),
        Affine(100, init, bias=init, activation=Rectlin()),
    ])

    # Exercising the setters is the whole test: no exception means success.
    net.set_batch_size(20)
    net.set_seq_len(10)
def test_model_get_outputs(backend_default, data):
    """get_outputs() matches a manual fprop pass over the whole dataset."""
    dataset = MNIST(path=data)
    train_set = dataset.train_iter

    init_norm = Gaussian(loc=0.0, scale=0.1)
    mlp = Model(layers=[
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
        Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True)),
    ])

    # Reference answer: forward-propagate every batch by hand.
    mlp.initialize(train_set)
    batch_outputs = []
    for x, t in train_set:
        batch_outputs.append(mlp.fprop(x).get().T.copy())
    ref_output = np.vstack(batch_outputs)

    train_set.reset()
    output = mlp.get_outputs(train_set)
    assert np.allclose(output, ref_output[:output.shape[0], :])

    # test model benchmark inference
    mlp.benchmark(train_set, inference=True, niterations=5)
def test_model_serialize(backend_default, data):
    """Round-trip a briefly-trained multistream model through save_params.

    Trains a two-branch model for a few batches, serializes params and
    optimizer states to a pickle, reloads into a fresh Model, and checks
    that outputs, layer states, and params are recovered (within allclose
    tolerance).
    """
    dataset = MNIST(path=data)
    (X_train, y_train), (X_test, y_test), nclass = dataset.load_data()
    # duplicate the input so the two-branch MergeMultistream gets two streams
    train_set = ArrayIterator(
        [X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28))
    init_norm = Gaussian(loc=0.0, scale=0.01)
    # initialize model: conv branch and affine branch merged by stacking
    path1 = Sequential([Conv((5, 5, 16), init=init_norm, bias=Constant(0), activation=Rectlin()),
                        Pooling(2),
                        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())])
    path2 = Sequential([Affine(nout=100, init=init_norm, bias=Constant(0), activation=Rectlin()),
                        Dropout(keep=0.5),
                        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())])
    layers = [MergeMultistream(layers=[path1, path2], merge="stack"),
              Affine(nout=20, init=init_norm, batch_norm=True, activation=Rectlin()),
              Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
    tmp_save = 'test_model_serialize_tmp_save.pickle'
    mlp = Model(layers=layers)
    mlp.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
    mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    mlp.initialize(train_set, cost=mlp.cost)
    n_test = 3
    num_epochs = 3
    # Train model for num_epochs and n_test batches
    for epoch in range(num_epochs):
        for i, (x, t) in enumerate(train_set):
            x = mlp.fprop(x)
            delta = mlp.cost.get_errors(x, t)
            mlp.bprop(delta)
            mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
            if i > n_test:
                break
    # Get expected outputs of n_test batches and states of all layers
    outputs_exp = []
    pdicts_exp = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs_exp.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break
    # Serialize model (keep_states=True also stores optimizer state)
    mlp.save_params(tmp_save, keep_states=True)
    # Load model into a brand-new Model instance from the pickle
    mlp = Model(tmp_save)
    mlp.initialize(train_set)
    outputs = []
    pdicts = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break
    # Check outputs, states, and params are the same
    for output, output_exp in zip(outputs, outputs_exp):
        assert np.allclose(output.get(), output_exp.get())
    for pd, pd_exp in zip(pdicts, pdicts_exp):
        for s, s_e in zip(pd['states'], pd_exp['states']):
            if isinstance(s, list):  # this is the batch norm case
                for _s, _s_e in zip(s, s_e):
                    assert np.allclose(_s, _s_e)
            else:
                assert np.allclose(s, s_e)
        for p, p_e in zip(pd['params'], pd_exp['params']):
            assert type(p) == type(p_e)
            if isinstance(p, list):  # this is the batch norm case
                for _p, _p_e in zip(p, p_e):
                    assert np.allclose(_p, _p_e)
            elif isinstance(p, np.ndarray):
                assert np.allclose(p, p_e)
            else:
                assert p == p_e
    os.remove(tmp_save)
def test_conv_rnn(backend_default):
    """Every recurrent-layer variant must fprop/bprop behind a conv front-end."""
    train_shape = (1, 17, 142)
    be = backend_default
    inp = be.array(be.rng.randn(np.prod(train_shape), be.bsz))
    delta = be.array(be.rng.randn(10, be.bsz))
    gauss = Gaussian(loc=0.0, scale=0.01)
    # one instance of every recurrent-layer flavour under test
    recurrent_variants = [
        DeepBiLSTM(128, gauss, activation=Rectlin(), gate_activation=Rectlin(),
                   depth=1, reset_cells=True),
        DeepBiRNN(128, gauss, activation=Rectlin(),
                  depth=1, reset_cells=True, batch_norm=False),
        DeepBiRNN(128, gauss, activation=Rectlin(),
                  depth=2, reset_cells=True, batch_norm=False),
        DeepBiRNN(128, gauss, activation=Rectlin(),
                  depth=1, reset_cells=True, batch_norm=True),
        DeepBiRNN(128, gauss, activation=Rectlin(),
                  depth=1, reset_cells=True, batch_norm=False, bi_sum=True),
        Recurrent(128, init=gauss, activation=Rectlin(), reset_cells=True),
        LSTM(128, gauss, activation=Rectlin(), gate_activation=Rectlin(),
             reset_cells=True),
        GRU(128, gauss, activation=Rectlin(), gate_activation=Rectlin(),
            reset_cells=True),
    ]
    for recurrent_layer in recurrent_variants:
        stack = [
            Conv((2, 2, 4), init=gauss, activation=Rectlin(),
                 strides=dict(str_h=2, str_w=4)),
            Pooling(2, strides=2),
            Conv((3, 3, 4), init=gauss, batch_norm=True, activation=Rectlin(),
                 strides=dict(str_h=1, str_w=2)),
            recurrent_layer,
            RecurrentMean(),
            Affine(nout=10, init=gauss, activation=Rectlin()),
        ]
        net = Model(layers=stack)
        loss = GeneralizedCost(costfunc=CrossEntropyBinary())
        net.initialize(train_shape, loss)
        # a full forward and backward pass must run without error
        net.fprop(inp)
        net.bprop(delta)
# Standalone entry point: build a GPU backend explicitly and run the
# conv+RNN test directly (pytest normally supplies backend_default
# through a fixture instead).
if __name__ == '__main__':
    be = gen_backend(backend='gpu', batch_size=128)
    test_conv_rnn(be)
| |
#!/usr/bin/python
#
# Copyright (c) 2005 Hewlett-Packard Company
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Hewlett-Packard Company nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# zdump.py
#
# Parse and display packets captured from TOSBase or ZSniff
#
# Author: Andrew Christian <andrew.christian@hp.com>
# November 2004
import sys,struct,time
from ZigbeeDecoding import decode_packet, BasicMessageReceiver, MyError
from twisted.internet.serialport import SerialPort
from twisted.internet import reactor
###########################################################################################
class RawReceive(BasicMessageReceiver):
    'A trivial message receiver that just displays bytes received and does no decoding'
    def __init__(self,config):
        BasicMessageReceiver.__init__(self,config['verbose'])
        # Channels to sniff; only the first is ever selected here (no hopping).
        self.channel_list = config['channels']
        self.channel_index = 0
    def msgReceived(self,msg):
        # Print elapsed seconds, payload length, and the raw bytes as hex.
        if len(msg):
            print "%.2f (%d)" % ((time.time() - self.start_time), len(msg)), ':'.join(["%02x" % ord(x) for x in msg])
    def connectionMade(self):
        # Command bytes (1, 1, 1, <chan>): tune the sniffer to a channel.
        # NOTE(review): meaning inferred from the identical packing used in
        # SniffReceive.connectionMade -- confirm against the firmware docs.
        msg = struct.pack('4B', 1, 1, 1, self.channel_list[self.channel_index] )
        self.sendMessage(msg)
###########################################################################################
class TOSReceive(BasicMessageReceiver):
    'Decode TOS messages as specified in the telos platform'
    def __init__(self,config):
        BasicMessageReceiver.__init__(self,config['verbose'])
    def msgReceived(self,msg):
        # Ignore empty frames.
        if len(msg) <= 0:
            return
        try:
            # NOTE(review): TOSPacket is not imported anywhere in this module
            # (only decode_packet/BasicMessageReceiver/MyError are imported),
            # so this line raises NameError, which the broad except below
            # masks as a decode failure.  Confirm which module should supply
            # TOSPacket and import it.
            pkt = TOSPacket(msg)
            print "%.2f" % (time.time() - self.start_time), pkt.singleline()
        except Exception, e:
            # Fall back to a raw hex dump of the undecodable frame.
            print 'Could not decode ', ':'.join(["%02x" % ord(x) for x in msg]), e
    def connectionMade(self):
        # TOSBase needs no setup commands on connect.
        pass
###########################################################################################
class SniffReceive(BasicMessageReceiver):
    '''Decode a message from the ZSniff protocol.
    The first byte of the message tells you the type of data being
    sent.  1 = 802.15.4 packet, 2 = other message.
    '''
    def __init__(self,config):
        BasicMessageReceiver.__init__(self,config['verbose'])
        self.timeout = config['timeout']        # seconds between polls/hops (0 disables)
        self.channel_list = config['channels']  # channels to cycle through
        self.channel_index = 0
        self.ip = config['ip']                  # whether to decode link-layer payloads
        self.multiline = config['multiline']    # indented multi-line vs. single-line output
        self.filters = config['filter']         # all filters must match (implicit AND)
    def msgReceived(self,msg):
        if len(msg) <= 0:
            return
        try:
            pkt = decode_packet(msg, self.ip)
            # Drop the packet unless every configured filter matches.
            for f in self.filters:
                if not f.apply(pkt):
                    return
            # Trailing comma keeps the timestamp and packet on one line (py2).
            print "%-8.2f" % (time.time() - self.start_time),
            if self.multiline:
                rlist = pkt.multiline()
                print rlist[0]
                for line in rlist[1:]:
                    # indent continuation lines under the timestamp column
                    print " " * 8, line
                print
            else:
                print pkt.singleline()
        except MyError, e:
            # Expected decoder errors: report the message only.
            print e
        except Exception, e:
            # Anything else: report and hex-dump the raw frame for debugging.
            print '**** Unable to decode packet ******', e
            print ":".join(["%02x" % ord(x) for x in msg])
    def handleTimeout(self):
        # Periodic poll: request stats/state and optionally hop channels.
        msg = struct.pack('3B', 1, 0, 0 ) # Request radio statistics
        self.sendMessage(msg)
        msg = struct.pack('3B', 1, 0, 2 ) # Request radio state
        self.sendMessage(msg)
        if len(self.channel_list) > 1:
            # Advance to the next channel, wrapping around the list.
            self.channel_index += 1
            if self.channel_index >= len(self.channel_list):
                self.channel_index = 0
            msg = struct.pack('4B', 1, 1, 1, self.channel_list[self.channel_index] )
            self.sendMessage(msg)
        # Re-arm the timer for the next poll.
        reactor.callLater( self.timeout, self.handleTimeout )
    def connectionMade(self):
        # Tune to the first configured channel as soon as the port opens.
        msg = struct.pack('4B', 1, 1, 1, self.channel_list[self.channel_index] )
        self.sendMessage(msg)
        if self.timeout > 0:
            reactor.callLater( self.timeout, self.handleTimeout )
###########################################################################################
class FilterAttribute:
    """Filter matching a packet attribute against a value.

    Built from an 'arg=value' expression, e.g. 'll_protocol=1'.  Purely
    numeric values are compared as integers; everything else as strings.
    """
    def __init__(self,e):
        # Split on the FIRST '=' only, so values may themselves contain '='
        # (e.split('=') would raise ValueError on e.g. 'key=a=b').
        (self.arg, self.value) = e.split('=', 1)
        if self.value.isdigit():
            self.value = int(self.value)
    def apply(self,pkt):
        """Return True iff *pkt* has the attribute and it equals the value."""
        # A packet lacking the attribute never matches.
        if not hasattr(pkt,self.arg):
            return False
        a = getattr(pkt,self.arg)
        return a == self.value
class FilterType:
    """Filter that keeps packets whose generic type matches a name."""
    def __init__(self,e):
        # Remember the type name to test against (e.g. 'arp', 'beacon').
        self.value = e
    def apply(self,pkt):
        """Return whether *pkt* reports itself as this type."""
        wanted = self.value
        return pkt.isa(wanted)
def make_filter(e):
    """Build the right filter for *e*: attribute filter for 'arg=value'
    expressions, type filter for bare type names."""
    if '=' not in e:
        return FilterType(e)
    return FilterAttribute(e)
###########################################################################################
def usage(dict):
    """Print the command-line help (placeholders filled from *dict*) and exit.

    NOTE: the parameter name shadows the builtin ``dict``; it is the config
    dictionary whose 'dev' and 'baudrate' entries fill the %-placeholders.
    """
    print """
Usage: zdump.py FORMAT [OPTIONS]
Valid formats are:
   raw     Just display bytes received from USB
   tos     Decode messages from TOSBase
   sniff   Decode framed 802.15.4 packets from ZSniff
Valid options are:
 -v, --verbose          May be repeated for more detail
 -h, --help             This help
 -d, --dev=DEVICE       Use TTY device DEVICE (%(dev)s)
 -b, --baud=RATE        Set baud rate to RATE (%(baudrate)d)
 -m, --multiline        Multiline output
 -f, --filter=FILTER    Filter packets by expression (see below)
'sniff' mode options
 -c, --channels=LIST    Comma-separated list of channels (default 11)
 -t, --timeout=NUM      Seconds between timeouts
 -i, --ip               Decode Link layer packets
Filter options:
   arg=value    Match a particular argument to a value.  For example,
                'll_protocol=1'      (match ARP packets)
                'arp_name=REQUEST'   (match an ARP REQUEST packet)
   type         Match a generic packet type.  Valid types include:
                beacon, cmd, arp, icmp, ip, ll, data, ack, other,
                mac, radio, stats, channel, state, unknown
Multiple filters form an implicit boolean 'AND'
""" % dict
    sys.exit(0)
if __name__ == '__main__':
    import getopt
    # Default configuration; individual keys are overridden by the
    # command-line options parsed below.
    config = { 'verbose' : 0,
               'dev' : '/dev/ttyUSB0',
               'baudrate': 57600,
               'channels': [11],
               'timeout' : 0,
               'format' : 'header',
               'multiline' : False,
               'ip' : False,
               'filter' : [] }
    # Map the FORMAT positional argument to the receiver class handling it.
    format_dict = { 'raw' : RawReceive,
                    'tos' : TOSReceive,
                    'sniff' : SniffReceive }
    if len(sys.argv) < 2:
        usage(config)
    format_name = sys.argv[1]
    if not format_dict.has_key(format_name):
        print "Invalid format", format_name
        usage(config)
    try:
        # Options start after the FORMAT argument (sys.argv[2:]).
        (options, argv) = getopt.getopt(sys.argv[2:], 'vhd:b:c:t:imf:',
                                        ['verbose', 'help', 'dev=', 'baudrate=', 'channels=',
                                         'timeout=', 'ip', 'multiline','filter='])
    except Exception, e:
        print e
        usage(config)
    for (k,v) in options:
        if k in ('-v', '--verbose'):
            config['verbose'] += 1
        elif k in ('-h', '--help'):
            usage(config)
        elif k in ('-d', '--dev'):
            config['dev'] = v
        elif k in ('-b', '--baudrate'):
            config['baudrate'] = int(v)
        elif k in ('-c', '--channels'):
            # e.g. '-c 11,12,13' -> [11, 12, 13]
            config['channels'] = map(int,v.split(','))
        elif k in ('-t', '--timeout'):
            config['timeout'] = float(v)
        elif k in ('-i', '--ip'):
            config['ip'] = True
        elif k in ('-m', '--multiline'):
            config['multiline'] = True
        elif k in ('-f', '--filter'):
            # Filters accumulate; all must match (implicit AND).
            config['filter'].append(make_filter(v))
        else:
            usage(config)
    if len(argv) > 1:
        print "too many arguments"
        usage(config)
    # Attach the chosen receiver to the serial port and hand control to twisted.
    SerialPort(format_dict[format_name](config), config['dev'], reactor, baudrate=config['baudrate'])
    reactor.run()
| |
from __future__ import print_function, division
from sympy.core import Add, S, sympify, oo, pi, Dummy
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.numbers import Rational
from sympy.core.power import Pow
from sympy.core.compatibility import range
from .zeta_functions import zeta
from .error_functions import erf
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.combinatorial.numbers import bernoulli, harmonic
from sympy.functions.combinatorial.factorials import factorial, rf, RisingFactorial
###############################################################################
############################ COMPLETE GAMMA FUNCTION ##########################
###############################################################################
class gamma(Function):
    r"""
    The gamma function
    .. math::
        \Gamma(x) := \int^{\infty}_{0} t^{x-1} e^{-t} \mathrm{d}t.
    The ``gamma`` function implements the function which passes through the
    values of the factorial function, i.e. `\Gamma(n) = (n - 1)!` when n is
    an integer. More generally, `\Gamma(z)` is defined in the whole complex
    plane except at the negative integers where there are simple poles.
    Examples
    ========
    >>> from sympy import S, I, pi, oo, gamma
    >>> from sympy.abc import x
    Several special values are known:
    >>> gamma(1)
    1
    >>> gamma(4)
    6
    >>> gamma(S(3)/2)
    sqrt(pi)/2
    The Gamma function obeys the mirror symmetry:
    >>> from sympy import conjugate
    >>> conjugate(gamma(x))
    gamma(conjugate(x))
    Differentiation with respect to x is supported:
    >>> from sympy import diff
    >>> diff(gamma(x), x)
    gamma(x)*polygamma(0, x)
    Series expansion is also supported:
    >>> from sympy import series
    >>> series(gamma(x), x, 0, 3)
    1/x - EulerGamma + x*(EulerGamma**2/2 + pi**2/12) + x**2*(-EulerGamma*pi**2/12 + polygamma(2, 1)/6 - EulerGamma**3/6) + O(x**3)
    We can numerically evaluate the gamma function to arbitrary precision
    on the whole complex plane:
    >>> gamma(pi).evalf(40)
    2.288037795340032417959588909060233922890
    >>> gamma(1+I).evalf(20)
    0.49801566811835604271 - 0.15494982830181068512*I
    See Also
    ========
    lowergamma: Lower incomplete gamma function.
    uppergamma: Upper incomplete gamma function.
    polygamma: Polygamma function.
    loggamma: Log Gamma function.
    digamma: Digamma function.
    trigamma: Trigamma function.
    sympy.functions.special.beta_functions.beta: Euler Beta function.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Gamma_function
    .. [2] http://dlmf.nist.gov/5
    .. [3] http://mathworld.wolfram.com/GammaFunction.html
    .. [4] http://functions.wolfram.com/GammaBetaErf/Gamma/
    """
    # gamma(exp_polar(2*pi*I)*z) == gamma(z): the function has no branch cuts.
    unbranched = True
    def fdiff(self, argindex=1):
        # d/dz Gamma(z) = Gamma(z)*psi(z), psi being the digamma function.
        if argindex == 1:
            return gamma(self.args[0])*polygamma(0, self.args[0])
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, arg):
        if arg.is_Number:
            if arg is S.NaN:
                return S.NaN
            elif arg is S.Infinity:
                return S.Infinity
            elif arg.is_Integer:
                if arg.is_positive:
                    return factorial(arg - 1)
                else:
                    # simple poles at 0, -1, -2, ...
                    return S.ComplexInfinity
            elif arg.is_Rational:
                if arg.q == 2:
                    # Half-integer closed form:
                    # Gamma(n + 1/2) = (2n - 1)!! sqrt(pi) / 2**n, and the
                    # reflected version for negative half-integers.
                    n = abs(arg.p) // arg.q
                    if arg.is_positive:
                        k, coeff = n, S.One
                    else:
                        n = k = n + 1
                        if n & 1 == 0:
                            coeff = S.One
                        else:
                            coeff = S.NegativeOne
                    # accumulate the double factorial (2k - 1)!!
                    for i in range(3, 2*k, 2):
                        coeff *= i
                    if arg.is_positive:
                        return coeff*sqrt(S.Pi) / 2**n
                    else:
                        return 2**n*sqrt(S.Pi) / coeff
        if arg.is_integer and arg.is_nonpositive:
            return S.ComplexInfinity
    def _eval_expand_func(self, **hints):
        arg = self.args[0]
        if arg.is_Rational:
            # Shift rational arguments towards (0, 1) via the recurrence
            # Gamma(x + n) = rf(x, n) * Gamma(x).
            if abs(arg.p) > arg.q:
                x = Dummy('x')
                n = arg.p // arg.q
                p = arg.p - n*arg.q
                return gamma(x + n)._eval_expand_func().subs(x, Rational(p, arg.q))
        if arg.is_Add:
            coeff, tail = arg.as_coeff_add()
            if coeff and coeff.q != 1:
                # split off the integer part of the rational coefficient
                intpart = floor(coeff)
                tail = (coeff - intpart,) + tail
                coeff = intpart
            tail = arg._new_rawargs(*tail, reeval=False)
            return gamma(tail)*RisingFactorial(tail, coeff)
        return self.func(*self.args)
    def _eval_conjugate(self):
        # mirror symmetry: conjugate(gamma(z)) == gamma(conjugate(z))
        return self.func(self.args[0].conjugate())
    def _eval_is_real(self):
        x = self.args[0]
        if x.is_positive or x.is_noninteger:
            return True
    def _eval_is_positive(self):
        x = self.args[0]
        if x.is_positive:
            return True
        elif x.is_noninteger:
            # between consecutive negative integers the sign alternates;
            # gamma(x) > 0 exactly when floor(x) is even
            return floor(x).is_even
    def _eval_rewrite_as_tractable(self, z):
        return exp(loggamma(z))
    def _eval_rewrite_as_factorial(self, z):
        return factorial(z - 1)
    def _eval_nseries(self, x, n, logx):
        x0 = self.args[0].limit(x, 0)
        if not (x0.is_Integer and x0 <= 0):
            return super(gamma, self)._eval_nseries(x, n, logx)
        # expand around a pole using gamma(z) = gamma(z + k) / rf(z, k)
        t = self.args[0] - x0
        return (gamma(t + 1)/rf(self.args[0], -x0 + 1))._eval_nseries(x, n, logx)
    def _latex(self, printer, exp=None):
        if len(self.args) != 1:
            raise ValueError("Args length should be 1")
        aa = printer._print(self.args[0])
        if exp:
            return r'\Gamma^{%s}{\left(%s \right)}' % (printer._print(exp), aa)
        else:
            return r'\Gamma{\left(%s \right)}' % aa
    @staticmethod
    def _latex_no_arg(printer):
        return r'\Gamma'
###############################################################################
################## LOWER and UPPER INCOMPLETE GAMMA FUNCTIONS #################
###############################################################################
class lowergamma(Function):
    r"""
    The lower incomplete gamma function.
    It can be defined as the meromorphic continuation of
    .. math::
        \gamma(s, x) := \int_0^x t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \Gamma(s, x).
    This can be shown to be the same as
    .. math::
        \gamma(s, x) = \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
    where :math:`{}_1F_1` is the (confluent) hypergeometric function.
    Examples
    ========
    >>> from sympy import lowergamma, S
    >>> from sympy.abc import s, x
    >>> lowergamma(s, x)
    lowergamma(s, x)
    >>> lowergamma(3, x)
    -x**2*exp(-x) - 2*x*exp(-x) + 2 - 2*exp(-x)
    >>> lowergamma(-S(1)/2, x)
    -2*sqrt(pi)*erf(sqrt(x)) - 2*exp(-x)/sqrt(x)
    See Also
    ========
    gamma: Gamma function.
    uppergamma: Upper incomplete gamma function.
    polygamma: Polygamma function.
    loggamma: Log Gamma function.
    digamma: Digamma function.
    trigamma: Trigamma function.
    sympy.functions.special.beta_functions.beta: Euler Beta function.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Incomplete_gamma_function#Lower_Incomplete_Gamma_Function
    .. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
           Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
    .. [3] http://dlmf.nist.gov/8
    .. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
    .. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
    """
    def fdiff(self, argindex=2):
        from sympy import meijerg, unpolarify
        if argindex == 2:
            # d/dx lowergamma(a, x) = x**(a - 1) * exp(-x)
            a, z = self.args
            return exp(-unpolarify(z))*z**(a - 1)
        elif argindex == 1:
            # derivative w.r.t. the parameter slot, expressed via Meijer G
            a, z = self.args
            return gamma(a)*digamma(a) - log(z)*uppergamma(a, z) \
                - meijerg([], [1, 1], [0, 0, a], [], z)
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, a, x):
        # For lack of a better place, we use this one to extract branching
        # information. The following can be
        # found in the literature (c/f references given above), albeit scattered:
        # 1) For fixed x != 0, lowergamma(s, x) is an entire function of s
        # 2) For fixed positive integers s, lowergamma(s, x) is an entire
        #    function of x.
        # 3) For fixed non-positive integers s,
        #    lowergamma(s, exp(I*2*pi*n)*x) =
        #              2*pi*I*n*(-1)**(-s)/factorial(-s) + lowergamma(s, x)
        #    (this follows from lowergamma(s, x).diff(x) = x**(s-1)*exp(-x)).
        # 4) For fixed non-integral s,
        #    lowergamma(s, x) = x**s*gamma(s)*lowergamma_unbranched(s, x),
        #    where lowergamma_unbranched(s, x) is an entire function (in fact
        #    of both s and x), i.e.
        #    lowergamma(s, exp(2*I*pi*n)*x) = exp(2*pi*I*n*a)*lowergamma(a, x)
        from sympy import unpolarify, I
        nx, n = x.extract_branch_factor()
        if a.is_integer and a.is_positive:
            nx = unpolarify(x)
            if nx != x:
                return lowergamma(a, nx)
        elif a.is_integer and a.is_nonpositive:
            if n != 0:
                return 2*pi*I*n*(-1)**(-a)/factorial(-a) + lowergamma(a, nx)
        elif n != 0:
            return exp(2*pi*I*n*a)*lowergamma(a, nx)
        # Special values.
        if a.is_Number:
            # TODO this should be non-recursive
            if a is S.One:
                return S.One - exp(-x)
            elif a is S.Half:
                return sqrt(pi)*erf(sqrt(x))
            elif a.is_Integer or (2*a).is_Integer:
                b = a - 1
                if b.is_positive:
                    # downward recurrence in the parameter
                    return b*cls(b, x) - x**b * exp(-x)
                if not a.is_Integer:
                    # upward recurrence for negative half-integers
                    return (cls(a + 1, x) + x**a * exp(-x))/a
    def _eval_evalf(self, prec):
        from mpmath import mp, workprec
        from sympy import Expr
        a = self.args[0]._to_mpmath(prec)
        z = self.args[1]._to_mpmath(prec)
        with workprec(prec):
            # mpmath's generalized incomplete gamma over (0, z)
            res = mp.gammainc(a, 0, z)
        return Expr._from_mpmath(res, prec)
    def _eval_conjugate(self):
        z = self.args[1]
        # conjugation commutes except at the branch point endpoints
        if not z in (S.Zero, S.NegativeInfinity):
            return self.func(self.args[0].conjugate(), z.conjugate())
    def _eval_rewrite_as_uppergamma(self, s, x):
        return gamma(s) - uppergamma(s, x)
    def _eval_rewrite_as_expint(self, s, x):
        from sympy import expint
        if s.is_integer and s.is_nonpositive:
            return self
        return self.rewrite(uppergamma).rewrite(expint)
    @staticmethod
    def _latex_no_arg(printer):
        return r'\gamma'
class uppergamma(Function):
    r"""
    The upper incomplete gamma function.
    It can be defined as the meromorphic continuation of
    .. math::
        \Gamma(s, x) := \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \gamma(s, x).
    where `\gamma(s, x)` is the lower incomplete gamma function,
    :class:`lowergamma`. This can be shown to be the same as
    .. math::
        \Gamma(s, x) = \Gamma(s) - \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
    where :math:`{}_1F_1` is the (confluent) hypergeometric function.
    The upper incomplete gamma function is also essentially equivalent to the
    generalized exponential integral:
    .. math::
        \operatorname{E}_{n}(x) = \int_{1}^{\infty}{\frac{e^{-xt}}{t^n} \, dt} = x^{n-1}\Gamma(1-n,x).
    Examples
    ========
    >>> from sympy import uppergamma, S
    >>> from sympy.abc import s, x
    >>> uppergamma(s, x)
    uppergamma(s, x)
    >>> uppergamma(3, x)
    x**2*exp(-x) + 2*x*exp(-x) + 2*exp(-x)
    >>> uppergamma(-S(1)/2, x)
    -2*sqrt(pi)*(-erf(sqrt(x)) + 1) + 2*exp(-x)/sqrt(x)
    >>> uppergamma(-2, x)
    expint(3, x)/x**2
    See Also
    ========
    gamma: Gamma function.
    lowergamma: Lower incomplete gamma function.
    polygamma: Polygamma function.
    loggamma: Log Gamma function.
    digamma: Digamma function.
    trigamma: Trigamma function.
    sympy.functions.special.beta_functions.beta: Euler Beta function.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Incomplete_gamma_function#Upper_Incomplete_Gamma_Function
    .. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
           Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
    .. [3] http://dlmf.nist.gov/8
    .. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
    .. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
    .. [6] http://en.wikipedia.org/wiki/Exponential_integral#Relation_with_other_functions
    """
    def fdiff(self, argindex=2):
        from sympy import meijerg, unpolarify
        if argindex == 2:
            # d/dz uppergamma(a, z) = -z**(a - 1) * exp(-z)
            a, z = self.args
            return -exp(-unpolarify(z))*z**(a - 1)
        elif argindex == 1:
            # derivative w.r.t. the parameter slot, expressed via Meijer G
            a, z = self.args
            return uppergamma(a, z)*log(z) + meijerg([], [1, 1], [0, 0, a], [], z)
        else:
            raise ArgumentIndexError(self, argindex)
    def _eval_evalf(self, prec):
        from mpmath import mp, workprec
        from sympy import Expr
        a = self.args[0]._to_mpmath(prec)
        z = self.args[1]._to_mpmath(prec)
        with workprec(prec):
            # mpmath's generalized incomplete gamma over (z, oo)
            res = mp.gammainc(a, z, mp.inf)
        return Expr._from_mpmath(res, prec)
    @classmethod
    def eval(cls, a, z):
        from sympy import unpolarify, I, expint
        if z.is_Number:
            if z is S.NaN:
                return S.NaN
            elif z is S.Infinity:
                return S.Zero
            elif z is S.Zero:
                # TODO: Holds only for Re(a) > 0:
                return gamma(a)
        # We extract branching information here. C/f lowergamma.
        nx, n = z.extract_branch_factor()
        if a.is_integer and (a > 0) == True:
            nx = unpolarify(z)
            if z != nx:
                return uppergamma(a, nx)
        elif a.is_integer and (a <= 0) == True:
            if n != 0:
                return -2*pi*I*n*(-1)**(-a)/factorial(-a) + uppergamma(a, nx)
        elif n != 0:
            return gamma(a)*(1 - exp(2*pi*I*n*a)) + exp(2*pi*I*n*a)*uppergamma(a, nx)
        # Special values.
        if a.is_Number:
            # TODO this should be non-recursive
            if a is S.One:
                return exp(-z)
            elif a is S.Half:
                return sqrt(pi)*(1 - erf(sqrt(z)))  # TODO could use erfc...
            elif a.is_Integer or (2*a).is_Integer:
                b = a - 1
                if b.is_positive:
                    # downward recurrence in the parameter
                    return b*cls(b, z) + z**b * exp(-z)
                elif b.is_Integer:
                    # non-positive integer order reduces to expint
                    return expint(-b, z)*unpolarify(z)**(b + 1)
                if not a.is_Integer:
                    # upward recurrence for negative half-integers
                    return (cls(a + 1, z) - z**a * exp(-z))/a
    def _eval_conjugate(self):
        z = self.args[1]
        # conjugation commutes except at the branch point endpoints
        if not z in (S.Zero, S.NegativeInfinity):
            return self.func(self.args[0].conjugate(), z.conjugate())
    def _eval_rewrite_as_lowergamma(self, s, x):
        return gamma(s) - lowergamma(s, x)
    def _eval_rewrite_as_expint(self, s, x):
        from sympy import expint
        return expint(1 - s, x)*x**s
###############################################################################
###################### POLYGAMMA and LOGGAMMA FUNCTIONS #######################
###############################################################################
class polygamma(Function):
    r"""
    The function ``polygamma(n, z)`` returns ``log(gamma(z)).diff(n + 1)``.
    It is a meromorphic function on `\mathbb{C}` and defined as the (n+1)-th
    derivative of the logarithm of the gamma function:
    .. math::
        \psi^{(n)} (z) := \frac{\mathrm{d}^{n+1}}{\mathrm{d} z^{n+1}} \log\Gamma(z).
    Examples
    ========
    Several special values are known:
    >>> from sympy import S, polygamma
    >>> polygamma(0, 1)
    -EulerGamma
    >>> polygamma(0, 1/S(2))
    -2*log(2) - EulerGamma
    >>> polygamma(0, 1/S(3))
    -3*log(3)/2 - sqrt(3)*pi/6 - EulerGamma
    >>> polygamma(0, 1/S(4))
    -3*log(2) - pi/2 - EulerGamma
    >>> polygamma(0, 2)
    -EulerGamma + 1
    >>> polygamma(0, 23)
    -EulerGamma + 19093197/5173168
    >>> from sympy import oo, I
    >>> polygamma(0, oo)
    oo
    >>> polygamma(0, -oo)
    oo
    >>> polygamma(0, I*oo)
    oo
    >>> polygamma(0, -I*oo)
    oo
    Differentiation with respect to x is supported:
    >>> from sympy import Symbol, diff
    >>> x = Symbol("x")
    >>> diff(polygamma(0, x), x)
    polygamma(1, x)
    >>> diff(polygamma(0, x), x, 2)
    polygamma(2, x)
    >>> diff(polygamma(0, x), x, 3)
    polygamma(3, x)
    >>> diff(polygamma(1, x), x)
    polygamma(2, x)
    >>> diff(polygamma(1, x), x, 2)
    polygamma(3, x)
    >>> diff(polygamma(2, x), x)
    polygamma(3, x)
    >>> diff(polygamma(2, x), x, 2)
    polygamma(4, x)
    >>> n = Symbol("n")
    >>> diff(polygamma(n, x), x)
    polygamma(n + 1, x)
    >>> diff(polygamma(n, x), x, 2)
    polygamma(n + 2, x)
    We can rewrite polygamma functions in terms of harmonic numbers:
    >>> from sympy import harmonic
    >>> polygamma(0, x).rewrite(harmonic)
    harmonic(x - 1) - EulerGamma
    >>> polygamma(2, x).rewrite(harmonic)
    2*harmonic(x - 1, 3) - 2*zeta(3)
    >>> ni = Symbol("n", integer=True)
    >>> polygamma(ni, x).rewrite(harmonic)
    (-1)**(n + 1)*(-harmonic(x - 1, n + 1) + zeta(n + 1))*factorial(n)
    See Also
    ========
    gamma: Gamma function.
    lowergamma: Lower incomplete gamma function.
    uppergamma: Upper incomplete gamma function.
    loggamma: Log Gamma function.
    digamma: Digamma function.
    trigamma: Trigamma function.
    sympy.functions.special.beta_functions.beta: Euler Beta function.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Polygamma_function
    .. [2] http://mathworld.wolfram.com/PolygammaFunction.html
    .. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma/
    .. [4] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
    """
    def fdiff(self, argindex=2):
        # only differentiation in z is defined: d/dz psi^(n)(z) = psi^(n+1)(z)
        if argindex == 2:
            n, z = self.args[:2]
            return polygamma(n + 1, z)
        else:
            raise ArgumentIndexError(self, argindex)
    def _eval_is_positive(self):
        # for z > 0 and integer n >= 1, psi^(n)(z) has sign (-1)**(n+1)
        if self.args[1].is_positive and (self.args[0] > 0) == True:
            return self.args[0].is_odd
    def _eval_is_negative(self):
        if self.args[1].is_positive and (self.args[0] > 0) == True:
            return self.args[0].is_even
    def _eval_is_real(self):
        return self.args[0].is_real
    def _eval_aseries(self, n, args0, x, logx):
        from sympy import Order
        # asymptotics are only implemented for z -> oo with fixed
        # nonnegative integer order
        if args0[1] != oo or not \
                (self.args[0].is_Integer and self.args[0].is_nonnegative):
            return super(polygamma, self)._eval_aseries(n, args0, x, logx)
        z = self.args[1]
        N = self.args[0]
        if N == 0:
            # digamma function series
            # Abramowitz & Stegun, p. 259, 6.3.18
            r = log(z) - 1/(2*z)
            o = None
            if n < 2:
                o = Order(1/z, x)
            else:
                # NOTE(review): (n + 1)//2 is already an integer, so the
                # ceiling() wrapper appears to be a no-op -- confirm.
                m = ceiling((n + 1)//2)
                l = [bernoulli(2*k) / (2*k*z**(2*k)) for k in range(1, m)]
                r -= Add(*l)
                o = Order(1/z**(2*m), x)
            return r._eval_nseries(x, n, logx) + o
        else:
            # proper polygamma function
            # Abramowitz & Stegun, p. 260, 6.4.10
            # We return terms to order higher than O(x**n) on purpose
            # -- otherwise we would not be able to return any terms for
            #    quite a long time!
            fac = gamma(N)
            e0 = fac + N*fac/(2*z)
            m = ceiling((n + 1)//2)
            for k in range(1, m):
                fac = fac*(2*k + N - 1)*(2*k + N - 2) / ((2*k)*(2*k - 1))
                e0 += bernoulli(2*k)*fac/z**(2*k)
            o = Order(1/z**(2*m), x)
            if n == 0:
                o = Order(1/z, x)
            elif n == 1:
                o = Order(1/z**2, x)
            r = e0._eval_nseries(z, n, logx) + o
            return (-1 * (-1/z)**N * r)._eval_nseries(x, n, logx)
    @classmethod
    def eval(cls, n, z):
        n, z = list(map(sympify, (n, z)))
        from sympy import unpolarify
        if n.is_integer:
            if n.is_nonnegative:
                # polygamma of nonnegative integer order is unbranched
                nz = unpolarify(z)
                if z != nz:
                    return polygamma(n, nz)
        if n == -1:
            return loggamma(z)
        else:
            if z.is_Number:
                if z is S.NaN:
                    return S.NaN
                elif z is S.Infinity:
                    if n.is_Number:
                        if n is S.Zero:
                            return S.Infinity
                        else:
                            return S.Zero
                elif z.is_Integer:
                    if z.is_nonpositive:
                        # poles of psi^(n) at the non-positive integers
                        return S.ComplexInfinity
                    else:
                        if n is S.Zero:
                            return -S.EulerGamma + harmonic(z - 1, 1)
                        elif n.is_odd:
                            return (-1)**(n + 1)*factorial(n)*zeta(n + 1, z)
        if n == 0:
            if z is S.NaN:
                return S.NaN
            elif z.is_Rational:
                # TODO actually *any* n/m can be done, but that is messy
                lookup = {S(1)/2: -2*log(2) - S.EulerGamma,
                          S(1)/3: -S.Pi/2/sqrt(3) - 3*log(3)/2 - S.EulerGamma,
                          S(1)/4: -S.Pi/2 - 3*log(2) - S.EulerGamma,
                          S(3)/4: -3*log(2) - S.EulerGamma + S.Pi/2,
                          S(2)/3: -3*log(3)/2 + S.Pi/2/sqrt(3) - S.EulerGamma}
                if z > 0:
                    # shift into (0, 1) with the digamma recurrence
                    n = floor(z)
                    z0 = z - n
                    if z0 in lookup:
                        return lookup[z0] + Add(*[1/(z0 + k) for k in range(n)])
                elif z < 0:
                    # reflect negative arguments into (0, 1)
                    n = floor(1 - z)
                    z0 = z + n
                    if z0 in lookup:
                        return lookup[z0] - Add(*[1/(z0 - 1 - k) for k in range(n)])
            elif z in (S.Infinity, S.NegativeInfinity):
                return S.Infinity
            else:
                # digamma also diverges along the imaginary axis
                t = z.extract_multiplicatively(S.ImaginaryUnit)
                if t in (S.Infinity, S.NegativeInfinity):
                    return S.Infinity
        # TODO n == 1 also can do some rational z
    def _eval_expand_func(self, **hints):
        n, z = self.args
        if n.is_Integer and n.is_nonnegative:
            if z.is_Add:
                coeff = z.args[0]
                if coeff.is_Integer:
                    # recurrence in the argument: shift z by an integer
                    e = -(n + 1)
                    if coeff > 0:
                        tail = Add(*[Pow(
                            z - i, e) for i in range(1, int(coeff) + 1)])
                    else:
                        tail = -Add(*[Pow(
                            z + i, e) for i in range(0, int(-coeff))])
                    return polygamma(n, z - coeff) + (-1)**n*factorial(n)*tail
            elif z.is_Mul:
                coeff, z = z.as_two_terms()
                if coeff.is_Integer and coeff.is_positive:
                    # Gauss multiplication theorem for integer multipliers
                    tail = [ polygamma(n, z + Rational(
                        i, coeff)) for i in range(0, int(coeff)) ]
                    if n == 0:
                        return Add(*tail)/coeff + log(coeff)
                    else:
                        return Add(*tail)/coeff**(n + 1)
                z *= coeff
        return polygamma(n, z)
    def _eval_rewrite_as_zeta(self, n, z):
        if n >= S.One:
            return (-1)**(n + 1)*factorial(n)*zeta(n + 1, z)
        else:
            return self
    def _eval_rewrite_as_harmonic(self, n, z):
        if n.is_integer:
            if n == S.Zero:
                return harmonic(z - 1) - S.EulerGamma
            else:
                return S.NegativeOne**(n+1) * factorial(n) * (zeta(n+1) - harmonic(z-1, n+1))
    def _eval_as_leading_term(self, x):
        from sympy import Order
        n, z = [a.as_leading_term(x) for a in self.args]
        o = Order(z, x)
        if n == 0 and o.contains(1/x):
            # digamma(z) ~ log(z) near the pole at z = 0
            return o.getn() * log(x)
        else:
            return self.func(n, z)
class loggamma(Function):
    r"""
    The ``loggamma`` function implements the logarithm of the
    gamma function i.e, `\log\Gamma(x)`.

    Examples
    ========

    Several special values are known. For numerical integral
    arguments we have:

    >>> from sympy import loggamma
    >>> loggamma(-2)
    oo
    >>> loggamma(0)
    oo
    >>> loggamma(1)
    0
    >>> loggamma(2)
    0
    >>> loggamma(3)
    log(2)

    and for symbolic values:

    >>> from sympy import Symbol
    >>> n = Symbol("n", integer=True, positive=True)
    >>> loggamma(n)
    log(gamma(n))
    >>> loggamma(-n)
    oo

    for half-integral values:

    >>> from sympy import S, pi
    >>> loggamma(S(5)/2)
    log(3*sqrt(pi)/4)
    >>> loggamma(n/2)
    log(2**(-n + 1)*sqrt(pi)*gamma(n)/gamma(n/2 + 1/2))

    and general rational arguments:

    >>> from sympy import expand_func
    >>> L = loggamma(S(16)/3)
    >>> expand_func(L).doit()
    -5*log(3) + loggamma(1/3) + log(4) + log(7) + log(10) + log(13)
    >>> L = loggamma(S(19)/4)
    >>> expand_func(L).doit()
    -4*log(4) + loggamma(3/4) + log(3) + log(7) + log(11) + log(15)
    >>> L = loggamma(S(23)/7)
    >>> expand_func(L).doit()
    -3*log(7) + log(2) + loggamma(2/7) + log(9) + log(16)

    The loggamma function has the following limits towards infinity:

    >>> from sympy import oo
    >>> loggamma(oo)
    oo
    >>> loggamma(-oo)
    zoo

    The loggamma function obeys the mirror symmetry
    if `x \in \mathbb{C} \setminus \{-\infty, 0\}`:

    >>> from sympy.abc import x
    >>> from sympy import conjugate
    >>> conjugate(loggamma(x))
    loggamma(conjugate(x))

    Differentiation with respect to x is supported:

    >>> from sympy import diff
    >>> diff(loggamma(x), x)
    polygamma(0, x)

    Series expansion is also supported:

    >>> from sympy import series
    >>> series(loggamma(x), x, 0, 4)
    -log(x) - EulerGamma*x + pi**2*x**2/12 + x**3*polygamma(2, 1)/6 + O(x**4)

    We can numerically evaluate the gamma function to arbitrary precision
    on the whole complex plane:

    >>> from sympy import I
    >>> loggamma(5).evalf(30)
    3.17805383034794561964694160130
    >>> loggamma(I).evalf(20)
    -0.65092319930185633889 - 1.8724366472624298171*I

    See Also
    ========

    gamma: Gamma function.
    lowergamma: Lower incomplete gamma function.
    uppergamma: Upper incomplete gamma function.
    polygamma: Polygamma function.
    digamma: Digamma function.
    trigamma: Trigamma function.
    sympy.functions.special.beta_functions.beta: Euler Beta function.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Gamma_function
    .. [2] http://dlmf.nist.gov/5
    .. [3] http://mathworld.wolfram.com/LogGammaFunction.html
    .. [4] http://functions.wolfram.com/GammaBetaErf/LogGamma/
    """

    @classmethod
    def eval(cls, z):
        """Automatic evaluation at special values of z.

        Returns oo at the gamma poles (nonpositive integers),
        log(gamma(z)) for positive integers, a closed form for positive
        half-integers, and handles the infinities/NaN; returns None
        (leaving the expression unevaluated) otherwise.
        """
        z = sympify(z)

        if z.is_integer:
            if z.is_nonpositive:
                # Gamma has poles at 0, -1, -2, ...
                return S.Infinity
            elif z.is_positive:
                return log(gamma(z))
        elif z.is_rational:
            p, q = z.as_numer_denom()
            # Half-integral values: closed form via the duplication formula.
            if p.is_positive and q == 2:
                return log(sqrt(S.Pi) * 2**(1 - p) * gamma(p) / gamma((p + 1)*S.Half))

        if z is S.Infinity:
            return S.Infinity
        elif abs(z) is S.Infinity:
            return S.ComplexInfinity
        if z is S.NaN:
            return S.NaN

    def _eval_expand_func(self, **hints):
        """Expand loggamma at a rational argument n + p/q (0 < p < q)
        into loggamma(p/q) plus logarithms, via the Gamma recurrence."""
        from sympy import Sum
        z = self.args[0]

        if z.is_Rational:
            p, q = z.as_numer_denom()
            # General rational arguments (u + p/q)
            # Split z as n + p/q with p < q
            n = p // q
            p = p - n*q
            if p.is_positive and q.is_positive and p < q:
                k = Dummy("k")
                if n.is_positive:
                    return loggamma(p / q) - n*log(q) + Sum(log((k - 1)*q + p), (k, 1, n))
                elif n.is_negative:
                    # The imaginary correction accounts for the sign of
                    # Gamma on the negative axis.
                    return loggamma(p / q) - n*log(q) + S.Pi*S.ImaginaryUnit*n - Sum(log(k*q - p), (k, 1, -n))
                elif n.is_zero:
                    return loggamma(p / q)

        return self

    def _eval_nseries(self, x, n, logx=None):
        # Around a zero of the argument the series must be computed from
        # the "intractable" form log(gamma(z)).
        x0 = self.args[0].limit(x, 0)
        if x0 is S.Zero:
            f = self._eval_rewrite_as_intractable(*self.args)
            return f._eval_nseries(x, n, logx)
        return super(loggamma, self)._eval_nseries(x, n, logx)

    def _eval_aseries(self, n, args0, x, logx):
        """Asymptotic (Stirling) series of loggamma(z) for z -> oo."""
        from sympy import Order
        if args0[0] != oo:
            # Only the expansion at +oo is implemented here.
            return super(loggamma, self)._eval_aseries(n, args0, x, logx)
        z = self.args[0]
        m = min(n, ceiling((n + S(1))/2))
        # Leading Stirling terms plus the Bernoulli-number correction sum.
        r = log(z)*(z - S(1)/2) - z + log(2*pi)/2
        l = [bernoulli(2*k) / (2*k*(2*k - 1)*z**(2*k - 1)) for k in range(1, m)]
        o = None
        if m == 0:
            o = Order(1, x)
        else:
            o = Order(1/z**(2*m - 1), x)
        # It is very inefficient to first add the order and then do the nseries
        return (r + Add(*l))._eval_nseries(x, n, logx) + o

    def _eval_rewrite_as_intractable(self, z):
        # Fallback form used for series expansion and numerics.
        return log(gamma(z))

    def _eval_is_real(self):
        return self.args[0].is_real

    def _eval_conjugate(self):
        # Mirror symmetry loggamma(conjugate(z)) = conjugate(loggamma(z)),
        # excluding the special points 0 and -oo.
        z = self.args[0]
        if not z in (S.Zero, S.NegativeInfinity):
            return self.func(z.conjugate())

    def fdiff(self, argindex=1):
        """d/dz loggamma(z) = polygamma(0, z) (the digamma function)."""
        if argindex == 1:
            return polygamma(0, self.args[0])
        else:
            raise ArgumentIndexError(self, argindex)

    def _sage_(self):
        # Conversion hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.log_gamma(self.args[0]._sage_())
def digamma(x):
    r"""
    The digamma function is the first derivative of the loggamma function i.e,

    .. math::
        \psi(x) := \frac{\mathrm{d}}{\mathrm{d} z} \log\Gamma(z)
                = \frac{\Gamma'(z)}{\Gamma(z) }

    In this case, ``digamma(z) = polygamma(0, z)``.

    See Also
    ========

    gamma: Gamma function.
    lowergamma: Lower incomplete gamma function.
    uppergamma: Upper incomplete gamma function.
    polygamma: Polygamma function.
    loggamma: Log Gamma function.
    trigamma: Trigamma function.
    sympy.functions.special.beta_functions.beta: Euler Beta function.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Digamma_function
    .. [2] http://mathworld.wolfram.com/DigammaFunction.html
    .. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
    """
    # Thin convenience wrapper: delegate to polygamma of order 0.
    return polygamma(0, x)
def trigamma(x):
    r"""
    The trigamma function is the second derivative of the loggamma function i.e,

    .. math::
        \psi^{(1)}(z) := \frac{\mathrm{d}^{2}}{\mathrm{d} z^{2}} \log\Gamma(z).

    In this case, ``trigamma(z) = polygamma(1, z)``.

    See Also
    ========

    gamma: Gamma function.
    lowergamma: Lower incomplete gamma function.
    uppergamma: Upper incomplete gamma function.
    polygamma: Polygamma function.
    loggamma: Log Gamma function.
    digamma: Digamma function.
    sympy.functions.special.beta_functions.beta: Euler Beta function.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Trigamma_function
    .. [2] http://mathworld.wolfram.com/TrigammaFunction.html
    .. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
    """
    # Thin convenience wrapper: delegate to polygamma of order 1.
    return polygamma(1, x)
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools
import numpy as np
import operator
from ...tests.helper import pytest
from .. import (Time, TimeDelta, OperandTypeError, ScaleValueError,
TIME_SCALES, TIME_DELTA_SCALES)
from ... import units as u
# Comparison helpers: np.allclose pinned to machine-epsilon relative
# tolerance, with absolute tolerances chosen per quantity (days vs seconds).
_EPS = 2. ** -52  # double-precision machine epsilon


def _make_allclose(atol):
    """Return an np.allclose wrapper with eps rtol and the given atol."""
    return functools.partial(np.allclose, rtol=_EPS, atol=atol)


allclose_jd = _make_allclose(0)
allclose_jd2 = _make_allclose(_EPS)              # 20 ps atol (in days)
allclose_sec = _make_allclose(_EPS * 24 * 3600)  # 20 ps atol (in seconds)
class TestTimeDelta():
    """Test TimeDelta class"""

    def setup(self):
        # Fixture times and deltas shared by the tests below.  t3 carries
        # non-default precision/subformats/location so that propagation of
        # those properties can be checked.
        self.t = Time('2010-01-01', scale='utc')
        self.t2 = Time('2010-01-02 00:00:01', scale='utc')
        self.t3 = Time('2010-01-03 01:02:03', scale='utc', precision=9,
                       in_subfmt='date_hms', out_subfmt='date_hm',
                       location=(-75.*u.degree, 30.*u.degree, 500*u.m))
        self.dt = TimeDelta(100.0, format='sec')
        self.dt_array = TimeDelta(np.arange(100, 1000, 100), format='sec')

    def test_sub(self):
        # time - time
        dt = self.t2 - self.t
        assert (repr(dt).startswith("<TimeDelta object: scale='tai' "
                                    "format='jd' value=1.00001157407"))
        assert allclose_jd(dt.jd, 86401.0 / 86400.0)
        assert allclose_sec(dt.sec, 86401.0)

        # time - delta_time
        t = self.t2 - dt
        assert t.iso == self.t.iso

        # delta_time - delta_time
        dt2 = dt - self.dt
        assert allclose_sec(dt2.sec, 86301.0)

        # delta_time - time (not defined)
        with pytest.raises(OperandTypeError):
            dt - self.t

    def test_add(self):
        # time + time (not defined)
        with pytest.raises(OperandTypeError):
            self.t2 + self.t

        # time + delta_time
        dt = self.t2 - self.t
        t2 = self.t + dt
        assert t2.iso == self.t2.iso

        # delta_time + delta_time
        dt2 = dt + self.dt
        assert allclose_sec(dt2.sec, 86501.0)

        # delta_time + time
        dt = self.t2 - self.t
        t2 = dt + self.t
        assert t2.iso == self.t2.iso

    def test_add_vector(self):
        """Check time arithmetic as well as properly keeping track of whether
        a time is a scalar or a vector"""
        t = Time(0.0, format='mjd', scale='utc')
        t2 = Time([0.0, 1.0], format='mjd', scale='utc')
        dt = TimeDelta(100.0, format='jd')
        dt2 = TimeDelta([100.0, 200.0], format='jd')

        out = t + dt
        assert allclose_jd(out.mjd, 100.0)
        assert out.isscalar

        out = t + dt2
        assert allclose_jd(out.mjd, [100.0, 200.0])
        assert not out.isscalar

        out = t2 + dt
        assert allclose_jd(out.mjd, [100.0, 101.0])
        assert not out.isscalar

        out = dt + dt
        assert allclose_jd(out.jd, 200.0)
        assert out.isscalar

        out = dt + dt2
        assert allclose_jd(out.jd, [200.0, 300.0])
        assert not out.isscalar

        # Reverse the argument order
        out = dt + t
        assert allclose_jd(out.mjd, 100.0)
        assert out.isscalar

        out = dt2 + t
        assert allclose_jd(out.mjd, [100.0, 200.0])
        assert not out.isscalar

        out = dt + t2
        assert allclose_jd(out.mjd, [100.0, 101.0])
        assert not out.isscalar

        out = dt2 + dt
        assert allclose_jd(out.jd, [200.0, 300.0])
        assert not out.isscalar

    def test_sub_vector(self):
        """Check time arithmetic as well as properly keeping track of whether
        a time is a scalar or a vector"""
        t = Time(0.0, format='mjd', scale='utc')
        t2 = Time([0.0, 1.0], format='mjd', scale='utc')
        dt = TimeDelta(100.0, format='jd')
        dt2 = TimeDelta([100.0, 200.0], format='jd')

        out = t - dt
        assert allclose_jd(out.mjd, -100.0)
        assert out.isscalar

        out = t - dt2
        assert allclose_jd(out.mjd, [-100.0, -200.0])
        assert not out.isscalar

        out = t2 - dt
        assert allclose_jd(out.mjd, [-100.0, -99.0])
        assert not out.isscalar

        out = dt - dt
        assert allclose_jd(out.jd, 0.0)
        assert out.isscalar

        out = dt - dt2
        assert allclose_jd(out.jd, [0.0, -100.0])
        assert not out.isscalar

    def test_copy_timedelta(self):
        """Test copying the values of a TimeDelta object by passing it into the
        Time initializer.
        """
        t = Time(2455197.5, format='jd', scale='utc')
        t2 = Time(2455198.5, format='jd', scale='utc')
        dt = t2 - t

        # copy=False shares the underlying jd1/jd2 data ...
        dt2 = TimeDelta(dt, copy=False)
        assert dt.jd == dt2.jd
        assert dt._time.jd1 is dt2._time.jd1
        assert dt._time.jd2 is dt2._time.jd2

        # ... while copy=True makes independent copies.
        dt2 = TimeDelta(dt, copy=True)
        assert dt.jd == dt2.jd
        assert dt._time.jd1 is not dt2._time.jd1
        assert dt._time.jd2 is not dt2._time.jd2

        # Include initializers
        dt2 = TimeDelta(dt, format='sec')
        assert allclose_sec(dt2.value, 86400.0)

    def test_neg_abs(self):
        # Negation and abs() work elementwise for scalar and array deltas.
        for dt in (self.dt, self.dt_array):
            dt2 = -dt
            assert np.all(dt2.jd == -dt.jd)
            dt3 = abs(dt)
            assert np.all(dt3.jd == dt.jd)
            dt4 = abs(dt2)
            assert np.all(dt4.jd == dt.jd)

    def test_mul_div(self):
        for dt in (self.dt, self.dt_array):
            dt2 = dt + dt + dt
            dt3 = 3. * dt
            assert allclose_jd(dt2.jd, dt3.jd)
            dt4 = dt3 / 3.
            assert allclose_jd(dt4.jd, dt.jd)
        dt5 = self.dt * np.arange(3)
        assert dt5[0].jd == 0.
        assert dt5[-1].jd == (self.dt + self.dt).jd
        # Multiplying two deltas, or a delta by a Time, is undefined.
        with pytest.raises(OperandTypeError):
            self.dt * self.dt
        with pytest.raises(OperandTypeError):
            self.dt * self.t

    def test_keep_properties(self):
        # closes #1924 (partially)
        dt = TimeDelta(1000., format='sec')
        for t in (self.t, self.t3):
            ta = t + dt
            assert ta.location is t.location
            assert ta.precision == t.precision
            assert ta.in_subfmt == t.in_subfmt
            assert ta.out_subfmt == t.out_subfmt

            tr = dt + t
            assert tr.location is t.location
            assert tr.precision == t.precision
            assert tr.in_subfmt == t.in_subfmt
            assert tr.out_subfmt == t.out_subfmt

            ts = t - dt
            assert ts.location is t.location
            assert ts.precision == t.precision
            assert ts.in_subfmt == t.in_subfmt
            assert ts.out_subfmt == t.out_subfmt

        t_tdb = self.t.tdb
        assert hasattr(t_tdb, '_delta_tdb_tt')
        assert not hasattr(t_tdb, '_delta_ut1_utc')
        t_tdb_ut1 = t_tdb.ut1
        assert hasattr(t_tdb_ut1, '_delta_tdb_tt')
        assert hasattr(t_tdb_ut1, '_delta_ut1_utc')
        t_tdb_ut1_utc = t_tdb_ut1.utc
        assert hasattr(t_tdb_ut1_utc, '_delta_tdb_tt')
        assert hasattr(t_tdb_ut1_utc, '_delta_ut1_utc')
        # adding or subtracting some time should remove the delta's
        # since these are time-dependent and should be recalculated
        for op in (operator.add, operator.sub):
            t1 = op(t_tdb, dt)
            assert not hasattr(t1, '_delta_tdb_tt')
            assert not hasattr(t1, '_delta_ut1_utc')
            t2 = op(t_tdb_ut1, dt)
            assert not hasattr(t2, '_delta_tdb_tt')
            assert not hasattr(t2, '_delta_ut1_utc')
            t3 = op(t_tdb_ut1_utc, dt)
            assert not hasattr(t3, '_delta_tdb_tt')
            assert not hasattr(t3, '_delta_ut1_utc')
class TestTimeDeltaScales():
    """Test scale conversion for Time Delta.

    Go through @taldcroft's list of expected behaviour from #1932"""

    def setup(self):
        # pick a date that includes a leap second for better testing
        self.iso_times = ['2012-06-30 12:00:00', '2012-06-30 23:59:59',
                          '2012-07-01 00:00:00', '2012-07-01 12:00:00']
        self.t = dict((scale, Time(self.iso_times, scale=scale, precision=9))
                      for scale in TIME_SCALES)
        self.dt = dict((scale, self.t[scale]-self.t[scale][0])
                       for scale in TIME_SCALES)

    def test_delta_scales_definition(self):
        # Every delta scale (and None) is accepted; 'utc' is not a valid
        # delta scale because UTC days are not uniform.
        for scale in list(TIME_DELTA_SCALES) + [None]:
            TimeDelta([0., 1., 10.], format='sec', scale=scale)

        with pytest.raises(ScaleValueError):
            TimeDelta([0., 1., 10.], format='sec', scale='utc')

    @pytest.mark.parametrize(('scale1', 'scale2'),
                             list(itertools.product(TIME_SCALES, TIME_SCALES)))
    def test_scales_for_time_minus_time(self, scale1, scale2):
        """T(X) - T2(Y)  -- does T(X) - T2(Y).X and return dT(X)
        and T(X) +/- dT(Y)  -- does (in essence) (T(X).Y +/- dT(Y)).X

        I.e., time differences of two times should have the scale of the
        first time.  The one exception is UTC, which returns TAI.

        There are no timescales for which this does not work.
        """
        t1 = self.t[scale1]
        t2 = self.t[scale2]
        dt = t1 - t2
        if scale1 in TIME_DELTA_SCALES:
            assert dt.scale == scale1
        else:
            assert scale1 == 'utc'
            assert dt.scale == 'tai'

        # now check with delta time; also check reversibility
        t1_recover_t2_scale = t2 + dt
        assert t1_recover_t2_scale.scale == scale2
        t1_recover = getattr(t1_recover_t2_scale, scale1)
        assert allclose_jd(t1_recover.jd, t1.jd)
        t2_recover_t1_scale = t1 - dt
        assert t2_recover_t1_scale.scale == scale1
        t2_recover = getattr(t2_recover_t1_scale, scale2)
        assert allclose_jd(t2_recover.jd, t2.jd)

    def test_scales_for_delta_minus_delta(self):
        """dT(X) +/- dT2(Y) -- Add/substract JDs for dT(X) and dT(Y).X

        I.e. this will succeed if dT(Y) can be converted to scale X.
        Returns delta time in scale X
        """
        # geocentric timescales
        dt_tai = self.dt['tai']
        dt_tt = self.dt['tt']
        dt0 = dt_tai - dt_tt
        assert dt0.scale == 'tai'
        # tai and tt have the same scale, so differences should be the same
        assert allclose_sec(dt0.sec, 0.)

        dt_tcg = self.dt['tcg']
        dt1 = dt_tai - dt_tcg
        assert dt1.scale == 'tai'
        # tai and tcg do not have the same scale, so differences different
        assert not allclose_sec(dt1.sec, 0.)

        t_tai_tcg = self.t['tai'].tcg
        dt_tai_tcg = t_tai_tcg - t_tai_tcg[0]
        dt2 = dt_tai - dt_tai_tcg
        assert dt2.scale == 'tai'
        # but if tcg difference calculated from tai, it should roundtrip
        assert allclose_sec(dt2.sec, 0.)

        # check that if we put TCG first, we get a TCG scale back
        dt3 = dt_tai_tcg - dt_tai
        assert dt3.scale == 'tcg'
        assert allclose_sec(dt3.sec, 0.)

        for scale in 'tdb', 'tcb', 'ut1':
            with pytest.raises(TypeError):
                dt_tai - self.dt[scale]

        # barycentric timescales
        dt_tcb = self.dt['tcb']
        dt_tdb = self.dt['tdb']
        dt4 = dt_tcb - dt_tdb
        assert dt4.scale == 'tcb'
        # tcb and tdb run at different rates, so differences differ.
        # BUG FIX: this assertion previously (and vacuously) re-checked
        # dt1, leaving dt4 entirely unchecked.
        assert not allclose_sec(dt4.sec, 0.)

        t_tcb_tdb = self.t['tcb'].tdb
        dt_tcb_tdb = t_tcb_tdb - t_tcb_tdb[0]
        dt5 = dt_tcb - dt_tcb_tdb
        assert dt5.scale == 'tcb'
        assert allclose_sec(dt5.sec, 0.)

        for scale in 'utc', 'tai', 'tt', 'tcg', 'ut1':
            with pytest.raises(TypeError):
                dt_tcb - self.dt[scale]

        # rotational timescale
        # (renamed dt5 -> dt6 so the TCB result above is not shadowed)
        dt_ut1 = self.dt['ut1']
        dt6 = dt_ut1 - dt_ut1[-1]
        assert dt6.scale == 'ut1'
        assert dt6[-1].sec == 0.

        for scale in 'utc', 'tai', 'tt', 'tcg', 'tcb', 'tdb':
            with pytest.raises(TypeError):
                dt_ut1 - self.dt[scale]

    @pytest.mark.parametrize(
        ('scale', 'op'), list(itertools.product(TIME_SCALES,
                                                (operator.add, operator.sub))))
    def test_scales_for_delta_scale_is_none(self, scale, op):
        """T(X) +/- dT(None) or T(X) +/- Quantity(time-like)

        This is always allowed and just adds JDs, i.e., the scale of
        the TimeDelta or time-like Quantity will be taken to be X.
        The one exception is again for X=UTC, where TAI is assumed instead,
        so that a day is always defined as 86400 seconds.
        """
        dt_none = TimeDelta([0., 1., -1., 1000.], format='sec')
        assert dt_none.scale is None
        q_time = dt_none.to('s')

        dt = self.dt[scale]
        dt1 = op(dt, dt_none)
        assert dt1.scale == dt.scale
        assert allclose_jd(dt1.jd, op(dt.jd, dt_none.jd))
        dt2 = op(dt_none, dt)
        assert dt2.scale == dt.scale
        assert allclose_jd(dt2.jd, op(dt_none.jd, dt.jd))
        dt3 = op(q_time, dt)
        assert dt3.scale == dt.scale
        assert allclose_jd(dt3.jd, dt2.jd)

        t = self.t[scale]
        t1 = op(t, dt_none)
        assert t1.scale == t.scale
        assert allclose_jd(t1.jd, op(t.jd, dt_none.jd))
        if op is operator.add:
            # Time on the right only makes sense for addition.
            t2 = op(dt_none, t)
            assert t2.scale == t.scale
            assert allclose_jd(t2.jd, t1.jd)
        t3 = op(t, q_time)
        assert t3.scale == t.scale
        assert allclose_jd(t3.jd, t1.jd)

    @pytest.mark.parametrize('scale', TIME_SCALES)
    def test_delta_day_is_86400_seconds(self, scale):
        """TimeDelta or Quantity holding 1 day always means 24*60*60 seconds

        This holds true for all timescales but UTC, for which leap-second
        days are longer or shorter by one second.
        """
        t = self.t[scale]
        dt_day = TimeDelta(1., format='jd')
        q_day = dt_day.to('day')

        dt_day_leap = t[-1] - t[0]
        # ^ = exclusive or, so either equal and not UTC, or not equal and UTC
        assert allclose_jd(dt_day_leap.jd, dt_day.jd) ^ (scale == 'utc')

        t1 = t[0] + dt_day
        assert allclose_jd(t1.jd, t[-1].jd) ^ (scale == 'utc')
        t2 = q_day + t[0]
        assert allclose_jd(t2.jd, t[-1].jd) ^ (scale == 'utc')
        t3 = t[-1] - dt_day
        assert allclose_jd(t3.jd, t[0].jd) ^ (scale == 'utc')
        t4 = t[-1] - q_day
        assert allclose_jd(t4.jd, t[0].jd) ^ (scale == 'utc')
| |
#
# Module for starting a process object using os.fork() or CreateProcess()
#
# multiprocessing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import os
import sys
import signal
from multiprocessing import util, process
__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close', 'ForkingPickler']
#
# Check that the current thread is spawning a child process
#
def assert_spawning(self):
    """Raise RuntimeError unless the current thread is spawning a child.

    Objects that may only travel to a child via inheritance call this
    before allowing themselves to be pickled.
    """
    if Popen.thread_is_spawning():
        return
    raise RuntimeError(
        '%s objects should only be shared between processes'
        ' through inheritance' % type(self).__name__
        )
#
# Try making some callable types picklable
#
from pickle import Pickler
class ForkingPickler(Pickler):
    """Pickler subclass with its own extensible dispatch table.

    Relies on the pure-Python (Python 2) ``Pickler.dispatch`` dict; the
    copy keeps registrations local to this subclass.
    """
    dispatch = Pickler.dispatch.copy()

    @classmethod
    def register(cls, type, reduce):
        """Register ``reduce`` as the reduction function for ``type``."""
        def dispatcher(self, obj):
            # Adapt a reduce() function to the Pickler dispatch protocol.
            rv = reduce(obj)
            self.save_reduce(obj=obj, *rv)
        cls.dispatch[type] = dispatcher
def _reduce_method(m):
    # Reduce a bound/unbound method to a getattr() call so it can be
    # pickled (Python 2 im_self/im_class/im_func attributes).
    if m.im_self is None:
        return getattr, (m.im_class, m.im_func.func_name)
    else:
        return getattr, (m.im_self, m.im_func.func_name)
ForkingPickler.register(type(ForkingPickler.save), _reduce_method)

def _reduce_method_descriptor(m):
    # Reduce an unbound built-in method descriptor (e.g. list.append).
    return getattr, (m.__objclass__, m.__name__)
ForkingPickler.register(type(list.append), _reduce_method_descriptor)
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)

#def _reduce_builtin_function_or_method(m):
#    return getattr, (m.__self__, m.__name__)
#ForkingPickler.register(type(list().append), _reduce_builtin_function_or_method)
#ForkingPickler.register(type(int().__add__), _reduce_builtin_function_or_method)

try:
    from functools import partial
except ImportError:
    pass
else:
    def _reduce_partial(p):
        # functools.partial objects pickle as (func, args, keywords).
        return _rebuild_partial, (p.func, p.args, p.keywords or {})
    def _rebuild_partial(func, args, keywords):
        return partial(func, *args, **keywords)
    ForkingPickler.register(partial, _reduce_partial)
#
# Unix
#
if sys.platform != 'win32':
    import time

    # POSIX aliases exported via __all__ (note: 'exit' deliberately
    # shadows the builtin with os._exit for use in forked children).
    exit = os._exit
    duplicate = os.dup
    close = os.close

    #
    # We define a Popen class similar to the one from subprocess, but
    # whose constructor takes a process object as its argument.
    #
class Popen(object):
    """Start a child process with os.fork() to run a process object."""

    def __init__(self, process_obj):
        # Flush now so buffered output is not duplicated into the child.
        sys.stdout.flush()
        sys.stderr.flush()
        self.returncode = None

        self.pid = os.fork()
        if self.pid == 0:
            # Child: reseed the PRNG (fork duplicates the parent's
            # state), run the process object and exit via os._exit so
            # no parent cleanup handlers run.
            if 'random' in sys.modules:
                import random
                random.seed()
            code = process_obj._bootstrap()
            sys.stdout.flush()
            sys.stderr.flush()
            os._exit(code)

    def poll(self, flag=os.WNOHANG):
        """Reap the child if it exited; return its exit code or None."""
        if self.returncode is None:
            pid, sts = os.waitpid(self.pid, flag)
            if pid == self.pid:
                if os.WIFSIGNALED(sts):
                    # Killed by a signal: negative signal number, by
                    # the subprocess-module convention.
                    self.returncode = -os.WTERMSIG(sts)
                else:
                    assert os.WIFEXITED(sts)
                    self.returncode = os.WEXITSTATUS(sts)
        return self.returncode

    def wait(self, timeout=None):
        """Wait up to ``timeout`` seconds; return exit code or None."""
        if timeout is None:
            # Block until the child terminates.
            return self.poll(0)
        deadline = time.time() + timeout
        delay = 0.0005
        while 1:
            # Poll with exponential backoff, capped at 50 ms.
            res = self.poll()
            if res is not None:
                break
            remaining = deadline - time.time()
            if remaining <= 0:
                break
            delay = min(delay * 2, remaining, 0.05)
            time.sleep(delay)
        return res

    def terminate(self):
        """Send SIGTERM to the child if it is still running."""
        if self.returncode is None:
            try:
                os.kill(self.pid, signal.SIGTERM)
            except OSError, e:
                # The child may already have exited; re-raise only if
                # it still appears to be alive.
                if self.wait(timeout=0.1) is None:
                    raise

    @staticmethod
    def thread_is_spawning():
        # fork() inherits everything, so no special spawning state exists.
        return False
#
# Windows
#
else:
    import thread
    import msvcrt
    import _subprocess
    import time

    from ._multiprocessing import win32, Connection, PipeConnection
    from .util import Finalize

    #try:
    #    from cPickle import dump, load, HIGHEST_PROTOCOL
    #except ImportError:
    from pickle import load, HIGHEST_PROTOCOL

    def dump(obj, file, protocol=None):
        # Always pickle through ForkingPickler so the reductions
        # registered above are honoured.
        ForkingPickler(file, protocol).dump(obj)

    #
    #
    #

    # Sentinel exit code used by Popen.terminate()/wait() below.
    TERMINATE = 0x10000
    # True when running as a frozen Windows executable.
    WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))

    exit = win32.ExitProcess
    close = win32.CloseHandle

    #
    # _python_exe is the assumed path to the python executable.
    # People embedding Python want to modify it.
    #
    if sys.executable.lower().endswith('pythonservice.exe'):
        _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
    else:
        _python_exe = sys.executable

    def set_executable(exe):
        # Override the python executable used to spawn children.
        global _python_exe
        _python_exe = exe

    #
    #
    #

    def duplicate(handle, target_process=None, inheritable=False):
        # Duplicate a Windows handle, by default into the current process.
        if target_process is None:
            target_process = _subprocess.GetCurrentProcess()
        return _subprocess.DuplicateHandle(
            _subprocess.GetCurrentProcess(), handle, target_process,
            0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
            ).Detach()
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
    '''
    Start a subprocess to run the code of a process object
    '''
    # Thread-local storage; process_handle is set only while __init__ is
    # pickling data for a child (see thread_is_spawning()).
    _tls = thread._local()

    def __init__(self, process_obj):
        # create pipe for communication with child
        rfd, wfd = os.pipe()

        # get handle for read end of the pipe and make it inheritable
        rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
        os.close(rfd)

        # start process
        cmd = get_command_line() + [rhandle]
        cmd = ' '.join('"%s"' % x for x in cmd)
        hp, ht, pid, tid = _subprocess.CreateProcess(
            _python_exe, cmd, None, None, 1, 0, None, None, None
            )
        ht.Close()
        close(rhandle)

        # set attributes of self
        self.pid = pid
        self.returncode = None
        self._handle = hp

        # send information to child
        prep_data = get_preparation_data(process_obj._name)
        to_child = os.fdopen(wfd, 'wb')
        Popen._tls.process_handle = int(hp)
        try:
            dump(prep_data, to_child, HIGHEST_PROTOCOL)
            dump(process_obj, to_child, HIGHEST_PROTOCOL)
        finally:
            del Popen._tls.process_handle
            to_child.close()

    @staticmethod
    def thread_is_spawning():
        # True while this thread is inside __init__ pickling for a child.
        return getattr(Popen._tls, 'process_handle', None) is not None

    @staticmethod
    def duplicate_for_child(handle):
        # Duplicate a handle into the child currently being spawned.
        return duplicate(handle, Popen._tls.process_handle)

    def wait(self, timeout=None):
        """Wait up to ``timeout`` seconds; return exit code or None."""
        if self.returncode is None:
            if timeout is None:
                msecs = _subprocess.INFINITE
            else:
                msecs = max(0, int(timeout * 1000 + 0.5))

            res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
            if res == _subprocess.WAIT_OBJECT_0:
                code = _subprocess.GetExitCodeProcess(self._handle)
                if code == TERMINATE:
                    # Map our sentinel exit code back to SIGTERM semantics.
                    code = -signal.SIGTERM
                self.returncode = code

        return self.returncode

    def poll(self):
        return self.wait(timeout=0)

    def terminate(self):
        """Forcibly end the child with the TERMINATE sentinel exit code."""
        if self.returncode is None:
            try:
                _subprocess.TerminateProcess(int(self._handle), TERMINATE)
            except WindowsError:
                # Process may already be gone; only re-raise if it is
                # still running.
                if self.wait(timeout=0.1) is None:
                    raise
#
#
#
def is_forking(argv):
    """Return whether the command line indicates we are forking."""
    forking = len(argv) >= 2 and argv[1] == '--multiprocessing-fork'
    if forking:
        # A forking command line always carries exactly one extra
        # argument (the pipe handle) after the marker.
        assert len(argv) == 3
    return forking
def freeze_support():
    '''
    Run code for process object if this is not the main process
    '''
    # In a frozen executable the child re-runs the main script; detect
    # the forking marker and hand control to main() instead.
    if is_forking(sys.argv):
        main()
        sys.exit()
def get_command_line():
    '''
    Returns prefix of command line used for spawning a child process
    '''
    # Spawning before bootstrap finished means the main module lacks the
    # "if __name__ == '__main__'" guard -- fail loudly with advice.
    if process.current_process()._identity==() and is_forking(sys.argv):
        raise RuntimeError('''
        Attempt to start a new process before the current process
        has finished its bootstrapping phase.

        This probably means that you are on Windows and you have
        forgotten to use the proper idiom in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce a Windows executable.''')

    if getattr(sys, 'frozen', False):
        # Frozen executables re-run themselves with the fork marker.
        return [sys.executable, '--multiprocessing-fork']
    else:
        prog = 'from multiprocessing.forking import main; main()'
        return [_python_exe, '-c', prog, '--multiprocessing-fork']
def main():
    '''
    Run code specified by data received over pipe
    '''
    assert is_forking(sys.argv)

    # Last argument is the inherited handle of the pipe's read end
    # (appended by Popen.__init__).
    handle = int(sys.argv[-1])
    fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
    from_parent = os.fdopen(fd, 'rb')

    # First the preparation data, then the process object itself.
    process.current_process()._inheriting = True
    preparation_data = load(from_parent)
    prepare(preparation_data)
    self = load(from_parent)
    process.current_process()._inheriting = False
    from_parent.close()

    exitcode = self._bootstrap()
    exit(exitcode)
def get_preparation_data(name):
    '''
    Return info about parent needed by child to unpickle process object
    '''
    from .util import _logger, _log_to_stderr

    d = dict(
        name=name,
        sys_path=sys.path,
        sys_argv=sys.argv,
        log_to_stderr=_log_to_stderr,
        orig_dir=process.ORIGINAL_DIR,
        authkey=process.current_process().authkey,
        )

    if _logger is not None:
        d['log_level'] = _logger.getEffectiveLevel()

    if not WINEXE:
        # Record the parent's main module path so the child can
        # re-import it (frozen executables have no script to import).
        main_path = getattr(sys.modules['__main__'], '__file__', None)
        if not main_path and sys.argv[0] not in ('', '-c'):
            main_path = sys.argv[0]
        if main_path is not None:
            # Resolve relative paths against the parent's original cwd.
            if not os.path.isabs(main_path) and \
                                      process.ORIGINAL_DIR is not None:
                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
            d['main_path'] = os.path.normpath(main_path)

    return d
#
# Make (Pipe)Connection picklable
#
def reduce_connection(conn):
    # Connections may only be pickled while spawning a child; the OS
    # handle is duplicated into that child.
    if not Popen.thread_is_spawning():
        raise RuntimeError(
            'By default %s objects can only be shared between processes\n'
            'using inheritance' % type(conn).__name__
            )
    return type(conn), (Popen.duplicate_for_child(conn.fileno()),
                        conn.readable, conn.writable)

ForkingPickler.register(Connection, reduce_connection)
ForkingPickler.register(PipeConnection, reduce_connection)
#
# Prepare current process
#
# Modules displaced from sys.modules['__main__'] by prepare(); kept
# alive here so they are not garbage collected.
old_main_modules = []

def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'main_path' in data:
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            # Package run as a script: use the package name instead.
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name != 'ipython':
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here.  However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module(
                    '__parents_main__', file, path_name, etc
                    )
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'

            # Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- somewhat ugly.
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass
| |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Gun Pinyo (gunpinyo@google.com)'
import os
from xml.etree import cElementTree
import appengine_config
from common import schema_fields
from common import tags
from controllers import sites
from models import custom_modules
from modules.oeditor import oeditor
CODETAGS_MODULE_URI = '/modules/code_tags'
CODETAGS_RESOURCES_URI = CODETAGS_MODULE_URI + '/resources'
CODEMIRROR_URI = CODETAGS_MODULE_URI + '/codemirror'
# (CodeMirror mode name, display label) pairs offered in the code tag
# editor's language drop-down (see CodeTag.get_schema).
SELECT_DATA = [
    # codemirror does not have plain text mode
    # however by setting an incorrect mode it will default to plain text
    ('', 'Plain Text'),
    ('htmlmixed', 'Html'),
    ('javascript', 'JavaScript'),
    ('css', 'CSS'),
    ('python', 'Python'),
    ('ruby', 'Ruby'),
    ('shell', 'Shell'),
    ('xml', 'XML'),
    ('xquery', 'XQuery'),
    ('yaml', 'Yaml'),
    ('perl', 'Perl'),
    ('php', 'PHP'),
    ('coffeescript', 'CoffeeScript'),
    ('clike', 'C (and relative)'),
    ('apl', 'apl'),
    ('asterisk', 'asterisk'),
    ('clojure', 'clojure'),
    ('cobol', 'cobol'),
    ('commonlisp', 'commonlisp'),
    ('cypher', 'cypher'),
    ('d', 'd'),
    ('diff', 'diff'),
    ('django', 'django'),
    ('dtd', 'dtd'),
    ('dylan', 'dylan'),
    ('ecl', 'ecl'),
    ('eiffel', 'eiffel'),
    ('erlang', 'erlang'),
    ('fortran', 'fortran'),
    ('gas', 'gas'),
    ('gfm', 'gfm'),
    ('gherkin', 'gherkin'),
    ('go', 'go'),
    ('groovy', 'groovy'),
    ('haml', 'haml'),
    ('haskell', 'haskell'),
    ('haxe', 'haxe'),
    ('htmlembedded', 'htmlembedded'),
    ('http', 'http'),
    ('jade', 'jade'),
    ('jinja2', 'jinja2'),
    ('julia', 'julia'),
    ('kotlin', 'kotlin'),
    ('livescript', 'livescript'),
    ('lua', 'lua'),
    ('markdown', 'markdown'),
    ('mirc', 'mirc'),
    ('mllike', 'mllike'),
    ('nginx', 'nginx'),
    ('ntriples', 'ntriples'),
    ('octave', 'octave'),
    ('pascal', 'pascal'),
    ('pegjs', 'pegjs'),
    ('pig', 'pig'),
    ('properties', 'properties'),
    ('puppet', 'puppet'),
    ('q', 'q'),
    ('r', 'r'),
    ('rpm', 'rpm'),
    ('rst', 'rst'),
    ('rust', 'rust'),
    ('sass', 'sass'),
    ('scheme', 'scheme'),
    ('sieve', 'sieve'),
    ('slim', 'slim'),
    ('smalltalk', 'smalltalk'),
    ('smarty', 'smarty'),
    ('smartymixed', 'smartymixed'),
    ('solr', 'solr'),
    ('sparql', 'sparql'),
    ('sql', 'sql'),
    ('stex', 'stex'),
    ('tcl', 'tcl'),
    ('tiddlywiki', 'tiddlywiki'),
    ('tiki', 'tiki'),
    ('toml', 'toml'),
    ('turtle', 'turtle'),
    ('vb', 'vb'),
    ('vbscript', 'vbscript'),
    ('velocity', 'velocity'),
    ('verilog', 'verilog'),
    ('z80', 'z80'),
    ]
class CodeTag(tags.ContextAwareTag):
    """Custom tag for showing piece of code using CodeMirror."""

    binding_name = 'gcb-code'

    @classmethod
    def name(cls):
        return 'Embedded Code'

    @classmethod
    def vendor(cls):
        return 'gcb'

    @classmethod
    def extra_js_files(cls):
        # The popup editor script is only needed when highlighting is on.
        if oeditor.CAN_HIGHLIGHT_CODE.value:
            return ['code_tags_popup.js']
        else:
            return []

    @classmethod
    def additional_dirs(cls):
        return [os.path.join(
            appengine_config.BUNDLE_ROOT, 'modules', 'code_tags', 'resources')]

    def render(self, node, context):
        """Render the tag as a <code> element styled by code_tags.css/js."""
        code_elt = cElementTree.Element('code')
        code_elt.text = node.text or ''
        code_elt.set('class', 'codemirror-container-readonly')
        # Bug fix: default to '' when the 'mode' attribute is absent.
        # attrib.get('mode') returned None, which ElementTree cannot
        # serialize as an attribute value; an empty/unknown mode makes
        # CodeMirror fall back to plain text (see SELECT_DATA comment).
        code_elt.set('data-mode', node.attrib.get('mode', ''))
        return code_elt

    def rollup_header_footer(self, context):
        """Include CodeMirror library only when a code tag is present."""
        if oeditor.CAN_HIGHLIGHT_CODE.value:
            header = tags.html_string_to_element_tree(
                '<script src="%s/lib/codemirror.js"></script>'
                '<link rel="stylesheet" href="%s/lib/codemirror.css">'
                '<script src="%s/addon/mode/loadmode.js"></script>'
                '<link rel="stylesheet" href="%s/code_tags.css">' % (
                    CODEMIRROR_URI, CODEMIRROR_URI, CODEMIRROR_URI,
                    CODETAGS_RESOURCES_URI))
            footer = tags.html_string_to_element_tree(
                '<script src="%s/code_tags.js">'
                '</script>' % CODETAGS_RESOURCES_URI)
        else:
            # Without highlighting only a minimal stylesheet is needed.
            header = cElementTree.Element('link')
            header.attrib['rel'] = 'stylesheet'
            header.attrib['href'] = '%s/code_tags_no_highlight.css' % (
                CODETAGS_RESOURCES_URI)
            footer = cElementTree.Comment('Empty footer')
        return (header, footer)

    def get_icon_url(self):
        return CODETAGS_RESOURCES_URI + '/code_tags.png'

    def get_schema(self, unused_handler):
        """Return the editor schema: a language selector and a code field."""
        reg = schema_fields.FieldRegistry(CodeTag.name())
        reg.add_property(
            schema_fields.SchemaField(
                'mode', 'Language', 'string',
                optional=True,
                select_data=SELECT_DATA))
        reg.add_property(
            schema_fields.SchemaField(
                'code', 'Code', 'text',
                optional=True,
                description=('The code which will be displayed.')))
        return reg
custom_module = None


def register_module():
    """Registers this module for use."""

    def on_module_enable():
        tags.Registry.add_tag_binding(CodeTag.binding_name, CodeTag)

    def on_module_disable():
        tags.Registry.remove_tag_binding(CodeTag.binding_name)

    global_routes = [
        (CODETAGS_RESOURCES_URI + '/.*', tags.ResourcesHandler),
        (CODEMIRROR_URI + '/(.*)', sites.make_zip_handler(os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib/codemirror-4.5.0.zip')))]
    namespaced_routes = []

    global custom_module
    custom_module = custom_modules.Module(
        'Code Editor and Code Example Display',
        # NOTE: the implicit string concatenation used to read
        # "...editor andallow student..." — the trailing space fixes it.
        'Allow teacher to use a proper code editor and '
        'allow student to see a proper piece of code',
        global_routes, namespaced_routes,
        notify_module_enabled=on_module_enable,
        notify_module_disabled=on_module_disable)
    return custom_module
| |
import tempfile
import shutil
import os
import inspect
from lib import BaseTest
class AddRepo1Test(BaseTest):
    """
    add package to local repo: .deb file
    """
    # fixture: create the target local repo before the command under test runs
    fixtureCmds = [
        "aptly repo create -comment=Repo1 -distribution=squeeze repo1",
    ]
    # command under test: import a single .deb into the repo
    runCmd = "aptly repo add repo1 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb"
    def check(self):
        # compare command output and the repo's package list against gold files
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo1", "repo_show")
        # check pool
        self.check_exists('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb')
class AddRepo2Test(BaseTest):
    """
    add package to local repo: .dsc file
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo2 -distribution=squeeze repo2",
    ]
    runCmd = "aptly repo add repo2 ${files}/pyspi_0.6.1-1.3.dsc ${files}/pyspi-0.6.1-1.3.stripped.dsc"

    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo2", "repo_show")
        # every file referenced by the source packages must land in the pool
        for pooled in ('pool/22/ff/pyspi_0.6.1-1.3.diff.gz',
                       'pool/b7/2c/pyspi_0.6.1-1.3.dsc',
                       'pool/de/f3/pyspi_0.6.1.orig.tar.gz',
                       'pool/2f/5b/pyspi-0.6.1-1.3.stripped.dsc'):
            self.check_exists(pooled)
class AddRepo3Test(BaseTest):
    """
    add package to local repo: directory
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo3 -distribution=squeeze repo3",
    ]
    runCmd = "aptly repo add repo3 ${files}"

    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo3", "repo_show")
        # everything found in the source directory must land in the pool
        for pooled in ('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb',
                       'pool/22/ff/pyspi_0.6.1-1.3.diff.gz',
                       'pool/b7/2c/pyspi_0.6.1-1.3.dsc',
                       'pool/de/f3/pyspi_0.6.1.orig.tar.gz',
                       'pool/2f/5b/pyspi-0.6.1-1.3.stripped.dsc'):
            self.check_exists(pooled)
class AddRepo4Test(BaseTest):
"""
add package to local repo: complex directory + remove
"""
fixtureCmds = [
"aptly repo create -comment=Repo4 -distribution=squeeze repo4",
]
runCmd = "aptly repo add -remove-files repo4 "
def prepare(self):
super(AddRepo4Test, self).prepare()
self.tempSrcDir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.tempSrcDir, "01"), 0755)
os.makedirs(os.path.join(self.tempSrcDir, "02", "03"), 0755)
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "libboost-program-options-dev_1.49.0.1_i386.deb"),
os.path.join(self.tempSrcDir, "01"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.dsc"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1.orig.tar.gz"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.diff.gz"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.diff.gz"),
os.path.join(self.tempSrcDir, "02", "03", "other.file"))
self.runCmd += self.tempSrcDir
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo4", "repo_show")
# check pool
self.check_exists('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/22/ff/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/b7/2c/pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/de/f3/pyspi_0.6.1.orig.tar.gz')
path = os.path.join(self.tempSrcDir, "01", "libboost-program-options-dev_1.49.0.1_i386.deb")
if os.path.exists(path):
raise Exception("path %s shouldn't exist" % (path, ))
path = os.path.join(self.tempSrcDir, "02", "03", "pyspi_0.6.1.orig.tar.gz")
if os.path.exists(path):
raise Exception("path %s shouldn't exist" % (path, ))
path = os.path.join(self.tempSrcDir, "02", "03", "other.file")
if not os.path.exists(path):
raise Exception("path %s doesn't exist" % (path, ))
shutil.rmtree(self.tempSrcDir)
class AddRepo5Test(BaseTest):
"""
add package to local repo: some source files missing
"""
fixtureCmds = [
"aptly repo create -comment=Repo5 -distribution=squeeze repo5",
]
runCmd = "aptly repo add repo5 "
outputMatchPrepare = lambda self, s: s.replace(self.tempSrcDir, "")
expectedCode = 1
def prepare(self):
super(AddRepo5Test, self).prepare()
self.tempSrcDir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.tempSrcDir, "02", "03"), 0755)
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.dsc"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1.orig.tar.gz"),
os.path.join(self.tempSrcDir, "02", "03"))
self.runCmd += self.tempSrcDir
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo5", "repo_show")
shutil.rmtree(self.tempSrcDir)
class AddRepo6Test(BaseTest):
    """
    add package to local repo: missing file
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo6 -distribution=squeeze repo6",
    ]
    # adding a nonexistent path must fail
    runCmd = "aptly repo add repo6 no-such-file"
    # aptly is expected to exit with an error
    expectedCode = 1
class AddRepo7Test(BaseTest):
    """
    add package to local repo: missing repo
    """
    # no fixtureCmds: repo7 is deliberately never created, so the add fails
    runCmd = "aptly repo add repo7 ${files}"
    expectedCode = 1
class AddRepo8Test(BaseTest):
    """
    add package to local repo: conflict in packages
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo8 -distribution=squeeze repo8",
        "aptly repo add repo8 ${files}/pyspi_0.6.1-1.3.dsc",
    ]
    runCmd = "aptly repo add repo8 ${testfiles}/pyspi_0.6.1-1.3.conflict.dsc"
    expectedCode = 1

    # PEP 8 (E731): replace the unreadable one-line lambda with a def
    def outputMatchPrepare(self, s):
        # scrub the per-test files dir and the shared fixture dir from output
        testfiles_dir = os.path.join(
            os.path.dirname(inspect.getsourcefile(self.__class__)),
            self.__class__.__name__)
        shared_dir = os.path.join(
            os.path.dirname(inspect.getsourcefile(BaseTest)), "files")
        return s.replace(testfiles_dir, "").replace(shared_dir, "")

    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo8", "repo_show")
class AddRepo9Test(BaseTest):
    """
    add package to local repo: conflict in files
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo9 -distribution=squeeze repo9",
    ]
    runCmd = "aptly repo add repo9 ${files}/pyspi_0.6.1-1.3.dsc"
    gold_processor = BaseTest.expand_environ
    expectedCode = 1

    # PEP 8 (E731): use a def, not a lambda assignment
    def outputMatchPrepare(self, s):
        # strip the shared fixture dir path so output matches the gold file
        return s.replace(
            os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")

    def prepare(self):
        super(AddRepo9Test, self).prepare()
        # pre-create a pool file with wrong content to provoke the conflict
        os.makedirs(os.path.join(os.environ["HOME"], ".aptly", "pool/de/f3/"))
        with open(os.path.join(os.environ["HOME"], ".aptly", "pool/de/f3/pyspi_0.6.1.orig.tar.gz"), "w") as f:
            f.write("abcd")
class AddRepo10Test(BaseTest):
    """
    add package to local repo: double import
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo10 -distribution=squeeze repo10",
        "aptly repo add repo10 ${files}",
    ]
    # re-adding an already imported package should succeed (effectively a no-op)
    runCmd = "aptly repo add repo10 ${files}/pyspi_0.6.1-1.3.dsc"
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo10", "repo_show")
class AddRepo11Test(BaseTest):
    """
    add package to local repo: conflict in packages + -force-replace
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo11 -distribution=squeeze repo11",
        "aptly repo add repo11 ${files}/pyspi_0.6.1-1.3.dsc",
    ]
    # with -force-replace the conflicting package replaces the existing one
    runCmd = "aptly repo add -force-replace repo11 ${testfiles}/pyspi_0.6.1-1.3.conflict.dsc"

    # PEP 8 (E731): replace the unreadable one-line lambda with a def
    def outputMatchPrepare(self, s):
        # scrub the per-test files dir and the shared fixture dir from output
        testfiles_dir = os.path.join(
            os.path.dirname(inspect.getsourcefile(self.__class__)),
            self.__class__.__name__)
        shared_dir = os.path.join(
            os.path.dirname(inspect.getsourcefile(BaseTest)), "files")
        return s.replace(testfiles_dir, "").replace(shared_dir, "")

    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo11", "repo_show")
class AddRepo12Test(BaseTest):
    """
    add package to local repo: .udeb file
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo12 -distribution=squeeze repo12",
    ]
    # micro debs (.udeb) must be accepted just like regular .deb files
    runCmd = "aptly repo add repo12 ${udebs}/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb"
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo12", "repo_show")
        # check pool
        self.check_exists('pool/72/16/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
class AddRepo13Test(BaseTest):
    """
    add package to local repo: .udeb and .deb files
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo13 -distribution=squeeze repo13",
    ]
    runCmd = "aptly repo add repo13 ${udebs} ${files}"

    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo13", "repo_show")
        # both package kinds must have been imported into the pool
        for pooled in ('pool/72/16/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb',
                       'pool/b7/2c/pyspi_0.6.1-1.3.dsc'):
            self.check_exists(pooled)
class AddRepo14Test(BaseTest):
    """
    add same package to local repo twice and make sure the file doesn't get truncated.
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo14 -distribution=squeeze repo14",
        "aptly repo add repo14 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb"
    ]
    # re-add the very file already stored in the pool: source and destination
    # are the same file, which used to risk truncating it to zero bytes
    runCmd = "aptly repo add repo14 $aptlyroot/pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb"
    def check(self):
        super(AddRepo14Test, self).check()
        # check pool
        self.check_file_not_empty('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb')
class AddRepo15Test(BaseTest):
    """
    add package with wrong case in stanza and missing fields
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo15 -distribution=squeeze repo15",
    ]
    runCmd = "aptly repo add repo15 ${testfiles}"
    expectedCode = 1

    # PEP 8 (E731): replace the unreadable one-line lambda with a def
    def outputMatchPrepare(self, s):
        # scrub the per-test files dir and the shared fixture dir from output
        testfiles_dir = os.path.join(
            os.path.dirname(inspect.getsourcefile(self.__class__)),
            self.__class__.__name__)
        shared_dir = os.path.join(
            os.path.dirname(inspect.getsourcefile(BaseTest)), "files")
        return s.replace(testfiles_dir, "").replace(shared_dir, "")
| |
"""
Computational geometry code for PySAL: Python Spatial Analysis Library.
"""
__author__ = "Sergio J. Rey, Xinyue Ye, Charles Schmidt, Andrew Winslow"
__credits__ = "Copyright (c) 2005-2011 Sergio J. Rey"
import math
import copy
import doctest
from rtree import *
from standalone import *
from shapes import *
__all__ = ["IntervalTree", "Grid", "BruteForcePointLocator",
"PointLocator", "PolygonLocator"]
class IntervalTree:
"""
Representation of an interval tree. An interval tree is a data structure which is used to
quickly determine which intervals in a set contain a value or overlap with a query interval.
Reference:
de Berg, van Kreveld, Overmars, Schwarzkopf. Computational Geometry: Algorithms and Application.
212-217. Springer-Verlag, Berlin, 2000.
"""
class _Node:
"""
Private class representing a node in an interval tree.
"""
def __init__(self, val, left_list, right_list, left_node, right_node):
self.val = val
self.left_list = left_list
self.right_list = right_list
self.left_node = left_node
self.right_node = right_node
def query(self, q):
i = 0
if q < self.val:
while i < len(self.left_list) and self.left_list[i][0] <= q:
i += 1
return [rec[2] for rec in self.left_list[0:i]]
else:
while i < len(self.right_list) and self.right_list[i][1] >= q:
i += 1
return [rec[2] for rec in self.right_list[0:i]]
def add(self, i):
"""
Adds an interval to the IntervalTree node.
"""
if not i[0] <= self.val <= i[1]:
raise Exception('Attempt to add an interval to an inappropriate IntervalTree node')
index = 0
while index < len(self.left_list) and self.left_list[index] < i[0]:
index = index + 1
self.left_list.insert(index, i)
index = 0
while index < len(self.right_list) and self.right_list[index] > i[1]:
index = index + 1
self.right_list.insert(index, i)
def remove(self, i):
"""
Removes an interval from the IntervalTree node.
"""
l = 0
r = len(self.left_list)
while l < r:
m = (l + r) / 2
if self.left_list[m] < i[0]:
l = m + 1
elif self.left_list[m] > i[0]:
r = m
else:
if self.left_list[m] == i:
self.left_list.pop(m)
else:
raise Exception('Attempt to remove an unknown interval')
l = 0
r = len(self.right_list)
while l < r:
m = (l + r) / 2
if self.right_list[m] > i[1]:
l = m + 1
elif self.right_left[m] < i[1]:
r = m
else:
if self.right_list[m] == i:
self.right_list.pop(m)
else:
raise Exception('Attempt to remove an unknown interval')
def __init__(self, intervals):
"""
__init__((number, number, x) list) -> IntervalTree
Returns an interval tree containing specified intervals.
Parameters
----------
intervals : a list of (lower, upper, item) elements to build the interval tree
Attributes
----------
Examples
--------
>>> intervals = [(-1, 2, 'A'), (5, 9, 'B'), (3, 6, 'C')]
>>> it = IntervalTree(intervals)
>>> isinstance(it, IntervalTree)
True
"""
self._build(intervals)
def _build(self, intervals):
"""
Build an interval tree containing _intervals_.
Each interval should be of the form (start, end, object).
build((number, number, x) list) -> None
Test tag: <tc>#is#IntervalTree.build</tc>
"""
bad_is = filter(lambda i: i[0] > i[1], intervals)
if bad_is != []:
raise Exception('Attempt to build IntervalTree with invalid intervals: ' + str(bad_is))
eps = list(set([i[0] for i in intervals] + [i[1] for i in intervals]))
eps.sort()
self.root = self._recursive_build(copy.copy(intervals), eps)
def query(self, q):
"""
Returns the intervals intersected by a value or interval.
query((number, number) or number) -> x list
Parameters
----------
q : a value or interval to find intervals intersecting
Attributes
----------
Examples
--------
>>> intervals = [(-1, 2, 'A'), (5, 9, 'B'), (3, 6, 'C')]
>>> it = IntervalTree(intervals)
>>> it.query((7, 14))
['B']
>>> it.query(1)
['A']
"""
if isinstance(q, tuple):
return self._query_range(q, self.root)
else:
return self._query_points(q)
def _query_range(self, q, root):
if root is None:
return []
if root.val < q[0]:
return self._query_range(q, root.right_node) + root.query(q[0])
elif root.val > q[1]:
return self._query_range(q, root.left_node) + root.query(q[1])
else:
return root.query(root.val) + self._query_range(q, root.left_node) + self._query_range(q, root.right_node)
def _query_points(self, q):
found = []
cur = self.root
while cur is not None:
found.extend(cur.query(q))
if q < cur.val:
cur = cur.left_node
else:
cur = cur.right_node
return found
def _recursive_build(self, intervals, eps):
def sign(x):
if x < 0:
return -1
elif x > 0:
return 1
else:
return 0
def binary_search(list, q):
l = 0
r = len(list)
while l < r:
m = (l + r) / 2
if list[m] < q:
l = m + 1
else:
r = m
return l
if eps == []:
return None
median = eps[len(eps) / 2]
hit_is = []
rem_is = []
for i in intervals:
if i[0] <= median <= i[1]:
hit_is.append(i)
else:
rem_is.append(i)
left_list = copy.copy(hit_is)
left_list.sort(lambda a, b: sign(a[0] - b[0]))
right_list = copy.copy(hit_is)
right_list.sort(lambda a, b: sign(b[1] - a[1]))
eps = list(set([i[0] for i in intervals] + [i[1] for i in intervals]))
eps.sort()
bp = binary_search(eps, median)
left_eps = eps[:bp]
right_eps = eps[bp:]
node = (IntervalTree._Node(median, left_list, right_list,
self._recursive_build(rem_is, left_eps),
self._recursive_build(rem_is, right_eps)))
return node
class Grid:
    """
    Representation of a binning data structure.
    """
    def __init__(self, bounds, resolution):
        """
        Returns a grid with specified properties.
        __init__(Rectangle, number) -> Grid
        Parameters
        ----------
        bounds : the area for the grid to encompass
        resolution : the diameter of each bin
        Examples
        --------
        TODO: complete this doctest
        >>> g = Grid(Rectangle(0, 0, 10, 10), 1)
        """
        if resolution == 0:
            raise Exception('Cannot create grid with resolution 0')
        self.res = resolution
        self.hash = {}  # (i, j) bin -> list of (point, item) pairs
        self.x_range = (bounds.left, bounds.right)
        self.y_range = (bounds.lower, bounds.upper)
        try:
            self.i_range = int(math.ceil(
                (self.x_range[1] - self.x_range[0]) / self.res))
            self.j_range = int(math.ceil(
                (self.y_range[1] - self.y_range[0]) / self.res))
        except Exception:
            # BUG FIX: this handler used to reference undefined names
            # (x_range, y_range, res), so the intended message was masked
            # by a NameError; report the actual values instead.
            raise Exception('Invalid arguments for Grid(): (' +
                            str(self.x_range) + ', ' + str(self.y_range) +
                            ', ' + str(self.res) + ')')
    def in_grid(self, loc):
        """
        Returns whether a 2-tuple location _loc_ lies inside the grid bounds.
        Test tag: <tc>#is#Grid.in_grid</tc>
        """
        return (self.x_range[0] <= loc[0] <= self.x_range[1] and
                self.y_range[0] <= loc[1] <= self.y_range[1])
    def __grid_loc(self, loc):
        # map a coordinate to its (i, j) bin, clamped to the grid edges
        i = min(self.i_range, max(int((loc[0] - self.x_range[0]) /
                                      self.res), 0))
        j = min(self.j_range, max(int((loc[1] - self.y_range[0]) /
                                      self.res), 0))
        return (i, j)
    def add(self, item, pt):
        """
        Adds an item to the grid at a specified location.
        add(x, Point) -> x
        Parameters
        ----------
        item -- the item to insert into the grid
        pt -- the location to insert the item at
        Attributes
        ----------
        Examples
        --------
        >>> g = Grid(Rectangle(0, 0, 10, 10), 1)
        >>> g.add('A', Point((4.2, 8.7)))
        'A'
        """
        if not self.in_grid(pt):
            raise Exception('Attempt to insert item at location outside grid bounds: ' + str(pt))
        grid_loc = self.__grid_loc(pt)
        if grid_loc in self.hash:
            self.hash[grid_loc].append((pt, item))
        else:
            self.hash[grid_loc] = [(pt, item)]
        return item
    def remove(self, item, pt):
        """
        Removes an item from the grid at a specified location.
        remove(x, Point) -> x
        Parameters
        ----------
        item -- the item to remove from the grid
        pt -- the location the item was added at
        Attributes
        ----------
        Examples
        --------
        >>> g = Grid(Rectangle(0, 0, 10, 10), 1)
        >>> g.add('A', Point((4.2, 8.7)))
        'A'
        >>> g.remove('A', Point((4.2, 8.7)))
        'A'
        """
        if not self.in_grid(pt):
            raise Exception('Attempt to remove item at location outside grid bounds: ' + str(pt))
        grid_loc = self.__grid_loc(pt)
        self.hash[grid_loc].remove((pt, item))
        # drop empty bins so hash only holds occupied cells
        if self.hash[grid_loc] == []:
            del self.hash[grid_loc]
        return item
    def bounds(self, bounds):
        """
        Returns a list of items found in the grid within the bounds specified.
        bounds(Rectangle) -> x list
        Parameters
        ----------
        item : the item to remove from the grid
        pt : the location the item was added at
        Attributes
        ----------
        Examples
        --------
        >>> g = Grid(Rectangle(0, 0, 10, 10), 1)
        >>> g.add('A', Point((1.0, 1.0)))
        'A'
        >>> g.add('B', Point((4.0, 4.0)))
        'B'
        >>> g.bounds(Rectangle(0, 0, 3, 3))
        ['A']
        >>> g.bounds(Rectangle(2, 2, 5, 5))
        ['B']
        >>> sorted(g.bounds(Rectangle(0, 0, 5, 5)))
        ['A', 'B']
        """
        x_range = (bounds.left, bounds.right)
        y_range = (bounds.lower, bounds.upper)
        items = []
        # scan only the bins intersecting the query rectangle, then filter
        # each bin's points exactly
        lower_left = self.__grid_loc((x_range[0], y_range[0]))
        upper_right = self.__grid_loc((x_range[1], y_range[1]))
        for i in xrange(lower_left[0], upper_right[0] + 1):
            for j in xrange(lower_left[1], upper_right[1] + 1):
                if (i, j) in self.hash:
                    items.extend(map(lambda item: item[1], filter(lambda item: x_range[0] <= item[0][0] <= x_range[1] and y_range[0] <= item[0][1] <= y_range[1], self.hash[(i, j)])))
        return items
    def proximity(self, pt, r):
        """
        Returns a list of items found in the grid within a specified distance of a point.
        proximity(Point, number) -> x list
        Parameters
        ----------
        pt : the location to search around
        r : the distance to search around the point
        Attributes
        ----------
        Examples
        --------
        >>> g = Grid(Rectangle(0, 0, 10, 10), 1)
        >>> g.add('A', Point((1.0, 1.0)))
        'A'
        >>> g.add('B', Point((4.0, 4.0)))
        'B'
        >>> g.proximity(Point((2.0, 1.0)), 2)
        ['A']
        >>> g.proximity(Point((6.0, 5.0)), 3.0)
        ['B']
        >>> sorted(g.proximity(Point((4.0, 1.0)), 4.0))
        ['A', 'B']
        """
        items = []
        # scan the bins covering the bounding square of the search circle
        lower_left = self.__grid_loc((pt[0] - r, pt[1] - r))
        upper_right = self.__grid_loc((pt[0] + r, pt[1] + r))
        for i in xrange(lower_left[0], upper_right[0] + 1):
            for j in xrange(lower_left[1], upper_right[1] + 1):
                if (i, j) in self.hash:
                    items.extend(map(lambda item: item[1], filter(lambda item: get_points_dist(pt, item[0]) <= r, self.hash[(i, j)])))
        return items
    def nearest(self, pt):
        """
        Returns the nearest item to a point.
        nearest(Point) -> x
        Parameters
        ----------
        pt : the location to search near
        Attributes
        ----------
        Examples
        --------
        >>> g = Grid(Rectangle(0, 0, 10, 10), 1)
        >>> g.add('A', Point((1.0, 1.0)))
        'A'
        >>> g.add('B', Point((4.0, 4.0)))
        'B'
        >>> g.nearest(Point((2.0, 1.0)))
        'A'
        >>> g.nearest(Point((7.0, 5.0)))
        'B'
        """
        # grow the search radius until something is found or the whole grid
        # (all four corners within reach) has been covered
        search_size = self.res
        while (self.proximity(pt, search_size) == [] and
               (get_points_dist((self.x_range[0], self.y_range[0]), pt) > search_size or
                get_points_dist((self.x_range[1], self.y_range[0]), pt) > search_size or
                get_points_dist((self.x_range[0], self.y_range[1]), pt) > search_size or
                get_points_dist((self.x_range[1], self.y_range[1]), pt) > search_size)):
            search_size = 2 * search_size
        items = []
        lower_left = self.__grid_loc(
            (pt[0] - search_size, pt[1] - search_size))
        upper_right = self.__grid_loc(
            (pt[0] + search_size, pt[1] + search_size))
        for i in xrange(lower_left[0], upper_right[0] + 1):
            for j in xrange(lower_left[1], upper_right[1] + 1):
                if (i, j) in self.hash:
                    items.extend(map(lambda item: (get_points_dist(pt, item[
                        0]), item[1]), self.hash[(i, j)]))
        if items == []:
            return None
        return min(items)[1]
class BruteForcePointLocator:
    """
    A class which does naive linear search on a set of Point objects.
    """
    def __init__(self, points):
        """
        Creates a naive index of the points specified.
        __init__(Point list) -> BruteForcePointLocator
        Parameters
        ----------
        points : a list of points to index (Point list)
        Examples
        --------
        >>> pl = BruteForcePointLocator([Point((0, 0)), Point((5, 0)), Point((0, 10))])
        """
        self._points = points

    def nearest(self, query_point):
        """
        Returns the nearest point indexed to a query point.
        nearest(Point) -> Point
        Parameters
        ----------
        query_point : a point to find the nearest indexed point to
        Examples
        --------
        >>> points = [Point((0, 0)), Point((1, 6)), Point((5.4, 1.4))]
        >>> pl = BruteForcePointLocator(points)
        >>> n = pl.nearest(Point((1, 1)))
        >>> str(n)
        '(0.0, 0.0)'
        """
        # linear scan; ties resolve to the first point encountered
        return min(self._points,
                   key=lambda candidate: get_points_dist(candidate, query_point))

    def region(self, region_rect):
        """
        Returns the indexed points located inside a rectangular query region.
        region(Rectangle) -> Point list
        Parameters
        ----------
        region_rect : the rectangular range to find indexed points in
        Examples
        --------
        >>> points = [Point((0, 0)), Point((1, 6)), Point((5.4, 1.4))]
        >>> pl = BruteForcePointLocator(points)
        >>> pts = pl.region(Rectangle(-1, -1, 10, 10))
        >>> len(pts)
        3
        """
        return [candidate for candidate in self._points
                if get_rectangle_point_intersect(region_rect, candidate) is not None]

    def proximity(self, origin, r):
        """
        Returns the indexed points located within some distance of an origin point.
        proximity(Point, number) -> Point list
        Parameters
        ----------
        origin : the point to find indexed points near
        r : the maximum distance to find indexed point from the origin point
        Examples
        --------
        >>> points = [Point((0, 0)), Point((1, 6)), Point((5.4, 1.4))]
        >>> pl = BruteForcePointLocator(points)
        >>> neighs = pl.proximity(Point((1, 0)), 2)
        >>> len(neighs)
        1
        >>> p = neighs[0]
        >>> isinstance(p, Point)
        True
        >>> str(p)
        '(0.0, 0.0)'
        """
        return [candidate for candidate in self._points
                if get_points_dist(candidate, origin) <= r]
class PointLocator:
    """
    An abstract representation of a point indexing data structure.
    """
    def __init__(self, points):
        """
        Returns a point locator object.
        __init__(Point list) -> PointLocator
        Parameters
        ----------
        points : a list of points to index
        Attributes
        ----------
        Examples
        --------
        >>> points = [Point((0, 0)), Point((1, 6)), Point((5.4, 1.4))]
        >>> pl = PointLocator(points)
        """
        # Facade: every query delegates to a brute-force backend; a smarter
        # index could be swapped in here without changing callers.
        self._locator = BruteForcePointLocator(points)
    def nearest(self, query_point):
        """
        Returns the nearest point indexed to a query point.
        nearest(Point) -> Point
        Parameters
        ----------
        query_point : a point to find the nearest indexed point to
        Attributes
        ----------
        Examples
        --------
        >>> points = [Point((0, 0)), Point((1, 6)), Point((5.4, 1.4))]
        >>> pl = PointLocator(points)
        >>> n = pl.nearest(Point((1, 1)))
        >>> str(n)
        '(0.0, 0.0)'
        """
        return self._locator.nearest(query_point)
    def region(self, region_rect):
        """
        Returns the indexed points located inside a rectangular query region.
        region(Rectangle) -> Point list
        Parameters
        ----------
        region_rect : the rectangular range to find indexed points in
        Attributes
        ----------
        Examples
        --------
        >>> points = [Point((0, 0)), Point((1, 6)), Point((5.4, 1.4))]
        >>> pl = PointLocator(points)
        >>> pts = pl.region(Rectangle(-1, -1, 10, 10))
        >>> len(pts)
        3
        """
        return self._locator.region(region_rect)
    # alias: overlapping(rect) behaves exactly like region(rect)
    overlapping = region
    def polygon(self, polygon):
        """
        Returns the indexed points located inside a polygon
        """
        # get points in polygon bounding box
        # for points in bounding box, check for inclusion in polygon
        # TODO(review): unimplemented -- currently falls through and
        # returns None.
    def proximity(self, origin, r):
        """
        Returns the indexed points located within some distance of an origin point.
        proximity(Point, number) -> Point list
        Parameters
        ----------
        origin : the point to find indexed points near
        r : the maximum distance to find indexed point from the origin point
        Attributes
        ----------
        Examples
        --------
        >>> points = [Point((0, 0)), Point((1, 6)), Point((5.4, 1.4))]
        >>> pl = PointLocator(points)
        >>> len(pl.proximity(Point((1, 0)), 2))
        1
        """
        return self._locator.proximity(origin, r)
class PolygonLocator:
"""
An abstract representation of a polygon indexing data structure.
"""
def __init__(self, polygons):
"""
Returns a polygon locator object.
__init__(Polygon list) -> PolygonLocator
Parameters
----------
polygons : a list of polygons to index
Attributes
----------
Examples
--------
>>> p1 = Polygon([Point((0, 1)), Point((4, 5)), Point((5, 1))])
>>> p2 = Polygon([Point((3, 9)), Point((6, 7)), Point((1, 1))])
>>> pl = PolygonLocator([p1, p2])
>>> isinstance(pl, PolygonLocator)
True
"""
self._locator = polygons
# create and rtree
self._rtree = RTree()
for polygon in polygons:
x = polygon.bounding_box.left
y = polygon.bounding_box.lower
X = polygon.bounding_box.right
Y = polygon.bounding_box.upper
self._rtree.insert(polygon, Rect(x, y, X, Y))
def inside(self, query_rectangle):
"""
Returns polygons that are inside query_rectangle
Attributes
----------
Examples
--------
>>> p1 = Polygon([Point((0, 1)), Point((4, 5)), Point((5, 1))])
>>> p2 = Polygon([Point((3, 9)), Point((6, 7)), Point((1, 1))])
>>> p3 = Polygon([Point((7, 1)), Point((8, 7)), Point((9, 1))])
>>> pl = PolygonLocator([p1, p2, p3])
>>> qr = Rectangle(0, 0, 5, 5)
>>> res = pl.inside( qr )
>>> len(res)
1
>>> qr = Rectangle(3, 7, 5, 8)
>>> res = pl.inside( qr )
>>> len(res)
0
>>> qr = Rectangle(10, 10, 12, 12)
>>> res = pl.inside( qr )
>>> len(res)
0
>>> qr = Rectangle(0, 0, 12, 12)
>>> res = pl.inside( qr )
>>> len(res)
3
Notes
-----
inside means the intersection of the query rectangle and a
polygon is not empty and is equal to the area of the polygon
"""
left = query_rectangle.left
right = query_rectangle.right
upper = query_rectangle.upper
lower = query_rectangle.lower
# rtree rect
qr = Rect(left, lower, right, upper)
# bb overlaps
res = [r.leaf_obj() for r in self._rtree.query_rect(qr)
if r.is_leaf()]
qp = Polygon([Point((left, lower)), Point((right, lower)),
Point((right, upper)), Point((left, upper))])
ip = []
GPPI = get_polygon_point_intersect
for poly in res:
flag = True
lower = poly.bounding_box.lower
right = poly.bounding_box.right
upper = poly.bounding_box.upper
left = poly.bounding_box.left
p1 = Point((left, lower))
p2 = Point((right, upper))
if GPPI(qp, p1) and GPPI(qp, p2):
ip.append(poly)
return ip
def overlapping(self, query_rectangle):
"""
Returns list of polygons that overlap query_rectangle
Examples
--------
>>> p1 = Polygon([Point((0, 1)), Point((4, 5)), Point((5, 1))])
>>> p2 = Polygon([Point((3, 9)), Point((6, 7)), Point((1, 1))])
>>> p3 = Polygon([Point((7, 1)), Point((8, 7)), Point((9, 1))])
>>> pl = PolygonLocator([p1, p2, p3])
>>> qr = Rectangle(0, 0, 5, 5)
>>> res = pl.overlapping( qr )
>>> len(res)
2
>>> qr = Rectangle(3, 7, 5, 8)
>>> res = pl.overlapping( qr )
>>> len(res)
1
>>> qr = Rectangle(10, 10, 12, 12)
>>> res = pl.overlapping( qr )
>>> len(res)
0
>>> qr = Rectangle(0, 0, 12, 12)
>>> res = pl.overlapping( qr )
>>> len(res)
3
>>> qr = Rectangle(8, 3, 9, 4)
>>> p1 = Polygon([Point((2, 1)), Point((2, 3)), Point((4, 3)), Point((4,1))])
>>> p2 = Polygon([Point((7, 1)), Point((7, 5)), Point((10, 5)), Point((10, 1))])
>>> pl = PolygonLocator([p1, p2])
>>> res = pl.overlapping(qr)
>>> len(res)
1
Notes
-----
overlapping means the intersection of the query rectangle and a
polygon is not empty and is no larger than the area of the polygon
"""
left = query_rectangle.left
right = query_rectangle.right
upper = query_rectangle.upper
lower = query_rectangle.lower
# rtree rect
qr = Rect(left, lower, right, upper)
# bb overlaps
res = [r.leaf_obj() for r in self._rtree.query_rect(qr)
if r.is_leaf()]
# have to check for polygon overlap using segment intersection
# add polys whose bb contains at least one of the corners of the query
# rectangle
sw = (left, lower)
se = (right, lower)
ne = (right, upper)
nw = (left, upper)
pnts = [sw, se, ne, nw]
cs = []
for pnt in pnts:
c = [r.leaf_obj() for r in self._rtree.query_point(
pnt) if r.is_leaf()]
cs.extend(c)
cs = list(set(cs))
overlapping = []
# first find polygons with at least one vertex inside query rectangle
remaining = copy.copy(res)
for polygon in res:
vertices = polygon.vertices
for vertex in vertices:
xb = vertex[0] >= left
xb *= vertex[0] < right
yb = vertex[1] >= lower
yb *= vertex[1] < upper
if xb * yb:
overlapping.append(polygon)
remaining.remove(polygon)
break
# for remaining polys in bb overlap check if vertex chains intersect
# segments of the query rectangle
left_edge = LineSegment(Point((left, lower)), Point((left,
upper)))
right_edge = LineSegment(Point((right, lower)), Point((right,
upper)))
lower_edge = LineSegment(Point((left, lower)), Point((right,
lower)))
upper_edge = LineSegment(Point((left, upper)), Point((right,
upper)))
for polygon in remaining:
vertices = copy.copy(polygon.vertices)
if vertices[-1] != vertices[0]:
vertices.append(vertices[0]) # put on closed cartographic form
nv = len(vertices)
for i in range(nv - 1):
head = vertices[i]
tail = vertices[i + 1]
edge = LineSegment(head, tail)
li = get_segments_intersect(edge, left_edge)
if li:
overlapping.append(polygon)
break
elif get_segments_intersect(edge, right_edge):
overlapping.append(polygon)
break
elif get_segments_intersect(edge, lower_edge):
overlapping.append(polygon)
break
elif get_segments_intersect(edge, upper_edge):
overlapping.append(polygon)
break
# check remaining for explicit containment of the bounding rectangle
# cs has candidates for this check
sw = Point(sw)
se = Point(se)
ne = Point(ne)
nw = Point(nw)
for polygon in cs:
if get_polygon_point_intersect(polygon, sw):
overlapping.append(polygon)
break
elif get_polygon_point_intersect(polygon, se):
overlapping.append(polygon)
break
elif get_polygon_point_intersect(polygon, ne):
overlapping.append(polygon)
break
elif get_polygon_point_intersect(polygon, nw):
overlapping.append(polygon)
break
return list(set(overlapping))
def nearest(self, query_point, rule='vertex'):
    """
    Returns the nearest polygon indexed to a query point based on
    various rules.
    nearest(Polygon) -> Polygon
    Parameters
    ----------
    query_point : a point to find the nearest indexed polygon to
    rule : representative point for polygon in nearest query.
        vertex -- measures distance between vertices and query_point
        centroid -- measures distance between centroid and
        query_point
        edge -- measures the distance between edges and query_point
    Attributes
    ----------
    Examples
    --------
    >>> p1 = Polygon([Point((0, 1)), Point((4, 5)), Point((5, 1))])
    >>> p2 = Polygon([Point((3, 9)), Point((6, 7)), Point((1, 1))])
    >>> pl = PolygonLocator([p1, p2])
    >>> try: n = pl.nearest(Point((-1, 1)))
    ... except NotImplementedError: print "future test: str(min(n.vertices())) == (0.0, 1.0)"
    future test: str(min(n.vertices())) == (0.0, 1.0)
    """
    # Placeholder: no nearest-neighbour rule is implemented yet; the doctest
    # above only documents the intended future behaviour.
    raise NotImplementedError
def region(self, region_rect):
    """
    Returns the indexed polygons located inside a rectangular query region.
    region(Rectangle) -> Polygon list
    Parameters
    ----------
    region_rect : the rectangular range to find indexed polygons in
    Attributes
    ----------
    Examples
    --------
    >>> p1 = Polygon([Point((0, 1)), Point((4, 5)), Point((5, 1))])
    >>> p2 = Polygon([Point((3, 9)), Point((6, 7)), Point((1, 1))])
    >>> pl = PolygonLocator([p1, p2])
    >>> n = pl.region(Rectangle(0, 0, 4, 10))
    >>> len(n)
    2
    """
    # BUG FIX: the previous implementation removed non-matching polygons
    # from self._locator while iterating over it.  Mutating a list during
    # iteration skips elements, and since `n` aliased self._locator the
    # query also permanently corrupted the index.  Build a fresh result
    # list instead; a polygon matches when at least one of its vertices
    # falls inside region_rect.
    matches = []
    for polygon in self._locator:
        point_locator = BruteForcePointLocator(polygon.vertices)
        if point_locator.region(region_rect):
            matches.append(polygon)
    return matches
def contains_point(self, point):
    """
    Returns polygons that contain point
    Parameters
    ----------
    point: point (x,y)
    Returns
    -------
    list of polygons containing point
    Examples
    --------
    >>> p1 = Polygon([Point((0,0)), Point((6,0)), Point((4,4))])
    >>> p2 = Polygon([Point((1,2)), Point((4,0)), Point((4,4))])
    >>> p1.contains_point((2,2))
    1
    >>> p2.contains_point((2,2))
    1
    >>> pl = PolygonLocator([p1, p2])
    >>> len(pl.contains_point((2,2)))
    2
    >>> p2.contains_point((1,1))
    0
    >>> p1.contains_point((1,1))
    1
    >>> len(pl.contains_point((1,1)))
    1
    >>> p1.centroid
    (3.3333333333333335, 1.3333333333333333)
    >>> pl.contains_point((1,1))[0].centroid
    (3.3333333333333335, 1.3333333333333333)
    """
    # Cheap pass: R-tree leaves whose bounding boxes cover the point.
    candidates = []
    for node in self._rtree.query_point(point):
        if node.is_leaf():
            candidates.append(node.leaf_obj())
    # Exact pass: keep only the candidate polygons that truly contain it.
    return [polygon for polygon in candidates
            if polygon.contains_point(point)]
def proximity(self, origin, r, rule='vertex'):
    """
    Returns the indexed polygons located within some distance of an
    origin point based on various rules.
    proximity(Polygon, number) -> Polygon list
    Parameters
    ----------
    origin : the point to find indexed polygons near
    r : the maximum distance to find indexed polygon from the origin point
    rule : representative point for polygon in nearest query.
        vertex -- measures distance between vertices and query_point
        centroid -- measures distance between centroid and
        query_point
        edge -- measures the distance between edges and query_point
    Attributes
    ----------
    Examples
    --------
    >>> p1 = Polygon([Point((0, 1)), Point((4, 5)), Point((5, 1))])
    >>> p2 = Polygon([Point((3, 9)), Point((6, 7)), Point((1, 1))])
    >>> pl = PolygonLocator([p1, p2])
    >>> try:
    ...     len(pl.proximity(Point((0, 0)), 2))
    ... except NotImplementedError:
    ...     print "future test: len(pl.proximity(Point((0, 0)), 2)) == 2"
    future test: len(pl.proximity(Point((0, 0)), 2)) == 2
    """
    # Placeholder: proximity queries are not implemented for any rule yet;
    # the doctest above only documents the intended future behaviour.
    raise NotImplementedError
| |
"""
Utility functions.
"""
import csv
import io
import logging
import os
import subprocess
import sys
import typing as ty
import click
from tabulate import tabulate
import yaml
LOG = logging.getLogger(__name__)
def ensure_str(s: ty.Any) -> str:
    """Coerce an arbitrary value to ``str``.

    ``None`` becomes the empty string, ``bytes`` are decoded as strict
    UTF-8, and anything else goes through ``str()``.
    """
    if s is None:
        return ''
    if isinstance(s, bytes):
        return s.decode('utf-8', 'strict')
    if isinstance(s, str):
        return s
    return str(s)
def trim(string: str, length: int = 70) -> str:
    """Trim a string to the given length, appending '...' when truncated."""
    if len(string) <= length:
        return string
    return string[: length - 1] + '...'
def git_config(value: str) -> str:
    """Read a setting from ``git-config``.

    Returns:
        The matching setting for *value*, or an empty string when git
        exits non-zero (e.g. the key is unset).
    """
    cmd = ['git', 'config', value]
    LOG.debug('Fetching git config info for %s', value)
    LOG.debug('Running: %s', ' '.join(cmd))
    try:
        raw = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        # Missing keys make git exit non-zero; treat that as "no value".
        raw = b''
    return raw.decode('utf-8').strip()
def git_am(mbox: str, args: ty.Tuple[str, ...]) -> None:
    """Execute git-am on a given mbox file.

    Any user-supplied *args* are passed through verbatim; otherwise a
    three-way merge (``-3``) is requested.  Exits the process with git's
    return code when the apply fails.
    """
    cmd = ['git', 'am']
    cmd.extend(args if args else ['-3'])
    cmd.append(mbox)
    LOG.debug('Applying patch at %s', mbox)
    LOG.debug('Running: %s', ' '.join(cmd))
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        LOG.error('Failed to apply patch:\n%s', exc.output.decode('utf-8'))
        sys.exit(exc.returncode)
    else:
        LOG.info(output.decode('utf-8'))
def _tabulate(
    output: ty.List[ty.Tuple[str, ty.Any]],
    headers: ty.List[str],
    fmt: str,
) -> str:
    """Render *output* rows under *headers* in the requested format.

    Falls back to ``git config pw.format`` and finally to 'table' when
    *fmt* is empty; exits the process on an unknown format.
    """
    fmt = fmt or git_config('pw.format') or 'table'
    if fmt == 'table':
        return tabulate(output, headers, tablefmt='psql')
    if fmt == 'simple':
        return tabulate(output, headers, tablefmt='simple')
    if fmt == 'csv':
        buf = io.StringIO()
        writer = csv.writer(
            buf, quoting=csv.QUOTE_ALL, lineterminator=os.linesep
        )
        writer.writerow([ensure_str(header) for header in headers])
        for row in output:
            writer.writerow([ensure_str(cell) for cell in row])
        return buf.getvalue()
    if fmt == 'yaml':
        data = []
        for entry in output:
            data.append(
                {headers[idx].lower(): entry[idx]
                 for idx in range(len(headers))}
            )
        return yaml.dump(data, default_flow_style=False)
    LOG.error('pw.format must be one of: table, simple, csv, yaml')
    sys.exit(1)
def _echo_via_pager(pager: str, output: str) -> None:
    """Pipe *output* through the external *pager* command."""
    env = dict(os.environ)
    # When the LESS environment variable is unset, Git sets it to FRX (if
    # LESS environment variable is set, Git does not change it at all).
    env.setdefault('LESS', 'FRX')
    proc = subprocess.Popen(pager.split(), stdin=subprocess.PIPE, env=env)
    try:
        proc.communicate(input=output.encode('utf-8', 'strict'))
    except (IOError, KeyboardInterrupt):
        # Broken pipe / Ctrl-C while feeding the pager: nothing to clean up.
        pass
    else:
        if proc.stdin:
            proc.stdin.close()
    # Keep waiting even if the user hits Ctrl-C while the pager is open;
    # only a normal pager exit breaks the loop.
    while True:
        try:
            proc.wait()
        except KeyboardInterrupt:
            pass
        else:
            break
def echo_via_pager(
    output: ty.List[ty.Tuple[str, ty.Any]],
    headers: ty.List[str],
    fmt: str,
) -> None:
    """Echo using git's default pager.

    Wrap ``click.echo_via_pager``, setting some environment variables in the
    process to mimic the pager settings used by Git:
    The order of preference is the ``$GIT_PAGER`` environment variable,
    then ``core.pager`` configuration, then ``$PAGER``, and then the
    default chosen at compile time (usually ``less``).
    """
    out = _tabulate(output, headers, fmt)
    pager = os.environ.get('GIT_PAGER', None)
    if pager:
        _echo_via_pager(pager, out)
        return
    # BUG FIX: query 'core.pager' as documented above; this previously read
    # the non-existent 'core.parser' key, so a pager configured via
    # `git config core.pager` was silently ignored.
    pager = git_config('core.pager')
    if pager:
        _echo_via_pager(pager, out)
        return
    pager = os.environ.get('PAGER', None)
    if pager:
        _echo_via_pager(pager, out)
        return
    _echo_via_pager('less', out)
def echo(
    output: ty.List[ty.Tuple[str, ty.Any]],
    headers: ty.List[str],
    fmt: str,
) -> None:
    """Render the rows with :func:`_tabulate` and print them (no pager)."""
    rendered = _tabulate(output, headers, fmt)
    click.echo(rendered)
def pagination_options(
    sort_fields: ty.Tuple[str, ...],
    default_sort: str,
) -> ty.Callable:
    """Shared pagination options.

    Returns a decorator adding ``--limit``, ``--page`` and ``--sort``
    options to a click command.
    """
    def _pagination_options(f):
        # Apply the options in the same order as before so the generated
        # --help output is unchanged.
        decorators = (
            click.option(
                '--limit',
                metavar='LIMIT',
                type=click.INT,
                help='Maximum number of items to show.',
            ),
            click.option(
                '--page',
                metavar='PAGE',
                type=click.INT,
                help='Page to retrieve items from. This is '
                'influenced by the size of LIMIT.',
            ),
            click.option(
                '--sort',
                metavar='FIELD',
                default=default_sort,
                type=click.Choice(sort_fields),
                help='Sort output on given field.',
            ),
        )
        for decorator in decorators:
            f = decorator(f)
        return f
    return _pagination_options
def format_options(
    original_function: ty.Callable = None,
    headers: ty.Tuple[str, ...] = None,
) -> ty.Callable:
    """Shared output format options.

    Adds ``--format`` and, when *headers* is given, ``--column`` options.
    Usable both bare (``@format_options``) and parameterized
    (``@format_options(headers=...)``).
    """
    def _format_options(f):
        f = click.option(
            '--format',
            '-f',
            'fmt',
            default=None,
            type=click.Choice(['simple', 'table', 'csv', 'yaml']),
            help=(
                "Output format. Defaults to the value of "
                "'git config pw.format' else 'table'."
            ),
        )(f)
        if headers:
            f = click.option(
                '--column',
                '-c',
                'headers',
                metavar='COLUMN',
                multiple=True,
                default=headers,
                type=click.Choice(headers),
                help='Columns to be included in output.',
            )(f)
        return f
    # Bare decorator usage passes the function directly.
    if original_function:
        return _format_options(original_function)
    return _format_options
| |
import StringIO
import json
import logging
import random
import urllib
import urllib2
import time
import math
import re
import requests
# import requests_toolbelt.adapters.appengine
# Use the App Engine Requests adapter. This makes sure that Requests uses
# URLFetch.
# requests_toolbelt.adapters.appengine.monkeypatch()
# sending images
# try:
# from PIL import Image
# except:
# pass
# import multipart
#
# # standard app engineimports
# from google.appengine.api import urlfetch
# from google.appengine.ext import deferred
# from google.appengine.ext import ndb
# from google.appengine.api.taskqueue import TaskRetryOptions
# import webapp2
# Telegram bot API token.
# SECURITY(review): this credential is hard-coded in source control; it
# should be rotated and loaded from configuration or the environment.
TOKEN = '363749995:AAEMaasMVLSPqSuSr1MiEFcgQH_Yn88hlbg'
# Per-bot base URL for all Telegram Bot API calls.
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
#urlfetch.set_default_fetch_deadline(60)
# In-memory set of (pair, target_price) price alerts; lost on restart.
ALERTS = set()
#
# def deffered_track_pair_price(pair, current_price, target_price, chat_id, message_id):
# alert_key = (pair, target_price)
# logging.info("Checking price alert..{} if {}".format(pair, target_price))
# kraken = KrakenExchange()
# ticker = kraken.getTicker(pair=ASSETPAIRS[pair])
# askPrice = float(ticker['Ask Price'][0])
# bidPrice = float(ticker['Bid Price'][0])
# live_price = (askPrice + bidPrice) / 2
# target_price = float(target_price)
# if current_price < target_price and live_price >= target_price:
# ALERTS.remove(alert_key)
# reply_message(
# chat_id=chat_id,
# message_id=message_id,
# msg="{} just hit {}!".format(
# pair, live_price
# )
# )
# elif current_price > target_price and live_price <= target_price:
# ALERTS.remove(alert_key)
# reply_message(
# chat_id=chat_id,
# message_id=message_id,
# msg="{} just hit {}!".format(
# pair, live_price
# )
# )
# else:
# raise Exception("Alert not hit, fail task so it is retried")
#
#
# def track_pair_price(pair, current_price, target_price, chat_id, message_id):
# ALERTS.add(
# (pair, target_price)
# )
#
# deferred.defer(
# deffered_track_pair_price,
# pair, current_price, target_price, chat_id, message_id,
# _retry_options=TaskRetryOptions(
# min_backoff_seconds=60,
# task_age_limit=86400
# ) # 1 day
# )
#
#
# # ================================
#
# class EnableStatus(ndb.Model):
# # key name: str(chat_id)
# enabled = ndb.BooleanProperty(indexed=False, default=False)
#
#
# # ================================
#
# def setEnabled(chat_id, yes):
# es = EnableStatus.get_or_insert(str(chat_id))
# es.enabled = yes
# es.put()
#
# def getEnabled(chat_id):
# es = EnableStatus.get_by_id(str(chat_id))
# if es:
# return es.enabled
# return False
#
#
# # ================================
#
# class MeHandler(webapp2.RequestHandler):
# def get(self):
# urlfetch.set_default_fetch_deadline(60)
# self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
#
#
# class GetUpdatesHandler(webapp2.RequestHandler):
# def get(self):
# urlfetch.set_default_fetch_deadline(60)
# self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
#
#
# class SetWebhookHandler(webapp2.RequestHandler):
# def get(self):
# urlfetch.set_default_fetch_deadline(60)
# url = self.request.get('url')
# if url:
# self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
#
#
# def reply_message(chat_id, message_id, msg=None, img=None):
# if msg:
# resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
# 'chat_id': str(chat_id),
# 'text': msg.encode('utf-8'),
# 'disable_web_page_preview': 'true',
# 'reply_to_message_id': str(message_id),
# 'parse_mode': 'Markdown'
# })).read()
# elif img:
# resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
# ('chat_id', str(chat_id)),
# ('reply_to_message_id', str(message_id)),
# ], [
# ('photo', 'image.jpg', img),
# ])
# else:
# logging.error('no msg or img specified')
# resp = None
#
# logging.info('send response:')
# logging.info(resp)
class WebhookHandler(webapp2.RequestHandler):
    """Telegram webhook endpoint.

    Parses an incoming Telegram update and dispatches on the message text:
    bot commands, Kraken/Bittrex price lookups, fib levels, order books and
    price alerts.
    NOTE(review): this handler references names (webapp2, urlfetch,
    reply_message, setEnabled, getEnabled, track_pair_price, Image,
    StringIO-based image replies) whose definitions/imports are currently
    commented out at module level -- confirm they are restored before
    deploying.
    """

    def post(self):
        urlfetch.set_default_fetch_deadline(60)
        body = json.loads(self.request.body)
        logging.info('request body:')
        logging.info(body)
        self.response.write(json.dumps(body))
        update_id = body['update_id']
        # An update carries either 'message' or 'edited_message'.
        # BUG FIX: narrowed the bare `except:` to KeyError so unrelated
        # errors are not silently re-routed to the edited_message path.
        try:
            message = body['message']
        except KeyError:
            message = body['edited_message']
        message_id = message.get('message_id')
        date = message.get('date')
        text = message.get('text')
        fr = message.get('from')
        chat = message['chat']
        chat_id = chat['id']

        def reply(msg=None, img=None):
            # Reply in the same chat, quoting the triggering message.
            reply_message(msg=msg, img=img, chat_id=chat_id, message_id=message_id)

        if not text:
            logging.info('no text')
            return
        if text.startswith('/'):
            # Normalize the common 'btc' spelling to Kraken's 'xbt' code.
            # BUG FIX: chain the substitutions on text_kraken; previously
            # each re.sub restarted from the original `text`, so only the
            # last pattern ever took effect.
            text_kraken = re.sub('(\/btc)', '/xbt', text)
            text_kraken = re.sub('(btc$)', 'xbt', text_kraken)
            text_kraken = re.sub('(btc\s+)', 'xbt ', text_kraken)
            if text == '/start':
                reply('Bot enabled')
                setEnabled(chat_id, True)
            if text == '/alerts':
                reply(
                    "*Alerts*\n{}".format(
                        "\n".join([
                            "{}: {}".format(pair, price)
                            for pair, price in ALERTS
                        ])
                    )
                )
            elif text == '/stop':
                reply('Bot disabled')
                setEnabled(chat_id, False)
            elif text == '/rules':
                reply('1. You do not talk about WHALE HUNTERS \n2. You DO NOT talk about WHALE HUNTERS \n3. Master level of TA skills required \n3.141592 Bring pie \n4. Inactive members will be banned')
            elif text == '/image':
                # Generate a random 512x512 gradient image and send it back.
                img = Image.new('RGB', (512, 512))
                base = random.randint(0, 16777216)
                pixels = [base+i*j for i in range(512) for j in range(512)]
                img.putdata(pixels)
                output = StringIO.StringIO()
                img.save(output, 'JPEG')
                reply(img=output.getvalue())
            elif text == '/help' or text == '/options':
                r = '/rules : show rules\n/image : generate an image\n/time(s) : get server time\n/assets : list of assets\n/pairs : list of all pairs (long)\n/<asset> : show this assets pairs\n/<assetpair> : show assetpairs price\n/alerts : show alerts'
                reply(r)
            elif text == '/time' or text == '/times':
                time = KrakenExchange().getServerTime()['rfc1123']
                r = 'Kraken server time: {}'.format(time)
                reply(r)
            elif text == '/assets':
                r = 'Reply with /<asset> to get its pairs\n{}'.format(', '.join(ASSETS))
                reply(r)
            elif text == '/pairs':
                assets = ASSETPAIRS.keys()
                assets.sort()
                r = 'Reply with /<assetpair> to get bid/ask prices\n{}'.format(', '.join(assets))
                reply(r)
            elif text[1:].upper() in ASSETS:
                # A bare asset: list all pairs sharing its 3-letter prefix.
                pairs = []
                for pair in ASSETPAIRS:
                    if pair[:3] == text[1:].upper()[:3]:
                        pairs.append(pair)
                r = 'Reply with /<assetpair> to get bid/ask prices\n{}'.format(', '.join(pairs))
                reply(r)
            elif text_kraken.split(' ')[0][1:].upper() in ASSETPAIRS.keys():
                # Kraken pair lookup, optionally followed by a sub-command
                # ('fib', 'book' or 'alert <price>').
                pair = text_kraken.split(' ')[0][1:].upper()
                kraken = KrakenExchange()
                ticker = kraken.getTicker(pair=ASSETPAIRS[pair])
                askPrice = float(ticker['Ask Price'][0])
                bidPrice = float(ticker['Bid Price'][0])
                price = (askPrice + bidPrice) / 2
                highPrice = float(ticker['High'][0])
                lowPrice = float(ticker['Low'][0])
                # time = kraken.serverTime['rfc1123']
                r = ""
                if len(text_kraken.split(' ')) > 1:
                    if text_kraken.split(' ')[1] == 'fib':
                        # Fibonacci retracement/extension levels over the
                        # 24h high-low range.
                        l_one = highPrice
                        l_two = highPrice - ((highPrice - lowPrice) * 0.236)
                        l_three = highPrice - ((highPrice - lowPrice) * 0.382)
                        l_four = highPrice - ((highPrice - lowPrice) * 0.5)
                        l_five = highPrice - ((highPrice - lowPrice) * 0.618)
                        l_six = highPrice - ((highPrice - lowPrice) * 0.786)
                        l_seven = lowPrice
                        l_eight = highPrice - ((highPrice - lowPrice) * 1.272)
                        l_nine = highPrice - ((highPrice - lowPrice) * 1.618)
                        r = '*{0}* 24h fib levels\n\n*0%*: {1}\n*23.6%*: {2}\n*38.2%*: {3}\n*50%*: {4}\n*61.8%*: {5}\n*78.6%*: {6}\n*100%*: {7}\n\n*127.2%*: {8}\n*161.8%*: {9}\n'.format(pair, l_one, l_two, l_three, l_four, l_five, l_six, l_seven, l_eight, l_nine)
                    if text_kraken.split(' ')[1] == 'book':
                        order_book = kraken.getOrderBook(pair=ASSETPAIRS[pair])
                        book = order_book[ASSETPAIRS[pair]]
                        r = "*OrderBook* {0} \n*Asks*\n{1}\n\n*Bids*\n{2}".format(
                            pair,
                            "\n".join(
                                ["{} {}".format(ask[0], ask[1]) for ask in book['asks'][:10]]
                            ),
                            "\n".join(
                                ["{} {}".format(bid[0], bid[1]) for bid in book['bids'][:10]]
                            ),
                        )
                    if text_kraken.split(' ')[1] == 'alert':
                        try:
                            target_price = text_kraken.split(' ')[2]
                            track_pair_price(pair, price, target_price, chat_id, message_id)
                            r = 'You want me to keep an eye on your {}? I will let you know if it rises or drops to {}'.format(
                                pair, target_price
                            )
                            logging.info(r)
                        except IndexError:
                            r = 'Tell me what price you want an alert for, doofus!'
                else:
                    r = '*{}* \n*Price:* {} \n*---* \n*High:* {} \n*Low:* {}'.format(pair, price, highPrice, lowPrice)
                # r += '\n\n_updated: {}_'.format(time)
                reply(r)
            elif text.split(' ')[0][1:].upper() in BITT_ASSETPAIRS:
                # TODO: insert bittrex methods here
                pair = text.split(' ')[0][1:]
                bittrex = BittrexExchange()
                ticker = bittrex.getTicker(pair=pair)
                askPrice = float(ticker['Ask Price'])
                bidPrice = float(ticker['Bid Price'])
                price = (askPrice + bidPrice) / 2
                highPrice = float(ticker['High'])
                lowPrice = float(ticker['Low'])
                r = ""
                if len(text.split(' ')) > 1:
                    if text.split(' ')[1] == 'fib':
                        l_one = highPrice
                        l_two = highPrice - ((highPrice - lowPrice) * 0.236)
                        l_three = highPrice - ((highPrice - lowPrice) * 0.382)
                        l_four = highPrice - ((highPrice - lowPrice) * 0.5)
                        l_five = highPrice - ((highPrice - lowPrice) * 0.618)
                        l_six = highPrice - ((highPrice - lowPrice) * 0.786)
                        l_seven = lowPrice
                        l_eight = highPrice - ((highPrice - lowPrice) * 1.272)
                        l_nine = highPrice - ((highPrice - lowPrice) * 1.618)
                        r = '*{0}* 24h fib levels\n\n*0%*: {1}\n*23.6%*: {2}\n*38.2%*: {3}\n*50%*: {4}\n*61.8%*: {5}\n*78.6%*: {6}\n*100%*: {7}\n\n*127.2%*: {8}\n*161.8%*: {9}\n'.format(pair, l_one, l_two, l_three, l_four, l_five, l_six, l_seven, l_eight, l_nine)
                else:
                    r = '*{}* \n*Price:* {} \n*---* \n*High:* {} \n*Low:* {}'.format(pair, price, highPrice, lowPrice)
                reply(r)
            elif len(text) == 4 or len(text) == 7:
                reply('This asset(pair) is not recognized. Pick one from the /assets list, stupid.')
            else:
                reply('You know, this sort of behaviour could qualify as sexual harassment.')
        # bot text reply's
        elif 'beach' in text:
            reply('dont forget to bring a towel')
        # elif ('sell' in text or 'dropping' in text or 'dumping' in text) and random.choice([True, False]):
        #     reply('weak hands!')
        # elif 'what time' in text:
        #     reply('look at the corner of your screen!')
        # elif 'moon' in text:
        #     reply('http://www.louwmanexclusive.com/nl/brands/lamborghini/')
        # elif 'bitch' in text:
        #     reply('dont talk to me like that!')
        # elif 'penny' in text:
        #     reply('Dont talk behind my back!')
        else:
            if getEnabled(chat_id):
                reply('I got your message! (but I do not know how to answer)')
            else:
                logging.info('not enabled for chat_id {}'.format(chat_id))
# ===== Kraken Exchange methods & classes ======
# Public (unauthenticated) Kraken REST endpoints, keyed by short name;
# used by KrakenExchange.query_public.
PUBLIC_URLS = {
    'time': 'https://api.kraken.com/0/public/Time',
    'assets': 'https://api.kraken.com/0/public/Assets',
    'assetPairs': 'https://api.kraken.com/0/public/AssetPairs',
    'ticker': 'https://api.kraken.com/0/public/Ticker',
    'ohlc': 'https://api.kraken.com/0/public/OHLC',
    'orderBook': 'https://api.kraken.com/0/public/Depth',
    'recentTrades': 'https://api.kraken.com/0/public/Trades',
    'spread': 'https://api.kraken.com/0/public/Spread',
}
# Maps Kraken's one-letter ticker field codes to human-readable labels
# (used to rewrite the raw ticker payload in KrakenExchange.getTicker).
TICKER_MAPPING = {
    'a': 'Ask Price',
    'b': 'Bid Price',
    'c': 'Last Trade',
    'v': 'Volume',
    'p': 'Volume weighted avg',
    't': '# Trades',
    'l': 'Low',
    'h': 'High',
    'o': 'Opening Price',
}
# Asset codes the bot recognises in /<asset> commands.
ASSETS = ['DASH', 'EOS', 'ETC', 'ETH', 'GNO', 'ICN', 'LTC', 'MLN', 'REP', 'USDT',
          'XBT', 'XDG', 'XLM', 'XMR', 'XRP', 'ZEC', 'BCH']
# Maps the user-facing pair name to Kraken's internal pair code
# (Kraken prefixes many assets with X/Z, e.g. ETHUSD -> XETHZUSD).
ASSETPAIRS = {
    'DASHEUR': 'DASHEUR',
    'DASHUSD': 'DASHUSD',
    'DASHXBT': 'DASHXBT',
    'EOSETH': 'EOSETH',
    'EOSEUR': 'EOSEUR',
    'EOSUSD': 'EOSUSD',
    'EOSXBT': 'EOSXBT',
    'ETCETH': 'XETCXETH',
    'ETCEUR': 'XETCZEUR',
    'ETCUSD': 'XETCZUSD',
    'ETCXBT': 'XETCXXBT',
    'ETHCAD': 'XETHZCAD',
    'ETHEUR': 'XETHZEUR',
    'ETHGBP': 'XETHZGBP',
    'ETHJPY': 'XETHZJPY',
    'ETHUSD': 'XETHZUSD',
    'ETHXBT': 'XETHXXBT',
    'GNOETH': 'GNOETH',
    'GNOEUR': 'GNOEUR',
    'GNOUSD': 'GNOUSD',
    'GNOXBT': 'GNOXBT',
    'ICNETH': 'XICNXETH',
    'ICNXBT': 'XICNXXBT',
    'LTCEUR': 'XLTCZEUR',
    'LTCUSD': 'XLTCZUSD',
    'LTCXBT': 'XLTCXXBT',
    'MLNETH': 'XMLNXETH',
    'MLNXBT': 'XMLNXXBT',
    'REPETH': 'XREPXETH',
    'REPEUR': 'XREPZEUR',
    'REPUSD': 'XREPZUSD',
    'REPXBT': 'XREPXXBT',
    'USDTUSD': 'USDTZUSD',
    'XBTCAD': 'XXBTZCAD',
    'XBTEUR': 'XXBTZEUR',
    'XBTGBP': 'XXBTZGBP',
    'XBTJPY': 'XXBTZJPY',
    'XBTUSD': 'XXBTZUSD',
    'XDGXBT': 'XXDGXXBT',
    'XLMEUR': 'XXLMZEUR',
    'XLMUSD': 'XXLMZUSD',
    'XLMXBT': 'XXLMXXBT',
    'XMREUR': 'XXMRZEUR',
    'XMRUSD': 'XXMRZUSD',
    'XMRXBT': 'XXMRXXBT',
    'XRPCAD': 'XXRPZCAD',
    'XRPEUR': 'XXRPZEUR',
    'XRPJPY': 'XXRPZJPY',
    'XRPUSD': 'XXRPZUSD',
    'XRPXBT': 'XXRPXXBT',
    'ZECEUR': 'XZECZEUR',
    'ZECUSD': 'XZECZUSD',
    'ZECXBT': 'XZECXXBT',
    'BCHEUR': 'BCHEUR',
    'BCHUSD': 'BCHUSD',
    'BCHXBT': 'BCHXBT',
}
# NOTE(review): appears unused in this module -- presumably an intended
# rate-limit bound; confirm before removing.
MAXREQUESTS = 15
def _query(url, header):
    """POST *header* as form data to *url* and return the JSON 'result' field.

    Returns None (implicitly) when the HTTP status is not 200.
    """
    response = requests.post(url, data=header)
    if response.status_code != 200:
        return None
    return json.loads(response.text)['result']
class KrakenExchange(object):
    """
    Holds all methods for fetching Assets, Assetpairs and current Ticker
    values from the Kraken Exchange.
    Time Skew can be displayed by requesting server time.
    """

    def __init__(self):
        super(KrakenExchange, self).__init__()

    def query_public(self, type, header=None):
        # NOTE: the parameter named `type` (kept for interface
        # compatibility) is the PUBLIC_URLS endpoint key, not a class.
        return _query(PUBLIC_URLS[type], header)

    def getServerTime(self):
        """Fetch and cache Kraken's server time."""
        serverTime = self.query_public('time')
        # isinstance replaces the non-idiomatic `type(x) == ValueError`
        # comparison; behaviour is unchanged for the exact type.
        if isinstance(serverTime, ValueError):
            return serverTime.message
        self.serverTime = serverTime
        return self.serverTime

    def getServerSkew(self):
        """Return local-clock skew (seconds) relative to Kraken's server."""
        self.serverSkew = time.time() - self.getServerTime()['unixtime']
        return self.serverSkew

    def getOrderBook(self, pair):
        """Fetch the top 10 levels of the order book for *pair*."""
        header = dict(
            pair=pair,
            count=10,
        )
        r = self.query_public('orderBook', header)
        return r

    def getTicker(self, pair):
        """Fetch the ticker for *pair*, remapped via TICKER_MAPPING labels."""
        header = {'pair': pair} if pair else None
        r = self.query_public('ticker', header)
        if isinstance(r, ValueError):
            return r.message
        self.ticker = {}
        # The response is keyed by Kraken's pair code; translate each
        # one-letter field into its readable label.
        ticker = r[pair]
        for t in ticker.keys():
            self.ticker[TICKER_MAPPING[t]] = ticker[t]
        return self.ticker
# ===== Bittrex Exchange methods & classes ======
# Public (unauthenticated) Bittrex v1.1 REST endpoints, keyed by short name.
BITT_PUBLIC_URLS = {
    # hold open markets, assets and pairs.
    'markets': 'https://bittrex.com/api/v1.1/public/getmarkets',
    # BUG FIX: removed a stray trailing space from the URL, which made the
    # request path invalid.
    'currencies': 'https://bittrex.com/api/v1.1/public/getcurrencies',
    # Just the current price and bid ask.
    'ticker': 'https://bittrex.com/api/v1.1/public/getticker',
    # > 1 market 24h summary, current high-low etc
    'summary': 'https://bittrex.com/api/v1.1/public/getmarketsummary',
    # > 1 market 24h summary, current high-low etc
    'summaries': 'https://bittrex.com/api/v1.1/public/getmarketsummaries',
    'orderBook': 'https://bittrex.com/api/v1.1/public/getorderbook',
    'history': 'https://bittrex.com/api/v1.1/public/getmarkethistory'
}
# Maps Bittrex market-summary field names to the labels used by the bot
# (only keys present here are copied in BittrexExchange.getTicker).
BITT_TICKER_MAPPING = {
    'MarketName': 'Pair',
    'High': 'High',
    'Low': 'Low',
    'Volume': 'Volume',
    'Last': 'Last',
    'BaseVolume': 'Base Volume',
    'Bid': 'Bid Price',
    'Ask': 'Ask Price',
    'OpenBuyOrders': '# Buy Orders',
    'OpenSellOrders': '# Sell Orders'
}
# Hard-coded snapshot of Bittrex market names ('<base>-<quote>'), used to
# recognise /<assetpair> commands; see the TODO below about fetching this
# list dynamically from the 'markets' endpoint instead.
BITT_ASSETPAIRS = [
    u'BTC-LTC',
    u'BTC-DOGE',
    u'BTC-VTC',
    u'BTC-PPC',
    u'BTC-FTC',
    u'BTC-RDD',
    u'BTC-NXT',
    u'BTC-DASH',
    u'BTC-POT',
    u'BTC-BLK',
    u'BTC-EMC2',
    u'BTC-XMY',
    u'BTC-AUR',
    u'BTC-EFL',
    u'BTC-GLD',
    u'BTC-SLR',
    u'BTC-PTC',
    u'BTC-GRS',
    u'BTC-NLG',
    u'BTC-RBY',
    u'BTC-XWC',
    u'BTC-MONA',
    u'BTC-THC',
    u'BTC-ENRG',
    u'BTC-ERC',
    u'BTC-NAUT',
    u'BTC-VRC',
    u'BTC-CURE',
    u'BTC-XBB',
    u'BTC-XMR',
    u'BTC-CLOAK',
    u'BTC-START',
    u'BTC-KORE',
    u'BTC-XDN',
    u'BTC-TRUST',
    u'BTC-NAV',
    u'BTC-XST',
    u'BTC-BTCD',
    u'BTC-VIA',
    u'BTC-UNO',
    u'BTC-PINK',
    u'BTC-IOC',
    u'BTC-CANN',
    u'BTC-SYS',
    u'BTC-NEOS',
    u'BTC-DGB',
    u'BTC-BURST',
    u'BTC-EXCL',
    u'BTC-SWIFT',
    u'BTC-DOPE',
    u'BTC-BLOCK',
    u'BTC-ABY',
    u'BTC-BYC',
    u'BTC-XMG',
    u'BTC-BLITZ',
    u'BTC-BAY',
    u'BTC-BTS',
    u'BTC-FAIR',
    u'BTC-SPR',
    u'BTC-VTR',
    u'BTC-XRP',
    u'BTC-GAME',
    u'BTC-COVAL',
    u'BTC-NXS',
    u'BTC-XCP',
    u'BTC-BITB',
    u'BTC-GEO',
    u'BTC-FLDC',
    u'BTC-GRC',
    u'BTC-FLO',
    u'BTC-NBT',
    u'BTC-MUE',
    u'BTC-XEM',
    u'BTC-CLAM',
    u'BTC-DMD',
    u'BTC-GAM',
    u'BTC-SPHR',
    u'BTC-OK',
    u'BTC-SNRG',
    u'BTC-PKB',
    u'BTC-CPC',
    u'BTC-AEON',
    u'BTC-ETH',
    u'BTC-GCR',
    u'BTC-TX',
    u'BTC-BCY',
    u'BTC-EXP',
    u'BTC-INFX',
    u'BTC-OMNI',
    u'BTC-AMP',
    u'BTC-AGRS',
    u'BTC-XLM',
    u'BTC-BTA',
    u'USDT-BTC',
    u'BITCNY-BTC',
    u'BTC-CLUB',
    u'BTC-VOX',
    u'BTC-EMC',
    u'BTC-FCT',
    u'BTC-MAID',
    u'BTC-EGC',
    u'BTC-SLS',
    u'BTC-RADS',
    u'BTC-DCR',
    u'BTC-SAFEX',
    u'BTC-BSD',
    u'BTC-XVG',
    u'BTC-PIVX',
    u'BTC-XVC',
    u'BTC-MEME',
    u'BTC-STEEM',
    u'BTC-2GIVE',
    u'BTC-LSK',
    u'BTC-PDC',
    u'BTC-BRK',
    u'BTC-DGD',
    u'ETH-DGD',
    u'BTC-WAVES',
    u'BTC-RISE',
    u'BTC-LBC',
    u'BTC-SBD',
    u'BTC-BRX',
    u'BTC-DRACO',
    u'BTC-ETC',
    u'ETH-ETC',
    u'BTC-STRAT',
    u'BTC-UNB',
    u'BTC-SYNX',
    u'BTC-TRIG',
    u'BTC-EBST',
    u'BTC-VRM',
    u'BTC-SEQ',
    u'BTC-XAUR',
    u'BTC-SNGLS',
    u'BTC-REP',
    u'BTC-SHIFT',
    u'BTC-ARDR',
    u'BTC-XZC',
    u'BTC-NEO',
    u'BTC-ZEC',
    u'BTC-ZCL',
    u'BTC-IOP',
    u'BTC-DAR',
    u'BTC-GOLOS',
    u'BTC-HKG',
    u'BTC-UBQ',
    u'BTC-KMD',
    u'BTC-GBG',
    u'BTC-SIB',
    u'BTC-ION',
    u'BTC-LMC',
    u'BTC-QWARK',
    u'BTC-CRW',
    u'BTC-SWT',
    u'BTC-TIME',
    u'BTC-MLN',
    u'BTC-ARK',
    u'BTC-DYN',
    u'BTC-TKS',
    u'BTC-MUSIC',
    u'BTC-DTB',
    u'BTC-INCNT',
    u'BTC-GBYTE',
    u'BTC-GNT',
    u'BTC-NXC',
    u'BTC-EDG',
    u'BTC-LGD',
    u'BTC-TRST',
    u'ETH-GNT',
    u'ETH-REP',
    u'USDT-ETH',
    u'ETH-WINGS',
    u'BTC-WINGS',
    u'BTC-RLC',
    u'BTC-GNO',
    u'BTC-GUP',
    u'BTC-LUN',
    u'ETH-GUP',
    u'ETH-RLC',
    u'ETH-LUN',
    u'ETH-SNGLS',
    u'ETH-GNO',
    u'BTC-APX',
    u'BTC-TKN',
    u'ETH-TKN',
    u'BTC-HMQ',
    u'ETH-HMQ',
    u'BTC-ANT',
    u'ETH-TRST',
    u'ETH-ANT',
    u'BTC-SC',
    u'ETH-BAT',
    u'BTC-BAT',
    u'BTC-ZEN',
    u'BTC-1ST',
    u'BTC-QRL',
    u'ETH-1ST',
    u'ETH-QRL',
    u'BTC-CRB',
    u'ETH-CRB',
    u'ETH-LGD',
    u'BTC-PTOY',
    u'ETH-PTOY',
    u'BTC-MYST',
    u'ETH-MYST',
    u'BTC-CFI',
    u'ETH-CFI',
    u'BTC-BNT',
    u'ETH-BNT',
    u'BTC-NMR',
    u'ETH-NMR',
    u'ETH-TIME',
    u'ETH-LTC',
    u'ETH-XRP',
    u'BTC-SNT',
    u'ETH-SNT',
    u'BTC-DCT',
    u'BTC-XEL',
    u'BTC-MCO',
    u'ETH-MCO',
    u'BTC-ADT',
    u'ETH-ADT',
    u'BTC-FUN',
    u'ETH-FUN',
    u'BTC-PAY',
    u'ETH-PAY',
    u'BTC-MTL',
    u'ETH-MTL',
    u'BTC-STORJ',
    u'ETH-STORJ',
    u'BTC-ADX',
    u'ETH-ADX',
    u'ETH-DASH',
    u'ETH-SC',
    u'ETH-ZEC',
    u'USDT-ZEC',
    u'USDT-LTC',
    u'USDT-ETC',
    u'USDT-XRP',
    u'BTC-OMG',
    u'ETH-OMG',
    u'BTC-CVC',
    u'ETH-CVC',
    u'BTC-PART',
    u'BTC-QTUM',
    u'ETH-QTUM',
    u'ETH-XMR',
    u'ETH-XEM',
    u'ETH-XLM',
    u'ETH-NEO',
    u'USDT-XMR',
    u'USDT-DASH',
    u'ETH-BCC',
    u'USDT-BCC',
    u'BTC-BCC',
    u'USDT-NEO',
    u'ETH-WAVES',
    u'ETH-STRAT',
    u'ETH-DGB',
    u'ETH-FCT',
    u'ETH-BTS']
# TODO: retrieve all pairs from the `getmarket` data. Pairs will have "-"
# which will be handy for separation.
class BittrexExchange(object):
    """
    Holds all methods for fetching:
    - Assets, Assetpairs, Current Ticker, 24h summary, order book, and history
    values from the Bittrex Exchange.
    """
    # (docstring fixed: it previously said "Kraken Exchange", a copy-paste
    # error from the class above.)

    def __init__(self):
        super(BittrexExchange, self).__init__()

    def query_public(self, type, header=None):
        # NOTE: the parameter named `type` (kept for interface
        # compatibility) is the BITT_PUBLIC_URLS endpoint key.
        return _query(BITT_PUBLIC_URLS[type], header)

    def getTicker(self, pair):
        """Fetch the 24h market summary for *pair*, remapped via
        BITT_TICKER_MAPPING; unknown fields are dropped."""
        header = {'market': pair} if pair else None
        r = self.query_public('summary', header)
        # isinstance replaces the non-idiomatic `type(x) == ValueError`.
        if isinstance(r, ValueError):
            return r.message
        self.ticker = {}
        # getmarketsummary returns a one-element list.
        ticker = r[0]
        for t in ticker.keys():
            if t in BITT_TICKER_MAPPING.keys():
                self.ticker[BITT_TICKER_MAPPING[t]] = ticker[t]
        return self.ticker

    def getmarkets(self, type, header=None):
        """Return the list of market names (e.g. 'BTC-LTC') on Bittrex.

        NOTE(review): both parameters are ignored (kept for interface
        compatibility); the query always hits the 'markets' endpoint.
        """
        header = None
        r = self.query_public('markets', header)
        self.markets = []
        # Direct iteration replaces the previous enumerate/index pattern.
        for market in r:
            self.markets.append(market["MarketName"])
        return self.markets
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class JunitTestsIntegrationTest(PantsRunIntegrationTest):
def _assert_junit_output_exists_for_class(self, workdir, classname):
    """Assert junit wrote both stdout and stderr capture files for classname."""
    for pattern in ('{}.out.txt', '{}.err.txt'):
        path = os.path.join(workdir, 'test', 'junit', pattern.format(classname))
        self.assertTrue(os.path.exists(path))
def _assert_junit_output(self, workdir):
    """Assert output files exist for both the java and scala example tests."""
    for classname in ('org.pantsbuild.example.hello.greet.GreetingTest',
                      'org.pantsbuild.example.hello.welcome.WelSpec'):
        self._assert_junit_output_exists_for_class(workdir, classname)
def test_junit_test_custom_interpreter(self):
    """Both example test targets pass under explicit interpreter constraints."""
    args = [
        'test',
        'examples/tests/java/org/pantsbuild/example/hello/greet',
        'examples/tests/scala/org/pantsbuild/example/hello/welcome',
        '--interpreter=CPython>=2.6,<3',
        '--interpreter=CPython>=3.3',
    ]
    with self.temporary_workdir() as workdir:
        pants_run = self.run_pants_with_workdir(args, workdir)
        self.assert_success(pants_run)
        self._assert_junit_output(workdir)
def test_junit_test(self):
    """Running the test goal against an empty scala test target fails."""
    with self.temporary_workdir() as workdir:
        result = self.run_pants_with_workdir(
            ['test', 'testprojects/tests/scala/org/pantsbuild/testproject/empty'],
            workdir)
        self.assert_failure(result)
def test_junit_test_with_test_option_with_relpath(self):
    """--test-junit-test accepts a source-relative .java path."""
    args = [
        'test',
        '--test-junit-test=examples/tests/java/org/pantsbuild/example/hello/greet/GreetingTest.java',
        'examples/tests/java/org/pantsbuild/example/hello/greet',
        'examples/tests/scala/org/pantsbuild/example/hello/welcome',
    ]
    with self.temporary_workdir() as workdir:
        pants_run = self.run_pants_with_workdir(args, workdir)
        self.assert_success(pants_run)
        self._assert_junit_output_exists_for_class(workdir, 'org.pantsbuild.example.hello.greet.GreetingTest')
def test_junit_test_with_test_option_with_dot_slash_relpath(self):
    """--test-junit-test also accepts a './'-prefixed relative path."""
    args = [
        'test',
        '--test-junit-test=./examples/tests/java/org/pantsbuild/example/hello/greet/GreetingTest.java',
        'examples/tests/java/org/pantsbuild/example/hello/greet',
        'examples/tests/scala/org/pantsbuild/example/hello/welcome',
    ]
    with self.temporary_workdir() as workdir:
        pants_run = self.run_pants_with_workdir(args, workdir)
        self.assert_success(pants_run)
        self._assert_junit_output_exists_for_class(workdir, 'org.pantsbuild.example.hello.greet.GreetingTest')
def test_junit_test_with_test_option_with_classname(self):
    """--test-junit-test accepts a fully-qualified class name."""
    args = [
        'test',
        '--test-junit-test=org.pantsbuild.example.hello.greet.GreetingTest',
        'examples/tests/java/org/pantsbuild/example/hello/greet',
        'examples/tests/scala/org/pantsbuild/example/hello/welcome',
    ]
    with self.temporary_workdir() as workdir:
        pants_run = self.run_pants_with_workdir(args, workdir)
        self.assert_success(pants_run)
        self._assert_junit_output_exists_for_class(workdir, 'org.pantsbuild.example.hello.greet.GreetingTest')
def test_junit_test_requiring_cwd_fails_without_option_specified(self):
    """The cwd-sensitive test fails when --test-junit-cwd is not given."""
    args = [
        'test',
        'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
        '--interpreter=CPython>=2.6,<3',
        '--interpreter=CPython>=3.3',
        '--jvm-test-junit-options=-Dcwd.test.enabled=true',
    ]
    self.assert_failure(self.run_pants(args))
def test_junit_test_requiring_cwd_passes_with_option_with_value_specified(self):
    """The cwd-sensitive test passes when --test-junit-cwd names the subdir."""
    args = [
        'test',
        'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
        '--interpreter=CPython>=2.6,<3',
        '--interpreter=CPython>=3.3',
        '--jvm-test-junit-options=-Dcwd.test.enabled=true',
        '--test-junit-cwd=testprojects/src/java/org/pantsbuild/testproject/cwdexample/subdir',
    ]
    self.assert_success(self.run_pants(args))
def test_junit_test_requiring_cwd_fails_with_option_with_no_value_specified(self):
    """Enabling the cwd check without supplying a cwd value still fails."""
    args = [
        'test',
        'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
        '--interpreter=CPython>=2.6,<3',
        '--interpreter=CPython>=3.3',
        '--jvm-test-junit-options=-Dcwd.test.enabled=true',
    ]
    self.assert_failure(self.run_pants(args))
def test_junit_test_output_flag(self):
    """--output-mode controls which test outputs reach stdout."""
    target = 'testprojects/src/java/org/pantsbuild/testproject/junit/suppressoutput:tests'

    def run_test(output_mode):
        # Only pass --output-mode when explicitly set, so the last case
        # exercises the default behaviour.
        args = ['test.junit', '--no-test-junit-fail-fast']
        if output_mode is not None:
            args.append('--output-mode=' + output_mode)
        args.append(target)
        return self.run_pants(args)

    # (mode, expect 'Failure output', expect 'Success output')
    expectations = [
        ('ALL', True, True),
        ('FAILURE_ONLY', True, False),
        ('NONE', False, False),
        (None, False, False),  # default output mode matches NONE
    ]
    for mode, want_failure, want_success in expectations:
        run = run_test(mode)
        for needle, expected in (('Failure output', want_failure),
                                 ('Success output', want_success)):
            if expected:
                self.assertIn(needle, run.stdout_data)
            else:
                self.assertNotIn(needle, run.stdout_data)
def test_junit_before_class_exception(self):
    """A @BeforeClass exception must not surface as a 'Test mechanism' error
    in any output mode.

    Uses assertNotIn (instead of assertTrue(x not in y)) so a failure shows
    the offending stdout instead of just 'False is not true'.
    """
    target = 'testprojects/src/java/org/pantsbuild/testproject/junit/beforeclassexception:tests'
    for output_mode in ['ALL', 'FAILURE_ONLY', 'NONE']:
        run_result = self.run_pants([
            'test.junit', '--no-test-junit-fail-fast',
            '--output-mode=' + output_mode,
            target,
        ])
        self.assertNotIn('Test mechanism', run_result.stdout_data,
                         'Test mechanism exception in case of ' + output_mode + ' output mode.')
def test_junit_test_target_cwd(self):
    """A target declaring its own cwd runs its tests successfully."""
    result = self.run_pants(
        ['test', 'testprojects/tests/java/org/pantsbuild/testproject/workdirs/onedir'])
    self.assert_success(result)
def test_junit_test_annotation_processor(self):
    """Tests that depend on an annotation processor compile and pass."""
    result = self.run_pants(
        ['test', 'testprojects/tests/java/org/pantsbuild/testproject/annotation'])
    self.assert_success(result)
def test_junit_test_duplicate_resources(self):
    """Colliding resource names across targets do not break the test run."""
    result = self.run_pants(
        ['test', 'testprojects/maven_layout/junit_resource_collision'])
    self.assert_success(result)
def test_junit_test_target_cwd_overrides_option(self):
    """The target-level cwd wins over the --test-junit-cwd command-line option."""
    result = self.run_pants([
        'test',
        'testprojects/tests/java/org/pantsbuild/testproject/workdirs/onedir',
        '--test-junit-cwd=testprojects/tests/java/org/pantsbuild/testproject/dummies'
    ])
    self.assert_success(result)
def test_junit_test_failure_summary(self):
    """--test-junit-failure-summary prints failed methods grouped per target."""
    with self.temporary_workdir() as workdir:
        with self.source_clone('testprojects/src/java/org/pantsbuild/testproject/junit/failing') as failing:
            run = self.run_pants_with_workdir([
                'test',
                '--test-junit-failure-summary',
                os.path.join(failing, 'tests', 'org', 'pantsbuild', 'tmp', 'tests'),
            ], workdir)
            self.assert_failure(run)
            # Each group: the target address followed by its failed test methods.
            expected_groups = [
                ['org/pantsbuild/tmp/tests:one',
                 'org.pantsbuild.tmp.tests.OneTest#testSingle'],
                ['org/pantsbuild/tmp/tests:two',
                 'org.pantsbuild.tmp.tests.TwoTest#testTupleFirst',
                 'org.pantsbuild.tmp.tests.TwoTest#testTupleSecond'],
                ['org/pantsbuild/tmp/tests:three',
                 'org.pantsbuild.tmp.tests.subtest.ThreeTest#testTripleFirst',
                 'org.pantsbuild.tmp.tests.subtest.ThreeTest#testTripleSecond',
                 'org.pantsbuild.tmp.tests.subtest.ThreeTest#testTripleThird'],
            ]
            # Strip indentation so group matching is whitespace-insensitive.
            normalized = '\n'.join(line.strip() for line in run.stdout_data.split('\n'))
            for group in expected_groups:
                self.assertIn('\n'.join(group), normalized)
def test_junit_test_no_failure_summary(self):
    """With --no-test-junit-failure-summary no per-target failure list appears."""
    with self.temporary_workdir() as workdir:
        with self.source_clone('testprojects/src/java/org/pantsbuild/testproject/junit/failing') as failing:
            run = self.run_pants_with_workdir([
                'test',
                '--no-test-junit-failure-summary',
                os.path.join(failing, 'tests', 'org', 'pantsbuild', 'tmp', 'tests')
            ], workdir)
            self.assert_failure(run)
            # Strip indentation before searching, mirroring the summary test.
            normalized = '\n'.join(line.strip() for line in run.stdout_data.split('\n'))
            self.assertNotIn('org/pantsbuild/tmp/tests:three\n'
                             'org.pantsbuild.tmp.tests.subtest.ThreeTest#testTripleFirst',
                             normalized)
def test_junit_test_successes_and_failures(self):
    """The failure summary lists only the failing/erroring methods of a
    mixed-result target (passing methods are excluded)."""
    with self.temporary_workdir() as workdir:
        with self.source_clone('testprojects/src/java/org/pantsbuild/testproject/junit/mixed') as mixed:
            run = self.run_pants_with_workdir([
                'test',
                '--test-junit-failure-summary',
                '--no-test-junit-fail-fast',
                os.path.join(mixed, 'tests', 'org', 'pantsbuild', 'tmp', 'tests'),
            ], workdir)
            expected = [
                'org/pantsbuild/tmp/tests:tests',
                'org.pantsbuild.tmp.tests.AllTests#test1Failure',
                'org.pantsbuild.tmp.tests.AllTests#test3Failure',
                'org.pantsbuild.tmp.tests.AllTests#test4Error',
            ]
            normalized = '\n'.join(line.strip() for line in run.stdout_data.split('\n'))
            self.assertIn('\n'.join(expected), normalized,
                          '{group}\n not found in\n\n{output}.'.format(group='\n'.join(expected),
                                                                       output=normalized))
| |
#!/usr/bin/python
#
# Build Duktape website. Must be run with cwd in the website/ directory.
#
import atexit
import datetime
import hashlib
import md5
import os
import re
import shutil
import sys
import tempfile
import time

from bs4 import BeautifulSoup, Tag
# Feature switches for the website build.
colorize = True             # run source-highlight over code snippets
fancy_stack = True          # render stack diagrams as styled HTML spans
remove_fixme = True         # strip elements with class "fixme" from output
testcase_refs = False       # emit per-API-call "Related test cases" sections
list_tags = False           # emit per-API-call "Tags" sections
floating_list_tags = True   # show tags floated inside the API call heading
fancy_releaselog = True     # render RELEASES.rst via rst2html instead of plain <pre>
dt_now = datetime.datetime.utcnow()  # single UTC timestamp used for the whole build
def readFile(x):
    """Read file 'x' in binary mode and return its full contents."""
    # 'with' guarantees the handle is closed even if read() raises.
    with open(x, 'rb') as f:
        return f.read()
def htmlEscape(x):
    """HTML-escape string 'x'.

    Printable ASCII (0x20-0x7e) other than '&', '<', '>' passes through
    unchanged; every other character becomes a numeric character reference.
    Built with join() to avoid quadratic repeated string concatenation.
    """
    esc = '&<>'

    def encode(c):
        if 0x20 <= ord(c) <= 0x7e and c not in esc:
            return c
        return '&#x%04x;' % ord(c)

    return ''.join(encode(c) for c in x)
def getAutodeleteTempname():
    """Return a fresh temp filename which is removed automatically at exit.

    The cleanup handler tolerates the file never having been created (or
    already having been removed); previously os.remove() would raise from
    the atexit hook in that case.
    """
    tmp = tempfile.mktemp(suffix='duktape-website')
    def f():
        try:
            os.remove(tmp)
        except OSError:
            pass  # never created, or already gone
    atexit.register(f)
    return tmp
# also escapes text automatically
def sourceHighlight(x, sourceLang):
    """Colorize source snippet 'x' using the external source-highlight tool.

    'sourceLang' is a source-highlight language name (e.g. 'c').  The snippet
    is passed through temp files because the tool is driven via the shell;
    returns the highlighted HTML fragment as a binary string.
    """
    tmp1 = getAutodeleteTempname()
    tmp2 = getAutodeleteTempname()
    f = open(tmp1, 'wb')  # FIXME
    f.write(x)
    f.close()
    # FIXME: safer execution
    # NOTE(review): shell command built by string interpolation; assumes
    # 'sourceLang' and the temp paths contain no shell metacharacters.
    os.system('source-highlight -s %s -c highlight.css --no-doc <"%s" >"%s"' % \
        (sourceLang, tmp1, tmp2))
    f = open(tmp2, 'rb')
    res = f.read()
    f.close()
    return res
def rst2Html(filename):
    """Convert the reStructuredText file 'filename' to an HTML string using
    the external rst2html tool (docutils)."""
    tmp1 = getAutodeleteTempname()
    # FIXME: safer execution
    os.system('rst2html "%s" >"%s"' % \
        (filename, tmp1))
    f = open(tmp1, 'rb')
    res = f.read()
    f.close()
    return res
def getFileMd5(filename):
    """Return the hex MD5 digest of 'filename', or None if it does not exist."""
    if not os.path.exists(filename):
        return None
    with open(filename, 'rb') as f:
        d = f.read()
    # hashlib replaces the long-deprecated 'md5' module and provides
    # hexdigest() directly instead of digest().encode('hex').
    return hashlib.md5(d).hexdigest()
def stripNewline(x):
    """Return 'x' with a single trailing newline removed, if one is present."""
    return x[:-1] if x.endswith('\n') else x
def validateAndParseHtml(data):
    """Parse HTML 'data' leniently, after a strict XML pass to surface errors."""
    BeautifulSoup(data, 'xml')   # validation pass only; result discarded
    return BeautifulSoup(data)   # lenient HTML parse is the actual result
# Matches one stack notation line ('\x5d' is ']').  Group 1: input stack,
# group 2: optional result stack after '->', group 3: optional trailing
# comment text.
re_stack_line = re.compile(r'^(\[[^\x5d]+\])(?:\s+->\s+(\[[^\x5d]+\]))?(?:\s+(.*?))?\s*$')

def renderFancyStack(inp_line):
    """Render one value stack diagram line into decorated one-line HTML.

    Element suffix markers select CSS classes: '!' -> active, '*' -> referred,
    '?' -> ghost.  Known literals ('undefined', 'null', 'true', 'false',
    'NaN') and double-quoted strings additionally get the 'literal' class.
    """
    # Support various notations here:
    #
    #   [ a b c ]
    #   [ a b c ] -> [ d e f ]
    #   [ a b c ] -> [ d e f ]  (if foo)
    #
    m = re_stack_line.match(inp_line)
    #print(inp_line)
    assert(m is not None)
    stacks = [ m.group(1) ]
    if m.group(2) is not None:
        stacks.append(m.group(2))

    res = []
    res.append('<div class="stack-wrapper">')
    for idx, stk in enumerate(stacks):
        if idx > 0:
            # arrow between the input and the result stack
            res.append('<span class="arrow"><b>→</b></span>')

        res.append('<span class="stack">')
        for part in stk.split(' '):
            part = part.strip()
            elem_classes = []
            elem_classes.append('elem')  #FIXME

            # suffix marker selects an extra class and is stripped from the text
            if len(part) > 0 and part[-1] == '!':
                part = part[:-1]
                elem_classes.append('active')
            elif len(part) > 0 and part[-1] == '*':
                part = part[:-1]
                elem_classes.append('referred')
            elif len(part) > 0 and part[-1] == '?':
                part = part[:-1]
                elem_classes.append('ghost')

            text = part

            # FIXME: detect special constants like "true", "null", etc?
            if text in [ 'undefined', 'null', 'true', 'false', 'NaN' ] or \
               (len(text) > 0 and text[0] == '"' and text[-1] == '"'):
                elem_classes.append('literal')

            # FIXME: inline elements for reduced size?
            # The stack elements use a classless markup to minimize result
            # HTML size.  HTML inline elements are used to denote different
            # kinds of elements; the elements should be reasonable for text
            # browsers so a limited set can be used.
            # NOTE(review): use_inline is hardwired False, so the <i>/<b>
            # compact branches below are currently dead code.
            use_inline = False

            if part == '':
                continue
            if part == '[':
                #res.append('<em>[</em>')
                res.append('<span class="cap">[</span>')
                continue
            if part == ']':
                #res.append('<em>]</em>')
                res.append('<span class="cap">]</span>')
                continue

            if part == '...':
                text = '. . .'
                elem_classes.append('ellipsis')
            else:
                text = part

            if 'ellipsis' in elem_classes and use_inline:
                res.append('<i>' + htmlEscape(text) + '</i>')
            elif 'active' in elem_classes and use_inline:
                res.append('<b>' + htmlEscape(text) + '</b>')
            else:
                res.append('<span class="' + ' '.join(elem_classes) + '">' + htmlEscape(text) + '</span>')
        res.append('</span>')

    # FIXME: pretty badly styled now
    if m.group(3) is not None:
        res.append('<span class="stack-comment">' + htmlEscape(m.group(3)) + '</span>')

    res.append('</div>')

    return ' '.join(res) + '\n'  # stack is a one-liner; spaces are for text browser rendering
def parseApiDoc(filename):
    """Parse an API doc file into a dict: section name -> list of lines.

    A section starts at a '=name' line; subsequent lines belong to it.
    Non-empty content before the first section raises Exception.  Leading
    and trailing empty lines of every section are trimmed.
    """
    parts = {}
    state = None
    with open(filename, 'rb') as f:
        for line in f.readlines():
            line = stripNewline(line)
            if line.startswith('='):
                # new section header, e.g. '=proto'
                state = line[1:]
            elif state is not None:
                # setdefault() replaces the deprecated has_key()/assign dance
                parts.setdefault(state, []).append(line)
            elif line != '':
                raise Exception('unparsed non-empty line: %r' % line)
            # empty lines before the first section are silently ignored

    # remove leading and trailing empty lines of each section
    for p in parts.values():
        while len(p) > 0 and p[0] == '':
            p.pop(0)
        while len(p) > 0 and p[-1] == '':
            p.pop()

    return parts
# Prototype type substitution tables: each entry maps a duk_xxx typedef to
# the concrete C type it resolves to on a representative platform.  Used to
# show alternative prototype typings in the API doc tooltips.

# C99: these are used if available
type_repl_c99_32bit = [
    ['duk_int_t', 'int' ],
    ['duk_uint_t', 'unsigned int' ],
    ['duk_int32_t', 'int32_t' ],
    ['duk_uint32_t', 'uint32_t' ],
    ['duk_uint16_t', 'uint16_t' ],
    ['duk_idx_t', 'int' ],
    ['duk_uarridx_t', 'unsigned int' ],
    ['duk_codepoint_t', 'int' ],
    ['duk_errcode_t', 'int' ],
    ['duk_bool_t', 'int' ],
    ['duk_ret_t', 'int' ],
    ['duk_size_t', 'size_t' ],
    ['duk_double_t', 'double' ],
]

# Typical 32-bit legacy/embedded platform (32-bit int)
type_repl_legacy32 = [
    ['duk_int_t', 'int' ],
    ['duk_uint_t', 'unsigned int' ],
    ['duk_int32_t', 'int' ],
    ['duk_uint32_t', 'unsigned int' ],
    ['duk_uint16_t', 'unsigned short' ],
    ['duk_idx_t', 'int' ],
    ['duk_uarridx_t', 'unsigned int' ],
    ['duk_codepoint_t', 'int' ],
    ['duk_errcode_t', 'int' ],
    ['duk_bool_t', 'int' ],
    ['duk_ret_t', 'int' ],
    ['duk_size_t', 'size_t' ],
    ['duk_double_t', 'double' ],
]

# Typical 16-bit legacy/embedded platform (16-bit int/short, 32-bit long)
type_repl_legacy16 = [
    ['duk_int_t', 'long' ],
    ['duk_uint_t', 'unsigned long' ],
    ['duk_int32_t', 'long' ],
    ['duk_uint32_t', 'unsigned long' ],
    ['duk_uint16_t', 'unsigned short' ],
    ['duk_idx_t', 'long' ],
    ['duk_uarridx_t', 'unsigned long' ],
    ['duk_codepoint_t', 'long' ],
    ['duk_errcode_t', 'long' ],
    ['duk_bool_t', 'int' ],
    ['duk_ret_t', 'int' ],
    ['duk_size_t', 'size_t' ],
    ['duk_double_t', 'double' ],
]
def substitutePrototypeTypes(line, repl):
    """Return 'line' with every duk_xxx typedef replaced by a concrete type.

    'repl' is a list of [duktape_type, concrete_type] pairs.
    """
    result = unicode(line)  # normalize to unicode before substitution
    for duk_type, concrete_type in repl:
        result = result.replace(duk_type, concrete_type)
    return result
def processApiDoc(parts, funcname, testrefs, used_tags):
    """Render one API call's parsed doc sections into a list of HTML lines.

    'parts' is the section dict from parseApiDoc(), 'funcname' the API call
    name, 'testrefs' the api-call -> testcase-file map from scanApiCalls(),
    'used_tags' the global tag list.  Output honors the module-level
    floating_list_tags / testcase_refs / list_tags feature switches.
    """
    res = []

    # the 'hidechar' span is to allow browser search without showing the char
    res.append('<h1 id="%s" class="apih1">' % funcname)
    res.append('<a href="#%s"><span class="hidechar">.</span>%s()</a>' % (funcname, funcname))

    # tag links floated inside the heading
    if floating_list_tags and parts.has_key('tags'):
        p = sorted(parts['tags'], reverse=True)  # reversed because floated to right (which reverses DOM order)

        # For now, add the introduced version as a tag
        if parts.has_key('introduced'):
            p = [ parts['introduced'][0] ] + p
        if parts.has_key('deprecated'):
            # XXX: must mark deprecation
            pass
        if parts.has_key('removed'):
            # XXX: must mark removal
            pass

        for idx, val in enumerate(p):
            classes = [ 'apitag' ]
            if val == 'experimental' or val == 'nonportable':
                classes.append('apitagwarn')
            if val == 'protected':
                classes.append('apitagprotected')
            res.append('<a class="' + ' '.join(classes) + '" ' +
                       'href="#' + htmlEscape('taglist-' + val) + '">' + htmlEscape(val) + '</a>')

    res.append('</h1>')

    res.append('<div class="api-call">')

    # prototype section, with alternative typings substituted per platform
    if parts.has_key('proto'):
        p = parts['proto']
        res.append('<div class="api-part">')
        res.append('<h2 class="api-proto">Prototype</h2>')
        alt_typing_c99 = []
        alt_typing_legacy32 = []
        alt_typing_legacy16 = []
        for i in p:
            alt_typing_c99.append(substitutePrototypeTypes(i, type_repl_c99_32bit))
            alt_typing_legacy32.append(substitutePrototypeTypes(i, type_repl_legacy32))
            alt_typing_legacy16.append(substitutePrototypeTypes(i, type_repl_legacy16))
        # Long tooltips are a bad idea in most browsers, so just put the C99 typing there for now
        #res.append('<pre class="c-code" title="' +
        #           'C99/C++11: ' + '\n'.join(alt_typing_c99) + '\n' +
        #           'Legacy 32-bit: ' + '\n'.join(alt_typing_legacy32) + '\n' +
        #           'Legacy 16-bit: ' + '\n'.join(alt_typing_legacy16) + '\n'
        #           '">')
        res.append('<pre class="c-code" title="' +
                   'C99/C++11 (32-bit): ' + '\n'.join(alt_typing_c99) +
                   '">')
        for i in p:
            res.append(htmlEscape(i))
        res.append('</pre>')
        res.append('</div>')  # api-part
        res.append('')
    else:
        pass

    # stack effect section; absence is documented explicitly
    if parts.has_key('stack'):
        p = parts['stack']
        res.append('<div class="api-part">')
        res.append('<h2 class="api-stack">Stack</h2>')
        for line in p:
            res.append('<pre class="stack">' + \
                       '%s' % htmlEscape(line) + \
                       '</pre>')
        res.append('</div>')
        res.append('')
    else:
        res.append('<div class="api-part">')
        res.append('<h2 class="api-stack">Stack</h2>')
        res.append('<p>(No effect on value stack.)</p>')
        res.append('</div>')  # api-part
        res.append('')

    if parts.has_key('summary'):
        p = parts['summary']
        res.append('<div class="api-part">')
        res.append('<h2 class="api-summary">Summary</h2>')

        # If text contains a '<p>', assume it is raw HTML; otherwise
        # assume it is a single paragraph (with no markup) and generate
        # paragraph tags, escaping into HTML
        raw_html = False
        for i in p:
            if '<p>' in i:
                raw_html = True

        if raw_html:
            for i in p:
                res.append(i)
        else:
            res.append('<p>')
            for i in p:
                res.append(htmlEscape(i))
            res.append('</p>')

        res.append('</div>')  # api-part
        res.append('')

    if parts.has_key('example'):
        p = parts['example']
        res.append('<div class="api-part">')
        res.append('<h2 class="api-example">Example</h2>')
        res.append('<pre class="c-code">')
        for i in p:
            res.append(htmlEscape(i))
        res.append('</pre>')
        res.append('</div>')  # api-part
        res.append('')

    if parts.has_key('seealso'):
        p = parts['seealso']
        res.append('<div class="api-part">')
        res.append('<h2 class="api-seealso">See also</h2>')
        res.append('<ul>')
        for i in p:
            # entries are API call names; link to their anchors
            res.append('<li><a href="#%s">%s</a></li>' % (htmlEscape(i), htmlEscape(i)))
        res.append('</ul>')
        res.append('</div>')  # api-part
        res.append('')

    if testcase_refs:
        res.append('<div class="api-part">')
        res.append('<h2 class="api-testcases">Related test cases</h2>')
        if testrefs.has_key(funcname):
            res.append('<ul>')
            for i in testrefs[funcname]:
                res.append('<li>%s</li>' % htmlEscape(i))
            res.append('</ul>')
        else:
            res.append('<p>None.</p>')
        res.append('</div>')  # api-part
        res.append('')

    # calls with no testcase coverage get a visible (removable) marker
    if not testrefs.has_key(funcname):
        res.append('<div class="fixme">This API call has no test cases.</div>')

    if list_tags and parts.has_key('tags'):
        # FIXME: placeholder
        res.append('<div class="api-part">')
        res.append('<h2 class="api-tags">Tags</h2>')
        res.append('<p>')
        p = parts['tags']
        for idx, val in enumerate(p):
            if idx > 0:
                res.append(' ')
            res.append(htmlEscape(val))
        res.append('</p>')
        res.append('</div>')  # api-part
        res.append('')

    if parts.has_key('fixme'):
        p = parts['fixme']
        res.append('<div class="fixme">')
        for i in p:
            res.append(htmlEscape(i))
        res.append('</div>')
        res.append('')

    res.append('</div>')  # api-call div

    return res
def processRawDoc(filename):
    """Read raw HTML file 'filename' and return its lines (newlines stripped),
    with one empty line appended as a section separator."""
    res = []
    # 'with' guarantees the handle is closed even if a read fails.
    with open(filename, 'rb') as f:
        for line in f.readlines():
            res.append(stripNewline(line))
    res.append('')
    return res
def transformColorizeCode(soup, cssClass, sourceLang):
    """Replace each <pre class="cssClass"> element's text with colorized
    markup produced by source-highlight ('sourceLang' selects the language).

    The element's 'title' attribute (hover tooltip), if any, is preserved.
    """
    for elem in soup.select('pre.' + cssClass):
        input_str = elem.string
        if len(input_str) > 0 and input_str[0] == '\n':
            # hack for leading empty line
            input_str = input_str[1:]

        colorized = sourceHighlight(input_str, sourceLang)

        origTitle = elem.get('title', None)

        # source-highlight generates <pre><tt>...</tt></pre>, get rid of <tt>
        new_elem = BeautifulSoup(colorized).tt  # XXX: parse just a fragment - how?
        new_elem.name = 'pre'
        new_elem['class'] = cssClass
        if origTitle is not None:
            # Preserve title (hover tool tip)
            new_elem['title'] = origTitle

        elem.replace_with(new_elem)
def transformFancyStacks(soup):
    """Replace each <pre class="stack"> element with fancy rendered HTML."""
    for pre in soup.select('pre.stack'):
        text = pre.string
        if len(text) > 0 and text[0] == '\n':
            text = text[1:]  # drop the leading blank line (source formatting aid)
        fancy = BeautifulSoup(renderFancyStack(text)).div  # XXX: fragment?
        pre.replace_with(fancy)
def transformRemoveClass(soup, cssClass):
    """Remove every element carrying CSS class 'cssClass' from the soup."""
    doomed = list(soup.select('.' + cssClass))
    for node in doomed:
        node.extract()
def transformReadIncludes(soup, includeDirs):
    """Inline include files referenced by 'include' attributes.

    For any element with an 'include' attribute, the named file is searched
    for in 'includeDirs' (first hit wins); *.html files are parsed and
    spliced in as markup, anything else becomes the element's literal text.
    Raises Exception if the file is not found in any include dir.
    """
    for elem in soup.select('*'):
        if not elem.has_key('include'):
            continue
        filename = elem['include']
        del elem['include']  # attribute is consumed, not emitted
        d = None
        for incdir in includeDirs:
            fn = os.path.join(incdir, filename)
            if os.path.exists(fn):
                f = open(fn, 'rb')
                d = f.read()
                f.close()
                break
        if d is None:
            raise Exception('cannot find include file: ' + repr(filename))
        if filename.endswith('.html'):
            new_elem = BeautifulSoup(d).div
            elem.replace_with(new_elem)
        else:
            elem.string = d
def transformVersionNumber(soup, verstr):
    """Substitute version string 'verstr' into every .duktape-version element."""
    for node in soup.select('.duktape-version'):
        node.replaceWith(verstr)
def transformCurrentDate(soup):
    """Substitute the build date (UTC, YYYY-MM-DD) into every .current-date element."""
    stamp = '%04d-%02d-%02d' % (dt_now.year, dt_now.month, dt_now.day)
    for node in soup.select('.current-date'):
        node.replaceWith(stamp)
def transformAddHrBeforeH1(soup):
    """Insert an <hr> before every <h1> (readability aid for text browsers)."""
    for heading in soup.select('h1'):
        heading.insert_before(soup.new_tag('hr'))
# Add automatic anchors so that a basename from an element with an explicit
# ID is appended with dotted number(s).  Note that headings do not actually
# nest in the document, so this is now based on document order traversal and
# keeping track of counts of headings at different levels, and the active
# explicit IDs at each level.
def transformAddAutoAnchorsNumbered(soup):
    """Assign automatic dotted ids (e.g. 'myid.1.2' or 'doc.1.2') to every
    heading that lacks an explicit id attribute."""
    level_counts = [ 0, 0, 0, 0, 0, 0 ]  # per-level heading counters: h1..h6
    level_ids = [ None, None, None, None, None, None ]  # explicit IDs
    hdr_tags = { 'h1': 0, 'h2': 1, 'h3': 2, 'h4': 3, 'h5': 4, 'h6': 5 }
    changes = []

    def _proc(root, state):
        # heading tag name -> level index; non-headings are ignored
        idx = hdr_tags.get(root.name, None)
        if idx is None:
            return

        # bump count at matching level and zero lower levels
        level_counts[idx] += 1
        for i in xrange(idx + 1, 6):
            level_counts[i] = 0

        # set explicit ID for current level
        if root.has_key('id'):
            level_ids[idx] = root['id']
            return

        # no explicit ID at current level, clear it
        level_ids[idx] = None

        # figure out an automatic ID: closest explicit ID + dotted
        # numbers to current level
        parts = []
        for i in xrange(idx, -1, -1):  # idx, idx-1, ..., 0
            if level_ids[i] is not None:
                parts.append(level_ids[i])
                break
            parts.append(str(level_counts[i]))
            if i == 0:
                parts.append('doc')  # if no ID in path, use e.g. 'doc.1.2'
        parts.reverse()
        auto_id = '.'.join(parts)

        # avoid mutation: record changes to be made first
        # (adding 'id' would be OK, but this is more flexible
        # if explicit anchors are added instead / in addition
        # to 'id' attributes)
        changes.append((root, auto_id))

    def _rec(root, state):
        # depth-first, document-order traversal over Tag nodes only
        if not isinstance(root, Tag):
            return
        _proc(root, state)
        for elem in root.children:
            _rec(elem, state)

    _rec(soup.select('body')[0], {})

    for elem, auto_id in changes:
        elem['id'] = auto_id
# Add automatic anchors where section headings are used to autogenerate
# suitable names.  This does not work very well: there are many subsections
# with the name "Example" or "Limitations", for instance.  Prepending the
# parent name (or rather names of all the parents) would create very long
# names.
def transformAddAutoAnchorsNamed(soup):
    """Assign ids derived from heading text to headings lacking one.

    Higher-level headings (h1 before h2, etc) get naming priority; any name
    that would collide with an existing id is skipped with a warning.
    """
    hdr_tags = [ 'h1', 'h2', 'h3', 'h4', 'h5', 'h6' ]
    ids = {}  # id -> True, ids already taken

    def findAutoName(txt):
        # simple name sanitation, not very well thought out; goal is to get
        # nice web-like anchor names from whatever titles are present
        txt = txt.strip().lower()
        if len(txt) > 1 and txt[0] == '.':
            txt = txt[1:]  # leading dot convention for API section names
        txt = txt.replace('c++', 'cpp')
        txt = txt.replace('. ', ' ')  # e.g. 'vs.' -> 'vs'
        txt = txt.replace(', ', ' ')  # e.g. 'foo, bar' -> 'foo bar'
        txt = txt.replace(' ', '_')
        res = ''
        # keep ASCII letters, digits (not in first position) and underscore;
        # eat bracket/quote characters; map everything else to underscore
        for i,c in enumerate(txt):
            if (ord(c) >= ord('a') and ord(c) <= ord('z')) or \
               (ord(c) >= ord('A') and ord(c) <= ord('Z')) or \
               (ord(c) >= ord('0') and ord(c) <= ord('9') and i > 0) or \
               c in '_':
                res += c
            elif c in '()[]{}?\'"':
                pass  # eat
            else:
                res += '_'
        return res

    # collect existing explicit ids first (and warn about duplicates)
    for elem in soup.select('*'):
        if not elem.has_key('id'):
            continue
        e_id = elem['id']
        if ids.has_key(e_id):
            print('WARNING: duplicate id %s' % e_id)
        ids[e_id] = True

    # add automatic anchors for every other heading, with priority in
    # naming for higher level sections (e.g. h2 over h3)
    for hdr in hdr_tags:
        for elem in soup.select(hdr):
            if elem.has_key('id'):
                continue  # already has an id anchor
            e_name = elem.text
            a_name = findAutoName(e_name)
            if ids.has_key(a_name):
                print('WARNING: cannot generate automatic anchor name for %s (already exists)' % e_name)
                continue
            ids[a_name] = True
            elem['id'] = a_name
def transformAddHeadingLinks(soup):
    """Append a section-sign self-link to every heading that has an id.

    Also adds the 'sectiontitle' class to the heading itself.
    """
    hdr_tags = [ 'h1', 'h2', 'h3', 'h4', 'h5', 'h6' ]
    changes = []

    for elem in soup.select('*'):
        if elem.name not in hdr_tags or not elem.has_key('id'):
            continue
        new_elem = soup.new_tag('a')
        new_elem['href'] = '#' + elem['id']
        new_elem['class'] = 'sectionlink'
        new_elem.string = u'\u00a7'  # section sign

        # avoid mutation while iterating
        changes.append((elem, new_elem))

    for elem, new_elem in changes:
        if elem.has_key('class'):
            elem['class'].append('sectiontitle')
        else:
            elem['class'] = 'sectiontitle'
        elem.append(' ')
        elem.append(new_elem)
def setNavSelected(soup, pagename):
    """Mark the top-nav <li> whose text equals 'pagename' as selected."""
    # pagename must match <li><a> content
    for item in soup.select('#site-top-nav li'):
        if item.text == pagename:
            item['class'] = 'selected'
# FIXME: refactor shared parts
def scanApiCalls(apitestdir):
    """Scan the *.c testcase files in 'apitestdir' for duk_xxx call names.

    Returns a dict mapping api call name -> sorted list of testcase file
    names referencing it.
    """
    re_api_call = re.compile(r'duk_[0-9a-zA-Z_]+')

    res = {}  # api call -> [ test1, ..., testN ]
    for filename in os.listdir(apitestdir):
        if os.path.splitext(filename)[1] != '.c':
            continue
        with open(os.path.join(apitestdir, filename)) as f:
            data = f.read()
        for call in re_api_call.findall(data):
            # setdefault() replaces the deprecated has_key()/assign dance
            files = res.setdefault(call, [])
            if filename not in files:
                files.append(filename)

    for files in res.values():
        files.sort()

    return res
def createTagIndex(api_docs, used_tags):
    """Generate the "API calls by tag" index section.

    'api_docs' is a list of { 'parts': ..., 'name': ... } entries and
    'used_tags' the tags to emit; returns a list of HTML lines with one
    <h2>/<ul> pair per tag linking to the matching API call anchors.
    """
    res = []
    res.append('<h1 id="bytag">API calls by tag</h1>')
    for tag in used_tags:
        res.append('<h2 id="taglist-' + htmlEscape(tag) + '">' + htmlEscape(tag) + '</h2>')
        res.append('<ul class="taglist">')
        for doc in api_docs:
            # 'in' test replaces the deprecated dict.has_key()
            if 'tags' not in doc['parts']:
                continue
            for i in doc['parts']['tags']:
                if i != tag:
                    continue
                res.append('<li><a href="#%s">%s</a></li>' % (htmlEscape(doc['name']), htmlEscape(doc['name'])))
        res.append('</ul>')
    return res
def generateApiDoc(apidocdir, apitestdir):
templ_soup = validateAndParseHtml(readFile('template.html'))
setNavSelected(templ_soup, 'API')
# scan api files
tmpfiles = os.listdir(apidocdir)
apifiles = []
for filename in tmpfiles:
if os.path.splitext(filename)[1] == '.txt':
apifiles.append(filename)
apifiles.sort()
#print(apifiles)
print '%d api files' % len(apifiles)
# scan api testcases for references to API calls
testrefs = scanApiCalls(apitestdir)
#print(repr(testrefs))
# title
title_elem = templ_soup.select('#template-title')[0]
del title_elem['id']
title_elem.string = 'Duktape API'
# scan api doc files
used_tags = []
api_docs = [] # [ { 'parts': xxx, 'name': xxx } ]
for filename in apifiles:
parts = parseApiDoc(os.path.join(apidocdir, filename))
funcname = os.path.splitext(os.path.basename(filename))[0]
if parts.has_key('tags') and 'omit' in parts['tags']:
print 'Omit API doc: ' + str(funcname)
continue
if parts.has_key('tags'):
for i in parts['tags']:
if i not in used_tags:
used_tags.append(i)
api_docs.append({ 'parts': parts, 'name': funcname })
used_tags.sort()
# nav
res = []
navlinks = []
navlinks.append(['#introduction', 'Introduction'])
navlinks.append(['#notation', 'Notation'])
navlinks.append(['#concepts', 'Concepts'])
navlinks.append(['#defines', 'Header definitions'])
navlinks.append(['#bytag', 'API calls by tag'])
navlinks.append(['', u'\u00a0']) # XXX: force vertical space
for doc in api_docs:
funcname = doc['name']
navlinks.append(['#' + funcname, funcname])
res.append('<ul>')
for nav in navlinks:
res.append('<li><a href="' + htmlEscape(nav[0]) + '">' + htmlEscape(nav[1]) + '</a></li>')
res.append('</ul>')
nav_soup = validateAndParseHtml('\n'.join(res))
tmp_soup = templ_soup.select('#site-middle-nav')[0]
tmp_soup.clear()
for i in nav_soup.select('body')[0]:
tmp_soup.append(i)
# content
res = []
res += [ '<div class="main-title"><strong>Duktape API</strong></div>' ]
# FIXME: generate from the same list as nav links for these
res += processRawDoc('api/intro.html')
res += processRawDoc('api/notation.html')
res += processRawDoc('api/concepts.html')
res += processRawDoc('api/defines.html')
# tag index
res += createTagIndex(api_docs, used_tags)
# api docs
for doc in api_docs:
# FIXME: Here we'd like to validate individual processApiDoc() results so
# that they don't e.g. have unbalanced tags. Or at least normalize them so
# that they don't break the entire page.
data = None
try:
data = processApiDoc(doc['parts'], doc['name'], testrefs, used_tags)
res += data
except:
print repr(data)
print 'FAIL: ' + repr(filename)
raise
print('used tags: ' + repr(used_tags))
content_soup = validateAndParseHtml('\n'.join(res))
tmp_soup = templ_soup.select('#site-middle-content')[0]
tmp_soup.clear()
for i in content_soup.select('body')[0]:
tmp_soup.append(i)
tmp_soup['class'] = 'content'
return templ_soup
def generateIndexPage():
    """Generate the index.html (front page) soup from the page template."""
    page = validateAndParseHtml(readFile('template.html'))
    body = validateAndParseHtml(readFile('index/index.html'))
    setNavSelected(page, 'Home')

    title_elem = page.select('#template-title')[0]
    del title_elem['id']
    title_elem.string = 'Duktape'

    # transplant the index body into the template's middle area
    middle = page.select('#site-middle')[0]
    middle.clear()
    for child in body.select('body')[0]:
        middle.append(child)
    middle['class'] = 'content'

    return page
def generateDownloadPage(releases_filename):
    """Generate the download.html page soup.

    Inserts the release log (rendered from ../RELEASES.rst via rst2html when
    fancy_releaselog is set, otherwise as plain <pre> text read from
    'releases_filename') and fills an MD5 checksum into every download table
    row that has a .filename cell.
    """
    templ_soup = validateAndParseHtml(readFile('template.html'))
    down_soup = validateAndParseHtml(readFile('download/download.html'))
    setNavSelected(templ_soup, 'Download')

    title_elem = templ_soup.select('#template-title')[0]
    del title_elem['id']
    title_elem.string = 'Downloads'

    if fancy_releaselog:
        # fancy releaselog
        rel_data = rst2Html(os.path.abspath(os.path.join('..', 'RELEASES.rst')))
        rel_soup = BeautifulSoup(rel_data)
        released = rel_soup.select('#released')[0]
        # massage the rst2html generated HTML to be more suitable
        for elem in released.select('h1'):
            elem.extract()
        releaselog_elem = down_soup.select('#releaselog')[0]
        releaselog_elem.insert_after(released)
    else:
        # plaintext releaselog
        releaselog_elem = down_soup.select('#releaselog')[0]
        pre_elem = down_soup.new_tag('pre')
        releaselog_elem.append(pre_elem)
        f = open(releases_filename, 'rb')
        pre_elem.string = f.read().decode('utf-8')
        f.close()

    # automatic md5sums for downloadable files
    # <tr><td class="reldate">2013-09-21</td>
    #     <td class="filename"><a href="duktape-0.6.0.tar.xz">duktape-0.6.0.tar.xz</a></td>
    #     <td class="description">alpha, first round of work on public API</td>
    #     <td class="hash">fa384a42a27d996313e0192c51c50b4a</td></tr>
    for tr in down_soup.select('tr'):
        tmp = tr.select('.filename')
        if len(tmp) != 1:
            continue  # not a download row
        href = tmp[0].select('a')[0]['href']
        hash_elem = tr.select('.hash')[0]
        # '???' marks files missing from ../duktape-releases
        hash_elem.string = getFileMd5(os.path.abspath(os.path.join('..', 'duktape-releases', href))) or '???'

    tmp_soup = templ_soup.select('#site-middle')[0]
    tmp_soup.clear()
    for i in down_soup.select('body')[0]:
        tmp_soup.append(i)
    tmp_soup['class'] = 'content'

    return templ_soup
def generateGuide():
    """Generate the guide.html page soup from the template plus the guide
    section files, with one nav link per section."""
    templ_soup = validateAndParseHtml(readFile('template.html'))
    setNavSelected(templ_soup, 'Guide')

    title_elem = templ_soup.select('#template-title')[0]
    del title_elem['id']
    title_elem.string = 'Duktape Programmer\'s Guide'

    # (anchor, nav title, source file basename under guide/), in page order
    sections = [
        ('introduction', 'Introduction', 'intro'),
        ('gettingstarted', 'Getting started', 'gettingstarted'),
        ('programming', 'Programming model', 'programming'),
        ('stacktypes', 'Stack types', 'stacktypes'),
        ('ctypes', 'C types', 'ctypes'),
        ('typealgorithms', 'Type algorithms', 'typealgorithms'),
        ('duktapebuiltins', 'Duktape built-ins', 'duktapebuiltins'),
        ('es6features', 'Ecmascript E6 features', 'es6features'),
        ('custombehavior', 'Custom behavior', 'custombehavior'),
        ('customjson', 'Custom JSON formats', 'customjson'),
        ('customdirectives', 'Custom directives', 'customdirectives'),
        ('errorobjects', 'Error objects', 'errorobjects'),
        ('functionobjects', 'Function objects', 'functionobjects'),
        ('modules', 'Modules', 'modules'),
        ('logging', 'Logging', 'logging'),
        ('finalization', 'Finalization', 'finalization'),
        ('coroutines', 'Coroutines', 'coroutines'),
        ('virtualproperties', 'Virtual properties', 'virtualproperties'),
        ('internalproperties', 'Internal properties', 'internalproperties'),
        ('threading', 'Threading', 'threading'),
        ('sandboxing', 'Sandboxing', 'sandboxing'),
        ('performance', 'Performance', 'performance'),
        ('compiling', 'Compiling', 'compiling'),
        ('portability', 'Portability', 'portability'),
        ('compatibility', 'Compatibility', 'compatibility'),
        ('versioning', 'Versioning', 'versioning'),
        ('limitations', 'Limitations', 'limitations'),
        ('comparisontolua', 'Comparison to Lua', 'luacomparison'),
    ]

    # nav: one link per section
    nav_html = []
    nav_html.append('<ul>')
    for anchor, title, _ in sections:
        nav_html.append('<li><a href="' + htmlEscape('#' + anchor) + '">' + htmlEscape(title) + '</a></li>')
    nav_html.append('</ul>')
    nav_soup = validateAndParseHtml('\n'.join(nav_html))
    nav_target = templ_soup.select('#site-middle-nav')[0]
    nav_target.clear()
    for child in nav_soup.select('body')[0]:
        nav_target.append(child)

    # content: main title followed by each section's raw HTML, in order
    content_html = [ '<div class="main-title"><strong>Duktape Programmer\'s Guide</strong></div>' ]
    for _, _, basename in sections:
        content_html += processRawDoc('guide/' + basename + '.html')
    content_soup = validateAndParseHtml('\n'.join(content_html))
    content_target = templ_soup.select('#site-middle-content')[0]
    content_target.clear()
    for child in content_soup.select('body')[0]:
        content_target.append(child)
    content_target['class'] = 'content'

    return templ_soup
def generateStyleCss():
    """Concatenate all site CSS files (in cascade order) into one string,
    each prefixed with a marker comment."""
    styles = [
        'reset.css',
        'style-html.css',
        'style-content.css',
        'style-top.css',
        'style-middle.css',
        'style-bottom.css',
        'style-index.css',
        'style-download.css',
        'style-api.css',
        'style-guide.css',
        'highlight.css'
    ]

    chunks = []
    for name in styles:
        chunks.append('/* === %s === */\n' % name)
        chunks.append(readFile(name))
    return ''.join(chunks)
def postProcess(soup, includeDirs, autoAnchors=False, headingLinks=False, duktapeVersion=None):
    """Apply the shared page transforms to 'soup' and return it.

    Always: include expansion, version/date substitution, <hr> before <h1>
    (readability in text browsers).  Optional: automatic heading anchors and
    heading self-links.  The module-level feature flags control colorizing,
    fancy stacks, and fixme removal.
    """
    transformReadIncludes(soup, includeDirs)
    transformVersionNumber(soup, duktapeVersion)
    transformCurrentDate(soup)
    transformAddHrBeforeH1(soup)

    # add automatic anchors to all headings (as long as they don't conflict
    # with any manually assigned "long term" ids)
    if autoAnchors:
        transformAddAutoAnchorsNumbered(soup)
    if headingLinks:
        transformAddHeadingLinks(soup)

    if colorize:
        transformColorizeCode(soup, 'c-code', 'c')
        transformColorizeCode(soup, 'ecmascript-code', 'javascript')
    if fancy_stack:
        transformFancyStacks(soup)
    if remove_fixme:
        transformRemoveClass(soup, 'fixme')

    return soup
def writeFile(name, data):
    """Write binary string 'data' to file 'name', replacing any old content."""
    # 'with' guarantees the handle is flushed and closed even on error.
    with open(name, 'wb') as f:
        f.write(data)
def scrapeDuktapeVersion(filename=os.path.join('..', 'src', 'duk_api_public.h.in')):
    """Scrape the Duktape version from the public API header.

    Returns (version_string, version_int), e.g. ('1.2.3', 10203) for
    '#define DUK_VERSION 10203L'.  The last matching line wins.  Raises
    Exception when no DUK_VERSION define is found (previously an unbound
    'raw_ver' caused a NameError instead of the intended Exception).

    'filename' defaults to the in-tree header location for backward
    compatibility; parameterized mainly for testability.
    """
    re_ver = re.compile(r'^#define DUK_VERSION\s+(\d+)L?\s*$')
    raw_ver = None
    str_ver = None
    with open(filename) as f:
        for line in f:
            m = re_ver.match(line.strip())
            if m is None:
                continue
            raw_ver = int(m.group(1))
            # '//' makes the integer division explicit (same result in py2)
            str_ver = '%d.%d.%d' % (raw_ver // 10000, (raw_ver // 100) % 100, raw_ver % 100)
    if raw_ver is None:
        raise Exception('cannot scrape Duktape version')
    return str_ver, raw_ver
def main():
    # Generate the duktape.org website into the output directory given as
    # the first (and only) command line argument.  Assumes the current
    # working directory is the website source dir: all inputs are
    # referenced with relative paths.
    outdir = sys.argv[1]; assert(outdir)
    apidocdir = 'api'
    apitestdir = '../api-testcases'
    guideincdirs = [ './guide', '../examples/guide' ]
    apiincdirs = [ './api', '../examples/api' ]
    out_charset = 'utf-8'
    releases_filename = '../RELEASES.rst'
    # Version string/int scraped from the source tree; substituted into
    # every generated page via postProcess().
    duk_verstr, duk_verint = scrapeDuktapeVersion()
    print 'Scraped version number: ' + duk_verstr
    print 'Generating style.css'
    data = generateStyleCss()
    writeFile(os.path.join(outdir, 'style.css'), data)
    #writeFile(os.path.join(outdir, 'reset.css'), readFile('reset.css'))
    #writeFile(os.path.join(outdir, 'highlight.css'), readFile('highlight.css'))
    print 'Generating api.html'
    soup = generateApiDoc(apidocdir, apitestdir)
    soup = postProcess(soup, apiincdirs, autoAnchors=True, headingLinks=True, duktapeVersion=duk_verstr)
    writeFile(os.path.join(outdir, 'api.html'), soup.encode(out_charset))
    print 'Generating guide.html'
    soup = generateGuide()
    soup = postProcess(soup, guideincdirs, autoAnchors=True, headingLinks=True, duktapeVersion=duk_verstr)
    writeFile(os.path.join(outdir, 'guide.html'), soup.encode(out_charset))
    print 'Generating index.html'
    soup = generateIndexPage()
    soup = postProcess(soup, None, duktapeVersion=duk_verstr)
    writeFile(os.path.join(outdir, 'index.html'), soup.encode(out_charset))
    print 'Generating download.html'
    soup = generateDownloadPage(releases_filename)
    soup = postProcess(soup, None, duktapeVersion=duk_verstr)
    writeFile(os.path.join(outdir, 'download.html'), soup.encode(out_charset))
    # Static assets copied verbatim into the output directory.
    print 'Copying misc files'
    for i in [ 'favicon.ico',
               'robots.txt',
               'startup_image_320x480.png',
               'touch_icon_114x114.png',
               'touch_icon_120x120.png',
               'touch_icon_144x144.png',
               'touch_icon_152x152.png',
               'touch_icon_57x57.png',
               'touch_icon_60x60.png',
               'touch_icon_72x72.png' ]:
        shutil.copyfile(os.path.join('./', i), os.path.join(outdir, i))
    # Copy release tarballs (only the .tar.xz variants) next to the pages.
    print 'Copying release binaries'
    for i in os.listdir(os.path.join('..', 'duktape-releases')):
        if re.match(r'^duktape-.*?.tar.xz$', i) is None:
            continue
        shutil.copyfile(os.path.join('..', 'duktape-releases', i), os.path.join(outdir, i))
    # Dukweb demo files; basename() flattens them into outdir.
    print 'Copying dukweb.js files'
    for i in [ '../dukweb.js',
               '../jquery-1.11.0.js',
               '../dukweb/dukweb.css',
               '../dukweb/dukweb.html' ]:
        shutil.copyfile(os.path.join('./', i), os.path.join(outdir, os.path.basename(i)))
if __name__ == '__main__':
    main()
| |
"""
Functions
---------
.. autosummary::
:toctree: generated/
fmin_l_bfgs_b
"""
## License for the Python wrapper
## ==============================
## Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca>
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
import numpy as np
from numpy import array, asarray, float64, zeros
from . import _lbfgsb
from ._optimize import (MemoizeJac, OptimizeResult,
_check_unknown_options, _prepare_scalar_function)
from ._constraints import old_bound_to_new
from scipy.sparse.linalg import LinearOperator
__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
                  approx_grad=0,
                  bounds=None, m=10, factr=1e7, pgtol=1e-5,
                  epsilon=1e-8,
                  iprint=-1, maxfun=15000, maxiter=15000, disp=None,
                  callback=None, maxls=20):
    """
    Minimize a function func using the L-BFGS-B algorithm.

    Parameters
    ----------
    func : callable f(x,*args)
        Function to minimize.
    x0 : ndarray
        Initial guess.
    fprime : callable fprime(x,*args), optional
        The gradient of `func`. If None, then `func` returns the function
        value and the gradient (``f, g = func(x, *args)``), unless
        `approx_grad` is True in which case `func` returns only ``f``.
    args : sequence, optional
        Arguments to pass to `func` and `fprime`.
    approx_grad : bool, optional
        Whether to approximate the gradient numerically (in which case
        `func` returns only the function value).
    bounds : list, optional
        ``(min, max)`` pairs for each element in ``x``, defining the bounds
        on that parameter. Use None or +-inf for one of ``min`` or ``max``
        when there is no bound in that direction.
    m : int, optional
        The maximum number of variable metric corrections used to define
        the limited memory matrix (the limited memory BFGS method does not
        store the full hessian but uses this many terms to approximate it).
    factr : float, optional
        The iteration stops when
        ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
        where ``eps`` is the automatically generated machine precision.
        Typical values: 1e12 for low accuracy; 1e7 for moderate accuracy;
        10.0 for extremely high accuracy. See Notes for the relationship to
        `ftol`, which is exposed (instead of `factr`) by the
        `scipy.optimize.minimize` interface to L-BFGS-B.
    pgtol : float, optional
        The iteration will stop when
        ``max{|proj g_i | i = 1, ..., n} <= pgtol``
        where ``pg_i`` is the i-th component of the projected gradient.
    epsilon : float, optional
        Step size used when `approx_grad` is True, for numerically
        calculating the gradient.
    iprint : int, optional
        Controls the frequency of output. ``iprint < 0`` means no output;
        ``iprint = 0`` print only one line at the last iteration;
        ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint
        iterations; ``iprint = 99`` print details of every iteration except
        n-vectors; ``iprint = 100`` print also the changes of active set
        and final x; ``iprint > 100`` print details of every iteration
        including x and g.
    disp : int, optional
        If zero, then no output. If a positive number, then this over-rides
        `iprint` (i.e., `iprint` gets the value of `disp`).
    maxfun : int, optional
        Maximum number of function evaluations. Note that this function may
        violate the limit because of evaluating gradients by numerical
        differentiation.
    maxiter : int, optional
        Maximum number of iterations.
    callback : callable, optional
        Called after each iteration, as ``callback(xk)``, where ``xk`` is
        the current parameter vector.
    maxls : int, optional
        Maximum number of line search steps (per iteration). Default is 20.

    Returns
    -------
    x : array_like
        Estimated position of the minimum.
    f : float
        Value of `func` at the minimum.
    d : dict
        Information dictionary.
        * d['warnflag'] is
          - 0 if converged,
          - 1 if too many function evaluations or too many iterations,
          - 2 if stopped for another reason, given in d['task']
        * d['grad'] is the gradient at the minimum (should be 0 ish)
        * d['funcalls'] is the number of function calls made.
        * d['nit'] is the number of iterations.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'L-BFGS-B' `method` in particular. Note that the
        `ftol` option is made available via that interface, while `factr`
        is provided via this interface, where
        ``ftol = factr * numpy.finfo(float).eps``.

    Notes
    -----
    Wraps the L-BFGS-B FORTRAN code, version 3.0 (released April 25, 2011),
    written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal
    <nocedal@ece.nwu.edu>, released under the BSD License with the request
    that publications and commercial products using it quote at least one
    of the references below.

    References
    ----------
    * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
      Constrained Optimization, (1995), SIAM Journal on Scientific and
      Statistical Computing, 16, 5, pp. 1190-1208.
    * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
      FORTRAN routines for large scale bound constrained optimization (1997),
      ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
    * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778:
      L-BFGS-B, FORTRAN routines for large scale bound constrained
      optimization (2011), ACM Transactions on Mathematical Software, 38, 1.
    """
    # Resolve how the objective and its gradient are supplied.
    if approx_grad:
        fun, jac = func, None
    elif fprime is None:
        # func returns (f, g); memoize so a single call feeds both.
        memoized = MemoizeJac(func)
        fun, jac = memoized, memoized.derivative
    else:
        fun, jac = func, fprime

    # Translate the legacy keyword names into _minimize_lbfgsb options.
    opts = {'disp': iprint if disp is None else disp,
            'iprint': iprint,
            'maxcor': m,
            'ftol': factr * np.finfo(float).eps,
            'gtol': pgtol,
            'eps': epsilon,
            'maxfun': maxfun,
            'maxiter': maxiter,
            'callback': callback,
            'maxls': maxls}

    res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
                           **opts)

    # Repackage the OptimizeResult into the legacy (x, f, d) format.
    info = {'grad': res['jac'],
            'task': res['message'],
            'funcalls': res['nfev'],
            'nit': res['nit'],
            'warnflag': res['status']}
    return res['x'], res['fun'], info
def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
                     disp=None, maxcor=10, ftol=2.2204460492503131e-09,
                     gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
                     iprint=-1, callback=None, maxls=20,
                     finite_diff_rel_step=None, **unknown_options):
    """
    Minimize a scalar function of one or more variables using the L-BFGS-B
    algorithm.
    Options
    -------
    disp : None or int
        If `disp is None` (the default), then the supplied version of `iprint`
        is used. If `disp is not None`, then it overrides the supplied version
        of `iprint` with the behaviour you outlined.
    maxcor : int
        The maximum number of variable metric corrections used to
        define the limited memory matrix. (The limited memory BFGS
        method does not store the full hessian but uses this many terms
        in an approximation to it.)
    ftol : float
        The iteration stops when ``(f^k -
        f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
    gtol : float
        The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
        <= gtol`` where ``pg_i`` is the i-th component of the
        projected gradient.
    eps : float or ndarray
        If `jac is None` the absolute step size used for numerical
        approximation of the jacobian via forward differences.
    maxfun : int
        Maximum number of function evaluations.
    maxiter : int
        Maximum number of iterations.
    iprint : int, optional
        Controls the frequency of output. ``iprint < 0`` means no output;
        ``iprint = 0`` print only one line at the last iteration;
        ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
        ``iprint = 99`` print details of every iteration except n-vectors;
        ``iprint = 100`` print also the changes of active set and final x;
        ``iprint > 100`` print details of every iteration including x and g.
    callback : callable, optional
        Called after each iteration, as ``callback(xk)``, where ``xk`` is the
        current parameter vector.
    maxls : int, optional
        Maximum number of line search steps (per iteration). Default is 20.
    finite_diff_rel_step : None or array_like, optional
        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
        use for numerical approximation of the jacobian. The absolute step
        size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
        possibly adjusted to fit into the bounds. For ``method='3-point'``
        the sign of `h` is ignored. If None (default) then step is selected
        automatically.
    Notes
    -----
    The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
    but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
    relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
    I.e., `factr` multiplies the default machine floating-point precision to
    arrive at `ftol`.
    """
    _check_unknown_options(unknown_options)
    m = maxcor
    pgtol = gtol
    # Invert the ftol = factr * eps mapping; the Fortran routine wants factr.
    factr = ftol / np.finfo(float).eps
    x0 = asarray(x0).ravel()
    n, = x0.shape
    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    # unbounded variables must use None, not +-inf, for optimizer to work properly
    bounds = [(None if l == -np.inf else l, None if u == np.inf else u) for l, u in bounds]
    # LBFGSB is sent 'old-style' bounds, 'new-style' bounds are required by
    # approx_derivative and ScalarFunction
    new_bounds = old_bound_to_new(bounds)
    # check bounds
    if (new_bounds[0] > new_bounds[1]).any():
        raise ValueError("LBFGSB - one of the lower bounds is greater than an upper bound.")
    # initial vector must lie within the bounds. Otherwise ScalarFunction and
    # approx_derivative will cause problems
    x0 = np.clip(x0, new_bounds[0], new_bounds[1])
    # A non-None disp overrides iprint (0 silences; positive values set it).
    if disp is not None:
        if disp == 0:
            iprint = -1
        else:
            iprint = disp
    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
                                  bounds=new_bounds,
                                  finite_diff_rel_step=finite_diff_rel_step)
    func_and_grad = sf.fun_and_grad
    fortran_int = _lbfgsb.types.intvar.dtype
    nbd = zeros(n, fortran_int)
    low_bnd = zeros(n, float64)
    upper_bnd = zeros(n, float64)
    # Fortran bound-type codes expected by setulb:
    # 0 = unbounded, 1 = lower only, 2 = both, 3 = upper only.
    bounds_map = {(None, None): 0,
                  (1, None): 1,
                  (1, 1): 2,
                  (None, 1): 3}
    for i in range(0, n):
        l, u = bounds[i]
        if l is not None:
            low_bnd[i] = l
            l = 1
        if u is not None:
            upper_bnd[i] = u
            u = 1
        nbd[i] = bounds_map[l, u]
    if not maxls > 0:
        raise ValueError('maxls must be positive.')
    x = array(x0, float64)
    f = array(0.0, float64)
    g = zeros((n,), float64)
    # Workspace sizes required by the L-BFGS-B 3.0 Fortran routine.
    wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
    iwa = zeros(3*n, fortran_int)
    task = zeros(1, 'S60')
    csave = zeros(1, 'S60')
    lsave = zeros(4, fortran_int)
    isave = zeros(44, fortran_int)
    dsave = zeros(29, float64)
    task[:] = 'START'
    n_iterations = 0
    # Reverse-communication loop: setulb mutates its array arguments in
    # place and reports the next required action via the 'task' string.
    while 1:
        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
                       pgtol, wa, iwa, task, iprint, csave, lsave,
                       isave, dsave, maxls)
        task_str = task.tobytes()
        if task_str.startswith(b'FG'):
            # The minimization routine wants f and g at the current x.
            # Note that interruptions due to maxfun are postponed
            # until the completion of the current minimization iteration.
            # Overwrite f and g:
            f, g = func_and_grad(x)
        elif task_str.startswith(b'NEW_X'):
            # new iteration
            n_iterations += 1
            if callback is not None:
                callback(np.copy(x))
            if n_iterations >= maxiter:
                task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
            elif sf.nfev > maxfun:
                task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
                           'EXCEEDS LIMIT')
        else:
            break
    task_str = task.tobytes().strip(b'\x00').strip()
    if task_str.startswith(b'CONV'):
        warnflag = 0
    elif sf.nfev > maxfun or n_iterations >= maxiter:
        warnflag = 1
    else:
        warnflag = 2
    # These two portions of the workspace are described in the mainlb
    # subroutine in lbfgsb.f. See line 363.
    s = wa[0: m*n].reshape(m, n)
    y = wa[m*n: 2*m*n].reshape(m, n)
    # See lbfgsb.f line 160 for this portion of the workspace.
    # isave(31) = the total number of BFGS updates prior the current iteration;
    n_bfgs_updates = isave[30]
    n_corrs = min(n_bfgs_updates, maxcor)
    hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])
    task_str = task_str.decode()
    return OptimizeResult(fun=f, jac=g, nfev=sf.nfev,
                          njev=sf.ngev,
                          nit=n_iterations, status=warnflag, message=task_str,
                          x=x, success=(warnflag == 0), hess_inv=hess_inv)
class LbfgsInvHessProduct(LinearOperator):
    """Linear operator for the L-BFGS approximate inverse Hessian.

    Computes products of a vector with the approximate inverse Hessian of
    the objective function, built from the limited-memory update history
    (``sk``, ``yk``) accumulated during the optimization.  Implements the
    ``scipy.sparse.linalg.LinearOperator`` interface.

    Parameters
    ----------
    sk : array_like, shape=(n_corr, n)
        Array of `n_corr` most recent updates to the solution vector.
        (See [1]).
    yk : array_like, shape=(n_corr, n)
        Array of `n_corr` most recent updates to the gradient. (See [1]).

    References
    ----------
    .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
       storage." Mathematics of computation 35.151 (1980): 773-782.
    """
    def __init__(self, sk, yk):
        """Construct the operator from the update histories sk and yk."""
        if sk.shape != yk.shape or sk.ndim != 2:
            raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
        n_corrs, n = sk.shape
        super().__init__(dtype=np.float64, shape=(n, n))
        self.sk = sk
        self.yk = yk
        self.n_corrs = n_corrs
        # rho_i = 1 / (s_i . y_i), the standard BFGS curvature scalars.
        self.rho = 1 / np.einsum('ij,ij->i', sk, yk)

    def _matvec(self, x):
        """Apply the approximate inverse Hessian to *x*.

        Uses the classic two-loop recursion of Nocedal [1], Section (4).

        Parameters
        ----------
        x : ndarray
            An array with shape (n,) or (n,1).

        Returns
        -------
        y : ndarray
            The matrix-vector product.
        """
        sk, yk, rho = self.sk, self.yk, self.rho
        q = np.array(x, dtype=self.dtype, copy=True)
        if q.ndim == 2 and q.shape[1] == 1:
            q = q.reshape(-1)
        alpha = np.empty(self.n_corrs)
        # First loop: newest correction to oldest.
        for idx in reversed(range(self.n_corrs)):
            alpha[idx] = rho[idx] * np.dot(sk[idx], q)
            q = q - alpha[idx] * yk[idx]
        result = q
        # Second loop: oldest correction to newest.
        for idx in range(self.n_corrs):
            beta = rho[idx] * np.dot(yk[idx], result)
            result = result + sk[idx] * (alpha[idx] - beta)
        return result

    def todense(self):
        """Return a dense array representation of this operator.

        Returns
        -------
        arr : ndarray, shape=(n, n)
            An array with the same shape and containing the same data
            represented by this `LinearOperator`.
        """
        identity = np.eye(*self.shape, dtype=self.dtype)
        Hk = identity
        # Apply each stored correction pair via the standard BFGS
        # inverse-Hessian update formula.
        for idx in range(self.n_corrs):
            s_col = self.sk[idx][:, np.newaxis]
            s_row = self.sk[idx][np.newaxis, :]
            y_col = self.yk[idx][:, np.newaxis]
            y_row = self.yk[idx][np.newaxis, :]
            left = identity - s_col * y_row * self.rho[idx]
            right = identity - y_col * s_row * self.rho[idx]
            Hk = np.dot(left, np.dot(Hk, right)) + self.rho[idx] * s_col * s_row
        return Hk
| |
# Copyright 2015 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .ast import (
Bool, Class, TypeParam, Macro, Interface, Primitive, String, Number
)
from .helpers import (
is_meta, get_fields, base_bindings, get_methods, is_abstract, constructor, mdroot
)
from .constants import (BUILTIN, REFLECT, OBJECT, VOID)
from .compiler import TypeExpr, texpr
class Reflector:
def __init__(self, root, backend):
self.root = root
self.methods = OrderedDict()
self.classes = []
self.class_uses = OrderedDict()
self.metadata = OrderedDict()
self.entry = None
self.backend = backend
self.gen = self.backend.gen
def visit_File(self, f):
if not self.entry and not is_meta(f):
self.entry = f
def package(self, pkg):
if pkg is None:
return []
else:
return self.package(pkg.package) + [pkg.name.text]
def qtype(self, texp):
if isinstance(texp.type, TypeParam): return OBJECT
result = ".".join(self.package(texp.type.package) + [texp.type.name.text])
if isinstance(texp.type, Class) and texp.type.parameters:
result += "<%s>" % ",".join([self.qtype(texp.bindings.get(p, TypeExpr(p, {})))
for p in texp.type.parameters])
return result
def qname(self, texp):
if isinstance(texp.type, TypeParam): return OBJECT
return ".".join(self.package(texp.type.package) + [texp.type.name.text])
def qparams(self, texp):
if isinstance(texp.type, Class) and texp.type.parameters:
return "[%s]" % ", ".join([self.qexpr(texp.bindings.get(p, TypeExpr(p, {}))) for p in texp.type.parameters])
else:
return "[]"
def qexpr(self, texp):
return '"%s"' % self.qtype(texp)
def _has_reflect_class(self, type):
# Technically List and Map could have classes, possibly? They don't now
# though. Also parameterized types gets passed through, which is kinda
# wrong too.
cls = type.resolved.type
return not (isinstance(cls, (Primitive, Interface, TypeParam)))
def visit_Type(self, type):
cls = type.resolved.type
if not self._has_reflect_class(type):
if cls.name.text not in ("List", "Map"):
return
if cls.parameters:
if cls not in self.class_uses:
self.class_uses[cls] = OrderedDict()
qual = self.qtype(type.resolved)
clazz = type.clazz
package = tuple(self.package(type.package))
if qual not in self.class_uses[cls]:
self.class_uses[cls][qual] = (type.resolved, clazz, package)
def qual(self, cls):
return ".".join(self.package(cls.package) + [cls.name.text])
def visit_Class(self, cls):
if isinstance(cls, (Primitive, Interface)):
if (cls.package and cls.package.name.text == BUILTIN and cls.name.text in ("List", "Map") or
isinstance(cls, Interface)):
self.classes.append(cls)
return
cls._extra_methods = lambda: self.gen_accessors(cls)
self.classes.append(cls)
def gen_pred(self, field):
return self.apply_macro(self.get("Object", "__eq__"), self.texpr("String"), self.gen.name("name"),
[self.string(field.name)])
def gen_ladder(self, texp, rung, default=None, pred=lambda f: True):
cls, use_bindings = texp.type, texp.bindings
ladder = []
bindings = base_bindings(cls)
bindings.update(use_bindings)
for f in get_fields(cls):
if pred(f):
ladder.append(rung(f, bindings))
if default:
ladder.append(default)
return ladder
def gen_accessrung(self, field, bindings, get=True):
if field.static:
path = self.backend.add_import(field.clazz)
cons = self.gen.get_static_field(path,
self.backend.name(field.clazz.name),
self.backend.name(field.name))
else:
cons = self.gen.get_field(self.gen.name("self"), self.backend.name(field.name))
if get:
cons = self.gen.return_(cons)
else:
cons = self.gen.assign(cons, self.gen_cast(texpr(field.resolved.type, bindings, field.resolved.bindings),
self.texpr("Object"),
self.gen.name("value")))
return self.gen.if_(self.gen_pred(field), self.gen.block([cons]), None)
def gen_fieldgets(self, texp):
return self.gen_ladder(texp, self.gen_accessrung, self.gen.return_(self.gen.null()))
def gen_fieldsets(self, texp):
pred = lambda f: not isinstance(f.clazz, Interface)
return self.gen_ladder(texp, lambda f, b: self.gen_accessrung(f, b, False), pred=pred)
def gen_accessors(self, cls):
methods = [
self.gen.method("", self.backend.name(cls.name), self.type("String"),
self.gen.name("_getClass"), [],
self.gen.block([self.gen.return_(self.string(self.qtype(cls.resolved)))])
),
self.gen.method("", self.backend.name(cls.name), self.type("Object"), self.gen.name("_getField"),
[self.gen.param(self.type("String"), self.gen.name("name"), None)],
self.gen.block(self.gen_fieldgets(cls.resolved))
),
self.gen.method("", self.backend.name(cls.name), self.type("void"), self.gen.name("_setField"),
[self.gen.param(self.type("String"), self.gen.name("name"), None),
self.gen.param(self.type("Object"), self.gen.name("value"), None)],
self.gen.block(self.gen_fieldsets(cls.resolved))
)
]
return methods
def gen_refs(self, cls, deps):
statics = []
for dep, mdpkg in deps.items():
mdpath = self.backend.add_import([self.backend.name(mdpkg)], cls.root, cls.file)
gotten = self.gen.get_static_field(mdpath, self.gen.name("Root"), self.gen.name("%s_md" % dep))
statics.append(self.gen.static_field("",
self.backend.name(cls.name),
self.reftype("Class"),
self.gen.name("%s_ref" % dep),
self.apply_macro(self.get("reflect", "__register__"),
None, None,
[gotten])))
return statics
def mdname(self, id):
for c in ".<,>":
id = id.replace(c, "_")
return id
def leave_Root(self, root):
mdpkg = mdroot(self.entry)
self.backend.current_package = [self.backend.name(mdpkg)]
self.code = ""
mdclasses = []
classes = OrderedDict()
for cls in self.classes:
classes[cls] = None
classes.update(self.class_uses)
generated = set()
for cls in classes:
qual = self.qual(cls)
if cls.parameters:
clsid = qual + "<%s>" % ",".join([OBJECT]*len(cls.parameters))
else:
clsid = qual
uses = self.class_uses.get(cls, OrderedDict([(clsid,
(cls.resolved, cls, tuple(self.package(cls.package))))]))
for clsid, (texp, ucls, pkg) in uses.items():
# XXX: I *think* everything is always guaranteed to have a package these days.
if pkg:
if clsid not in generated:
self.gen_clazz(texp, cls, clsid, qual)
generated.add(clsid)
if not ucls: continue
if ucls.package and ucls.package.name.text in (REFLECT, ):
continue
if ucls not in self.metadata:
self.metadata[ucls] = OrderedDict()
mdclasses.append((self.mdname(clsid), cls))
self.metadata[ucls][self.mdname(clsid)] = mdpkg
self.gen_root(mdclasses)
self.backend.current_package = None
def gen_root(self, mdclasses):
gname = self.backend.name("Root")
fname = self.setclassfile(gname)
statics = []
generated = set()
for cls, obj in mdclasses:
varname = self.gen.name("%s_md" % cls)
if varname in generated:
continue
else:
generated.add(varname)
mdpath = self.backend.add_import(self.backend.current_package, obj.root, obj.file)
statics.append(self.gen.static_field("", gname, self.reftype("Class"), varname,
self.gen.get_static_field(mdpath,
self.gen.name(cls),
self.gen.name("singleton"))))
dfn_code = self.gen.clazz("", False, gname, [], None, [], statics, [],
[self.gen.default_constructor(gname)],
self.gen_boilerplate(gname))
self.backend.files[fname] += dfn_code
def gen_qparams(self, texp):
if isinstance(texp.type, Class) and texp.type.parameters:
return self.gen.list_([self.string(self.qtype(texp.bindings.get(p, TypeExpr(p, {}))))
for p in texp.type.parameters])
else:
return self.gen.list_([])
def gen_fields(self, texp):
cls, use_bindings = texp.type, texp.bindings
fields = []
bindings = base_bindings(cls)
bindings.update(use_bindings)
for f in get_fields(cls):
fields.append(self.gen.construct(self.reftype("Field"),
[self.string(self.qtype(texpr(f.resolved.type, bindings,
f.resolved.bindings))),
self.string(f.name.text)]))
return self.gen.list_(fields)
def gen_parents(self, texp):
cls = texp.type
parents = ([self.string(self.qual(parent_type.resolved.type))
for parent_type in cls.bases
if (self._has_reflect_class(parent_type) and
not parent_type.resolved.type.parameters)]
or [self.string("quark.Object")])
return self.gen.list_(parents)
def gen_clazz(self, texp, cls, id, name):
gname = self.backend.name(self.mdname(id))
methods = self.gen_meths(texp, cls, id)
fname = self.setclassfile(gname)
base = self.reftype("Class")
mdpath = self.backend.add_import(self.backend.current_package, cls.root, cls.file)
singleton = self.gen.static_field("", gname, base, self.backend.name("singleton"),
self.gen.construct(self.gen.type(mdpath, gname, []), []))
supargs = [self.string(id)]
body = [
self.gen.assign(self.gen.get_field(self.gen.name("self"), "name"), self.string(name)),
self.gen.assign(self.gen.get_field(self.gen.name("self"), "parameters"), self.gen_qparams(texp)),
self.gen.assign(self.gen.get_field(self.gen.name("self"), "fields"), self.gen_fields(texp)),
self.gen.assign(self.gen.get_field(self.gen.name("self"), "methods"), methods),
self.gen.assign(self.gen.get_field(self.gen.name("self"), "parents"), self.gen_parents(texp))
]
cons = constructor(cls)
argtypes = [self.erase(texpr(p.resolved.type, base_bindings(cls), texp.bindings, p.resolved.bindings))
for p in (cons.params if cons else [])]
# unerased = [texpr(p.resolved.type, base_bindings(cls), texp.bindings, p.resolved.bindings)
# for p in (cons.params if cons else [])]
# for t in unerased:
# if "TLS" in t.type.name.text:
# print t.type.name, t.bindings
construct_args = self.gen_castargs("args", argtypes)
if isinstance(cls, Interface) or is_abstract(cls):
abstract = "true"
construct_body = [self.gen.return_(self.gen.null())]
else:
abstract = "false"
construct_body = [
self.gen.return_(self.gen.construct(self.backend.type(self.erase(texp)), construct_args))
]
construct = self.gen.method("", gname, self.type("Object"), self.gen.name("construct"),
[self.gen.param(self.type("List", "Object"), self.gen.name("args"), None)],
self.gen.block(construct_body))
isabs = self.gen.method("", gname, self.type("bool"), self.gen.name("isAbstract"), [],
self.gen.block([self.gen.return_(self.gen.bool_(Bool(abstract)))]))
dfn_code = self.gen.clazz("", False, gname, [], base, [], [singleton], [], [self.cons(gname, base, supargs,
body)],
[construct, isabs] + self.gen_boilerplate(gname))
self.backend.files[fname] += dfn_code
def gen_meths(self, texp, cls, cid):
if cls.package and cls.package.name.text in (BUILTIN, REFLECT): return self.gen.list_([])
methods = []
bindings = base_bindings(cls)
bindings.update(texp.bindings)
for m in get_methods(cls, False).values():
if isinstance(m, Macro): continue
mid = "%s_%s_Method" % (self.mdname(cid), m.name.text)
mtype = self.erase(texpr(m.type.resolved.type, bindings, m.type.resolved.bindings))
margs = [self.erase(texpr(p.resolved.type, bindings, p.resolved.bindings)) for p in m.params]
self.gen_meth(texp, cls, m, mid, cid, mtype, m.name.text, margs)
mdpath = self.backend.add_import(self.backend.current_package, cls.root, cls.file)
methods.append(self.gen.construct(self.gen.type(mdpath, mid, []), []))
return self.gen.list_(methods)
def gen_meth(self, texp, cls, meth, mid, cid, type, name, params):
fname = self.setclassfile(self.backend.name(mid))
base = self.reftype("Method")
supargs = [self.string(self.qtype(type)), self.string(name),
self.gen.list_([self.string(self.qtype(p)) for p in params])]
body = [self.gen.local(self.backend.type(self.erase(texp)), self.gen.name("obj"),
self.gen_cast(self.erase(texp), self.texpr("Object"), self.gen.name("object")))]
args = self.gen_castargs("args", params)
if meth.static:
invoke = self.gen.invoke_static_method(self.backend.add_import(meth.clazz),
self.backend.name(meth.clazz.name),
self.backend.name(meth.name),
args)
else:
invoke = self.gen.invoke_method(self.gen.name("obj"), self.gen.name(name), args)
if self.qtype(type) == VOID:
body.append(self.gen.expr_stmt(invoke))
body.append(self.gen.return_(self.gen.null()))
else:
body.append(self.gen.return_(invoke))
dfn_code = self.gen.clazz("", False, mid, [], base, [], [], [], [self.cons(self.backend.name(mid),
base, supargs, [])],
[self.gen.method("", self.backend.name(mid), self.type("Object"),
self.gen.name("invoke"),
[self.gen.param(self.type("Object"),
self.gen.name("object"),
None),
self.gen.param(self.type("List", "Object"),
self.gen.name("args"),
None)],
self.gen.block(body))] +
self.gen_boilerplate(self.backend.name(mid)))
self.backend.files[fname] += dfn_code
##### HELPERS #####
def gen_castargs(self, name, types):
result = []
for t in types:
result.append(
self.gen_cast(t, self.texpr("Object"),
self.apply_macro(self.get("List", "__get__"),
self.texpr("List", "Object"),
self.gen.name(name),
[self.number(len(result))]))
)
return result
def erase(self, texp):
if isinstance(texp.type, TypeParam):
if texp.type in texp.bindings:
expr = texp.bindings[texp.type]
return self.erase(texpr(expr.type, expr.bindings, texp.bindings))
else:
return self.texpr("Object")
bindings = {}
for k, v in texp.bindings.items():
if v.type in texp.bindings:
bindings[k] = self.erase(texp.bindings[v.type])
else:
bindings[k] = self.erase(v)
for p in texp.type.parameters:
if p not in bindings:
bindings[p] = self.texpr("Object")
return texpr(texp.type, bindings)
def gen_cast(self, to, from_, expr):
return self.backend.maybe_cast(to, self.backend.fake(from_, expr))
def setclassfile(self, name):
fname = self.gen.class_file(self.backend.current_package, name, None)
if self.backend.setfile(fname, lambda: self.gen.make_class_file(self.backend.current_package, name, rtloc=self.backend.rtloc)):
self.backend.files[fname] += "\n"
return fname
def refclass(self, name):
return self.root.env["quark"].env["reflect"].env[name]
def reftype(self, name):
return self.backend.type(self.refclass(name), {})
def cons(self, name, supname, supargs, body):
    """Build a no-arg constructor for *name* whose body first invokes the
    *supname* super constructor with *supargs* and then runs *body*."""
    body = [self.gen.expr_stmt(self.gen.invoke_super(name,
                                                     supname,
                                                     supargs))] + body
    return self.gen.constructor("", name, [], self.gen.block(body))
def string(self, text):
    """Emit a target-language string literal containing *text*.

    NOTE(review): *text* is interpolated unescaped into the quoted
    literal, so embedded quotes or backslashes would corrupt the output;
    callers appear to pass only identifier-like names — confirm.
    """
    return self.gen.string(String('"%s"' % text))
def type(self, name, *params):
    """Backend type for the quark builtin *name*, with its type
    parameters bound positionally to the quark builtins named in
    *params* (raises IndexError if too few are supplied)."""
    cls = self.root.env["quark"].env[name]
    bindings = {}
    for position, parameter in enumerate(cls.parameters):
        bound_cls = self.root.env["quark"].env[params[position]]
        bindings[parameter] = texpr(bound_cls, {})
    return self.backend.type(cls, bindings)
def texpr(self, name, *params):
    """Type expression for the quark builtin *name*, with its type
    parameters bound positionally to the quark builtins named in
    *params* (raises IndexError if too few are supplied)."""
    cls = self.root.env["quark"].env[name]
    bindings = {}
    for position, parameter in enumerate(cls.parameters):
        bound_cls = self.root.env["quark"].env[params[position]]
        bindings[parameter] = texpr(bound_cls, {})
    return texpr(cls, bindings)
def number(self, n):
    """Emit a numeric literal node for the integer *n*."""
    return self.gen.number(Number(str(n)))
def get(self, name, attr):
    """Return attribute *attr* of the quark builtin class *name*."""
    return self.root.env["quark"].env[name].env[attr]
def apply_macro(self, macro, type, expr, args):
    """Apply *macro* to *expr* (faked as *type*) with *args* via the backend."""
    return self.backend.apply_macro(macro, self.backend.fake(type, expr), args)
def gen_boilerplate(self, name):
    """Generate the stub reflection methods every generated class needs:
    _getClass (returns null), _getField (returns null), _setField (no-op)."""
    # _getClass returns a String-typed null (a void null cast to String).
    return [self.gen_getclass(name, self.gen_cast(self.texpr("String"), self.texpr("void"), self.gen.null())),
            self.gen_getfield(name),
            self.gen_setfield(name)]
def gen_getclass(self, name, result):
    """Method ``String _getClass()`` on *name* that returns *result*."""
    return self.gen.method("", name, self.type("String"), self.gen.name("_getClass"), [],
                           self.gen.block([self.gen.return_(result)]))
def gen_getfield(self, name):
    """Stub ``Object _getField(String name)`` that always returns null."""
    return self.gen.method("", name, self.type("Object"), self.gen.name("_getField"),
                           [self.gen.param(self.type("String"),
                                           self.gen.name("name"),
                                           None)],
                           self.gen.block([self.gen.return_(self.gen.null())]))
def gen_setfield(self, name):
    """Stub ``void _setField(String name, Object value)`` with an empty body."""
    return self.gen.method("", name, self.type("void"), self.gen.name("_setField"),
                           [self.gen.param(self.type("String"),
                                           self.gen.name("name"),
                                           None),
                            self.gen.param(self.type("Object"),
                                           self.gen.name("value"),
                                           None)],
                           self.gen.block([]))
def cleanup(self):
    """Remove the _extra_statics hooks installed by reflect(), leaving
    the traversed classes unmodified after code generation."""
    for cls in self.metadata:
        # Truthiness check: only delete when a (non-None) hook was set.
        if getattr(cls, "_extra_statics", None):
            del cls._extra_statics
def reflect(root, be):
    """Run reflection-metadata generation over *root* for backend *be*.

    Traverses the tree with a Reflector, installs a lazily-evaluated
    _extra_statics hook on every class that accumulated metadata, and
    returns ([metadata root], cleanup callback) — the callback removes
    the hooks again.
    """
    ref = Reflector(root, be)
    root.traverse(ref)
    for cls, deps in ref.metadata.items():
        # Bind cls/deps as defaults so each lambda captures its own pair
        # (avoids the late-binding closure pitfall).
        cls._extra_statics = lambda c=cls, d=deps: ref.gen_refs(c, d)
    return [mdroot(ref.entry)], ref.cleanup
| |
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
import contextlib
from neutron.common import exceptions as q_exc
from quark.db import api as db_api
import quark.plugin_modules.mac_address_ranges as macrng_api
import quark.plugin_modules.networks as network_api
import quark.plugin_modules.ports as port_api
import quark.plugin_modules.subnets as subnet_api
from quark.tests.functional.base import BaseFunctionalTest
class QuarkFindPortsSorted(BaseFunctionalTest):
    """port_find must return ports ordered by creation time."""

    def test_ports_sorted_by_created_at(self):
        # One network to attach the ports to.
        net_mod = db_api.network_create(
            self.context, name="public", tenant_id="fake",
            network_plugin="BASE")
        # Three identical ports created one after another.
        port_body = dict(network_id=net_mod["id"], backend_key="1",
                         device_id="1")
        created = [db_api.port_create(self.context, **port_body)
                   for _ in range(3)]
        found = db_api.port_find(self.context, None, None, None, None,
                                 scope=db_api.ALL)
        # Creation timestamps must be strictly increasing in the result.
        self.assertTrue(found[0]["created_at"] < found[1]["created_at"] <
                        found[2]["created_at"])
        # Clean up everything this test created.
        db_api.network_delete(self.context, net_mod)
        for port_mod in created:
            db_api.port_delete(self.context, port_mod)
class QuarkCreatePortSatisfyIpam(BaseFunctionalTest):
    """Port creation must satisfy the network's IPAM strategy."""

    @contextlib.contextmanager
    def _stubs(self, network_info, subnet_v4_info, subnet_v6_info=None):
        """Yield (network, v4 subnet, v6 subnet or None) built from the
        given request bodies, with RPC notifier and quota checks mocked."""
        with contextlib.nested(
                mock.patch("neutron.common.rpc.get_notifier"),
                mock.patch("neutron.quota.QUOTAS.limit_check")):
            # Network and MAC-range creation require admin privileges.
            self.context.is_admin = True
            net = network_api.create_network(self.context, network_info)
            mac = {'mac_address_range': dict(cidr="AA:BB:CC")}
            macrng_api.create_mac_address_range(self.context, mac)
            self.context.is_admin = False
            subnet_v4_info['subnet']['network_id'] = net['id']
            sub_v4 = subnet_api.create_subnet(self.context, subnet_v4_info)
            sub_v6 = None
            if subnet_v6_info:
                subnet_v6_info['subnet']['network_id'] = net['id']
                sub_v6 = subnet_api.create_subnet(self.context, subnet_v6_info)
            yield net, sub_v4, sub_v6

    def test_port_created_should_satisfy_ipam_strategy(self):
        """With BOTH_REQUIRED, a port requesting both a v4 and a v6
        fixed IP is created and keeps both addresses."""
        cidr = "192.168.1.0/24"
        cidr_v6 = "2001:db8::/32"
        ip_network = netaddr.IPNetwork(cidr)
        ipv6_network = netaddr.IPNetwork(cidr_v6)
        # NOTE(review): tenant_id "make" (vs "fake" on the subnets) looks
        # like a typo but does not affect this test — confirm.
        network = dict(id='1', name="public", tenant_id="make",
                       network_plugin="BASE",
                       ipam_strategy="BOTH_REQUIRED")
        network = {"network": network}
        subnet_v4 = dict(id='1', ip_version=4, next_auto_assign_ip=2,
                         cidr=cidr, first_ip=ip_network.first,
                         last_ip=ip_network.last, ip_policy=None,
                         tenant_id="fake")
        subnet_v6 = dict(id='2', ip_version=6, next_auto_assign_ip=2,
                         cidr=cidr_v6, first_ip=ipv6_network.first,
                         last_ip=ipv6_network.last, ip_policy=None,
                         tenant_id="fake")
        subnet_v4_info = {"subnet": subnet_v4}
        subnet_v6_info = {"subnet": subnet_v6}

        def _make_body(ipv4, ipv6):
            # Closes over net/sub_v4/sub_v6 bound by the with-block below.
            fix_ipv4 = dict(ip_address=ipv4, subnet_id=sub_v4['id'])
            fix_ipv6 = dict(ip_address=ipv6, subnet_id=sub_v6['id'])
            port_info = {"port": dict(fixed_ips=[fix_ipv4, fix_ipv6],
                                      network_id=net['id'])}
            return port_info

        with self._stubs(network, subnet_v4_info, subnet_v6_info) as (
                net, sub_v4, sub_v6):
            ipv4 = "192.168.1.50"
            ipv6 = "2001:db8::10"
            port = port_api.create_port(self.context, _make_body(ipv4, ipv6))
            port_ips = [ip["ip_address"] for ip in port["fixed_ips"]]
            # Both requested addresses must be present on the port.
            for ip in [ipv4, ipv6]:
                self.assertTrue(ip in port_ips)

    def test_port_created_doesnt_satisfy_ipam_strategy_raises(self):
        """With BOTH_REQUIRED but only a v4 subnet available, creating a
        port with only a v4 fixed IP fails with IpAddressGenerationFailure."""
        cidr = "192.168.1.0/24"
        ip_network = netaddr.IPNetwork(cidr)
        network = dict(id='1', name="public", tenant_id="make",
                       network_plugin="BASE",
                       ipam_strategy="BOTH_REQUIRED")
        network = {"network": network}
        subnet_v4 = dict(id='1', ip_version=4, next_auto_assign_ip=2,
                         cidr=cidr, first_ip=ip_network.first,
                         last_ip=ip_network.last, ip_policy=None,
                         tenant_id="fake")
        subnet_v4_info = {"subnet": subnet_v4}

        def _make_body_only_v4(ip):
            # Closes over net/sub_v4 bound by the with-block below.
            fix_ip = dict(ip_address=ip, subnet_id=sub_v4['id'])
            port_info = {"port": dict(fixed_ips=[fix_ip],
                                      network_id=net['id'])}
            return port_info

        with self._stubs(network, subnet_v4_info) as (
                net, sub_v4, sub_v6):
            ip = "192.168.1.50"
            with self.assertRaises(q_exc.IpAddressGenerationFailure):
                port_api.create_port(self.context, _make_body_only_v4(ip))
class QuarkCreatePortWithIpNotMandatory(BaseFunctionalTest):
    """Fixed IPs may be requested by subnet only (no explicit address)."""

    @contextlib.contextmanager
    def _stubs(self, network_info, subnet_v4_infos, subnet_v6_info=None):
        """Yield (network, list of v4 subnets, v6 subnet or None) with
        RPC notifier and quota checks mocked out."""
        with contextlib.nested(
                mock.patch("neutron.common.rpc.get_notifier"),
                mock.patch("neutron.quota.QUOTAS.limit_check")):
            # Network and MAC-range creation require admin privileges.
            self.context.is_admin = True
            net = network_api.create_network(self.context, network_info)
            mac = {'mac_address_range': dict(cidr="AA:BB:CC")}
            macrng_api.create_mac_address_range(self.context, mac)
            self.context.is_admin = False
            sub_v4s = []
            for sub_info in subnet_v4_infos:
                sub_info['subnet']['network_id'] = net['id']
                sub_v4s.append(subnet_api.create_subnet(self.context,
                                                        sub_info))
            sub_v6 = None
            if subnet_v6_info:
                subnet_v6_info['subnet']['network_id'] = net['id']
                sub_v6 = subnet_api.create_subnet(self.context, subnet_v6_info)
            yield net, sub_v4s, sub_v6

    def test_port_created_with_only_subnet(self):
        """A fixed_ip carrying only a subnet_id gets an address allocated
        from that subnet's CIDR."""
        cidr = "192.168.1.0/24"
        ip_network = netaddr.IPNetwork(cidr)
        network = dict(id='1', name="public", tenant_id="make",
                       network_plugin="BASE",
                       ipam_strategy="ANY")
        network = {"network": network}
        subnet_v4 = dict(id='1', ip_version=4, next_auto_assign_ip=2,
                         cidr=cidr, first_ip=ip_network.first,
                         last_ip=ip_network.last, ip_policy=None,
                         tenant_id="fake")
        subnet_v4_info = {"subnet": subnet_v4}

        def _make_body():
            # Closes over net/sub_v4s bound by the with-block below.
            fix_ipv4 = dict(subnet_id=sub_v4s[0]['id'])
            port_info = {"port": dict(fixed_ips=[fix_ipv4],
                                      network_id=net['id'])}
            return port_info

        with self._stubs(network, [subnet_v4_info]) as (
                net, sub_v4s, sub_v6):
            port = port_api.create_port(self.context, _make_body())
            port_ip = port["fixed_ips"][0]["ip_address"]
            # The auto-assigned address must fall inside the subnet.
            self.assertTrue(netaddr.IPAddress(port_ip) in
                            netaddr.IPNetwork(cidr))

    def test_port_created_with_multiple_fixed_ips_some_only_with_subnet(self):
        """Mix of explicit-address and subnet-only fixed_ips across two v4
        subnets plus one v6 subnet; every expected address must appear."""
        cidr = "192.168.1.0/24"
        another_cidr = "192.168.2.0/24"
        cidr_v6 = "2001:db8::/32"
        ip_network = netaddr.IPNetwork(cidr)
        another_ip_network = netaddr.IPNetwork(another_cidr)
        ipv6_network = netaddr.IPNetwork(cidr_v6)
        network = dict(id='1', name="public", tenant_id="make",
                       network_plugin="BASE",
                       ipam_strategy="BOTH_REQUIRED")
        network = {"network": network}
        subnet_v4 = dict(id='1', ip_version=4, next_auto_assign_ip=2,
                         cidr=cidr, first_ip=ip_network.first,
                         last_ip=ip_network.last, ip_policy=None,
                         tenant_id="fake")
        another_subnet_v4 = dict(id='2', ip_version=4, next_auto_assign_ip=2,
                                 cidr=another_cidr,
                                 first_ip=another_ip_network.first,
                                 last_ip=another_ip_network.last,
                                 ip_policy=None, tenant_id="fake")
        subnet_v6 = dict(id='3', ip_version=6, next_auto_assign_ip=2,
                         cidr=cidr_v6, first_ip=ipv6_network.first,
                         last_ip=ipv6_network.last, ip_policy=None,
                         tenant_id="fake")
        subnet_v4_info = {"subnet": subnet_v4}
        another_subnet_v4_info = {"subnet": another_subnet_v4}
        subnet_v6_info = {"subnet": subnet_v6}

        def _make_body(ipv4, ipv6):
            # Closes over net/sub_v4s/sub_v6 bound by the with-block below.
            fix_ipv4 = dict(ip_address=ipv4, subnet_id=sub_v4s[0]['id'])
            # Second v4 fixed_ip is subnet-only: the address is auto-assigned.
            another_fix_ipv4 = dict(subnet_id=sub_v4s[1]['id'])
            fix_ipv6 = dict(ip_address=ipv6, subnet_id=sub_v6['id'])
            port_info = {"port": dict(fixed_ips=[fix_ipv4, another_fix_ipv4,
                                                 fix_ipv6],
                                      network_id=net['id'])}
            return port_info

        with self._stubs(network, [subnet_v4_info, another_subnet_v4_info],
                         subnet_v6_info) as (net, sub_v4s, sub_v6):
            ipv4 = "192.168.1.50"
            # Expected auto-assigned address from the second subnet.
            another_ipv4 = "192.168.2.1"
            ipv6 = "2001:db8::10"
            port = port_api.create_port(self.context, _make_body(ipv4, ipv6))
            port_ips = [ip["ip_address"] for ip in port["fixed_ips"]]
            for ip in [ipv4, another_ipv4, ipv6]:
                self.assertTrue(ip in port_ips)
class QuarkCreatePortWithForbiddenMacRange(BaseFunctionalTest):
    """MAC ranges flagged do_not_use may only be used on explicit request."""

    @contextlib.contextmanager
    def _stubs(self, network_info, subnet_v4_infos, subnet_v6_info=None):
        """Same setup as the other port tests, except the only MAC range
        is created with do_not_use=True."""
        with contextlib.nested(
                mock.patch("neutron.common.rpc.get_notifier"),
                mock.patch("neutron.quota.QUOTAS.limit_check")):
            # Network and MAC-range creation require admin privileges.
            self.context.is_admin = True
            net = network_api.create_network(self.context, network_info)
            # The sole MAC range is marked forbidden for normal allocation.
            mac = {'mac_address_range': dict(cidr="AA:BB:CC", do_not_use=True)}
            macrng_api.create_mac_address_range(self.context, mac)
            self.context.is_admin = False
            sub_v4s = []
            for sub_info in subnet_v4_infos:
                sub_info['subnet']['network_id'] = net['id']
                sub_v4s.append(subnet_api.create_subnet(self.context,
                                                        sub_info))
            sub_v6 = None
            if subnet_v6_info:
                subnet_v6_info['subnet']['network_id'] = net['id']
                sub_v6 = subnet_api.create_subnet(self.context, subnet_v6_info)
            yield net, sub_v4s, sub_v6

    def test_port_created_with_forbidden_mac_range(self):
        """With use_forbidden_mac_range=True the port draws its MAC from
        the forbidden range; without the flag allocation fails."""
        cidr = "192.168.1.0/24"
        ip_network = netaddr.IPNetwork(cidr)
        network = dict(id='1', name="public", tenant_id="make",
                       network_plugin="BASE",
                       ipam_strategy="ANY")
        network = {"network": network}
        subnet_v4 = dict(id='1', ip_version=4, next_auto_assign_ip=2,
                         cidr=cidr, first_ip=ip_network.first,
                         last_ip=ip_network.last, ip_policy=None,
                         tenant_id="fake")
        subnet_v4_info = {"subnet": subnet_v4}

        def _make_body(use_forbidden_mac_range=False):
            # Closes over net/sub_v4s bound by the with-block below.
            fix_ipv4 = dict(subnet_id=sub_v4s[0]['id'])
            port_info = \
                {"port": dict(
                    fixed_ips=[fix_ipv4], network_id=net['id'],
                    use_forbidden_mac_range=use_forbidden_mac_range
                )}
            return port_info

        with self._stubs(network, [subnet_v4_info]) as (
                net, sub_v4s, sub_v6):
            admin_ctxt = self.context.elevated()
            port = port_api.create_port(
                admin_ctxt, _make_body(use_forbidden_mac_range=True))
            port_mac = port["mac_address"]
            self.assertTrue(port_mac.startswith("AA:BB:CC"))
            # Without the override flag, no usable MAC range remains.
            with self.assertRaises(q_exc.MacAddressGenerationFailure):
                port_api.create_port(admin_ctxt,
                                     _make_body())
class QuarkFindPortsFilterByDeviceOwner(BaseFunctionalTest):
    """Filtering port_find by device_owner returns only matching ports."""

    def test_port_list_device_owner_found_returns_only_those(self):
        # One network to attach the ports to.
        net_mod = db_api.network_create(
            self.context, name="public", tenant_id="fake",
            network_plugin="BASE")
        # Two ports owned by "Doge" and one DHCP port.
        owners = ["Doge", "Doge", "network:dhcp"]
        created = [db_api.port_create(self.context,
                                      network_id=net_mod["id"],
                                      backend_key="1", device_id="1",
                                      device_owner=owner)
                   for owner in owners]
        # The DHCP filter must match exactly the one DHCP port.
        dhcp_ports = db_api.port_find(self.context, None, None, None,
                                      scope=db_api.ALL,
                                      device_owner="network:dhcp")
        self.assertTrue(len(dhcp_ports) == 1)
        self.assertTrue(dhcp_ports[0]["device_owner"] == "network:dhcp")
        # The "Doge" filter must match exactly the other two ports.
        doge_ports = db_api.port_find(self.context, None, None, None,
                                      scope=db_api.ALL,
                                      device_owner="Doge")
        self.assertTrue(len(doge_ports) == 2)
        self.assertTrue(doge_ports[0]["device_owner"] ==
                        doge_ports[1]["device_owner"] == "Doge")
        # Clean up everything this test created.
        db_api.network_delete(self.context, net_mod)
        for port_mod in created:
            db_api.port_delete(self.context, port_mod)
class QuarkPortFixedIPOperations(BaseFunctionalTest):
    """Create/update/show ports with explicit fixed IPs (v4 and v6)."""

    def __init__(self, *args, **kwargs):
        super(QuarkPortFixedIPOperations, self).__init__(*args, **kwargs)
        # Shared request bodies reused by every test in this class; each
        # test materializes them in the DB via _stubs().
        cidr = "192.168.10.0/24"
        ip_network = netaddr.IPNetwork(cidr)
        cidr_v6 = "2001:db8::/32"
        ip_network_v6 = netaddr.IPNetwork(cidr_v6)
        # some default stuff
        network = dict(name="public", tenant_id="make",
                       network_plugin="BASE",
                       ipam_strategy="ANY")
        self.net_info = {"network": network}
        subnet_v4 = dict(ip_version=4, next_auto_assign_ip=2,
                         cidr=cidr, first_ip=ip_network.first,
                         last_ip=ip_network.last, ip_policy=None,
                         tenant_id="fake")
        subnet_v6 = dict(ip_version=6, next_auto_assign_ip=2,
                         cidr=cidr_v6, first_ip=ip_network_v6.first,
                         last_ip=ip_network_v6.last, ip_policy=None,
                         tenant_id="fake")
        self.sub_info = {"subnet": subnet_v4}
        self.sub_info_v6 = {"subnet": subnet_v6}

    @contextlib.contextmanager
    def _stubs(self, network_info, subnet_info):
        """Yield (network, subnet) created from the given bodies, with
        RPC notifier and quota checks mocked out."""
        with contextlib.nested(
                mock.patch("neutron.common.rpc.get_notifier"),
                mock.patch("neutron.quota.QUOTAS.limit_check")):
            mac = {'mac_address_range': dict(cidr="AA:BB:CC")}
            # MAC-range creation requires admin privileges.
            self.context.is_admin = True
            macrng_api.create_mac_address_range(self.context, mac)
            self.context.is_admin = False
            network = network_api.create_network(self.context, network_info)
            subnet_info['subnet']['network_id'] = network['id']
            subnet = subnet_api.create_subnet(self.context, subnet_info)
            yield network, subnet

    def test_create_port_single_fixed_ip(self):
        """A port created with one explicit v4 fixed IP keeps it."""
        with self._stubs(self.net_info, self.sub_info) as (network, subnet):
            fixed_ips = [dict(subnet_id=subnet['id'], enabled=True,
                              ip_address="192.168.10.45")]
            port = dict(port=dict(network_id=network['id'],
                                  tenant_id=self.context.tenant_id,
                                  device_id=2,
                                  fixed_ips=fixed_ips))
            expected = {'status': "ACTIVE",
                        'device_owner': None,
                        'network_id': network["id"],
                        'tenant_id': self.context.tenant_id,
                        'admin_state_up': True,
                        'fixed_ips': fixed_ips,
                        'device_id': 2}
            result = port_api.create_port(self.context, port)
            for key in expected.keys():
                self.assertEqual(result[key], expected[key],
                                 "Mismatch on %s" % key)

    def test_create_port_multiple_fixed_ipv4(self):
        """Multiple explicit v4 fixed IPs all land on the created port."""
        with self._stubs(self.net_info, self.sub_info) as (network, subnet):
            fixed_ips = [dict(subnet_id=subnet['id'], enabled=True,
                              ip_address="192.168.10.45"),
                         dict(subnet_id=subnet['id'], enabled=True,
                              ip_address="192.168.10.199")]
            port = dict(port=dict(network_id=network['id'],
                                  tenant_id=self.context.tenant_id,
                                  device_id=2,
                                  fixed_ips=fixed_ips))
            expected = {'status': "ACTIVE",
                        'device_owner': None,
                        'network_id': network["id"],
                        'tenant_id': self.context.tenant_id,
                        'admin_state_up': True,
                        'fixed_ips': fixed_ips,
                        'device_id': 2}
            result = port_api.create_port(self.context, port)
            for key in expected.keys():
                # fixed_ips are compared separately, order-insensitively.
                if key != 'fixed_ips':
                    self.assertEqual(result[key], expected[key],
                                     "Mismatch on %s" % key)
            for ip in result['fixed_ips']:
                self.assertTrue(ip in expected['fixed_ips'])

    def test_create_port_multiple_fixed_ipv6(self):
        """Multiple explicit v6 fixed IPs all land on the created port."""
        with self._stubs(self.net_info, self.sub_info_v6) as (network, subnet):
            ipv6a = "2001:db8::10"
            ipv6b = "2001:db8::15"
            fixed_ips = [dict(subnet_id=subnet['id'], enabled=True,
                              ip_address=ipv6a),
                         dict(subnet_id=subnet['id'], enabled=True,
                              ip_address=ipv6b)]
            port = dict(port=dict(network_id=network['id'],
                                  tenant_id=self.context.tenant_id,
                                  device_id=2,
                                  fixed_ips=fixed_ips))
            expected = {'status': "ACTIVE",
                        'device_owner': None,
                        'network_id': network["id"],
                        'tenant_id': self.context.tenant_id,
                        'admin_state_up': True,
                        'fixed_ips': fixed_ips,
                        'device_id': 2}
            result = port_api.create_port(self.context, port)
            for key in expected.keys():
                # fixed_ips are compared separately, order-insensitively.
                if key != 'fixed_ips':
                    self.assertEqual(result[key], expected[key],
                                     "Mismatch on %s" % key)
            for ip in result['fixed_ips']:
                self.assertTrue(ip in expected['fixed_ips'])

    def test_update_port_multiple_fixed_ipv4(self):
        """Updating a port's fixed_ips replaces the old addresses."""
        with self._stubs(self.net_info, self.sub_info) as (network, subnet):
            fixed_ips = [dict(subnet_id=subnet['id'], enabled=True,
                              ip_address="192.168.10.45"),
                         dict(subnet_id=subnet['id'], enabled=True,
                              ip_address="192.168.10.199")]
            port = dict(port=dict(network_id=network['id'],
                                  tenant_id=self.context.tenant_id,
                                  device_id=2,
                                  fixed_ips=fixed_ips))
            expected = {'status': "ACTIVE",
                        'device_owner': None,
                        'network_id': network["id"],
                        'tenant_id': self.context.tenant_id,
                        'admin_state_up': True,
                        'fixed_ips': fixed_ips,
                        'device_id': '2'}
            result = port_api.create_port(self.context, port)

            # Replace both addresses with two new ones.
            fixed_ips = [dict(subnet_id=subnet['id'], enabled=True,
                              ip_address="192.168.10.236"),
                         dict(subnet_id=subnet['id'], enabled=True,
                              ip_address="192.168.10.42")]
            new_port = dict(port=dict(fixed_ips=fixed_ips))
            result = port_api.update_port(self.context, result['id'], new_port)
            for key in expected.keys():
                if key != 'fixed_ips':
                    self.assertEqual(result[key], expected[key],
                                     "Mismatch on %s" % key)
            for ip in result['fixed_ips']:
                # NOTE(review): the failure message interpolates the stale
                # pre-update expected['fixed_ips']; the membership check
                # itself correctly uses the post-update fixed_ips list.
                self.assertTrue(ip in fixed_ips,
                                '%s not in %s' % (ip, expected['fixed_ips']))

    def test_port_show(self):
        """get_port returns the same view of the port as create_port."""
        with self._stubs(self.net_info, self.sub_info) as (network, subnet):
            fixed_ips = [dict(subnet_id=subnet['id'], enabled=True,
                              ip_address="192.168.10.45"),
                         dict(subnet_id=subnet['id'], enabled=True,
                              ip_address="192.168.10.199")]
            port = dict(port=dict(network_id=network['id'],
                                  tenant_id=self.context.tenant_id,
                                  device_id='2',
                                  fixed_ips=fixed_ips))
            expected = {'status': "ACTIVE",
                        'device_owner': None,
                        'network_id': network["id"],
                        'tenant_id': self.context.tenant_id,
                        'admin_state_up': True,
                        'fixed_ips': fixed_ips,
                        'device_id': '2'}
            result = port_api.create_port(self.context, port)
            result = port_api.get_port(self.context, result['id'])
            for key in expected.keys():
                if key != 'fixed_ips':
                    self.assertEqual(result[key], expected[key],
                                     "Mismatch on %s" % key)
            for ip in result['fixed_ips']:
                self.assertTrue(ip in fixed_ips,
                                '%s not in %s' % (ip, expected['fixed_ips']))
| |
# coding: utf-8
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for an Oppia state."""
__author__ = 'Sean Lip'
import copy
import importlib
import logging
from apps.base_model.models import BaseModel
from apps.parameter.models import ParamChange
from apps.parameter.models import ParamChangeProperty
from apps.widget.models import InteractiveWidget
from apps.widget.models import Widget
from data.objects.models import objects
import feconf
import utils
from google.appengine.ext import ndb
class Content(ndb.Model):
    """Non-interactive content in a state."""
    # Which of the four supported content kinds this entry is.
    type = ndb.StringProperty(choices=['text', 'image', 'video', 'widget'])
    # The raw content value for that kind.
    value = ndb.TextProperty(default='')
class Rule(ndb.Model):
    """A rule for an answer classifier."""
    # TODO(sll): Ensure the types for param_changes are consistent.

    # The name of the rule.
    name = ndb.StringProperty(required=True)
    # Parameters for the classification rule.
    # TODO(sll): Make these the actual params.
    inputs = ndb.JsonProperty(default={})
    # The id of the destination state.
    dest = ndb.StringProperty()
    # Feedback to give the reader if this rule is triggered.
    feedback = ndb.TextProperty(repeated=True)
    # State-level parameter changes to make if this rule is triggered.
    param_changes = ParamChangeProperty(repeated=True)
class AnswerHandlerInstance(ndb.Model):
    """An answer event stream (submit, click, drag, etc.)."""
    # Name of the event stream this handler responds to.
    name = ndb.StringProperty(default='submit')
    # Rules evaluated in order against the reader's answer.
    rules = ndb.LocalStructuredProperty(Rule, repeated=True)
    # This is a derived property from the corresponding AnswerHandler in
    # widget.py. It is added automatically on State.put().
    classifier = ndb.StringProperty()
class WidgetInstance(ndb.Model):
    """An instance of a widget."""
    # The id of the interactive widget class for this state.
    widget_id = ndb.StringProperty(default='Continue')
    # Parameters for the interactive widget view, stored as key-value pairs.
    # Each parameter is single-valued. The values may be Jinja templates that
    # refer to state parameters.
    params = ndb.JsonProperty(default={})
    # If true, keep the widget instance from the previous state if both are of
    # the same type.
    sticky = ndb.BooleanProperty(default=False)
    # Answer handlers and rulesets.
    handlers = ndb.LocalStructuredProperty(AnswerHandlerInstance, repeated=True)
class State(BaseModel):
    """A state which forms part of an exploration."""
    # NB: This element's parent should be an Exploration.

    def get_default_rule(self):
        """Returns the catch-all rule, which loops back to this state."""
        return Rule(name='Default', dest=self.id)

    def get_default_handler(self):
        """Returns a submit handler containing only the default rule."""
        return AnswerHandlerInstance(rules=[self.get_default_rule()])

    def get_default_widget(self):
        """Returns a default (Continue) widget wired to the default handler."""
        return WidgetInstance(handlers=[self.get_default_handler()])

    def _pre_put_hook(self):
        """Ensures that the widget and at least one handler for it exists."""
        if not self.widget:
            self.widget = self.get_default_widget()
        elif not self.widget.handlers:
            self.widget.handlers = [self.get_default_handler()]

        # TODO(sll): Do other validation.

        # Add the corresponding AnswerHandler classifiers for easy reference.
        widget = InteractiveWidget.get(self.widget.widget_id)
        for curr_handler in self.widget.handlers:
            for w_handler in widget.handlers:
                if w_handler.name == curr_handler.name:
                    curr_handler.classifier = w_handler.classifier

    # Human-readable name for the state.
    name = ndb.StringProperty(default='Activity 1')
    # The content displayed to the reader in this state.
    content = ndb.StructuredProperty(Content, repeated=True)
    # Parameter changes associated with this state.
    param_changes = ParamChangeProperty(repeated=True)
    # The interactive widget associated with this state.
    widget = ndb.StructuredProperty(WidgetInstance, required=True)
    # A dict whose keys are unresolved answers associated with this state, and
    # whose values are their counts.
    unresolved_answers = ndb.JsonProperty(default={})

    @classmethod
    def create(cls, exploration, name, state_id=None):
        """Creates a new state (a child of *exploration*) and puts it."""
        state_id = state_id or cls.get_new_id(name)
        new_state = cls(id=state_id, parent=exploration.key, name=name)
        new_state.widget = new_state.get_default_widget()
        new_state.put()
        return new_state

    @classmethod
    def get(cls, state_id, exploration):
        """Gets a state by id. If it does not exist, returns None."""
        return cls.get_by_id(state_id, parent=exploration.key)

    def as_dict(self):
        """Gets a Python dict representation of the state."""
        state_dict = self.internals_as_dict()
        state_dict.update({'id': self.id, 'name': self.name})
        return state_dict

    def internals_as_dict(self, human_readable_dests=False):
        """Gets a Python dict of the internals of the state.

        If human_readable_dests is True, rule destinations are rewritten
        from state ids to state names (END_DEST is left untouched).
        """
        state_dict = copy.deepcopy(self.to_dict(
            exclude=['name', 'unresolved_answers']))
        # Remove the computed 'classifier' property.
        for handler in state_dict['widget']['handlers']:
            del handler['classifier']
        if human_readable_dests:
            # Change the dest ids to human-readable names.
            for handler in state_dict['widget']['handlers']:
                for rule in handler['rules']:
                    if rule['dest'] != feconf.END_DEST:
                        dest_state = State.get_by_id(
                            rule['dest'], parent=self.key.parent())
                        rule['dest'] = dest_state.name
        return state_dict

    @classmethod
    def get_by_name(cls, name, exploration, strict=True):
        """Gets a state by name. Fails noisily if strict == True."""
        assert name and exploration
        state = cls.query(ancestor=exploration.key).filter(
            cls.name == name).get()
        if strict and not state:
            raise Exception('State %s not found.' % name)
        return state

    @classmethod
    def modify_using_dict(cls, exploration, state, sdict):
        """Modifies the properties of a state using values from a dict."""
        state.content = [
            Content(type=item['type'], value=item['value'])
            for item in sdict['content']
        ]
        state.param_changes = [
            ParamChange(**pc) for pc in sdict['param_changes']
        ]
        wdict = sdict['widget']
        state.widget = WidgetInstance(
            widget_id=wdict['widget_id'], sticky=wdict['sticky'])
        # Start from the supplied params, then fill in widget defaults for
        # any parameter the dict did not provide.
        state.widget.params = wdict['params']
        for wp in Widget.get(wdict['widget_id']).params:
            if wp.name not in wdict['params']:
                state.widget.params[wp.name] = wp.value
        state.widget.handlers = []
        for handler in wdict['handlers']:
            state_handler = AnswerHandlerInstance(name=handler['name'])
            for rule in handler['rules']:
                # Dests are stored as state ids; translate state names
                # back, leaving the special END_DEST marker untouched.
                rule_dest = (
                    feconf.END_DEST if rule['dest'] == feconf.END_DEST
                    else State.get_by_name(rule['dest'], exploration).id)
                state_handler.rules.append(Rule(
                    feedback=rule['feedback'], inputs=rule['inputs'],
                    name=rule['name'], dest=rule_dest
                ))
            state.widget.handlers.append(state_handler)
        state.put()
        return state

    def transition(self, answer, params, handler):
        """Handle feedback interactions with readers.

        Returns a (dest state id, feedback, matched rule, recorded answer)
        tuple.
        """
        recorded_answer = answer

        # Find the handler instance matching the named event stream.
        # NOTE(review): if no handler matches, answer_handler stays None
        # and the attribute access below raises AttributeError — callers
        # presumably always pass a valid handler name; confirm.
        answer_handler = None
        for wi_handler in self.widget.handlers:
            if wi_handler.name == handler:
                answer_handler = wi_handler

        if answer_handler.classifier:
            # Import the relevant classifier module.
            classifier_module = '.'.join([
                feconf.SAMPLE_CLASSIFIERS_DIR.replace('/', '.'),
                answer_handler.classifier, answer_handler.classifier])
            Classifier = importlib.import_module(classifier_module)
            logging.info(Classifier.__name__)

            norm_answer = Classifier.DEFAULT_NORMALIZER().normalize(answer)
            if norm_answer is None:
                raise Exception('Could not normalize %s.' % answer)

        # TODO(sll): This is a special case for multiple-choice input
        # which should really be handled generically.
        if self.widget.widget_id == 'MultipleChoiceInput':
            recorded_answer = self.widget.params['choices'][int(answer)]

        # Evaluate rules in order; the 'Default' rule always matches.
        selected_rule = None
        for ind, rule in enumerate(answer_handler.rules):
            if rule.name == 'Default':
                selected_rule = rule
                break

            func_name, param_list = self.get_classifier_info(
                self.widget.widget_id, answer_handler.name, rule, params)
            param_list = [norm_answer] + param_list
            classifier_output = getattr(Classifier, func_name)(*param_list)
            match, _ = utils.normalize_classifier_return(classifier_output)
            if match:
                selected_rule = rule
                break

        feedback = (utils.get_random_choice(selected_rule.feedback)
                    if selected_rule.feedback else '')
        # NOTE(review): 'rule' is the loop variable; whenever the loop
        # exits via break it equals selected_rule. If no rule matched (no
        # Default rule present), selected_rule is None and the feedback
        # line above raises — confirm a Default rule is always present.
        return selected_rule.dest, feedback, rule, recorded_answer

    def get_typed_object(self, mutable_rule, param):
        """Returns the objects.* class named after the '|'-suffix of
        *param*'s '{{param|Type}}' placeholder in *mutable_rule*."""
        param_spec = mutable_rule[mutable_rule.find('{{' + param) + 2:]
        param_spec = param_spec[param_spec.find('|') + 1:]
        normalizer_string = param_spec[: param_spec.find('}}')]
        return getattr(objects, normalizer_string)

    def get_classifier_info(self, widget_id, handler_name, rule, state_params):
        """Derives the classifier function name and its normalized
        parameter list from the rule's 'FuncName(a,b,...)' name string."""
        classifier_func = rule.name.replace(' ', '')
        first_bracket = classifier_func.find('(')
        mutable_rule = InteractiveWidget.get(widget_id).get_readable_name(
            handler_name, rule.name)

        func_name = classifier_func[: first_bracket]
        str_params = classifier_func[first_bracket + 1: -1].split(',')

        param_list = []
        for index, param in enumerate(str_params):
            parsed_param = rule.inputs[param]
            # Jinja-templated inputs are evaluated against state params.
            if isinstance(parsed_param, basestring) and '{{' in parsed_param:
                parsed_param = utils.parse_with_jinja(
                    parsed_param, state_params)
            typed_object = self.get_typed_object(mutable_rule, param)
            normalized_param = typed_object.normalize(parsed_param)
            param_list.append(normalized_param)

        return func_name, param_list
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite tooling helper functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import enum
from six import PY3
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.core.framework import graph_pb2 as _graph_pb2
import enum
import warnings

from tensorflow.lite.experimental.examples.lstm.rnn import dynamic_rnn  # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TFLiteLSTMCell  # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TfLiteRNNCell  # pylint: disable=unused-import
from tensorflow.lite.experimental.tensorboard.ops_util import get_potentially_supported_ops  # pylint: disable=unused-import
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.convert import build_toco_convert_protos  # pylint: disable=unused-import
from tensorflow.lite.python.convert import ConverterError  # pylint: disable=unused-import
from tensorflow.lite.python.convert import OpsSet
from tensorflow.lite.python.convert import toco_convert  # pylint: disable=unused-import
from tensorflow.lite.python.convert import toco_convert_graph_def as _toco_convert_graph_def
from tensorflow.lite.python.convert import toco_convert_impl as _toco_convert_impl
from tensorflow.lite.python.convert import toco_convert_protos  # pylint: disable=unused-import
from tensorflow.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model
from tensorflow.lite.python.interpreter import Interpreter  # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs  # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import OpHint  # pylint: disable=unused-import
from tensorflow.lite.python.optimize import calibrator as _calibrator
from tensorflow.lite.python.util import freeze_graph as _freeze_graph
from tensorflow.lite.python.util import get_grappler_config as _get_grappler_config
from tensorflow.lite.python.util import get_tensor_name as _get_tensor_name
from tensorflow.lite.python.util import get_tensors_from_tensor_names as _get_tensors_from_tensor_names
from tensorflow.lite.python.util import is_frozen_graph as _is_frozen_graph
from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations
from tensorflow.lite.python.util import set_tensor_shapes as _set_tensor_shapes
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.eager import def_function as _def_function
from tensorflow.python.eager import function as _function
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
from tensorflow.python.util import deprecation as _deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
@_tf_export("lite.Optimize")
class Optimize(enum.Enum):
  """Enum defining the optimizations to apply when generating tflite graphs.
  Some optimizations may come at the cost of accuracy.
  """
  # Reduce the size of the produced model, e.g. by quantizing the weights of
  # the floating point model.
  OPTIMIZE_FOR_SIZE = "OPTIMIZE_FOR_SIZE"
  # Reduce the latency of the produced model, e.g. by quantizing the weights
  # of the floating point model.
  OPTIMIZE_FOR_LATENCY = "OPTIMIZE_FOR_LATENCY"
  def __str__(self):
    # Render as the bare option name (e.g. "OPTIMIZE_FOR_SIZE") rather than
    # the default "Optimize.OPTIMIZE_FOR_SIZE" representation.
    return self.value
@_tf_export("lite.RepresentativeDataset")
class RepresentativeDataset(object):
  """Representative dataset to evaluate optimizations.
  A representative dataset that can be used to evaluate optimizations by the
  converter. E.g. converter can use these examples to estimate (min, max) ranges
  by calibrating the model on inputs. This can allow converter to quantize a
  converted floating point model.
  """
  def __init__(self, input_gen, output_gen=None):
    """Creates a representative dataset.
    Args:
      input_gen: an input generator that can be used to generate input samples
        for the model. This must be a callable object that returns an object
        that supports the `iter()` protocol (e.g. a generator function). The
        elements generated must have same type and shape as inputs to the model.
      output_gen: (optional) an output generator that can be used to generate
        output samples for the model. This must be a callable object that
        returns an object that supports the `iter()` protocol (e.g. a generator
        function). The elements generated must have same type and shape as
        outputs to the model. (default None)
    """
    # The generators are stored as-is; they are only invoked later by the
    # calibration machinery (see the converters' convert() methods).
    self.output_gen = output_gen
    self.input_gen = input_gen
@_tf_export("lite.TargetSpec")
class TargetSpec(object):
  """Specification of target device.
  Details about target device. Converter optimizes the generated model for
  specific device.
  Attributes:
    supported_ops: Experimental flag, subject to change. Set of OpsSet options
      supported by the device. (default set([OpsSet.TFLITE_BUILTINS]))
  """
  def __init__(self, supported_ops=None):
    # Default to the TFLite builtin op set. A `None` sentinel (rather than a
    # mutable default argument) keeps instances independent of each other.
    self.supported_ops = (
        {OpsSet.TFLITE_BUILTINS} if supported_ops is None else supported_ops)
@_tf_export("lite.TFLiteConverter", v1=[])
class TFLiteConverterV2(object):
  """Converts a TensorFlow model into TensorFlow Lite model.
  Attributes:
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver. (default
      False)
    target_spec: Experimental flag, subject to change. Specification of target
      device.
    optimizations: Experimental flag, subject to change, A list of optimizations
      to apply when converting the model. The converter applies the
      optimizations by giving priority to the optimizations specified earlier in
      the list. E.g. `[optimize.OPTIMIZE_FOR_SIZE,
      optimize.OPTIMIZE_FOR_LATENCY]` requires the converter to do both size and
      latency optimizations giving priority to size optimizations over latency
      optimizations.
    representative_dataset: A representative dataset that can be used to
      generate input and output samples for the model. The converter can use the
      dataset to evaluate different optimizations.
  Example usage:
  ```python
  # Converting a GraphDef from a ConcreteFunction.
  converter = lite.TFLiteConverter.from_concrete_function(func)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)
  ```
  """
  def __init__(self, func):
    """Constructor for TFLiteConverter.
    Args:
      func: TensorFlow ConcreteFunction.
    """
    self._func = func
    self.allow_custom_ops = False
    self.target_spec = TargetSpec()
    self.representative_dataset = None
    self.optimizations = []
  @classmethod
  def from_concrete_function(cls, func):
    """Creates a TFLiteConverter class from a ConcreteFunction.
    Args:
      func: TensorFlow ConcreteFunction.
    Returns:
      TFLiteConverter class.
    Raises:
      ValueError: if `func` is not a ConcreteFunction.
    """
    if not isinstance(func, _function.ConcreteFunction):
      message = "This function takes in a ConcreteFunction."
      if isinstance(func, _def_function.Function):
        # Bug fix: the message previously suggested calling
        # `from_concrete_function`, which is circular advice — the API that
        # produces a ConcreteFunction from a Function is
        # `Function.get_concrete_function()`.
        message += (" To get the ConcreteFunction from a Function,"
                    " call get_concrete_function.")
      raise ValueError(message)
    return cls(func)
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.
    Returns:
      The converted data in serialized format.
    Raises:
      TypeError:
        representative_dataset is not a RepresentativeDataset instance.
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
        representative_dataset has no input generator.
    """
    # Inline all variables as constants so TOCO sees a self-contained graph.
    frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
        self._func)
    # Resource-typed inputs (variable handles) were consumed by freezing and
    # are not real model inputs.
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs
    # Run a Grappler pass. The layout optimizer is only safe/useful when the
    # whole model is delegated to TensorFlow (flex) ops.
    is_only_flex_enabled = set(
        [OpsSet.SELECT_TF_OPS]) == self.target_spec.supported_ops
    config = _get_grappler_config(enable_layout_optimizer=is_only_flex_enabled)
    graph_def = _run_graph_optimizations(
        frozen_func.graph.as_graph_def(),
        input_tensors,
        output_tensors,
        config,
        graph=frozen_func.graph)
    # Checks dimensions in input tensor. Only the batch (first) dimension may
    # be unknown; it is defaulted to 1.
    for tensor in input_tensors:
      # Note that shape_list might be empty for scalar shapes.
      shape_list = tensor.shape.as_list()
      if None in shape_list[1:]:
        raise ValueError(
            "None is only supported in the 1st dimension. Tensor '{0}' has "
            "invalid shape '{1}'.".format(_get_tensor_name(tensor), shape_list))
      elif shape_list and shape_list[0] is None:
        # Set the batch size to 1 if undefined.
        shape = tensor.shape.as_list()
        shape[0] = 1
        tensor.set_shape(shape)
    if self.representative_dataset:
      if not isinstance(self.representative_dataset, RepresentativeDataset):
        raise TypeError("`representative_dataset` must be an instance of "
                        "`RepresentativeDataset`")
      if self.representative_dataset.input_gen is None:
        raise ValueError(
            "Provide an input generator for `representative_dataset`")
    # TODO(shashishekhar): For now use optimizations order is ignored.
    # Both size and latency optimizations decide whether to apply post
    # training optimizations.
    post_training_optimize = bool(
        len(
            set(self.optimizations)
            & set([Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE])))
    # Do weights only quantization if there is no dataset for calibration.
    weights_only_quantize_flag = (
        post_training_optimize and (self.representative_dataset is None))
    converter_kwargs = {
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "allow_custom_ops": self.allow_custom_ops,
        "post_training_quantize": weights_only_quantize_flag,
        "target_ops": self.target_spec.supported_ops,
    }
    # Converts model.
    result = _toco_convert_impl(
        input_data=graph_def,
        input_tensors=input_tensors,
        output_tensors=output_tensors,
        **converter_kwargs)
    # With a representative dataset available, calibrate on real samples and
    # fully quantize the converted model.
    if self.representative_dataset and post_training_optimize:
      calibrate_quantize = _calibrator.Calibrator(result)
      result = calibrate_quantize.calibrate_and_quantize(
          self.representative_dataset.input_gen)
    return result
@_tf_export(v1=["lite.TFLiteConverter"])
class TFLiteConverter(object):
  """Convert a TensorFlow model into `output_format` using TOCO.
  This is used to convert from a TensorFlow GraphDef or SavedModel into either a
  TFLite FlatBuffer or graph visualization.
  Attributes:
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8}`. (default tf.float32)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization.
      Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: Dict of strings representing input tensor names
      mapped to tuple of floats representing the mean and standard deviation
      of the training data (e.g., {"foo" : (0., 1.)}). Only need if
      `inference_input_type` is `QUANTIZED_UINT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default {})
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    post_training_quantize: deprecated, please specify
      `[optimize.OPTIMIZE_FOR_SIZE]` for `optimizations` instead. Boolean
      indicating whether to quantize the weights of the converted float model.
      Model size will be reduced and there will be latency improvements
      (at the cost of accuracy). (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet
      options indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    optimizations: Experimental flag, subject to change, A list of
      optimizations to apply when converting the model. The converter applies
      the optimizations by giving priority to the optimizations specified
      earlier in the list. E.g.
      `[optimize.OPTIMIZE_FOR_SIZE, optimize.OPTIMIZE_FOR_LATENCY]` requires
      the converter to do both size and latency optimizations giving priority
      to size optimizations over latency optimizations.
    representative_dataset: A representative dataset that can be used to
      generate input and output samples for the model. The converter can use
      the dataset to evaluate different optimizations.
  Example usage:
  ```python
  # Converting a GraphDef from session.
  converter = lite.TFLiteConverter.from_session(sess, in_tensors, out_tensors)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)
  # Converting a GraphDef from file.
  converter = lite.TFLiteConverter.from_frozen_graph(
    graph_def_file, input_arrays, output_arrays)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)
  # Converting a SavedModel.
  converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
  tflite_model = converter.convert()
  # Converting a tf.keras model.
  converter = lite.TFLiteConverter.from_keras_model_file(keras_model)
  tflite_model = converter.convert()
  ```
  """
  def __init__(self,
               graph_def,
               input_tensors,
               output_tensors,
               input_arrays_with_shape=None,
               output_arrays=None):
    """Constructor for TFLiteConverter.
    Args:
      graph_def: Frozen TensorFlow GraphDef.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).
      input_arrays_with_shape: Tuple of strings representing input tensor names
        and list of integers representing input shapes
        (e.g., [("foo", [1, 16, 16, 3])]). Use only when graph cannot be loaded
        into TensorFlow and when `input_tensors` and `output_tensors` are
        None. (default None)
      output_arrays: List of output tensors to freeze graph with. Use only when
        graph cannot be loaded into TensorFlow and when `input_tensors` and
        `output_tensors` are None. (default None)
    Raises:
      ValueError: Invalid arguments.
    """
    self._graph_def = graph_def
    self._input_tensors = input_tensors
    self._output_tensors = output_tensors
    self.inference_type = constants.FLOAT
    self.inference_input_type = None
    self.output_format = constants.TFLITE
    self.quantized_input_stats = {}
    self.default_ranges_stats = None
    self.drop_control_dependency = True
    self.reorder_across_fake_quant = False
    self.change_concat_input_ranges = False
    self.allow_custom_ops = False
    # Underscored on purpose: this bypasses the deprecation shim in
    # `__setattr__`, which only intercepts the name "post_training_quantize".
    self._post_training_quantize = False
    self.dump_graphviz_dir = None
    self.dump_graphviz_video = False
    self.target_ops = set([OpsSet.TFLITE_BUILTINS])
    self.representative_dataset = None
    self.optimizations = []
    # Attributes are used by models that cannot be loaded into TensorFlow.
    if not self._has_valid_tensors():
      if not input_arrays_with_shape or not output_arrays:
        raise ValueError(
            "If input_tensors and output_tensors are None, both "
            "input_arrays_with_shape and output_arrays must be defined.")
      self._input_arrays_with_shape = input_arrays_with_shape
      self._output_arrays = output_arrays
  @classmethod
  def from_session(cls, sess, input_tensors, output_tensors):
    """Creates a TFLiteConverter class from a TensorFlow Session.
    Args:
      sess: TensorFlow Session.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).
    Returns:
      TFLiteConverter class.
    """
    # Freeze the session's graph (variables -> constants) before handing it to
    # the constructor.
    graph_def = _freeze_graph(sess, input_tensors, output_tensors)
    return cls(graph_def, input_tensors, output_tensors)
  @classmethod
  def from_frozen_graph(cls,
                        graph_def_file,
                        input_arrays,
                        output_arrays,
                        input_shapes=None):
    """Creates a TFLiteConverter class from a file containing a frozen GraphDef.
    Args:
      graph_def_file: Full filepath of file containing frozen GraphDef.
      input_arrays: List of input tensors to freeze graph with.
      output_arrays: List of output tensors to freeze graph with.
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)
    Returns:
      TFLiteConverter class.
    Raises:
      IOError:
        File not found.
        Unable to parse input file.
      ValueError:
        The graph is not frozen.
        input_arrays or output_arrays contains an invalid tensor name.
        input_shapes is not correctly defined when required
    """
    # NOTE(review): `_graph_pb2`, `_text_format`, `DecodeError` and `PY3` are
    # used below but not imported in the visible import block — presumably
    # imported near the top of the file; verify.
    with _ops.Graph().as_default():
      with _session.Session() as sess:
        # Read GraphDef from file.
        if not _file_io.file_exists(graph_def_file):
          raise IOError("File '{0}' does not exist.".format(graph_def_file))
        with _file_io.FileIO(graph_def_file, "rb") as f:
          file_content = f.read()
        # First try to parse as a binary GraphDef proto; on failure fall back
        # to the human-readable text format.
        try:
          graph_def = _graph_pb2.GraphDef()
          graph_def.ParseFromString(file_content)
        except (_text_format.ParseError, DecodeError):
          try:
            print("Ignore 'tcmalloc: large alloc' warnings.")
            if not isinstance(file_content, str):
              if PY3:
                file_content = file_content.decode("utf-8")
              else:
                file_content = file_content.encode("utf-8")
            graph_def = _graph_pb2.GraphDef()
            _text_format.Merge(file_content, graph_def)
          except (_text_format.ParseError, DecodeError):
            raise IOError(
                "Unable to parse input file '{}'.".format(graph_def_file))
        # Handles models with custom TFLite ops that cannot be resolved in
        # TensorFlow.
        load_model_in_session = True
        try:
          _import_graph_def(graph_def, name="")
        except _NotFoundError:
          load_model_in_session = False
        if load_model_in_session:
          # Check if graph is frozen.
          if not _is_frozen_graph(sess):
            raise ValueError("Please freeze the graph using freeze_graph.py.")
          # Get input and output tensors.
          input_tensors = _get_tensors_from_tensor_names(
              sess.graph, input_arrays)
          output_tensors = _get_tensors_from_tensor_names(
              sess.graph, output_arrays)
          _set_tensor_shapes(input_tensors, input_shapes)
          return cls(sess.graph_def, input_tensors, output_tensors)
        else:
          # Graph could not be imported (unresolved custom ops): require
          # explicit shapes and fall back to name/shape based conversion.
          if not input_shapes:
            raise ValueError("input_shapes must be defined for this model.")
          if set(input_arrays) != set(input_shapes.keys()):
            raise ValueError("input_shapes must contain a value for each item "
                             "in input_array.")
          input_arrays_with_shape = [
              (name, input_shapes[name]) for name in input_arrays
          ]
          return cls(
              graph_def,
              input_tensors=None,
              output_tensors=None,
              input_arrays_with_shape=input_arrays_with_shape,
              output_arrays=output_arrays)
  @classmethod
  def from_saved_model(cls,
                       saved_model_dir,
                       input_arrays=None,
                       input_shapes=None,
                       output_arrays=None,
                       tag_set=None,
                       signature_key=None):
    """Creates a TFLiteConverter class from a SavedModel.
    Args:
      saved_model_dir: SavedModel directory to convert.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
        analyze. All tags in the tag set must be present. (default set("serve"))
      signature_key: Key identifying SignatureDef containing inputs and outputs.
        (default DEFAULT_SERVING_SIGNATURE_DEF_KEY)
    Returns:
      TFLiteConverter class.
    """
    if tag_set is None:
      tag_set = set([_tag_constants.SERVING])
    if signature_key is None:
      signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    # result is a (graph_def, input_tensors, output_tensors) triple.
    result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
                                 output_arrays, tag_set, signature_key)
    return cls(
        graph_def=result[0], input_tensors=result[1], output_tensors=result[2])
  @classmethod
  def from_keras_model_file(cls,
                            model_file,
                            input_arrays=None,
                            input_shapes=None,
                            output_arrays=None):
    """Creates a TFLiteConverter class from a tf.keras model file.
    Args:
      model_file: Full filepath of HDF5 file containing the tf.keras model.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
    Returns:
      TFLiteConverter class.
    """
    # Start from a clean graph in inference mode before loading the model.
    _keras.backend.clear_session()
    _keras.backend.set_learning_phase(False)
    keras_model = _keras.models.load_model(model_file)
    sess = _keras.backend.get_session()
    # Get input and output tensors.
    if input_arrays:
      input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
    else:
      input_tensors = keras_model.inputs
    if output_arrays:
      output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
    else:
      output_tensors = keras_model.outputs
    _set_tensor_shapes(input_tensors, input_shapes)
    graph_def = _freeze_graph(sess, input_tensors, output_tensors)
    return cls(graph_def, input_tensors, output_tensors)
  def __setattr__(self, name, value):
    """Deprecation shim: maps `post_training_quantize` onto `optimizations`."""
    if name == "post_training_quantize":
      warnings.warn("Property %s is deprecated, "
                    "please use optimizations=[Optimize.OPTIMIZE_FOR_SIZE]"
                    " instead." % name)
      if value:
        # Use OPTIMIZE_FOR_SIZE for post training for now.
        self.optimizations = [Optimize.OPTIMIZE_FOR_SIZE]
      else:
        self.optimizations = []
      return
    object.__setattr__(self, name, value)
  def __getattribute__(self, name):
    """Deprecation shim: derives `post_training_quantize` from `optimizations`."""
    if name == "post_training_quantize":
      warnings.warn("Property %s is deprecated, "
                    "please use optimizations=[Optimize.OPTIMIZE_FOR_SIZE]"
                    " instead." % name)
      return Optimize.OPTIMIZE_FOR_SIZE in set(self.optimizations)
    return object.__getattribute__(self, name)
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.
    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on value in `output_format`.
    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    # Checks dimensions in input tensor. Only the batch (first) dimension may
    # be None; it is defaulted to 1.
    if self._has_valid_tensors():
      for tensor in self._input_tensors:
        shape = tensor.shape
        if not shape:
          raise ValueError("Provide an input shape for input array "
                           "'{0}'.".format(_get_tensor_name(tensor)))
        # Note that shape_list might be empty for scalar shapes.
        shape_list = shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          self._set_batch_size(batch_size=1)
    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if self.quantized_input_stats:
      quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in self.quantized_input_stats:
          quantized_stats.append(self.quantized_input_stats[name])
        else:
          invalid_stats.append(name)
      if invalid_stats:
        raise ValueError("Quantization input stats are not available for input "
                         "tensors '{0}'.".format(",".join(invalid_stats)))
    else:
      quantized_stats = None
    if self.representative_dataset:
      if not isinstance(self.representative_dataset, RepresentativeDataset):
        raise TypeError(
            "representative_dataset must be an instance of "
            "RepresentativeDataset")
      if self.representative_dataset.input_gen is None:
        raise ValueError(
            "Provide an input generator for representative_dataset")
    # TODO(shashishekhar): For now use optimizations order is ignored.
    # Both size and latency optimizations decide whether to apply post
    # training optimizations.
    post_training_optimize = bool(
        len(set(self.optimizations) & set([Optimize.OPTIMIZE_FOR_LATENCY,
                                           Optimize.OPTIMIZE_FOR_SIZE])))
    # Do weights only quantization if there is no dataset for calibration.
    weights_only_quantize_flag = (
        post_training_optimize and (self.representative_dataset is None))
    converter_kwargs = {
        "inference_type": self.inference_type,
        "inference_input_type": self.inference_input_type,
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "output_format": self.output_format,
        "quantized_input_stats": quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "allow_custom_ops": self.allow_custom_ops,
        "post_training_quantize": weights_only_quantize_flag,
        "target_ops": self.target_ops,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video
    }
    # Run a best-effort Grappler pass; skipped for quantized inference and
    # silently falls back to the unoptimized graph on any failure.
    optimized_graph = None
    if self.inference_type == constants.QUANTIZED_UINT8:
      optimized_graph = self._graph_def
    else:
      try:
        is_only_flex_enabled = set([OpsSet.SELECT_TF_OPS]) == self.target_ops
        config = _get_grappler_config(
            enable_layout_optimizer=is_only_flex_enabled)
        optimized_graph = _run_graph_optimizations(
            self._graph_def, self._input_tensors, self._output_tensors, config)
      except Exception:
        optimized_graph = self._graph_def
    # Converts model.
    if self._has_valid_tensors():
      result = _toco_convert_impl(
          input_data=optimized_graph,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs)
    else:
      result = _toco_convert_graph_def(
          input_data=optimized_graph,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          **converter_kwargs)
    # With a representative dataset available, calibrate on real samples and
    # fully quantize the converted model.
    if self.representative_dataset and post_training_optimize:
      calibrate_quantize = _calibrator.Calibrator(result)
      result = calibrate_quantize.calibrate_and_quantize(
          self.representative_dataset.input_gen)
    return result
  def get_input_arrays(self):
    """Returns a list of the names of the input tensors.
    Returns:
      List of strings.
    """
    if self._has_valid_tensors():
      return [_get_tensor_name(tensor) for tensor in self._input_tensors]
    else:
      return [name for name, _ in self._input_arrays_with_shape]
  def _has_valid_tensors(self):
    """Checks if the input and output tensors have been initialized.
    Returns:
      Bool.
    """
    return self._input_tensors and self._output_tensors
  def _set_batch_size(self, batch_size):
    """Sets the first dimension of the input tensor to `batch_size`.
    Args:
      batch_size: Batch size for the model. Replaces the first dimension of an
        input size array if undefined. (default 1)
    Raises:
      ValueError: input_tensor is not defined.
    """
    if not self._has_valid_tensors():
      raise ValueError("The batch size cannot be set for this model. Please "
                       "use input_shapes parameter.")
    for tensor in self._input_tensors:
      shape = tensor.shape.as_list()
      shape[0] = batch_size
      tensor.set_shape(shape)
@_tf_export(v1=["lite.TocoConverter"])
class TocoConverter(object):
  """Convert a TensorFlow model into `output_format` using TOCO.
  This class has been deprecated. Please use `lite.TFLiteConverter` instead.
  """
  # Each factory below is a thin deprecated shim that forwards its arguments
  # unchanged to the equivalent TFLiteConverter factory; the decorator emits
  # the deprecation warning.
  @classmethod
  @_deprecation.deprecated(None,
                           "Use `lite.TFLiteConverter.from_session` instead.")
  def from_session(cls, sess, input_tensors, output_tensors):
    """Creates a TocoConverter class from a TensorFlow Session."""
    return TFLiteConverter.from_session(sess, input_tensors, output_tensors)
  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_frozen_graph` instead.")
  def from_frozen_graph(cls,
                        graph_def_file,
                        input_arrays,
                        output_arrays,
                        input_shapes=None):
    """Creates a TocoConverter class from a file containing a frozen graph."""
    return TFLiteConverter.from_frozen_graph(graph_def_file, input_arrays,
                                             output_arrays, input_shapes)
  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_saved_model` instead.")
  def from_saved_model(cls,
                       saved_model_dir,
                       input_arrays=None,
                       input_shapes=None,
                       output_arrays=None,
                       tag_set=None,
                       signature_key=None):
    """Creates a TocoConverter class from a SavedModel."""
    return TFLiteConverter.from_saved_model(saved_model_dir, input_arrays,
                                            input_shapes, output_arrays,
                                            tag_set, signature_key)
  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
  def from_keras_model_file(cls,
                            model_file,
                            input_arrays=None,
                            input_shapes=None,
                            output_arrays=None):
    """Creates a TocoConverter class from a tf.keras model file."""
    return TFLiteConverter.from_keras_model_file(model_file, input_arrays,
                                                 input_shapes, output_arrays)
# --------------------------------------------------------------------------
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.googlepb.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides DescriptorPool to use as a container for proto2 descriptors.
The DescriptorPool is used in conjunction with a DescriptorDatabase to maintain
a collection of protocol buffer descriptors for use when dynamically creating
message types at runtime.
For most applications protocol buffers should be used via modules generated by
the protocol buffer compiler tool. This should only be used when the type of
protocol buffers used in an application or library cannot be predetermined.
Below is a straightforward example on how to use this class:
pool = DescriptorPool()
file_descriptor_protos = [ ... ]
for file_descriptor_proto in file_descriptor_protos:
pool.Add(file_descriptor_proto)
my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType')
The message descriptor can be used in conjunction with the message_factory
module in order to create a protocol buffer class that can be encoded and
decoded.
"""
__author__ = 'matthewtoia@googlepb.com (Matt Toia)'
from googlepb.protobuf import descriptor_pb2
from googlepb.protobuf import descriptor
from googlepb.protobuf import descriptor_database
class DescriptorPool(object):
"""A collection of protobufs dynamically constructed by descriptor protos."""
  def __init__(self, descriptor_db=None):
    """Initializes a Pool of proto buffs.
    The descriptor_db argument to the constructor is provided to allow
    specialized file descriptor proto lookup code to be triggered on demand. An
    example would be an implementation which will read and compile a file
    specified in a call to FindFileByName() and not require the call to Add()
    at all. Results from this database will be cached internally here as well.
    Args:
      descriptor_db: A secondary source of file descriptors.
    """
    # Primary store for FileDescriptorProtos registered via Add().
    self._internal_db = descriptor_database.DescriptorDatabase()
    # Optional fallback source, consulted when the internal db misses.
    self._descriptor_db = descriptor_db
    # Caches keyed by fully-qualified type name / file name, populated lazily
    # by _ConvertFileProtoToFileDescriptor.
    self._descriptors = {}
    self._enum_descriptors = {}
    self._file_descriptors = {}
  def Add(self, file_desc_proto):
    """Adds the FileDescriptorProto and its types to this pool.
    Args:
      file_desc_proto: The FileDescriptorProto to add.
    """
    # Only the internal database is updated here; descriptors are converted
    # lazily on first lookup (see FindFileByName and friends).
    self._internal_db.Add(file_desc_proto)
def FindFileByName(self, file_name):
"""Gets a FileDescriptor by file name.
Args:
file_name: The path to the file to get a descriptor for.
Returns:
A FileDescriptor for the named file.
Raises:
KeyError: if the file can not be found in the pool.
"""
try:
file_proto = self._internal_db.FindFileByName(file_name)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileByName(file_name)
else:
raise error
if not file_proto:
raise KeyError('Cannot find a file named %s' % file_name)
return self._ConvertFileProtoToFileDescriptor(file_proto)
def FindFileContainingSymbol(self, symbol):
"""Gets the FileDescriptor for the file containing the specified symbol.
Args:
symbol: The name of the symbol to search for.
Returns:
A FileDescriptor that contains the specified symbol.
Raises:
KeyError: if the file can not be found in the pool.
"""
try:
file_proto = self._internal_db.FindFileContainingSymbol(symbol)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)
else:
raise error
if not file_proto:
raise KeyError('Cannot find a file containing %s' % symbol)
return self._ConvertFileProtoToFileDescriptor(file_proto)
def FindMessageTypeByName(self, full_name):
"""Loads the named descriptor from the pool.
Args:
full_name: The full name of the descriptor to load.
Returns:
The descriptor for the named type.
"""
full_name = full_name.lstrip('.') # fix inconsistent qualified name formats
if full_name not in self._descriptors:
self.FindFileContainingSymbol(full_name)
return self._descriptors[full_name]
def FindEnumTypeByName(self, full_name):
"""Loads the named enum descriptor from the pool.
Args:
full_name: The full name of the enum descriptor to load.
Returns:
The enum descriptor for the named type.
"""
full_name = full_name.lstrip('.') # fix inconsistent qualified name formats
if full_name not in self._enum_descriptors:
self.FindFileContainingSymbol(full_name)
return self._enum_descriptors[full_name]
def _ConvertFileProtoToFileDescriptor(self, file_proto):
"""Creates a FileDescriptor from a proto or returns a cached copy.
This method also has the side effect of loading all the symbols found in
the file into the appropriate dictionaries in the pool.
Args:
file_proto: The proto to convert.
Returns:
A FileDescriptor matching the passed in proto.
"""
if file_proto.name not in self._file_descriptors:
file_descriptor = descriptor.FileDescriptor(
name=file_proto.name,
package=file_proto.package,
options=file_proto.options,
serialized_pb=file_proto.SerializeToString())
scope = {}
dependencies = list(self._GetDeps(file_proto))
for dependency in dependencies:
dep_desc = self.FindFileByName(dependency.name)
dep_proto = descriptor_pb2.FileDescriptorProto.FromString(
dep_desc.serialized_pb)
package = '.' + dep_proto.package
package_prefix = package + '.'
def _strip_package(symbol):
if symbol.startswith(package_prefix):
return symbol[len(package_prefix):]
return symbol
symbols = list(self._ExtractSymbols(dep_proto.message_type, package))
scope.update(symbols)
scope.update((_strip_package(k), v) for k, v in symbols)
symbols = list(self._ExtractEnums(dep_proto.enum_type, package))
scope.update(symbols)
scope.update((_strip_package(k), v) for k, v in symbols)
for message_type in file_proto.message_type:
message_desc = self._ConvertMessageDescriptor(
message_type, file_proto.package, file_descriptor, scope)
file_descriptor.message_types_by_name[message_desc.name] = message_desc
for enum_type in file_proto.enum_type:
self._ConvertEnumDescriptor(enum_type, file_proto.package,
file_descriptor, None, scope)
for desc_proto in self._ExtractMessages(file_proto.message_type):
self._SetFieldTypes(desc_proto, scope)
for desc_proto in file_proto.message_type:
desc = scope[desc_proto.name]
file_descriptor.message_types_by_name[desc_proto.name] = desc
self.Add(file_proto)
self._file_descriptors[file_proto.name] = file_descriptor
return self._file_descriptors[file_proto.name]
def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None,
scope=None):
"""Adds the proto to the pool in the specified package.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: The package the proto should be located in.
file_desc: The file containing this message.
scope: Dict mapping short and full symbols to message and enum types.
Returns:
The added descriptor.
"""
if package:
desc_name = '.'.join((package, desc_proto.name))
else:
desc_name = desc_proto.name
if file_desc is None:
file_name = None
else:
file_name = file_desc.name
if scope is None:
scope = {}
nested = [
self._ConvertMessageDescriptor(nested, desc_name, file_desc, scope)
for nested in desc_proto.nested_type]
enums = [
self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope)
for enum in desc_proto.enum_type]
fields = [self._MakeFieldDescriptor(field, desc_name, index)
for index, field in enumerate(desc_proto.field)]
extensions = [self._MakeFieldDescriptor(extension, desc_name, True)
for index, extension in enumerate(desc_proto.extension)]
extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range]
if extension_ranges:
is_extendable = True
else:
is_extendable = False
desc = descriptor.Descriptor(
name=desc_proto.name,
full_name=desc_name,
filename=file_name,
containing_type=None,
fields=fields,
nested_types=nested,
enum_types=enums,
extensions=extensions,
options=desc_proto.options,
is_extendable=is_extendable,
extension_ranges=extension_ranges,
file=file_desc,
serialized_start=None,
serialized_end=None)
for nested in desc.nested_types:
nested.containing_type = desc
for enum in desc.enum_types:
enum.containing_type = desc
scope[desc_proto.name] = desc
scope['.' + desc_name] = desc
self._descriptors[desc_name] = desc
return desc
def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None,
containing_type=None, scope=None):
"""Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf.
Args:
enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message.
package: Optional package name for the new message EnumDescriptor.
file_desc: The file containing the enum descriptor.
containing_type: The type containing this enum.
scope: Scope containing available types.
Returns:
The added descriptor
"""
if package:
enum_name = '.'.join((package, enum_proto.name))
else:
enum_name = enum_proto.name
if file_desc is None:
file_name = None
else:
file_name = file_desc.name
values = [self._MakeEnumValueDescriptor(value, index)
for index, value in enumerate(enum_proto.value)]
desc = descriptor.EnumDescriptor(name=enum_proto.name,
full_name=enum_name,
filename=file_name,
file=file_desc,
values=values,
containing_type=containing_type,
options=enum_proto.options)
scope[enum_proto.name] = desc
scope['.%s' % enum_name] = desc
self._enum_descriptors[enum_name] = desc
return desc
def _MakeFieldDescriptor(self, field_proto, message_name, index,
is_extension=False):
"""Creates a field descriptor from a FieldDescriptorProto.
For message and enum type fields, this method will do a look up
in the pool for the appropriate descriptor for that type. If it
is unavailable, it will fall back to the _source function to
create it. If this type is still unavailable, construction will
fail.
Args:
field_proto: The proto describing the field.
message_name: The name of the containing message.
index: Index of the field
is_extension: Indication that this field is for an extension.
Returns:
An initialized FieldDescriptor object
"""
if message_name:
full_name = '.'.join((message_name, field_proto.name))
else:
full_name = field_proto.name
return descriptor.FieldDescriptor(
name=field_proto.name,
full_name=full_name,
index=index,
number=field_proto.number,
type=field_proto.type,
cpp_type=None,
message_type=None,
enum_type=None,
containing_type=None,
label=field_proto.label,
has_default_value=False,
default_value=None,
is_extension=is_extension,
extension_scope=None,
options=field_proto.options)
def _SetFieldTypes(self, desc_proto, scope):
"""Sets the field's type, cpp_type, message_type and enum_type.
Args:
desc_proto: The message descriptor to update.
scope: Enclosing scope of available types.
"""
desc = scope[desc_proto.name]
for field_proto, field_desc in zip(desc_proto.field, desc.fields):
if field_proto.type_name:
type_name = field_proto.type_name
if type_name not in scope:
type_name = '.' + type_name
desc = scope[type_name]
else:
desc = None
if not field_proto.HasField('type'):
if isinstance(desc, descriptor.Descriptor):
field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE
else:
field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM
field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType(
field_proto.type)
if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE
or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP):
field_desc.message_type = desc
if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
field_desc.enum_type = desc
if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED:
field_desc.has_default = False
field_desc.default_value = []
elif field_proto.HasField('default_value'):
field_desc.has_default = True
if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or
field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):
field_desc.default_value = float(field_proto.default_value)
elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:
field_desc.default_value = field_proto.default_value
elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:
field_desc.default_value = field_proto.default_value.lower() == 'true'
elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
field_desc.default_value = field_desc.enum_type.values_by_name[
field_proto.default_value].index
else:
field_desc.default_value = int(field_proto.default_value)
else:
field_desc.has_default = False
field_desc.default_value = None
field_desc.type = field_proto.type
for nested_type in desc_proto.nested_type:
self._SetFieldTypes(nested_type, scope)
def _MakeEnumValueDescriptor(self, value_proto, index):
"""Creates a enum value descriptor object from a enum value proto.
Args:
value_proto: The proto describing the enum value.
index: The index of the enum value.
Returns:
An initialized EnumValueDescriptor object.
"""
return descriptor.EnumValueDescriptor(
name=value_proto.name,
index=index,
number=value_proto.number,
options=value_proto.options,
type=None)
def _ExtractSymbols(self, desc_protos, package):
"""Pulls out all the symbols from descriptor protos.
Args:
desc_protos: The protos to extract symbols from.
package: The package containing the descriptor type.
Yields:
A two element tuple of the type name and descriptor object.
"""
for desc_proto in desc_protos:
if package:
message_name = '.'.join((package, desc_proto.name))
else:
message_name = desc_proto.name
message_desc = self.FindMessageTypeByName(message_name)
yield (message_name, message_desc)
for symbol in self._ExtractSymbols(desc_proto.nested_type, message_name):
yield symbol
for symbol in self._ExtractEnums(desc_proto.enum_type, message_name):
yield symbol
def _ExtractEnums(self, enum_protos, package):
"""Pulls out all the symbols from enum protos.
Args:
enum_protos: The protos to extract symbols from.
package: The package containing the enum type.
Yields:
A two element tuple of the type name and enum descriptor object.
"""
for enum_proto in enum_protos:
if package:
enum_name = '.'.join((package, enum_proto.name))
else:
enum_name = enum_proto.name
enum_desc = self.FindEnumTypeByName(enum_name)
yield (enum_name, enum_desc)
def _ExtractMessages(self, desc_protos):
"""Pulls out all the message protos from descriptos.
Args:
desc_protos: The protos to extract symbols from.
Yields:
Descriptor protos.
"""
for desc_proto in desc_protos:
yield desc_proto
for message in self._ExtractMessages(desc_proto.nested_type):
yield message
def _GetDeps(self, file_proto):
"""Recursively finds dependencies for file protos.
Args:
file_proto: The proto to get dependencies from.
Yields:
Each direct and indirect dependency.
"""
for dependency in file_proto.dependency:
dep_desc = self.FindFileByName(dependency)
dep_proto = descriptor_pb2.FileDescriptorProto.FromString(
dep_desc.serialized_pb)
yield dep_proto
for parent_dep in self._GetDeps(dep_proto):
yield parent_dep
| |
"""Tests for the VerificationServiceClass."""
import logging
import time
import pytest
from pydicom.dataset import Dataset
from pynetdicom import AE, evt, debug_logger
from pynetdicom.dimse_primitives import C_ECHO
from pynetdicom.service_class import VerificationServiceClass
from pynetdicom.sop_class import VerificationSOPClass
#debug_logger()
class TestVerificationServiceClass(object):
    """Test the VerificationSOPClass"""
    # Each test starts a throwaway SCP on port 11112, associates to it as an
    # SCU, sends a C-ECHO, and checks the response produced by the handler.
    # NOTE(review): nose-style setup/teardown hooks; newer pytest releases
    # dropped support for these -- consider setup_method/teardown_method.

    def setup(self):
        """Run prior to each test"""
        self.ae = None

    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()

    def test_scp_handler_return_dataset(self):
        """Test handler returning a Dataset status"""
        def handle(event):
            status = Dataset()
            status.Status = 0x0001
            return status

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        assert rsp.Status == 0x0001
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_scp_handler_return_dataset_no_status(self):
        """Test handler returning a Dataset with no Status elem"""
        def handle(event):
            return Dataset()

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        # Missing Status is replaced by Success (0x0000) by the service class
        assert rsp.Status == 0x0000
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_scp_handler_return_dataset_multi(self):
        """Test handler returning a Dataset status with other elements"""
        def handle(event):
            status = Dataset()
            status.Status = 0x0001
            status.ErrorComment = 'Test'
            return status

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        assert rsp.Status == 0x0001
        # Recognised optional status elements are passed through to the rsp
        assert rsp.ErrorComment == 'Test'
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_scp_handler_return_dataset_unknown(self):
        """Test a status ds with an unknown element."""
        def handle(event):
            status = Dataset()
            status.Status = 0x0001
            # PatientName is not a valid status element; it should be ignored
            status.PatientName = 'test name'
            return status

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        assert rsp.Status == 0x0001
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_scp_handler_return_int(self):
        """Test handler returning an int status"""
        def handle(event):
            return 0x0002

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        assert rsp.Status == 0x0002
        assert not 'ErrorComment' in rsp
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_scp_handler_return_valid(self):
        """Test handler returning a valid status"""
        def handle(event):
            return 0x0000

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        assert rsp.Status == 0x0000
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_scp_handler_no_status(self):
        """Test handler not returning a status"""
        def handle(event):
            return None

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        # For C-ECHO a missing handler status still yields Success
        assert rsp.Status == 0x0000
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_scp_handler_exception(self):
        """Test handler raising an exception"""
        def handle(event):
            raise ValueError

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        # Verification always responds Success even if the handler raised
        assert rsp.Status == 0x0000
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_scp_handler_context(self):
        """Test handler event's context attribute."""
        attr = {}
        def handle(event):
            # capture the event attributes for inspection after release
            attr['assoc'] = event.assoc
            attr['context'] = event.context
            attr['request'] = event.request
            return 0x0000

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        assert rsp.Status == 0x0000
        assoc.release()
        assert assoc.is_released

        cx = attr['context']
        assert cx.context_id == 1
        # Verification SOP Class UID / Implicit VR Little Endian
        assert cx.abstract_syntax == '1.2.840.10008.1.1'
        assert cx.transfer_syntax == '1.2.840.10008.1.2'

        scp.shutdown()

    def test_scp_handler_assoc(self):
        """Test handler event's assoc attribute."""
        attr = {}
        def handle(event):
            attr['assoc'] = event.assoc
            attr['context'] = event.context
            attr['request'] = event.request
            return 0x0000

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        assert rsp.Status == 0x0000

        # Checked before release so the server-side association still exists
        scp_assoc = attr['assoc']
        assert scp_assoc == scp.active_associations[0]

        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_scp_handler_request(self):
        """Test handler event's request attribute."""
        attr = {}
        def handle(event):
            attr['assoc'] = event.assoc
            attr['context'] = event.context
            attr['request'] = event.request
            return 0x0000

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        rsp = assoc.send_c_echo()
        assert rsp.Status == 0x0000
        assoc.release()
        assert assoc.is_released

        req = attr['request']
        assert req.MessageID == 1
        assert isinstance(req, C_ECHO)

        scp.shutdown()

    def test_abort(self, caplog):
        """Test handler aborting the association"""
        def handle(event):
            event.assoc.abort()
            return 0x0000

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        with caplog.at_level(logging.DEBUG, logger='pynetdicom'):
            rsp = assoc.send_c_echo()
            # An empty Dataset response means no DIMSE reply was received
            assert rsp == Dataset()
            time.sleep(0.1)  # allow the abort to propagate to the requestor
            assert assoc.is_aborted
            scp.shutdown()

            assert "Association Aborted" in caplog.text
            assert "(A-P-ABORT)" not in caplog.text
            assert "Connection closed" not in caplog.text
            assert "DIMSE timeout reached" not in caplog.text

    def test_disconnection(self, caplog):
        """Test peer disconnecting during DIMSE messaging."""
        def handle(event):
            # simulate the peer dropping the TCP connection mid-exchange
            event.assoc.dul.socket.close()
            return 0x0000

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        with caplog.at_level(logging.DEBUG, logger='pynetdicom'):
            rsp = assoc.send_c_echo()
            assert rsp == Dataset()
            time.sleep(0.1)
            assert assoc.is_aborted
            scp.shutdown()

            assert "Connection closed" in caplog.text
            assert "Association Aborted (A-P-ABORT)" in caplog.text

    def test_timeout(self, caplog):
        """Test peer timing out during DIMSE messaging."""
        def handle(event):
            # handler sleeps longer than the requestor's DIMSE timeout
            time.sleep(0.1)
            return 0x0000

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.dimse_timeout = 0.05
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        with caplog.at_level(logging.DEBUG, logger='pynetdicom'):
            rsp = assoc.send_c_echo()
            assert rsp == Dataset()
            time.sleep(0.1)
            assert assoc.is_aborted
            scp.shutdown()

            assert "DIMSE timeout reached" in caplog.text
            assert "Aborting Association" in caplog.text

    def test_dimse_network_timeout(self, caplog):
        """Regression test for #460: invalid second abort."""
        def handle(event):
            time.sleep(0.1)
            return 0x0000

        handlers = [(evt.EVT_C_ECHO, handle)]

        self.ae = ae = AE()
        ae.dimse_timeout = 0.05
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        with caplog.at_level(logging.DEBUG, logger='pynetdicom'):
            rsp = assoc.send_c_echo()
            assert rsp == Dataset()
            time.sleep(0.1)
            assert assoc.is_aborted
            scp.shutdown()

            # The regression issued a second, state-invalid abort request
            assert "Invalid event 'Evt15' for the current state" not in caplog.text
            assert "DIMSE timeout reached" in caplog.text
            assert "Aborting Association" in caplog.text
| |
import ptypes
from ptypes import *
class __mixin(object):
    """Mixin for integer types: render the value as zero-padded hex plus decimal."""
    def summary(self):
        value = self.int()
        width = 2 * self.blocksize()  # two hex digits per byte
        return ('0x%%0%dx (%%d)' % width) % (value, value)
# Opaque machine-code blob (used as the target of the boot-catalog pointer).
class code_t(ptype.type): pass

# Fixed-width integer aliases; __mixin supplies the hex/decimal summary().
class slong(__mixin,pint.int64_t): pass
class ulong(__mixin,pint.uint64_t): pass
class sint(__mixin,pint.int32_t): pass
class uint(__mixin,pint.uint32_t): pass
class sshort(__mixin,pint.int16_t): pass
class ushort(__mixin,pint.uint16_t): pass
class schar(__mixin,pint.int8_t): pass
class uchar(__mixin,pint.uint8_t): pass
class unused(ptype.block):
    """Padding/reserved region; summary() reports whether every byte is zero."""
    def summary(self):
        # sum(bytearray(...)) yields the byte-value total on both py2 str and
        # py3 bytes.  The previous reduce(lambda a,b:a+ord(b), ...) broke on
        # python3: reduce is no longer a builtin and iterating bytes yields
        # ints, so ord() raised TypeError.
        result = sum(bytearray(self.serialize()))
        if result == 0:
            return '[...empty...]'
        return '[...NOT-empty...]'

# Convenience alias for dynamically sized blocks.
block = dyn.block
class string(pstr.string):
    """Fixed-length ASCII field; summary() shows the stripped text, quoted."""
    def summary(self):
        text = self.str()
        return '"%s"' % text.strip()
class wstring(pstr.wstring):
    """Wide-character string field (supplementary descriptors store UCS-2 text)."""
    class _object_(pstr.wchar_t):
        # each character occupies 2 bytes
        length = 2
        def set(self, value):
            # prepend a NUL high byte: stores a single ASCII character as a
            # big-endian 2-byte code unit
            self.value = b'\x00' + value
            return self
        def get(self):
            # the low byte carries the character
            # NOTE(review): on python3 indexing bytes returns an int, so the
            # ''.join in str() below would fail -- looks python2-oriented;
            # confirm the supported interpreter.
            return self.value[1]
    def summary(self):
        return '"%s"'%self.str().strip()
    def str(self):
        return ''.join(x.get() for x in self)
class __mixin(object):
    """Mixin for dual-endian structures: summarise the 'little'/'big' pair."""
    def summary(self):
        little = self['little'].int()
        big = self['big'].int()
        width = self['little'].blocksize() * 2
        if little == big:
            return ('0x%%0%dx (%%d)' % width) % (little, little)
        # the two encodings disagree -- the field is corrupt
        return ('0x%%0%dx <error-diff:0x%%0%dx>' % (width, width)) % (little, big)
# ISO9660 "both-byte order" fields: the same value recorded little- then
# big-endian back to back.
class dual_ushort(__mixin,pstruct.type):
    _fields_ = [(pint.littleendian(ushort),'little'),(pint.bigendian(ushort),'big')]
class dual_uint(__mixin,pstruct.type):
    _fields_ = [(pint.littleendian(uint),'little'),(pint.bigendian(uint),'big')]
class stringinteger(pstr.string):
    """Numeric field stored as ASCII digits (e.g. textual date components)."""
    def int(self):
        # Strip first: a whitespace-only field previously passed the
        # len(...) > 0 guard and then crashed int() with ValueError.
        text = self.str().strip()
        if text:
            return int(text)
        return 0
    def __int__(self):
        return self.int()
    def summary(self):
        return str(self.int())
class sector(unused):
    """One logical sector (2048 bytes)."""
    length = 2048
    def is_empty(self):
        # True when every byte is zero.  bytearray() is portable across
        # py2 str / py3 bytes; the previous reduce(...ord(y)...) failed on
        # python3 (no builtin reduce, and bytes iterate as ints).
        return sum(bytearray(self.value)) == 0
class sectorpointer(ptype.opointer_t):
    """Pointer stored as a logical sector number; dereferences to a byte offset."""
    _type_ = uint
    def _calculate_(self, sectornumber):
        # sector index -> absolute offset within the image
        return sectornumber * sector.length
def pointer(object):
    # Build a sectorpointer whose target is the given ptype.
    # NOTE(review): the parameter shadows the builtin `object`; harmless here,
    # but worth renaming if the signature is ever revisited.
    return dyn.clone(sectorpointer, _target_=object)
class iso_date(pstruct.type):
    """Textual date/time field: ASCII digit groups plus a signed GMT offset.

    Matches the 17-byte volume-descriptor date format -- assumed to follow
    ECMA-119 8.4.26.1 (gmtoffset in 15-minute units); TODO confirm.
    """
    _fields_ = [
        (dyn.clone(stringinteger, length=4), 'year'),
        (dyn.clone(stringinteger, length=2), 'month'),
        (dyn.clone(stringinteger, length=2), 'day'),
        (dyn.clone(stringinteger, length=2), 'hour'),
        (dyn.clone(stringinteger, length=2), 'minute'),
        (dyn.clone(stringinteger, length=2), 'second'),
        (dyn.clone(stringinteger, length=2), 'hundredth'),
        (schar, 'gmtoffset'),
    ]
    def summary(self):
        year,month,day=self['year'].int(),self['month'].int(),self['day'].int()
        hour,minute,second=self['hour'].int(),self['minute'].int(),self['second'].int()
        hundredth=self['hundredth'].int()
        gmtoffset = self['gmtoffset'].int()
        # positive offsets get an explicit '+'; negatives already carry '-'
        gmt = ('+%d'%gmtoffset) if gmtoffset > 0 else repr(gmtoffset)
        return '[%d-%d-%d %d:%02d:%02d.%02d GMT:%s]'%(year,month,day,hour,minute,second,hundredth,gmt)
class iso_path(pstruct.type):
    '''directory structure'''
    # Path table record.
    # NOTE(review): the identifier length is computed as length-8 (8 header
    # bytes), i.e. 'length' is treated as the whole record size; ECMA-119
    # path table records store the identifier length directly in byte 0 --
    # confirm which convention the producer uses.
    _fields_ = [
        (uchar, 'length'),
        (uchar, 'ext_length'),
        (uint, 'first_sector'),
        (ushort, 'parent'),
        (lambda s: dyn.clone(string,length=s['length'].int()-8), 'identifier'),
        (dyn.align(2), 'padding'),  # records are aligned to an even offset
    ]
class iso_directory_record(pstruct.type):
    """Directory record: variable-length entry describing one file/directory."""
    class date(pstruct.type):
        # 7-byte binary recording date (distinct from the textual iso_date)
        _fields_ = [
            (uchar, 'year'),
            (uchar, 'month'),
            (uchar, 'day'),
            (uchar, 'hour'),
            (uchar, 'minute'),
            (uchar, 'second'),
            (schar, 'gmtoffset'),
        ]
        def summary(self):
            # year is stored as an offset from 1900
            year,month,day=self['year'].int()+1900,self['month'].int(),self['day'].int()
            hour,minute,second=self['hour'].int(),self['minute'].int(),self['second'].int()
            gmtoffset = self['gmtoffset'].int()
            gmt = ('+%d'%gmtoffset) if gmtoffset > 0 else repr(gmtoffset)
            return '[%d-%d-%d %d:%02d:%02d GMT:%s]'%(year,month,day,hour,minute,second,gmt)
    class __flags(pbinary.struct):
        # file-flags bitfield (listed most-significant bit first)
        _fields_ = [
            (1,'multi-extent'),
            (2,'unused'),
            (1,'protection'),
            (1,'recordformat'),
            (1,'associated'),
            (1,'directory'),
            (1,'existence'),
        ]
    _fields_ = [
        (uchar, 'length'),
        (uchar, 'ext_attr_length'),
        (dual_uint, 'extent'),       # first logical sector of the data
        (dual_uint, 'size'),         # data length in bytes
        (date, 'date'),
        (__flags, 'flags'),
        (uchar, 'file_unit_size'),
        (uchar, 'interleave'),
        (dual_ushort, 'volume_sequence_number'),
        (uchar, 'name_len'),
        (lambda s: dyn.clone(string, length=s['name_len'].li.int()), 'name'),
        (dyn.align(2), 'padding'),
    ]
#/* can't take sizeof(iso_directory_record), because of possible alignment
# of the last entry (34 instead of 33) */
#define ISO_DIRECTORY_RECORD_SIZE 33
class iso_volume_descriptor(pstruct.type):
    """Generic volume descriptor: type byte, 5-byte id, version, typed payload."""
    def __data(self):
        # choose the concrete payload type from the leading type byte,
        # defaulting to the registry's `unknown` filler
        res = self['type'].li.int()
        return volume_descriptor.withdefault(res, type=res)
    _fields_ = [
        (uchar, 'type'),
        (dyn.clone(string,length=5), 'id'),  # standard identifier ('CD001')
        (uchar, 'version'),
        (__data, 'data'),
    ]
class volume_descriptor(ptype.definition):
    """Registry mapping the descriptor type byte to its payload class."""
    cache = {}
    class unknown(unused):
        # unrecognised descriptor types: skip the remaining payload bytes
        length = 2041
    default = unknown
class array(parray.terminated):
    """Sequence of volume descriptors, ended by the type-0xff terminator."""
    _object_ = iso_volume_descriptor
    def isTerminator(self, value):
        return value['type'].int() == 0xff
@volume_descriptor.define
class iso_volume_descriptor_terminator(unused):
    # type 0xff ends the descriptor sequence; its body is ignored padding
    type = 0xff
    length = 2041
@volume_descriptor.define
class iso_primary_descriptor(pstruct.type):
    """Primary volume descriptor (type 1): main metadata for the volume."""
    type = 1
    _fields_ = [
        (uchar, 'unused1'),
        (dyn.clone(string,length=32), 'system_id'),
        (dyn.clone(string,length=32), 'volume_id'),
        (dyn.clone(unused,length=8), 'unused2'),
        (dual_uint, 'volume_space_size'),   # size of the volume in sectors
        (dyn.clone(unused,length=32), 'unused3'),
        (dual_ushort, 'volume_set_size'),
        (dual_ushort, 'volume_sequence_number'),
        (dual_ushort, 'logical_block_size'),
        (dual_uint, 'path_table_size'),
        # sector numbers of the little-endian (L) and big-endian (M) path tables
        (uint, 'type_l_path_table'),
        (uint, 'opt_type_l_path_table'),
        (uint, 'type_m_path_table'),
        (uint, 'opt_type_m_path_table'),
        (iso_directory_record, 'root_directory_record'),
        (dyn.clone(string,length=128), 'volume_set_id'),
        (dyn.clone(string,length=128), 'publisher_id'),
        (dyn.clone(string,length=128), 'preparer_id'),
        (dyn.clone(string,length=128), 'application_id'),
        (dyn.clone(string,length=37), 'copyright_file_id'),
        (dyn.clone(string,length=37), 'abstract_file_id'),
        (dyn.clone(string,length=37), 'bibliographic_file_id'),
        (iso_date, 'creation_date'),
        (iso_date, 'modification_date'),
        (iso_date, 'expiration_date'),
        (iso_date, 'effective_date'),
        (uchar, 'file_structure_version'),
        (uchar, 'unused4'),
        (dyn.clone(unused, length=512), 'application_data'),
        (dyn.clone(unused,length=653), 'unused5'),  # pad to the 2048-byte sector
    ]
@volume_descriptor.define
class iso_boot_record(pstruct.type):
    """Boot record descriptor (type 0); boot_catalog is a sector pointer."""
    type = 0
    _fields_ = [
        (dyn.clone(string,length=32), 'system_id'),
        (dyn.clone(unused,length=32), 'boot_id'),
        # sector number of the (presumably El Torito) boot catalog -- the
        # pointed-to data is treated as an opaque 2048-byte code blob
        (pointer(dyn.clone(code_t,mode=16, length=2048)), 'boot_catalog'),
        (dyn.clone(unused,length=1973), 'unused2'),
    ]
@volume_descriptor.define
class iso_supplementary_descriptor(pstruct.type):
    """Supplementary volume descriptor (type 2): wide-character identifiers."""
    type = 2
    _fields_ = [
        (uchar, 'flags'),
        (dyn.clone(wstring,length=16), 'system_id'),
        (dyn.clone(wstring,length=16), 'volume_id'),
        (block(8), 'unused2'),
        (dual_uint, 'volume_space_size'),
        (block(32), 'escape'),  # escape sequences selecting the character set
        # NOTE(review): volume_set_size/logical_block_size are plain uint here
        # but dual_ushort in the primary descriptor -- confirm intentional.
        (uint, 'volume_set_size'),
        (dual_ushort, 'volume_sequence_number'),
        (uint, 'logical_block_size'),
        (dual_uint, 'path_table_size'),
        (uint, 'type_l_path_table'),
        (uint, 'opt_type_l_path_table'),
        (uint, 'type_m_path_table'),
        (uint, 'opt_type_m_path_table'),
        (iso_directory_record, 'root_directory_record'),
        (dyn.clone(string,length=128), 'volume_set_id'),
        (dyn.clone(string,length=128), 'publisher_id'),
        (dyn.clone(string,length=128), 'preparer_id'),
        (dyn.clone(string,length=128), 'application_id'),
        (dyn.clone(string,length=37), 'copyright_file_id'),
        (dyn.clone(string,length=37), 'abstract_file_id'),
        (dyn.clone(string,length=37), 'bibliographic_file_id'),
        (iso_date, 'creation_date'),
        (iso_date, 'modification_date'),
        (iso_date, 'expiration_date'),
        (iso_date, 'effective_date'),
        (uchar, 'file_structure_version'),
        (uchar, 'unused4'),
        (dyn.clone(unused,length=512), 'application_data'),
        (dyn.clone(unused,length=653), 'unused5'),
    ]
@volume_descriptor.define
class iso_volume_partition(pstruct.type):
    """Volume partition descriptor (type 3)."""
    type = 3
    _fields_ = [
        (uchar, 'unused'),
        (dyn.clone(string,length=32), 'system_id'),
        (dyn.clone(string,length=32), 'partition_id'),
        (dual_uint, 'partition_location'),
        (dual_uint, 'partition_size'),
        # BUGFIX: this filler was also named 'partition_size', colliding with
        # the real field above and making it unreachable by name.
        (dyn.clone(unused,length=1960), 'unused2'),
    ]
# Dead code: guarded by `if False`, never executed -- kept for reference.
if False:
    class iso_sierra_primary_descriptor(pstruct.type):
        # Older "Sierra"-style primary descriptor layout variant.
        _fields_ = [
            (uchar, 'unused1'),
            (block(32), 'system_id'),
            (block(32), 'volume_id'),
            (dyn.clone(unused,length=8), 'unused2'),
            (dual_uint, 'volume_space_size'),
            (dyn.clone(unused,length=32), 'unused3'),
            (uint, 'volume_set_size'),
            (dual_ushort, 'volume_sequence_number'),
            (uint, 'logical_block_size'),
            (dual_uint, 'path_table_size'),
            (uint, 'type_l_path_table'),
            (uint, 'opt_type_l_path_table'),
            (uint, 'unknown2'),
            (uint, 'unknown3'),
            (uint, 'type_m_path_table'),
            (uint, 'opt_type_m_path_table'),
            (uint, 'unknown4'),
            (uint, 'unknown5'),
#            (block(34), 'root_directory_record'),
            (iso_directory_record, 'root_directory_record'),
            (dyn.clone(string,length=128), 'volume_set_id'),
            (dyn.clone(string,length=128), 'publisher_id'),
            (dyn.clone(string,length=128), 'preparer_id'),
            (dyn.clone(string,length=128), 'application_id'),
            (dyn.clone(string,length=64), 'copyright_id'),
            (block(16), 'creation_date'),
            (block(16), 'modification_date'),
            (block(16), 'expiration_date'),
            (block(16), 'effective_date'),
            (uchar, 'file_structure_version'),
            (dyn.clone(unused,length=1193), 'unused4'),
        ]
    class iso_extended_attributes(pstruct.type):
        # Extended attribute record: ownership, permissions, timestamps.
        _fields_ = [
            (uint, 'owner'),
            (uint, 'group'),
            (ushort, 'perm'),
            (iso_date, 'ctime'),
            (iso_date, 'mtime'),
            (iso_date, 'xtime'),
            (iso_date, 'ftime'),
            (uchar, 'recfmt'),
            (uchar, 'recattr'),
            (uint, 'reclen'),
            (dyn.clone(string,length=32), 'system_id'),
            (block(64), 'system_use'),
            (uchar, 'version'),
            (uchar, 'len_esc'),
            (block(64), 'reserved'),
            (uint, 'len_au'),
        ]
class File(pstruct.type):
    """Top-level ISO image: 16 reserved sectors (system area), then the
    volume descriptor sequence."""
    _fields_ = [
        (dyn.array(sector, 16), 'unused'),
        (volume_descriptor.array, 'desc'),
    ]
class section_validation_entry(pstruct.type):
    """Boot-catalog validation entry (first record of the catalog)."""
    _fields_ = [
        (uchar, 'header_id'),
        (uchar, 'platform_id'),
        (ushort, 'reserved'),  # was misspelled 'resered'
        (dyn.clone(string, length=24), 'manufacturer_id'),
        (ushort, 'checksum'),
        (ushort, 'key'),
    ]
class section_initial_entry(pstruct.type):
    """Boot-catalog initial/default entry: where and how to load the boot image."""
    _fields_ = [
        (uchar, 'boot_indicator'),
        (uchar, 'media_type'),
        (ushort, 'load_segment'),
        (uchar, 'system_type'),
        (uchar, 'unused'),
        (ushort, 'sector_count'),
        (uint, 'load_rba'),  # start of the boot image (relative block address)
        (uchar,'unused2'),
    ]
class section_header_entry(pstruct.type):
    """El Torito boot-catalog section header."""
    _fields_ = [
        (uchar, 'indicator'),
        (uchar, 'platform_id'),
        (ushort, 'section_count'),      # entries that follow this header
        (dyn.clone(string, length=28), 'id_string'),
    ]
class section_entry(pstruct.type):
    """El Torito boot-catalog section entry."""
    _fields_ = [
        (uchar, 'indicator'),
        (uchar, 'media_type'),
        (ushort, 'load_segment'),
        (uchar, 'system_type'),
        (uchar, 'unused'),
        (ushort, 'sector_count'),
        (uint, 'load_rba'),
        (uchar, 'selection_criteria'),
        (dyn.block(19), 'vendor_criteria'),     # vendor-unique selection data
    ]
class section_entry_extension(pstruct.type):
    """El Torito boot-catalog section entry extension."""
    class __field(pbinary.struct):
        # Bit layout of the flags byte; 'record_follows' marks that another
        # extension record follows this one.
        _fields_ = [(4,'unused'),(1,'record_follows'),(2,'unused2'),(1,'wtf')]
    _fields_ = [
        (uchar, 'indicator'),
        (uchar, 'field'),                       # flags byte (see __field)
        (dyn.block(30), 'vendor_criteria'),     # vendor-unique criteria
    ]
if __name__ == '__main__':
    # Ad-hoc debugging harness: parse a local ISO image and poke at the
    # El Torito boot catalog.  Not part of the library API.
    import ptypes,fs.iso9660 as iso9660
    ptypes.setsource(ptypes.provider.WindowsFile('~/downloads/6euj41uc.iso', 'r'))
    z = iso9660.File()
    z = z.l
    boot_sector = z['desc'][1]['data']['boot_catalog']
    if False:
        # Disabled: cast the raw boot-catalog sector to a validation entry.
        a = iso9660.sector(offset=boot_sector*2048).l
        print(a.cast(iso9660.section_validation_entry))
    # print(z['iso'])
    # print([x for x in z['unused'] if not x.is_empty()])
    # date = z['primary']['root_directory_record']['date']
    # print(date)
    # print(date['year'].summary())
    a = z['desc'][1]['data']['boot_catalog']
    print(a.cast(iso9660.sectorpointer))
    if False:
        # Disabled experiments kept for reference.
        # x = iso_volume_descriptor()
        # x = block(32768)()
        # print(x.l.hexdump())
        # x = iso_volume_descriptor(source=ptypes.file('~/downloads/6euj41uc.iso', 'r'))
        # x.setoffset(32768)
        # print(x.l)
        # print(iso_volume_descriptor().a.size())
        #
        # print(z['desc'][0])
        # NOTE(review): File defines only 'unused' and 'desc' keys, so
        # z['primary'] would raise if this branch were enabled -- confirm
        # the intended field name before re-enabling.
        p = z['primary']
        print(p)
        x = p['type_l_path_table']
        print(x)
        x = iso9660.pointer(iso9660.sector)()
        x.set(0x16)
        print(x.d.l.hexdump())
        a = iso9660.sector(offset=p.getoffset()+p.size())
        print(a.l)
        print(a.cast(iso9660.iso_volume_descriptor))
        x = a
        x = iso9660.sector(offset=x.getoffset()+x.size())
        print(x.l.cast(iso9660.iso_volume_descriptor))
        print(x.l)
| |
"""Windows directory watcher API wrapper module
"""
from __future__ import with_statement
import ctypes.wintypes
import os
import Queue
import select
import thread
import threading
# Windows constants
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
# ReadDirectoryChangesW dwNotifyFilter bits.
FILE_NOTIFY_CHANGE_FILE_NAME = 0x01
FILE_NOTIFY_CHANGE_DIR_NAME = 0x02
FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x04
FILE_NOTIFY_CHANGE_SIZE = 0x08
FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010
FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020
FILE_NOTIFY_CHANGE_CREATION = 0x040
FILE_NOTIFY_CHANGE_SECURITY = 0x0100
# CreateFileW flags, access rights and share modes.
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OVERLAPPED = 0x40000000
FILE_LIST_DIRECTORY = 0x01
FILE_SHARE_READ = 0x01
FILE_SHARE_WRITE = 0x02
FILE_SHARE_DELETE = 0x04
OPEN_EXISTING = 3
# FILE_NOTIFY_INFORMATION.Action values.
FILE_ACTION_CREATED = 1
FILE_ACTION_DELETED = 2
FILE_ACTION_MODIFIED = 3
FILE_ACTION_RENAMED_OLD_NAME = 4
FILE_ACTION_RENAMED_NEW_NAME = 5
FILE_ACTION_OVERFLOW = 0xFFFF
class OVERLAPPED(ctypes.Structure):
    """Structure for async IO.

    Mirrors the Win32 OVERLAPPED record; here it is only used as a pointer
    argument type for ReadDirectoryChangesW and CancelIoEx.
    """
    _fields_ = [('Internal', ctypes.c_void_p),
                ('InternalHigh', ctypes.c_void_p),
                ('Offset', ctypes.wintypes.DWORD),
                ('OffsetHigh', ctypes.wintypes.DWORD),
                ('Pointer', ctypes.c_void_p),
                ('hEvent', ctypes.wintypes.HANDLE), ]
def _errcheck_bool(value, func, args): # pylint: disable=W0613
"""Helper function for checking bool value."""
if not value:
raise ctypes.WinError()
return args
def _errcheck_handle(value, func, args):  # pylint: disable=W0613
    """ctypes errcheck: raise WinError when the returned handle is NULL or
    INVALID_HANDLE_VALUE, otherwise pass the arguments through."""
    if not value or value == INVALID_HANDLE_VALUE:
        raise ctypes.WinError()
    return args
def _errcheck_dword(value, func, args): # pylint: disable=W0613
"""Helper function for checking DWORD value."""
if value == 0xFFFFFFFF:
raise ctypes.WinError()
return args
# pylint: disable=C0103
# kernel32 bindings with restype/argtypes declared so ctypes marshals
# arguments correctly; errcheck hooks turn failures into WindowsError.
ReadDirectoryChangesW = ctypes.windll.kernel32.ReadDirectoryChangesW
ReadDirectoryChangesW.restype = ctypes.wintypes.BOOL
ReadDirectoryChangesW.errcheck = _errcheck_bool
ReadDirectoryChangesW.argtypes = (
    ctypes.wintypes.HANDLE,  # hDirectory
    ctypes.c_void_p,  # lpBuffer
    ctypes.wintypes.DWORD,  # nBufferLength
    ctypes.wintypes.BOOL,  # bWatchSubtree
    ctypes.wintypes.DWORD,  # dwNotifyFilter
    ctypes.POINTER(ctypes.wintypes.DWORD),  # lpBytesReturned
    ctypes.POINTER(OVERLAPPED),  # lpOverlapped
    ctypes.c_void_p  # FileIOCompletionRoutine # lpCompletionRoutine
)
# pylint: disable=C0103
CreateFileW = ctypes.windll.kernel32.CreateFileW
CreateFileW.restype = ctypes.wintypes.HANDLE
CreateFileW.errcheck = _errcheck_handle
CreateFileW.argtypes = (
    ctypes.wintypes.LPCWSTR,  # lpFileName
    ctypes.wintypes.DWORD,  # dwDesiredAccess
    ctypes.wintypes.DWORD,  # dwShareMode
    ctypes.c_void_p,  # lpSecurityAttributes
    ctypes.wintypes.DWORD,  # dwCreationDisposition
    ctypes.wintypes.DWORD,  # dwFlagsAndAttributes
    ctypes.wintypes.HANDLE  # hTemplateFile
)
# pylint: disable=C0103
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.restype = ctypes.wintypes.BOOL
CloseHandle.argtypes = (
    ctypes.wintypes.HANDLE,  # hObject
)
# pylint: disable=C0103
# CancelIoEx is used by close() to break the blocking directory read.
CancelIoEx = ctypes.windll.kernel32.CancelIoEx
CancelIoEx.restype = ctypes.wintypes.BOOL
CancelIoEx.errcheck = _errcheck_bool
CancelIoEx.argtypes = (
    ctypes.wintypes.HANDLE,  # hObject
    ctypes.POINTER(OVERLAPPED)  # lpOverlapped
)
# pylint: disable=C0103
class FILE_NOTIFY_INFORMATION(ctypes.Structure):
    """Info for file notification.

    Mirrors the Win32 FILE_NOTIFY_INFORMATION record.  FileName is declared
    with length 1 but is really a variable-length UTF-16 string of
    FileNameLength bytes; the parser reads it from the field's offset.
    """
    _fields_ = [("NextEntryOffset", ctypes.wintypes.DWORD),
                ("Action", ctypes.wintypes.DWORD),
                ("FileNameLength", ctypes.wintypes.DWORD),
                ("FileName", (ctypes.c_char * 1))]
PFILE_NOTIFY_INFORMATION = ctypes.POINTER(FILE_NOTIFY_INFORMATION)
# Open flags for the watched directory handle.
WATCHDOG_FILE_FLAGS = FILE_FLAG_BACKUP_SEMANTICS
# Share mode: allow other processes full access while we hold the handle.
WATCHDOG_FILE_SHARE_FLAGS = (FILE_SHARE_READ |
                             FILE_SHARE_WRITE |
                             FILE_SHARE_DELETE)
# Notification filter: subscribe to every change type we know about.
WATCHDOG_FILE_NOTIFY_FLAGS = (FILE_NOTIFY_CHANGE_FILE_NAME |
                              FILE_NOTIFY_CHANGE_DIR_NAME |
                              FILE_NOTIFY_CHANGE_ATTRIBUTES |
                              FILE_NOTIFY_CHANGE_SIZE |
                              FILE_NOTIFY_CHANGE_LAST_WRITE |
                              FILE_NOTIFY_CHANGE_SECURITY |
                              FILE_NOTIFY_CHANGE_LAST_ACCESS |
                              FILE_NOTIFY_CHANGE_CREATION)
# Size of the raw notification buffer handed to ReadDirectoryChangesW.
BUFFER_SIZE = 2048
class InotifyEvent(object):
    """One directory-change notification, exposing the same predicate
    properties as an inotify event object."""

    def __init__(self, action, src_path):
        """Record the FILE_ACTION_* code and the path it applies to."""
        self.action = action
        self.src_path = src_path

    @property
    def is_create(self):
        """Returns true if the file was created."""
        return self.action == FILE_ACTION_CREATED

    @property
    def is_delete(self):
        """Returns true if the file was deleted."""
        return self.action == FILE_ACTION_DELETED

    @property
    def is_delete_self(self):
        """Returns true if the file was deleted."""
        return self.is_delete

    @property
    def is_modify(self):
        """Returns true if the file was modified."""
        return self.action == FILE_ACTION_MODIFIED

    @property
    def is_attrib(self):
        """Attribute changes are reported as plain modifications."""
        return self.is_modify

    @property
    def is_moved_from(self):
        """Returns true if the file was moved (old-name record)."""
        return self.action == FILE_ACTION_RENAMED_OLD_NAME

    @property
    def is_moved_to(self):
        """Returns true if the file was moved (new-name record)."""
        return self.action == FILE_ACTION_RENAMED_NEW_NAME

    @property
    def is_move(self):
        """Returns true if the file was moved (either rename record)."""
        return self.is_moved_from or self.is_moved_to

    # The remaining inotify predicates have no ReadDirectoryChangesW
    # equivalent here and are always False.

    @property
    def is_close_write(self):
        """Returns false. Not used."""
        return False

    @property
    def is_close_nowrite(self):
        """Returns false. Not used."""
        return False

    @property
    def is_access(self):
        """Returns false. Not used."""
        return False

    @property
    def is_move_self(self):
        """Returns false. Not used."""
        return False

    @property
    def is_ignored(self):
        """Returns false. Not used."""
        return False

    @property
    def is_directory(self):
        """Returns false. Not used."""
        return False
class ReadDirChange(object):
"""ReadDirectoryChangesW system interface."""
def __init__(self):
"""Initialize a new ReadDirChange object.
"""
self._path = ""
self._handle = 0
self._shouldexit = False
self._eventsqueue = Queue.Queue(100)
self._waitevent = threading.Event()
def fileno(self):
"""The directory handle associated with the inotify instance."""
return self._handle
def close(self):
"""Close the inotify directory handle.
NOTE: After call this, this object will be unusable.
"""
self._shouldexit = True
try:
CancelIoEx(self._handle, None)
except: # pylint: disable=W0702
pass
try:
CloseHandle(self._handle)
except: # pylint: disable=W0702
pass
def add_watch(self, path):
"""
Adds a watch for the given path to monitor events specified by the
mask.
:param path:
Path to monitor
:type path:
``str``
:param event_mask:
*optional* Bit mask of the request events.
:type event_mask:
``int``
:returns:
Unique watch descriptor identifier
:rtype:
``int``
"""
self._path = os.path.normpath(path)
self._handle = CreateFileW(self._path,
FILE_LIST_DIRECTORY,
WATCHDOG_FILE_SHARE_FLAGS,
None, OPEN_EXISTING,
WATCHDOG_FILE_FLAGS,
None)
thread.start_new_thread(self._read_events_tread_proc, ())
return self._handle
def remove_watch(self, watch_id): # pylint: disable=W0613
"""
Removes a watch.
:param watch_id:
Watch descriptor returned by :meth:`~Inotify.add_watch`
:type watch_id:
``int``
:returns:
``None``
"""
self.close()
def wait(self, timeout):
"""wait for file changes."""
if timeout == -1:
result = self._waitevent.wait()
else:
result = self._waitevent.wait(float(timeout/1000))
if result:
self._waitevent.clear()
return result
def _read_events_tread_proc(self, *args): # pylint: disable=W0613
"""Thread function to monitor file changes."""
while True:
events = self._get_directory_change_events()
if not events:
continue
for event in events:
self._eventsqueue.put(event)
self._waitevent.set()
def _get_directory_change_events(self):
"""Gets changes to the directory."""
event_buffer = ctypes.create_string_buffer(BUFFER_SIZE)
nbytes = ctypes.wintypes.DWORD()
ReadDirectoryChangesW(self._handle,
ctypes.byref(event_buffer),
len(event_buffer),
True,
WATCHDOG_FILE_NOTIFY_FLAGS,
ctypes.byref(nbytes),
None,
None)
events = self._create_events_from_byte_array(event_buffer.raw,
int(nbytes.value))
return [InotifyEvent(action, os.path.join(self._path, path))
for action, path in events]
def _create_events_from_byte_array(self, event_buffer, buffer_len):
"""Parse the file change event buffer."""
results = []
while buffer_len > 0:
pfni = ctypes.cast(event_buffer, PFILE_NOTIFY_INFORMATION)[0]
offset = FILE_NOTIFY_INFORMATION.FileName.offset
ptr = ctypes.addressof(pfni) + offset
filename_unicode = ctypes.string_at(ptr, pfni.FileNameLength)
filename_ascii = filename_unicode.decode('utf-16')
results.append((pfni.Action, filename_ascii))
numToSkip = pfni.NextEntryOffset
if numToSkip <= 0:
break
event_buffer = event_buffer[numToSkip:]
buffer_len -= numToSkip
return results
# pylint: disable=W0613
def read_events(self, event_buffer_size=BUFFER_SIZE):
"""
Reads events from inotify and yields them.
:param event_buffer_size:
not used
:type event_buffer_size:
``int``
:returns:
List of :class:`InotifyEvent` instances
:rtype:
``list``
"""
if self._eventsqueue.empty():
return []
event_list = []
while not self._eventsqueue.empty():
event = self._eventsqueue.get(False)
event_list.append(event)
return event_list
class Poll(object):
    """Minimal select.poll work-alike backed by a ReadDirChange watcher."""

    def __init__(self):
        """Start with no registered watcher and an empty mask."""
        self._inotify = None
        self._mask = 0

    def register(self, inotify, mask):
        """Remember the watcher object and event mask to poll on."""
        self._inotify = inotify
        self._mask = mask

    def poll(self, timeout):
        """Delegate to the registered watcher's wait(); *timeout* is in
        milliseconds, matching select.poll semantics."""
        return self._inotify.wait(timeout)
def poll():
    """Creates a new poll object."""
    return Poll()
# Monkey-patch select so code written against select.poll()/POLLIN
# transparently uses the directory-watcher implementation above.
select.poll = poll
select.POLLIN = 1
| |
from typing import List, Optional, Tuple
import graphene
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import transaction
from django.db.models import Prefetch
from django.utils import timezone
from ...account.error_codes import AccountErrorCode
from ...checkout import models
from ...checkout.error_codes import CheckoutErrorCode
from ...checkout.utils import (
abort_order_data,
add_promo_code_to_checkout,
add_variant_to_checkout,
add_voucher_to_checkout,
change_billing_address_in_checkout,
change_shipping_address_in_checkout,
clean_checkout,
create_order,
get_user_checkout,
get_valid_shipping_methods_for_checkout,
get_voucher_for_checkout,
prepare_order_data,
recalculate_checkout_discount,
remove_promo_code_from_checkout,
remove_voucher_from_checkout,
)
from ...core import analytics
from ...core.exceptions import InsufficientStock
from ...core.permissions import OrderPermissions
from ...core.taxes import TaxError
from ...core.utils.url import validate_storefront_url
from ...discount import models as voucher_model
from ...payment import PaymentError, gateway, models as payment_models
from ...payment.interface import AddressData
from ...payment.utils import store_customer_id
from ...product import models as product_models
from ..account.i18n import I18nMixin
from ..account.types import AddressInput, User
from ..core.mutations import (
BaseMutation,
ClearMetaBaseMutation,
ModelMutation,
UpdateMetaBaseMutation,
)
from ..core.types.common import CheckoutError
from ..core.utils import from_global_id_strict_type
from ..order.types import Order
from ..product.types import ProductVariant
from ..shipping.types import ShippingMethod
from .types import Checkout, CheckoutLine
# Shared message for mutations touching shipping on a non-shippable checkout.
ERROR_DOES_NOT_SHIP = "This checkout doesn't need shipping"
def clean_shipping_method(
    checkout: models.Checkout, method: Optional[models.ShippingMethod], discounts
) -> bool:
    """Return True when *method* may be used for *checkout*.

    A missing method is always considered valid.  A method supplied for a
    checkout that needs no shipping, or that has no shipping address yet,
    raises ValidationError; otherwise the method must be among the valid
    ones for this checkout.
    """
    if not method:
        # No shipping method was provided, so there is nothing to validate.
        return True

    if not checkout.is_shipping_required():
        raise ValidationError(
            ERROR_DOES_NOT_SHIP, code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED
        )

    if not checkout.shipping_address:
        raise ValidationError(
            "Cannot choose a shipping method for a checkout without the "
            "shipping address.",
            code=CheckoutErrorCode.SHIPPING_ADDRESS_NOT_SET,
        )

    applicable = get_valid_shipping_methods_for_checkout(checkout, discounts)
    return method in applicable
def update_checkout_shipping_method_if_invalid(checkout: models.Checkout, discounts):
    """Drop or replace the checkout's shipping method when it no longer
    applies (empty checkout, shipping not required, or method invalid)."""
    # An empty or non-shippable checkout keeps no shipping method at all.
    if checkout.quantity == 0 or not checkout.is_shipping_required():
        checkout.shipping_method = None
        checkout.save(update_fields=["shipping_method"])

    if not clean_shipping_method(
        checkout=checkout, method=checkout.shipping_method, discounts=discounts
    ):
        # Fall back to the first valid alternative, if any.
        replacement = get_valid_shipping_methods_for_checkout(
            checkout, discounts
        ).first()
        checkout.shipping_method = replacement
        checkout.save(update_fields=["shipping_method"])
def check_lines_quantity(variants, quantities):
    """Validate each (variant, quantity) pair: quantity must be
    non-negative, within the configured per-line limit, and covered by
    available stock.  Raises ValidationError on the first violation."""
    for variant, quantity in zip(variants, quantities):
        if quantity < 0:
            raise ValidationError(
                {
                    "quantity": ValidationError(
                        "The quantity should be higher than zero.",
                        code=CheckoutErrorCode.ZERO_QUANTITY,
                    )
                }
            )
        if quantity > settings.MAX_CHECKOUT_LINE_QUANTITY:
            raise ValidationError(
                {
                    "quantity": ValidationError(
                        "Cannot add more than %d times this item."
                        "" % settings.MAX_CHECKOUT_LINE_QUANTITY,
                        code=CheckoutErrorCode.QUANTITY_GREATER_THAN_LIMIT,
                    )
                }
            )
        try:
            variant.check_quantity(quantity)
        except InsufficientStock as err:
            message = "Could not add item " + (
                "%(item_name)s. Only %(remaining)d remaining in stock."
                % {
                    "remaining": err.item.quantity_available,
                    "item_name": err.item.display_product(),
                }
            )
            raise ValidationError({"quantity": ValidationError(message, code=err.code)})
# Input type for a single checkout line: which product variant and how many
# units.  (Documented with comments, not a docstring, because graphene uses
# class docstrings as the GraphQL schema description.)
class CheckoutLineInput(graphene.InputObjectType):
    quantity = graphene.Int(required=True, description="The number of items purchased.")
    variant_id = graphene.ID(required=True, description="ID of the product variant.")
# Input payload for CheckoutCreate: the lines are required; email and both
# addresses are optional and may be defaulted from the authenticated user.
class CheckoutCreateInput(graphene.InputObjectType):
    lines = graphene.List(
        CheckoutLineInput,
        description=(
            "A list of checkout lines, each containing information about "
            "an item in the checkout."
        ),
        required=True,
    )
    email = graphene.String(description="The customer's email address.")
    shipping_address = AddressInput(
        description=(
            "The mailing address to where the checkout will be shipped. "
            "Note: the address will be ignored if the checkout "
            "doesn't contain shippable items."
        )
    )
    billing_address = AddressInput(description="Billing address of the customer.")
# Mutation that creates a checkout, or returns the user's existing active
# one (signalled through the `created` field).
class CheckoutCreate(ModelMutation, I18nMixin):
    created = graphene.Field(
        graphene.Boolean,
        description=(
            "Whether the checkout was created or the current active one was returned. "
            "Refer to checkoutLinesAdd and checkoutLinesUpdate to merge a cart "
            "with an active checkout."
        ),
    )
    class Arguments:
        input = CheckoutCreateInput(
            required=True, description="Fields required to create checkout."
        )
    class Meta:
        description = "Create a new checkout."
        model = models.Checkout
        return_field_name = "checkout"
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
    @classmethod
    def process_checkout_lines(
        cls, lines
    ) -> Tuple[List[product_models.ProductVariant], List[int]]:
        """Resolve line inputs into variants and quantities, validating
        quantities against stock and configured limits."""
        variant_ids = [line.get("variant_id") for line in lines]
        variants = cls.get_nodes_or_error(
            variant_ids,
            "variant_id",
            ProductVariant,
            qs=product_models.ProductVariant.objects.prefetch_related(
                "product__product_type"
            ),
        )
        quantities = [line.get("quantity") for line in lines]
        check_lines_quantity(variants, quantities)
        return variants, quantities
    @classmethod
    def retrieve_shipping_address(cls, user, data: dict) -> Optional[models.Address]:
        """Return the validated shipping address from input, else the
        authenticated user's default, else None."""
        if "shipping_address" in data:
            return cls.validate_address(data["shipping_address"])
        if user.is_authenticated:
            return user.default_shipping_address
        return None
    @classmethod
    def retrieve_billing_address(cls, user, data: dict) -> Optional[models.Address]:
        """Return the validated billing address from input, else the
        authenticated user's default, else None."""
        if "billing_address" in data:
            return cls.validate_address(data["billing_address"])
        if user.is_authenticated:
            return user.default_billing_address
        return None
    @classmethod
    def clean_input(cls, info, instance: models.Checkout, data):
        """Extend the base cleaning with line resolution, address defaults
        and the authenticated user's email."""
        cleaned_input = super().clean_input(info, instance, data)
        user = info.context.user
        # Resolve and process the lines, retrieving the variants and quantities
        lines = data.pop("lines", None)
        if lines:
            (
                cleaned_input["variants"],
                cleaned_input["quantities"],
            ) = cls.process_checkout_lines(lines)
        cleaned_input["shipping_address"] = cls.retrieve_shipping_address(user, data)
        cleaned_input["billing_address"] = cls.retrieve_billing_address(user, data)
        # Use authenticated user's email as default email
        if user.is_authenticated:
            email = data.pop("email", None)
            cleaned_input["email"] = email or user.email
        return cleaned_input
    @classmethod
    def save_addresses(cls, instance: models.Checkout, cleaned_input: dict):
        """Persist the cleaned addresses and attach copies to the checkout.
        The shipping address is only stored when shipping is required."""
        shipping_address = cleaned_input.get("shipping_address")
        billing_address = cleaned_input.get("billing_address")
        updated_fields = []
        if shipping_address and instance.is_shipping_required():
            shipping_address.save()
            instance.shipping_address = shipping_address.get_copy()
            updated_fields.append("shipping_address")
        if billing_address:
            billing_address.save()
            instance.billing_address = billing_address.get_copy()
            updated_fields.append("billing_address")
        # Note django will simply return if the list is empty
        instance.save(update_fields=updated_fields)
    @classmethod
    @transaction.atomic()
    def save(cls, info, instance: models.Checkout, cleaned_input):
        """Create the checkout, its lines and its addresses atomically."""
        # Create the checkout object
        instance.save()
        # Retrieve the lines to create
        variants = cleaned_input.get("variants")
        quantities = cleaned_input.get("quantities")
        # Create the checkout lines
        if variants and quantities:
            for variant, quantity in zip(variants, quantities):
                try:
                    add_variant_to_checkout(instance, variant, quantity)
                except InsufficientStock as exc:
                    raise ValidationError(
                        f"Insufficient product stock: {exc.item}", code=exc.code
                    )
        # Save provided addresses and associate them to the checkout
        cls.save_addresses(instance, cleaned_input)
    @classmethod
    def perform_mutation(cls, _root, info, **data):
        """Return the user's active checkout unchanged when one exists,
        otherwise build and persist a new one from the input."""
        user = info.context.user
        # `perform_mutation` is overridden to properly get or create a checkout
        # instance here and abort mutation if needed.
        if user.is_authenticated:
            checkout, _ = get_user_checkout(user)
            if checkout is not None:
                # If user has an active checkout, return it without any
                # modifications.
                return CheckoutCreate(checkout=checkout, created=False)
            checkout = models.Checkout(user=user)
        else:
            checkout = models.Checkout()
        cleaned_input = cls.clean_input(info, checkout, data.get("input"))
        checkout = cls.construct_instance(checkout, cleaned_input)
        cls.clean_instance(checkout)
        cls.save(info, checkout, cleaned_input)
        cls._save_m2m(info, checkout, cleaned_input)
        return CheckoutCreate(checkout=checkout, created=True)
class CheckoutLinesAdd(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(description="The ID of the checkout.", required=True)
        lines = graphene.List(
            CheckoutLineInput,
            required=True,
            description=(
                "A list of checkout lines, each containing information about "
                "an item in the checkout."
            ),
        )

    class Meta:
        description = "Adds a checkout line to the existing checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, lines, replace=False):
        """Resolve the checkout and variants, validate the quantities, add
        (or, with replace=True, overwrite) each line, then refresh the
        shipping method and discounts."""
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )

        variant_ids = [line.get("variant_id") for line in lines]
        quantities = [line.get("quantity") for line in lines]
        variants = cls.get_nodes_or_error(variant_ids, "variant_id", ProductVariant)
        check_lines_quantity(variants, quantities)

        if variants and quantities:
            for variant, quantity in zip(variants, quantities):
                try:
                    add_variant_to_checkout(
                        checkout, variant, quantity, replace=replace
                    )
                except InsufficientStock as exc:
                    raise ValidationError(
                        f"Insufficient product stock: {exc.item}", code=exc.code
                    )

        update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
        recalculate_checkout_discount(checkout, info.context.discounts)
        return CheckoutLinesAdd(checkout=checkout)
class CheckoutLinesUpdate(CheckoutLinesAdd):
    checkout = graphene.Field(Checkout, description="An updated checkout.")
    class Meta:
        description = "Updates checkout line in the existing checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
    @classmethod
    def perform_mutation(cls, root, info, checkout_id, lines):
        """Delegate to the parent mutation with replace=True so existing
        line quantities are overwritten instead of incremented."""
        return super().perform_mutation(root, info, checkout_id, lines, replace=True)
class CheckoutLineDelete(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(description="The ID of the checkout.", required=True)
        line_id = graphene.ID(description="ID of the checkout line to delete.")

    class Meta:
        description = "Deletes a CheckoutLine."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, line_id):
        """Remove a single line from the checkout (only when it actually
        belongs to it), then refresh shipping method and discounts."""
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        line = cls.get_node_or_error(
            info, line_id, only_type=CheckoutLine, field="line_id"
        )

        # Guard against deleting a line that belongs to another checkout.
        if line and line in checkout.lines.all():
            line.delete()

        update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
        recalculate_checkout_discount(checkout, info.context.discounts)
        return CheckoutLineDelete(checkout=checkout)
class CheckoutCustomerAttach(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(required=True, description="ID of the checkout.")
        customer_id = graphene.ID(required=True, description="The ID of the customer.")

    class Meta:
        description = "Sets the customer as the owner of the checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, customer_id):
        """Resolve both global IDs, then record the customer as the
        checkout's owner."""
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        customer = cls.get_node_or_error(
            info, customer_id, only_type=User, field="customer_id"
        )

        checkout.user = customer
        checkout.save(update_fields=["user"])
        return CheckoutCustomerAttach(checkout=checkout)
class CheckoutCustomerDetach(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)

    class Meta:
        description = "Removes the user assigned as the owner of the checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id):
        """Clear the checkout's owner, making it anonymous again."""
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )

        checkout.user = None
        checkout.save(update_fields=["user"])
        return CheckoutCustomerDetach(checkout=checkout)
class CheckoutShippingAddressUpdate(BaseMutation, I18nMixin):
    checkout = graphene.Field(Checkout, description="An updated checkout.")
    class Arguments:
        checkout_id = graphene.ID(required=True, description="ID of the checkout.")
        shipping_address = AddressInput(
            required=True,
            description="The mailing address to where the checkout will be shipped.",
        )
    class Meta:
        description = "Update shipping address in the existing checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, shipping_address):
        """Validate and store a new shipping address on the checkout, then
        recalculate discounts."""
        pk = from_global_id_strict_type(checkout_id, Checkout, field="checkout_id")
        try:
            # Prefetch product types so is_shipping_required() below does
            # not trigger per-line queries.
            checkout = models.Checkout.objects.prefetch_related(
                "lines__variant__product__product_type"
            ).get(pk=pk)
        except ObjectDoesNotExist:
            raise ValidationError(
                {
                    "checkout_id": ValidationError(
                        f"Couldn't resolve to a node: {checkout_id}",
                        code=CheckoutErrorCode.NOT_FOUND,
                    )
                }
            )
        if not checkout.is_shipping_required():
            raise ValidationError(
                {
                    "shipping_address": ValidationError(
                        ERROR_DOES_NOT_SHIP,
                        code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
                    )
                }
            )
        shipping_address = cls.validate_address(
            shipping_address, instance=checkout.shipping_address
        )
        # NOTE(review): the shipping-method revalidation runs *before* the
        # new address is saved, so it still checks against the old address.
        # Confirm this ordering is intentional.
        update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
        with transaction.atomic():
            shipping_address.save()
            change_shipping_address_in_checkout(checkout, shipping_address)
        recalculate_checkout_discount(checkout, info.context.discounts)
        return CheckoutShippingAddressUpdate(checkout=checkout)
class CheckoutBillingAddressUpdate(CheckoutShippingAddressUpdate):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(required=True, description="ID of the checkout.")
        billing_address = AddressInput(
            required=True, description="The billing address of the checkout."
        )

    class Meta:
        description = "Update billing address in the existing checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, billing_address):
        """Validate the supplied billing address and persist it on the
        checkout atomically."""
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        address = cls.validate_address(
            billing_address, instance=checkout.billing_address
        )
        with transaction.atomic():
            address.save()
            change_billing_address_in_checkout(checkout, address)
        return CheckoutBillingAddressUpdate(checkout=checkout)
class CheckoutEmailUpdate(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.")
        email = graphene.String(required=True, description="email.")

    class Meta:
        description = "Updates email address in the existing checkout object."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, email):
        """Assign the email, run model validation, then persist it."""
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )

        checkout.email = email
        # Validate before saving so an invalid email surfaces as an error.
        cls.clean_instance(checkout)
        checkout.save(update_fields=["email"])
        return CheckoutEmailUpdate(checkout=checkout)
class CheckoutShippingMethodUpdate(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.")
        shipping_method_id = graphene.ID(required=True, description="Shipping method.")

    class Meta:
        # BUG FIX: the description previously read "Updates the shipping
        # address of the checkout" -- copied from the address mutation --
        # but this mutation updates the shipping *method*.
        description = "Updates the shipping method of the checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, shipping_method_id):
        """Validate the shipping method against the checkout, store it and
        recalculate discounts."""
        pk = from_global_id_strict_type(
            checkout_id, only_type=Checkout, field="checkout_id"
        )
        try:
            # Prefetch the relations used by the applicability checks.
            checkout = models.Checkout.objects.prefetch_related(
                "lines__variant__product__collections",
                "lines__variant__product__product_type",
            ).get(pk=pk)
        except ObjectDoesNotExist:
            raise ValidationError(
                {
                    "checkout_id": ValidationError(
                        f"Couldn't resolve to a node: {checkout_id}",
                        code=CheckoutErrorCode.NOT_FOUND,
                    )
                }
            )
        if not checkout.is_shipping_required():
            raise ValidationError(
                {
                    "shipping_method": ValidationError(
                        ERROR_DOES_NOT_SHIP,
                        code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
                    )
                }
            )
        shipping_method = cls.get_node_or_error(
            info,
            shipping_method_id,
            only_type=ShippingMethod,
            field="shipping_method_id",
        )
        shipping_method_is_valid = clean_shipping_method(
            checkout=checkout, method=shipping_method, discounts=info.context.discounts
        )
        if not shipping_method_is_valid:
            raise ValidationError(
                {
                    "shipping_method": ValidationError(
                        "This shipping method is not applicable.",
                        code=CheckoutErrorCode.SHIPPING_METHOD_NOT_APPLICABLE,
                    )
                }
            )
        checkout.shipping_method = shipping_method
        checkout.save(update_fields=["shipping_method"])
        recalculate_checkout_discount(checkout, info.context.discounts)
        return CheckoutShippingMethodUpdate(checkout=checkout)
class CheckoutComplete(BaseMutation):
    order = graphene.Field(Order, description="Placed order.")

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)
        store_source = graphene.Boolean(
            default_value=False,
            description=(
                "Determines whether to store the payment source for future usage."
            ),
        )
        redirect_url = graphene.String(
            required=False,
            description=(
                "URL of a view where users should be redirected to "
                "see the order details. URL in RFC 1808 format."
            ),
        )

    class Meta:
        description = (
            "Completes the checkout. As a result a new order is created and "
            "a payment charge is made. This action requires a successful "
            "payment before it can be performed."
        )
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, store_source, **data):
        """Turn a fully-paid checkout into an order.

        Validates the checkout, prepares the order data, charges the
        payment, creates the order and deletes the checkout.  Stock,
        voucher, tax and payment failures surface as ValidationError.
        """
        checkout = cls.get_node_or_error(
            info,
            checkout_id,
            only_type=Checkout,
            field="checkout_id",
            qs=models.Checkout.objects.prefetch_related(
                "gift_cards",
                "lines",
                Prefetch(
                    "payments",
                    queryset=payment_models.Payment.objects.prefetch_related(
                        "order", "order__lines"
                    ),
                ),
            ).select_related("shipping_method", "shipping_method__shipping_zone"),
        )
        discounts = info.context.discounts
        user = info.context.user

        clean_checkout(checkout, discounts)
        # NOTE(review): `payment` is used unconditionally below; this relies
        # on clean_checkout guaranteeing an active payment exists -- confirm.
        payment = checkout.get_last_active_payment()
        with transaction.atomic():
            try:
                order_data = prepare_order_data(
                    checkout=checkout,
                    tracking_code=analytics.get_client_id(info.context),
                    discounts=discounts,
                )
            except InsufficientStock as e:
                raise ValidationError(
                    f"Insufficient product stock: {e.item}", code=e.code
                )
            except voucher_model.NotApplicable:
                raise ValidationError(
                    "Voucher not applicable",
                    code=CheckoutErrorCode.VOUCHER_NOT_APPLICABLE,
                )
            except TaxError as tax_error:
                # BUG FIX: this previously *returned* the ValidationError
                # instead of raising it, so tax failures were silently
                # swallowed and the error object leaked as the payload.
                raise ValidationError(
                    "Unable to calculate taxes - %s" % str(tax_error),
                    code=CheckoutErrorCode.TAX_ERROR,
                )

        # Snapshot addresses into gateway-friendly dataclasses.
        billing_address = order_data["billing_address"]
        shipping_address = order_data.get("shipping_address", None)
        billing_address = AddressData(**billing_address.as_data())
        if shipping_address is not None:
            shipping_address = AddressData(**shipping_address.as_data())

        try:
            txn = gateway.process_payment(
                payment=payment, token=payment.token, store_source=store_source
            )
            if not txn.is_success:
                raise PaymentError(txn.error)
        except PaymentError as e:
            # Roll back any allocations made while preparing the order.
            abort_order_data(order_data)
            raise ValidationError(str(e), code=CheckoutErrorCode.PAYMENT_ERROR)

        # Remember the gateway customer for future one-click payments.
        if txn.customer_id and user.is_authenticated:
            store_customer_id(user, payment.gateway, txn.customer_id)

        redirect_url = data.get("redirect_url", "")
        if redirect_url:
            try:
                validate_storefront_url(redirect_url)
            except ValidationError as error:
                raise ValidationError(
                    {"redirect_url": error}, code=AccountErrorCode.INVALID
                )
        # create the order into the database
        order = create_order(
            checkout=checkout,
            order_data=order_data,
            user=user,
            redirect_url=redirect_url,
        )

        # remove checkout after order is successfully paid
        checkout.delete()

        # return the success response with the newly created order data
        return CheckoutComplete(order=order)
class CheckoutUpdateVoucher(BaseMutation):
    checkout = graphene.Field(Checkout, description="An checkout with updated voucher.")

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)
        voucher_code = graphene.String(description="Voucher code.")

    class Meta:
        description = (
            "DEPRECATED: Will be removed in Saleor 2.10, use CheckoutAddPromoCode "
            "or CheckoutRemovePromoCode instead. Adds voucher to the checkout. Query "
            "it without voucher_code field to remove voucher from checkout."
        )
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def _attach_voucher(cls, checkout, voucher_code):
        # Resolve the code against currently-active vouchers only.
        try:
            voucher = voucher_model.Voucher.objects.active(date=timezone.now()).get(
                code=voucher_code
            )
        except voucher_model.Voucher.DoesNotExist:
            raise ValidationError(
                {
                    "voucher_code": ValidationError(
                        "Voucher with given code does not exist.",
                        code=CheckoutErrorCode.NOT_FOUND,
                    )
                }
            )
        try:
            add_voucher_to_checkout(checkout, voucher)
        except voucher_model.NotApplicable:
            raise ValidationError(
                {
                    "voucher_code": ValidationError(
                        "Voucher is not applicable to that checkout.",
                        code=CheckoutErrorCode.VOUCHER_NOT_APPLICABLE,
                    )
                }
            )

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, voucher_code=None):
        """Attach the voucher identified by `voucher_code` to the checkout,
        or detach the current voucher when no code is supplied.
        """
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        if voucher_code:
            cls._attach_voucher(checkout, voucher_code)
        elif get_voucher_for_checkout(checkout):
            # No code supplied: clear any voucher currently applied.
            remove_voucher_from_checkout(checkout)
        return CheckoutUpdateVoucher(checkout=checkout)
class CheckoutAddPromoCode(BaseMutation):
    checkout = graphene.Field(
        Checkout, description="The checkout with the added gift card or voucher."
    )

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)
        promo_code = graphene.String(
            description="Gift card code or voucher code.", required=True
        )

    class Meta:
        description = "Adds a gift card or a voucher to a checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, promo_code):
        """Apply the given gift-card/voucher code to the resolved checkout."""
        target_checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        add_promo_code_to_checkout(
            target_checkout, promo_code, info.context.discounts
        )
        return CheckoutAddPromoCode(checkout=target_checkout)
class CheckoutRemovePromoCode(BaseMutation):
    checkout = graphene.Field(
        Checkout, description="The checkout with the removed gift card or voucher."
    )

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)
        promo_code = graphene.String(
            description="Gift card code or voucher code.", required=True
        )

    class Meta:
        description = "Remove a gift card or a voucher from a checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, promo_code):
        """Detach the given gift-card/voucher code from the checkout."""
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        remove_promo_code_from_checkout(checkout, promo_code)
        # Bug fix: the original returned CheckoutUpdateVoucher(checkout=...),
        # i.e. the payload type of a different (deprecated) mutation, so the
        # GraphQL response carried the wrong mutation payload.
        return CheckoutRemovePromoCode(checkout=checkout)
class CheckoutUpdateMeta(UpdateMetaBaseMutation):
    # Declarative subclass: behavior comes from UpdateMetaBaseMutation.
    class Meta:
        description = "Updates metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        # public=True targets the public metadata store (contrast with
        # CheckoutUpdatePrivateMeta below).
        public = True
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
class CheckoutUpdatePrivateMeta(UpdateMetaBaseMutation):
    # Declarative subclass: behavior comes from UpdateMetaBaseMutation.
    class Meta:
        description = "Updates private metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        # public=False targets the private metadata store.
        public = False
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
class CheckoutClearMeta(ClearMetaBaseMutation):
    # Declarative subclass: behavior comes from ClearMetaBaseMutation.
    class Meta:
        description = "Clear metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        # public=True clears entries from the public metadata store.
        public = True
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
class CheckoutClearPrivateMeta(ClearMetaBaseMutation):
    # Declarative subclass: behavior comes from ClearMetaBaseMutation.
    class Meta:
        description = "Clear private metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        # public=False clears entries from the private metadata store.
        public = False
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
| |
import datetime
import inspect
import re
from operator import attrgetter
from uuid import UUID as PythonUUID
import pytz
from dateutil import parser
# Sentinel value which means "pick the default value" when encountered.
# A unique object() is used so that callers can still pass None (or any
# other falsy value) as an explicit, deliberate value.
default_sentinel = object()
class ValidationError(Exception):
    """Raised when a value or schema fails validation.

    By convention args[0] carries either a message string or a dict of
    error details (see Schema.raise_on_errors / List.clean).
    """
    pass
class StopValidation(Exception):
    """Internal control-flow exception: stop validating a field and use
    the value carried in args[0] (a default or blank value) as-is.
    Raised by Field.clean and caught by Schema.full_clean.
    """
    pass
class Field(object):
    """Base class for all schema fields.

    Subclasses customize validation via base_type/blank_value class
    attributes and by overriding has_value/clean/serialize.
    """

    # Expected Python type(s) of the raw value: a single type (e.g. int)
    # or a tuple of acceptable types. None disables the type check.
    base_type = None

    # Substitute value used when an optional field received no usable
    # value (delivered to the schema via StopValidation).
    blank_value = None

    def __init__(
        self,
        required=True,
        default=None,
        field_name=None,
        raw_field_name=None,
        mutable=True,
        read_only=False,
        blank_value=default_sentinel,
    ):
        """
        By default, the field name is derived from the schema model, but in
        certain cases it can be overridden. Specifying field_name overrides
        both the name of the field in the raw (unclean) data, as well as in
        the clean data model. If the raw data has a different field name
        than the clean data, raw_field_name can be overridden.
        """
        self.required = required
        self.default = default
        self.mutable = mutable
        self.field_name = field_name
        self.raw_field_name = raw_field_name or field_name
        self.read_only = read_only
        # Only shadow the class-level blank_value when one was explicitly
        # supplied (the sentinel lets callers pass None deliberately).
        if blank_value is not default_sentinel:
            self.blank_value = blank_value

    def has_value(self, value):
        """Whether the supplied raw value counts as "present"."""
        return value is not None

    def clean(self, value):
        """Take a dirty value and clean it."""
        wrong_type = (
            self.base_type is not None
            and value is not None
            and not isinstance(value, self.base_type)
        )
        if wrong_type:
            if isinstance(self.base_type, tuple):
                allowed_types_text = ' or '.join(
                    typ.__name__ for typ in self.base_type
                )
            else:
                allowed_types_text = self.base_type.__name__
            raise ValidationError('Value must be of %s type.' % allowed_types_text)
        if self.has_value(value):
            return value
        # No usable value: fall back to the default, fail if required,
        # otherwise short-circuit validation with the blank value.
        if self.default is not None:
            raise StopValidation(self.default)
        if self.required:
            raise ValidationError('This field is required.')
        raise StopValidation(self.blank_value)

    def serialize(self, value):
        """
        Takes a cleaned value and serializes it.

        Keep in mind that if this field is not required, the cleaned value
        might be None.
        """
        return value
class String(Field):
    """Field accepting str values, with optional length bounds."""

    base_type = str
    blank_value = ''

    # Class-level bounds; constructor kwargs override them per instance.
    min_length = None
    max_length = None

    def __init__(self, min_length=None, max_length=None, **kwargs):
        super(String, self).__init__(**kwargs)
        if min_length is not None:
            self.min_length = min_length
        if max_length is not None:
            self.max_length = max_length

    def _check_length(self, value):
        # Enforce max first, then min, mirroring the error precedence.
        if self.max_length is not None and len(value) > self.max_length:
            raise ValidationError(
                'The value must be no longer than %s characters.'
                % (self.max_length)
            )
        if self.min_length is not None and len(value) < self.min_length:
            raise ValidationError(
                'The value must be at least %s characters long.'
                % (self.min_length)
            )

    def clean(self, value):
        cleaned = super(String, self).clean(value)
        self._check_length(cleaned)
        return cleaned

    def has_value(self, value):
        # Empty strings count as missing.
        return bool(value)
class TrimmedString(String):
    """String that strips surrounding whitespace before length checks."""

    base_type = str
    blank_value = ''

    def clean(self, value):
        # Deliberately invoke Field.clean (bypassing String.clean) so the
        # length validation below runs on the *trimmed* value.
        cleaned = super(String, self).clean(value)
        if cleaned:
            cleaned = cleaned.strip()
        self._check_length(cleaned)
        return cleaned

    def has_value(self, value):
        # Whitespace-only strings count as missing.
        return bool(value and value.strip())
class Bool(Field):
    # Accepts only actual booleans; a missing optional value becomes False.
    base_type = bool
    blank_value = False
class Regex(String):
    """String field that must fully pass a regular-expression match."""

    # Class-level defaults; constructor arguments override per instance.
    regex = None
    regex_flags = 0
    regex_message = 'Invalid input.'

    def __init__(
        self, regex=None, regex_flags=None, regex_message=None, **kwargs
    ):
        super(Regex, self).__init__(**kwargs)
        if regex is not None:
            self.regex = regex
        if regex_flags is not None:
            self.regex_flags = regex_flags
        if regex_message is not None:
            self.regex_message = regex_message

    def get_regex(self):
        # Compile lazily on first use and cache on the instance.
        compiled = getattr(self, '_compiled_regex', None)
        if not compiled:
            compiled = re.compile(self.regex, self.regex_flags)
            self._compiled_regex = compiled
        return compiled

    def clean(self, value):
        cleaned = super(Regex, self).clean(value)
        if self.get_regex().match(cleaned) is None:
            raise ValidationError(self.regex_message)
        return cleaned
class DateTime(Regex):
    """ISO 8601 from http://www.pelagodesign.com/blog/2009/05/20/iso-8601-date-validation-that-doesnt-suck/"""
    regex = "^([\\+-]?\\d{4}(?!\\d{2}\\b))((-?)((0[1-9]|1[0-2])(\\3([12]\\d|0[1-9]|3[01]))?|W([0-4]\\d|5[0-2])(-?[1-7])?|(00[1-9]|0[1-9]\\d|[12]\\d{2}|3([0-5]\\d|6[1-6])))([T\\s]((([01]\\d|2[0-3])((:?)[0-5]\\d)?|24\\:?00)([\\.,]\\d+(?!:))?)?(\\17[0-5]\\d([\\.,]\\d+)?)?([zZ]|([\\+-])([01]\\d|2[0-3]):?([0-5]\\d)?)?)?)?$"
    regex_message = 'Invalid ISO 8601 datetime.'
    blank_value = None

    def __init__(self, *args, **kwargs):
        # Optional lower bound for accepted dates/datetimes.
        self.min_date = kwargs.pop('min_date', None)
        super(DateTime, self).__init__(*args, **kwargs)

    def clean(self, value):
        # XXX we're skipping a level of inheritance so that we can reuse
        # the regex match later in this method.
        value = super(Regex, self).clean(value)
        match = self.get_regex().match(value)
        if not match:
            raise ValidationError(self.regex_message)
        try:
            dt = parser.parse(value)
        except ValueError:
            raise ValidationError('Could not parse datetime')
        if self.min_date:
            # NOTE(review): only the aware-dt / naive-min_date combination
            # is normalized here; a naive dt combined with an aware
            # min_date would raise TypeError on the comparison below --
            # confirm callers can't hit that case.
            if dt.tzinfo is not None and self.min_date.tzinfo is None:
                min_date = self.min_date.replace(tzinfo=pytz.utc)
            else:
                min_date = self.min_date
            if dt < min_date:
                err_msg = 'Date cannot be earlier than %s.' % (
                    self.min_date.strftime('%Y-%m-%d')
                )
                raise ValidationError(err_msg)
        # The 12th capture group holds the time portion of the match; when
        # present, return a datetime, otherwise collapse to a plain date.
        time_group = match.groups()[11]
        if time_group and len(time_group) > 1:
            return dt
        return dt.date()

    def serialize(self, value):
        # Blank (None) values implicitly serialize to None.
        if value is not None:
            return value.isoformat()
class Email(Regex):
    """Email-address field validated by a case-insensitive regex."""

    regex = (
        r'^(?:[^\.@\s]|[^\.@\s]\.(?!\.))*[^.@\s]@'
        r'[^.@\s](?:[^\.@\s]|\.(?!\.))*\.[a-z]{2,63}$'
    )
    regex_flags = re.IGNORECASE
    regex_message = 'Invalid email address.'
    max_length = 254

    def clean(self, value):
        # Strip surrounding whitespace up front so that a pasted address
        # with stray spaces still validates.
        if isinstance(value, str):
            value = value.strip()
        return super(Email, self).clean(value)
class URL(Regex):
    """Regex-backed URL field with scheme handling.

    Supports an optional default scheme (prepended when the input has
    none) plus allow/deny lists of schemes.
    """

    blank_value = None

    def __init__(
        self,
        require_tld=True,
        default_scheme=None,
        allowed_schemes=None,
        disallowed_schemes=None,
        **kwargs
    ):
        """
        :param require_tld: when True, the host part must end in a TLD.
        :param default_scheme: scheme prepended to scheme-less input; when
            unset, the generated regex requires an explicit scheme.
        :param allowed_schemes: if given, only these schemes validate.
        :param disallowed_schemes: schemes that are rejected outright.
        """
        def normalize_scheme(sch):
            # Accept 'http', 'http:' or 'http://' and yield 'http://' form.
            if sch.endswith('://') or sch.endswith(':'):
                return sch
            return sch + '://'
        # FQDN validation similar to https://github.com/chriso/validator.js/blob/master/src/lib/isFQDN.js
        # ff01-ff5f -> full-width chars, not allowed
        alpha_numeric_and_symbols_ranges = u'0-9a-z\u00a1-\uff00\uff5f-\uffff'
        tld_part = (
            require_tld
            and r'\.[%s-]{2,63}' % alpha_numeric_and_symbols_ranges
            or ''
        )
        scheme_part = '[a-z]+://'
        self.default_scheme = default_scheme
        if self.default_scheme:
            self.default_scheme = normalize_scheme(self.default_scheme)
        self.scheme_regex = re.compile('^' + scheme_part, re.IGNORECASE)
        # With a default scheme configured the scheme becomes optional in
        # the match; clean() prepends the default when it's missing.
        if default_scheme:
            scheme_part = '(%s)?' % scheme_part
        regex = (
            r'^%s([-%s@:%%_+.~#?&/\\=]{1,256}%s|([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]+)?([/?].*)?$'
            % (scheme_part, alpha_numeric_and_symbols_ranges, tld_part)
        )
        super(URL, self).__init__(
            regex=regex,
            regex_flags=re.IGNORECASE | re.UNICODE,
            regex_message='Invalid URL.',
            **kwargs
        )

        def compile_schemes_to_regexes(schemes):
            return [
                re.compile('^' + normalize_scheme(sch) + '.*', re.IGNORECASE)
                for sch in schemes
            ]
        self.allowed_schemes = allowed_schemes or []
        self.allowed_schemes_regexes = compile_schemes_to_regexes(
            self.allowed_schemes
        )
        self.disallowed_schemes = disallowed_schemes or []
        self.disallowed_schemes_regexes = compile_schemes_to_regexes(
            self.disallowed_schemes
        )

    def clean(self, value):
        value = super(URL, self).clean(value)
        # Prepend the default scheme when the input carried none. (Without
        # a default scheme the regex above already required one, so
        # default_scheme is non-None whenever this branch is reached.)
        if not self.scheme_regex.match(value):
            value = self.default_scheme + value
        if self.allowed_schemes:
            if not any(
                allowed_regex.match(value)
                for allowed_regex in self.allowed_schemes_regexes
            ):
                allowed_schemes_text = ' or '.join(self.allowed_schemes)
                err_msg = (
                    "This URL uses a scheme that's not allowed. You can only "
                    "use %s." % allowed_schemes_text
                )
                raise ValidationError(err_msg)
        if self.disallowed_schemes:
            if any(
                disallowed_regex.match(value)
                for disallowed_regex in self.disallowed_schemes_regexes
            ):
                err_msg = "This URL uses a scheme that's not allowed."
                raise ValidationError(err_msg)
        return value
class RelaxedURL(URL):
    """URL field that treats a bare scheme (e.g. "http://") as blank."""

    def clean(self, value):
        # An optional field submitted as just the default scheme means the
        # user never edited the pre-filled input; treat it as no value.
        if value == self.default_scheme and not self.required:
            return None
        return super(RelaxedURL, self).clean(value)
class Integer(Field):
    """Field accepting int values within optional inclusive bounds."""

    base_type = int

    def __init__(self, min_value=None, max_value=None, **kwargs):
        super(Integer, self).__init__(**kwargs)
        self.min_value = min_value
        self.max_value = max_value

    def _check_value(self, value):
        # Upper bound first, then lower bound.
        if self.max_value is not None and value > self.max_value:
            raise ValidationError(
                'The value must not be larger than %d.' % self.max_value
            )
        if self.min_value is not None and value < self.min_value:
            raise ValidationError(
                'The value must be at least %d.' % self.min_value
            )

    def clean(self, value):
        cleaned = super(Integer, self).clean(value)
        self._check_value(cleaned)
        return cleaned
class List(Field):
    """Field holding a list whose items are cleaned by a nested field."""

    base_type = list
    # NOTE(review): this blank list is shared by every List instance that
    # doesn't override blank_value; mutating a blank cleaned result would
    # leak across fields -- confirm callers treat it as read-only.
    blank_value = []

    def __init__(self, field_instance, max_length=None, **kwargs):
        self.max_length = max_length
        super(List, self).__init__(**kwargs)
        # Field used to clean/serialize each individual item.
        self.field_instance = field_instance

    def has_value(self, value):
        # Empty lists count as missing.
        return bool(value)

    def clean(self, value):
        cleaned = super(List, self).clean(value)
        if self.required and not cleaned:
            raise ValidationError('List must not be empty.')
        if self.max_length and len(cleaned) > self.max_length:
            raise ValidationError('List is too long.')
        item_errors = {}
        cleaned_items = []
        for idx, item in enumerate(cleaned):
            try:
                cleaned_items.append(self.field_instance.clean(item))
            except ValidationError as e:
                # Collect per-index errors so all items are reported.
                item_errors[idx] = e.args and e.args[0]
        if item_errors:
            raise ValidationError({'errors': item_errors})
        return cleaned_items

    def serialize(self, value):
        # Serialize all falsy values as an empty list.
        if not value:
            return []
        return [self.field_instance.serialize(item) for item in value]
class Dict(Field):
    # Field accepting dict values.
    base_type = dict

    def has_value(self, value):
        # Empty dicts count as missing.
        return bool(value)

    def serialize(self, value):
        # Serialize all falsy values as an empty dict.
        return value or {}
class Embedded(Dict):
    """Dict field whose contents are validated by a nested Schema class.

    Cleaning returns the nested schema's cleaned-data dict; serializing
    runs the nested schema's serialization.
    """

    def __init__(self, schema_class, **kwargs):
        """
        :param schema_class: Schema subclass used to clean/serialize the
            embedded dict.
        """
        super(Embedded, self).__init__(**kwargs)
        self.schema_class = schema_class

    def clean(self, value):
        value = super(Embedded, self).clean(value)
        # Simplified: the original wrapped this call in a no-op
        # ``except ValidationError as e: raise e``.
        return self.schema_class(value).full_clean()

    def is_valid(self, value=None):
        """Return True if `value` cleans without a validation error.

        Bug fix: the original signature took no value and called
        ``self.clean()`` (which requires an argument), so this method
        always raised TypeError. The new parameter defaults to None to
        stay call-compatible.
        """
        try:
            self.clean(value)
        except ValidationError:
            return False
        except StopValidation:
            # Optional field with no value: vacuously valid.
            return True
        return True

    def serialize(self, value):
        if value is not None:
            return self.schema_class(data=value).serialize()
class ReferenceNotFoundError(Exception):
    """Exception to be raised when a referenced object isn't found.

    Raised by fetch_existing/fetch_object implementations and translated
    into a ValidationError by the reference fields.
    """
class EmbeddedReference(Dict):
    """Represents an object which can be referenced by its ID.

    A submitted dict either creates a fresh instance of the object class,
    or -- when the dict contains the primary key field (`pk_field`) --
    updates the fields of the matching existing object.
    """

    def __init__(self, object_class, schema_class, pk_field='id', **kwargs):
        super(EmbeddedReference, self).__init__(**kwargs)
        self.object_class = object_class
        self.schema_class = schema_class
        self.pk_field = pk_field

    def clean(self, value):
        cleaned = super(EmbeddedReference, self).clean(value)
        # A primary key in the payload means "update"; otherwise "create".
        has_pk = bool(cleaned) and self.pk_field in cleaned
        if has_pk:
            return self.clean_existing(cleaned)
        return self.clean_new(cleaned)

    def serialize(self, obj):
        if obj is None:
            return
        serialized = self.schema_class(
            data=self.get_orig_data_from_existing(obj)
        ).serialize()
        serialized[self.pk_field] = getattr(obj, self.pk_field)
        return serialized

    def clean_new(self, value):
        """Instantiate a brand-new object from cleaned data."""
        cleaned = self.schema_class(value).full_clean()
        return self.object_class(**cleaned)

    def clean_existing(self, value):
        """Update and return the existing object referenced by the payload."""
        try:
            obj = self.fetch_existing(value[self.pk_field])
        except ReferenceNotFoundError:
            raise ValidationError('Object does not exist.')
        orig_data = self.get_orig_data_from_existing(obj)
        # Clean the new payload against the object's current data.
        cleaned = self.schema_class(value, orig_data).full_clean()
        for name, val in cleaned.items():
            # Never overwrite the primary key itself.
            if name != self.pk_field:
                setattr(obj, name, val)
        return obj

    def fetch_existing(self, pk):
        """Fetch an existing object that corresponds to a given ID.

        This needs to be subclassed since, depending on the object class,
        the fetching mechanism might be different (see the relational vs.
        non-relational implementations referenced in the original code).

        :param str pk: ID of the object that's supposed to exist.
        :returns: an instance of the object class.
        :raises: ReferenceNotFoundError if the object doesn't exist.
        """
        raise NotImplementedError  # should be subclassed

    def get_orig_data_from_existing(self, obj):
        """Return a dict of the object's current field names and values.

        Values should be in their "cleaned" state (exactly as they were
        set on the object, without any serialization).

        :param object obj: existing object for which new data is currently
            being cleaned.
        :returns: dict of fields and values that are currently set on the
            object (before the new cleaned data is applied).
        """
        raise NotImplementedError  # should be subclassed
class Reference(Field):
    """Field that turns a submitted object ID into the object itself.

    Serialization works the other way around: it accepts an object and
    emits its ID.
    """

    # IDs are expected as strings by default; subclasses may override
    # (e.g. integer primary keys in relational databases).
    base_type = str

    def __init__(self, object_class, **kwargs):
        super(Reference, self).__init__(**kwargs)
        self.object_class = object_class

    def clean(self, value):
        ref_id = super(Reference, self).clean(value)
        try:
            return self.fetch_object(ref_id)
        except ReferenceNotFoundError:
            raise ValidationError('Object does not exist.')

    def fetch_object(self, ref_id):
        """Fetch the referenced object by ID; must be subclassed.

        The fetching mechanism depends on the object class (relational vs.
        non-relational storage, etc.).

        :param str ref_id: ID of the referenced object.
        :returns: an instance of the object class.
        :raises: ReferenceNotFoundError if the object doesn't exist.
        """
        raise NotImplementedError  # should be subclassed
class Choices(Field):
    """Field that only accepts values from a fixed set of choices."""

    def __init__(
        self,
        choices,
        case_insensitive=False,
        error_invalid_choice=None,
        **kwargs
    ):
        super(Choices, self).__init__(**kwargs)
        self.choices = choices
        self.case_insensitive = case_insensitive
        self.error_invalid_choice = (
            error_invalid_choice or 'Not a valid choice.'
        )

    def get_choices(self):
        return self.choices

    def format_invalid_choice_msg(self, value):
        # The message template may interpolate the offending value and
        # the comma-separated list of valid choices.
        return self.error_invalid_choice.format(
            value=value, valid_choices=', '.join(self.get_choices())
        )

    def clean(self, value):
        cleaned = super(Choices, self).clean(value)
        valid = self.get_choices()
        if self.case_insensitive:
            # Map lowercased choices back to their canonical spelling.
            canonical = {choice.lower(): choice for choice in valid}
            if not isinstance(cleaned, str):
                raise ValidationError(u'Value needs to be a string.')
            key = cleaned.lower()
            if key not in canonical:
                raise ValidationError(self.format_invalid_choice_msg(cleaned))
            return canonical[key]
        if cleaned not in valid:
            raise ValidationError(self.format_invalid_choice_msg(cleaned))
        return cleaned
class Enum(Choices):
    """Like Choices, but expects a Python 3 Enum."""

    def __init__(self, choices, **kwargs):
        """Initialize the Enum field.

        The `choices` param can be either:

        * an enum.Enum class (all of its members become valid choices), or
        * a non-empty list containing a subset of one enum class's members
          (e.g. ``[SomeEnumCls.OptionA, SomeEnumCls.OptionB]``).
        """
        if inspect.isclass(choices):
            self.enum_cls = choices
        else:
            assert choices, 'You need to provide at least one enum choice.'
            self.enum_cls = choices[0].__class__
        super(Enum, self).__init__(choices, **kwargs)

    def get_choices(self):
        # Valid raw inputs are the members' underlying values.
        return [choice.value for choice in self.choices]

    def clean(self, value):
        raw = super(Enum, self).clean(value)
        # Promote the raw value to the actual enum member.
        return self.enum_cls(raw)

    def serialize(self, choice):
        if choice is not None:
            return choice.value
class SortedSet(List):
    """Sorted, unique set of values represented as a list."""

    def __init__(self, field_instance, max_length=None, key=None, **kwargs):
        super(SortedSet, self).__init__(field_instance, max_length, **kwargs)
        # For Enum item fields, default to sorting members by raw value
        # unless the caller supplied an explicit sort key.
        if key is None and isinstance(field_instance, Enum):
            key = attrgetter('value')
        self.key = key

    def clean(self, value):
        unique = set(super(SortedSet, self).clean(value))
        return sorted(unique, key=self.key)
class Schema(object):
    """
    Base Schema class. Provides core behavior like fields declaration
    and construction, validation, and data and error proxying.

    There are 3 steps to using a Schema:

    1. Define the Schema, e.g.

        class UserSchema(Schema):
            first_name = String()
            last_name = String()
            email = Email()

    2. Create a Schema instance, passing data into it.

        # Scenario 1: Creation of a new object.
        schema = UserSchema({
            'first_name': 'Donald',
            'last_name': 'Glover',
            'email': 'gambino@example.com'
        })

        # Scenario 2: Update of an existing object.
        schema = UserSchema(
            raw_data={
                'first_name': 'Childish',
                'last_name': 'Gambino'
            },
            data={
                'first_name': 'Donald',
                'last_name': 'Glover',
                'email': 'gambino@example.com'
            }
        )

    3. Clean the Schema (validating the data you passed into it).

        data = schema.full_clean()

    This operation will raise a ValidationError if the data you passed
    into the Schema is invalid.

    To introduce custom validation to the Schema (beyond the basics
    covered by various Field types), override the "clean" method and
    raise a ValidationError with a descriptive message if you encounter
    any invalid data.

    Parameters:
    - raw_data - a dict with the data you want to validate.
    - data - dict with existing data, e.g. based on some object you're
      trying to update.
    """

    @classmethod
    def get_fields(cls):
        """
        Returns a dictionary of fields and field instances for this schema.
        """
        fields = {}
        for field_name in dir(cls):
            if isinstance(getattr(cls, field_name), Field):
                field = getattr(cls, field_name)
                # A field may override the name it's exposed under.
                field_name = field.field_name or field_name
                fields[field_name] = field
        return fields

    @classmethod
    def obj_to_dict(cls, obj):
        """
        Takes a model object and converts it into a dictionary suitable for
        passing to the constructor's data attribute.
        """
        data = {}
        for field_name in cls.get_fields():
            try:
                value = getattr(obj, field_name)
            except AttributeError:
                # If the field doesn't exist on the object, fail gracefully
                # and don't include the field in the data dict at all. Fail
                # loudly if the field exists but produces a different error
                # (edge case: accessing an *existing* field could technically
                # produce an unrelated AttributeError).
                continue
            if callable(value):
                # Methods/callables contribute their return value.
                value = value()
            data[field_name] = value
        return data

    def __init__(self, raw_data=None, data=None):
        """Set up the schema's raw/clean data stores and error buckets.

        :param raw_data: dict of incoming (dirty) data to validate.
        :param data: dict of existing (already-clean) data, e.g. from an
            object being updated.
        """
        # Guard against field names that would shadow the schema's own
        # attributes (raw_data, errors, ...) and silently break it.
        conflicting_fields = set(
            [
                'raw_data',
                'orig_data',
                'data',
                'errors',
                'field_errors',
                'fields',
            ]
        ).intersection(dir(self))
        if conflicting_fields:
            raise Exception(
                'The following field names are reserved and need to be renamed: %s. '
                'Please use the field_name keyword to use them.'
                % list(conflicting_fields)
            )
        self.raw_data = raw_data or {}
        # Keep an untouched copy of the pre-existing data for immutability
        # checks in full_clean(); self.data is the working (mutated) copy.
        self.orig_data = data or None
        self.data = data and dict(data) or {}
        self.field_errors = {}
        self.errors = []
        self.fields = self.get_fields()

    def clean(self):
        """
        Override to add additional validations.

        Always called at the end of `full_clean()` method, even if validation
        failed for some fields.

        Cleaned input data are available in `self.data` dict.
        Keys that failed validation won't be set. Use conditional dictionary
        access or return early `if self.errors or self.field_errors`.
        """
        pass

    def full_clean(self):
        """Validate every field, run `clean()`, and return cleaned data.

        :raises: ValidationError carrying {'field-errors': ..., 'errors': ...}
            when anything failed.
        """
        if not isinstance(self.raw_data, dict):
            raise ValidationError(
                {'errors': ['Invalid request: JSON dictionary expected.']}
            )
        for field_name, field in self.fields.items():
            if field.read_only:
                # Read-only fields are never populated from raw input.
                continue
            raw_field_name = field.raw_field_name or field_name
            try:
                # Validate a field if it's posted in raw_data, or if we don't
                # have a value for it in case it's required.
                if raw_field_name in self.raw_data or not field.has_value(
                    self.data.get(field_name, None)
                ):
                    value = field.clean(self.raw_data.get(raw_field_name))
                    # Immutable fields may not change from their original
                    # value once set.
                    if (
                        not field.mutable
                        and self.orig_data
                        and field_name in self.orig_data
                    ):
                        old_value = self.orig_data[field_name]
                        # compare datetimes properly, regardless of whether they're offset-naive or offset-aware
                        if isinstance(value, datetime.datetime) and isinstance(
                            old_value, datetime.datetime
                        ):
                            value = value.replace(tzinfo=None) + (
                                value.utcoffset()
                                or datetime.timedelta(seconds=0)
                            )
                            old_value = old_value.replace(tzinfo=None) + (
                                old_value.utcoffset()
                                or datetime.timedelta(seconds=0)
                            )
                        if value != old_value:
                            raise ValidationError('Value cannot be changed.')
                    self.data[field_name] = value
            except ValidationError as e:
                # Record the failure; other fields still get validated.
                self.field_errors[raw_field_name] = e.args and e.args[0]
            except StopValidation as e:
                # Field opted out with a default/blank value: accept it.
                self.data[field_name] = e.args and e.args[0]
        try:
            # Schema-level validation always runs, even with field errors.
            self.clean()
        except ValidationError as e:
            self.errors = [e.args and e.args[0]]
        self.raise_on_errors()
        return self.data

    def raise_on_errors(self):
        """Raise a combined ValidationError if any errors were recorded."""
        if self.field_errors or self.errors:
            raise ValidationError(
                {'field-errors': self.field_errors, 'errors': self.errors}
            )

    def external_clean(self, cls, raise_on_errors=True):
        """Run another schema class over this schema's data, merging its
        cleaned values and any errors back into this schema.
        """
        try:
            # Instantiate the external schema with the right raw_data/data.
            external_schema = cls(raw_data=self.raw_data, data=self.data)
            # Make sure its orig_data is the same as this schema's
            external_schema.orig_data = self.orig_data
            # Validate the schema and update self.data with its results.
            self.data.update(external_schema.full_clean())
        except ValidationError as e:
            self.field_errors.update(e.args[0]['field-errors'])
            self.errors += e.args[0]['errors']
            if raise_on_errors:
                self.raise_on_errors()

    def serialize(self):
        """Serialize self.data through each field, keyed by raw field name."""
        data = {}
        for field_name, field in self.fields.items():
            raw_field_name = field.raw_field_name or field_name
            value = self.data[field_name]
            data[raw_field_name] = field.serialize(value)
        return data
# Default name of the dict key PolymorphicField inspects to pick a schema.
DEFAULT_TYPE_FIELD = 'type'
class PolymorphicField(Dict):
    """A field that can be validated with one of multiple schemas,
    depending on type provided in input values.

    Requires a dictionary-shaped value. The actual schema is taken from
    `type_map` and selected via a field in the input dictionary, e.g.:

        PolymorphicField(type_map={'my_type': MySchema})

    {'type': 'my_type', 'more': 'data', ...} will then be validated with
    MySchema based on the 'my_type' value.

    keep_type_field (default False) keeps the type-dispatching key in the
    data handed to the dispatched schema.
    """

    base_type = dict

    def __init__(
        self,
        type_map=None,
        type_field=DEFAULT_TYPE_FIELD,
        keep_type_field=False,
        *args,
        **kwargs
    ):
        super(PolymorphicField, self).__init__(*args, **kwargs)
        # Bug fix: the original used a mutable default argument
        # (type_map={}), sharing one dict across all instances created
        # without an explicit map. Each instance now gets its own dict.
        self.type_map = type_map if type_map is not None else {}
        self.type_field = type_field
        self.keep_type_field = keep_type_field

    def clean(self, value):
        cleaned = super(PolymorphicField, self).clean(value)
        field_type = cleaned.get(self.type_field)
        if field_type not in self.type_map:
            raise ValidationError(
                '{} must be one of {}'.format(
                    self.type_field, ','.join(self.type_map.keys())
                )
            )
        # Strip the dispatching key (unless configured to keep it) and
        # delegate cleaning to the selected schema/field.
        payload = {
            k: v
            for k, v in value.items()
            if self.keep_type_field or k != self.type_field
        }
        return self.type_map[field_type].clean(payload)
class UUID(String):
    """Schema field for UUIDs.

    It handles deserialization from a string to a Python UUID object
    and serialization from a Python UUID object to a string.
    """

    blank_value = None

    def clean(self, value):
        raw = super(UUID, self).clean(value)
        try:
            return PythonUUID(raw)
        except ValueError:
            raise ValidationError('Not a UUID.')

    def serialize(self, value):
        # Bug fix: the original returned str(None) == 'None' for blank
        # values (blank_value is None here); mirror DateTime.serialize
        # and emit None instead.
        if value is None:
            return None
        return str(value)
class CleanDict(Dict):
    """Dict whose keys and values are each cleaned by their own field."""

    def __init__(self, key_schema, value_schema, max_length=None, **kwargs):
        super(CleanDict, self).__init__(**kwargs)
        self.key_schema = key_schema
        self.value_schema = value_schema
        self.max_length = max_length

    def clean(self, value):
        cleaned = super(CleanDict, self).clean(value)
        if self.max_length and len(cleaned) > self.max_length:
            raise ValidationError('Dict is too long.')
        errors = {}
        result = {}
        for raw_key, raw_val in cleaned.items():
            try:
                # Key is cleaned first; a key failure skips the value.
                clean_key = self.key_schema.clean(raw_key)
                clean_val = self.value_schema.clean(raw_val)
            except ValidationError as e:
                # Errors are recorded under the *raw* key.
                errors[raw_key] = e.args and e.args[0]
            else:
                result[clean_key] = clean_val
        if errors:
            raise ValidationError({'errors': errors})
        return result

    def serialize(self, value):
        # Serialize all falsy values as an empty dict.
        if not value:
            return {}
        return {
            self.key_schema.serialize(k): self.value_schema.serialize(v)
            for k, v in value.items()
        }
class EmbeddedFactory(Embedded):
    """Embedded field that builds an arbitrary object via a factory.

    The input dict is validated with schema_class (from the Embedded
    field) and the cleaned values become the factory's keyword arguments.
    """

    def __init__(self, factory, *args, **kwargs):
        self.factory = factory
        super(EmbeddedFactory, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Clean the dict of values, then construct the object from it."""
        cleaned = super(EmbeddedFactory, self).clean(value)
        return self.factory(**cleaned)
class LazyField(Field):
    """A field which is instantiated later.

    Useful for resolving circular dependencies in code.
    """
    def __init__(self, fn, *args, **kwargs):
        # NOTE: Field.__init__ is deliberately *not* called here. Any
        # attribute it would normally set (required, default, ...) is
        # missing on this proxy, so attribute lookups fall through to
        # __getattr__ below and are served by the real field.
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        # The wrapped field; created lazily on first use.
        self.real_field = None
    def get_real_field(self):
        # Instantiate the real field on first access and cache it.
        if self.real_field is None:
            self.real_field = self.fn(*self.args, **self.kwargs)
        assert self.real_field
        return self.real_field
    def __getattr__(self, item):
        # Only invoked when normal attribute lookup fails: delegate to
        # the lazily-created real field.
        return getattr(self.get_real_field(), item)
    def has_value(self, value):
        return self.get_real_field().has_value(value)
    def clean(self, value):
        return self.get_real_field().clean(value)
    def serialize(self, value):
        return self.get_real_field().serialize(value)
| |
#!/usr/bin/python
#
#Converts a LCM log to a "matrix" format that is easier to work with in external
#tools such as Matlab. The set of messages on a given channel can be represented
#as a matrix, where the columns of this matrix are the fields of the lcm type
#with one message per row
#This script was taken from libbot
#https://code.google.com/p/libbot/source/browse/bot2-lcm-utils/python/src/bot_log2mat/log_to_mat.py?r=401
import os
import sys
import binascii
import types
import numpy
import re
import getopt
# check which version for mio location
if sys.version_info < (2, 6):
import scipy.io.mio
else:
import scipy.io.matlab.mio
from lcm import EventLog
def usage():
    """Print a usage summary (stderr header + stdout option list) and exit.

    NOTE: Python 2 print statement; this whole script is Python 2 code.
    The triple-quoted block below is runtime output, kept verbatim
    (including the 'seperator' typo).
    """
    pname, sname = os.path.split(sys.argv[0])
    sys.stderr.write("usage: % s %s < filename > \n" % (sname, str(longOpts)))
    print """
    -h --help                 print this message
    -p --print                Output log data to stdout instead of to .mat
    -f --format               print the data format to stderr
    -s --seperator=sep        print data with separator [sep] instead of default to ["" ""]
    -c --channelsToProcess=chan   Parse channelsToProcess that match Python regex [chan] defaults to [".*"]
    -i --ignore=chan          Ignore channelsToProcess that match Python regex [chan]
                              ignores take precedence over includes!
    -o --outfile=ofname       output data to [ofname] instead of default [filename.mat or stdout]
    -l --lcmtype_pkgs=pkgs    load python modules from comma seperated list of packages [pkgs] defaults to ["botlcm"]
    -v                        Verbose
    """
    sys.exit()
class LCMTypeDatabase:
    """Index of LCM message classes, keyed by packed fingerprint.

    Imports each of the given packages and registers every class found
    in them that exposes ``_get_packed_fingerprint`` (the marker of an
    lcm-gen generated type).
    """

    def __init__(self, package_names, verbose):
        # Iterate over a snapshot: the original removed entries from the
        # very list it was iterating (and from the global ``lcm_packages``
        # rather than the parameter), which silently skips elements.
        for p in list(package_names):
            try:
                __import__(p)
            except Exception:
                if verbose:
                    sys.stderr.write("couldn't load module %s\n" % p)
                package_names.remove(p)
        self.klasses = {}
        for pkg in [sys.modules[n] for n in package_names]:
            for mname in dir(pkg):
                module = getattr(pkg, mname)
                # Only consider classes (works on both py2 new-style and
                # py3; replaces the py2-only types.TypeType check).
                if not isinstance(module, type):
                    continue
                # Guard: plain classes in the package don't have the
                # lcm-gen fingerprint hook — skip instead of raising.
                if not hasattr(module, '_get_packed_fingerprint'):
                    continue
                self.klasses[module._get_packed_fingerprint()] = module

    def find_type(self, packed_fingerprint):
        """Return the message class for *packed_fingerprint*, or None."""
        return self.klasses.get(packed_fingerprint, None)
# Per-channel caches, filled while streaming the log:
#   flatteners: channel name -> function turning a decoded message into a row
#   data:       channel name -> list of rows (one per message)
flatteners = {}
data = {}
def make_simple_accessor(fieldname):
    """Build an accessor appending the scalar field *fieldname* of a message to a list."""
    def accessor(lst, msg):
        lst.append(getattr(msg, fieldname))
    return accessor
def make_numpy_array_accessor(fieldname):
    """Build an accessor that flattens the array field *fieldname* into a list of scalars."""
    def accessor(lst, msg):
        flattened = numpy.array(getattr(msg, fieldname)).ravel()
        lst.extend(flattened)
    return accessor
def make_obj_accessor(fieldname, func):
    """Build an accessor delegating the sub-message field *fieldname* to *func*."""
    def accessor(lst, msg):
        return func(lst, getattr(msg, fieldname))
    return accessor
def make_obj_list_accessor(fieldname, func):
    """Build an accessor applying *func* to (lst, elem) for every element
    of the list-of-messages field *fieldname*.

    Uses an explicit loop rather than map(): relying on map() for side
    effects builds a throwaway list of Nones on Python 2 and silently
    does nothing on Python 3, where map() is lazy.
    """
    def accessor(lst, msg):
        for item in getattr(msg, fieldname):
            func(lst, item)
    return accessor
def make_lcmtype_accessor(msg):
    """Build a row-flattening function for the LCM type of *msg*.

    Returns flatten(lst, m), which appends every numeric leaf value of
    message m onto lst, in __slots__ order, with arrays flattened and
    sub-messages recursed into.  Strings are skipped entirely.
    NOTE: uses Python 2-only names (types.IntType etc.) — this script is
    Python 2 throughout.
    """
    funcs = []
    for fieldname in getattr(msg, '__slots__'):
        m = getattr(msg, fieldname)
        if type(m) in [ types.IntType, types.LongType, types.FloatType,
                        types.BooleanType ]:
            # scalar
            accessor = make_simple_accessor(fieldname)
            funcs.append(accessor)
        elif type(m) in [ types.ListType, types.TupleType ]:
            # convert to a numpy array
            arr = numpy.array(m)
            # check the data type of the array
            if arr.dtype.kind in "bif":
                # numeric data type
                funcs.append(make_numpy_array_accessor(fieldname))
            elif arr.dtype.kind == "O":
                # compound data type: recurse on the first element
                # (assumes a homogeneous, non-empty list — TODO confirm)
                typeAccess = make_lcmtype_accessor(m[0])
                funcs.append(make_obj_list_accessor(fieldname, typeAccess))
        elif type(m) in types.StringTypes:
            # ignore strings
            pass
        else:
            # nested LCM message: recurse
            funcs.append(make_obj_accessor(fieldname, make_lcmtype_accessor(m)))
    def flatten(lst, m):
        for func in funcs:
            func(lst, m)
    return flatten
def make_flattener(msg):
    """Return flattener(m) -> list of all numeric leaf values of message m."""
    fill = make_lcmtype_accessor(msg)

    def flattener(m):
        row = []
        fill(row, m)
        return row

    return flattener
def make_lcmtype_string(msg, base=True):
    """Build a human-readable column description for the LCM type of *msg*.

    Returns (typeStr, count): a list of per-field description strings and
    a running column counter.  When *base* is True (top level), entries
    are prefixed with their 1-based starting column index; nested calls
    (base=False) return bare field names for the parent to compose.
    Mirrors the traversal order of make_lcmtype_accessor.
    """
    typeStr = []
    count = 0
    for fieldname in getattr(msg, '__slots__'):
        m = getattr(msg, fieldname)
        if type(m) in [ types.IntType, types.LongType, types.FloatType, types.BooleanType ]:
            # scalar: one column
            count = count + 1
            if base:
                typeStr.append("%d- %s" % (count, fieldname))
            else:
                typeStr.append(fieldname)
        elif type(m) in [ types.ListType, types.TupleType ]:
            # convert to a numpy array
            arr = numpy.array(m)
            # check the data type of the array
            if arr.dtype.kind in "bif":
                # numeric data type: one column per flattened element
                if base:
                    typeStr.append("%d- %s(%d)" % (count + 1, fieldname, len(arr.ravel())))
                else:
                    typeStr.append("%s(%d)" % (fieldname, len(arr.ravel())))
                count = count + len(arr.ravel())
            elif arr.dtype.kind == "O":
                # compound data type: describe via the first element
                subStr, subCount = make_lcmtype_string(m[0], False)
                numSub = len(m)
                if base:
                    subStr = "%d- %s<%s>(%d)" % (count + 1, fieldname, ", ".join(subStr), numSub)
                else:
                    subStr = "%s<%s>(%d)" % (fieldname, ", ".join(subStr), numSub)
                typeStr.append(subStr)
                # NOTE(review): advances by numSub * subCount — verify this
                # agrees with the columns the flattener actually emits.
                count = count + numSub * subCount
        elif type(m) in types.StringTypes:
            # ignore strings
            pass
        else:
            # nested LCM message: prefix sub-fields with "<fieldname>."
            subStr, subCount = make_lcmtype_string(m, False);
            if base:
                for s in subStr:
                    count = count + 1
                    typeStr.append("%d- %s.%s" % (count, fieldname, s))
            else:
                count = count + subCount
                for s in subStr:
                    typeStr.append(fieldname + "." + s)
    return typeStr, count
def deleteStatusMsg(statMsg):
    """Erase a previously printed status line from stderr.

    Overwrites the old text with spaces between carriage returns, and
    returns "" so callers can reset their status variable in one step.
    """
    if statMsg:
        blanks = " " * len(statMsg)
        sys.stderr.write("\r" + blanks + "\r")
    return ""
# ---------------------------------------------------------------------------
# Command-line handling (this is a script: everything below runs on import).
#
# Bug fix: long options that take a value must be declared to getopt with a
# trailing "=", and getopt hands the parsed option back WITHOUT the "=".
# The original did both wrong, so every value-taking long option
# (--separator, --outfile, ...) was silently unusable.
# ---------------------------------------------------------------------------
longOpts = ["help", "print", "format", "separator=", "channelsToProcess=",
            "ignore=", "outfile=", "lcm_packages="]
try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], "hpvfs:c:i:o:l:", longOpts)
except getopt.GetoptError as err:
    # print help information and exit:
    print(str(err))  # will print something like "option -a not recognized"
    usage()
if len(args) != 1:
    usage()
# default options
fname = args[0]
lcm_packages = ["botlcm"]
# Derive the default .mat name from the log name; MATLAB identifiers
# cannot contain '.' or '-'.
outFname = fname
outFname = outFname.replace(".", "_")
outFname = outFname.replace("-", "_")
outFname = outFname + ".mat"
printFname = "stdout"
printFile = sys.stdout
verbose = False
printOutput = False
printFormat = False
channelsToIgnore = ""
checkIgnore = False
channelsToProcess = ".*"
separator = ' '
for o, a in opts:
    if o == "-v":
        verbose = True
    elif o in ("-h", "--help"):
        usage()
    elif o in ("-p", "--print"):
        printOutput = True
    elif o in ("-f", "--format"):
        printFormat = True
    elif o in ("-s", "--separator"):
        separator = a
    elif o in ("-o", "--outfile"):
        outFname = a
        printFname = a
    elif o in ("-c", "--channelsToProcess"):
        channelsToProcess = a
    elif o in ("-i", "--ignore"):
        channelsToIgnore = a
        checkIgnore = True
    elif o in ("-l", "--lcm_packages"):
        lcm_packages = a.split(",")
    else:
        assert False, "unhandled option"
# Resolve output paths, build the LCM type index, compile the channel
# include/ignore patterns, and open the log.
fullPathName = os.path.abspath(outFname)
dirname = os.path.dirname(fullPathName)
outBaseName = os.path.basename(outFname).split(".")[0]
fullBaseName = dirname + "/" + outBaseName
type_db = LCMTypeDatabase(lcm_packages, verbose)
channelsToProcess = re.compile(channelsToProcess)
channelsToIgnore = re.compile(channelsToIgnore)
log = EventLog(fname, "r")
if printOutput:
    sys.stderr.write("opened % s, printing output to %s \n" % (fname, printFname))
    if printFname == "stdout":
        printFile = sys.stdout
    else:
        printFile = open(printFname, "w")
else:
    sys.stderr.write("opened % s, outputing to % s\n" % (fname, outFname))
# Streaming state: channels rejected so far, message counter, the status
# line currently on stderr, and the timestamp of the first event.
ignored_channels = []
msgCount = 0
statusMsg = ""
startTime = 0
# Main pass over the log: decode each event and flatten it into one
# numeric row per message.
for e in log:
    # MATLAB can't handle hyphens, this fixes that
    e.channel = e.channel.replace('-', '_')
    if e.channel in ignored_channels:
        continue
    if msgCount == 0:
        startTime = e.timestamp
    # Ignore wins over include.  The ignore regex must match the whole
    # channel name (match() + full-length group), while the include regex
    # only needs to match a prefix.
    if ((checkIgnore and channelsToIgnore.match(e.channel)
         and len(channelsToIgnore.match(e.channel).group()) == len(e.channel))
            or (not channelsToProcess.match(e.channel))):
        if verbose:
            sys.stderr.write("ignoring channel %s\n" % e.channel)
        ignored_channels.append(e.channel)
        continue
    # The first 8 bytes of an LCM payload are the type fingerprint.
    lcmtype = type_db.find_type(e.data[:8])
    if not lcmtype:
        if verbose:
            sys.stderr.write("ignoring channel %s\n" % e.channel)
        ignored_channels.append(e.channel)
        continue
    try:
        msg = lcmtype.decode(e.data)
    except:
        sys.stderr.write("error: couldn't decode msg on channel %s" % e.channel)
        continue
    msgCount = msgCount + 1
    if (msgCount % 5000) == 0:
        # Periodic progress line on stderr.
        statusMsg = deleteStatusMsg(statusMsg)
        statusMsg = "read % d messages, % d %% done" % (msgCount, log.tell() / float(log.size()) * 100)
        sys.stderr.write(statusMsg)
        sys.stderr.flush()
    if e.channel in flatteners:
        flattener = flatteners[e.channel]
    else:
        # First message on this channel: build and cache its flattener,
        # and optionally print the column format description.
        flattener = make_flattener(msg)
        flatteners[e.channel] = flattener
        data[e.channel] = []
        if printFormat:
            statusMsg = deleteStatusMsg(statusMsg)
            typeStr, fieldCount = make_lcmtype_string(msg)
            typeStr.append("%d- log_timestamp" % (fieldCount + 1))
            typeStr = "\n#%s %s :\n#[\n#%s\n#]\n" % (e.channel, lcmtype, "\n#".join(typeStr))
            sys.stderr.write(typeStr)
    a = flattener(msg)
    # in case the initial flattener didn't work for whatever reason :-/
    # convert to a numpy array
    arr = numpy.array(a)
    # check the data type of the array
    if not(arr.dtype.kind in "bif"):
        # Row is not purely numeric: rebuild the flattener from this
        # message and try again.
        statusMsg = deleteStatusMsg(statusMsg)
        sys.stderr.write("WARNING: needed to create new flattener for channel %s\n" % (e.channel))
        flattener = make_flattener(msg)
        flatteners[e.channel] = flattener
        a = flattener(msg)
    # Last column: seconds since the first event in the log.
    a.append((e.timestamp - startTime) / 1e6)
    if printOutput:
        printFile.write("%s%s%s\n" % (e.channel, separator, separator.join([str(k) for k in a])))
    else:
        data[e.channel].append(a)
deleteStatusMsg(statusMsg)
if not printOutput:
    # need to pad variable length messages with zeros...
    for chan in data:
        # NOTE(review): lengths is indexed below (lengths[count]), which
        # requires the py2 list-returning map(); py3's lazy map would break.
        lengths = map(len, data[chan])
        maxLen = max(lengths)
        minLen = min(lengths)
        if maxLen != minLen:
            sys.stderr.write("padding channel %s with zeros, messages ranged from %d to %d \n" % (chan, minLen, maxLen))
            count = 0
            for i in data[chan]:
                pad = numpy.zeros(maxLen - lengths[count])
                i.extend(pad)
                count = count + 1
    sys.stderr.write("loaded all %d messages, saving to % s\n" % (msgCount, outFname))
    # scipy moved its MATLAB I/O module between versions; mirror the
    # import selection done at the top of the file.
    if sys.version_info < (2, 6):
        scipy.io.mio.savemat(outFname, data)
    else:
        scipy.io.matlab.mio.savemat(outFname, data)
    # Emit a small MATLAB helper <base>.m that loads the .mat file.
    # NOTE(review): full_fname is filled from outFname and fname from
    # fullPathName — names look swapped, though the function tries both
    # paths so it still loads either way; confirm before changing.
    mfile = open(dirname + "/" + outBaseName + ".m", "w")
    loadFunc = """function [d imFnames]=%s()
full_fname = '%s';
fname = '%s';
if (exist(full_fname,'file'))
    filename = full_fname;
else
    filename = fname;
end
d = load(filename);
""" % (outBaseName, outFname, fullPathName)
    mfile.write(loadFunc);
    mfile.close()
| |
import collections
import six
from . import logical
from avro import schema
from avro import io
# The avro library's validate helper is capitalized differently between
# its Python 3 and Python 2 variants; pick whichever this runtime has.
if six.PY3:
    io_validate = io.Validate
else:
    io_validate = io.validate
# Primitive Avro type names ('null', 'int', 'string', ...) for fast
# membership tests during dispatch.
_PRIMITIVE_TYPES = set(schema.PRIMITIVE_TYPES)
class AvroJsonConverter(object):
    """Converts data between the Avro JSON encoding and Python objects.

    Optionally applies logical-type conversions (``use_logical_types``),
    instantiates generated record classes (``schema_types``) when reading,
    and can emit fastavro-style tuple unions (see ``with_tuple_union``).
    """

    def __init__(self, use_logical_types=False, logical_types=logical.DEFAULT_LOGICAL_TYPES, schema_types=None):
        self.use_logical_types = use_logical_types
        self.logical_types = logical_types or {}
        self.schema_types = schema_types or {}
        # When True, unions serialize as (fullname, value) tuples as
        # fastavro expects, instead of {fullname: value} dicts.
        self.fastavro = False
        # Register self with all the schema objects.
        for klass in self.schema_types.values():
            klass._json_converter = self

    def with_tuple_union(self, enable=True) -> 'AvroJsonConverter':
        """Return a copy of this converter with tuple-union mode set to *enable*."""
        ret = AvroJsonConverter(self.use_logical_types, self.logical_types, self.schema_types)
        ret.fastavro = enable
        return ret

    def validate(self, expected_schema, datum, skip_logical_types=False):
        """Recursively check whether *datum* conforms to *expected_schema*."""
        # Logical types get first crack at validation unless suppressed.
        if self.use_logical_types and expected_schema.props.get('logicalType') and not skip_logical_types \
                and expected_schema.props.get('logicalType') in self.logical_types:
            return self.logical_types[expected_schema.props.get('logicalType')].can_convert(expected_schema) \
                and self.logical_types[expected_schema.props.get('logicalType')].validate(expected_schema, datum)
        schema_type = expected_schema.type
        if schema_type == 'array':
            return (isinstance(datum, list) and
                    False not in [self.validate(expected_schema.items, d, skip_logical_types) for d in datum])
        elif schema_type == 'map':
            return (isinstance(datum, dict) and
                    False not in [isinstance(k, six.string_types) for k in datum.keys()] and
                    False not in
                    [self.validate(expected_schema.values, v, skip_logical_types) for v in datum.values()])
        elif schema_type in ['union', 'error_union']:
            return True in [self.validate(s, datum, skip_logical_types) for s in expected_schema.schemas]
        elif schema_type in ['record', 'error', 'request']:
            return (isinstance(datum, dict) and
                    False not in
                    [self.validate(f.type, datum.get(f.name), skip_logical_types) for f in expected_schema.fields])
        # Primitives, fixed and enum fall through to the avro library.
        return io_validate(expected_schema, datum)

    def from_json_object(self, json_obj, writers_schema=None, readers_schema=None):
        """Decode *json_obj* written with *writers_schema* into the reader's view.

        Either schema may be omitted, in which case the other is used for
        both roles; at least one must be given.
        """
        if readers_schema is None:
            readers_schema = writers_schema
        if writers_schema is None:
            writers_schema = readers_schema
        if writers_schema is None:
            raise Exception('At least one schema must be specified')
        if not io.DatumReader.match_schemas(writers_schema, readers_schema):
            raise io.SchemaResolutionException('Could not match schemas', writers_schema, readers_schema)
        return self._generic_from_json(json_obj, writers_schema, readers_schema)

    def to_json_object(self, data_obj, writers_schema=None):
        """Encode *data_obj* to a JSON-compatible structure.

        The schema defaults to the object's own RECORD_SCHEMA when present.
        Raises AvroTypeException if the object fails validation.
        """
        if writers_schema is None:
            writers_schema = self._get_record_schema_if_available(data_obj)
        if writers_schema is None:
            raise Exception("Could not determine writer's schema from the object type and schema was not passed")
        assert isinstance(writers_schema, schema.Schema)
        if not self.validate(writers_schema, data_obj):
            raise io.AvroTypeException(writers_schema, data_obj)
        return self._generic_to_json(data_obj, writers_schema)

    def _fullname(self, schema_):
        """Return the namespaced name of a named schema, else its type."""
        # py3 avro prefixes fullnames with a leading '.'; strip it.
        if isinstance(schema_, schema.NamedSchema):
            return schema_.fullname if six.PY2 else schema_.fullname.lstrip('.')
        return schema_.type

    def _get_record_schema_if_available(self, data_obj):
        """Return the generated class's RECORD_SCHEMA for *data_obj*, or None."""
        if hasattr(type(data_obj), 'RECORD_SCHEMA'):
            return type(data_obj).RECORD_SCHEMA
        return None

    def _generic_to_json(self, data_obj, writers_schema):
        """Dispatch serialization by schema type (after logical conversion)."""
        if self.use_logical_types and writers_schema.props.get('logicalType'):
            lt = self.logical_types.get(writers_schema.props.get('logicalType'))  # type: logical.LogicalTypeProcessor
            if lt.can_convert(writers_schema):
                if lt.validate(writers_schema, data_obj):
                    data_obj = lt.convert(writers_schema, data_obj)
                else:
                    raise schema.AvroException(
                        'Wrong object for %s logical type' % writers_schema.props.get('logicalType'))
        if writers_schema.type in _PRIMITIVE_TYPES:
            result = self._primitive_to_json(data_obj, writers_schema)
        elif writers_schema.type == 'fixed':
            result = self._fixed_to_json(data_obj, writers_schema)
        elif writers_schema.type == 'enum':
            result = self._enum_to_json(data_obj, writers_schema)
        elif writers_schema.type == 'array':
            result = self._array_to_json(data_obj, writers_schema)
        elif writers_schema.type == 'map':
            result = self._map_to_json(data_obj, writers_schema)
        elif writers_schema.type in ['record', 'error', 'request']:
            result = self._record_to_json(data_obj, writers_schema)
        elif writers_schema.type in ['union', 'error_union']:
            result = self._union_to_json(data_obj, writers_schema)
        else:
            raise schema.AvroException('Invalid schema type: %s' % writers_schema.type)
        return result

    # Primitive, fixed and enum values are already JSON-compatible.
    def _primitive_to_json(self, data_obj, writers_schema):
        return data_obj

    def _fixed_to_json(self, data_obj, writers_schema):
        return data_obj

    def _enum_to_json(self, data_obj, writers_schema):
        return data_obj

    def _array_to_json(self, data_obj, writers_schema):
        return [self._generic_to_json(x, writers_schema.items) for x in data_obj]

    def _map_to_json(self, data_obj, writers_schema):
        return {name: self._generic_to_json(x, writers_schema.values) for name, x in six.iteritems(data_obj)}

    def _record_to_json(self, data_obj, writers_schema):
        """Encode a record dict field-by-field, applying schema defaults."""
        result = collections.OrderedDict()
        for field in writers_schema.fields:
            result[field.name] = self._generic_to_json(
                data_obj.get(field.name,
                             self.from_json_object(field.default, field.type) if field.has_default else None),
                field.type)
        return result

    def _union_to_json(self, data_obj, writers_schema):
        """Encode a union as null, a {name: value} dict or a (name, value) tuple."""
        index_of_schema = -1
        data_schema = self._get_record_schema_if_available(data_obj)
        for i, candidate_schema in enumerate(writers_schema.schemas):
            # Check for exact matches first.
            if data_schema and candidate_schema.namespace == data_schema.namespace \
                    and candidate_schema.name == data_schema.name:
                index_of_schema = i
                break
            # Fallback to schema guessing based on validation.
            if self.validate(candidate_schema, data_obj):
                index_of_schema = i
                # A boolean match is unambiguous; stop searching so a later
                # branch cannot override it.
                if candidate_schema.type == 'boolean':
                    break
        if index_of_schema < 0:
            raise io.AvroTypeException(writers_schema, data_obj)
        candidate_schema = writers_schema.schemas[index_of_schema]
        if candidate_schema.type == 'null':
            return None
        if self.fastavro:
            # Fastavro likes tuples instead of dicts for union types.
            return (self._fullname(candidate_schema), self._generic_to_json(data_obj, candidate_schema))
        return {self._fullname(candidate_schema): self._generic_to_json(data_obj, candidate_schema)}

    def _generic_from_json(self, json_obj, writers_schema, readers_schema):
        """Dispatch deserialization by schema type, then apply logical types."""
        # A non-union writer resolved against a union reader: pick the
        # first reader branch the writer's schema matches.
        if (writers_schema.type not in ['union', 'error_union']
                and readers_schema.type in ['union', 'error_union']):
            for s in readers_schema.schemas:
                if io.DatumReader.match_schemas(writers_schema, s):
                    return self._generic_from_json(json_obj, writers_schema, s)
            raise io.SchemaResolutionException('Schemas do not match', writers_schema, readers_schema)
        result = None
        if writers_schema.type == 'null':
            result = None
        elif writers_schema.type in _PRIMITIVE_TYPES:
            result = self._primitive_from_json(json_obj, writers_schema, readers_schema)
        elif writers_schema.type == 'fixed':
            result = self._fixed_from_json(json_obj, writers_schema, readers_schema)
        elif writers_schema.type == 'enum':
            result = self._enum_from_json(json_obj, writers_schema, readers_schema)
        elif writers_schema.type == 'array':
            result = self._array_from_json(json_obj, writers_schema, readers_schema)
        elif writers_schema.type == 'map':
            result = self._map_from_json(json_obj, writers_schema, readers_schema)
        elif writers_schema.type in ('union', 'error_union'):
            result = self._union_from_json(json_obj, writers_schema, readers_schema)
        elif writers_schema.type in ('record', 'error', 'request'):
            result = self._record_from_json(json_obj, writers_schema, readers_schema)
        result = self._logical_type_from_json(writers_schema, readers_schema, result)
        return result

    def _logical_type_from_json(self, writers_schema, readers_schema, result):
        """Convert a decoded value back through its logical type, if any."""
        if self.use_logical_types and readers_schema.props.get('logicalType'):
            lt = self.logical_types.get(readers_schema.props.get('logicalType'))  # type: logical.LogicalTypeProcessor
            if lt and lt.does_match(writers_schema, readers_schema):
                result = lt.convert_back(writers_schema, readers_schema, result)
        return result

    # Primitive, fixed and enum JSON values map straight through.
    def _primitive_from_json(self, json_obj, writers_schema, readers_schema):
        return json_obj

    def _fixed_from_json(self, json_obj, writers_schema, readers_schema):
        return json_obj

    def _enum_from_json(self, json_obj, writers_schema, readers_schema):
        return json_obj

    def _array_from_json(self, json_obj, writers_schema, readers_schema):
        return [self._generic_from_json(x, writers_schema.items, readers_schema.items)
                for x in json_obj]

    def _map_from_json(self, json_obj, writers_schema, readers_schema):
        return {name: self._generic_from_json(value, writers_schema.values, readers_schema.values)
                for name, value in six.iteritems(json_obj)}

    def _union_from_json(self, json_obj, writers_schema, readers_schema):
        """Decode a union: null, {name: value} dict, (name, value) tuple, or bare value."""
        # Bug fix: the collections.Mapping alias was removed in Python
        # 3.10 — import from collections.abc when available, falling back
        # for Python 2.
        try:
            from collections.abc import Mapping
        except ImportError:  # Python 2
            from collections import Mapping
        if json_obj is None:
            return None
        value_type = None
        value = None
        if not self.fastavro and isinstance(json_obj, Mapping):
            items = list(six.iteritems(json_obj))
            if not items:
                return None
            value_type = items[0][0]
            value = items[0][1]
        if self.fastavro and isinstance(json_obj, (list, tuple)):
            if len(json_obj) == 2:
                value_type = json_obj[0]
                value = json_obj[1]
        if value_type is not None:
            # The encoded type name selects the writer's branch directly.
            for s in writers_schema.schemas:
                name = self._fullname(s)
                if name == value_type:
                    return self._generic_from_json(value, s, readers_schema)
        # Otherwise guess the branch by validating the raw value.
        for s in writers_schema.schemas:
            if self.validate(s, json_obj, skip_logical_types=True):
                return self._generic_from_json(json_obj, s, readers_schema)
        raise schema.AvroException('Datum union type not in schema: %s', value_type)

    def _instantiate_record(self, decoded_record, writers_schema, readers_schema):
        """Wrap a decoded dict in its generated record class, when registered."""
        # First try the fullname, which includes namespaces.
        readers_name = self._fullname(readers_schema)
        if readers_name in self.schema_types:
            return self.schema_types[readers_name](decoded_record)
        # Fallback to the bare name, without namespace.
        readers_name = readers_schema.name
        if readers_name in self.schema_types:
            return self.schema_types[readers_name](decoded_record)
        return decoded_record

    def _record_from_json(self, json_obj, writers_schema, readers_schema):
        """Decode a record field-by-field, resolving writer/reader defaults."""
        writer_fields = writers_schema.fields_dict if six.PY2 else writers_schema.field_map
        result = {}
        for field in readers_schema.fields:
            writers_field = writer_fields.get(field.name)
            if writers_field is None:
                # Reader-only field: use the reader's default if it has one.
                field_value = self._generic_from_json(field.default, field.type, field.type) \
                    if field.has_default else None
            else:
                if field.name in json_obj:
                    field_value = self._generic_from_json(json_obj[field.name], writers_field.type, field.type)
                else:
                    # Value absent from the payload: fall back to the
                    # writer's default.
                    field_value = self._generic_from_json(writers_field.default,
                                                          writers_field.type, field.type) \
                        if writers_field.has_default else None
            result[field.name] = field_value
        return self._instantiate_record(result, writers_schema, readers_schema)
| |
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
from nose.tools import eq_, ok_
from ryu.services.protocols.bgp.utils import validation
LOG = logging.getLogger(__name__)
class Test_Utils_Validation(unittest.TestCase):
    """
    Test case for ryu.services.protocols.bgp.utils.validation

    Uses nose's ok_/eq_ helpers; each validator gets a positive case plus
    representative rejection cases (wrong type, out of range, bad format).
    """

    # --- MAC addresses -----------------------------------------------------

    def test_is_valid_mac(self):
        ok_(validation.is_valid_mac('aa:bb:cc:dd:ee:ff'))

    def test_is_valid_mac_hyphenation(self):
        ok_(validation.is_valid_mac('aa-bb-cc-dd-ee-ff'))

    def test_is_valid_mac_short(self):
        eq_(False, validation.is_valid_mac('aa:bb:cc:dd:ee'))

    # --- Prefix lengths ----------------------------------------------------

    def test_is_valid_ip_prefix(self):
        ok_(validation.is_valid_ip_prefix(24, 32))

    def test_is_valid_ip_prefix_str(self):
        ok_(validation.is_valid_ip_prefix('24', 32))

    def test_is_valid_ip_prefix_not_digit(self):
        eq_(False, validation.is_valid_ip_prefix('foo', 32))

    def test_is_valid_ip_prefix_over(self):
        eq_(False, validation.is_valid_ip_prefix(100, 32))

    # --- IPv4 / IPv6 addresses and prefixes --------------------------------

    def test_is_valid_ipv4(self):
        ok_(validation.is_valid_ipv4('10.0.0.1'))

    def test_is_valid_ipv4_not_dot(self):
        eq_(False, validation.is_valid_ipv4('192:168:0:1'))

    def test_is_valid_ipv4_prefix(self):
        ok_(validation.is_valid_ipv4_prefix('10.0.0.1/24'))

    def test_is_valid_ipv4_prefix_not_str(self):
        eq_(False, validation.is_valid_ipv4_prefix(1234))

    def test_is_valid_ipv4_prefix_without_prefix(self):
        eq_(False, validation.is_valid_ipv4_prefix('10.0.0.1'))

    def test_is_valid_ipv4_prefix_invalid_addr(self):
        eq_(False, validation.is_valid_ipv4_prefix('xxx.xxx.xxx.xxx/24'))

    def test_is_valid_ipv6(self):
        ok_(validation.is_valid_ipv6('fe80::0011:aabb:ccdd:eeff'))

    def test_is_valid_ipv6_not_colon(self):
        eq_(False, validation.is_valid_ipv6('fe80--0011-aabb-ccdd-eeff'))

    def test_is_valid_ipv6_prefix(self):
        ok_(validation.is_valid_ipv6_prefix('fe80::0011:aabb:ccdd:eeff/64'))

    def test_is_valid_ipv6_prefix_not_str(self):
        eq_(False, validation.is_valid_ipv6_prefix(1234))

    def test_is_valid_ipv6_prefix_without_prefix(self):
        eq_(False,
            validation.is_valid_ipv6_prefix('fe80::0011:aabb:ccdd:eeff'))

    def test_is_valid_ipv6_prefix_invalid_addr(self):
        eq_(False, validation.is_valid_ipv6_prefix('xxxx::xxxx/64'))

    # --- AS numbers (2-byte "old" ASNs vs 4-byte ASNs) ----------------------

    def test_is_valid_old_asn(self):
        ok_(validation.is_valid_old_asn(65000))

    def test_is_valid_old_asn_negative(self):
        eq_(False, validation.is_valid_old_asn(-1))

    def test_is_valid_old_asn_over(self):
        # 2-byte ASN upper bound.
        eq_(False, validation.is_valid_old_asn(0xffff + 1))

    def test_is_valid_asn(self):
        ok_(validation.is_valid_asn(6553800))

    def test_is_valid_asn_old(self):
        ok_(validation.is_valid_asn(65000))

    def test_is_valid_asn_negative(self):
        eq_(False, validation.is_valid_asn(-1))

    def test_is_valid_asn_over(self):
        # 4-byte ASN upper bound.
        eq_(False, validation.is_valid_asn(0xffffffff + 1))

    # --- VPNv4 / VPNv6 prefixes (RD:address/length) ------------------------

    def test_is_valid_vpnv4_prefix(self):
        ok_(validation.is_valid_vpnv4_prefix('100:200:10.0.0.1/24'))

    def test_is_valid_vpnv4_prefix_not_str(self):
        eq_(False, validation.is_valid_vpnv4_prefix(1234))

    def test_is_valid_vpnv4_prefix_short_rd(self):
        eq_(False, validation.is_valid_vpnv4_prefix('100:10.0.0.1/24'))

    def test_is_valid_vpnv4_prefix_invalid_rd(self):
        eq_(False, validation.is_valid_vpnv4_prefix('foo:bar:10.0.0.1/24'))

    def test_is_valid_vpnv6_prefix(self):
        ok_(validation.is_valid_vpnv6_prefix(
            '100:200:fe80::0011:aabb:ccdd:eeff/64'))

    def test_is_valid_vpnv6_prefix_not_str(self):
        eq_(False, validation.is_valid_vpnv6_prefix(1234))

    def test_is_valid_vpnv6_prefix_short_rd(self):
        eq_(False, validation.is_valid_vpnv6_prefix('100:eeff/64'))

    def test_is_valid_vpnv6_prefix_invalid_rd(self):
        eq_(False, validation.is_valid_vpnv6_prefix('foo:bar:10.0.0.1/24'))

    # --- Path attributes: MED, MPLS labels ---------------------------------

    def test_is_valid_med(self):
        ok_(validation.is_valid_med(100))

    def test_is_valid_med_not_num(self):
        eq_(False, validation.is_valid_med('foo'))

    def test_is_valid_med_negative(self):
        eq_(False, validation.is_valid_med(-1))

    def test_is_valid_med_over(self):
        eq_(False, validation.is_valid_med(0xffffffff + 1))

    def test_is_valid_mpls_label(self):
        ok_(validation.is_valid_mpls_label(100))

    def test_is_valid_mpls_label_reserved(self):
        # 4 falls in the reserved label range — rejected by the validator.
        eq_(False, validation.is_valid_mpls_label(4))

    def test_is_valid_mpls_label_not_num(self):
        eq_(False, validation.is_valid_mpls_label('foo'))

    def test_is_valid_mpls_label_negative(self):
        eq_(False, validation.is_valid_mpls_label(-1))

    def test_is_valid_mpls_label_over(self):
        # Labels are 20-bit values.
        eq_(False, validation.is_valid_mpls_label(0x100000 + 1))

    def test_is_valid_mpls_labels(self):
        ok_(validation.is_valid_mpls_labels([100, 200]))

    def test_is_valid_mpls_labels_not_list(self):
        eq_(False, validation.is_valid_mpls_labels(100))

    def test_is_valid_mpls_labels_with_invalid_label(self):
        eq_(False, validation.is_valid_mpls_labels(['foo', 200]))

    # --- Route distinguishers and EVPN fields ------------------------------

    def test_is_valid_route_dist(self):
        ok_(validation.is_valid_route_dist('65000:222'))

    def test_is_valid_route_dist_ipv4_based(self):
        ok_(validation.is_valid_route_dist('10.0.0.1:333'))

    def test_is_valid_route_not_str(self):
        eq_(False, validation.is_valid_route_dist(65000))

    def test_is_valid_route_dist_short(self):
        eq_(False, validation.is_valid_route_dist('65000'))

    def test_is_valid_route_dist_invalid_ipv4_addr(self):
        eq_(False, validation.is_valid_route_dist('xxx.xxx.xxx.xxx:333'))

    def test_is_valid_esi(self):
        ok_(validation.is_valid_esi(100))

    def test_is_valid_esi_not_int(self):
        eq_(False, validation.is_valid_esi('foo'))

    def test_is_valid_ethernet_tag_id(self):
        ok_(validation.is_valid_ethernet_tag_id(100))

    def test_is_valid_ethernet_tag_id_not_int(self):
        eq_(False, validation.is_valid_ethernet_tag_id('foo'))

    def test_is_valid_ethernet_tag_id_negative(self):
        eq_(False, validation.is_valid_ethernet_tag_id(-1))

    def test_is_valid_ethernet_tag_id_over(self):
        eq_(False, validation.is_valid_ethernet_tag_id(0xffffffff + 1))

    def test_is_valid_vni(self):
        ok_(validation.is_valid_vni(100))

    def test_is_valid_vni_not_int(self):
        eq_(False, validation.is_valid_vni('foo'))

    def test_is_valid_vni_negative(self):
        eq_(False, validation.is_valid_vni(-1))

    def test_is_valid_vni_over(self):
        # VNIs are 24-bit values.
        eq_(False, validation.is_valid_vni(0xffffff + 1))
| |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Preprocess a QA file into TFRecords for training dualencoder models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import json
import random
from absl import app
from absl import flags
from bert import tokenization
import six
import tensorflow.compat.v1 as tf
from tqdm import tqdm
# Command-line interface for this preprocessing script (absl flags).
FLAGS = flags.FLAGS

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_string("input_file", None,
                    "The input JSONL file (gzipped) to read questions from.")

flags.DEFINE_string("output_file", None,
                    "The output TFRecord file to store examples to.")

flags.DEFINE_string("feature_file", None,
                    "The output TFRecord file to store examples to.")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer(
    "max_seq_length", 192,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

flags.DEFINE_integer(
    "doc_stride", 64,
    "When splitting up a long document into chunks, how much stride to "
    "take between chunks.")

flags.DEFINE_integer(
    "max_query_length", 48,
    "The maximum number of tokens for the question. Questions longer than "
    "this will be truncated to this length.")

flags.DEFINE_bool(
    "version_2_with_negative", True,
    "If true, the input contain some that do not have an answer.")
class Example(object):
  """A single training/test example for simple sequence classification.

  For examples without an answer, the start and end position are -1.
  """

  def __init__(self,
               qas_id,
               question_text,
               doc_tokens,
               orig_answer_text=None,
               start_position=None,
               end_position=None,
               is_impossible=False):
    self.qas_id = qas_id
    self.question_text = question_text
    self.doc_tokens = doc_tokens
    self.orig_answer_text = orig_answer_text
    self.start_position = start_position
    self.end_position = end_position
    self.is_impossible = is_impossible

  def __str__(self):
    return self.__repr__()

  def __repr__(self):
    # Bug fix: the original guarded all three optional fields with
    # `if self.start_position:` (copy/paste), so end_position and
    # is_impossible were dropped whenever start_position was falsy —
    # including the perfectly valid position 0.
    s = ""
    s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
    s += ", question_text: %s" % (
        tokenization.printable_text(self.question_text))
    s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
    if self.start_position is not None:
      s += ", start_position: %d" % (self.start_position)
    if self.end_position is not None:
      s += ", end_position: %d" % (self.end_position)
    if self.is_impossible:
      s += ", is_impossible: %r" % (self.is_impossible)
    return s
def read_examples(input_file):
  """Read a SQuAD-like json file into a list of Examples.

  Expects MRQA-style JSONL (optionally gzipped): a header line followed by
  one JSON object per line with "context" and "qas" entries.
  """

  def is_whitespace(c):
    # 0x202F is a narrow no-break space seen in some corpora.
    if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
      return True
    return False

  reader = tf.gfile.Open(input_file, "rb")
  if input_file.endswith(".gz"):
    reader = gzip.GzipFile(fileobj=reader)
  # Skip the header line.
  next(reader)
  examples = []
  for line in tqdm(reader):
    item = json.loads(line.strip())
    paragraph_text = item["context"]
    # Whitespace-tokenize the context, remembering for every character
    # which token it falls inside.
    doc_tokens = []
    char_to_word_offset = []
    prev_is_whitespace = True
    for c in paragraph_text:
      if is_whitespace(c):
        prev_is_whitespace = True
      else:
        if prev_is_whitespace:
          doc_tokens.append(c)
        else:
          doc_tokens[-1] += c
        prev_is_whitespace = False
      char_to_word_offset.append(len(doc_tokens) - 1)
    for qa in item["qas"]:
      qas_id = qa["qid"]
      question_text = qa["question"]
      start_position = None
      end_position = None
      orig_answer_text = None
      is_impossible = False
      # Defaults for unanswerable questions.
      start_position = -1
      end_position = -1
      orig_answer_text = ""
      if FLAGS.version_2_with_negative:
        is_impossible = qa["is_impossible"]
      if not is_impossible:
        # Use the first detected answer span (char offsets inclusive) and
        # map it onto token positions.
        answer_offset = qa["detected_answers"][0]["char_spans"][0][0]
        answer_end = qa["detected_answers"][0]["char_spans"][0][1]
        answer_length = answer_end - answer_offset + 1
        orig_answer_text = item["context"][answer_offset:answer_end + 1]
        start_position = char_to_word_offset[answer_offset]
        end_position = char_to_word_offset[answer_offset + answer_length - 1]
        # Only add answers where the text can be exactly recovered from the
        # document. If this CAN'T happen it's likely due to weird Unicode
        # stuff so we will just skip the example.
        #
        # Note that this means for training mode, every example is NOT
        # guaranteed to be preserved.
        actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
        cleaned_answer_text = " ".join(
            tokenization.whitespace_tokenize(orig_answer_text))
        if actual_text.find(cleaned_answer_text) == -1:
          tf.logging.warning("Example %d", len(examples))
          tf.logging.warning(json.dumps(item, indent=2))
          tf.logging.warning("Could not find answer: '%s' vs. '%s'",
                             actual_text, cleaned_answer_text)
          continue
      example = Example(
          qas_id=qas_id,
          question_text=question_text,
          doc_tokens=doc_tokens,
          orig_answer_text=orig_answer_text,
          start_position=start_position,
          end_position=end_position,
          is_impossible=is_impossible)
      examples.append(example)
  reader.close()
  return examples
class FeatureWriter(object):
  """Serializes InputFeatures into a TFRecord file of tf.train.Examples."""

  def __init__(self, filename):
    # Remember the output path and keep a running count of written records.
    self.filename = filename
    self.num_features = 0
    self._writer = tf.python_io.TFRecordWriter(filename)

  def process_feature(self, feature):
    """Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
    self.num_features += 1

    def _int_feature(values):
      return tf.train.Feature(
          int64_list=tf.train.Int64List(value=list(values)))

    def _bytes_feature(value):
      return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    # Field order is preserved via OrderedDict, matching the original layout.
    record = collections.OrderedDict()
    record["unique_ids"] = _int_feature([feature.unique_id])
    record["qas_id"] = _bytes_feature(feature.qas_id)
    record["doc_input_ids"] = _int_feature(feature.doc_input_ids)
    record["doc_input_mask"] = _int_feature(feature.doc_input_mask)
    record["doc_segment_ids"] = _int_feature(feature.doc_segment_ids)
    record["qry_input_ids"] = _int_feature(feature.qry_input_ids)
    record["qry_input_mask"] = _int_feature(feature.qry_input_mask)
    record["qry_segment_ids"] = _int_feature(feature.qry_segment_ids)
    record["start_positions"] = _int_feature([feature.start_position])
    record["end_positions"] = _int_feature([feature.end_position])
    record["is_impossible"] = _int_feature(
        [1 if feature.is_impossible else 0])

    tf_example = tf.train.Example(features=tf.train.Features(feature=record))
    self._writer.write(tf_example.SerializeToString())

  def close(self):
    """Close the underlying TFRecordWriter."""
    self._writer.close()
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
class InputFeatures(object):
  """A single set of features of data.

  Pure value container: every constructor argument is stored verbatim as a
  same-named instance attribute (callers elsewhere serialize it via
  ``vars(...)``, so the attribute set must exactly mirror the arguments).
  """

  def __init__(self,
               unique_id,
               qas_id,
               example_index,
               doc_span_index,
               doc_tokens,
               doc_token_to_orig_map,
               doc_token_is_max_context,
               doc_input_ids,
               doc_input_mask,
               doc_segment_ids,
               qry_tokens,
               qry_input_ids,
               qry_input_mask,
               qry_segment_ids,
               start_position=None,
               end_position=None,
               is_impossible=None):
    # Bulk-assign all arguments as instance attributes of the same name.
    self.__dict__.update(
        unique_id=unique_id,
        qas_id=qas_id,
        example_index=example_index,
        doc_span_index=doc_span_index,
        doc_tokens=doc_tokens,
        doc_token_to_orig_map=doc_token_to_orig_map,
        doc_token_is_max_context=doc_token_is_max_context,
        doc_input_ids=doc_input_ids,
        doc_input_mask=doc_input_mask,
        doc_segment_ids=doc_segment_ids,
        qry_tokens=qry_tokens,
        qry_input_ids=qry_input_ids,
        qry_input_mask=qry_input_mask,
        qry_segment_ids=qry_segment_ids,
        start_position=start_position,
        end_position=end_position,
        is_impossible=is_impossible)
def convert_examples_to_features(examples, tokenizer, max_doc_length,
                                 doc_stride, max_query_length, output_fn):
  """Loads a data file into a list of `InputBatch`s.

  One example may yield several features: documents longer than
  `max_doc_length` are split into overlapping windows (sliding by
  `doc_stride`), and each window becomes one InputFeatures passed to
  `output_fn`.

  Args:
    examples: iterable of Example objects (doc_tokens, question_text, answer
      positions, is_impossible).
    tokenizer: WordPiece tokenizer exposing `tokenize` and
      `convert_tokens_to_ids`.
    max_doc_length: max document sequence length, including [CLS] and [SEP].
    doc_stride: stride between consecutive document windows.
    max_query_length: max query sequence length, including [CLS] and [SEP].
    output_fn: callback invoked with each finished InputFeatures.
  """
  unique_id = 1000000000

  for (example_index, example) in tqdm(enumerate(examples)):
    query_tokens = tokenizer.tokenize(example.question_text)

    if len(query_tokens) > max_query_length - 2:
      query_tokens = query_tokens[0:max_query_length - 2]  # -2 for [CLS], [SEP]

    # Maps between whitespace-token indices and WordPiece sub-token indices.
    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    for (i, token) in enumerate(example.doc_tokens):
      orig_to_tok_index.append(len(all_doc_tokens))
      sub_tokens = tokenizer.tokenize(token)
      for sub_token in sub_tokens:
        tok_to_orig_index.append(i)
        all_doc_tokens.append(sub_token)

    # Project the word-level answer span onto WordPiece token indices.
    tok_start_position = None
    tok_end_position = None
    if example.is_impossible:
      tok_start_position = -1
      tok_end_position = -1
    if not example.is_impossible:
      tok_start_position = orig_to_tok_index[example.start_position]
      if example.end_position < len(example.doc_tokens) - 1:
        tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
      else:
        tok_end_position = len(all_doc_tokens) - 1
      (tok_start_position, tok_end_position) = _improve_answer_span(
          all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
          example.orig_answer_text)

    # The -2 accounts for [CLS] and [SEP]
    max_tokens_for_doc = max_doc_length - 2

    # We can have documents that are longer than the maximum sequence length.
    # To deal with this we do a sliding window approach, where we take chunks
    # of the up to our max length with a stride of `doc_stride`.
    _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
        "DocSpan", ["start", "length"])
    doc_spans = []
    start_offset = 0
    while start_offset < len(all_doc_tokens):
      length = len(all_doc_tokens) - start_offset
      if length > max_tokens_for_doc:
        length = max_tokens_for_doc
      doc_spans.append(_DocSpan(start=start_offset, length=length))
      if start_offset + length == len(all_doc_tokens):
        break
      start_offset += min(length, doc_stride)

    for (doc_span_index, doc_span) in enumerate(doc_spans):
      doc_tokens, qry_tokens = [], []
      doc_token_to_orig_map = {}
      doc_token_is_max_context = {}
      doc_segment_ids, qry_segment_ids = [], []

      # Question: [CLS] q1 ... qn [SEP], all segment id 0.
      qry_tokens.append("[CLS]")
      qry_segment_ids.append(0)
      for token in query_tokens:
        qry_tokens.append(token)
        qry_segment_ids.append(0)
      qry_tokens.append("[SEP]")
      qry_segment_ids.append(0)

      # Document: [CLS] d1 ... dm [SEP], all segment id 1.
      doc_tokens.append("[CLS]")
      doc_segment_ids.append(1)
      for i in range(doc_span.length):
        split_token_index = doc_span.start + i
        doc_token_to_orig_map[len(
            doc_tokens)] = tok_to_orig_index[split_token_index]

        is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                               split_token_index)
        doc_token_is_max_context[len(doc_tokens)] = is_max_context
        doc_tokens.append(all_doc_tokens[split_token_index])
        doc_segment_ids.append(1)
      doc_tokens.append("[SEP]")
      doc_segment_ids.append(1)

      doc_input_ids = tokenizer.convert_tokens_to_ids(doc_tokens)
      qry_input_ids = tokenizer.convert_tokens_to_ids(qry_tokens)

      # The mask has 1 for real tokens and 0 for padding tokens. Only real
      # tokens are attended to.
      doc_input_mask = [1] * len(doc_input_ids)
      qry_input_mask = [1] * len(qry_input_ids)

      # Zero-pad up to the sequence length.
      while len(doc_input_ids) < max_doc_length:
        doc_input_ids.append(0)
        doc_input_mask.append(0)
        doc_segment_ids.append(0)
      while len(qry_input_ids) < max_query_length:
        qry_input_ids.append(0)
        qry_input_mask.append(0)
        qry_segment_ids.append(0)

      assert len(doc_input_ids) == max_doc_length
      assert len(doc_input_mask) == max_doc_length
      assert len(doc_segment_ids) == max_doc_length
      assert len(qry_input_ids) == max_query_length
      assert len(qry_input_mask) == max_query_length
      assert len(qry_segment_ids) == max_query_length

      start_position = None
      end_position = None
      doc_start = doc_span.start
      doc_end = doc_span.start + doc_span.length - 1
      # Positions shift by one because of the leading [CLS] in doc_tokens.
      doc_offset = 1
      if not example.is_impossible:
        # For training, if our document chunk does not contain an annotation
        # we throw it out, since there is nothing to predict.
        # NOTE(review): in fact out-of-span windows are NOT discarded here;
        # they are kept with start/end set to 0 (the [CLS] slot).
        out_of_span = False
        if not (tok_start_position >= doc_start and
                tok_end_position <= doc_end):
          out_of_span = True
        if out_of_span:
          start_position = 0
          end_position = 0
        else:
          start_position = tok_start_position - doc_start + doc_offset
          end_position = tok_end_position - doc_start + doc_offset

      if example.is_impossible:
        start_position = 0
        end_position = 0

      # Log the first 20 examples for manual inspection.
      if example_index < 20:
        tf.logging.info("*** Example ***")
        tf.logging.info("unique_id: %s", unique_id)
        tf.logging.info("example_index: %s", example_index)
        tf.logging.info("doc_span_index: %s", doc_span_index)
        tf.logging.info(
            "doc_tokens: %s",
            " ".join([tokenization.printable_text(x) for x in doc_tokens]))
        tf.logging.info(
            "qry_tokens: %s",
            " ".join([tokenization.printable_text(x) for x in qry_tokens]))
        tf.logging.info(
            "doc_token_to_orig_map: %s", " ".join([
                "%d:%d" % (x, y)
                for (x, y) in six.iteritems(doc_token_to_orig_map)
            ]))
        tf.logging.info(
            "doc_token_is_max_context: %s", " ".join([
                "%d:%s" % (x, y)
                for (x, y) in six.iteritems(doc_token_is_max_context)
            ]))
        tf.logging.info("doc_input_ids: %s",
                        " ".join([str(x) for x in doc_input_ids]))
        tf.logging.info("doc_input_mask: %s",
                        " ".join([str(x) for x in doc_input_mask]))
        tf.logging.info("doc_segment_ids: %s",
                        " ".join([str(x) for x in doc_segment_ids]))
        tf.logging.info("qry_input_ids: %s",
                        " ".join([str(x) for x in qry_input_ids]))
        tf.logging.info("qry_input_mask: %s",
                        " ".join([str(x) for x in qry_input_mask]))
        tf.logging.info("qry_segment_ids: %s",
                        " ".join([str(x) for x in qry_segment_ids]))
        if example.is_impossible:
          tf.logging.info("impossible example")
        if not example.is_impossible:
          answer_text = " ".join(doc_tokens[start_position:(end_position + 1)])
          tf.logging.info("start_position: %d", start_position)
          tf.logging.info("end_position: %d", end_position)
          tf.logging.info("answer: %s",
                          tokenization.printable_text(answer_text))

      feature = InputFeatures(
          unique_id=unique_id,
          qas_id=example.qas_id.encode("utf-8"),
          example_index=example_index,
          doc_span_index=doc_span_index,
          doc_tokens=doc_tokens,
          doc_token_to_orig_map=doc_token_to_orig_map,
          doc_token_is_max_context=doc_token_is_max_context,
          doc_input_ids=doc_input_ids,
          doc_input_mask=doc_input_mask,
          doc_segment_ids=doc_segment_ids,
          qry_tokens=qry_tokens,
          qry_input_ids=qry_input_ids,
          qry_input_mask=qry_input_mask,
          qry_segment_ids=qry_segment_ids,
          start_position=start_position,
          end_position=end_position,
          is_impossible=example.is_impossible)

      # Run callback
      output_fn(feature)

      unique_id += 1
def main(_):
  """Reads FLAGS.input_file, converts examples, writes TFRecord features."""
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
  examples = read_examples(input_file=FLAGS.input_file)

  # Pre-shuffle with a fixed seed so the `input_fn` does not need a very
  # large shuffle buffer, while keeping runs reproducible.
  random.Random(12345).shuffle(examples)

  # Features are streamed to disk rather than stored as very large constant
  # tensors in memory; a side list keeps them for the optional JSON dump.
  feature_writer = FeatureWriter(filename=FLAGS.output_file)
  collected = []

  def _collect(feature):
    collected.append(feature)
    feature_writer.process_feature(feature)

  convert_examples_to_features(
      examples=examples,
      tokenizer=tokenizer,
      max_doc_length=FLAGS.max_seq_length,
      doc_stride=FLAGS.doc_stride,
      max_query_length=FLAGS.max_query_length,
      output_fn=_collect)
  feature_writer.close()

  tf.logging.info("%d original examples read.", len(examples))
  tf.logging.info("%d split records written.", feature_writer.num_features)

  # Optionally dump raw examples + features as JSON for debugging.
  if FLAGS.feature_file is not None:
    json.dump([[vars(ee) for ee in examples], [vars(ff) for ff in collected]],
              tf.gfile.Open(FLAGS.feature_file, "w"))
# Script entry point: hand control to `main` through app.run.
if __name__ == "__main__":
  app.run(main)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""cond_v2 and gradient.
This is a version of cond that emits a single If op, as well as the gradient
function for If ops produced by cond_v2. This will eventually replace the
current tf.cond implementation once it reaches feature and performance parity.
NOTE: most users of cond_v2 should import cond_v2, not this module! This module
does not contain all the necessary imports to prevent circular dependencies,
while cond_v2 does.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.util import compat
# The following modules cannot be imported directly because they cause circular
# dependencies. These are set in each corresponding module.
_function = None
_function_def_to_graph = None
_gradients_impl = None
# NOTE(skyewm): TensorFlow uses protected class methods and fields to signify
# that they aren't part of the official public API. These protected members
# often need to be used by implementation code however. Rather than litter the
# code with pylint comments, we ignore protected access violations for
# readability.
# pylint: disable=protected-access
def cond_v2(pred, true_fn, false_fn, name="cond"):
  """Like tf.cond, except emits a single If op.

  Args:
    pred: the predicate tensor for the emitted If op.
    true_fn: callable (no arguments) building the true-branch subgraph.
    false_fn: callable (no arguments) building the false-branch subgraph.
    name: optional name scope; falsy values fall back to "cond".

  Returns:
    The branch output tensors, with the intermediate outputs that were added
    for gradient computation stripped off.
  """
  if not name:
    name = "cond"

  with ops.name_scope(name) as scope:
    # Identify if there is a caller device, & get the innermost if possible.
    device_stack = ops.get_default_graph()._device_function_stack
    caller_device = device_stack[-1] if device_stack else None

    # Propagate the caller's colocation/container/collection state into the
    # branch function graphs.
    caller_colocation_stack = ops.get_default_graph()._colocation_stack
    caller_container = ops.get_default_graph()._container
    caller_collection_ref = ops.get_default_graph()._collections

    func_name_prefix = scope.replace("/", "_")

    true_graph = _function.func_graph_from_py_func(
        true_fn, [], [],
        name="%strue" % func_name_prefix,
        device=caller_device,
        colocation_stack=caller_colocation_stack,
        collections_ref=caller_collection_ref,
        container=caller_container)
    false_graph = _function.func_graph_from_py_func(
        false_fn, [], [],
        name="%sfalse" % func_name_prefix,
        device=caller_device,
        colocation_stack=caller_colocation_stack,
        collections_ref=caller_collection_ref,
        container=caller_container)
    _check_same_outputs(true_graph, false_graph)

    # Add inputs to true_graph and false_graph to make them match. Note that
    # this modifies true_graph and false_graph.
    cond_inputs = _make_inputs_match(true_graph, false_graph,
                                     true_graph.extra_inputs,
                                     false_graph.extra_inputs)

    # Add all intermediate tensors as function outputs so they're available for
    # the gradient computation.
    true_intermediates = _get_intermediates(true_graph)
    false_intermediates = _get_intermediates(false_graph)

    # Save the original number of outputs to return to the caller.
    num_cond_outputs = len(true_graph.outputs)

    # Make the number/type of new intermediate outputs match.
    extra_true_outputs, extra_false_outputs = _pad_params(
        true_graph, false_graph, true_intermediates, false_intermediates)

    true_graph.outputs.extend(extra_true_outputs)
    false_graph.outputs.extend(extra_false_outputs)

    # Create the If op.
    tensors = gen_functional_ops._if(
        pred, cond_inputs, [t.dtype for t in true_graph.outputs],
        _create_new_tf_function(true_graph),
        _create_new_tf_function(false_graph),
        name=scope)

    # Set the flag to enable lowering on the `if` op if necessary
    # Lowering allows cond_v2 to avoid some of the limitations of Functions,
    # allowing users to specify devices & colocation inside of cond_v2 branches,
    # and enabling non-strict evaluation & partial pruning of cond_v2 branches.
    # This brings cond_v2 closer to feature parity with tf.cond.
    #
    # However, we do not lower `If` in the XLA context because it is easier for
    # XLA to apply its own optimizations when dealing with un-lowered `If`
    # operators than with lowered switch/merge control flow.
    #
    # TODO(b/110167197) this approach requires cond_v2 to have at least 1 output
    if_op = tensors[0].op
    if not control_flow_util.IsInXLAContext(if_op):
      if_op._set_attr("_lower_using_switch_merge",
                      attr_value_pb2.AttrValue(b=True))

    # Only expose the caller's original outputs; the appended intermediates
    # exist solely for the gradient computation.
    return tensors[:num_cond_outputs]
@ops.RegisterGradient("If")
def _IfGrad(op, *grads):  # pylint: disable=invalid-name
  """The gradient of an If op produced by cond_v2.

  Builds a second If op on the same predicate that runs the gradient graph of
  the corresponding forward branch, given the output gradients `grads`.
  """
  true_graph, false_graph = _get_func_graphs(op)
  # Note: op.graph != ops.get_default_graph() when we are computing the gradient
  # of a nested cond.
  assert true_graph._outer_graph == op.graph
  assert false_graph._outer_graph == op.graph

  # Create grad functions that compute the gradient of the true/false forward
  # graphs. These functions will capture tensors from the forward pass
  # functions.
  true_grad_graph = _create_grad_func(
      true_graph, grads, _get_grad_fn_name(true_graph))
  false_grad_graph = _create_grad_func(
      false_graph, grads, _get_grad_fn_name(false_graph))

  # Both gradient branches must have identical output signatures for the
  # gradient If op to be well-formed.
  assert ([t.dtype for t in true_grad_graph.outputs] ==
          [t.dtype for t in false_grad_graph.outputs])

  # Resolve references to forward graph tensors in grad graphs and ensure
  # they are in-scope, i.e., belong to one of outer graphs of the grad graph.
  true_grad_extra_inputs = _resolve_grad_inputs(true_graph, true_grad_graph)
  false_grad_extra_inputs = _resolve_grad_inputs(false_graph, false_grad_graph)

  # Make the inputs to true_grad_graph and false_grad_graph match. Note that
  # this modifies true_grad_graph and false_grad_graph.
  grad_inputs = _make_inputs_match(true_grad_graph, false_grad_graph,
                                   true_grad_extra_inputs,
                                   false_grad_extra_inputs)

  # Add all intermediate tensors as function outputs so they're available for
  # higher-order gradient computations.
  true_grad_intermediates = _get_intermediates(true_grad_graph)
  false_grad_intermediates = _get_intermediates(false_grad_graph)

  # Save the original number of gradient outputs to return.
  num_grad_outputs = len(true_grad_graph.outputs)

  # Make the number/type of new intermediate outputs match.
  extra_true_grad_outputs, extra_false_grad_outputs = _pad_params(
      true_grad_graph, false_grad_graph,
      true_grad_intermediates, false_grad_intermediates)

  true_grad_graph.outputs.extend(extra_true_grad_outputs)
  false_grad_graph.outputs.extend(extra_false_grad_outputs)

  # Create the gradient If op.
  tensors = gen_functional_ops._if(
      op.inputs[0], grad_inputs, [t.dtype for t in true_grad_graph.outputs],
      _create_new_tf_function(true_grad_graph),
      _create_new_tf_function(false_grad_graph))

  # The predicate has no gradient.
  return [None] + tensors[:num_grad_outputs]
def _get_func_graphs(if_op):
  """Returns `_FuncGraph`s for the input op branches.

  Args:
    if_op: The _If Operation.

  Returns:
    A 2-tuple of the `_FuncGraph`s of the then_branch and else_branch.
  """
  def _get_func_graph_for_branch(branch_name):
    """Generates and returns a _FuncGraph for the given branch."""
    extra_inputs = if_op.inputs[1:]  # First input is pred.
    input_shapes = [t.shape for t in extra_inputs]
    # Look up the branch FunctionDef registered on the If op's graph.
    func_name = if_op.get_attr(branch_name).name
    fdef = if_op.graph._get_function(func_name).definition
    # `if_op.graph` may not be the same as `ops.get_default_graph()` e.g.
    # in the case of nested if ops or when the gradient is being computed
    # from inside a Defun. We build the `func_graph` with `if_op.graph` as its
    # `outer_graph`. This resembles how the `_FuncGraph` was built in the
    # forward pass. We need this so that we can resolve references to tensors
    # in `func_graph` from its gradient graph in `_resolve_grad_inputs`.
    with if_op.graph.as_default():
      func_graph = _function_def_to_graph.function_def_to_graph(
          fdef, input_shapes)
    # Rebuild the capture bookkeeping the forward _FuncGraph would have had.
    func_graph.extra_inputs = extra_inputs
    func_graph.extra_args = func_graph.inputs
    func_graph._captured = dict(zip(extra_inputs, func_graph.inputs))
    # Set the if op so that the gradient code can use it.
    func_graph._if = if_op
    return func_graph

  return (_get_func_graph_for_branch("then_branch"),
          _get_func_graph_for_branch("else_branch"))
def _grad_fn(func_graph, grads):
  """The gradient function for each conditional branch.

  This function builds the gradient graph of the corresponding forward-pass
  conditional branch in `func_graph`. This is done by differentiating
  func_graph's outputs w.r.t. its inputs.

  Args:
    func_graph: function._FuncGraph. The corresponding forward-pass function.
    grads: The list of input gradient Tensors.

  Returns:
    The output gradient Tensors.
  """
  # Filter out untrainable function outputs.
  # NOTE(skyewm): If we don't do this, the untrainable tensors can sometimes
  # cause _GradientsHelper to raise an exception (e.g. the implementation
  # doesn't expect 'ys' to contain boolean tensors).
  assert len(func_graph.outputs) == len(grads)
  ys = []
  grad_ys = []
  for y, grad_y in zip(func_graph.outputs, grads):
    if not _gradients_impl._IsTrainable(y):
      continue
    ys.append(y)
    grad_ys.append(grad_y)

  # Build the gradient graph. Note that this builds the gradient computation of
  # func_graph in the current graph, which requires capturing tensors from
  # func_graph. The captured func_graph tensors are resolved to external tensors
  # in _resolve_grad_inputs.
  result = _gradients_impl._GradientsHelper(
      ys, func_graph.inputs, grad_ys=grad_ys,
      src_graph=func_graph)

  # Functions can't return None; replace Nones with zero tensors.
  # TODO(b/80444525): don't return anything here and make _IfGrad return None if
  # both branches have zero gradient.
  for i in range(len(result)):
    if result[i] is None:
      result[i] = array_ops.zeros_like(func_graph.inputs[i])

  return result
def _create_grad_func(func_graph, grads, name):
  """Returns the _FuncGraph representation of _grad_fn.

  Wraps `_grad_fn(func_graph, grads)` in a zero-argument callable and builds
  it into a new _FuncGraph named `name`.
  """
  def _build_grad():
    return _grad_fn(func_graph, grads)

  return _function.func_graph_from_py_func(_build_grad, [], [], name)
def _resolve_grad_inputs(cond_graph, grad_graph):
  """Returns the tensors to pass as `extra_inputs` to `grad_graph`.

  The `grad_graph` may have external references to
  1. Its outer graph containing the input gradients. These references are kept
     as is.
  2. Tensors in the forward pass graph. These tensors may not be "live"
     when the gradient is being computed. We replace such references by their
     corresponding tensor in the least common ancestor graph of `grad_graph`
     and `cond_graph`. Since we export intermediate tensors for all branch
     functions, this is always possible.

  Args:
    cond_graph: function._FuncGraph. The forward-pass function.
    grad_graph: function._FuncGraph. The gradients function.

  Returns:
    A list of inputs tensors to be passed to grad_graph.
  """
  new_extra_inputs = []
  for t in grad_graph.extra_inputs:
    if t.graph != grad_graph._outer_graph:
      # `t` is a tensor in `cond_graph` or one of its ancestors. We bubble this
      # tensor to the least common ancestor of the `cond_graph` and
      # `grad_graph` so that it is "in-scope" for `grad_graph`.
      # TODO(srbs): `_is_ancestor` calls may be expensive. Compute the least
      # common ancestor once and re-use.
      assert _is_ancestor(cond_graph, t.graph)
      while not _is_ancestor(grad_graph, t.graph):
        assert isinstance(t.graph, _function._FuncGraph)
        if t in t.graph.extra_args:
          # TODO(srbs): Consider building a map of extra_args -> extra_inputs.
          # instead of searching for `t` twice.
          # Replace the captured parameter with the outer tensor it captures.
          t = t.graph.extra_inputs[t.graph.extra_args.index(t)]
        else:
          # Note: All intermediate tensors are output by the If op.
          # TODO(srbs): .index() calls may be expensive. Optimize.
          t = t.graph._if.outputs[t.graph.outputs.index(t)]
      assert _is_ancestor(grad_graph, t.graph)
    new_extra_inputs.append(t)

  return new_extra_inputs
def _create_new_tf_function(func_graph):
  """Converts func_graph to a TF_Function and adds it to the current graph.

  Args:
    func_graph: function._FuncGraph

  Returns:
    The name of the new TF_Function.
  """
  c_func = c_api.TF_GraphToFunction_wrapper(
      func_graph._c_graph,
      compat.as_str(func_graph.name),
      False,  # append_hash_to_fn_name
      None,  # opers
      [t._as_tf_output() for t in func_graph.inputs],
      [t._as_tf_output() for t in func_graph.outputs],
      [],
      None,  # opts
      None)  # description
  # NOTE(review): the ScopedTFFunction result is deliberately discarded;
  # presumably it only manages the lifetime of `c_func` — confirm against
  # c_api_util before changing this line.
  _ = c_api_util.ScopedTFFunction(c_func)

  # TODO(b/109833212): this sucks, we're serializing the TF_Function*,
  # deserializing it into a Python FunctionDef, then reserializing it to create
  # a new TF_Function that we add to the graph.
  fdef = _function.function_def_from_tf_function(c_func)
  defined_func = _function._from_definition(fdef)
  defined_func._sub_functions = func_graph._functions
  defined_func.add_to_graph(func_graph._outer_graph)

  return func_graph.name
def _get_intermediates(func_graph):
"""Returns all tensors in `func_graph` that aren't inputs or outputs."""
intermediates = []
for op in func_graph.get_operations():
for t in op.outputs:
if t in func_graph.inputs: continue
if t in func_graph.outputs: continue
intermediates.append(t)
return intermediates
def _separate_unique_inputs(true_inputs, false_inputs):
"""Separates tensors appearing only in true_inputs or false_inputs, or both.
Args:
true_inputs: list of Tensors
false_inputs: list of Tensors
Returns:
Three lists of Tensors:
1. The tensors that appear in both true_inputs and false_inputs
2. The tensors that only appear in true_inputs
3. The tensors that only appear in false_inputs
"""
true_inputs = set(true_inputs)
false_inputs = set(false_inputs)
shared_inputs = true_inputs.intersection(false_inputs)
true_only_inputs = true_inputs - false_inputs
false_only_inputs = false_inputs - true_inputs
return list(shared_inputs), list(true_only_inputs), list(false_only_inputs)
def _pad_params(true_graph, false_graph, true_params, false_params):
  """Returns new param lists that have matching signatures.

  This is done by mirroring each param list in the other using dummy params.
  There is no merging of params.

  Args:
    true_graph: function._FuncGraph
    false_graph: function._FuncGraph
    true_params: a list of Tensors from true_graph
    false_params: a list of Tensors from false_graph

  Returns:
    A new list of Tensors in true_graph and a new list of Tensors in
    false_graph. The two lists have the same number of Tensors, with matching
    types and shapes across the lists.
  """
  # Each branch keeps its own params and appends/prepends dummies standing in
  # for the other branch's params, so both signatures line up positionally.
  padded_true = true_params + _create_dummy_params(true_graph, false_params)
  padded_false = (_create_dummy_params(false_graph, true_params) +
                  false_params)
  return padded_true, padded_false
def _make_inputs_match(true_graph, false_graph, true_extra_inputs,
                       false_extra_inputs):
  """Modifies true_graph and false_graph so they have the same input signature.

  This method reorders and/or adds parameters to true_graph and false_graph so
  they have the same input signature, and updates the 'inputs', 'extra_inputs',
  and '_captured' fields of both graphs accordingly. It uses the input tensors
  from the outer graph to avoid duplicating shared arguments.

  Args:
    true_graph: function._FuncGraph
    false_graph: function._FuncGraph
    true_extra_inputs: a list of Tensors in the outer graph. The inputs for
      true_graph.
    false_extra_inputs: a list of Tensors in the outer graph. The inputs for
      false_graph.

  Returns:
    A new list of Tensors from the outer graph that are the new inputs for both
    true_graph and false_graph. This is a deduped version of true_inputs +
    false_inputs.
  """
  shared_inputs, true_only_inputs, false_only_inputs = _separate_unique_inputs(
      true_extra_inputs, false_extra_inputs)

  # Canonical input order: shared first, then true-only, then false-only.
  new_inputs = shared_inputs + true_only_inputs + false_only_inputs

  true_input_to_param = dict(zip(true_extra_inputs, true_graph.inputs))
  false_input_to_param = dict(zip(false_extra_inputs, false_graph.inputs))

  # Each graph keeps its own parameters for shared and own-only inputs, and
  # receives dummy parameters in the slots of the other branch's inputs so the
  # two signatures match positionally.
  true_graph.inputs = (
      [true_input_to_param[t] for t in shared_inputs] +
      [true_input_to_param[t] for t in true_only_inputs] +
      _create_dummy_params(true_graph, false_only_inputs))
  false_graph.inputs = (
      [false_input_to_param[t] for t in shared_inputs] +
      _create_dummy_params(false_graph, true_only_inputs) +
      [false_input_to_param[t] for t in false_only_inputs])

  # Rewrite the _FuncGraphs' state to reflect the new inputs.
  true_graph.extra_inputs = new_inputs
  false_graph.extra_inputs = new_inputs
  true_graph.extra_args = true_graph.inputs
  false_graph.extra_args = false_graph.inputs
  true_graph._captured = dict(zip(new_inputs, true_graph.inputs))
  false_graph._captured = dict(zip(new_inputs, false_graph.inputs))

  return new_inputs
def _create_dummy_params(func_graph, template_tensors):
  """Creates tensors in func_graph to represent template_tensors.

  Args:
    func_graph: function._FuncGraph.
    template_tensors: a list of tensors in the outer graph.

  Returns:
    A list of tensors in func_graph, one fake_param per template tensor with
    matching dtype and shape.
  """
  dummies = []
  with func_graph.as_default():
    for template in template_tensors:
      dummies.append(
          gen_functional_ops.fake_param(
              dtype=template.dtype, shape=template.shape))
  return dummies
def _get_grad_fn_name(func_graph):
  """Returns a unique name to use for the grad function of `func_graph`.

  Ensures this name is unique in the entire hierarchy.

  Args:
    func_graph: The _FuncGraph.

  Returns:
    A string, the name to use for the gradient function.
  """
  name = "%s_grad" % func_graph.name

  base_name = name
  counter = 1
  has_conflict = True
  while has_conflict:
    # Walk from the immediate outer graph up through every enclosing
    # _FuncGraph, checking whether any level already defines `name`.
    curr_graph = func_graph._outer_graph
    has_conflict = curr_graph._is_function(name)
    while not has_conflict and isinstance(curr_graph, _function._FuncGraph):
      curr_graph = curr_graph._outer_graph
      has_conflict = curr_graph._is_function(name)
    # On a conflict, append an increasing numeric suffix and re-check the
    # whole hierarchy with the new candidate name.
    if has_conflict:
      name = "%s_%s" % (base_name, counter)
      counter += 1

  return name
def _check_same_outputs(true_graph, false_graph):
"""Raises an error if true_graph and false_graph have different outputs."""
true_output_types = [t.dtype for t in true_graph.outputs]
false_output_types = [t.dtype for t in false_graph.outputs]
if (len(true_graph.outputs) != len(false_graph.outputs) or
true_output_types != false_output_types):
raise ValueError(
"true_fn() and false_fn() must return the same number and type of "
"arguments, got:\n"
" true_fn: %s\n"
" false_fn: %s" % (true_output_types, false_output_types))
def _is_ancestor(graph, maybe_ancestor):
if maybe_ancestor == graph:
return True
if isinstance(graph, _function._FuncGraph):
return _is_ancestor(graph._outer_graph, maybe_ancestor)
return False
| |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline configuration linter."""
import collections
import copy
import datetime
import json
import sys
import types
import jinja2
from jinja2 import exceptions as jinja2_exceptions
from jsonminify import minify_json
from google.appengine.api import app_identity
from src.pipelines import pipelines
class PipelineLinter(object):
  """Linter for a pipeline's configuration.

  Parses a JSON pipeline config (comments allowed -- they are minified
  away), merges in default options, expands jinja2 template variables,
  and runs structural plus per-stage checks. Results accumulate in
  `self.results` (a LintResults).
  """

  # The only keys permitted at the root of a pipeline config dict.
  VALID_ROOT_KEYS = set(('inputs', 'outputs', 'transforms', 'options'))

  # Names under which check outcomes are recorded in LintResults.
  CHECK_SYNTAX_VALID = 'SyntaxValid'
  CHECK_TEMPLATE_VALID = 'TemplateValid'
  CHECK_REQ_IO_STAGES = 'HasOneInputOrOutputStage'
  CHECK_UNKNOWN_CONFIG_KEYS = 'NoUnknownKeys'

  MSG_MISSING_IO_STAGES = 'Must have at least one "inputs" or "outputs" stage.'
  MSG_UNKNOWN_CONFIG_KEYS_FMT = 'Unrecognized config keys: %s'

  def __init__(self,
               config_json,
               default_options=None):
    """Lints `config_json` immediately; read `self.results` afterwards.

    Args:
      config_json: The pipeline configuration JSON string.
      default_options: optional dict merged into the config's 'options'.
    """
    self.results = LintResults()
    self.config = None  # populated by SyntaxCheck() on a successful parse
    self.Lint(config_json, default_options)

  def Lint(self, config_json, default_options):
    """Lint the pipeline.

    Performs the following steps:
      1. Strips any comments.
      2. Validates JSON syntax.
      3. Adds default options.
      4. Substitutes for embedded variables and re-checks syntax.
      5. Runs per-stage linting.

    Args:
      config_json: The pipeline configuration JSON rep.
      default_options: a default options section to merge in with the config.
    """
    self.SyntaxCheck(config_json)
    # NOTE(review): template expansion is guarded on default_options being
    # supplied (and the initial parse succeeding) -- confirm expansion is
    # not expected for callers that pass no defaults.
    if self.config and default_options:
      self.AddDefaultOptions(default_options)
      config_json = self.ExpandTemplateVariables()
      # json parse it a second time.
      self.SyntaxCheck(config_json, phase='PostTemplate: ')
    self.StageCheck()

  def AddDefaultOptions(self, default_options):
    """Merges default_options into the config's 'options'; config wins."""
    # take self.config and merge in the default_options
    options = self.config.get('options', {})
    if default_options:
      UpdateNestedDict(options, default_options)
    self.config['options'] = options

  def ExpandTemplateVariables(self):
    """Renders the config JSON through jinja2 with the template variables.

    Returns:
      The rendered JSON string (unrendered if a template syntax error
      occurred; the failure is recorded under CHECK_TEMPLATE_VALID).
    """
    template_variables = self.GetTemplateVariables()
    config_json = json.dumps(self.config, indent=2, separators=(',', ': '))
    try:
      config_json = jinja2.Template(config_json).render(template_variables)
    except jinja2_exceptions.TemplateSyntaxError as err:
      self.results.AddCheckResults(self.CHECK_TEMPLATE_VALID, False, str(err))
    # Recording success after a failure is harmless: UpdateNestedDict keeps
    # the first (failing) entry recorded for a given check name.
    self.results.AddCheckResults(self.CHECK_TEMPLATE_VALID, True)
    return config_json

  def GetTemplateVariables(self):
    """Return the values that can be used as jinja variables in templates."""
    today = datetime.date.today()
    # Deep-copy so the defaults injected below never leak into self.config.
    options = copy.deepcopy(self.config.get('options', {}))
    storage = options.get('storage', {})
    UpdateNestedDict(storage, {'bucket': '', 'prefix': ''})
    storage['url'] = 'gs://%s/%s' % (storage['bucket'], storage['prefix'])
    UpdateNestedDict(options, {
        'app': {
            'id': app_identity.get_application_id(),
            'hostname': app_identity.get_default_version_hostname(),
            'serviceAccountName': app_identity.get_service_account_name(),
        },
        'storage': storage,
        'date': {
            'y-m-d': today.strftime('%Y-%m-%d'),
            'ymd': today.strftime('%Y%m%d'),
        },
    })
    return options

  def SyntaxCheck(self, config_json, phase=None):
    """Ensure the config string is valid JSON and is loadable in to a dict.

    Args:
      config_json: The pipeline configuration JSON rep.
      phase: what part of the syntax checking are we at
    """
    try:
      # Comments are stripped (minified) before parsing.
      self.config = json.loads(minify_json.json_minify(config_json))
      self.results.AddCheckResults(self.CHECK_SYNTAX_VALID, True)
    except (ValueError, TypeError) as e:
      phase = phase or 'PreTemplate: '
      self.results.AddCheckResults(
          self.CHECK_SYNTAX_VALID, False, phase + str(e))

  def StageCheck(self):
    """Lints all stages in the config."""
    config = self.config or {}
    # At least one non-empty 'inputs' or 'outputs' list is required.
    has_io_stage = False
    if (('inputs' in config and config['inputs']) or
        ('outputs' in config and config['outputs'])):
      has_io_stage = True
    self.results.AddCheckResults(self.CHECK_REQ_IO_STAGES,
                                 has_io_stage,
                                 self.MSG_MISSING_IO_STAGES)
    unknown_keys = set(config.keys()) - self.VALID_ROOT_KEYS
    self.results.AddCheckResults(
        self.CHECK_UNKNOWN_CONFIG_KEYS,
        not unknown_keys,
        self.MSG_UNKNOWN_CONFIG_KEYS_FMT % ', '.join(sorted(unknown_keys)))
    for s in config.get('inputs', []):
      self.LintStage('inputs', s)
    for s in config.get('transforms', []):
      self.LintStage('transforms', s)
    for s in config.get('outputs', []):
      self.LintStage('outputs', s)

  def LintStage(self, category, config):
    """Lints a single stage config.

    Args:
      category: a stage category (e.g. 'inputs')
      config: a stage config.
    """
    # Deep-copy so a stage's Lint() cannot mutate the pipeline config.
    stage_config = copy.deepcopy(config)
    sl = StageLinter(category, stage_config)
    sl.TypeCheck()
    if sl.results.valid:
      stage = pipelines.GetStage(stage_config)
      # Stages may opt into extra linting by defining a Lint(linter) hook.
      if hasattr(stage, 'Lint'):
        stage.Lint(sl)
      sl.SourceSinkCheck()
    self.results.AddStageCheckResults(category, sl.results)
class StageLinter(object):
  """Provides helper functions for stages to lint their configurations."""

  # Check-name formats and failure messages used when recording results.
  CHECK_FIELD_EXISTS_FMT = 'FieldExists [%s]'
  CHECK_FIELD_MIN_LENGTH = 'FieldMinLength'
  CHECK_FIELD_MAX_LENGTH = 'FieldMaxLength'
  CHECK_TYPE_FMT = 'TypeValid [%s]'

  MSG_FIELD_BAD_LENGTH_FMT = '%r has wrong number of items. expected %r got %r'
  MSG_FIELD_INVALID_FMT = 'Invalid value: %r'
  MSG_REQUIRE_AT_LEAST_ONE_FMT = 'At least one of %r must be provided.'
  MSG_REQUIRED_FIELD_FMT = '%r must be provided.'
  MSG_TYPE_NOT_FOUND = 'Type must be provided.'
  MSG_WRONG_TYPE_FMT = 'Type must be a %r.'

  def __init__(self, category, config):
    """Initialize the linter.

    Args:
      category: The stage category (e.g. 'inputs')
      config: The stage configuration.
    """
    self.category = category
    self.config = config
    self.results = LintResults()

  def SourceSinkCheck(self):
    """Flags explicit null entries inside 'sources'/'sinks' lists."""
    for which in ['sources', 'sinks']:
      if which in self.config:
        # A key set to null is tolerated; a null *inside* the list is not.
        self.results.AddCheckResults(self.CHECK_FIELD_EXISTS_FMT % str(which),
                                     self.config[which] is None or
                                     None not in self.config[which],
                                     self.MSG_FIELD_INVALID_FMT % 'null')

  def TypeCheck(self):
    """Verifies the stage declares a 'type' that resolves to a real stage."""
    stage = self.config.get('type')
    check = StageLinter.CHECK_TYPE_FMT % str(stage)
    if stage:
      try:
        # GetStage imports the stage module; ImportError means unknown type.
        pipelines.GetStage(self.config)
        self.results.AddCheckResults(check, True)
      except ImportError as e:
        self.results.AddCheckResults(check, False, str(e))
    else:
      self.results.AddCheckResults(check, False, StageLinter.MSG_TYPE_NOT_FOUND)

  def FieldCheck(self, field_name, field_type=None, required=False,
                 validator=None, list_min=None, list_max=None):
    """Performs a linting check on a configuration field.

    Args:
      field_name: the field name (dotted paths address nested dicts)
      field_type: the expected type, None to skip
      required: if the field must be provided
      validator: optional validation function taking the field value
      list_min: if typ is list, the minimum number of items required/allowed
      list_max: if typ is list, the maximum number of items required/allowed
    """
    def _GetValue(key, d):
      # Resolve a dotted key path against nested dicts.
      # NOTE(review): rsplit('.', 1) recursing on k[1] resolves paths in an
      # unusual order -- confirm 3+-level dotted paths behave as intended.
      if not d:
        return None
      k = key.rsplit('.', 1)
      if len(k) == 1:
        return d.get(k[0])
      else:
        return _GetValue(k[1], d.get(k[0]))

    val = _GetValue(field_name, self.config)
    if required:
      self.results.AddCheckResults(self.CHECK_FIELD_EXISTS_FMT % field_name,
                                   val is not None,
                                   self.MSG_REQUIRED_FIELD_FMT % field_name)
    # Falsy values (empty list/string, 0) skip all further checks.
    if val:
      if field_type:
        self.results.AddCheckResults(
            self.CHECK_TYPE_FMT % field_name,
            isinstance(val, field_type),
            self.MSG_WRONG_TYPE_FMT % repr(field_type))
      # NOTE(review): Python 2 idioms -- collections.Iterable,
      # types.StringTypes and sys.maxint do not exist on Python 3. Also,
      # is_list is True only when field_type is itself an iterable object
      # (e.g. a tuple of types), not the bare `list` class -- confirm
      # callers rely on that.
      is_list = isinstance(
          field_type, collections.Iterable) and not isinstance(
              field_type, types.StringTypes)
      if is_list and (list_min or list_max):
        l = len(val)
        self.results.AddCheckResults(
            self.CHECK_FIELD_MIN_LENGTH,
            list_min is None or l >= (list_min or 0),
            self.MSG_FIELD_BAD_LENGTH_FMT %
            (field_name, list_min, l))
        self.results.AddCheckResults(
            self.CHECK_FIELD_MAX_LENGTH,
            list_max is None or l <= (list_max or sys.maxint),
            self.MSG_FIELD_BAD_LENGTH_FMT %
            (field_name, list_max, l))
      if validator:
        msg = ''
        valid = False
        try:
          # Lists are validated element-wise; scalars as a whole. Any
          # exception from the validator marks the field invalid.
          if is_list:
            for v in val:
              validator(v)
          else:
            validator(val)
          valid = True
        except Exception as e:  # pylint: disable=broad-except
          msg = str(e)
        self.results.AddCheckResults(self.CHECK_FIELD_EXISTS_FMT % field_name,
                                     valid,
                                     self.MSG_FIELD_INVALID_FMT % msg)

  def AtLeastOneFieldRequiredCheck(self, fields):
    """Records a failure unless at least one of `fields` is configured."""
    missing = True
    for f in fields:
      if f in self.config:
        missing = False
        break
    self.results.AddCheckResults(self.CHECK_FIELD_EXISTS_FMT % fields,
                                 not missing,
                                 self.MSG_REQUIRE_AT_LEAST_ONE_FMT % fields)
class LintResults(object):
  """Encapsulates the results on the pipeline configuration linting."""

  def __init__(self):
    # Overall validity: stays True until any recorded check fails.
    self.valid = True
    # Mapping of check name -> {'pass': bool[, 'reason': str]}, plus an
    # optional 'stages' subtree holding per-stage result lists.
    self.results = {}

  def AddCheckResults(self, name, valid, reason=None):
    """Records the outcome of a single named check."""
    entry = {'pass': valid}
    if not valid:
      entry['reason'] = reason
    self.valid = self.valid and valid
    self.results = UpdateNestedDict(self.results, {name: entry})

  def AddStageCheckResults(self, category, check):
    """Folds a stage's LintResults into this set under `category`."""
    self.valid = self.valid and check.valid
    stages = self.results.setdefault('stages', {})
    stages.setdefault(category, []).append(check.results)
def UpdateNestedDict(master, to_add):
  """Merges the values from to_add into the master dict.

  Existing scalar values in `master` are kept (master wins); dict values
  are merged recursively. `master` is mutated in place and returned.
  """
  for key, value in to_add.items():
    if key not in master:
      master[key] = value
      continue
    if isinstance(value, dict):
      UpdateNestedDict(master[key], value)
  return master
| |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" Copyright (c) 2003-2006 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:contact@logilab.fr
add an abstraction level to transparently import optik classes from optparse
(python >= 2.3) or the optik package.
It also defines three new types for optik/optparse command line parser :
* regexp
argument of this type will be converted using re.compile
* csv
argument of this type will be converted using split(',')
* yn
argument of this type will be true if 'y' or 'yes', false if 'n' or 'no'
* named
argument of this type are in the form <NAME>=<VALUE> or <NAME>:<VALUE>
"""
import re
import sys
import time
from copy import copy
from os.path import exists
try:
# python >= 2.3
from optparse import OptionParser as BaseParser, Option as BaseOption, \
OptionGroup, OptionValueError, OptionError, Values, HelpFormatter, \
NO_DEFAULT
except ImportError:
# python < 2.3
from optik import OptionParser as BaseParser, Option as BaseOption, \
OptionGroup, OptionValueError, OptionError, Values, HelpFormatter
try:
from optik import NO_DEFAULT
except:
NO_DEFAULT = []
try:
from mx import DateTime
HAS_MX_DATETIME = True
except ImportError:
HAS_MX_DATETIME = False
OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
from clonedigger.logilab.common.textutils import get_csv
def check_regexp(option, opt, value):
    """check a regexp value by trying to compile it
    return the compiled regexp
    """
    # Already-compiled patterns (anything exposing .pattern) pass through.
    if hasattr(value, 'pattern'):
        return value
    try:
        return re.compile(value)
    except (re.error, ValueError):
        # BUG FIX: re.compile signals a bad pattern with re.error, not
        # ValueError, so invalid regexps previously escaped as an unhandled
        # exception instead of the intended OptionValueError.
        raise OptionValueError(
            "option %s: invalid regexp value: %r" % (opt, value))
def check_csv(option, opt, value):
    """check a csv value by trying to split it
    return the list of separated values
    """
    # Sequences are assumed to be pre-split: pass them through untouched.
    if isinstance(value, (list, tuple)):
        return value
    try:
        split_values = get_csv(value)
    except ValueError:
        raise OptionValueError(
            "option %s: invalid csv value: %r" % (opt, value))
    return split_values
def check_yn(option, opt, value):
    """check a yn value
    return true for yes and false for no
    """
    # Integers (including bools) are interpreted by truthiness.
    if isinstance(value, int):
        return bool(value)
    affirmative = ('y', 'yes')
    negative = ('n', 'no')
    if value in affirmative:
        return True
    if value in negative:
        return False
    msg = "option %s: invalid yn value %r, should be in (y, yes, n, no)"
    raise OptionValueError(msg % (opt, value))
def check_named(option, opt, value):
    """check a named value
    return a dictionnary containing (name, value) associations
    """
    if isinstance(value, dict):
        return value
    associations = []
    # NOTE(review): the loop deliberately rebinds `value`, so the error
    # message below reports the last CSV item rather than the raw input --
    # preserved as-is.
    for value in check_csv(option, opt, value):
        # '=' takes precedence over ':' as the name/value separator.
        for separator in ('=', ':'):
            if value.find(separator) != -1:
                associations.append(value.split(separator, 1))
                break
    if associations:
        return dict(associations)
    msg = "option %s: invalid named value %r, should be <NAME>=<VALUE> or \
<NAME>:<VALUE>"
    raise OptionValueError(msg % (opt, value))
def check_password(option, opt, value):
    """check a password value (can't be empty)

    NOTE(review): despite the docstring, no emptiness check is performed --
    the value is returned unchanged. Monkey-patch this function for
    stricter validation.
    """
    # no actual checking, monkey patch if you want more
    return value
def check_file(option, opt, value):
    """check a file value
    return the filepath
    """
    # Reject paths that do not exist on disk; otherwise hand back the path.
    if not exists(value):
        msg = "option %s: file %r does not exist"
        raise OptionValueError(msg % (opt, value))
    return value
def check_date(option, opt, value):
    """check a date value in yyyy/mm/dd format
    return the parsed mx.DateTime value

    NOTE(review): only reachable when mx.DateTime imported successfully
    (the 'date' type is registered conditionally on HAS_MX_DATETIME).
    """
    try:
        return DateTime.strptime(value, "%Y/%m/%d")
    except DateTime.Error :
        raise OptionValueError(
            "expected format of %s is yyyy/mm/dd" % opt)
def check_color(option, opt, value):
    """check a color value and returns it

    Accepts either an alphanumeric color label (trusted, not validated
    against any real palette) or a '#rrggbb' hexadecimal form.
    """
    # Case (1) : color label, we trust the end-user
    if re.match('[a-z0-9 ]+$', value, re.I) is not None:
        return value
    # Case (2) : only accepts hexadecimal forms
    # NOTE(review): this pattern is not end-anchored, so trailing junk
    # after six hex digits is accepted (e.g. '#aabbcc!!') -- preserved.
    if re.match('#[a-f0-9]{6}', value, re.I) is not None:
        return value
    # Else : not a color label neither a valid hexadecimal form => error
    msg = "option %s: invalid color : %r, should be either hexadecimal \
value or predefinied color"
    raise OptionValueError(msg % (opt, value))
import types
class Option(BaseOption):
    """override optik.Option to add some new option types

    Registers the custom checkers defined above (regexp, csv, yn, named,
    password, multiple_choice, file, color, and -- when mx.DateTime is
    available -- date) on top of optparse's builtin types.
    """
    TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
                                'multiple_choice', 'file', 'font', 'color')
    # NOTE(review): 'font' is declared in TYPES but has no TYPE_CHECKER
    # entry, so it behaves as an unchecked value -- confirm intended.
    TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
    TYPE_CHECKER['regexp'] = check_regexp
    TYPE_CHECKER['csv'] = check_csv
    TYPE_CHECKER['yn'] = check_yn
    TYPE_CHECKER['named'] = check_named
    TYPE_CHECKER['multiple_choice'] = check_csv
    TYPE_CHECKER['file'] = check_file
    TYPE_CHECKER['color'] = check_color
    TYPE_CHECKER['password'] = check_password
    if HAS_MX_DATETIME:
        # 'date' support only when mx.DateTime imported successfully.
        TYPES += ('date',)
        TYPE_CHECKER['date'] = check_date

    def _check_choice(self):
        """FIXME: need to override this due to optik misdesign"""
        # 'multiple_choice' must be validated exactly like 'choice'.
        if self.type in ("choice", "multiple_choice"):
            if self.choices is None:
                raise OptionError(
                    "must supply a list of choices for type 'choice'", self)
            # NOTE(review): types.TupleType / types.ListType are Python 2
            # only; this module targets py2.
            elif type(self.choices) not in (types.TupleType, types.ListType):
                raise OptionError(
                    "choices must be a list of strings ('%s' supplied)"
                    % str(type(self.choices)).split("'")[1], self)
        elif self.choices is not None:
            raise OptionError(
                "must not supply choices for type %r" % self.type, self)
    # Monkey-patch: replace optparse's choice validation globally on the
    # base class so 'multiple_choice' passes its CHECK_METHODS pipeline.
    BaseOption.CHECK_METHODS[2] = _check_choice

    def process(self, opt, value, values, parser):
        # First, convert the value(s) to the right type.  Howl if any
        # value(s) are bogus.
        try:
            value = self.convert_value(opt, value)
        except AttributeError:  # py < 2.4
            value = self.check_value(opt, value)
        if self.type == 'named':
            # 'named' options accumulate: merge into any dict already
            # stored on the destination attribute.
            existant = getattr(values, self.dest)
            if existant:
                existant.update(value)
                value = existant
        # And then take whatever action is expected of us.
        # This is a separate method to make life easier for
        # subclasses to add new actions.
        return self.take_action(
            self.action, self.dest, opt, value, values, parser)
class OptionParser(BaseParser):
    """override optik.OptionParser to use our Option class
    """

    def __init__(self, option_class=Option, *args, **kwargs):
        """Forward to BaseParser, defaulting option_class to our Option.

        BUG FIX: the previous implementation hard-coded
        ``option_class=Option`` in the forwarded call, silently discarding
        any option_class the caller supplied. The caller's value is now
        passed through; behavior is unchanged for callers using the
        default.
        """
        BaseParser.__init__(self, option_class=option_class, *args, **kwargs)
class ManHelpFormatter(HelpFormatter):
    """Format help using man pages ROFF format"""

    def __init__ (self,
                  indent_increment=0,
                  max_help_position=24,
                  width=79,
                  short_first=0):
        HelpFormatter.__init__ (
            self, indent_increment, max_help_position, width, short_first)

    def format_heading(self, heading):
        # .SH opens a man-page section.
        return '.SH %s\n' % heading.upper()

    def format_description(self, description):
        return description

    def format_option(self, option):
        """Render one option as an indented-paragraph (.IP) entry."""
        try:
            optstring = option.option_strings
        except AttributeError:
            optstring = self.format_option_strings(option)
        if option.help:
            # Collapse multi-line help text onto a single line.
            help = ' '.join([l.strip() for l in option.help.splitlines()])
        else:
            help = ''
        return '''.IP "%s"
%s
''' % (optstring, help)

    def format_head(self, optparser, pkginfo, section=1):
        """Build the man page head: title, NAME, SYNOPSIS, DESCRIPTION."""
        try:
            pgm = optparser._get_prog_name()
        except AttributeError:
            # py >= 2.4.X (dunno which X exactly, at least 2)
            pgm = optparser.get_prog_name()
        short_desc = self.format_short_description(pgm, pkginfo.short_desc)
        long_desc = self.format_long_description(pgm, pkginfo.long_desc)
        return '%s\n%s\n%s\n%s' % (self.format_title(pgm, section), short_desc,
                                   self.format_synopsis(pgm), long_desc)

    def format_title(self, pgm, section):
        # .TH arguments: title, section, date, source.
        date = '-'.join([str(num) for num in time.localtime()[:3]])
        return '.TH %s %s "%s" %s' % (pgm, section, date, pgm)

    def format_short_description(self, pgm, short_desc):
        return '''.SH NAME
.B %s
\- %s
''' % (pgm, short_desc.strip())

    def format_synopsis(self, pgm):
        return '''.SH SYNOPSIS
.B %s
[
.I OPTIONS
] [
.I <arguments>
]
''' % pgm

    def format_long_description(self, pgm, long_desc):
        # Strip indentation and turn lone '.' lines into paragraph breaks;
        # drop a leading program name so .B %s is not duplicated.
        long_desc = '\n'.join([line.lstrip()
                               for line in long_desc.splitlines()])
        long_desc = long_desc.replace('\n.\n', '\n\n')
        if long_desc.lower().startswith(pgm):
            long_desc = long_desc[len(pgm):]
        return '''.SH DESCRIPTION
.B %s
%s
''' % (pgm, long_desc.strip())

    def format_tail(self, pkginfo):
        """Render the SEE ALSO / COPYRIGHT / BUGS / AUTHOR trailer."""
        return '''.SH SEE ALSO
/usr/share/doc/pythonX.Y-%s/
.SH COPYRIGHT
%s
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA  02111-1307  USA.
.SH BUGS
Please report bugs on the project\'s mailing list:
%s
.SH AUTHOR
%s <%s>
''' % (getattr(pkginfo, 'debian_name', pkginfo.modname), pkginfo.copyright,
       pkginfo.mailinglist, pkginfo.author, pkginfo.author_email)
def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
    """generate a man page from an optik parser"""
    # NOTE(review): Python 2 print-statement syntax; this module targets
    # py2 (see the optik fallback imports at the top of the file).
    formatter = ManHelpFormatter()
    print >> stream, formatter.format_head(optparser, pkginfo, section)
    print >> stream, optparser.format_option_help(formatter)
    print >> stream, formatter.format_tail(pkginfo)
__all__ = ('OptionParser', 'Option', 'OptionGroup', 'OptionValueError',
'Values')
| |
#Renato Fernandes Rodrigues
#Universidade Federal do ABC
#Engenharia de Instrumentacao, automacao e robotica
#Trabalho de Graduacao
#Orientador: Prof. Dr. Luiz Antonio Celiberto Junior
import argparse
# import imutils
import time
import cv2
import os
import numpy as np
import math
from Configs import Configs
# construct the argument parse and set flags
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--display", type=int, default=1,help="#Show display? Y=1 N=0")
ap.add_argument("-r", "--raspberry", type=int, default=0,help="# runnning on Raspberry Pi? Y=1 N=0")
ap.add_argument("-s", "--send", type=int, default=0,help="#Send commands to ESP? Y=1 N=0")
args = vars(ap.parse_args())
flag_display = args["display"]
flag_raspberry = args["raspberry"]
flag_send = args["send"]
# Raspberry pi mode initialization
if flag_raspberry > 0:
from pivideostream import PiVideoStream
from picamera.array import PiRGBArray
from picamera import PiCamera
# Configure file JSON path
configs = Configs(os.path.dirname(os.path.realpath(__file__))+'/Configs.json')
# Socket initialization
if flag_send > 0:
import socket
HOST = configs.get("host")
PORT = configs.get("port")
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dest = (HOST, PORT)
tcp.settimeout(0.0)
print "Sending commands to "+str(dest)
try:
tcp.connect(dest)
except socket.error as msg:
if msg[0] != 115:
print "Socket Error "+str(msg[0])
print "Abort operation, try again later."
raise SystemExit
# Global variables
plotWidth = configs.get("cameraRes")[0]
plotHeigth = configs.get("cameraRes")[1]
hand_color_l = np.array(configs.get("hand_color_l"))
hand_color_h = np.array(configs.get("hand_color_h"))
span_h = configs.get("span_h")
span_l = configs.get("span_l")
span_s = configs.get("span_s")
timer_send_start = 0
timer_list = []
timer_start = 0
timer_list_flag = True
timer_list_2 = []
loop_timer_start = 0
timer_send_start = 0
calibrate_hand_flag = False
command = " "
status_detector = " "
calibration_counter = 0
command_buffer = []
flag_calibration_timeout = False
calibration_timeout_counter = 0
# Variables initialization
plot = np.zeros((plotHeigth, plotWidth, 3), np.uint8)
frame = np.zeros((plotHeigth, plotWidth, 3), np.uint8)
frame_hls = np.zeros((plotHeigth, plotWidth, 3), np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
print "Color Ranges Loaded: "+str(hand_color_h)+" to "+str(hand_color_l)
def send_command(comm):
    """Send a drive command to the ESP over the global TCP socket.

    Rate-limited to at most one attempt every 200 ms (measured with
    cv2.getTickCount). A no-op unless the --send flag was given.
    Python 2 print statements throughout.
    """
    global timer_send_start
    data = ""
    if flag_send > 0:
        # Throttle: only attempt a send if >200 ms elapsed since the last.
        if int((cv2.getTickCount() - timer_send_start)/ cv2.getTickFrequency()*1000) > 200:
            print "Command to send:"+comm
            try:
                if comm != "":
                    tcp.send(comm)
                    data = tcp.recv(1024)
                    print "Received: "+str(data)
                    # "O!\r\n" is the ESP's acknowledgement token.
                    if data == "O!\r\n":
                        print "Sucessfull Sent!"
            except:
                # NOTE(review): bare except makes the send best-effort;
                # all socket errors are swallowed after logging.
                print "Send Failed"
            timer_send_start = cv2.getTickCount()
def calc_multi_params(start, end, far, center):
    """Compute the defect-triangle angle and the distances used to filter
    convexity defects into finger candidates.

    Returns a 6-tuple of ints: (angle at `far` in degrees, |start-far|,
    |end-far|, |start-center|, |end-center|, |far-center|), each truncated.
    """
    xs, ys = start
    xe, ye = end
    xf, yf = far
    xc, yc = center
    dist_start = math.sqrt((xs - xf) ** 2 + (ys - yf) ** 2)
    dist_end = math.sqrt((xe - xf) ** 2 + (ye - yf) ** 2)
    dist_start_end = math.sqrt((xe - xs) ** 2 + (ye - ys) ** 2)
    # Law of cosines at the defect point (far).
    cos_angle = (dist_start * dist_start + dist_end * dist_end
                 - dist_start_end * dist_start_end) / (2 * dist_start * dist_end)
    angle = math.degrees(math.acos(cos_angle))
    dist_start_center = math.sqrt((xs - xc) ** 2 + (ys - yc) ** 2)
    dist_end_center = math.sqrt((xe - xc) ** 2 + (ye - yc) ** 2)
    dist_far_center = math.sqrt((xf - xc) ** 2 + (yf - yc) ** 2)
    return (int(angle), int(dist_start), int(dist_end),
            int(dist_start_center), int(dist_end_center), int(dist_far_center))
def calc_distance(p1, p2):
    """Euclidean distance between two 2-D points, truncated to int."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return int(math.sqrt(dx * dx + dy * dy))
def calc_meanpoint(p1, p2):
    """Midpoint of two 2-D points, each component truncated to int."""
    mid_x = (p1[0] + p2[0]) / 2
    mid_y = (p1[1] + p2[1]) / 2
    return int(mid_x), int(mid_y)
def start_timer():
    """Reset the global tick reference used by end_timer()."""
    global timer_start
    timer_start = cv2.getTickCount()
def end_timer(name):
    """Append (name, microseconds elapsed since start_timer()) to timer_list."""
    global timer_list
    timer_list.append([name,int((cv2.getTickCount() - timer_start)/ cv2.getTickFrequency()*1000000)])
def update_trackbars(arg1):
    """OpenCV trackbar callback: read the H/L/S span sliders and persist
    them to the JSON config.

    Args:
      arg1: new trackbar position supplied by OpenCV (unused; all three
        sliders are re-read explicitly).
    """
    # print arg1
    global span_h
    global span_l
    global span_s
    span_h = cv2.getTrackbarPos("H","plots")
    span_l = cv2.getTrackbarPos("L","plots")
    span_s = cv2.getTrackbarPos("S","plots")
    # Persist so calibration spans survive restarts.
    configs.set("span_h",span_h)
    configs.set("span_l",span_l)
    configs.set("span_s",span_s)
# def update_command(, center)
if flag_display:
cv2.namedWindow("plots")
cv2.createTrackbar("H","plots",span_h,255,update_trackbars)
cv2.createTrackbar("L","plots",span_l,255,update_trackbars)
cv2.createTrackbar("S","plots",span_s,255,update_trackbars)
# initialize and configure camera module
if(flag_raspberry>0):
print("Starting Raspberry Pi Camera module Thread")
vs = PiVideoStream().start()
else:
print "Start OpenCV Video Capture module"
vs = cv2.VideoCapture(0)
time.sleep(2.0)
try:
while True:
# store fps
timer_list =[]
timer_list.append(["FPS",int(cv2.getTickFrequency()/(cv2.getTickCount() - loop_timer_start))])
loop_timer_start = cv2.getTickCount()
finger_list_filtered = []
finger_num = 0
contour_solidity = 0
center = (0,0)
defects_list = []
hull_rp = np.array([])
mean_dist_center = 0
start_timer()
if flag_raspberry > 0:
frame_full = vs.read()
else:
ret,frame_full = vs.read()
if ret == False:
continue
frame = frame_full[plotHeigth/4:plotHeigth*3/4,plotWidth/4:plotWidth*3/4]
frame = cv2.flip(frame,-1)
end_timer("read")
start_timer()
frame_hls = cv2.cvtColor(frame.copy(),cv2.COLOR_BGR2HLS)
end_timer("cvtColor HSL")
if calibrate_hand_flag:
start_timer()
roi_range = 20
roi_hist_p1 = (frame.shape[1]/2 - roi_range,frame.shape[0]/2 - roi_range)
roi_hist_p2 = (frame.shape[1]/2 + roi_range,frame.shape[0]/2 + roi_range)
cv2.rectangle(frame,roi_hist_p1,roi_hist_p2,[255,0,255],2)
roi_hist = frame_hls[roi_hist_p1[1]:roi_hist_p2[1],roi_hist_p1[0]:roi_hist_p2[0]]
hist_h = cv2.calcHist([roi_hist],[0],None,[256],[0,256])
hist_l = cv2.calcHist([roi_hist],[1],None,[256],[0,256])
hist_s = cv2.calcHist([roi_hist],[2],None,[256],[0,256])
max_hist_h = sorted(range(len(hist_h)), key=lambda k: hist_h[k],reverse=True)[:3]
max_hist_l = sorted(range(len(hist_l)), key=lambda k: hist_l[k],reverse=True)[:3]
max_hist_s = sorted(range(len(hist_s)), key=lambda k: hist_s[k],reverse=True)[:3]
hand_color_l = np.clip([min(max_hist_h)-span_h,min(max_hist_l)-span_l,min(max_hist_s)-span_s],0,255)
hand_color_h = np.clip([max(max_hist_h)+span_h,max(max_hist_l)+span_l,max(max_hist_s)+span_s],0,255)
print "h:"+str(max_hist_h)+"\n\n\tl:"+str(max_hist_l)+"\n\n\ts:"+str(max_hist_s)
configs.set("hand_color_l",[hand_color_l[0],hand_color_l[1],hand_color_l[2]])
configs.set("hand_color_h",[hand_color_h[0],hand_color_h[1],hand_color_h[2]])
print hand_color_l
print hand_color_h
end_timer("calibrate")
calibrate_hand_flag = False
timer_list_flag = True
# Threshold Image
start_timer()
mask0 = cv2.inRange(frame_hls,hand_color_l,hand_color_h)
end_timer("inRange")
# Erode and dilate
start_timer()
# mask = cv2.erode(mask0, kernel,iterations=2)
mask = cv2.dilate(mask0, kernel,iterations=4)
mask2 = cv2.erode(mask, kernel,iterations=4)
end_timer("Dilate/Erode")
# Find contours in mask
start_timer()
_,cnts, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts2 = sorted(cnts,key=cv2.contourArea,reverse=True)[:1]
finger_num = -1
for c in cnts2:
# Area filter
contour_area = cv2.contourArea(c)
if contour_area < 3000 or contour_area > 30000:
status_detector = "Status: Out of Area range ("+str(contour_area)+")"
# print status_detector
continue
# center of mass of hand area mask
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# calculate all distances of points to center
dist_center_c = []
for i in xrange(len(c)):
dist_center_c.append((c[i][0],calc_distance(c[i][0],center)))
dist_center_c = sorted(dist_center_c,key = lambda s: s[1],reverse=True)
# calculate median radius of all contours points to center
mean_dist_center = 0
median_dist_center = np.median([dist_center_c[i][1] for i in range(0,len(dist_center_c))], axis=0)
mean_dist_center = int(1.1*np.average([dist_center_c[i][1] for i in range(0,len(dist_center_c))],axis =0))
hull_rp = cv2.convexHull(c,returnPoints = True)
# print "hull_rp\n"+str(hull_rp)
# calculate solidity
hull_area = cv2.contourArea(hull_rp)
contour_solidity = float(contour_area)/hull_area
if contour_solidity > 0.9:
finger_num = 0
continue
# get the hull and defects points
hull = cv2.convexHull(c,returnPoints = False)
defects = cv2.convexityDefects(c,hull)
# construct a list of defects points and distances
defects_list = []
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
start = tuple(c[s][0])
end = tuple(c[e][0])
far = tuple(c[f][0])
angle,dist_start,dist_end,dist_start_center,dist_end_center,dist_far_center = calc_multi_params(start,end,far,center)
defects_list.append([start,end,far,angle,dist_start,dist_end,dist_start_center,dist_end_center,dist_far_center])
# 0 1 2 3 4 5 6 7 8
# filter by openin finger angles
defects_list = sorted(defects_list, key = lambda s: s[3]) [:10]
finger_list=[]
for i in range(0,len(defects_list)):
if defects_list[i][3] < 100 and defects_list[i][3] > 5:
if defects_list[i][6] > mean_dist_center:
if 0.5 < float(defects_list[i][4])/defects_list[i][6] < 1.3:
finger_list.append(defects_list[i][0])
cv2.line(frame,defects_list[i][2],defects_list[i][0],[255,255,255],2)
if defects_list[i][7] > mean_dist_center:
if 0.5 < float(defects_list[i][5])/defects_list[i][7] < 1.3:
finger_list.append(defects_list[i][1])
cv2.line(frame,defects_list[i][2],defects_list[i][1],[255,255,255],2)
finger_list = sorted(finger_list)
# if it din't find any valid defects,
# get the farthest point to be a candidate for finger
if len(finger_list) == 0:
finger_list.append(dist_center_c[0][0])
finger_list_filtered.append((dist_center_c[0][0][0],dist_center_c[0][0][1]))
finger_num = 1
continue
# remove near finger points
finger_list = sorted(finger_list, key = lambda s: s[0])
nearst_dist = 20 #max distance between point to mean points
finger_list_filtered = []
for i in xrange(len(finger_list)):
flag_duplicated = False
point_sum = []
for k in xrange(len(finger_list)):
if calc_distance(finger_list[i],finger_list[k]) < nearst_dist:
point_sum.append(finger_list[k])
filtered_meanpoint = tuple(map(lambda y: int(sum(y) / float(len(y))), zip(*point_sum)))
if filtered_meanpoint not in finger_list_filtered:
finger_list_filtered.append(filtered_meanpoint)
finger_num = len(finger_list_filtered)
end_timer("Find Contours")
# Check for Calibration
start_timer()
if calibration_counter < 20:
command_buffer.append(finger_num)
calibration_counter = calibration_counter + 1
else:
min_mode_command = max(set(command_buffer), key=command_buffer.count)
min_mode_command_percent = 100*command_buffer.count(min_mode_command)/len(command_buffer)
print "Mode fingers: "+str(min_mode_command)+" | "+str(min_mode_command_percent)+"%"
if min_mode_command == 5 and flag_calibration_timeout:
flag_calibration_timeout = False
hand_color_l = hand_color_l_temp
hand_color_h = hand_color_h_temp
print "Got 5 Fingers! System are calibrated"
if not flag_calibration_timeout and min_mode_command < 0:
calibration_timeout_counter = calibration_timeout_counter + 1
if calibration_timeout_counter >= 3:
flag_calibration_timeout = True
calibration_timeout_counter = 0
print "Timeout Calibration!! put yout hand in the middle and open all your fingers"
if flag_calibration_timeout:
calibrate_hand_flag =True
hand_color_l_temp = hand_color_l
hand_color_h_temp = hand_color_h
print "Calibration on!"
command_buffer = []
command_buffer.append(finger_num)
calibration_counter = 0
end_timer("Calibration_check")
# Set behaviours - pode checar se o comando for maior que 50% predominandte, senao conta tambem e anda calibar
start_timer()
if not flag_calibration_timeout:
if finger_num < 0:
command = "O"
if finger_num == 0:
command = "S"
finger_angle_offset = 0
finger_angle_range = 25
if finger_num == 1:
finger_angle = int(math.degrees(math.acos(float(finger_list_filtered[0][0] - center[0])/(calc_distance(finger_list_filtered[0],center)))))
if finger_angle > (90+finger_angle_range+finger_angle_offset) :
command = "FL"
else:
if finger_angle < (90-finger_angle_range+finger_angle_offset):
command = "FR"
else:
command = "F0"
if finger_num == 2:
mean_2_fingers = ((finger_list_filtered[0][0]+finger_list_filtered[1][0])/2,(finger_list_filtered[0][1]+finger_list_filtered[1][1])/2)
finger_angle = int(math.degrees(math.acos(float( mean_2_fingers[0]- center[0])/(calc_distance(mean_2_fingers,center)))))
if finger_angle > (90+finger_angle_range+finger_angle_offset) :
command = "BL"
else:
if finger_angle < (90-finger_angle_range+finger_angle_offset):
command = "BR"
else:
command = "B0"
if 3 <= finger_num <= 4:
command = "N"
if finger_num >= 5:
command = "X"
end_timer("Command")
# Display it if flag is setted
start_timer()
if flag_display:
if finger_num >= 0:
status_detector = "Status: "+str(finger_num)+" - command:"+command
# print status_detector
for i in xrange(0,len(defects_list)):
cv2.putText(frame,str(i),defects_list[i][2] , cv2.FONT_HERSHEY_SIMPLEX,0.5, (0, 0, 255), 2)
for i in xrange(0,len(finger_list_filtered)):
cv2.putText(frame,str(i),finger_list_filtered[i] , cv2.FONT_HERSHEY_SIMPLEX,1, (0, 255,0), 2)
cv2.line(frame,center,finger_list_filtered[i],[0,255,0],2)
cv2.circle(frame, center, 10, (0,255,255),-1)
# cv2.circle(frame, center2, 10, (255,0,255),-1)
cv2.putText(frame,str(command),center, cv2.FONT_HERSHEY_SIMPLEX,1, (255, 255,0), 2)
cv2.drawContours(frame, [hull_rp], -1, (255,0,0),lineType = cv2.LINE_8, thickness = 2)
# cv2.circle(frame, center, int(dist_center_c[len(dist_center_c)-2][1]), (0,150,150),2)
cv2.circle(frame, center, int(mean_dist_center), (55,250,50),2)
cv2.putText(frame,status_detector,(20,20), cv2.FONT_HERSHEY_SIMPLEX,0.5, (255, 255,0), 2)
cv2.putText(frame,"solidity: "+str("{0:2.2f}".format(contour_solidity)),(20,40), cv2.FONT_HERSHEY_SIMPLEX,0.5, (255, 255,0), 2)
# cv2.putText(frame,status_detector,(20,60), cv2.FONT_HERSHEY_SIMPLEX,0.4, (255, 255,0), 2)
cv2.imshow("plots",frame)
cv2.imshow("mask2",mask2)
# cv2.imshow("mask0",mask0)
end_timer("plot")
start_timer()
# Send commands via TCP socket
send_command(command)
command = " "
end_timer("Send commands")
timer_list_2.append(timer_list)
if timer_list_flag:
print "Timer_list:\n instant: " +str(timer_list)
timer_list_mean = []
for k in xrange(0,len(timer_list_2[0])):
timer_list_mean.append((timer_list[k][0],"{0:2.2f}".format(np.average([timer_list_2[i][k][1] for i in range(0,len(timer_list_2))],axis = 0))))
print " average: "+ str(timer_list_mean)
timer_list_2 = []
timer_list_flag = False
k = cv2.waitKey(1) & 0xFF
if k == 27 or k == ord('q'):
timer_list_mean = []
for k in xrange(0,len(timer_list_2[0])):
timer_list_mean.append((timer_list[k][0],"{0:2.2f}".format(np.average([timer_list_2[i][k][1] for i in range(0,len(timer_list_2))],axis = 0))))
print " average: "+ str(timer_list_mean)
send_command("X")
command = " "
break
if k == ord('d'):
flag_display = not(flag_display)
if k == ord('c'):
# cv2.destroyAllWindows()
calibrate_hand_flag =True
if k == ord('t'):
timer_list_flag = True
except KeyboardInterrupt:
pass
print "Closing program..."
# Clean windows
if flag_display:
cv2.destroyAllWindows()
# Stop camera module
if(flag_raspberry>0):
vs.stop()
else:
vs.release()
# http://www2.ic.uff.br/iwssip2010/Proceedings/nav/papers/paper_128.pdf
| |
import unittest
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.analysis.eos import EOS
from pymatgen.analysis.quasiharmonic import QuasiharmonicDebyeApprox
__author__ = "Kiran Mathew"
class TestQuasiharmociDebyeApprox(unittest.TestCase):
    """Quasiharmonic Debye approximation checks for a 2-atom B-N cell.

    NOTE(review): "Quasiharmoci" looks like a typo for "Quasiharmonic";
    the name is kept as-is since unittest only needs the Test* prefix
    for discovery.
    """
    def setUp(self):
        # Two-atom B-N primitive cell built from an explicit lattice/site dict.
        struct = Structure.from_dict(
            {
                "lattice": {
                    "a": 2.5630200477817295,
                    "alpha": 59.999993839702206,
                    "b": 2.563020442699644,
                    "beta": 59.999988742674944,
                    "c": 2.56301993,
                    "gamma": 60.00000504373715,
                    "matrix": [
                        [2.21964022, 0.0, 1.28151046],
                        [0.73987974, 2.09269747, 1.28151046],
                        [-0.0, -0.0, 2.56301993],
                    ],
                    "volume": 11.905318492097948,
                },
                "sites": [
                    {
                        "abc": [-0.0, -0.0, -0.0],
                        "label": "B",
                        "species": [{"element": "B", "occu": 1}],
                        "xyz": [0.0, 0.0, 0.0],
                    },
                    {
                        "abc": [0.25, 0.25, 0.25],
                        "label": "N",
                        "species": [{"element": "N", "occu": 1}],
                        "xyz": [0.73987999, 0.5231743675, 1.2815102125],
                    },
                ],
            }
        )
        # 21 E(V) samples; note the volumes list is deliberately left in the
        # order it was produced (not sorted).
        self.energies = [
            -15.76315281,
            -16.11541813,
            -16.41784171,
            -16.47471523,
            -16.63624155,
            -16.6741551,
            -16.78661144,
            -16.88768073,
            -16.92450672,
            -17.04863261,
            -17.06126553,
            -17.15786866,
            -17.19784976,
            -17.25078749,
            -17.30017149,
            -17.32578594,
            -17.3708922,
            -17.38125127,
            -17.41231934,
            -17.41534352,
            -17.42636644,
        ]
        self.volumes = [
            8.678977833994137,
            8.971505437031707,
            9.27052889309282,
            15.845976281427582,
            15.417733609491387,
            9.576127994353376,
            14.997270631725604,
            9.888370962140854,
            14.584523227465766,
            14.179424329180256,
            10.20732378093211,
            13.78189117535765,
            10.533067462993838,
            13.391864274742145,
            10.865663655755416,
            13.009260480347871,
            11.205193091129587,
            12.634015019827533,
            11.551718049704352,
            12.26606042141808,
            11.90531496343142,
        ]
        self.eos = "vinet"
        self.T = 300
        # Single-temperature run: t_min == t_max == self.T.
        self.qhda = QuasiharmonicDebyeApprox(
            self.energies,
            self.volumes,
            struct,
            t_min=self.T,
            t_max=self.T,
            eos=self.eos,
        )
        # Reference optimum volume reused by the per-property checks below.
        self.opt_vol = 11.957803302392925
    def test_bulk_modulus(self):
        """qhda's bulk modulus agrees with a direct EOS fit of the same data."""
        eos = EOS(self.eos)
        eos_fit = eos.fit(self.volumes, self.energies)
        # str()/split() strips the trailing unit string before comparing floats.
        bulk_modulus = float(str(eos_fit.b0_GPa).split()[0])
        bulk_modulus_ans = float(str(self.qhda.bulk_modulus).split()[0])
        np.testing.assert_almost_equal(bulk_modulus, bulk_modulus_ans, 3)
    def test_optimum_volume(self):
        """Optimum volume at the single computed temperature matches reference."""
        opt_vol = self.qhda.optimum_volumes[0]
        np.testing.assert_almost_equal(opt_vol, self.opt_vol, 3)
    def test_debye_temperature(self):
        """Debye temperature at the optimum volume (reference value in K)."""
        theta = self.qhda.debye_temperature(self.opt_vol)
        np.testing.assert_almost_equal(theta, 2559.675227, 3)
    def test_gruneisen_paramter(self):
        """Grueneisen parameter at (T, opt_vol) matches reference."""
        gamma = self.qhda.gruneisen_parameter(self.T, self.opt_vol)
        np.testing.assert_almost_equal(gamma, 1.670486, 3)
    def test_thermal_conductivity(self):
        """Thermal conductivity at (T, opt_vol); looser tolerance (1 decimal)."""
        kappa = self.qhda.thermal_conductivity(self.T, self.opt_vol)
        np.testing.assert_almost_equal(kappa, 131.736242, 1)
    def test_vibrational_internal_energy(self):
        """Vibrational internal energy at (T, opt_vol) matches reference."""
        u = self.qhda.vibrational_internal_energy(self.T, self.opt_vol)
        np.testing.assert_almost_equal(u, 0.50102, 3)
    def test_vibrational_free_energy(self):
        """Vibrational free energy at (T, opt_vol) matches reference."""
        A = self.qhda.vibrational_free_energy(self.T, self.opt_vol)
        np.testing.assert_almost_equal(A, 0.494687, 3)
class TestAnharmonicQuasiharmociDebyeApprox(unittest.TestCase):
    """Same property checks with anharmonic_contribution=True, on FCC Al."""
    def setUp(self):
        # One-atom FCC Al cell parsed from an inline POSCAR string.
        struct = Structure.from_str(
            """FCC Al
1.0
2.473329 0.000000 1.427977
0.824443 2.331877 1.427977
0.000000 0.000000 2.855955
Al
1
direct
0.000000 0.000000 0.000000 Al""",
            fmt="POSCAR",
        )
        # Seven E(V) samples; volumes kept in their original (unsorted) order.
        self.energies = [
            -3.69150886,
            -3.70788383,
            -3.71997361,
            -3.72522301,
            -3.73569569,
            -3.73649743,
            -3.74054982,
        ]
        self.volumes = [
            14.824542034870653,
            18.118887714656875,
            15.373596786943025,
            17.569833126580278,
            15.92265868064787,
            17.02077912220064,
            16.471717630914863,
        ]
        self.eos = "vinet"
        self.T = 500
        # Single-temperature run with the anharmonic correction enabled.
        self.qhda = QuasiharmonicDebyeApprox(
            self.energies,
            self.volumes,
            struct,
            t_min=self.T,
            t_max=self.T,
            eos=self.eos,
            anharmonic_contribution=True,
        )
        # Reference optimum volume reused by the checks below.
        self.opt_vol = 17.216094889116807
    def test_optimum_volume(self):
        """Optimum volume at the single computed temperature matches reference."""
        opt_vol = self.qhda.optimum_volumes[0]
        np.testing.assert_almost_equal(opt_vol, self.opt_vol, 3)
    def test_debye_temperature(self):
        """Debye temperature at opt_vol (4 significant figures)."""
        theta = self.qhda.debye_temperature(self.opt_vol)
        np.testing.assert_approx_equal(theta, 601.239096, 4)
    def test_gruneisen_paramter(self):
        """Grueneisen parameter evaluated at T=0 and the EOS-fit v0."""
        gamma = self.qhda.gruneisen_parameter(0, self.qhda.ev_eos_fit.v0)
        np.testing.assert_almost_equal(gamma, 2.188302, 3)
    def test_thermal_conductivity(self):
        """Thermal conductivity at (T, opt_vol); looser tolerance (1 decimal)."""
        kappa = self.qhda.thermal_conductivity(self.T, self.opt_vol)
        np.testing.assert_almost_equal(kappa, 21.810997, 1)
    def test_vibrational_internal_energy(self):
        """Vibrational internal energy at (T, opt_vol) matches reference."""
        u = self.qhda.vibrational_internal_energy(self.T, self.opt_vol)
        np.testing.assert_almost_equal(u, 0.13845, 3)
    def test_vibrational_free_energy(self):
        """Vibrational free energy at (T, opt_vol); negative with anharmonicity."""
        A = self.qhda.vibrational_free_energy(self.T, self.opt_vol)
        np.testing.assert_almost_equal(A, -0.014620, 3)
| |
#!/usr/bin/env python
"tests_generator.py -- randomly generates the environment and tasks descriptions"
import random
import sys
## parameters
# Output directory for the generated XML files (overridable via argv[1]).
gDir = './'
# Number of test cases to generate (overridable via argv[2]).
gTestSize = 100
# Bounds for the random number of locations (one big object per location).
gLocMinSize = 3
gLocMaxSize = 10
# Bounds for the random number of small (portable) objects.
gSmallObjMinSize = 5
gSmallObjMaxSize = 15
# Upper bounds on how many object descriptions are withheld (mis) or
# reported wrongly (err).
gMisInfoMaxSize = 5
gErrInfoMaxSize = 5
# Bounds for the number of tasks, info statements and constraints per test.
gTaskMinSize = 0
gTaskMaxSize = 3
gInfoMinSize = 0
gInfoMaxSize = 4
gConsMinSize = 0
gConsMaxSize = 3
# Vocabularies the random drawing is done from.
gContainers = [
    'washmachine',
    'closet',
    'cupboard',
    'refrigerator',
    'microwave']
gBigObjs = [
    'human',
    'plant',
    'couch',
    'chair',
    'sofa',
    'bed',
    'table',
    'workspace',
    'worktable',
    'teapoy',
    'desk',
    'television',
    'airconditioner']
gSmallObjs = [
    'book',
    'can',
    'remotecontrol',
    'bottle',
    'cup']
gColor = [
    # the number of empty string determines the proportion of no-color cases
    '', '',
    'white', 'red', 'green', 'yellow', 'blue', 'black']
gTasks = [
    'give', 'puton', 'goto', 'putdown', 'pickup', 'open', 'close', 'putin', 'takeout']
gPredicates = [
    'at', 'plate', 'opened', 'closed']
## the main part
class BigObj:
    """A large, immovable object fixed at one location.

    The sort is drawn from the combined big-object + container vocabulary;
    container sorts additionally get a random opened/closed state.
    """
    def __init__(self, obj_id, loc_id):
        self.obj_id = obj_id
        self.loc_id = loc_id
        self.sort = random.choice(gBigObjs + gContainers)
        is_container = self.sort in gContainers
        self.type = 'container' if is_container else ''
        # Only containers draw a second random value, matching the original
        # order of random calls (sort first, then opened).
        self.opened = random.choice((True, False)) if is_container else False
class SmallObj:
    """A portable object with a random sort, color and location."""
    def __init__(self, obj_id, loc_list):
        self.obj_id = obj_id
        self.sort = random.choice(gSmallObjs)
        # gColor contains empty strings, so some objects end up colorless.
        self.color = random.choice(gColor)
        # loc_id 0 = on the robot's plate, -1 = in its hand (see generate()).
        self.loc_id = random.choice(loc_list)
        # Rendered description fragments, filled in by genInfo().
        self.info = ''
        self.mis = ''
        self.err_w = ''
        self.err_r = ''
    def genInfo(self, type, big_objs):
        """Render this object's predicates.

        'type' selects how the location predicate is reported: '' (normal,
        appended to self.info), 'mis' (withheld into self.mis) or 'err'
        (wrong location into self.err_w, right one into self.err_r).
        NOTE(review): the parameter name 'type' shadows the builtin.
        """
        self.info = '(sort {0} {1}) (size {0} small)'.format(self.obj_id, self.sort)
        if self.color != '':
            self.info += ' (color {0} {1})'.format(self.obj_id, self.color)
        # location is a little complicated
        if self.loc_id == 0:
            self.info += ' (plate %d)'%self.obj_id
        elif self.loc_id == -1:
            self.info += ' (hold %d)'%self.obj_id
        else:
            loc_str = ''
            loc_str_w = ''
            loc_id = self.loc_id
            if type == 'err':
                # Draw a different location to report as the wrong one.
                while loc_id == self.loc_id:
                    loc_id = random.choice(list(big_objs.keys()))
            # 'inside' for containers, 'at' for other big objects.
            if big_objs[self.loc_id].type == 'container':
                loc_str = '(inside %d %d)' % (self.obj_id, self.loc_id)
                loc_str_w = '(inside %d %d)' % (self.obj_id, loc_id)
            else:
                loc_str += '(at %d %d)' % (self.obj_id, self.loc_id)
                loc_str_w += '(at %d %d)' % (self.obj_id, loc_id)
            if type == 'mis':
                self.mis = loc_str
            elif type == 'err':
                self.err_w = loc_str_w
                self.err_r = loc_str
            else:
                self.info += ' ' + loc_str
class Task:
    """A randomly drawn task over the generated objects.

    Container-only tasks (open/close/putin/takeout) are re-drawn when the
    environment has no containers.
    """
    def __init__(self, big_objs, small_objs):
        self.type = random.choice(gTasks)
        # hasContainers cannot change inside the loop, so it is tested once.
        rand_again = not hasContainers(big_objs) and (self.type == 'open' or self.type == 'close' or self.type == 'putin' or self.type == 'takeout')
        while rand_again:
            self.type = random.choice(gTasks)
            rand_again = (self.type == 'open' or self.type == 'close' or self.type == 'putin' or self.type == 'takeout')
        # Pick argument objects; container arguments are re-drawn until an
        # actual container comes up (safe: the loop above guaranteed one).
        if self.type == 'give':
            self.var0 = random.choice(small_objs)
        elif self.type == 'puton':
            self.var0 = random.choice(small_objs)
            self.var1 = random.choice(big_objs)
        elif self.type == 'goto':
            self.var0 = random.choice(big_objs)
        elif self.type == 'putdown':
            self.var0 = random.choice(small_objs)
        elif self.type == 'pickup':
            self.var0 = random.choice(small_objs)
        elif self.type == 'open':
            self.var0 = random.choice(big_objs)
            while self.var0.type != 'container':
                self.var0 = random.choice(big_objs)
        elif self.type == 'close':
            self.var0 = random.choice(big_objs)
            while self.var0.type != 'container':
                self.var0 = random.choice(big_objs)
        elif self.type == 'putin':
            self.var0 = random.choice(small_objs)
            self.var1 = random.choice(big_objs)
            while self.var1.type != 'container':
                self.var1 = random.choice(big_objs)
        elif self.type == 'takeout':
            self.var0 = random.choice(small_objs)
            self.var1 = random.choice(big_objs)
            while self.var1.type != 'container':
                self.var1 = random.choice(big_objs)
    def genTask(self):
        """Render self.task as an s-expression with randomized conditions."""
        if self.type == 'give':
            self.task = '(:task (give Human X) (:cond %s))'%randSmallObj(self.var0, 'X')
        elif self.type == 'puton':
            self.task = '(:task (puton X Y) (:cond %s))'%(randSmallObj(self.var0, 'X') + ' ' + randBigObj(self.var1, 'Y'))
        elif self.type == 'goto':
            self.task = '(:task (goto X) (:cond %s))'%randBigObj(self.var0, 'X')
        elif self.type == 'putdown':
            self.task = '(:task (putdown X) (:cond %s))'%randSmallObj(self.var0, 'X')
        elif self.type == 'pickup':
            self.task = '(:task (pickup X) (:cond %s))'%randSmallObj(self.var0, 'X')
        elif self.type == 'open':
            self.task = '(:task (open X) (:cond %s))'%randBigObj(self.var0, 'X')
        elif self.type == 'close':
            self.task = '(:task (close X) (:cond %s))'%randBigObj(self.var0, 'X')
        elif self.type == 'putin':
            self.task = '(:task (putin X Y) (:cond %s))'%(randSmallObj(self.var0, 'X') + ' ' + randBigObj(self.var1, 'Y'))
        elif self.type == 'takeout':
            self.task = '(:task (takeout X Y) (:cond %s))'%(randSmallObj(self.var0, 'X') + ' ' + randBigObj(self.var1, 'Y'))
class Info:
    """A randomly drawn information statement over the generated objects.

    Container-only predicates (opened/closed) are re-drawn when the
    environment has no containers.
    """
    def __init__(self, big_objs, small_objs):
        self.type = random.choice(gPredicates)
        # hasContainers cannot change inside the loop, so it is tested once.
        rand_again = not hasContainers(big_objs) and (self.type == 'opened' or self.type == 'closed')
        while rand_again:
            self.type = random.choice(gPredicates)
            rand_again = (self.type == 'opened' or self.type == 'closed')
        # Pick argument objects; container arguments are re-drawn until an
        # actual container comes up.
        if self.type == 'at':
            self.var0 = random.choice(small_objs)
            self.var1 = random.choice(big_objs)
        elif self.type == 'plate':
            self.var0 = random.choice(small_objs)
        elif self.type == 'opened':
            self.var0 = random.choice(big_objs)
            while self.var0.type != 'container':
                self.var0 = random.choice(big_objs)
        elif self.type == 'closed':
            self.var0 = random.choice(big_objs)
            while self.var0.type != 'container':
                self.var0 = random.choice(big_objs)
    def genInfo(self):
        """Render self.info as an s-expression with randomized conditions."""
        if self.type == 'at':
            self.info = '(:info (at X Y) (:cond %s))'%(randSmallObj(self.var0, 'X') + ' ' + randBigObj(self.var1, 'Y'))
        elif self.type == 'plate':
            self.info = '(:info (plate X) (:cond %s))'%randSmallObj(self.var0, 'X')
        elif self.type == 'opened':
            self.info = '(:info (opened X) (:cond %s))'%randBigObj(self.var0, 'X')
        elif self.type == 'closed':
            self.info = '(:info (closed X) (:cond %s))'%randBigObj(self.var0, 'X')
class Cons:
    """A (possibly double-negated) constraint wrapping a Task or an Info."""
    def __init__(self, task, info):
        # Negation flavor is decided up front, independently of the payload.
        self.type = random.choice(('not', 'not not'))
        self.task = task
        self.info = info
    def genCons(self):
        """Render self.cons from whichever payload is present (task wins)."""
        body = ''
        if self.task is not None:
            self.task.genTask()
            body = self.task.task
        elif self.info is not None:
            self.info.genInfo()
            body = self.info.info
        tag = {'not': ':cons_not', 'not not': ':cons_notnot'}[self.type]
        self.cons = '(%s %s)' % (tag, body)
def randSmallObj(small_obj, var):
    """Return a random, non-empty, space-joined subset of small_obj's
    predicates (sort always available, color only when the object has one),
    in random order, with *var* as the variable name.
    """
    preds = ['(sort %s %s)' % (var, small_obj.sort)]
    if small_obj.color != '':
        preds.append('(color %s %s)' % (var, small_obj.color))
    # Same random-call order as before: size first, then shuffle.
    size = random.randint(1, len(preds))
    random.shuffle(preds)
    # Idiomatic join replaces the manual "+= ' ' + ..." accumulation loop.
    return ' '.join(preds[:size])
def randBigObj(big_obj, var):
    """Return a random, non-empty, space-joined subset of big_obj's
    predicates (sort always available, type only when non-empty),
    in random order, with *var* as the variable name.
    """
    preds = ['(sort %s %s)' % (var, big_obj.sort)]
    if big_obj.type != '':
        preds.append('(type %s %s)' % (var, big_obj.type))
    # Same random-call order as before: size first, then shuffle.
    size = random.randint(1, len(preds))
    random.shuffle(preds)
    # Idiomatic join replaces the manual "+= ' ' + ..." accumulation loop.
    return ' '.join(preds[:size])
def hasContainers(big_objs):
    """Return True if any object in *big_objs* has a container sort."""
    # any() over a generator replaces the manual search loop.
    return any(o.sort in gContainers for o in big_objs)
def generate(test_id):
testXML = open(gDir + 'test.' + str(test_id) + '.xml', 'w')
testXML.write('<?xml version="1.0" encoding="utf-8"?>\n')
testXML.write('<test>\n')
testXML.write('<env>\n')
bigObjs = {}
smallObjs = []
info = []
mis = []
err = []
# 1. generate environment descriptions
objID = 1
hasHuman = False
emptyPlate = True
emptyHand = True
# 1.1. randomize locations
locSize = random.randint(gLocMinSize, gLocMaxSize)
locList = range(1, locSize + 1)
for loc_id in locList:
bigObj = BigObj(objID, loc_id)
while hasHuman and bigObj.sort == 'human':
bigObj = BigObj(objID, loc_id)
if not hasHuman and bigObj.sort == 'human':
hasHuman = True
bigObjs[loc_id] = bigObj
objID += 1
# 1.2. randomize small objects
smallObjSize = random.randint(gSmallObjMinSize, gSmallObjMaxSize)
# location id 0 means in the plate of robot; -1 means in the hand of robot
_loc_list = range(-1, locSize + 1)
for obj_id in range(objID, objID + smallObjSize + 1):
smallObj = SmallObj(obj_id, _loc_list)
while (not emptyPlate and smallObj.loc_id == 0) or (not emptyHand and smallObj.loc_id == -1):
smallObj = SmallObj(obj_id, _loc_list)
if emptyPlate and smallObj.loc_id == 0:
emptyPlate = False
if emptyHand and smallObj.loc_id == -1:
emptyHand = False
smallObjs.append(smallObj)
# 1.3. pick out some info to generate the missing and error one
random.shuffle(smallObjs)
misSize = random.randint(1, gMisInfoMaxSize)
mis = smallObjs[0:misSize]
# output the mis sorted by 'obj_id'
mis = sorted(mis, key = lambda o:o.obj_id)
errSize = random.randint(1, gErrInfoMaxSize)
err = smallObjs[misSize:misSize + errSize]
# output the err sorted by 'obj_id'
err = sorted(err, key = lambda o:o.obj_id)
info = smallObjs[misSize + errSize:]
# 1.4. output to the xml file
info_str = ''
# robot state
if emptyPlate:
info_str += '(plate 0) '
if emptyHand:
info_str += '(hold 0) '
rob_loc = random.choice(locList)
info_str += '(at 0 %d)\n'%rob_loc
for o in bigObjs.values():
info_str += '(sort {0} {1}) (size {0} big) (at {0} {2})'.format(o.obj_id, o.sort, o.loc_id)
if o.type == 'container':
info_str += ' (type {0} container)'.format(o.obj_id)
if o.opened:
info_str += ' (opened {0})'.format(o.obj_id)
else:
info_str += ' (closed {0})'.format(o.obj_id)
info_str += '\n'
sortedInfoOjbs = []
for o in info:
o.genInfo('info', bigObjs)
sortedInfoOjbs.append(o)
mis_str = ''
for o in mis:
o.genInfo('mis', bigObjs)
sortedInfoOjbs.append(o)
if o.mis != '':
mis_str += o.mis + '\n'
err_str_w = ''
err_str_r = ''
for o in err:
o.genInfo('err', bigObjs)
sortedInfoOjbs.append(o)
if o.err_w != '':
err_str_w += o.err_w + '\n'
err_str_r += o.err_r + '\n'
# output the info sorted by 'obj_id'
sortedInfoOjbs = sorted(sortedInfoOjbs, key = lambda o:o.obj_id)
for o in sortedInfoOjbs:
info_str += o.info + '\n'
testXML.write('<info>\n%s</info>\n' % info_str)
testXML.write('<mis>\n%s</mis>\n' % mis_str)
testXML.write('<err>\n<r>\n%s</r>\n<w>\n%s</w>\n</err>\n' % (err_str_r, err_str_w))
testXML.write('</env>\n')
# 2. generate task description
_big_objs = []
for i in bigObjs.values():
_big_objs.append(i)
instr = '(:ins \n'
taskSize = random.randint(gTaskMinSize, gTaskMaxSize)
for i in range(0, taskSize):
task = Task(_big_objs, smallObjs)
task.genTask()
instr += task.task + '\n'
infoSize = random.randint(gInfoMinSize, gInfoMaxSize)
for i in range(0, infoSize):
info = Info(_big_objs, smallObjs)
info.genInfo()
instr += info.info + '\n'
consSize = random.randint(gConsMinSize, gConsMaxSize)
for i in range(0, consSize):
cons = Cons(None, None)
if cons.type == 'not not':
cons.info = Info(_big_objs, smallObjs)
elif random.choice((True, False)):
cons.task = Task(_big_objs, smallObjs)
else:
cons.info = Info(_big_objs, smallObjs)
cons.genCons()
instr += cons.cons + '\n'
instr += ')\n'
testXML.write('<instr>%s</instr>\n' % instr)
testXML.write('<nl>\n</nl>\n')
testXML.write('</test>\n')
if __name__ == '__main__':
    # Optional CLI overrides: argv[1] = output directory, argv[2] = test count.
    if len(sys.argv) > 1:
        gDir = sys.argv[1] + '/'
    if len(sys.argv) > 2:
        gTestSize = int(sys.argv[2])
    # 'with' ensures test.list is flushed and closed; the original relied
    # on the GC to close it.
    with open(gDir + 'test.list', 'w') as testlist:
        for i in range(1, gTestSize + 1):
            testlist.write('test.' + str(i) + '.xml\n')
            generate(i)
| |
# Copyright 2012 Josh Durgin
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import os
import tempfile
import mock
from oslo_utils import timeutils
from oslo_utils import units
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.image import fake as fake_image
from cinder.tests.test_volume import DriverTestCase
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
from cinder.volume.flows.manager import create_volume
LOG = logging.getLogger(__name__)
# This is used to collect raised exceptions so that tests may check what was
# raised.
# NOTE: this must be initialised in test setUp().
RAISED_EXCEPTIONS = []
class MockException(Exception):
    """Base mock exception that logs every raised subclass.

    Each instantiation records the concrete class in the module-level
    RAISED_EXCEPTIONS list so tests can assert on what was raised.
    """
    def __init__(self, *args, **kwargs):
        RAISED_EXCEPTIONS.append(type(self))
# Stand-ins for the rbd exception types; wired onto the mocked rbd module
# inside common_mocks.
class MockImageNotFoundException(MockException):
    """Used as mock for rbd.ImageNotFound."""
class MockImageBusyException(MockException):
    """Used as mock for rbd.ImageBusy."""
class MockImageExistsException(MockException):
    """Used as mock for rbd.ImageExists."""
def common_mocks(f):
    """Decorator to set mocks common to all tests.
    The point of doing these mocks here is so that we don't accidentally set
    mocks that can't/don't get unset.
    """
    def _common_inner_inner1(inst, *args, **kwargs):
        # Stacked decorators are applied bottom-up, so the patches arrive in
        # reverse order: rados, rbd, RADOSClient, RBDVolumeProxy.
        @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
        @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
        @mock.patch('cinder.backup.drivers.ceph.rbd')
        @mock.patch('cinder.backup.drivers.ceph.rados')
        def _common_inner_inner2(mock_rados, mock_rbd, mock_client,
                                 mock_proxy):
            # Expose each patch on the test instance so tests can configure
            # return values and assert on calls.
            inst.mock_rbd = mock_rbd
            inst.mock_rados = mock_rados
            inst.mock_client = mock_client
            inst.mock_proxy = mock_proxy
            # Wire the mock exception hierarchy used by assertRaises checks.
            inst.mock_rbd.RBD.Error = Exception
            inst.mock_rados.Error = Exception
            inst.mock_rbd.ImageBusy = MockImageBusyException
            inst.mock_rbd.ImageNotFound = MockImageNotFoundException
            inst.mock_rbd.ImageExists = MockImageExistsException
            # Point the driver at the mocked modules.
            inst.driver.rbd = inst.mock_rbd
            inst.driver.rados = inst.mock_rados
            return f(inst, *args, **kwargs)
        # Called immediately so the patches are active only for this test call.
        return _common_inner_inner2()
    return _common_inner_inner1
CEPH_MON_DUMP = """dumped monmap epoch 1
{ "epoch": 1,
"fsid": "33630410-6d93-4d66-8e42-3b953cf194aa",
"modified": "2013-05-22 17:44:56.343618",
"created": "2013-05-22 17:44:56.343618",
"mons": [
{ "rank": 0,
"name": "a",
"addr": "[::1]:6789\/0"},
{ "rank": 1,
"name": "b",
"addr": "[::1]:6790\/0"},
{ "rank": 2,
"name": "c",
"addr": "[::1]:6791\/0"},
{ "rank": 3,
"name": "d",
"addr": "127.0.0.1:6792\/0"},
{ "rank": 4,
"name": "e",
"addr": "example.com:6791\/0"}],
"quorum": [
0,
1,
2]}
"""
class RBDTestCase(test.TestCase):
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(RBDTestCase, self).setUp()
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = None
self.cfg.rbd_pool = 'rbd'
self.cfg.rbd_ceph_conf = None
self.cfg.rbd_secret_uuid = None
self.cfg.rbd_user = None
self.cfg.volume_dd_blocksize = '1M'
self.cfg.rbd_store_chunk_size = 4
mock_exec = mock.Mock()
mock_exec.return_value = ('', '')
self.driver = driver.RBDDriver(execute=mock_exec,
configuration=self.cfg)
self.driver.set_initialized()
self.volume_name = u'volume-00000001'
self.snapshot_name = u'snapshot-00000001'
self.volume_size = 1
self.volume = dict(name=self.volume_name, size=self.volume_size)
self.snapshot = dict(volume_name=self.volume_name,
name=self.snapshot_name)
    @common_mocks
    def test_create_volume(self):
        """create_volume passes size, order and layering flags to RBD.create."""
        client = self.mock_client.return_value
        client.__enter__.return_value = client
        with mock.patch.object(self.driver, '_supports_layering') as \
                mock_supports_layering:
            mock_supports_layering.return_value = True
            self.driver.create_volume(self.volume)
            # rbd object 'order' is log2 of the chunk size in bytes.
            chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
            order = int(math.log(chunk_size, 2))
            args = [client.ioctx, str(self.volume_name),
                    self.volume_size * units.Gi, order]
            kwargs = {'old_format': False,
                      'features': self.mock_rbd.RBD_FEATURE_LAYERING}
            self.mock_rbd.RBD.return_value.create.assert_called_once_with(
                *args, **kwargs)
            client.__enter__.assert_called_once_with()
            client.__exit__.assert_called_once_with(None, None, None)
            mock_supports_layering.assert_called_once_with()
    @common_mocks
    def test_manage_existing_get_size(self):
        """manage_existing_get_size converts the image size in bytes to GiB."""
        with mock.patch.object(self.driver.rbd.Image(), 'size') as \
                mock_rbd_image_size:
            with mock.patch.object(self.driver.rbd.Image(), 'close') \
                    as mock_rbd_image_close:
                mock_rbd_image_size.return_value = 2 * units.Gi
                existing_ref = {'source-name': self.volume_name}
                return_size = self.driver.manage_existing_get_size(
                    self.volume,
                    existing_ref)
                self.assertEqual(2, return_size)
                mock_rbd_image_size.assert_called_once_with()
                # The image must be closed even on the happy path.
                mock_rbd_image_close.assert_called_once_with()
    @common_mocks
    def test_manage_existing_get_invalid_size(self):
        """A non-numeric image size raises VolumeBackendAPIException."""
        with mock.patch.object(self.driver.rbd.Image(), 'size') as \
                mock_rbd_image_size:
            with mock.patch.object(self.driver.rbd.Image(), 'close') \
                    as mock_rbd_image_close:
                mock_rbd_image_size.return_value = 'abcd'
                existing_ref = {'source-name': self.volume_name}
                self.assertRaises(exception.VolumeBackendAPIException,
                                  self.driver.manage_existing_get_size,
                                  self.volume, existing_ref)
                mock_rbd_image_size.assert_called_once_with()
                # The image must be closed even when the size is invalid.
                mock_rbd_image_close.assert_called_once_with()
    @common_mocks
    def test_manage_existing(self):
        """manage_existing renames the existing rbd image to the volume name."""
        client = self.mock_client.return_value
        client.__enter__.return_value = client
        with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \
                mock_rbd_image_rename:
            exist_volume = 'vol-exist'
            existing_ref = {'source-name': exist_volume}
            mock_rbd_image_rename.return_value = 0
            self.driver.manage_existing(self.volume, existing_ref)
            mock_rbd_image_rename.assert_called_with(
                client.ioctx,
                exist_volume,
                self.volume_name)
    @common_mocks
    def test_manage_existing_with_exist_rbd_image(self):
        """Renaming onto an existing image propagates rbd.ImageExists."""
        client = self.mock_client.return_value
        client.__enter__.return_value = client
        self.mock_rbd.RBD.return_value.rename.side_effect = (
            MockImageExistsException)
        exist_volume = 'vol-exist'
        existing_ref = {'source-name': exist_volume}
        self.assertRaises(self.mock_rbd.ImageExists,
                          self.driver.manage_existing,
                          self.volume, existing_ref)
        # Make sure the exception was raised
        self.assertEqual(RAISED_EXCEPTIONS,
                         [self.mock_rbd.ImageExists])
    @common_mocks
    def test_create_volume_no_layering(self):
        """Without layering support, old_format is used and no features set."""
        client = self.mock_client.return_value
        client.__enter__.return_value = client
        with mock.patch.object(self.driver, '_supports_layering') as \
                mock_supports_layering:
            mock_supports_layering.return_value = False
            self.driver.create_volume(self.volume)
            # rbd object 'order' is log2 of the chunk size in bytes.
            chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
            order = int(math.log(chunk_size, 2))
            args = [client.ioctx, str(self.volume_name),
                    self.volume_size * units.Gi, order]
            kwargs = {'old_format': True,
                      'features': 0}
            self.mock_rbd.RBD.return_value.create.assert_called_once_with(
                *args, **kwargs)
            client.__enter__.assert_called_once_with()
            client.__exit__.assert_called_once_with(None, None, None)
            mock_supports_layering.assert_called_once_with()
    @common_mocks
    def test_delete_backup_snaps(self):
        """_delete_backup_snaps removes each snapshot reported for the image."""
        self.driver.rbd.Image.remove_snap = mock.Mock()
        with mock.patch.object(self.driver, '_get_backup_snaps') as \
                mock_get_backup_snaps:
            mock_get_backup_snaps.return_value = [{'name': 'snap1'}]
            rbd_image = self.driver.rbd.Image()
            self.driver._delete_backup_snaps(rbd_image)
            mock_get_backup_snaps.assert_called_once_with(rbd_image)
            self.assertTrue(
                self.driver.rbd.Image.return_value.remove_snap.called)
    @common_mocks
    def test_delete_volume(self):
        """Plain delete: backup snaps removed, no unprotect, one RBD.remove."""
        client = self.mock_client.return_value
        # No snapshots on the volume.
        self.driver.rbd.Image.return_value.list_snaps.return_value = []
        with mock.patch.object(self.driver, '_get_clone_info') as \
                mock_get_clone_info:
            with mock.patch.object(self.driver, '_delete_backup_snaps') as \
                    mock_delete_backup_snaps:
                # (None, None, None) means the volume is not a clone.
                mock_get_clone_info.return_value = (None, None, None)
                self.driver.delete_volume(self.volume)
                mock_get_clone_info.assert_called_once_with(
                    self.mock_rbd.Image.return_value,
                    self.volume_name,
                    None)
                (self.driver.rbd.Image.return_value
                 .list_snaps.assert_called_once_with())
                client.__enter__.assert_called_once_with()
                client.__exit__.assert_called_once_with(None, None, None)
                mock_delete_backup_snaps.assert_called_once_with(
                    self.mock_rbd.Image.return_value)
                # Nothing to unprotect since there are no snapshots.
                self.assertFalse(
                    self.driver.rbd.Image.return_value.unprotect_snap.called)
                self.assertEqual(
                    1, self.driver.rbd.RBD.return_value.remove.call_count)
    @common_mocks
    def delete_volume_not_found(self):
        # NOTE(review): missing the "test_" prefix, so unittest never runs
        # this method; it appears superseded by test_delete_volume_not_found
        # further down. Renaming is left to a follow-up because the lighter
        # mocking here may not pass if activated as-is.
        self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound
        self.assertIsNone(self.driver.delete_volume(self.volume))
        self.mock_rbd.Image.assert_called_once_with()
        # Make sure the exception was raised
        self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound])
    @common_mocks
    def test_delete_busy_volume(self):
        """ImageBusy from RBD.remove surfaces as VolumeIsBusy."""
        self.mock_rbd.Image.return_value.list_snaps.return_value = []
        self.mock_rbd.RBD.return_value.remove.side_effect = (
            self.mock_rbd.ImageBusy)
        with mock.patch.object(self.driver, '_get_clone_info') as \
                mock_get_clone_info:
            # (None, None, None) means the volume is not a clone.
            mock_get_clone_info.return_value = (None, None, None)
            with mock.patch.object(self.driver, '_delete_backup_snaps') as \
                    mock_delete_backup_snaps:
                with mock.patch.object(driver, 'RADOSClient') as \
                        mock_rados_client:
                    self.assertRaises(exception.VolumeIsBusy,
                                      self.driver.delete_volume, self.volume)
                    mock_get_clone_info.assert_called_once_with(
                        self.mock_rbd.Image.return_value,
                        self.volume_name,
                        None)
                    (self.mock_rbd.Image.return_value.list_snaps
                     .assert_called_once_with())
                    mock_rados_client.assert_called_once_with(self.driver)
                    mock_delete_backup_snaps.assert_called_once_with(
                        self.mock_rbd.Image.return_value)
                    self.assertFalse(
                        self.mock_rbd.Image.return_value.unprotect_snap.called)
                    self.assertEqual(
                        1, self.mock_rbd.RBD.return_value.remove.call_count)
                    # Make sure the exception was raised
                    self.assertEqual(RAISED_EXCEPTIONS,
                                     [self.mock_rbd.ImageBusy])
    @common_mocks
    def test_delete_volume_not_found(self):
        """ImageNotFound from RBD.remove is swallowed; delete returns None."""
        self.mock_rbd.Image.return_value.list_snaps.return_value = []
        self.mock_rbd.RBD.return_value.remove.side_effect = (
            self.mock_rbd.ImageNotFound)
        with mock.patch.object(self.driver, '_get_clone_info') as \
                mock_get_clone_info:
            # (None, None, None) means the volume is not a clone.
            mock_get_clone_info.return_value = (None, None, None)
            with mock.patch.object(self.driver, '_delete_backup_snaps') as \
                    mock_delete_backup_snaps:
                with mock.patch.object(driver, 'RADOSClient') as \
                        mock_rados_client:
                    self.assertIsNone(self.driver.delete_volume(self.volume))
                    mock_get_clone_info.assert_called_once_with(
                        self.mock_rbd.Image.return_value,
                        self.volume_name,
                        None)
                    (self.mock_rbd.Image.return_value.list_snaps
                     .assert_called_once_with())
                    mock_rados_client.assert_called_once_with(self.driver)
                    mock_delete_backup_snaps.assert_called_once_with(
                        self.mock_rbd.Image.return_value)
                    self.assertFalse(
                        self.mock_rbd.Image.return_value.unprotect_snap.called)
                    self.assertEqual(
                        1, self.mock_rbd.RBD.return_value.remove.call_count)
                    # Make sure the exception was raised
                    self.assertEqual(RAISED_EXCEPTIONS,
                                     [self.mock_rbd.ImageNotFound])
    @common_mocks
    def test_create_snapshot(self):
        """create_snapshot creates and then protects the snapshot."""
        proxy = self.mock_proxy.return_value
        proxy.__enter__.return_value = proxy
        self.driver.create_snapshot(self.snapshot)
        args = [str(self.snapshot_name)]
        proxy.create_snap.assert_called_with(*args)
        proxy.protect_snap.assert_called_with(*args)
    @common_mocks
    def test_delete_snapshot(self):
        """delete_snapshot removes and unprotects the snapshot."""
        proxy = self.mock_proxy.return_value
        proxy.__enter__.return_value = proxy
        self.driver.delete_snapshot(self.snapshot)
        args = [str(self.snapshot_name)]
        proxy.remove_snap.assert_called_with(*args)
        proxy.unprotect_snap.assert_called_with(*args)
    @common_mocks
    def test_get_clone_info(self):
        """Without a snap argument, parent info is returned and no snap set."""
        volume = self.mock_rbd.Image()
        volume.set_snap = mock.Mock()
        volume.parent_info = mock.Mock()
        parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
        volume.parent_info.return_value = parent_info
        info = self.driver._get_clone_info(volume, self.volume_name)
        self.assertEqual(info, parent_info)
        self.assertFalse(volume.set_snap.called)
        volume.parent_info.assert_called_once_with()
    @common_mocks
    def test_get_clone_info_w_snap(self):
        """With a snap argument, the snap is set then unset (two calls)."""
        volume = self.mock_rbd.Image()
        volume.set_snap = mock.Mock()
        volume.parent_info = mock.Mock()
        parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
        volume.parent_info.return_value = parent_info
        snapshot = self.mock_rbd.ImageSnapshot()
        info = self.driver._get_clone_info(volume, self.volume_name,
                                           snap=snapshot)
        self.assertEqual(info, parent_info)
        # set_snap(snapshot) plus set_snap(None) to restore.
        self.assertEqual(volume.set_snap.call_count, 2)
        volume.parent_info.assert_called_once_with()
    @common_mocks
    def test_get_clone_info_w_exception(self):
        """ImageNotFound from parent_info yields (None, None, None)."""
        volume = self.mock_rbd.Image()
        volume.set_snap = mock.Mock()
        volume.parent_info = mock.Mock()
        volume.parent_info.side_effect = self.mock_rbd.ImageNotFound
        snapshot = self.mock_rbd.ImageSnapshot()
        info = self.driver._get_clone_info(volume, self.volume_name,
                                           snap=snapshot)
        self.assertEqual(info, (None, None, None))
        self.assertEqual(volume.set_snap.call_count, 2)
        volume.parent_info.assert_called_once_with()
        # Make sure the exception was raised
        self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound])
    @common_mocks
    def test_get_clone_info_deleted_volume(self):
        """A '<name>.deleted' volume name still resolves its parent info."""
        volume = self.mock_rbd.Image()
        volume.set_snap = mock.Mock()
        volume.parent_info = mock.Mock()
        parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
        volume.parent_info.return_value = parent_info
        info = self.driver._get_clone_info(volume,
                                           "%s.deleted" % (self.volume_name))
        self.assertEqual(info, parent_info)
        self.assertFalse(volume.set_snap.called)
        volume.parent_info.assert_called_once_with()
    @common_mocks
    def test_create_cloned_volume_same_size(self):
        """Clone within depth limit and equal sizes: no flatten, no resize."""
        src_name = u'volume-00000001'
        dst_name = u'volume-00000002'
        self.cfg.rbd_max_clone_depth = 2
        with mock.patch.object(self.driver, '_get_clone_depth') as \
                mock_get_clone_depth:
            # Try with no flatten required
            with mock.patch.object(self.driver, '_resize') as mock_resize:
                mock_get_clone_depth.return_value = 1
                self.driver.create_cloned_volume({'name': dst_name,
                                                  'size': 10},
                                                 {'name': src_name,
                                                  'size': 10})
                (self.mock_rbd.Image.return_value.create_snap
                 .assert_called_once_with('.'.join((dst_name,
                                                    'clone_snap'))))
                (self.mock_rbd.Image.return_value.protect_snap
                 .assert_called_once_with('.'.join((dst_name,
                                                    'clone_snap'))))
                self.assertEqual(
                    1, self.mock_rbd.RBD.return_value.clone.call_count)
                self.mock_rbd.Image.return_value.close \
                    .assert_called_once_with()
                self.assertTrue(mock_get_clone_depth.called)
                # Equal sizes: no resize expected.
                self.assertEqual(
                    0, mock_resize.call_count)
    @common_mocks
    def test_create_cloned_volume_different_size(self):
        """Clone with a larger destination: one resize is performed."""
        src_name = u'volume-00000001'
        dst_name = u'volume-00000002'
        self.cfg.rbd_max_clone_depth = 2
        with mock.patch.object(self.driver, '_get_clone_depth') as \
                mock_get_clone_depth:
            # Try with no flatten required
            with mock.patch.object(self.driver, '_resize') as mock_resize:
                mock_get_clone_depth.return_value = 1
                self.driver.create_cloned_volume({'name': dst_name,
                                                  'size': 20},
                                                 {'name': src_name,
                                                  'size': 10})
                (self.mock_rbd.Image.return_value.create_snap
                 .assert_called_once_with('.'.join((dst_name,
                                                    'clone_snap'))))
                (self.mock_rbd.Image.return_value.protect_snap
                 .assert_called_once_with('.'.join((dst_name,
                                                    'clone_snap'))))
                self.assertEqual(
                    1, self.mock_rbd.RBD.return_value.clone.call_count)
                self.mock_rbd.Image.return_value.close \
                    .assert_called_once_with()
                self.assertTrue(mock_get_clone_depth.called)
                # Destination is larger: exactly one resize expected.
                self.assertEqual(
                    1, mock_resize.call_count)
    @common_mocks
    def test_create_cloned_volume_w_flatten(self):
        """Clone failure at max depth: snapshot is unprotected and removed."""
        src_name = u'volume-00000001'
        dst_name = u'volume-00000002'
        self.cfg.rbd_max_clone_depth = 1
        self.mock_rbd.RBD.return_value.clone.side_effect = (
            self.mock_rbd.RBD.Error)
        with mock.patch.object(self.driver, '_get_clone_depth') as \
                mock_get_clone_depth:
            # Try with no flatten required
            mock_get_clone_depth.return_value = 1
            self.assertRaises(self.mock_rbd.RBD.Error,
                              self.driver.create_cloned_volume,
                              dict(name=dst_name), dict(name=src_name))
            (self.mock_rbd.Image.return_value.create_snap
             .assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
            (self.mock_rbd.Image.return_value.protect_snap
             .assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
            self.assertEqual(
                1, self.mock_rbd.RBD.return_value.clone.call_count)
            # Cleanup path: the temporary clone snapshot is rolled back.
            (self.mock_rbd.Image.return_value.unprotect_snap
             .assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
            (self.mock_rbd.Image.return_value.remove_snap
             .assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
            self.mock_rbd.Image.return_value.close.assert_called_once_with()
            self.assertTrue(mock_get_clone_depth.called)
    @common_mocks
    def test_create_cloned_volume_w_clone_exception(self):
        """A failing RBD.clone() must roll back the clone snapshot.

        Depth is below the maximum (no flatten), but clone() raises; the
        driver is expected to unprotect and remove the temporary snapshot
        and close the image before propagating the error.
        """
        src_name = u'volume-00000001'
        dst_name = u'volume-00000002'
        self.cfg.rbd_max_clone_depth = 2
        self.mock_rbd.RBD.return_value.clone.side_effect = (
            self.mock_rbd.RBD.Error)
        with mock.patch.object(self.driver, '_get_clone_depth') as \
                mock_get_clone_depth:
            # Depth 1 < max 2: no flatten needed, failure comes from clone().
            mock_get_clone_depth.return_value = 1
            self.assertRaises(self.mock_rbd.RBD.Error,
                              self.driver.create_cloned_volume,
                              {'name': dst_name}, {'name': src_name})
            (self.mock_rbd.Image.return_value.create_snap
                .assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
            (self.mock_rbd.Image.return_value.protect_snap
                .assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
            self.assertEqual(
                1, self.mock_rbd.RBD.return_value.clone.call_count)
            # Failure cleanup: the temporary snapshot is rolled back.
            (self.mock_rbd.Image.return_value.unprotect_snap
                .assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
            (self.mock_rbd.Image.return_value.remove_snap
                .assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
            self.mock_rbd.Image.return_value.close.assert_called_once_with()
@common_mocks
def test_good_locations(self):
locations = ['rbd://fsid/pool/image/snap',
'rbd://%2F/%2F/%2F/%2F', ]
map(self.driver._parse_location, locations)
@common_mocks
def test_bad_locations(self):
locations = ['rbd://image',
'http://path/to/somewhere/else',
'rbd://image/extra',
'rbd://image/',
'rbd://fsid/pool/image/',
'rbd://fsid/pool/image/snap/',
'rbd://///', ]
for loc in locations:
self.assertRaises(exception.ImageUnacceptable,
self.driver._parse_location,
loc)
self.assertFalse(
self.driver._is_cloneable(loc, {'disk_format': 'raw'}))
@common_mocks
def test_cloneable(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
info = {'disk_format': 'raw'}
self.assertTrue(self.driver._is_cloneable(location, info))
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_different_fsid(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://def/pool/image/snap'
self.assertFalse(
self.driver._is_cloneable(location, {'disk_format': 'raw'}))
self.assertTrue(mock_get_fsid.called)
    @common_mocks
    def test_uncloneable_unreadable(self):
        """An image that cannot be opened is reported as not cloneable."""
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
            mock_get_fsid.return_value = 'abc'
            location = 'rbd://abc/pool/image/snap'
            # Make opening the image (via the volume proxy) blow up.
            self.driver.rbd.Error = Exception
            self.mock_proxy.side_effect = Exception
            args = [location, {'disk_format': 'raw'}]
            self.assertFalse(self.driver._is_cloneable(*args))
            # The proxy was attempted exactly once before giving up.
            self.assertEqual(1, self.mock_proxy.call_count)
            self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_bad_format(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
formats = ['qcow2', 'vmdk', 'vdi']
for f in formats:
self.assertFalse(
self.driver._is_cloneable(location, {'disk_format': f}))
self.assertTrue(mock_get_fsid.called)
    def _copy_image(self):
        """Drive copy_image_to_volume with all image I/O mocked out.

        Shared helper for the copy_image tests: temp files, existence
        checks, the raw fetch, volume deletion and resizing are all
        stubbed so only the driver's control flow is exercised.
        """
        with mock.patch.object(tempfile, 'NamedTemporaryFile'):
            with mock.patch.object(os.path, 'exists') as mock_exists:
                mock_exists.return_value = True
                with mock.patch.object(image_utils, 'fetch_to_raw'):
                    with mock.patch.object(self.driver, 'delete_volume'):
                        with mock.patch.object(self.driver, '_resize'):
                            mock_image_service = mock.MagicMock()
                            args = [None, {'name': 'test', 'size': 1},
                                    mock_image_service, None]
                            self.driver.copy_image_to_volume(*args)
@common_mocks
def test_copy_image_no_volume_tmp(self):
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = None
self._copy_image()
@common_mocks
def test_copy_image_volume_tmp(self):
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = '/var/run/cinder/tmp'
self._copy_image()
    @common_mocks
    def test_update_volume_stats(self):
        """Cluster stats in KiB are converted to GiB capacity figures."""
        client = self.mock_client.return_value
        client.__enter__.return_value = client
        client.cluster = mock.Mock()
        client.cluster.get_cluster_stats = mock.Mock()
        # 1024 ** 3 KiB total == 1024 GiB; 1024 ** 2 KiB free == 1 GiB.
        client.cluster.get_cluster_stats.return_value = {'kb': 1024 ** 3,
                                                         'kb_avail': 1024 ** 2}
        self.driver.configuration.safe_get = mock.Mock()
        self.driver.configuration.safe_get.return_value = 'RBD'
        expected = dict(
            volume_backend_name='RBD',
            vendor_name='Open Source',
            driver_version=self.driver.VERSION,
            storage_protocol='ceph',
            total_capacity_gb=1024,
            free_capacity_gb=1,
            reserved_percentage=0)
        actual = self.driver.get_volume_stats(True)
        client.cluster.get_cluster_stats.assert_called_once_with()
        self.assertDictMatch(expected, actual)
    @common_mocks
    def test_update_volume_stats_error(self):
        """A stats query failure degrades capacities to 'unknown'."""
        client = self.mock_client.return_value
        client.__enter__.return_value = client
        client.cluster = mock.Mock()
        client.cluster.get_cluster_stats = mock.Mock()
        # Any exception from the cluster must not propagate to the caller.
        client.cluster.get_cluster_stats.side_effect = Exception
        self.driver.configuration.safe_get = mock.Mock()
        self.driver.configuration.safe_get.return_value = 'RBD'
        expected = dict(volume_backend_name='RBD',
                        vendor_name='Open Source',
                        driver_version=self.driver.VERSION,
                        storage_protocol='ceph',
                        total_capacity_gb='unknown',
                        free_capacity_gb='unknown',
                        reserved_percentage=0)
        actual = self.driver.get_volume_stats(True)
        client.cluster.get_cluster_stats.assert_called_once_with()
        self.assertDictMatch(expected, actual)
@common_mocks
def test_get_mon_addrs(self):
with mock.patch.object(self.driver, '_execute') as mock_execute:
mock_execute.return_value = (CEPH_MON_DUMP, '')
hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
ports = ['6789', '6790', '6791', '6792', '6791']
self.assertEqual((hosts, ports), self.driver._get_mon_addrs())
    @common_mocks
    def test_initialize_connection(self):
        """Connection info carries pool/volume name plus monitor addresses."""
        hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
        ports = ['6789', '6790', '6791', '6792', '6791']
        with mock.patch.object(self.driver, '_get_mon_addrs') as \
                mock_get_mon_addrs:
            mock_get_mon_addrs.return_value = (hosts, ports)
            expected = {
                'driver_volume_type': 'rbd',
                'data': {
                    # Image reference is '<pool>/<volume name>'.
                    'name': '%s/%s' % (self.cfg.rbd_pool,
                                       self.volume_name),
                    'hosts': hosts,
                    'ports': ports,
                    'auth_enabled': False,
                    'auth_username': None,
                    'secret_type': 'ceph',
                    'secret_uuid': None, }
            }
            volume = dict(name=self.volume_name)
            actual = self.driver.initialize_connection(volume, None)
            self.assertDictMatch(expected, actual)
            self.assertTrue(mock_get_mon_addrs.called)
    @common_mocks
    def test_clone(self):
        """_clone opens source and destination clients and calls RBD.clone."""
        src_pool = u'images'
        src_image = u'image-name'
        src_snap = u'snapshot-name'

        client_stack = []

        def mock__enter__(inst):
            # Record each context-manager entry so that the source and
            # destination client ioctx objects can be checked in order.
            def _inner():
                client_stack.append(inst)
                return inst
            return _inner

        client = self.mock_client.return_value
        # capture both rados client used to perform the clone
        client.__enter__.side_effect = mock__enter__(client)

        self.driver._clone(self.volume, src_pool, src_image, src_snap)

        # clone(src_ioctx, src_image, src_snap, dst_ioctx, dst_name, ...)
        args = [client_stack[0].ioctx, str(src_image), str(src_snap),
                client_stack[1].ioctx, str(self.volume_name)]
        kwargs = {'features': self.mock_rbd.RBD_FEATURE_LAYERING}
        self.mock_rbd.RBD.return_value.clone.assert_called_once_with(
            *args, **kwargs)
        self.assertEqual(client.__enter__.call_count, 2)
    @common_mocks
    def test_extend_volume(self):
        """extend_volume resizes the image to the requested size in bytes."""
        fake_size = '20'
        fake_vol = {'project_id': 'testprjid', 'name': self.volume_name,
                    'size': fake_size,
                    'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
        # NOTE: still mox-based (record/replay/verify), unlike the rest of
        # this class which uses mock; _resize is expected with size in bytes.
        self.mox.StubOutWithMock(self.driver, '_resize')
        size = int(fake_size) * units.Gi
        self.driver._resize(fake_vol, size=size)
        self.mox.ReplayAll()
        self.driver.extend_volume(fake_vol, fake_size)
        self.mox.VerifyAll()
    @common_mocks
    def test_retype(self):
        """retype only succeeds when host, encryption and extra_specs match.

        The diff dict is mutated between assertions to probe each rejection
        reason in turn, finishing with the one accepted combination.
        """
        context = {}
        diff = {'encryption': {},
                'extra_specs': {}}
        fake_volume = {'name': 'testvolume',
                       'host': 'currenthost'}
        fake_type = 'high-IOPS'
        # no support for migration
        host = {'host': 'anotherhost'}
        self.assertFalse(self.driver.retype(context, fake_volume,
                                            fake_type, diff, host))
        host = {'host': 'currenthost'}
        # no support for changing encryption
        diff['encryption'] = {'non-empty': 'non-empty'}
        self.assertFalse(self.driver.retype(context, fake_volume,
                                            fake_type, diff, host))
        diff['encryption'] = {}
        # no support for changing extra_specs
        diff['extra_specs'] = {'non-empty': 'non-empty'}
        self.assertFalse(self.driver.retype(context, fake_volume,
                                            fake_type, diff, host))
        diff['extra_specs'] = {}
        # same host, no encryption / extra_specs changes: retype succeeds
        self.assertTrue(self.driver.retype(context, fake_volume,
                                           fake_type, diff, host))
    def test_rbd_volume_proxy_init(self):
        """RBDVolumeProxy connects on entry and disconnects only on exit."""
        mock_driver = mock.Mock(name='driver')
        mock_driver._connect_to_rados.return_value = (None, None)
        with driver.RBDVolumeProxy(mock_driver, self.volume_name):
            self.assertEqual(1, mock_driver._connect_to_rados.call_count)
            # Still inside the context: no disconnect yet.
            self.assertFalse(mock_driver._disconnect_from_rados.called)
        self.assertEqual(1, mock_driver._disconnect_from_rados.call_count)
        mock_driver.reset_mock()
        # Same connect/disconnect contract when a snapshot is supplied.
        snap = u'snapshot-name'
        with driver.RBDVolumeProxy(mock_driver, self.volume_name,
                                   snapshot=snap):
            self.assertEqual(1, mock_driver._connect_to_rados.call_count)
            self.assertFalse(mock_driver._disconnect_from_rados.called)
        self.assertEqual(1, mock_driver._disconnect_from_rados.call_count)
    @common_mocks
    def test_connect_to_rados(self):
        """_connect_to_rados: default pool, alternate pool, timeout, error."""
        # Default
        self.cfg.rados_connect_timeout = -1
        self.mock_rados.Rados.return_value.open_ioctx.return_value = \
            self.mock_rados.Rados.return_value.ioctx
        # default configured pool
        ret = self.driver._connect_to_rados()
        self.assertTrue(self.mock_rados.Rados.return_value.connect.called)
        # Expect no timeout if default is used
        self.mock_rados.Rados.return_value.connect.assert_called_once_with()
        self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
        # Returns (client, ioctx); the ioctx is for the configured pool.
        self.assertEqual(ret[1], self.mock_rados.Rados.return_value.ioctx)
        self.mock_rados.Rados.return_value.open_ioctx.assert_called_with(
            self.cfg.rbd_pool)
        # different pool
        ret = self.driver._connect_to_rados('alt_pool')
        self.assertTrue(self.mock_rados.Rados.return_value.connect.called)
        self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
        self.assertEqual(ret[1], self.mock_rados.Rados.return_value.ioctx)
        self.mock_rados.Rados.return_value.open_ioctx.assert_called_with(
            'alt_pool')
        # With timeout
        self.cfg.rados_connect_timeout = 1
        self.mock_rados.Rados.return_value.connect.reset_mock()
        self.driver._connect_to_rados()
        self.mock_rados.Rados.return_value.connect.assert_called_once_with(
            timeout=1)
        # error: open_ioctx failure must shut the client down and be
        # translated into a VolumeBackendAPIException.
        self.mock_rados.Rados.return_value.open_ioctx.reset_mock()
        self.mock_rados.Rados.return_value.shutdown.reset_mock()
        self.mock_rados.Rados.return_value.open_ioctx.side_effect = (
            self.mock_rados.Error)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._connect_to_rados)
        self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
        self.mock_rados.Rados.return_value.shutdown.assert_called_once_with()
class RBDImageIOWrapperTestCase(test.TestCase):
    """Tests for the file-like RBDImageIOWrapper around an RBD image."""

    def setUp(self):
        super(RBDImageIOWrapperTestCase, self).setUp()
        self.meta = mock.Mock()
        self.meta.user = 'mock_user'
        self.meta.conf = 'mock_conf'
        self.meta.pool = 'mock_pool'
        self.meta.image = mock.Mock()
        self.meta.image.read = mock.Mock()
        self.meta.image.write = mock.Mock()
        self.meta.image.size = mock.Mock()
        self.mock_rbd_wrapper = driver.RBDImageIOWrapper(self.meta)
        self.data_length = 1024
        self.full_data = 'abcd' * 256

    def test_init(self):
        self.assertEqual(self.mock_rbd_wrapper._rbd_meta, self.meta)
        self.assertEqual(self.mock_rbd_wrapper._offset, 0)

    def test_inc_offset(self):
        self.mock_rbd_wrapper._inc_offset(10)
        self.mock_rbd_wrapper._inc_offset(10)
        self.assertEqual(self.mock_rbd_wrapper._offset, 20)

    def test_rbd_image(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_image, self.meta.image)

    def test_rbd_user(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_user, self.meta.user)

    # NOTE: test_rbd_pool/test_rbd_conf previously had their bodies
    # swapped (each asserted the other's property); fixed so each test
    # checks the attribute it is named after.
    def test_rbd_pool(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_pool, self.meta.pool)

    def test_rbd_conf(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_conf, self.meta.conf)

    def test_read(self):
        def mock_read(offset, length):
            # NOTE: slice end is 'length', not 'offset + length'. That is
            # sufficient for this test, which only reads from offset 0 or
            # from EOF (where the remaining length is 0).
            return self.full_data[offset:length]

        self.meta.image.read.side_effect = mock_read
        self.meta.image.size.return_value = self.data_length

        # Full read consumes everything, a second read returns ''.
        data = self.mock_rbd_wrapper.read()
        self.assertEqual(data, self.full_data)

        data = self.mock_rbd_wrapper.read()
        self.assertEqual(data, '')

        # Seeking back to the start allows re-reading.
        self.mock_rbd_wrapper.seek(0)
        data = self.mock_rbd_wrapper.read()
        self.assertEqual(data, self.full_data)

        # Bounded read returns only the requested prefix.
        self.mock_rbd_wrapper.seek(0)
        data = self.mock_rbd_wrapper.read(10)
        self.assertEqual(data, self.full_data[:10])

    def test_write(self):
        self.mock_rbd_wrapper.write(self.full_data)
        self.assertEqual(self.mock_rbd_wrapper._offset, 1024)

    def test_seekable(self):
        # Call seekable(): asserting the bound method object itself would
        # always be truthy and could never fail.
        self.assertTrue(self.mock_rbd_wrapper.seekable())

    def test_seek(self):
        self.assertEqual(self.mock_rbd_wrapper._offset, 0)
        self.mock_rbd_wrapper.seek(10)
        self.assertEqual(self.mock_rbd_wrapper._offset, 10)
        self.mock_rbd_wrapper.seek(10)
        self.assertEqual(self.mock_rbd_wrapper._offset, 10)
        # whence=1: relative to current position.
        self.mock_rbd_wrapper.seek(10, 1)
        self.assertEqual(self.mock_rbd_wrapper._offset, 20)

        self.mock_rbd_wrapper.seek(0)
        self.mock_rbd_wrapper.write(self.full_data)
        self.meta.image.size.return_value = self.data_length
        self.mock_rbd_wrapper.seek(0)
        self.assertEqual(self.mock_rbd_wrapper._offset, 0)

        # whence=2: relative to end of image.
        self.mock_rbd_wrapper.seek(10, 2)
        self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length + 10)
        self.mock_rbd_wrapper.seek(-10, 2)
        self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length - 10)

        # test exceptions.
        self.assertRaises(IOError, self.mock_rbd_wrapper.seek, 0, 3)
        self.assertRaises(IOError, self.mock_rbd_wrapper.seek, -1)
        # offset should not have been changed by any of the previous
        # operations.
        self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length - 10)

    def test_tell(self):
        self.assertEqual(self.mock_rbd_wrapper.tell(), 0)
        self.mock_rbd_wrapper._inc_offset(10)
        self.assertEqual(self.mock_rbd_wrapper.tell(), 10)

    def test_flush(self):
        with mock.patch.object(driver, 'LOG') as mock_logger:
            self.meta.image.flush = mock.Mock()
            self.mock_rbd_wrapper.flush()
            self.meta.image.flush.assert_called_once_with()
            self.meta.image.flush.reset_mock()
            # this should be caught and logged silently.
            self.meta.image.flush.side_effect = AttributeError
            self.mock_rbd_wrapper.flush()
            self.meta.image.flush.assert_called_once_with()
            msg = _("flush() not supported in this version of librbd")
            mock_logger.warning.assert_called_with(msg)

    def test_fileno(self):
        self.assertRaises(IOError, self.mock_rbd_wrapper.fileno)

    def test_close(self):
        self.mock_rbd_wrapper.close()
class ManagedRBDTestCase(DriverTestCase):
    """End-to-end tests of volume creation from images via the RBD driver."""

    driver_name = "cinder.volume.drivers.rbd.RBDDriver"

    def setUp(self):
        super(ManagedRBDTestCase, self).setUp()
        # TODO(dosaboy): need to remove dependency on mox stubs here once
        # image.fake has been converted to mock.
        fake_image.stub_out_image_service(self.stubs)
        self.volume.driver.set_initialized()
        self.volume.stats = {'allocated_capacity_gb': 0,
                             'pools': {}}
        self.called = []

    def _create_volume_from_image(self, expected_status, raw=False,
                                  clone_error=False):
        """Try to clone a volume from an image, and check the status
        afterwards.

        NOTE: if clone_error is True we force the image type to raw otherwise
        clone_image is not called
        """
        volume_id = 1

        # See tests.image.fake for image types.
        if raw:
            image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
        else:
            image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'

        # creating volume testdata
        db.volume_create(self.context,
                         {'id': volume_id,
                          'updated_at': timeutils.utcnow(),
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'creating',
                          'instance_uuid': None,
                          'host': 'dummy'})

        try:
            if not clone_error:
                self.volume.create_volume(self.context,
                                          volume_id,
                                          image_id=image_id)
            else:
                self.assertRaises(exception.CinderException,
                                  self.volume.create_volume,
                                  self.context,
                                  volume_id,
                                  image_id=image_id)

            volume = db.volume_get(self.context, volume_id)
            self.assertEqual(volume['status'], expected_status)
        finally:
            # cleanup
            db.volume_destroy(self.context, volume_id)

    def test_create_vol_from_image_status_available(self):
        """Clone raw image then verify volume is in available state."""

        def _mock_clone_image(context, volume, image_location,
                              image_meta, image_service):
            return {'provider_location': None}, True

        with mock.patch.object(self.volume.driver, 'clone_image') as \
                mock_clone_image:
            mock_clone_image.side_effect = _mock_clone_image
            with mock.patch.object(self.volume.driver, 'create_volume') as \
                    mock_create:
                with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
                                       '_copy_image_to_volume') as mock_copy:
                    self._create_volume_from_image('available', raw=True)
                    # A successful raw clone needs neither a data copy nor
                    # a fresh volume creation.
                    self.assertFalse(mock_copy.called)

            self.assertTrue(mock_clone_image.called)
            self.assertFalse(mock_create.called)

    def test_create_vol_from_non_raw_image_status_available(self):
        """Clone non-raw image then verify volume is in available state."""

        def _mock_clone_image(context, volume, image_location,
                              image_meta, image_service):
            return {'provider_location': None}, False

        with mock.patch.object(self.volume.driver, 'clone_image') as \
                mock_clone_image:
            mock_clone_image.side_effect = _mock_clone_image
            with mock.patch.object(self.volume.driver, 'create_volume') as \
                    mock_create:
                with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
                                       '_copy_image_to_volume') as mock_copy:
                    self._create_volume_from_image('available', raw=False)
                    # Non-raw images fall back to create + copy.
                    self.assertTrue(mock_copy.called)

            self.assertTrue(mock_clone_image.called)
            self.assertTrue(mock_create.called)

    def test_create_vol_from_image_status_error(self):
        """Fail to clone raw image then verify volume is in error state."""
        with mock.patch.object(self.volume.driver, 'clone_image') as \
                mock_clone_image:
            mock_clone_image.side_effect = exception.CinderException
            with mock.patch.object(self.volume.driver, 'create_volume'):
                with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
                                       '_copy_image_to_volume') as mock_copy:
                    self._create_volume_from_image('error', raw=True,
                                                   clone_error=True)
                    self.assertFalse(mock_copy.called)

            self.assertTrue(mock_clone_image.called)
            self.assertFalse(self.volume.driver.create_volume.called)

    def test_clone_failure(self):
        # NOTE: local 'driver' shadows the module-level driver import within
        # this method; it refers to the driver instance under test.
        driver = self.volume.driver

        with mock.patch.object(driver, '_is_cloneable', lambda *args: False):
            image_loc = (mock.Mock(), mock.Mock())
            actual = driver.clone_image(mock.Mock(),
                                        mock.Mock(),
                                        image_loc,
                                        {},
                                        mock.Mock())
            self.assertEqual(({}, False), actual)

        # A missing image location must also report 'not cloned'.
        self.assertEqual(({}, False),
                         driver.clone_image('', object(), None, {}, ''))

    def test_clone_success(self):
        expected = ({'provider_location': None}, True)
        driver = self.volume.driver
        with mock.patch.object(self.volume.driver, '_is_cloneable') as \
                mock_is_cloneable:
            mock_is_cloneable.return_value = True
            with mock.patch.object(self.volume.driver, '_clone') as \
                    mock_clone:
                with mock.patch.object(self.volume.driver, '_resize') as \
                        mock_resize:
                    image_loc = ('rbd://fee/fi/fo/fum', None)
                    volume = {'name': 'vol1'}
                    actual = driver.clone_image(mock.Mock(),
                                                volume,
                                                image_loc,
                                                {'disk_format': 'raw',
                                                 'id': 'id.foo'},
                                                mock.Mock())

                    self.assertEqual(expected, actual)
                    # Clone is performed from the parsed rbd:// components.
                    mock_clone.assert_called_once_with(volume,
                                                       'fi', 'fo', 'fum')
                    mock_resize.assert_called_once_with(volume)
| |
from praw.exceptions import ClientException, PRAWException
from praw.models import Comment, Submission
from prawcore import BadRequest
import mock
import pytest
from ... import IntegrationTest
class TestComment(IntegrationTest):
    """Integration tests for praw.models.Comment, replayed from cassettes."""

    def test_attributes(self):
        with self.recorder.use_cassette('TestComment.test_attributes'):
            comment = Comment(self.reddit, 'cklhv0f')
            assert comment.author == 'bboe'
            assert comment.body.startswith('Yes it does.')
            assert not comment.is_root
            assert comment.permalink(fast=True) == '/comments/2gmzqe//cklhv0f'
            assert comment.submission == '2gmzqe'

    @mock.patch('time.sleep', return_value=None)
    def test_block(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_block'):
            comment = None
            # Find a reply authored by someone other than the test user.
            for item in self.reddit.inbox.submission_replies():
                if item.author and item.author != pytest.placeholders.username:
                    comment = item
                    break
            else:
                assert False, 'no comment found'
            comment.block()

    def test_clear_vote(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_clear_vote'):
            Comment(self.reddit, 'd1680wu').clear_vote()

    @mock.patch('time.sleep', return_value=None)
    def test_delete(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_delete'):
            comment = Comment(self.reddit, 'd1616q2')
            comment.delete()
            assert comment.author is None
            assert comment.body == '[deleted]'

    def test_disable_inbox_replies(self):
        self.reddit.read_only = False
        comment = Comment(self.reddit, 'dcc9snh')
        with self.recorder.use_cassette(
                'TestComment.test_disable_inbox_replies'):
            comment.disable_inbox_replies()

    def test_downvote(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_downvote'):
            Comment(self.reddit, 'd1680wu').downvote()

    @mock.patch('time.sleep', return_value=None)
    def test_edit(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
                'TestComment.test_edit'):
            comment = Comment(self.reddit, 'd1616q2')
            comment.edit('New text')
            assert comment.body == 'New text'

    def test_enable_inbox_replies(self):
        self.reddit.read_only = False
        comment = Comment(self.reddit, 'dcc9snh')
        with self.recorder.use_cassette(
                'TestComment.test_enable_inbox_replies'):
            comment.enable_inbox_replies()

    def test_gild__no_creddits(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_gild__no_creddits'):
            with pytest.raises(BadRequest) as excinfo:
                Comment(self.reddit, 'd1616q2').gild()
            reason = excinfo.value.response.json()['reason']
            assert 'INSUFFICIENT_CREDDITS' == reason

    def test_invalid(self):
        with self.recorder.use_cassette('TestComment.test_invalid'):
            with pytest.raises(PRAWException) as excinfo:
                Comment(self.reddit, '0').body
            assert ("No 'Comment' data returned for thing t1_0",)\
                == excinfo.value.args

    @mock.patch('time.sleep', return_value=None)
    def test_mark_read(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_mark_read'):
            comment = next(self.reddit.inbox.unread())
            assert isinstance(comment, Comment)
            comment.mark_read()

    @mock.patch('time.sleep', return_value=None)
    def test_mark_unread(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_mark_unread'):
            comment = next(self.reddit.inbox.comment_replies())
            comment.mark_unread()

    def test_parent__comment(self):
        comment = Comment(self.reddit, 'cklhv0f')
        with self.recorder.use_cassette('TestComment.test_parent__comment'):
            parent = comment.parent()
            parent.refresh()
            assert comment in parent.replies
        assert isinstance(parent, Comment)
        assert parent.fullname == comment.parent_id

    def test_parent__comment_from_forest(self):
        submission = self.reddit.submission('2gmzqe')
        with self.recorder.use_cassette(
                'TestComment.test_parent__comment_from_forest'):
            comment = submission.comments[0].replies[0]
            parent = comment.parent()
        assert comment in parent.replies
        assert isinstance(parent, Comment)
        assert parent.fullname == comment.parent_id

    @mock.patch('time.sleep', return_value=None)
    def test_parent__from_replies(self, _):
        self.reddit.read_only = False
        # NOTE(review): cassette name lacks the usual 'test_' prefix; it
        # matches the recorded cassette file, so do not "fix" the string.
        with self.recorder.use_cassette('TestComment.parent__from_replies'):
            comment = next(self.reddit.inbox.comment_replies())
            parent = comment.parent()
        assert isinstance(parent, Comment)
        assert parent.fullname == comment.parent_id

    def test_parent__submission(self):
        comment = Comment(self.reddit, 'cklfmye')
        with self.recorder.use_cassette('TestComment.test_parent__submission'):
            parent = comment.parent()
            assert comment in parent.comments
        assert isinstance(parent, Submission)
        assert parent.fullname == comment.parent_id

    def test_permalink(self):
        with self.recorder.use_cassette('TestComment.test_permalink'):
            comment = Comment(self.reddit, 'cklhv0f')
            assert comment.permalink() == ('/r/redditdev/comments/2gmzqe/'
                                           'praw_https_enabled_praw_testing_'
                                           'needed/cklhv0f')

    def test_refresh(self):
        with self.recorder.use_cassette('TestComment.test_refresh'):
            comment = Comment(self.reddit, 'd81vwef').refresh()
        assert len(comment.replies) > 0

    def test_refresh__deleted_comment(self):
        with self.recorder.use_cassette(
                'TestComment.test_refresh__deleted_comment'):
            with pytest.raises(ClientException) as excinfo:
                Comment(self.reddit, 'd7ltvl0').refresh()
        assert ('Comment has been deleted',) == excinfo.value.args

    def test_reply(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_reply'):
            parent_comment = Comment(self.reddit, 'd1616q2')
            comment = parent_comment.reply('Comment reply')
            assert comment.author == self.reddit.config.username
            assert comment.body == 'Comment reply'
            assert not comment.is_root
            assert comment.parent_id == parent_comment.fullname

    def test_report(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_report'):
            Comment(self.reddit, 'd0335z3').report('custom')

    def test_save(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_save'):
            Comment(self.reddit, 'd1680wu').save('foo')

    def test_unsave(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_unsave'):
            Comment(self.reddit, 'd1680wu').unsave()

    def test_upvote(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestComment.test_upvote'):
            Comment(self.reddit, 'd1680wu').upvote()
class TestCommentModeration(IntegrationTest):
    """Integration tests for comment moderator actions (cassette replay)."""

    def test_approve(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette('TestCommentModeration.test_approve'):
            Comment(self.reddit, 'da2g5y6').mod.approve()

    def test_distinguish(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
                'TestCommentModeration.test_distinguish'):
            Comment(self.reddit, 'da2g5y6').mod.distinguish()

    @mock.patch('time.sleep', return_value=None)
    def test_distinguish__sticky(self, _):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
                'TestCommentModeration.test_distinguish__sticky'):
            Comment(self.reddit, 'da2g5y6').mod.distinguish(sticky=True)

    def test_ignore_reports(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
                'TestCommentModeration.test_ignore_reports'):
            self.reddit.comment('da2g5y6').mod.ignore_reports()

    def test_remove(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
                'TestCommentModeration.test_remove'):
            self.reddit.comment('da2g5y6').mod.remove(spam=True)

    def test_undistinguish(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
                'TestCommentModeration.test_undistinguish'):
            self.reddit.comment('da2g5y6').mod.undistinguish()

    def test_unignore_reports(self):
        self.reddit.read_only = False
        with self.recorder.use_cassette(
                'TestCommentModeration.test_unignore_reports'):
            self.reddit.comment('da2g5y6').mod.unignore_reports()
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from hashlib import sha1
from future.utils import string_types
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload import Payload
from pants.base.payload_field import (ExcludesField, FingerprintedField, FingerprintedMixin,
PrimitiveField)
from pants.base.validation import assert_list
from pants.java.jar.exclude import Exclude
from pants.util.meta import AbstractClass
class JarRule(FingerprintedMixin, AbstractClass):
  """Base class for jar entry rules, keyed by an entry-path regex."""

  def __init__(self, apply_pattern, payload=None):
    """
    :param string apply_pattern: A regular expression matched against jar entry paths.
    :param payload: An optional pre-built Payload to extend; a fresh one is created otherwise.
    :raises ValueError: If ``apply_pattern`` is not a string or not a valid regex.
    """
    if not isinstance(apply_pattern, string_types):
      raise ValueError('The supplied apply_pattern is not a string, given: {}'
                       .format(apply_pattern))
    try:
      compiled_pattern = re.compile(apply_pattern)
    except re.error as e:
      raise ValueError('The supplied apply_pattern: {pattern} '
                       'is not a valid regular expression: {msg}'
                       .format(pattern=apply_pattern, msg=e))
    self._apply_pattern = compiled_pattern
    self.payload = payload or Payload()
    self.payload.add_fields({'apply_pattern': PrimitiveField(apply_pattern)})

  def fingerprint(self):
    """Delegate fingerprinting to the payload."""
    return self.payload.fingerprint()

  @property
  def apply_pattern(self):
    """The pattern that matches jar entry paths this rule applies to.

    :rtype: re.RegexObject
    """
    return self._apply_pattern
class Skip(JarRule):
  """A rule that skips adding matched entries to a jar."""

  def __repr__(self):
    pattern = self.payload.apply_pattern
    return "Skip(apply_pattern={})".format(pattern)
class Duplicate(JarRule):
  """A rule that indicates how duplicate entries should be handled when building a jar."""

  class Error(Exception):
    """Raised by the ``FAIL`` action when a duplicate entry is encountered"""

    def __init__(self, path):
      """Creates a duplicate entry error for the given path.

      :param string path: The path of the duplicate entry.
      """
      assert path and isinstance(path, string_types), 'A non-empty path must be supplied.'
      super(Duplicate.Error, self).__init__('Duplicate entry encountered for path {}'.format(path))
      self._path = path

    @property
    def path(self):
      """The path of the duplicate entry."""
      return self._path

  SKIP = 'SKIP'
  """Retains the 1st entry and skips subsequent duplicates."""

  REPLACE = 'REPLACE'
  """Retains the most recent entry and skips prior duplicates."""

  CONCAT = 'CONCAT'
  """Concatenates the contents of all duplicate entries encountered in the order encountered."""

  CONCAT_TEXT = 'CONCAT_TEXT'
  """Concatenates the contents of all duplicate entries encountered in the order encountered,
  separating entries with newlines if needed.
  """

  FAIL = 'FAIL'
  """Raises a :class:``Duplicate.Error`` when a duplicate entry is
  encountered.
  """

  _VALID_ACTIONS = frozenset((SKIP, REPLACE, CONCAT, CONCAT_TEXT, FAIL))

  @classmethod
  def validate_action(cls, action):
    """Verifies the given action is a valid duplicate jar rule action.

    :returns: The action if it is valid.
    :raises: ``ValueError`` if the action is invalid.
    """
    if action in cls._VALID_ACTIONS:
      return action
    raise ValueError('The supplied action must be one of {valid}, given: {given}'
                     .format(valid=cls._VALID_ACTIONS, given=action))

  def __init__(self, apply_pattern, action):
    """Creates a rule for handling duplicate jar entries.

    :param string apply_pattern: A regular expression that matches duplicate jar entries this rule
      applies to.
    :param action: An action to take to handle one or more duplicate entries. Must be one of:
      ``Duplicate.SKIP``, ``Duplicate.REPLACE``, ``Duplicate.CONCAT``, ``Duplicate.CONCAT_TEXT``,
      or ``Duplicate.FAIL``.
    """
    duplicate_payload = Payload()
    duplicate_payload.add_fields({
      'action': PrimitiveField(self.validate_action(action)),
    })
    super(Duplicate, self).__init__(apply_pattern, payload=duplicate_payload)

  @property
  def action(self):
    """The action to take for any duplicate entries that match this rule's ``apply_pattern``."""
    return self.payload.action

  def fingerprint(self):
    """Delegate fingerprinting to the payload."""
    return self.payload.fingerprint()

  def __repr__(self):
    pattern = self.payload.apply_pattern
    action = self.payload.action
    return "Duplicate(apply_pattern={0}, action={1})".format(pattern, action)
class JarRules(FingerprintedMixin):
    """A set of rules for packaging up a deploy jar.

    Deploy jars are executable jars with fully self-contained classpaths and as such, assembling
    them presents problems given jar semantics.

    One issue is signed jars that must be included on the
    classpath. These have a signature that depends on the jar contents and assembly of the deploy
    jar changes the content of the jar, breaking the signatures. For cases like these the signed
    jars must be verified and then the signature information thrown away. The `Skip <#Skip>`_
    rule supports this sort of issue by allowing outright entry exclusion in the final deploy jar.

    Another issue is duplicate jar entries. Although the underlying zip format supports these, the
    java jar tool and libraries do not. As such some action must be taken for each duplicate entry
    such that there are no duplicates in the final deploy jar. The four
    `Duplicate <#Duplicate>`_ rules support resolution of these cases by allowing 1st wins,
    last wins, concatenation of the duplicate entry contents or raising an exception.

    :API: public
    """

    @classmethod
    def skip_signatures_and_duplicates_concat_well_known_metadata(cls, default_dup_action=None,
                                                                  additional_rules=None):
        """Produces a rule set useful in many deploy jar creation contexts.

        The rule set skips duplicate entries by default, retaining the 1st encountered. In
        addition it has the following special handling:

        - jar signature metadata is dropped
        - jar indexing files INDEX.LIST are dropped
        - ``java.util.ServiceLoader`` provider-configuration files are concatenated in the order
          encountered

        :param default_dup_action: An optional default action to take for duplicates. Defaults to
          `Duplicate.SKIP` if not specified.
        :param additional_rules: Optionally one or more jar rules to add to those described above.
        :returns: JarRules
        """
        default_dup_action = Duplicate.validate_action(default_dup_action or Duplicate.SKIP)
        additional_rules = assert_list(additional_rules,
                                       expected_type=(Duplicate, Skip))

        rules = [Skip(r'^META-INF/[^/]+\.SF$'),  # signature file
                 Skip(r'^META-INF/[^/]+\.DSA$'),  # default signature alg. file
                 Skip(r'^META-INF/[^/]+\.RSA$'),  # default signature alg. file
                 Skip(r'^META-INF/INDEX.LIST$'),  # interferes with Class-Path: see man jar for i option
                 Duplicate(r'^META-INF/services/', Duplicate.CONCAT_TEXT)]  # 1 svc fqcn per line

        return JarRules(rules=rules + additional_rules, default_dup_action=default_dup_action)

    # Lazily-created site-wide default rule set; see default() / set_default().
    _DEFAULT = None

    @classmethod
    def default(cls):
        """Returns the default set of jar rules.

        Can be set with `set_default` but otherwise defaults to
        `skip_signatures_and_duplicates_concat_well_known_metadata`.

        :API: public
        """
        if cls._DEFAULT is None:
            cls._DEFAULT = cls.skip_signatures_and_duplicates_concat_well_known_metadata()
        return cls._DEFAULT

    @classmethod
    def set_default(cls, rules):
        """Sets the default site-wide jar rules.

        :raises: ``ValueError`` if ``rules`` is not a JarRules instance.
        """
        if not isinstance(rules, JarRules):
            raise ValueError('The default rules must be a JarRules instance.')
        cls._DEFAULT = rules

    def __init__(self, rules=None, default_dup_action=Duplicate.SKIP):
        """Creates a new set of jar rules with the default duplicate action of ``Duplicate.SKIP``.

        :param rules: One or more rules that will be applied in order to jar entries being packaged
          in a deploy jar. `Skip <#Skip>`_ rules can go here.
        :param default_dup_action: The default action to take when a duplicate entry is encountered
          and no explicit rules apply to the entry.
        """
        self.payload = Payload()
        self.payload.add_fields({
            'default_dup_action': PrimitiveField(Duplicate.validate_action(default_dup_action))
        })
        self._rules = assert_list(rules, expected_type=JarRule, key_arg="rules")

    @property
    def default_dup_action(self):
        """The default action to take when a duplicate jar entry is encountered.

        :API: public
        """
        return self.payload.default_dup_action

    @property
    def rules(self):
        """A copy of the list of explicit entry rules in effect."""
        return list(self._rules)

    def fingerprint(self):
        # Combine the payload fingerprint with every rule's fingerprint so any
        # change to the rule set invalidates dependent caches.
        hasher = sha1()
        hasher.update(self.payload.fingerprint())
        for rule in self.rules:
            hasher.update(rule.fingerprint())
        return hasher.hexdigest()

    @property
    def value(self):
        # BUG FIX: this previously returned self._jar_rules, an attribute that
        # is never assigned anywhere in this class, so accessing .value always
        # raised AttributeError. Return the actual rules list instead.
        return self._rules
class ManifestEntries(FingerprintedMixin):
    """Describes additional items to add to the app manifest."""

    class ExpectedDictionaryError(Exception):
        """Raised when the supplied entries are not a dictionary of strings."""

    def __init__(self, entries=None):
        """
        :param entries: Additional headers, value pairs to add to the MANIFEST.MF.
          You can just add fixed string header / value pairs.
        :type entries: dictionary of string : string
        """
        self.payload = Payload()
        if entries:
            if not isinstance(entries, dict):
                raise self.ExpectedDictionaryError("entries must be a dictionary of strings.")
            # Reject the first non-string key encountered, if any.
            bad_keys = [key for key in entries.keys() if not isinstance(key, string_types)]
            if bad_keys:
                key = bad_keys[0]
                raise self.ExpectedDictionaryError(
                    "entries must be dictionary of strings, got key {} type {}"
                    .format(key, type(key).__name__))
        self.payload.add_fields({
            'entries': PrimitiveField(entries or {}),
        })

    def fingerprint(self):
        # Fingerprint is fully determined by the payload's entries field.
        return self.payload.fingerprint()

    @property
    def entries(self):
        """The dict of extra manifest headers (possibly empty)."""
        return self.payload.entries
class JvmBinary(JvmTarget):
    """A JVM binary.

    Below are a summary of how key goals affect targets of this type:

    * ``bundle`` - Creates a self-contained directory with the binary and all
      its dependencies, optionally archived, suitable for deployment.
    * ``binary`` - Create an executable jar of the binary. On the JVM
      this means the jar has a manifest specifying the main class.
    * ``run`` - Executes the main class of this binary locally.

    :API: public
    """

    def __init__(self,
                 name=None,
                 address=None,
                 payload=None,
                 main=None,
                 basename=None,
                 sources=None,
                 deploy_excludes=None,
                 deploy_jar_rules=None,
                 manifest_entries=None,
                 shading_rules=None,
                 **kwargs):
        """
        :API: public

        :param string main: The name of the ``main`` class, e.g.,
          ``'org.pantsbuild.example.hello.main.HelloMain'``. This class may be
          present as the source of this target or depended-upon library.
        :param string basename: Base name for the generated ``.jar`` file, e.g.,
          ``'hello'``. (By default, uses ``name`` param) Note this is unsafe
          because of the possible conflict when multiple binaries are built.
        :param EagerFilesetWithSpec sources: Zero or one source files. If more than one source is
          required, they should be put in a library target which should be added to dependencies.
        :param dependencies: Targets (probably ``java_library`` and
          ``scala_library`` targets) to "link" in.
        :type dependencies: list of target specs
        :param deploy_excludes: List of `exclude <#exclude>`_\s to apply
          at deploy time.
          If you, for example, deploy a java servlet that has one version of
          ``servlet.jar`` onto a Tomcat environment that provides another version,
          they might conflict. ``deploy_excludes`` gives you a way to build your
          code but exclude the conflicting ``jar`` when deploying.
        :param deploy_jar_rules: `Jar rules <#jar_rules>`_ for packaging this binary in a
          deploy jar.
        :param manifest_entries: dict that specifies entries for `ManifestEntries <#manifest_entries>`_
          for adding to MANIFEST.MF when packaging this binary.
        :param list shading_rules: Optional list of shading rules to apply when building a shaded
          (aka monolithic aka fat) binary jar. The order of the rules matters: the first rule which
          matches a fully-qualified class name is used to shade it. See shading_relocate(),
          shading_exclude(), shading_relocate_package(), and shading_exclude_package().
        """
        self.address = address  # Set in case a TargetDefinitionException is thrown early
        # Validate user-supplied arguments up front so errors point at the target definition.
        if main and not isinstance(main, string_types):
            raise TargetDefinitionException(self, 'main must be a fully qualified classname')
        if deploy_jar_rules and not isinstance(deploy_jar_rules, JarRules):
            raise TargetDefinitionException(self,
                                            'deploy_jar_rules must be a JarRules specification. got {}'
                                            .format(type(deploy_jar_rules).__name__))
        if manifest_entries and not isinstance(manifest_entries, dict):
            raise TargetDefinitionException(self,
                                            'manifest_entries must be a dict. got {}'
                                            .format(type(manifest_entries).__name__))
        payload = payload or Payload()
        # Register all fields on the payload so they participate in fingerprinting.
        payload.add_fields({
            'basename': PrimitiveField(basename or name),
            'deploy_excludes': ExcludesField(self.assert_list(deploy_excludes,
                                                              expected_type=Exclude,
                                                              key_arg='deploy_excludes')),
            'deploy_jar_rules': FingerprintedField(deploy_jar_rules or JarRules.default()),
            'manifest_entries': FingerprintedField(ManifestEntries(manifest_entries)),
            'main': PrimitiveField(main),
            'shading_rules': PrimitiveField(shading_rules or ()),
        })
        super(JvmBinary, self).__init__(name=name,
                                        address=address,
                                        payload=payload,
                                        sources=sources,
                                        **kwargs)

    @property
    def basename(self):
        # Base name for the generated jar; defaults to the target name.
        return self.payload.basename

    @property
    def deploy_excludes(self):
        # Excludes applied only when assembling the deploy jar.
        return self.payload.deploy_excludes

    @property
    def deploy_jar_rules(self):
        # JarRules governing duplicate/signature handling in the deploy jar.
        return self.payload.deploy_jar_rules

    @property
    def shading_rules(self):
        # Ordered shading rules; the first matching rule wins.
        return self.payload.shading_rules

    @property
    def main(self):
        # Fully-qualified name of the main class, or None.
        return self.payload.main

    @property
    def manifest_entries(self):
        # ManifestEntries wrapping any extra MANIFEST.MF headers.
        return self.payload.manifest_entries
| |
#!/usr/bin/env python
import sys
import os
import shutil
from datetime import datetime
import gzip
from argparse import ArgumentParser
import random
from distutils.spawn import find_executable
from ..illumina.run_info import parse_samplesheet
# TODO: Finish this RunInfo.xml generation function
# Need to populate the template dict with more variable we
# don't currently have, including the 'lanes' dictionary, then call
# write_runinfo_xml somewhere.
# Add jinja2 to requirements.txt
#
# from jinja2 import Template
#
# runinfo_xml_template = """<?xml version="1.0"?>
# <RunInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" Version="2">
# <Run Id="{{ run_id }}" Number="{{ run_number }}">
# <Flowcell>{{ flowcell_id }}</Flowcell>
# <Instrument>{{ instrument_id }}</Instrument>
# <Date>{{ date }}</Date>
# <Reads>
# {% for lane in lanes %}
# <Read Number="{{ lane.number }}" NumCycles="{{ lane.cycles }}" IsIndexedRead="{{ lane.is_index }}" />
# {% endfor %}
# </Reads>
# <FlowcellLayout LaneCount="{{ number_of_lanes }}" SurfaceCount="{{ surface_count }}" SwathCount="{{ swath_count }}" TileCount="{{ tile_count }}" />
# <AlignToPhiX>
# {% for lane in lanes %}
# <Lane>{{ lane.number }}</Lane>
# {% endfor %}
# </AlignToPhiX>
# </Run>
# </RunInfo>
# """
#
#
# def write_runinfo_xml(info):
# runinfo_text = Template(runinfo_xml_template).render(**info)
# with open(os.path.join(run_path, 'RunInfo.xml'), 'w') as f:
# f.write(runinfo_text)
#
def add_options(parser):
    """Register the fake-run generator's command-line options on ``parser``.

    :param parser: An ``argparse.ArgumentParser`` (or compatible) to extend in place.
    """
    # NOTE: several adjacent string literals below were previously missing
    # trailing spaces, so words ran together in --help output; fixed here.
    parser.add_argument("-s", "--samplesheet",
                        dest="samplesheet_path",
                        type=str,
                        help="The path to Samplesheet.csv",
                        metavar="SAMPLESHEET_PATH")
    parser.add_argument("-x", "--reads-per-sample",
                        dest="reads_per_sample",
                        type=int,
                        default=10,
                        help="The number of reads per sample.",
                        metavar="READS_PER_SAMPLE")
    parser.add_argument("-f", "--fastq-source",
                        dest="fastq_source",
                        type=str,
                        default='',
                        help="The path to a source fastq.gz file that we will "
                             "sample reads from. If specified, --read-length is "
                             "ignored.",
                        metavar="FASTQ_SOURCE")
    parser.add_argument("-o", "--output-path",
                        dest="output_path",
                        type=str,
                        default='.',
                        help="The path where generated files will be written.",
                        metavar="OUTPUT_PATH")
    parser.add_argument("-l", "--read-length",
                        dest="read_length",
                        type=int,
                        default=151,
                        help="The read length, overriding any [Read] setting "
                             "in SampleSheet.csv.",
                        metavar="READ_LENGTH")
    parser.add_argument("-d", "--run-date",
                        dest="run_date",
                        type=str,
                        default='',
                        help="The date in the format YYMMDD",
                        metavar="RUN_DATE")
    parser.add_argument("-i", "--instrument",
                        dest="instrument_id",
                        type=str,
                        default='M04242',
                        help="The instrument ID",
                        metavar="INSTRUMENT_ID")
    parser.add_argument("-c", "--flowcell",
                        dest="flowcell_id",
                        type=str,
                        default='',
                        help="The flowcell ID (FCID)",
                        metavar="FLOWCELL_ID")
    parser.add_argument("-n", "--run-number",
                        dest="run_number",
                        type=int,
                        default=1,
                        help="The run number (eg 14 becomes 0014)",
                        metavar="RUN_NUMBER")
    parser.add_argument("-r", "--random-shuffle",
                        dest="random_shuffle",
                        action='store_true',
                        help="If a source FASTQ file is provided, shuffle the "
                             "nucleotides in place (without shuffling quality "
                             "scores) so we obscure the original source.")
    parser.add_argument("-p", "--paired",
                        dest="paired",
                        action='store_true',
                        help="Generate reads for paired end / mate paired "
                             "(R1/R2).")
    parser.add_argument("--no-create-project-directories",
                        dest="create_project_directories",
                        action='store_false',
                        help="Don't create directory for each project ID. "
                             "Otherwise a project directory "
                             "(eg SampleProject in SampleSheet.csv) will be "
                             "created, with nested directories for each "
                             "sample ID")
    parser.add_argument("--sample-directory-prefix",
                        dest="sample_directory_prefix",
                        type=str,
                        default='',
                        help="Prefix this to sample directory names. Older "
                             "versions (~1.8.4) of bcl2fastq prefix 'Sample_' "
                             "to the output directory names")
    parser.add_argument("--no-generate-undetermined-indices",
                        dest="generate_undetermined_indices",
                        action='store_false',
                        help="Don't generate Undetermined_indices_*.fastq.gz "
                             "files")
    parser.add_argument("--random-seed",
                        dest="random_seed",
                        type=int,
                        default=-1,
                        help="Set the pseudo-random seed so we can be "
                             "deterministic.",
                        metavar="RANDOM_SEED")
def shuffle_seq(seq):
    """Return a new string containing the characters of ``seq`` in random order."""
    chars = list(seq)
    random.shuffle(chars)
    return ''.join(chars)
def random_seq(length, characters='ATGC'):
    """Return a random string of ``length`` characters drawn (with replacement)
    from ``characters``."""
    picks = [random.choice(characters) for _ in range(length)]
    return ''.join(picks)
def set_missing_key(d, k, v):
    """Store ``v`` under ``k`` in dict ``d`` only when ``k`` is not already present."""
    d.setdefault(k, v)
def fasta_header(header_type=2, **kwargs):
    """Format an Illumina-style FASTQ read header (type 2 by default).

    Alternative header flavours seen in the wild are documented on Wikipedia:
    https://en.wikipedia.org/wiki/FASTQ_format
    """
    type_1 = ("@{instrument_id}:{lane}:{tile}:{tile_x}:{tile_y}#"
              "{index_number}/{pair}")
    type_2 = ("@{instrument_id}:{run_number}:{flowcell_id}:{lane}:"
              "{tile}:{tile_x}:{tile_y} {pair}:{filtered}:"
              "{control_bits}:{index}")
    type_3 = ("@{instrument_id}:{run_number}:{flowcell_id}:{lane}:"
              "{tile}:{tile_x}:{tile_y} {pair}:{filtered}:"
              "{control_bits}:{sample_number}")
    header_templates = [None, type_1, type_2, type_3]
    # control_bits defaults to 0 when the caller does not supply it.
    kwargs.setdefault('control_bits', 0)
    return header_templates[header_type].format(**kwargs)
def fastq_filename(name_type=1, suffix='.fastq', **kwargs):
    """Build a bcl2fastq-style FASTQ filename.

    ``name_type`` 1 follows bcl2fastq 1.8.4 conventions (an extra index segment
    is included when kwargs['index'] is non-empty, as used for
    Undetermined_indices reads); ``name_type`` 2 follows bcl2fastq 2.x
    conventions.
    """
    # bcl2fastq 1.8.4 style filenames:
    if kwargs.get('index'):  # for Undetermined_indices
        style_1 = "{sample_name}_{index}_L{lane:03d}_R{read}_001%s" % suffix
    else:
        style_1 = "{sample_name}_L{lane:03d}_R{read}_001%s" % suffix
    # bcl2fastq 2.x style filenames:
    style_2 = "{sample_name}_{sample_number}_L{lane:03d}_R{read}_001%s" % suffix
    return [None, style_1, style_2][name_type].format(**kwargs)
def run_directory_name(instrument_id, run_number, flowcell_id, run_date=None):
    """Compose an Illumina run directory name: <date>_<instrument>_<run#:04d>_<fcid>.

    When ``run_date`` is falsy, today's date in YYMMDD form is used.
    """
    date_part = run_date or datetime.now().strftime('%y%m%d')
    return "{run_date}_{instrument_id}_{run_number:04d}_{flowcell_id}".format(
        run_date=date_part,
        instrument_id=instrument_id,
        run_number=run_number,
        flowcell_id=flowcell_id)
def write(f, text, to_stdout=True):
    """Append ``text`` plus a trailing newline to ``f``, echoing to stdout by default."""
    line = '%s\n' % text
    if to_stdout:
        print(text)
    f.write(line)
def run_commandline():
    """Command-line entry point.

    Parses options, reads the SampleSheet, creates a fake Illumina run
    directory and writes (bgzip/gzip-compressed) FASTQ files for every sample,
    plus Undetermined_indices pseudo-samples unless disabled.
    """
    parser = ArgumentParser(description='Generate FASTQ files that appear '
                                        'to have come from an Illumina '
                                        'instrument run processed by '
                                        'bcl2fastq.')
    add_options(parser)

    # With no arguments at all, show usage instead of failing obscurely.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    options = parser.parse_args()

    # Seed the RNG; -1 (the default) means "use system entropy".
    if options.random_seed == -1:
        random.seed()
    else:
        random.seed(options.random_seed)

    # Constants used when fabricating read headers and quality strings.
    number_of_tiles = 5000
    tile_x_max = 200000
    tile_y_max = 200000
    filtered = 'N'
    control_bits = 0
    quality_range = "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLM" \
                    "NOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~"

    # BUG FIX: an argparse Namespace always contains 'samplesheet_path' (as
    # None when the option is omitted), so the old
    # `'samplesheet_path' not in options` check could never fire. Test the
    # value instead.
    if not options.samplesheet_path:
        sys.stderr.write("Error: no --samplesheet option provided")
        sys.exit(1)

    bgzip_bin = find_executable('bgzip')

    fqsource = None
    if options.fastq_source:
        # BUG FIX: open in text mode ('rt') so readline() yields str rather
        # than bytes under Python 3.
        fqsource = gzip.open(options.fastq_source, 'rt')

    samplesheet, chemistry = parse_samplesheet(options.samplesheet_path)

    # make all the SampleSheet keys lowercase
    samplesheet_lowercase = []
    for s in samplesheet:
        r = {k.lower(): s[k] for k in s.keys()}
        samplesheet_lowercase.append(r)
    samplesheet = samplesheet_lowercase

    # If no flowcell id is specified, take the SampleSheet one; if there is no
    # FCID in the SampleSheet either, use a default.
    # BUG FIX: keys were lowercased above so the old `'FCID' not in
    # samplesheet[0]` branch never matched, leaving flowcell_id unbound
    # (NameError) whenever the SampleSheet supplied an FCID but -c did not.
    if options.flowcell_id:
        flowcell_id = options.flowcell_id
    else:
        flowcell_id = samplesheet[0].get('fcid') or '000000000-ANV1L'

    run_id = run_directory_name(options.instrument_id,
                                options.run_number,
                                flowcell_id,
                                run_date=options.run_date)
    run_path = os.path.join(options.output_path, run_id)
    if not os.path.exists(run_path):
        os.makedirs(run_path)
    try:
        shutil.copy(options.samplesheet_path, run_path)
    except shutil.Error:
        # this may occur if the same SampleSheet.csv already exists at this path
        pass

    # TODO: Use [Read] from IEMv4 SampleSheet if --read-length isn't
    #       specified, fallback to default 151 if none specified anywhere
    read_length = options.read_length

    read_pairs = ['1']
    if options.paired:
        read_pairs = ['1', '2']

    # determine the number of lanes based on the highest lane in the SampleSheet
    number_of_lanes = 1
    for s in samplesheet:
        # BUG FIX: CSV fields are strings; coerce before the numeric comparison
        # (str > int raises TypeError on Python 3).
        lane = int(s.get('lane', 0) or 0)
        if lane > number_of_lanes:
            number_of_lanes = lane

    # we add stuff to the samplesheet data structure to so that
    # Undetermined_indicies gets generated as if it's a project+samples
    if options.generate_undetermined_indices:
        from collections import OrderedDict
        for pair in read_pairs:
            for lane in range(1, number_of_lanes + 1):
                d = {'lane': lane,
                     'sampleproject': 'Undetermined_indicies',
                     'samplename': 'lane%s_Undetermined' % lane,
                     'sampleid': 'Sample_lane%s' % lane,
                     'pair': pair,
                     'read': pair,
                     'fcid': samplesheet[0].get('fcid', ''),
                     'instrument_id': options.instrument_id,
                     'run_number': options.run_number,
                     }
                samplesheet.append(OrderedDict(d))

    for pair in read_pairs:
        sample_number = 0
        for sample in samplesheet:
            sample_number += 1
            info = {}
            info['pair'] = pair
            info['read'] = pair
            info['project'] = sample.get('sampleproject', '')
            info['sample_number'] = sample_number
            info['sample_name'] = sample.get('samplename', None)
            if info['sample_name'] is None:
                info['sample_name'] = 'Sample-%s' % sample_number
            info['sample_id'] = sample.get('sampleid', '')
            info['lane'] = int(sample.get('lane', 1))
            info['index'] = sample.get('index', '')
            info['flowcell_id'] = sample.get('fcid', '')
            info['instrument_id'] = options.instrument_id
            info['run_number'] = options.run_number

            output_path = os.path.join(run_path, 'Data/Intensities/BaseCalls')
            # progressively build output path, depending on existance of project
            # and sample id values
            if options.create_project_directories and info['project']:
                output_path = os.path.join(output_path, info['project'])
            if options.create_project_directories and info['sample_id']:
                output_path = os.path.join(
                    output_path,
                    "%s%s" % (options.sample_directory_prefix,
                              info['sample_id']))
            if not os.path.exists(output_path):
                os.makedirs(output_path)

            output_filename = fastq_filename(**info)
            fq_outpath = os.path.join(output_path, output_filename)
            with open(fq_outpath, 'w') as fh:
                for n in range(options.reads_per_sample):
                    info['filtered'] = filtered
                    info['control_bits'] = control_bits
                    info['tile'] = random.randrange(1, number_of_tiles)
                    info['tile_x'] = random.randrange(1, tile_x_max)
                    info['tile_y'] = random.randrange(1, tile_y_max)
                    write(fh, fasta_header(**info))
                    if fqsource:
                        # skip header in source
                        # BUG FIX: file.readline() returns '' at EOF rather
                        # than raising StopIteration, so the old
                        # `except StopIteration` reopen branch was dead code.
                        # Detect exhaustion explicitly and restart the source.
                        header = fqsource.readline()
                        if not header:
                            fqsource.close()
                            fqsource = gzip.open(options.fastq_source, 'rt')
                            fqsource.readline()  # skip header of first record
                        seq = fqsource.readline().strip()
                        if options.random_shuffle:
                            seq = shuffle_seq(seq)
                        plus = fqsource.readline().strip()
                        quality = fqsource.readline().strip()
                    else:
                        seq = random_seq(read_length)
                        plus = '+'
                        # TODO: generating random quality scores is going to
                        #       look silly in FastQC etc - generate some kind of
                        #       plausible quality falloff. Maybe it just makes
                        #       sense to support wrapping ART to generate
                        #       the FASTQ source sequences
                        quality = random_seq(read_length,
                                             characters=quality_range)
                    write(fh, seq)
                    write(fh, plus)
                    write(fh, quality)

            # bgzip them the correct way for FASTQs, falling back to plain gzip
            if bgzip_bin:
                os.system('%s -f %s' % (bgzip_bin, fq_outpath))
            else:
                os.system('gzip -f %s' % fq_outpath)

    if fqsource:
        fqsource.close()
if __name__ == '__main__':
    # Hint - on the commandline, run like:
    #
    #   $ python -m mytardis_ngs_ingestor.utils.generate_test_run
    #
    # (otherwise relative imports will fail)
    run_commandline()
| |
# -*- coding:utf-8 -*-
import gettext
import json
import os
from os import path
import unittest
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import (
LiveServerTestCase, TestCase, modify_settings, override_settings)
from django.utils import six
from django.utils._os import upath
from django.utils.module_loading import import_string
from django.utils.translation import override, LANGUAGE_SESSION_KEY
from ..urls import locale_dir
class I18NTests(TestCase):
    """Tests django views in django/views/i18n.py."""
    urls = 'view_tests.urls'

    def test_setlang(self):
        """
        The set_language view can be used to change the session language.

        The user is redirected to the 'next' argument if provided.
        """
        for code, _name in settings.LANGUAGES:
            response = self.client.post('/i18n/setlang/',
                                        data={'language': code, 'next': '/'})
            self.assertRedirects(response, 'http://testserver/')
            self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], code)

    def test_setlang_unsafe_next(self):
        """
        The set_language view only redirects to the 'next' argument if it is
        "safe".
        """
        code, _name = settings.LANGUAGES[0]
        response = self.client.post('/i18n/setlang/',
                                    data={'language': code,
                                          'next': '//unsafe/redirection/'})
        self.assertEqual(response.url, 'http://testserver/')
        self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], code)

    def test_setlang_reversal(self):
        """The set_language view is reversible by its URL name."""
        self.assertEqual(reverse('set_language'), '/i18n/setlang/')

    def test_setlang_cookie(self):
        # we force saving language to a cookie rather than a session
        # by excluding session middleware and those which do require it
        overrides = {
            'MIDDLEWARE_CLASSES': ('django.middleware.common.CommonMiddleware',),
            'LANGUAGE_COOKIE_NAME': 'mylanguage',
            'LANGUAGE_COOKIE_AGE': 3600 * 7 * 2,
            'LANGUAGE_COOKIE_DOMAIN': '.example.com',
            'LANGUAGE_COOKIE_PATH': '/test/',
        }
        with self.settings(**overrides):
            response = self.client.post('/i18n/setlang/',
                                        data={'language': 'pl', 'next': '/views/'})
            cookie = response.cookies.get('mylanguage')
            self.assertEqual(cookie.value, 'pl')
            self.assertEqual(cookie['domain'], '.example.com')
            self.assertEqual(cookie['path'], '/test/')
            self.assertEqual(cookie['max-age'], 3600 * 7 * 2)

    def test_jsi18n(self):
        """The javascript_catalog can be deployed with language settings."""
        for code in ('es', 'fr', 'ru'):
            with override(code):
                catalog = gettext.translation('djangojs', locale_dir, [code])
                if six.PY3:
                    expected = catalog.gettext('this is to be translated')
                else:
                    expected = catalog.ugettext('this is to be translated')
                response = self.client.get('/jsi18n/')
                # response content must include a line like:
                # "this is to be translated": <value of expected>
                # json.dumps() is used to be able to check unicode strings
                self.assertContains(response, json.dumps(expected), 1)
                if code == 'fr':
                    # Message with context (msgctxt)
                    self.assertContains(response, r'"month name\u0004May": "mai"', 1)
class JsI18NTests(TestCase):
    """
    Tests django views in django/views/i18n.py that need to change
    settings.LANGUAGE_CODE.
    """
    urls = 'view_tests.urls'

    def test_jsi18n_with_missing_en_files(self):
        """
        The javascript_catalog shouldn't load the fallback language in the
        case that the current selected language is actually the one translated
        from, and hence missing translation files completely.

        This happens easily when you're translating from English to other
        languages and you've set settings.LANGUAGE_CODE to some other language
        than English.
        """
        with self.settings(LANGUAGE_CODE='es'), override('en-us'):
            response = self.client.get('/jsi18n/')
            self.assertNotContains(response, 'esto tiene que ser traducido')

    def test_jsi18n_fallback_language(self):
        """
        Let's make sure that the fallback language is still working properly
        in cases where the selected language cannot be found.
        """
        with self.settings(LANGUAGE_CODE='fr'), override('fi'):
            response = self.client.get('/jsi18n/')
            self.assertContains(response, 'il faut le traduire')

    def test_i18n_language_non_english_default(self):
        """
        Check if the Javascript i18n view returns an empty language catalog
        if the default language is non-English, the selected language
        is English and there is not 'en' translation available. See #13388,
        #3594 and #13726 for more details.
        """
        with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
            response = self.client.get('/jsi18n/')
            self.assertNotContains(response, 'Choisir une heure')

    @modify_settings(INSTALLED_APPS={'append': 'view_tests.app0'})
    def test_non_english_default_english_userpref(self):
        """
        Same as above with the difference that there IS an 'en' translation
        available. The Javascript i18n view must return a NON empty language catalog
        with the proper English translations. See #13726 for more details.
        """
        with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
            response = self.client.get('/jsi18n_english_translation/')
            self.assertContains(response, 'this app0 string is to be translated')

    def test_i18n_language_non_english_fallback(self):
        """
        Makes sure that the fallback language is still working properly
        in cases where the selected language cannot be found.
        """
        with self.settings(LANGUAGE_CODE='fr'), override('none'):
            response = self.client.get('/jsi18n/')
            self.assertContains(response, 'Choisir une heure')

    def test_escaping(self):
        # Force a language via GET otherwise the gettext functions are a noop!
        response = self.client.get('/jsi18n_admin/?language=de')
        self.assertContains(response, '\\x04')

    @modify_settings(INSTALLED_APPS={'append': ['view_tests.app5']})
    def test_non_BMP_char(self):
        """
        Non-BMP characters should not break the javascript_catalog (#21725).
        """
        with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
            response = self.client.get('/jsi18n/app5/')
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, 'emoji')
            self.assertContains(response, '\\ud83d\\udca9')
class JsI18NTestsMultiPackage(TestCase):
    """
    Tests for django views in django/views/i18n.py that need to change
    settings.LANGUAGE_CODE and merge JS translation from several packages.
    """
    # BUG FIX: the docstring above previously appeared *after* the `urls`
    # assignment, making it a no-op string statement instead of the class
    # docstring (class.__doc__ was None). It must be the first statement.
    urls = 'view_tests.urls'

    @modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
    def test_i18n_language_english_default(self):
        """
        Check if the JavaScript i18n view returns a complete language catalog
        if the default language is en-us, the selected language has a
        translation available and a catalog composed by djangojs domain
        translations of multiple Python packages is requested. See #13388,
        #3594 and #13514 for more details.
        """
        with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
            response = self.client.get('/jsi18n_multi_packages1/')
            self.assertContains(response, 'il faut traduire cette cha\\u00eene de caract\\u00e8res de app1')

    @modify_settings(INSTALLED_APPS={'append': ['view_tests.app3', 'view_tests.app4']})
    def test_i18n_different_non_english_languages(self):
        """
        Similar to above but with neither default or requested language being
        English.
        """
        with self.settings(LANGUAGE_CODE='fr'), override('es-ar'):
            response = self.client.get('/jsi18n_multi_packages2/')
            self.assertContains(response, 'este texto de app3 debe ser traducido')

    def test_i18n_with_locale_paths(self):
        """A catalog from an extra LOCALE_PATHS entry is picked up by jsi18n."""
        extended_locale_paths = settings.LOCALE_PATHS + (
            path.join(path.dirname(
                path.dirname(path.abspath(upath(__file__)))), 'app3', 'locale'),)
        with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths):
            with override('es-ar'):
                response = self.client.get('/jsi18n/')
                self.assertContains(response,
                                    'este texto de app3 debe ser traducido')
# Selenium-based browser tests are opt-in: set DJANGO_SELENIUM_TESTS to run them.
skip_selenium = not os.environ.get('DJANGO_SELENIUM_TESTS', False)


@unittest.skipIf(skip_selenium, 'Selenium tests not requested')
class JavascriptI18nTests(LiveServerTestCase):
    """Browser-driven checks of the rendered JavaScript i18n catalog."""

    # The test cases use translations from these apps.
    available_apps = ['django.contrib.admin', 'view_tests']
    urls = 'view_tests.urls'
    # Dotted path to the webdriver class; subclasses override to target other browsers.
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    @classmethod
    def setUpClass(cls):
        # Instantiate the webdriver before the live server starts; skip the
        # whole class (rather than error) when the browser driver is missing.
        try:
            cls.selenium = import_string(cls.webdriver_class)()
        except Exception as e:
            raise unittest.SkipTest('Selenium webdriver "%s" not installed or '
                                    'not operational: %s' % (cls.webdriver_class, str(e)))
        super(JavascriptI18nTests, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        # Quit the browser before the live server is torn down.
        cls.selenium.quit()
        super(JavascriptI18nTests, cls).tearDownClass()

    @override_settings(LANGUAGE_CODE='de')
    def test_javascript_gettext(self):
        # Load the template page and verify each JS gettext flavour renders
        # the expected German translation.
        self.selenium.get('%s%s' % (self.live_server_url, '/jsi18n_template/'))

        elem = self.selenium.find_element_by_id("gettext")
        self.assertEqual(elem.text, "Entfernen")
        elem = self.selenium.find_element_by_id("ngettext_sing")
        self.assertEqual(elem.text, "1 Element")
        elem = self.selenium.find_element_by_id("ngettext_plur")
        self.assertEqual(elem.text, "455 Elemente")
        elem = self.selenium.find_element_by_id("pgettext")
        self.assertEqual(elem.text, "Kann")
        elem = self.selenium.find_element_by_id("npgettext_sing")
        self.assertEqual(elem.text, "1 Resultat")
        elem = self.selenium.find_element_by_id("npgettext_plur")
        self.assertEqual(elem.text, "455 Resultate")
class JavascriptI18nChromeTests(JavascriptI18nTests):
    """Re-run the JavaScript i18n suite against Chrome's webdriver."""
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class JavascriptI18nIETests(JavascriptI18nTests):
    """Re-run the JavaScript i18n suite against Internet Explorer's webdriver."""
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations(object):
"""RoutesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Wire up the pipeline client, configuration and (de)serializers that
        # the generated service client supplies for this operation group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Route"
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Route, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_09_01.models.Route
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
route_parameters, # type: "_models.Route"
**kwargs # type: Any
):
# type: (...) -> "_models.Route"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_parameters, 'Route')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Route', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
route_parameters, # type: "_models.Route"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Route"]
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update route operation.
:type route_parameters: ~azure.mgmt.network.v2016_09_01.models.Route
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Route or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.Route]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteListResult"]
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_09_01.models.RouteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'} # type: ignore
| |
# -*- coding: utf-8 -*-
"""
TestPluribus.py: this is a tester for the pyPluribus library. Will try to open & close connection to the device,
test basic getters such as CLI and execute_show and will test an end-to-end scenario of a configuration change history.
"""
# stdlib
from __future__ import absolute_import
import unittest
# local modules
import pyPluribus.exceptions
from pyPluribus import PluribusDevice
__author__ = "Mircea Ulinic"
__copyright__ = 'Copyright 2016, CloudFlare, Inc.'
__license__ = "Apache"
__maintainer__ = "Mircea Ulinic"
__contact__ = "mircea@cloudflare.com"
__status__ = "Prototype"
class _MyPluribusDeviceGlobals(object): # pylint: disable=too-few-public-methods
    """
    This class contains only static data, basically only constants to be used in the tester classes.
    """
    def __init__(self):
        pass
    def __repr__(self):
        return 'class {class_name} having the following globals defined: {global_list}'.format(
            class_name=self.__class__.__name__,
            global_list=','.join(dir(self))
        )
    # ----- Connection details ---------------------------------------------------------------------------------------->
    # Placeholders: point these at a reachable test device before running.
    HOSTNAME = 'device.location'
    USERNAME = 'username'
    PASSWORD = 'password'
    # <---- Connection details -----------------------------------------------------------------------------------------
    # A known-good config snippet (device-specific; the port MAC/names
    # presumably need adjusting per test device -- confirm before use).
    VALID_CONFIG = '''trunk-create name core05.scl01 port 4,8 speed 40g no-autoneg \
jumbo enable lacp-mode active port-mac-address 06:a0:00:19:a0:4d send-port 4'''
    VALID_CONFIG_FILE_PATH = 'valid.cfg'
    UNWANTED_CONFIG = '''igmp-snooping-modify enable''' # typed enable instead of disable...
    # Syntactically invalid command: used to exercise ConfigLoadError handling.
    INVALID_CONFIG = '''port-storm-control-modify port 39 speed Xg'''
    # instead of "port-storm-control-modify port 39 speed 10g"
class TestPluribusDevice(unittest.TestCase): # pylint: disable=too-many-public-methods
    """
    This will test the basic methods of the PluribusDevice: open&close connection, CLI getter etc.
    """
    def __repr__(self):
        return 'class {class_name}: {class_doc}'.format(
            class_name=self.__class__.__name__,
            class_doc=self.__doc__
        )
    def __str__(self):
        return self.__repr__()
    # ----- Connection management ------------------------------------------------------------------------------------->
    @classmethod
    def setUpClass(cls):
        """Opens the connection with the device."""
        cls.device = PluribusDevice(_MyPluribusDeviceGlobals.HOSTNAME,
                                    _MyPluribusDeviceGlobals.USERNAME,
                                    _MyPluribusDeviceGlobals.PASSWORD)
        cls.device.open()
    @classmethod
    def tearDownClass(cls):
        """Closes the connection with the device."""
        cls.device.close()
    def test_connection_open(self):
        """Will test if the connection is really open."""
        self.assertTrue(self.device.connected)
    # <---- Connection management ------------------------------------------------------------------------------------->
    # ----- Basic interaction ----------------------------------------------------------------------------------------->
    def test_cli(self):
        """Will test if the CLI is available, trying to execute a simple command."""
        help_output = self.device.cli('help')
        # NOTE: 'unicode' is a Python 2-only builtin; this module targets py2.
        self.assertIsInstance(help_output, unicode)
        self.assertGreater(len(help_output.splitlines()), 100)
    def test_raise_cli(self):
        """Will test cli() to raise Exception in case of inexisting command."""
        wrong_command = 'fakecommand'
        self.assertRaises(pyPluribus.exceptions.CommandExecutionError, self.device.cli, wrong_command)
    def test_execute_show(self):
        """Will try to execute a simple show command on the CLI."""
        bootenv = self.device.execute_show('bootenv-show') # let's execute a simple show command
        self.assertIsInstance(bootenv, unicode)
        lines_output = bootenv.splitlines()
        self.assertGreater(len(lines_output), 0) # make sure the array is not empty now
        for line in lines_output:
            line_elems = line.split(';')
            # assertEqual, not the deprecated assertEquals alias
            self.assertEqual(len(line_elems), 6) # must have exactly 6 elements
    def test_raise_execute_show(self):
        """Will make execute_show() raise exception."""
        wrong_command = 'switch-config-reset' # if it does not raise, will erase the config on the switch...
        self.assertRaises(pyPluribus.exceptions.CommandExecutionError, self.device.execute_show, wrong_command)
    def test_show(self):
        """Will test the show() method if returns content when issuing non-specific-formatted commands."""
        mac_addr_table = self.device.show('l2 table') # for sure this will always return some content
        self.assertGreater(len(mac_addr_table.splitlines()), 0)
    # <---- Basic interaction ------------------------------------------------------------------------------------------
    # ----- Configuration management ---------------------------------------------------------------------------------->
    def test_load_valid_candidate(self):
        """Will try to load a valid candidate config."""
        self.assertFalse(self.device.config.changed()) # config should not be changed at this point
        self.assertTrue(self.device.config.load_candidate(
            config=_MyPluribusDeviceGlobals.VALID_CONFIG))
        self.assertTrue(self.device.config.changed()) # now it should
        self.assertTrue(self.device.config.commit()) # will try to commit changes
        self.assertTrue(self.device.config.committed()) # committed?
        self.assertFalse(self.device.config.changed()) # config should not be changed
    def test_load_valid_candidate_from_file(self): # pylint: disable=invalid-name
        """Will try to load a valid candidate configuration from a file."""
        self.assertFalse(self.device.config.changed()) # config should not be changed at this point
        self.assertTrue(self.device.config.load_candidate(
            filename=_MyPluribusDeviceGlobals.VALID_CONFIG_FILE_PATH))
        self.assertTrue(self.device.config.changed()) # now it should
        self.assertTrue(self.device.config.commit()) # will try to commit changes
        self.assertTrue(self.device.config.committed()) # committed?
        self.assertFalse(self.device.config.changed()) # config should not be changed
    def test_change_config_by_mistake(self):
        """Will simulate a human error and will set a wrong command."""
        self.assertFalse(self.device.config.changed()) # config should not be changed at this point
        self.assertTrue(self.device.config.load_candidate(
            config=_MyPluribusDeviceGlobals.UNWANTED_CONFIG))
        self.assertTrue(self.device.config.changed()) # now it should be changed
        self.assertTrue(self.device.config.discard()) # let's discard the unwanted config
        self.assertFalse(self.device.config.changed()) # config discarded thus not changes
    def test_load_invalid_config(self):
        """
        Will try to load invalid commands.
        Should raise pyPluribus.exceptions.ConfigLoadError and discard the config.
        """
        self.assertFalse(self.device.config.changed())
        self.assertRaises(pyPluribus.exceptions.ConfigLoadError,
                          self.device.config.load_candidate,
                          _MyPluribusDeviceGlobals.INVALID_CONFIG)
        # should raise error and discard the wrong config
        self.assertFalse(self.device.config.changed()) # configuration should not be changed
        self.assertFalse(self.device.config.commit()) # will not commit since the configuration was discarded
        self.assertFalse(self.device.config.committed()) # definitely not committed
    def test_rollback_two_steps(self):
        """
        Should rollback nicely and have on the device the config we initially had.
        There were loaded two valid configurations: from a variable and from a file.
        The unwanted config and invalid config should be already discarded.
        """
        self.assertTrue(self.device.config.rollback(2))
    def test_rollback_big_number_of_steps(self): # pylint: disable=invalid-name
        """Should raise error."""
        self.assertRaises(pyPluribus.exceptions.RollbackError, self.device.config.rollback, 100)
    def test_rollback_negative_number(self):
        """Should raise error."""
        self.assertRaises(pyPluribus.exceptions.RollbackError, self.device.config.rollback, -5)
    def test_rollback_verify(self):
        """
        After the successfully rollback to the initial config and two failed rollbacks, will try to rollback
        once more. But because we are already in the initial state and no more history available, should
        throw an error.
        """
        self.assertRaises(pyPluribus.exceptions.RollbackError, self.device.config.rollback, 1)
if __name__ == '__main__':
    TEST_RUNNER = unittest.TextTestRunner()
    # Stage 1: connectivity and basic CLI getters.
    BASIC_COMMANDS = unittest.TestSuite()
    for _test_name in ("test_connection_open",
                       "test_cli",
                       "test_raise_cli",
                       "test_execute_show",
                       "test_raise_execute_show",
                       "test_show"):
        BASIC_COMMANDS.addTest(TestPluribusDevice(_test_name))
    TEST_RUNNER.run(BASIC_COMMANDS)
    # Stage 2: end-to-end configuration change scenario (order matters).
    FULL_CONFIG_SCENARIO = unittest.TestSuite()
    for _test_name in ("test_load_valid_candidate",
                       "test_load_valid_candidate_from_file",
                       "test_change_config_by_mistake",
                       "test_load_invalid_config",
                       "test_rollback_two_steps",
                       "test_rollback_big_number_of_steps",
                       "test_rollback_negative_number",
                       "test_rollback_verify"):
        FULL_CONFIG_SCENARIO.addTest(TestPluribusDevice(_test_name))
    TEST_RUNNER.run(FULL_CONFIG_SCENARIO)
| |
"""Support for Google Assistant Smart Home API."""
import asyncio
from itertools import product
import logging
from homeassistant.util.decorator import Registry
from homeassistant.const import ATTR_ENTITY_ID
from .const import (
ERR_PROTOCOL_ERROR,
ERR_DEVICE_OFFLINE,
ERR_UNKNOWN_ERROR,
EVENT_COMMAND_RECEIVED,
EVENT_SYNC_RECEIVED,
EVENT_QUERY_RECEIVED,
)
from .helpers import RequestData, GoogleEntity, async_get_entities
from .error import SmartHomeError
# Registry mapping Google Assistant intent names to handler coroutines
# (populated below via @HANDLERS.register decorators).
HANDLERS = Registry()
_LOGGER = logging.getLogger(__name__)
async def async_handle_message(hass, config, user_id, message):
    """Handle incoming API messages.

    Builds the per-request context, delegates to _process, and logs any
    error response before returning it.
    """
    data = RequestData(config, user_id, message.get("requestId"))
    response = await _process(hass, data, message)
    if response is not None and "errorCode" in response["payload"]:
        _LOGGER.error("Error handling message %s: %s", message, response["payload"])
    return response
async def _process(hass, data, message):
    """Route *message* to the handler registered for its single intent.

    Returns a response dict with ``requestId``/``payload`` keys, or ``None``
    when the handler deliberately produces no response.
    """
    inputs = message.get("inputs")  # type: list
    # A well-formed request carries exactly one input. Previously a missing
    # "inputs" key crashed with TypeError on len(None); treat it as a
    # protocol error instead.
    if inputs is None or len(inputs) != 1:
        return {
            "requestId": data.request_id,
            "payload": {"errorCode": ERR_PROTOCOL_ERROR},
        }
    handler = HANDLERS.get(inputs[0].get("intent"))
    if handler is None:
        return {
            "requestId": data.request_id,
            "payload": {"errorCode": ERR_PROTOCOL_ERROR},
        }
    try:
        result = await handler(hass, data, inputs[0].get("payload"))
    except SmartHomeError as err:
        # Known domain error: surface its code to Google.
        return {"requestId": data.request_id, "payload": {"errorCode": err.code}}
    except Exception:  # pylint: disable=broad-except
        # Last-resort boundary: log with traceback, report a generic error.
        _LOGGER.exception("Unexpected error")
        return {
            "requestId": data.request_id,
            "payload": {"errorCode": ERR_UNKNOWN_ERROR},
        }
    if result is None:
        return None
    return {"requestId": data.request_id, "payload": result}
@HANDLERS.register("action.devices.SYNC")
async def async_devices_sync(hass, data, payload):
    """Handle action.devices.SYNC request.
    https://developers.google.com/actions/smarthome/create-app#actiondevicessync

    Serializes every exposed entity concurrently and returns the device list
    together with the agent user id.
    """
    hass.bus.async_fire(
        EVENT_SYNC_RECEIVED, {"request_id": data.request_id}, context=data.context
    )
    exposed = [
        entity
        for entity in async_get_entities(hass, data.config)
        if data.config.should_expose(entity.state)
    ]
    devices = await asyncio.gather(*[entity.sync_serialize() for entity in exposed])
    return {
        "agentUserId": data.config.agent_user_id or data.context.user_id,
        "devices": devices,
    }
@HANDLERS.register("action.devices.QUERY")
async def async_devices_query(hass, data, payload):
    """Handle action.devices.QUERY request.
    https://developers.google.com/actions/smarthome/create-app#actiondevicesquery

    Reports the serialized state of every requested device; unknown entities
    are reported as offline.
    """
    results = {}
    for requested in payload.get("devices", []):
        entity_id = requested["id"]
        state = hass.states.get(entity_id)
        hass.bus.async_fire(
            EVENT_QUERY_RECEIVED,
            {"request_id": data.request_id, ATTR_ENTITY_ID: entity_id},
            context=data.context,
        )
        if not state:
            # If we can't find a state, the device is offline
            results[entity_id] = {"online": False}
            continue
        results[entity_id] = GoogleEntity(hass, data.config, state).query_serialize()
    return {"devices": results}
@HANDLERS.register("action.devices.EXECUTE")
async def handle_devices_execute(hass, data, payload):
    """Handle action.devices.EXECUTE request.
    https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute

    Runs every (device, execution) pair, collecting per-entity results.
    An entity that errors once is skipped for the rest of the request;
    entities that never errored are reported as SUCCESS with fresh state.
    """
    # entities: entity_id -> GoogleEntity cache; results: entity_id -> error result.
    entities = {}
    results = {}
    for command in payload["commands"]:
        # Every execution in a command applies to every device in it.
        for device, execution in product(command["devices"], command["execution"]):
            entity_id = device["id"]
            hass.bus.async_fire(
                EVENT_COMMAND_RECEIVED,
                {
                    "request_id": data.request_id,
                    ATTR_ENTITY_ID: entity_id,
                    "execution": execution,
                },
                context=data.context,
            )
            # Happens if error occurred. Skip entity for further processing
            if entity_id in results:
                continue
            if entity_id not in entities:
                state = hass.states.get(entity_id)
                if state is None:
                    # Unknown entity: record an offline error once.
                    results[entity_id] = {
                        "ids": [entity_id],
                        "status": "ERROR",
                        "errorCode": ERR_DEVICE_OFFLINE,
                    }
                    continue
                entities[entity_id] = GoogleEntity(hass, data.config, state)
            try:
                await entities[entity_id].execute(data, execution)
            except SmartHomeError as err:
                results[entity_id] = {
                    "ids": [entity_id],
                    "status": "ERROR",
                    **err.to_response(),
                }
    # Errors first, then SUCCESS entries for entities with no recorded error.
    final_results = list(results.values())
    for entity in entities.values():
        if entity.entity_id in results:
            continue
        entity.async_update()
        final_results.append(
            {
                "ids": [entity.entity_id],
                "status": "SUCCESS",
                "states": entity.query_serialize(),
            }
        )
    return {"commands": final_results}
@HANDLERS.register("action.devices.DISCONNECT")
async def async_devices_disconnect(hass, data, payload):
    """Handle action.devices.DISCONNECT request.
    https://developers.google.com/actions/smarthome/create#actiondevicesdisconnect

    Nothing to tear down locally; returning None yields no response payload.
    """
    return None
def turned_off_response(message):
    """Return a device turned off response.

    Echoes the incoming request id (None when absent) with the
    ``deviceTurnedOff`` error code.
    """
    request_id = message.get("requestId")
    payload = {"errorCode": "deviceTurnedOff"}
    return {"requestId": request_id, "payload": payload}
| |
#********************************************************************
#* Copyright (C) 2002 LSI Logic Corporation. *
#* All Rights Reserved. *
#********************************************************************
#-----------------------------------------------
# oaTracerUtl: Utility package for the Pickers in oaTracer
#-----------------------------------------------
import openaccess22
import BrowserWx
from BrowserWx import *
import re
import string
#---------------------------------------------------------------
def getInstCellviewName(inst):
    """Return the cell name of an oaInst as a plain Python string."""
    ns = openaccess22.oaNativeNS()
    name = openaccess22.oaString()
    inst.getCellName(ns, name)
    return str(name)
#---------------------------------------------------------------
def getInstCellviewNames(inst):
    """Return [lib, cell, view] names of an oaInst as Python strings."""
    ns = openaccess22.oaNativeNS()
    buf = openaccess22.oaString()
    names = []
    # Each getter fills the shared oaString buffer; copy it out immediately.
    for getter in (inst.getLibName, inst.getCellName, inst.getViewName):
        getter(ns, buf)
        names.append(str(buf))
    return names
#---------------------------------------------------------------
def getInstCellviewFullname(inst):
    """Return 'lib cell view' of an oaInst as one space-separated string."""
    # str.join works on Python 2 and 3; string.join() was removed in Python 3.
    return ' '.join(getInstCellviewNames(inst))
#---------------------------------------------------------------
def getInstModuleFullname(inst):
    """Return the name of the master module of an oaInst."""
    ns = openaccess22.oaNativeNS()
    name = openaccess22.oaString()
    inst.getMaster().getName(ns, name)
    return str(name)
#---------------------------------------------------------------
def getOccInstOccFullname(inst):
    """Return the hierarchical path name of an occurrence instance."""
    ns = openaccess22.oaNativeNS()
    path = openaccess22.oaString()
    inst.getPathName(ns, path)
    return str(path)
#---------------------------------------------------------------
def getCellviewName(design):
    """Return the cell name of the design owning *design*."""
    owner = design.getDesign()
    ns = openaccess22.oaNativeNS()
    name = openaccess22.oaString()
    owner.getCellName(ns, name)
    return str(name)
#---------------------------------------------------------------
def getCellviewNames(design):
    """Return [lib, cell, view] names of the owning design as strings."""
    owner = design.getDesign()
    ns = openaccess22.oaNativeNS()
    buf = openaccess22.oaString()
    names = []
    # Each getter fills the shared oaString buffer; copy it out immediately.
    for getter in (owner.getLibName, owner.getCellName, owner.getViewName):
        getter(ns, buf)
        names.append(str(buf))
    return names
#---------------------------------------------------------------
def getCellviewFullname(design):
    """Return 'lib cell view' of the owning design as one string."""
    # str.join works on Python 2 and 3; string.join() was removed in Python 3.
    return ' '.join(getCellviewNames(design))
#---------------------------------------------------------------
def getModuleNames(module):
    """Return [lib, cell, view, module] names for *module*."""
    dsn = module.getDesign()
    buf = openaccess22.oaString()
    ns = openaccess22.oaNativeNS()
    names = []
    # First three getters come from the owning design, the last from the
    # module itself; all write into the shared buffer.
    for getter in (dsn.getLibName, dsn.getCellName, dsn.getViewName,
                   module.getName):
        getter(ns, buf)
        names.append(str(buf))
    return names
#---------------------------------------------------------------
def getModuleFullname(design):
    """Return 'lib cell view module' for the given module object.

    NOTE(review): the parameter is named 'design' but is forwarded to
    getModuleNames(), which treats it as a module -- name kept for
    caller compatibility.
    """
    # ' '.join replaces the deprecated string.join (removed in Python 3).
    return ' '.join(getModuleNames(design))
#---------------------------------------------------------------
def getDesObjectRepr(obj):
    """Return a human-readable tag for a design object.

    Blocks, modules and occurrences get an 'oaXxx: ...' prefix plus their
    full name; anything else falls back to str(obj).
    """
    if obj.isBlockObject():
        return 'oaBlock: %s' % getCellviewFullname(obj)
    if obj.isModObject():
        return 'oaModule: %s' % getModuleFullname(obj)
    if obj.isOccObject():
        text = 'oaOccurrence: %s ' % getCellviewFullname(obj)
        occ_inst = obj.getOccInst()
        if occ_inst:
            buf = openaccess22.oaString()
            ns = openaccess22.oaNativeNS()
            occ_inst.getPathName(ns, buf)
            return text + str(buf)
        return text + '.'
    return str(obj)
#---------------------------------------------------------------
# master members:
# self.AllObjects() - returns a list of all objects
# self.FindObject(name) - Fetch an object by name
# self.ObjectName(obj) - Returns an objects name
#
class cTracerUtility:
    """Generic list-picker dialog used by the oaTracer pickers.

    A subclass (the "master") is expected to provide:
      self.AllObjects()      - returns a list of all objects
      self.FindObject(name)  - fetch an object by name
      self.ObjectName(obj)   - returns an object's name
    NOTE(review): this class also defines a zero-argument ObjectName();
    the master's one-argument version must take precedence for
    LoadAll/Read/Evaluate to work -- confirm against concrete subclasses.
    """

    #######################################
    # Result access
    #######################################
    def ObjectList(self):
        """Return the list of names handed back by the dialog, or None if empty."""
        data = self.mReturnData
        if len(data) == 0:
            return None
        return data

    def FormatList(self, list):
        """Join *list* into one space-separated string; None when list is falsy."""
        if not list:
            return None
        return " ".join(list)

    def ObjectNames(self):
        """Return all returned names as one space-separated string."""
        return self.FormatList(self.ObjectList())

    def ObjectName(self):
        """Return the first returned name, or None when nothing was returned."""
        # BUG FIX: was self.ObjectList(self), which passed an extra
        # positional argument and raised TypeError.
        data = self.ObjectList()
        if data:
            return data[0]
        return data

    #######################################
    # List-building actions
    #######################################
    def ApplyUpdate(self, l):
        """Merge the candidate names *l* into the listbox.

        The Action choice selects the merge mode:
          0 (Add)    - current entries plus any new names from *l*
          1 (Filter) - names from *l* that are already in the list
          2 (Remove) - names from *l* that are not already in the list
        Returns the new number of entries.
        """
        mode = self.mAction.get()
        values = self.mMenu.allvalues()
        present = {}
        for item in values:
            present[item] = 1
        if mode == 0:
            for item in l:
                if item in present:
                    continue
                values.append(item)
                present[item] = 1
            newvalues = values
        elif mode == 1:
            newvalues = [item for item in l if item in present]
        elif mode == 2:
            newvalues = [item for item in l if item not in present]
        else:
            # Defensive fallback: an unknown mode leaves the list unchanged
            # (the original raised NameError here).
            newvalues = values
        self.mMenu.clear()
        self.mMenu.insert(newvalues)
        return len(newvalues)

    def LoadAll(self, *v):
        """Load the (sorted) names of every object from the master."""
        names = [self.ObjectName(obj) for obj in self.AllObjects()]
        names.sort()
        return self.ApplyUpdate(names)

    def Read(self, *v):
        """Load object names from the file named in the File entry field.

        Lines starting with '#' are skipped; unknown names are ignored.
        """
        names = []
        path = self.mFile.get()
        if not path:
            return None
        try:
            # Context manager fixes the original's file-handle leak.
            with open(path, 'r') as fp:
                for line in fp:
                    if line[0] != '#':
                        # NOTE(review): the raw line (including its newline)
                        # is passed to FindObject, as before -- confirm the
                        # master strips it.
                        obj = self.FindObject(line)
                        if obj is not None:
                            names.append(self.ObjectName(obj))
        except Exception:
            # Best effort, as before: unreadable files are silently ignored
            # (narrowed from a bare except so Ctrl-C still works).
            pass
        return self.ApplyUpdate(names)

    def Evaluate(self, *v):
        """Load names of all objects matching the regex in the entry field."""
        expr = self.mExpr.get()
        if not expr:
            return None
        prog = re.compile(expr)
        names = []
        for obj in self.AllObjects():
            name = self.ObjectName(obj)
            if prog.match(name) is not None:
                names.append(name)
        names.sort()
        return self.ApplyUpdate(names)

    #######################################
    # Return / selection actions
    #######################################
    def ReturnAll(self, *v):
        """Hand every listbox entry back to the caller and close the window."""
        self.mReturnData = self.mMenu.allvalues()
        if self.mReturnAction:
            # Direct call replaces apply(), which was removed in Python 3.
            self.mReturnAction()
        self.Detach()
        self.mReturnData = []

    def ReturnSelected(self, *v):
        """Hand only the selected entries back to the caller and close."""
        self.mReturnData = self.mMenu.values()
        if self.mReturnAction:
            self.mReturnAction()
        self.Detach()
        self.mReturnData = []

    def DeselectAll(self, *v):
        self.mMenu.select_all(0)

    def SelectAll(self, *v):
        self.mMenu.select_all(1)

    def RemoveSelected(self, *v):
        self.mMenu.remove_selected()

    def RemoveUnselected(self, *v):
        self.mMenu.remove_unselected()

    def Empty(self, *v):
        """Clear the visible list (or the saved display data when not shown)."""
        if self.mTop is not None:
            self.mMenu.clear()
        else:
            self.mDisplayData = []

    def DetachAction(self):
        """Hook for subclasses; called just before the window is torn down."""
        pass

    def Raise(self):
        self.mTop.Raise()

    def Report(self, *v):
        self.mTop.Report()

    def Detach(self, *v):
        """Tear down the window, saving its contents for the next Display()."""
        if self.mTop is None:
            return
        self.mDisplayData = self.mMenu.allvalues()
        self.DetachAction()
        self.mTop.Detach()
        self.mTop = None

    #######################################
    # Display
    #######################################
    def Display(self, clear=0):
        """Build and show the picker window (or raise it if already shown)."""
        if self.mTop is not None:
            self.mTop.Raise()
            return
        if clear:
            self.mDisplayData = []
        self.mTop = cWinTop(self.mTitle, self.Detach)
        self.mCol = self.mTop.CreateColumn(1)
        row = self.mCol.CreateRow(1)
        self.mMenu = cWinScrollListbox(row, multiple=1, minsize=(400, 200),
                                       double_command=self.ReturnSelected)
        self.mMenu.insert(self.mDisplayData)
        col = row.CreateColumn()
        # One button per row in the side column, in the original order.
        for label, command in (('DeselectAll', self.DeselectAll),
                               ('SelectAll', self.SelectAll),
                               ('RemoveSelected', self.RemoveSelected),
                               ('RemoveUnselected', self.RemoveUnselected),
                               ('Empty', self.Empty)):
            cWinButton(col.CreateRow(), label, command=command, minwidth=120)
        row = self.mCol.CreateRow()
        cWinLabel(row, 'Action:')
        self.mAction = cWinChoice(row, ['Add', 'Filter', 'Remove'])
        row = self.mCol.CreateRow()
        cWinLabel(row, 'Expression:')
        self.mExpr = cWinEntry(row)
        cWinButton(row, 'Eval', self.Evaluate)
        row = self.mCol.CreateRow()
        cWinLabel(row, 'File:')
        self.mFile = cWinEntry(row)
        cWinButton(row, 'Read', self.Read)
        row = self.mCol.CreateRow()
        cWinButton(row, text='ReturnSelected', command=self.ReturnSelected)
        cWinButton(row, text='ReturnAll', command=self.ReturnAll)
        cWinButton(row, text='Cancel', command=self.Detach)
        if self.mWithLoadAll:
            cWinButton(row, text='LoadAll', command=self.LoadAll)
        #cWinButton(row,text='Report',command=self.Report)
        self.mTop.End()

    #######################################
    # Init
    #######################################
    def __init__(self, title, return_action=None):
        """Create the picker.

        Args:
          title: window title string.
          return_action: optional callable invoked when names are returned.
        """
        self.mReturnAction = return_action
        self.mReturnData = []    # names handed back by Return*()
        self.mDisplayData = []   # listbox contents persisted across windows
        self.mTop = None         # top-level window; None while not displayed
        self.mTitle = title
        self.mWithLoadAll = 1    # show the LoadAll button
#---------------------------------------------------------------
class cTracerUtilityCellview(cTracerUtility):
    """Picker bound to a specific cellview design."""

    #######################################
    # Display
    #######################################
    def Display(self, design=None, clear=0):
        """Show the picker; switching to a different design clears the list."""
        if design is not None:
            if self.mDesign != design:
                clear = 1
            self.mDesign = design
        cTracerUtility.Display(self, clear=clear)

    #######################################
    # UpdateCellview
    #######################################
    def UpdateCellview(self, design):
        """Point the picker at *design* and empty the current list."""
        self.mDesign = design
        self.Empty()

    #######################################
    # Init
    #######################################
    def __init__(self, design, title, return_action=None):
        """Create a picker titled '<title>: <lib cell view>' for *design*."""
        self.mDesign = design
        # BUG FIX: this module *is* oaTracerUtl; the original referenced
        # oaTracerUtl.getCellviewFullname without importing that name,
        # which raised NameError at construction time.
        title = title + ': ' + getCellviewFullname(self.mDesign)
        cTracerUtility.__init__(self, title, return_action)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import _pywrap_py_exception_registry
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import error_interpolation
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
def _compact_stack_trace(op):
  """Returns a traceback for `op` with common file prefixes stripped."""
  prefix = error_interpolation.traceback_files_common_prefix([[op]])
  stripped = []
  # TODO(slebedev): switch to .filename etc once 2.X support is dropped.
  for frame_file, frame_lineno, frame_name, frame_line in op.traceback:
    if frame_file.startswith(prefix):
      frame_file = frame_file[len(prefix):]
    stripped.append((frame_file, frame_lineno, frame_name, frame_line))
  return stripped
class InaccessibleTensorError(ValueError):
  # No behavior beyond ValueError; the class name documents the intent.
  """Raised when referencing a tensor that is inaccessible in the current scope."""
  pass
class OperatorNotAllowedInGraphError(TypeError):
  # No behavior beyond TypeError; the class name documents the intent.
  """Raised when an unsupported operator is used while building a graph."""
  pass
@tf_export("errors.OpError", v1=["errors.OpError", "OpError"])
@deprecation.deprecated_endpoints("OpError")
class OpError(Exception):
  """A generic error that is raised when TensorFlow execution fails.

  Whenever possible, the session will raise a more specific subclass
  of `OpError` from the `tf.errors` module.
  """

  def __init__(self, node_def, op, message, error_code):
    """Creates a new `OpError` indicating that a particular op failed.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto representing the op that
        failed, if known; otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
      error_code: The `error_codes_pb2.Code` describing the error.
    """
    super(OpError, self).__init__()
    self._node_def = node_def
    self._op = op
    self._message = message
    self._error_code = error_code

  def __reduce__(self):
    # Allow the subclasses to accept less arguments in their __init__.
    # Pickling re-creates the exception from the attributes matching the
    # subclass's __init__ parameter names (skipping `self`).
    init_argspec = tf_inspect.getargspec(self.__class__.__init__)
    args = tuple(getattr(self, arg) for arg in init_argspec.args[1:])
    return self.__class__, args

  @property
  def message(self):
    """The error message that describes the error."""
    return self._message

  @property
  def op(self):
    """The operation that failed, if known.

    *N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
    or `Recv` op, there will be no corresponding
    `tf.Operation`
    object. In that case, this will return `None`, and you should
    instead use the `tf.errors.OpError.node_def` to
    discover information about the op.

    Returns:
      The `Operation` that failed, or None.
    """
    return self._op

  @property
  def error_code(self):
    """The integer error code that describes the error."""
    return self._error_code

  @property
  def node_def(self):
    """The `NodeDef` proto representing the op that failed."""
    return self._node_def

  def __str__(self):
    # With a known op, render the message plus the op's (compacted) creation
    # stack trace, walking the chain of _original_op ancestors.
    if self._op is not None:
      output = ["%s\n\nOriginal stack trace for %r:\n" % (self.message,
                                                          self._op.name,)]
      curr_traceback_list = traceback.format_list(
          _compact_stack_trace(self._op))
      output.extend(curr_traceback_list)
      # pylint: disable=protected-access
      original_op = self._op._original_op
      # pylint: enable=protected-access
      while original_op is not None:
        output.append(
            "\n...which was originally created as op %r, defined at:\n"
            % (original_op.name,))
        prev_traceback_list = curr_traceback_list
        curr_traceback_list = traceback.format_list(
            _compact_stack_trace(original_op))
        # Attempt to elide large common subsequences of the subsequent
        # stack traces.
        #
        # TODO(mrry): Consider computing the actual longest common subsequence.
        is_eliding = False
        elide_count = 0
        last_elided_line = None
        for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
          if line == line_in_prev:
            # First matching line is printed; subsequent matches are counted
            # and summarized by the "[elided ...]" message below.
            if is_eliding:
              elide_count += 1
              last_elided_line = line
            else:
              output.append(line)
              is_eliding = True
              elide_count = 0
          else:
            if is_eliding:
              if elide_count > 0:
                output.extend(
                    ["[elided %d identical lines from previous traceback]\n"
                     % (elide_count - 1,), last_elided_line])
              is_eliding = False
            # NOTE: extend() appends the string's *characters* one by one;
            # harmless here because the parts are re-joined with "" below.
            output.extend(line)
        # pylint: disable=protected-access
        original_op = original_op._original_op
        # pylint: enable=protected-access
      return "".join(output)
    else:
      # Without an op we have no stack trace to show.
      return self.message
# Aliases for the proto-level error codes, re-exported as tf.errors.*
# module constants via tf_export.
OK = error_codes_pb2.OK
tf_export("errors.OK").export_constant(__name__, "OK")
CANCELLED = error_codes_pb2.CANCELLED
tf_export("errors.CANCELLED").export_constant(__name__, "CANCELLED")
UNKNOWN = error_codes_pb2.UNKNOWN
tf_export("errors.UNKNOWN").export_constant(__name__, "UNKNOWN")
INVALID_ARGUMENT = error_codes_pb2.INVALID_ARGUMENT
tf_export("errors.INVALID_ARGUMENT").export_constant(__name__,
                                                     "INVALID_ARGUMENT")
DEADLINE_EXCEEDED = error_codes_pb2.DEADLINE_EXCEEDED
tf_export("errors.DEADLINE_EXCEEDED").export_constant(__name__,
                                                      "DEADLINE_EXCEEDED")
NOT_FOUND = error_codes_pb2.NOT_FOUND
tf_export("errors.NOT_FOUND").export_constant(__name__, "NOT_FOUND")
ALREADY_EXISTS = error_codes_pb2.ALREADY_EXISTS
tf_export("errors.ALREADY_EXISTS").export_constant(__name__, "ALREADY_EXISTS")
PERMISSION_DENIED = error_codes_pb2.PERMISSION_DENIED
tf_export("errors.PERMISSION_DENIED").export_constant(__name__,
                                                      "PERMISSION_DENIED")
UNAUTHENTICATED = error_codes_pb2.UNAUTHENTICATED
tf_export("errors.UNAUTHENTICATED").export_constant(__name__, "UNAUTHENTICATED")
RESOURCE_EXHAUSTED = error_codes_pb2.RESOURCE_EXHAUSTED
tf_export("errors.RESOURCE_EXHAUSTED").export_constant(__name__,
                                                       "RESOURCE_EXHAUSTED")
FAILED_PRECONDITION = error_codes_pb2.FAILED_PRECONDITION
tf_export("errors.FAILED_PRECONDITION").export_constant(__name__,
                                                        "FAILED_PRECONDITION")
ABORTED = error_codes_pb2.ABORTED
tf_export("errors.ABORTED").export_constant(__name__, "ABORTED")
OUT_OF_RANGE = error_codes_pb2.OUT_OF_RANGE
tf_export("errors.OUT_OF_RANGE").export_constant(__name__, "OUT_OF_RANGE")
UNIMPLEMENTED = error_codes_pb2.UNIMPLEMENTED
tf_export("errors.UNIMPLEMENTED").export_constant(__name__, "UNIMPLEMENTED")
INTERNAL = error_codes_pb2.INTERNAL
tf_export("errors.INTERNAL").export_constant(__name__, "INTERNAL")
UNAVAILABLE = error_codes_pb2.UNAVAILABLE
tf_export("errors.UNAVAILABLE").export_constant(__name__, "UNAVAILABLE")
DATA_LOSS = error_codes_pb2.DATA_LOSS
tf_export("errors.DATA_LOSS").export_constant(__name__, "DATA_LOSS")
# pylint: disable=line-too-long
@tf_export("errors.CancelledError")
class CancelledError(OpError):
  """Raised when an operation or step is cancelled.

  For example, a long-running operation (e.g.
  `tf.QueueBase.enqueue` may be
  cancelled by running another operation (e.g.
  `tf.QueueBase.close`,
  or by `tf.Session.close`.
  A step that is running such a long-running operation will fail by raising
  `CancelledError`.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates a `CancelledError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(CancelledError, self).__init__(node_def, op, message, CANCELLED)
# pylint: enable=line-too-long
@tf_export("errors.UnknownError")
class UnknownError(OpError):
  """Unknown error.

  An example of where this error may be returned is if a Status value
  received from another address space belongs to an error-space that
  is not known to this address space. Also, errors raised by APIs that
  do not return enough error information may be converted to this
  error.

  @@__init__
  """

  def __init__(self, node_def, op, message, error_code=UNKNOWN):
    """Creates an `UnknownError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
      error_code: The `error_codes_pb2.Code` to report; defaults to UNKNOWN.
    """
    super(UnknownError, self).__init__(node_def, op, message, error_code)
@tf_export("errors.InvalidArgumentError")
class InvalidArgumentError(OpError):
  """Raised when an operation receives an invalid argument.

  This may occur, for example, if an operation receives an input
  tensor that has an invalid value or shape. For example, the
  `tf.matmul` op will raise this
  error if it receives an input that is not a matrix, and the
  `tf.reshape` op will raise
  this error if the new shape does not match the number of elements in the input
  tensor.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates an `InvalidArgumentError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(InvalidArgumentError, self).__init__(node_def, op, message,
                                               INVALID_ARGUMENT)
@tf_export("errors.DeadlineExceededError")
class DeadlineExceededError(OpError):
  """Raised when a deadline expires before an operation could complete.

  This exception is not currently used.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates a `DeadlineExceededError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(DeadlineExceededError, self).__init__(node_def, op, message,
                                                DEADLINE_EXCEEDED)
@tf_export("errors.NotFoundError")
class NotFoundError(OpError):
  """Raised when a requested entity (e.g., a file or directory) was not found.

  For example, running the
  `tf.WholeFileReader.read`
  operation could raise `NotFoundError` if it receives the name of a file that
  does not exist.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates a `NotFoundError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND)
@tf_export("errors.AlreadyExistsError")
class AlreadyExistsError(OpError):
  """Raised when an entity that we attempted to create already exists.

  For example, running an operation that saves a file
  (e.g. `tf.train.Saver.save`)
  could potentially raise this exception if an explicit filename for an
  existing file was passed.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates an `AlreadyExistsError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(AlreadyExistsError, self).__init__(node_def, op, message,
                                             ALREADY_EXISTS)
@tf_export("errors.PermissionDeniedError")
class PermissionDeniedError(OpError):
  """Raised when the caller does not have permission to run an operation.

  For example, running the
  `tf.WholeFileReader.read`
  operation could raise `PermissionDeniedError` if it receives the name of a
  file for which the user does not have the read file permission.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates a `PermissionDeniedError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(PermissionDeniedError, self).__init__(node_def, op, message,
                                                PERMISSION_DENIED)
@tf_export("errors.UnauthenticatedError")
class UnauthenticatedError(OpError):
  """The request does not have valid authentication credentials.

  This exception is not currently used.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates an `UnauthenticatedError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(UnauthenticatedError, self).__init__(node_def, op, message,
                                               UNAUTHENTICATED)
@tf_export("errors.ResourceExhaustedError")
class ResourceExhaustedError(OpError):
  """Some resource has been exhausted.

  For example, this error might be raised if a per-user quota is
  exhausted, or perhaps the entire file system is out of space.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates a `ResourceExhaustedError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(ResourceExhaustedError, self).__init__(node_def, op, message,
                                                 RESOURCE_EXHAUSTED)
@tf_export("errors.FailedPreconditionError")
class FailedPreconditionError(OpError):
  """Operation was rejected because the system is not in a state to execute it.

  This exception is most commonly raised when running an operation
  that reads a `tf.Variable`
  before it has been initialized.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates a `FailedPreconditionError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(FailedPreconditionError, self).__init__(node_def, op, message,
                                                  FAILED_PRECONDITION)
@tf_export("errors.AbortedError")
class AbortedError(OpError):
  """The operation was aborted, typically due to a concurrent action.

  For example, running a
  `tf.QueueBase.enqueue`
  operation may raise `AbortedError` if a
  `tf.QueueBase.close` operation
  previously ran.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates an `AbortedError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(AbortedError, self).__init__(node_def, op, message, ABORTED)
@tf_export("errors.OutOfRangeError")
class OutOfRangeError(OpError):
  """Raised when an operation iterates past the valid input range.

  This exception is raised in "end-of-file" conditions, such as when a
  `tf.QueueBase.dequeue`
  operation is blocked on an empty queue, and a
  `tf.QueueBase.close`
  operation executes.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates an `OutOfRangeError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(OutOfRangeError, self).__init__(node_def, op, message,
                                          OUT_OF_RANGE)
@tf_export("errors.UnimplementedError")
class UnimplementedError(OpError):
  """Raised when an operation has not been implemented.

  Some operations may raise this error when passed otherwise-valid
  arguments that it does not currently support. For example, running
  the `tf.nn.max_pool2d` operation
  would raise this error if pooling was requested on the batch dimension,
  because this is not yet supported.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates an `UnimplementedError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(UnimplementedError, self).__init__(node_def, op, message,
                                             UNIMPLEMENTED)
@tf_export("errors.InternalError")
class InternalError(OpError):
  """Raised when the system experiences an internal error.

  This exception is raised when some invariant expected by the runtime
  has been broken. Catching this exception is not recommended.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates an `InternalError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(InternalError, self).__init__(node_def, op, message, INTERNAL)
@tf_export("errors.UnavailableError")
class UnavailableError(OpError):
  """Raised when the runtime is currently unavailable.

  This exception is not currently used.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates an `UnavailableError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(UnavailableError, self).__init__(node_def, op, message,
                                           UNAVAILABLE)
@tf_export("errors.DataLossError")
class DataLossError(OpError):
  """Raised when unrecoverable data loss or corruption is encountered.

  For example, this may be raised by running a
  `tf.WholeFileReader.read`
  operation, if the file is truncated while it is being read.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates a `DataLossError`.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto of the failed op, if known;
        otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
    """
    super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)
# Forward map: proto error code -> Python exception class.
_CODE_TO_EXCEPTION_CLASS = {
    CANCELLED: CancelledError,
    UNKNOWN: UnknownError,
    INVALID_ARGUMENT: InvalidArgumentError,
    DEADLINE_EXCEEDED: DeadlineExceededError,
    NOT_FOUND: NotFoundError,
    ALREADY_EXISTS: AlreadyExistsError,
    PERMISSION_DENIED: PermissionDeniedError,
    UNAUTHENTICATED: UnauthenticatedError,
    RESOURCE_EXHAUSTED: ResourceExhaustedError,
    FAILED_PRECONDITION: FailedPreconditionError,
    ABORTED: AbortedError,
    OUT_OF_RANGE: OutOfRangeError,
    UNIMPLEMENTED: UnimplementedError,
    INTERNAL: InternalError,
    UNAVAILABLE: UnavailableError,
    DATA_LOSS: DataLossError,
}

# Register the mapping with the C++ layer so native code can raise the
# matching Python exception types.
_pywrap_py_exception_registry.PyExceptionRegistry_Init(_CODE_TO_EXCEPTION_CLASS)

# Reverse map: exception class -> proto error code.
_EXCEPTION_CLASS_TO_CODE = {
    class_: code for code, class_ in _CODE_TO_EXCEPTION_CLASS.items()}
@tf_export(v1=["errors.exception_type_from_error_code"])
def exception_type_from_error_code(error_code):
  """Returns the `OpError` subclass registered for `error_code`.

  Raises:
    KeyError: If `error_code` is not one of the known proto error codes.
  """
  return _CODE_TO_EXCEPTION_CLASS[error_code]
@tf_export(v1=["errors.error_code_from_exception_type"])
def error_code_from_exception_type(cls):
  """Returns the proto error code registered for exception class `cls`.

  NOTE(review): for an unregistered class this warns and returns an
  `UnknownError` *instance* rather than a code -- surprising, but that is
  what the code does; callers may rely on it, so it is preserved.
  """
  try:
    return _EXCEPTION_CLASS_TO_CODE[cls]
  except KeyError:
    warnings.warn("Unknown class exception")
    return UnknownError(None, None, "Unknown class exception", None)
def _make_specific_exception(node_def, op, message, error_code):
  """Builds the `OpError` subclass matching `error_code`.

  Falls back to `UnknownError` (with a warning) when `error_code` is not
  a registered proto error code.
  """
  try:
    exc_type = exception_type_from_error_code(error_code)
    return exc_type(node_def, op, message)
  except KeyError:
    warnings.warn("Unknown error code: %d" % error_code)
    return UnknownError(node_def, op, message, error_code)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
# TODO(b/77295559): expand use of TF_Status* SWIG typemap and deprecate this.
@tf_export(v1=["errors.raise_exception_on_not_ok_status"])  # pylint: disable=invalid-name
class raise_exception_on_not_ok_status(object):
  """Context manager to check for C API status."""

  def __enter__(self):
    # A fresh status object per `with` block; the raw status is handed to
    # the caller so C API calls can write their result into it.
    self.status = c_api_util.ScopedTFStatus()
    return self.status.status

  def __exit__(self, type_arg, value_arg, traceback_arg):
    try:
      # Non-zero code means the C call failed; translate it into the
      # matching OpError subclass.
      if c_api.TF_GetCode(self.status.status) != 0:
        raise _make_specific_exception(
            None, None,
            compat.as_text(c_api.TF_Message(self.status.status)),
            c_api.TF_GetCode(self.status.status))
      # Delete the underlying status object from memory otherwise it stays alive
      # as there is a reference to status from this from the traceback due to
      # raise.
    finally:
      del self.status
    return False  # False values do not suppress exceptions
| |
import numpy as np
import tensorflow as tf
import h5py
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Functions for initializing neural nets parameters
def init_weight_variable(shape):
    """Return a trainable weight tensor drawn from a truncated normal (std 0.1)."""
    init_value = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
    return tf.Variable(init_value)
def init_bias_variable(shape):
    """Return a trainable bias tensor of the given shape, filled with 0.1."""
    init_value = tf.constant(0.1, shape=shape, dtype=tf.float32)
    return tf.Variable(init_value)
def conv2d(x, W):
    """2-D convolution of x with filter W, unit strides, no padding (VALID)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def loadData(filepath):
    """Load train/validation features and labels from an HDF5 (.mat v7.3) file.

    Args:
      filepath: path to a file containing the datasets 'trainingFeatures',
        'trainingLabels', 'validationFeatures' and 'validationLabels'.

    Returns:
      [X_train, y_train, X_val, y_val] as numpy arrays.
    """
    print('==> Experiment 2_0c')
    print('==> Loading data from {}'.format(filepath))
    # benchmark
    t_start = time.time()
    # Context manager guarantees the HDF5 handle is closed (the original
    # only did `del f`); explicit 'r' also avoids h5py's default-mode
    # deprecation warning. np.array() copies the data before the file closes.
    with h5py.File(filepath, 'r') as f:
        X_train = np.array(f.get('trainingFeatures'))
        y_train = np.array(f.get('trainingLabels'))
        X_val = np.array(f.get('validationFeatures'))
        y_val = np.array(f.get('validationLabels'))
    t_end = time.time()
    print('--Time elapsed for loading data: {t:.2f} \
seconds'.format(t = t_end - t_start))
    print('-- Number of training samples: {}'.format(X_train.shape[0]))
    print('-- Number of validation samples: {}'.format(X_val.shape[0]))
    print('Shape of X_train: %s'%str(X_train.shape))
    print('Shape of y_train: %s'%str(y_train.shape))
    print('Shape of X_val: %s'%str(X_val.shape))
    print('Shape of y_val: %s'%str(y_val.shape))
    return [X_train, y_train, X_val, y_val]
def runNeuralNet(num_freq, X_train, y_train, X_val, y_val, batch_size, num_epochs, pooling_strategy):
    """Train a single-conv-layer softmax classifier; record accuracy/error.

    Args:
      num_freq: frequency bins per frame; total_features must divide by it.
      X_train, y_train: training features (N x total_features) and labels.
      X_val, y_val: validation features and labels.
      batch_size: minibatch size for the Adam updates.
      num_epochs: number of passes over the training data.
      pooling_strategy: accepted but never used by this implementation.

    Returns:
      [train_acc_list, val_acc_list, train_err_list, val_err_list,
       epoch_numbers], sampled every `print_freq` epochs.
    """
    # Neural-network model set-up
    num_training_vec, total_features = X_train.shape
    num_frames = int(total_features / num_freq)
    print('-- Num frames: {}'.format(num_frames))
    num_classes = int(max(y_train.max(), y_val.max()) + 1)
    k1 = 5   # number of 1x1 conv filters
    k2 = 0   # unused (second layer is commented out below)
    l = num_frames
    print("Num Classes: %g"%(num_classes))
    print_freq = 1
    # Transform labels into on-hot encoding form
    y_train_OHEnc = tf.one_hot(y_train.copy(), num_classes)
    y_val_OHEnc = tf.one_hot(y_val.copy(), num_classes)
    # Set-up input and output label
    x = tf.placeholder(tf.float32, [None, total_features])
    y_ = tf.placeholder(tf.float32, [None, num_classes])
    # go straight from input to output, densely connected to SM layer
    '''
    W_sm = init_weight_variable([total_features, num_classes])
    b_sm = init_bias_variable([num_classes])
    y_conv = tf.matmul(x, W_sm) + b_sm
    '''
    print("Running single convolutional layer with %g 1x1 filters"%(k1))
    # single convolutional layer
    W_conv1 = init_weight_variable([1, 1, 1, k1]) # Old: [num_freq, 1, 1, k1]
    b_conv1 = init_bias_variable([k1])
    x_image = tf.reshape(x, [-1, num_freq, num_frames, 1])
    h_conv1 = conv2d(x_image, W_conv1) + b_conv1 # tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1), no non-linearity
    h_conv1_flat = tf.reshape(h_conv1, [-1, k1 * num_freq * num_frames]) #tf.reshape(h_conv1, [-1, num_frames * k1]) --- use this type of thing to make multiple scaled versions of data? enhance dataset?
    W_sm = init_weight_variable([k1 * num_freq * num_frames, num_classes])
    b_sm = init_bias_variable([num_classes])
    y_conv = tf.matmul(h_conv1_flat, W_sm) + b_sm
    '''
    # One hidden layer then softmax
    numHiddenUnits = 100
    W_1 = init_weight_variable([total_features, numHiddenUnits])
    b_1 = init_bias_variable([numHiddenUnits])
    W_sm = init_weight_variable([numHiddenUnits, num_classes])
    b_sm = init_bias_variable([num_classes])
    hiddenActivation = tf.nn.relu(tf.matmul(x, W_1) + b_1)
    y_conv = tf.matmul(hiddenActivation, W_sm) + b_sm
    '''
    # second layer
    #W_conv2 = init_weight_variable([1, l, k1, k2])
    #b_conv2 = init_bias_variable([k2])
    #h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
    #h_conv2_flat = tf.reshape(h_conv2, [-1, (num_frames - l + 1) * k2])
    #h_pool2 = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # softmax layer
    #W_sm = init_weight_variable([(num_frames - l + 1) * k2, num_classes])
    #b_sm = init_bias_variable([num_classes])
    #y_conv = tf.matmul(h_conv2_flat, W_sm) + b_sm
    # evaluations
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # session
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    # Materialize the one-hot labels (drops the singleton middle axis).
    y_train = sess.run(y_train_OHEnc)[:, 0, :]
    y_val = sess.run(y_val_OHEnc)[:, 0, :]
    # print("h_conv1 %s"%str(h_conv1.eval(feed_dict={x:X_train, y_:y_train})))
    # print("W_sm is: %s"%str(W_sm.eval()))
    # print("h_conv1_flat is: %s"%str(h_conv1_flat.eval(feed_dict={x:X_train, y_:y_train})))
    # print("y_conv: %s"%str(y_conv.eval(feed_dict={x: X_train, y_: y_train})))
    # print("y_ is : %s"%str(y_.eval(feed_dict={x:X_train, y_:y_train})))
    train_acc_list = []
    val_acc_list = []
    train_err_list = []
    val_err_list = []
    epoch_numbers = []
    # benchmark
    t_start = time.time()
    for epoch in range(num_epochs):
        epochStart = time.time()
        # Minibatch sweep; the final batch may be smaller than batch_size.
        for i in range(0, num_training_vec, batch_size):
            batch_end_point = min(i + batch_size, num_training_vec)
            train_batch_data = X_train[i : batch_end_point]
            train_batch_label = y_train[i : batch_end_point]
            train_step.run(feed_dict={x: train_batch_data, y_: train_batch_label})
        epochEnd = time.time()
        # printing and recording data
        if (epoch + 1) % print_freq == 0:
            train_acc = accuracy.eval(feed_dict={x:X_train, y_: y_train})
            train_acc_list.append(train_acc)
            val_acc = accuracy.eval(feed_dict={x: X_val, y_: y_val})
            val_acc_list.append(val_acc)
            train_err = cross_entropy.eval(feed_dict={x: X_train, y_: y_train})
            train_err_list.append(train_err)
            val_err = cross_entropy.eval(feed_dict={x: X_val, y_: y_val})
            val_err_list.append(val_err)
            epoch_numbers += [epoch]
            #print("-- epoch: %d, training error %g"%(epoch + 1, train_err))
            print("epoch: %d, time: %g, t acc, v acc, t cost, v cost: %g, %g, %g, %g"%(epoch+1, epochEnd - epochStart, train_acc, val_acc, train_err, val_err))
    t_end = time.time()
    print('--Time elapsed for training: {t:.2f} \
seconds'.format(t = t_end - t_start))
    return [train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers]
'''
Our Main
Command Line Arguments: (1) Length of horizontal window
'''
# Load the training/validation split from the experiment's .mat file.
# NOTE(review): hard-coded cluster path -- confirm it exists before running.
[X_train, y_train, X_val, y_val] = loadData('/pylon2/ci560sp/cstrong/exp2/exp2_d15_1s_2.mat')
batchSize = 1000
numEpochs = 300
poolingStrategy = 'MAX'
# NOTE(review): despite the docstring above, the horizontal window length
# (121) is hard-coded here rather than read from the command line.
[train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers] = runNeuralNet(121, X_train, y_train, X_val, y_val, batchSize, numEpochs, poolingStrategy)
# Reports: final-epoch metrics from the recorded curves.
print('-- Training accuracy: {:.4f}'.format(train_acc_list[-1]))
print('-- Validation accuracy: {:.4f}'.format(val_acc_list[-1]))
print('-- Training error: {:.4E}'.format(train_err_list[-1]))
print('-- Validation error: {:.4E}'.format(val_err_list[-1]))
# Plot cross-entropy error for both splits against epoch number.
print('==> Generating error plot...')
x_list = epoch_numbers
train_err_plot, = plt.plot(x_list, train_err_list, 'b.')
val_err_plot, = plt.plot(x_list, val_err_list, '.', color='orange')
plt.xlabel('Number of epochs')
plt.ylabel('Cross-Entropy Error')
plt.title('Error vs Number of Epochs')
plt.legend((train_err_plot, val_err_plot), ('training', 'validation'), loc='best')
plt.savefig('exp2_0c_k1=5.png', format='png')
plt.close()
print('==> Done.')
# Disabled smoke-test invocation, kept for reference as a string literal.
'''
y_ = np.array([[1, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
y_ = np.array([[0], [1], [2], [3], [3]])
x = np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29]])
x_val = np.array([[5, 6, 7, 8, 9, 10], [9, 10, 11, 12, 13, 14], [11, 12, 13, 14, 15, 16]])
y_val = np.array([[1], [3], [2]])
runNeuralNet(2, x, y_, x_val, y_val, 1, 300, 'MAX')
'''
# Recorded results of earlier runs, kept as a string literal.
'''
K1 = 5
--Time elapsed for training: 2883.65 seconds
-- Training accuracy: 0.9682
-- Validation accuracy: 0.8862
-- Training error: 1.3700E-01
-- Validation error: 4.2459E-01
K1 = 10
--Time elapsed for training: 3057.72 seconds
-- Training accuracy: 0.2608
-- Validation accuracy: 0.8906
-- Training error: 8.5545E+00
-- Validation error: 4.2435E-01
==> Generating error plot...
'''
| |
#! /usr/bin/python
import argparse
import ast
import os
import re
import sys
import yaml
class DefinitionVisitor(ast.NodeVisitor):
    """Collect names defined and referenced in a Python AST.

    Maintains four registries: nested function definitions, nested class
    definitions, identifiers bucketed by load/store context, and attribute
    accesses.  Each nested scope is walked by its own sub-visitor whose
    registries are recorded under the scope's name.
    """

    def __init__(self):
        super().__init__()
        self.functions = {}
        self.classes = {}
        self.names = {}
        self.attrs = set()
        # One dict exposing all registries under stable section keys.
        self.definitions = {
            "def": self.functions,
            "class": self.classes,
            "names": self.names,
            "attrs": self.attrs,
        }

    def visit_Name(self, node):
        # Bucket the identifier by its context class name (Load/Store/Del).
        bucket = self.names.setdefault(type(node.ctx).__name__, set())
        bucket.add(node.id)

    def visit_Attribute(self, node):
        self.attrs.add(node.attr)
        # Keep walking: the attribute's value expression may contain names.
        for child in ast.iter_child_nodes(node):
            self.visit(child)

    def _enter_scope(self, node, registry):
        # A nested scope gets a fresh visitor; its registries are published
        # in *registry* under the scope's name before the walk.
        scoped = DefinitionVisitor()
        registry[node.name] = scoped.definitions
        for child in ast.iter_child_nodes(node):
            scoped.visit(child)

    def visit_ClassDef(self, node):
        self._enter_scope(node, self.classes)

    def visit_FunctionDef(self, node):
        self._enter_scope(node, self.functions)
def non_empty(defs):
    """Condense a raw definitions dict, dropping empty sections.

    Recurses into nested function/class scopes, derives the "uses" list
    (names read without being locally bound, plus all attribute accesses)
    and always carries the raw "names" and "attrs" registries through.
    """
    result = {}
    condensed_funcs = {child: non_empty(sub) for child, sub in defs["def"].items()}
    if condensed_funcs:
        result["def"] = condensed_funcs
    condensed_classes = {child: non_empty(sub) for child, sub in defs["class"].items()}
    if condensed_classes:
        result["class"] = condensed_classes
    names = defs["names"]
    params = names.get("Param", ())
    stored = names.get("Store", ())
    # A "use" is a loaded name that is neither a parameter nor stored here.
    uses = [loaded for loaded in names.get("Load", ())
            if loaded not in params and loaded not in stored]
    uses.extend(defs["attrs"])
    if uses:
        result["uses"] = uses
    result["names"] = names
    result["attrs"] = defs["attrs"]
    return result
def definitions_in_code(input_code):
    """Parse *input_code* and return its condensed definition tree."""
    visitor = DefinitionVisitor()
    visitor.visit(ast.parse(input_code))
    return non_empty(visitor.definitions)
def definitions_in_file(filepath):
    """Return the condensed definition tree of the Python file at *filepath*."""
    with open(filepath) as source:
        code = source.read()
    return definitions_in_code(code)
def defined_names(prefix, defs, names):
    """Record every definition path found in *defs* (mutates *names*).

    Each function/class name maps to a dict whose "defined" list collects
    the dotted paths (prefixed with *prefix*) where it is defined.
    """
    for section in ("def", "class"):
        for child, members in defs.get(section, {}).items():
            entry = names.setdefault(child, {"defined": []})
            entry["defined"].append(prefix + child)
            # Recurse with the child's name appended to the path prefix.
            defined_names(prefix + child + ".", members, names)
def used_names(prefix, item, defs, names):
    """Cross-reference uses of known names (mutates *names*).

    For every use of a name present in *names*, records the using item's
    "uses" list and the used name's "used" mapping (user -> paths).
    """
    for section in ("def", "class"):
        for child, members in defs.get(section, {}).items():
            used_names(prefix + child + ".", child, members, names)
    path = prefix.rstrip(".")
    for used in defs.get("uses", ()):
        if used not in names:
            continue
        if item:
            names[item].setdefault("uses", []).append(used)
        names[used].setdefault("used", {}).setdefault(item, []).append(path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Find definitions.")
    parser.add_argument(
        "--unused", action="store_true", help="Only list unused definitions"
    )
    parser.add_argument(
        "--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
    )
    parser.add_argument(
        "--pattern", action="append", metavar="REGEXP", help="Search for a pattern"
    )
    parser.add_argument(
        "directories",
        nargs="+",
        metavar="DIR",
        help="Directories to search for definitions",
    )
    parser.add_argument(
        "--referrers",
        default=0,
        type=int,
        help="Include referrers up to the given depth",
    )
    parser.add_argument(
        "--referred",
        default=0,
        type=int,
        help="Include referred down to the given depth",
    )
    parser.add_argument(
        "--format", default="yaml", help="Output format, one of 'yaml' or 'dot'"
    )
    args = parser.parse_args()

    # Pass 1: harvest a condensed definition tree from every .py file
    # found under the given directories.
    definitions = {}
    for directory in args.directories:
        for root, _, files in os.walk(directory):
            for filename in files:
                if filename.endswith(".py"):
                    filepath = os.path.join(root, filename)
                    definitions[filepath] = definitions_in_file(filepath)

    # Pass 2: index where each name is defined, then cross-reference uses.
    names = {}
    for filepath, defs in definitions.items():
        defined_names(filepath + ":", defs, names)
    for filepath, defs in definitions.items():
        used_names(filepath + ":", None, defs, names)

    # Filter the index by --pattern / --ignore / --unused.
    patterns = [re.compile(pattern) for pattern in args.pattern or ()]
    ignore = [re.compile(pattern) for pattern in args.ignore or ()]
    result = {}
    for name, definition in names.items():
        if patterns and not any(pattern.match(name) for pattern in patterns):
            continue
        if ignore and any(pattern.match(name) for pattern in ignore):
            continue
        if args.unused and definition.get("used"):
            continue
        result[name] = definition

    # Optionally pull in names that refer to the selected ones, one layer
    # per iteration -- result grows, so deeper layers are reached as the
    # depth counter allows.
    # NOTE(review): loop nesting reconstructed from a whitespace-stripped
    # source; the inclusion loop is assumed to sit inside the depth loop.
    referrer_depth = args.referrers
    referrers = set()
    while referrer_depth:
        referrer_depth -= 1
        for entry in result.values():
            for used_by in entry.get("used", ()):
                referrers.add(used_by)
        for name, definition in names.items():
            if name not in referrers:
                continue
            if ignore and any(pattern.match(name) for pattern in ignore):
                continue
            result[name] = definition

    # Symmetrically, pull in names that the selected ones use.
    referred_depth = args.referred
    referred = set()
    while referred_depth:
        referred_depth -= 1
        for entry in result.values():
            for uses in entry.get("uses", ()):
                referred.add(uses)
        for name, definition in names.items():
            if name not in referred:
                continue
            if ignore and any(pattern.match(name) for pattern in ignore):
                continue
            result[name] = definition

    if args.format == "yaml":
        yaml.dump(result, sys.stdout, default_flow_style=False)
    elif args.format == "dot":
        # Graphviz output: one node per name, an edge user -> used name.
        print("digraph {")
        for name, entry in result.items():
            print(name)
            for used_by in entry.get("used", ()):
                if used_by in result:
                    print(used_by, "->", name)
        print("}")
    else:
        raise ValueError("Unknown format %r" % (args.format))
| |
#!/usr/bin/env python2
import inspect
import sys
def get_user_attributes(cls):
    """Return the user-defined, non-method members of *cls*.

    Keyword arguments:
    cls -- the class to reflect on

    Filters out every attribute name that a bare new-style class already
    has, leaving only user-declared (name, value) pairs.
    """
    baseline = set(dir(type('dummy', (object,), {})))
    members = inspect.getmembers(cls, lambda attr: not inspect.ismethod(attr))
    return [member for member in members if member[0] not in baseline]
class Score:
    """Base class for CVSS metric vectors parsed from "K:V / K:V" strings.

    Subclasses declare their metric names as class attributes; parsing
    creates instance attributes of the same names holding the letters.
    """

    def __init__(self, string=None):
        """Constructor

        Keyword arguments:
        string -- metric vector such as "AV:N / AC:L" (Default: None)
        """
        self.from_string(string)

    def from_string(self, string):
        """Populate one instance attribute per class-level metric name.

        Keyword arguments:
        string -- metric vector; empty/None leaves every metric as None
        """
        metric_names = get_user_attributes(self.__class__)
        if string:
            parsed = dict(entry.strip().split(':')
                          for entry in string.strip(' ()').split('/'))
            for metric, _ in metric_names:
                self.__dict__[metric] = parsed.get(metric)
        else:
            for metric, _ in metric_names:
                self.__dict__[metric] = None

    def __str__(self):
        """Join set metrics as "name:value"; unset metrics render as '0'."""
        rendered = []
        for metric, _ in get_user_attributes(self.__class__):
            value = self.__dict__[metric]
            rendered.append('%s:%s' % (metric, value) if value else '0')
        return ' / '.join(rendered)
class Base(Score):
    """CVSS base-metric vector and score.

    The class attributes are metric weight tables (constants match the
    CVSS v2 equations -- TODO confirm against the spec).  After
    from_string() runs, instance attributes of the same names (AV, AC,
    ...) hold the parsed metric letters and shadow the class-level
    tables, which is why get_score() indexes them explicitly as
    Base.AV[self.AV] etc.
    """

    # Access Vector weights.
    AV = {
        'L' : 0.395,
        'A' : 0.646,
        'N' : 1.0
    }
    # Access Complexity weights.
    AC = {
        'H' : 0.35,
        'M' : 0.61,
        'L' : 0.71
    }
    # Authentication weights.
    Au = {
        'M' : 0.45,
        'S' : 0.56,
        'N' : 0.704
    }
    # Confidentiality / Integrity / Availability impact weights.
    # NOTE: one shared dict deliberately aliased under three names.
    C = A = I = {
        'N' : 0.0,
        'P' : 0.275,
        'C' : 0.660
    }

    def get_score(self):
        """Compute the base score; returns 0 on unknown/unset metrics.

        Keyword arguments:
        """
        try:
            impact = 10.41 * (1 -
                (1 - Base.C[self.C]) *
                (1 - Base.I[self.I]) *
                (1 - Base.A[self.A]))
            exploit = 20 * (Base.AV[self.AV] *
                Base.AC[self.AC] *
                Base.Au[self.Au])
        except KeyError:
            # An unset (None) or unrecognised metric letter: score is 0.
            return 0
        # `impact and 1.176` yields 0 when impact is 0, else 1.176.
        f_impact = impact and 1.176
        return round(((0.6 * impact) + (0.4 * exploit) - 1.5) * f_impact, 1)

    def __str__(self):
        """Render the base vector as 'AV:x / AC:x / Au:x / C:x / I:x / A:x'.

        Keyword arguments:
        """
        return 'AV:%s / AC:%s / Au:%s / C:%s / I:%s / A:%s' % (self.AV,
            self.AC, self.Au, self.C, self.I, self.A)

    def from_string(self, string):
        # Same parsing as Score.from_string, plus normalisation of the
        # legacy 'AN' access-vector token to 'A'.
        cls_vars = get_user_attributes(self.__class__)
        if not string:
            for var, _ in cls_vars:
                self.__dict__[var] = None
        else:
            data = dict([d.strip().split(':') for d in string.strip(' ()')
                         .split('/')])
            if data['AV'] == 'AN':
                data['AV'] = 'A'
            for var, _ in cls_vars:
                if var in data:
                    self.__dict__[var] = data[var]
                else:
                    self.__dict__[var] = None
class Temporal(Score):
    """CVSS temporal-metric vector and score.

    As with Base, instance attributes set by from_string() shadow the
    class-level weight tables, hence the explicit Temporal.E[self.E]
    lookups in get_score().
    """

    # Exploitability weights.
    E = {
        'U'  : 0.85,
        'POC': 0.9,
        'F'  : 0.95,
        'H'  : 1.0,
        'ND' : 1.0
    }
    # Remediation Level weights.
    RL = {
        'OF' : 0.87,
        'TF' : 0.9,
        'W'  : 0.95,
        'U'  : 1.0,
        'ND' : 1.0
    }
    # Report Confidence weights.
    RC = {
        'UC' : 0.9,
        'UR' : 0.95,
        'C'  : 1.0,
        'ND' : 1.0
    }

    def get_score(self, base):
        """Compute the temporal score; returns 0 on unknown/unset metrics.

        Keyword arguments:
        base -- a Base instance, or an already-computed base score number
        """
        if isinstance(base, Base):
            base = base.get_score()
        try:
            return round(base *
                Temporal.E[self.E] *
                Temporal.RL[self.RL] *
                Temporal.RC[self.RC], 1)
        except KeyError:
            return 0

    def __str__(self):
        """Render the temporal vector as 'E:x / RL:x / RC:x'.

        Keyword arguments:
        """
        return 'E:%s / RL:%s / RC:%s' % (self.E,
            self.RL, self.RC)
class Environmental(Score):
    """CVSS environmental-metric vector and score.

    Instance attributes set by from_string() shadow the class-level
    weight tables, hence the explicit Environmental.CR[self.CR] lookups.
    """

    # Collateral Damage Potential weights.
    CDP = {
        'N'  : 0.0,
        'L'  : 0.1,
        'LM' : 0.3,
        'MH' : 0.4,
        'H'  : 0.5,
        'ND' : 0.0
    }
    # Target Distribution weights.
    TD = {
        'N'  : 0.0,
        'L'  : 0.25,
        'M'  : 0.75,
        'H'  : 1.0,
        'ND' : 1.0
    }
    # Confidentiality / Integrity / Availability requirement weights.
    # NOTE: one shared dict deliberately aliased under three names.
    CR = IR = AR = {
        'L'  : 0.5,
        'M'  : 1.0,
        'H'  : 1.51,
        'ND' : 1.0
    }

    def get_score(self, base, temp):
        """Compute the environmental score; 0 on unknown/unset metrics.

        Keyword arguments:
        base -- Base instance whose metric letters are re-weighted here
        temp -- Temporal instance, applied to the adjusted base score
        """
        try:
            adj_impact = min(10,
                10.41 * (1 -
                    (1 - Base.C[base.C] * Environmental.CR[self.CR]) *
                    (1 - Base.I[base.I] * Environmental.IR[self.IR]) *
                    (1 - Base.A[base.A] * Environmental.AR[self.AR])))
            exploit = (20 * (Base.AV[base.AV] *
                Base.AC[base.AC] *
                Base.Au[base.Au]))
            # `adj_impact and 1.176` yields 0 when the impact is 0.
            f_impact = adj_impact and 1.176
            adj_base = round(((0.6 * adj_impact) + (0.4 * exploit) - 1.5) *
                f_impact, 1)
            tmp_score = temp.get_score(adj_base)
            return round((tmp_score + (10 - tmp_score) *
                Environmental.CDP[self.CDP]) * Environmental.TD[self.TD], 1)
        except KeyError:
            return 0

    def __str__(self):
        """Render the vector as 'CDP:x / TD:x / CR:x / IR:x / AR:x'.

        Keyword arguments:
        """
        return 'CDP:%s / TD:%s / CR:%s / IR:%s / AR:%s' % (self.CDP,
            self.TD, self.CR, self.IR, self.AR)
class Cvss2:
    """Aggregate of the three CVSS v2 score components."""

    def __init__(self):
        """Constructor

        Starts with empty base, temporal and environmental vectors;
        callers replace them with parsed instances before scoring.
        """
        self.base = Base()
        self.tmp = Temporal()
        self.env = Environmental()

    def get_score(self):
        """Return the overall (environmental) score.

        Keyword arguments:
        """
        return self.env.get_score(self.base, self.tmp)
def main(filep, out=sys.stdout, crlf=False):
    """Recompute CVSS v2 scores for one record read from *filep*.

    Keyword arguments:
    filep -- open file; lines 4-6 hold the base/temporal/environmental
             vectors (the handle is closed after reading)
    out -- destination stream (Default: sys.stdout)
    crlf -- end written lines with CRLF instead of LF (Default: False)
    """
    lines = filep.readlines()
    filep.close()

    cvss = Cvss2()
    cvss.base = Base(lines[3])
    cvss.tmp = Temporal(lines[4])
    cvss.env = Environmental(lines[5])
    score = cvss.get_score()

    eol = "\r\n" if crlf else "\n"
    # Echo the header, the normalised vectors, the untouched middle
    # section, then the recomputed score and its qualitative rating.
    out.writelines(lines[:3])
    for component in (cvss.base, cvss.tmp, cvss.env):
        out.write(str(component) + eol)
    out.writelines(lines[6:9])
    out.write(str(score) + eol)
    if score < 4.0:
        rating = 'Low'
    elif score >= 7:
        rating = 'High'
    else:
        rating = 'Medium'
    out.write(rating + eol)
if __name__ == '__main__':
    # Re-score the report file named on the command line; main() takes
    # ownership of the handle and closes it after reading.
    __FP = open(sys.argv[1])
    main(__FP)
# vim: set ts=4 sw=4 tw=79 :
| |
#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Burnin program
"""
import sys
import optparse
import time
import socket
import urllib
import random
import string # pylint: disable=W0402
from itertools import izip, islice, cycle
from cStringIO import StringIO
from operator import or_
from ganeti import opcodes
from ganeti import constants
from ganeti import cli
from ganeti import errors
from ganeti import utils
from ganeti import hypervisor
from ganeti import compat
from ganeti import pathutils
from ganeti.confd import client as confd_client
from ganeti.runtime import (GetClient)
# Usage string printed by Usage() before exiting with an error.
USAGE = ("\tburnin -o OS_NAME [options...] instance_name ...")

# Number of attempts for retryable operations (see JobHandler.MaybeRetry).
MAX_RETRIES = 3

# Per-indent-level message prefixes used by Log().
LOG_HEADERS = {
    0: "- ",
    1: "* ",
    2: "",
}

#: Disk templates supporting a single node
_SINGLE_NODE_DISK_TEMPLATES = compat.UniqueFrozenset([
    constants.DT_DISKLESS,
    constants.DT_PLAIN,
    constants.DT_FILE,
    constants.DT_SHARED_FILE,
    constants.DT_EXT,
    constants.DT_RBD,
    constants.DT_GLUSTER
])

#: Disk templates the burnin accepts via -t/--disk-template
_SUPPORTED_DISK_TEMPLATES = compat.UniqueFrozenset([
    constants.DT_DISKLESS,
    constants.DT_DRBD8,
    constants.DT_EXT,
    constants.DT_FILE,
    constants.DT_PLAIN,
    constants.DT_RBD,
    constants.DT_SHARED_FILE,
    constants.DT_GLUSTER
])

#: Disk templates for which import/export is tested
_IMPEXP_DISK_TEMPLATES = (_SUPPORTED_DISK_TEMPLATES - frozenset([
    constants.DT_DISKLESS,
    constants.DT_FILE,
    constants.DT_SHARED_FILE,
    constants.DT_GLUSTER
]))
class InstanceDown(Exception):
    """Raised when a checked instance turned out not to be running."""
class BurninFailure(Exception):
    """Raised when a failure is detected while burning instances."""
def Usage():
    """Shows program usage information and exits the program."""
    # Direct stream writes, equivalent to the old "print >> sys.stderr".
    sys.stderr.write("Usage:\n")
    sys.stderr.write(USAGE + "\n")
    sys.exit(2)
def Log(msg, *args, **kwargs):
    """Simple function that prints out its argument.

    Interpolates *args into *msg* (printf-style) when given; the keyword
    argument "indent" selects the indentation level and its header prefix.
    """
    text = msg % args if args else msg
    depth = kwargs.get("indent", 0)
    header = LOG_HEADERS.get(depth, "  ")
    sys.stdout.write("%*s%s%s\n" % (2 * depth, "", header, text))
    sys.stdout.flush()
def Err(msg, exit_code=1):
    """Simple error logging that prints to stderr.

    Writes *msg* followed by a newline, flushes, and terminates the
    process with *exit_code*.
    """
    stream = sys.stderr
    stream.write(msg + "\n")
    stream.flush()
    sys.exit(exit_code)
def RandomString(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random string of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
class SimpleOpener(urllib.FancyURLopener):
    """A simple url opener"""
    # pylint: disable=W0221

    def prompt_user_passwd(self, host, realm, clear_cache=0):
        """No-interaction version of prompt_user_passwd."""
        # we follow parent class' API
        # pylint: disable=W0613
        return None, None

    def http_error_default(self, url, fp, errcode, errmsg, headers):
        """Custom error handling"""
        # Drain and close the response so the socket does not linger in
        # CLOSE_WAIT; same as BasicURLOpener but with our own exception.
        fp.read()
        fp.close()
        raise InstanceDown("HTTP error returned: code %s, msg %s" %
                           (errcode, errmsg))
# Command-line option definitions for the burnin tool; each "dest" name
# becomes an attribute of Burner.opts (see ParseOptions).
OPTIONS = [
    cli.cli_option("-o", "--os", dest="os", default=None,
                   help="OS to use during burnin",
                   metavar="<OS>",
                   completion_suggest=cli.OPT_COMPL_ONE_OS),
    cli.HYPERVISOR_OPT,
    cli.OSPARAMS_OPT,
    cli.cli_option("--disk-size", dest="disk_size",
                   help="Disk size (determines disk count)",
                   default="1G", type="string", metavar="<size,size,...>",
                   completion_suggest=("512M 1G 4G 1G,256M"
                                       " 4G,1G,1G 10G").split()),
    cli.cli_option("--disk-growth", dest="disk_growth", help="Disk growth",
                   default="128m", type="string", metavar="<size,size,...>"),
    cli.cli_option("--mem-size", dest="mem_size", help="Memory size",
                   default=None, type="unit", metavar="<size>",
                   completion_suggest=("128M 256M 512M 1G 4G 8G"
                                       " 12G 16G").split()),
    cli.cli_option("--maxmem-size", dest="maxmem_size", help="Max Memory size",
                   default=256, type="unit", metavar="<size>",
                   completion_suggest=("128M 256M 512M 1G 4G 8G"
                                       " 12G 16G").split()),
    cli.cli_option("--minmem-size", dest="minmem_size", help="Min Memory size",
                   default=128, type="unit", metavar="<size>",
                   completion_suggest=("128M 256M 512M 1G 4G 8G"
                                       " 12G 16G").split()),
    cli.cli_option("--vcpu-count", dest="vcpu_count", help="VCPU count",
                   default=3, type="unit", metavar="<count>",
                   completion_suggest=("1 2 3 4").split()),
    cli.DEBUG_OPT,
    cli.VERBOSE_OPT,
    cli.NOIPCHECK_OPT,
    cli.NONAMECHECK_OPT,
    cli.EARLY_RELEASE_OPT,
    # The --no-* family below disables individual burnin phases; each
    # defaults to True (phase enabled).
    cli.cli_option("--no-replace1", dest="do_replace1",
                   help="Skip disk replacement with the same secondary",
                   action="store_false", default=True),
    cli.cli_option("--no-replace2", dest="do_replace2",
                   help="Skip disk replacement with a different secondary",
                   action="store_false", default=True),
    cli.cli_option("--no-failover", dest="do_failover",
                   help="Skip instance failovers", action="store_false",
                   default=True),
    cli.cli_option("--no-migrate", dest="do_migrate",
                   help="Skip instance live migration",
                   action="store_false", default=True),
    cli.cli_option("--no-move", dest="do_move",
                   help="Skip instance moves", action="store_false",
                   default=True),
    cli.cli_option("--no-importexport", dest="do_importexport",
                   help="Skip instance export/import", action="store_false",
                   default=True),
    cli.cli_option("--no-startstop", dest="do_startstop",
                   help="Skip instance stop/start", action="store_false",
                   default=True),
    cli.cli_option("--no-reinstall", dest="do_reinstall",
                   help="Skip instance reinstall", action="store_false",
                   default=True),
    cli.cli_option("--no-reboot", dest="do_reboot",
                   help="Skip instance reboot", action="store_false",
                   default=True),
    cli.cli_option("--no-renamesame", dest="do_renamesame",
                   help="Skip instance rename to same name", action="store_false",
                   default=True),
    cli.cli_option("--reboot-types", dest="reboot_types",
                   help="Specify the reboot types", default=None),
    cli.cli_option("--no-activate-disks", dest="do_activate_disks",
                   help="Skip disk activation/deactivation",
                   action="store_false", default=True),
    cli.cli_option("--no-add-disks", dest="do_addremove_disks",
                   help="Skip disk addition/removal",
                   action="store_false", default=True),
    cli.cli_option("--no-add-nics", dest="do_addremove_nics",
                   help="Skip NIC addition/removal",
                   action="store_false", default=True),
    cli.cli_option("--no-nics", dest="nics",
                   help="No network interfaces", action="store_const",
                   const=[], default=[{}]),
    cli.cli_option("--no-confd", dest="do_confd_tests",
                   help="Skip confd queries",
                   action="store_false", default=True),
    cli.cli_option("--rename", dest="rename", default=None,
                   help=("Give one unused instance name which is taken"
                         " to start the renaming sequence"),
                   metavar="<instance_name>"),
    cli.cli_option("-t", "--disk-template", dest="disk_template",
                   choices=list(_SUPPORTED_DISK_TEMPLATES),
                   default=constants.DT_DRBD8,
                   help=("Disk template (default %s, otherwise one of %s)" %
                         (constants.DT_DRBD8,
                          utils.CommaJoin(_SUPPORTED_DISK_TEMPLATES)))),
    cli.cli_option("-n", "--nodes", dest="nodes", default="",
                   help=("Comma separated list of nodes to perform"
                         " the burnin on (defaults to all nodes)"),
                   completion_suggest=cli.OPT_COMPL_MANY_NODES),
    cli.cli_option("-I", "--iallocator", dest="iallocator",
                   default=None, type="string",
                   help=("Perform the allocation using an iallocator"
                         " instead of fixed node spread (node restrictions no"
                         " longer apply, therefore -n/--nodes must not be"
                         " used"),
                   completion_suggest=cli.OPT_COMPL_ONE_IALLOCATOR),
    cli.cli_option("-p", "--parallel", default=False, action="store_true",
                   dest="parallel",
                   help=("Enable parallelization of some operations in"
                         " order to speed burnin or to test granular locking")),
    cli.cli_option("--net-timeout", default=15, type="int",
                   dest="net_timeout",
                   help=("The instance check network timeout in seconds"
                         " (defaults to 15 seconds)"),
                   completion_suggest="15 60 300 900".split()),
    cli.cli_option("-C", "--http-check", default=False, action="store_true",
                   dest="http_check",
                   help=("Enable checking of instance status via http,"
                         " looking for /hostname.txt that should contain the"
                         " name of the instance")),
    cli.cli_option("-K", "--keep-instances", default=False,
                   action="store_true",
                   dest="keep_instances",
                   help=("Leave instances on the cluster after burnin,"
                         " for investigation in case of errors or simply"
                         " to use them")),
    cli.REASON_OPT,
]

# Mainly used for bash completion
ARGUMENTS = [cli.ArgInstance(min=1)]
def _DoCheckInstances(fn):
"""Decorator for checking instances.
"""
def wrapper(self, *args, **kwargs):
val = fn(self, *args, **kwargs)
for instance in self.instances:
self._CheckInstanceAlive(instance) # pylint: disable=W0212
return val
return wrapper
def _DoBatch(retry):
"""Decorator for possible batch operations.
Must come after the _DoCheckInstances decorator (if any).
@param retry: whether this is a retryable batch, will be
passed to StartBatch
"""
def wrap(fn):
def batched(self, *args, **kwargs):
self.StartBatch(retry)
val = fn(self, *args, **kwargs)
self.CommitQueue()
return val
return batched
return wrap
class FeedbackAccumulator(object):
    """Feedback accumulator class."""

    # NOTE(review): class-level attributes -- the buffer is shared by every
    # instance (and subclass) unless reassigned; with the py2 cStringIO,
    # truncate(0) is relied on to reset the buffer.
    _feed_buf = StringIO()
    opts = None

    def ClearFeedbackBuf(self):
        """Clear the feedback buffer."""
        self._feed_buf.truncate(0)

    def GetFeedbackBuf(self):
        """Return the contents of the buffer."""
        return self._feed_buf.getvalue()

    def Feedback(self, msg):
        """Accumulate feedback in our buffer."""
        # msg is a job-feedback tuple; presumably (timestamp, type, text):
        # msg[0] goes through utils.MergeTime, msg[2] is the message body
        # -- TODO confirm against cli.PollJob's feedback format.
        formatted_msg = "%s %s" % (time.ctime(utils.MergeTime(msg[0])), msg[2])
        self._feed_buf.write(formatted_msg + "\n")
        if self.opts.verbose:
            Log(formatted_msg, indent=3)
class JobHandler(FeedbackAccumulator):
    """Class for handling Ganeti jobs."""

    # NOTE: class-level defaults, shared by all instances until StartBatch /
    # CommitQueue rebind them on the instance.
    queued_ops = []
    queue_retry = False

    def __init__(self):
        self.cl = cli.GetClient()

    def MaybeRetry(self, retry_count, msg, fn, *args):
        """Possibly retry a given function execution.

        @type retry_count: int
        @param retry_count: retry counter:
            - 0: non-retryable action
            - 1: last retry for a retryable action
            - MAX_RETRIES: original try for a retryable action
        @type msg: str
        @param msg: the kind of the operation
        @type fn: callable
        @param fn: the function to be called
        @return: whatever fn(*args) returned, possibly after retries

        """
        try:
            val = fn(*args)
            if retry_count > 0 and retry_count < MAX_RETRIES:
                Log("Idempotent %s succeeded after %d retries",
                    msg, MAX_RETRIES - retry_count)
            return val
        except Exception as err:  # pylint: disable=W0703
            if retry_count == 0:
                Log("Non-idempotent %s failed, aborting", msg)
                raise
            elif retry_count == 1:
                Log("Idempotent %s repeated failure, aborting", msg)
                raise
            else:
                Log("Idempotent %s failed, retry #%d/%d: %s",
                    msg, MAX_RETRIES - retry_count + 1, MAX_RETRIES, err)
                # Bug fix: propagate the retried call's result.  Previously
                # the recursive call's return value was discarded, so a
                # successful retry handed None back to the caller.
                return self.MaybeRetry(retry_count - 1, msg, fn, *args)

    def _ExecOp(self, *ops):
        """Execute one or more opcodes and manage the exec buffer.

        @return: if only one opcode has been passed, we return its result;
            otherwise we return the list of results

        """
        job_id = cli.SendJob(ops, cl=self.cl)
        results = cli.PollJob(job_id, cl=self.cl, feedback_fn=self.Feedback)
        if len(ops) == 1:
            return results[0]
        else:
            return results

    def ExecOp(self, retry, *ops):
        """Execute one or more opcodes and manage the exec buffer.

        @param retry: whether the opcodes may be retried on failure
        @return: if only one opcode has been passed, we return its result;
            otherwise we return the list of results

        """
        if retry:
            rval = MAX_RETRIES
        else:
            rval = 0
        cli.SetGenericOpcodeOpts(ops, self.opts)
        return self.MaybeRetry(rval, "opcode", self._ExecOp, *ops)

    def ExecOrQueue(self, name, ops, post_process=None):
        """Execute an opcode and manage the exec buffer."""
        if self.opts.parallel:
            # Parallel mode: defer execution until CommitQueue().
            cli.SetGenericOpcodeOpts(ops, self.opts)
            self.queued_ops.append((ops, name, post_process))
        else:
            val = self.ExecOp(self.queue_retry, *ops)  # pylint: disable=W0142
            if post_process is not None:
                post_process()
            return val

    def StartBatch(self, retry):
        """Start a new batch of jobs.

        @param retry: whether this is a retryable batch

        """
        self.queued_ops = []
        self.queue_retry = retry

    def CommitQueue(self):
        """Execute all submitted opcodes in case of parallel burnin"""
        if not self.opts.parallel or not self.queued_ops:
            return

        if self.queue_retry:
            rval = MAX_RETRIES
        else:
            rval = 0

        try:
            results = self.MaybeRetry(rval, "jobset", self.ExecJobSet,
                                      self.queued_ops)
        finally:
            # Always drop the queue, even if the jobset ultimately failed.
            self.queued_ops = []
        return results

    def ExecJobSet(self, jobs):
        """Execute a set of jobs and return once all are done.

        The method will return the list of results, if all jobs are
        successful. Otherwise, OpExecError will be raised from within
        cli.py.

        """
        self.ClearFeedbackBuf()
        jex = cli.JobExecutor(cl=self.cl, feedback_fn=self.Feedback)
        for ops, name, _ in jobs:
            jex.QueueJob(name, *ops)  # pylint: disable=W0142
        try:
            results = jex.GetResults()
        except Exception as err:  # pylint: disable=W0703
            Log("Jobs failed: %s", err)
            raise BurninFailure()

        fail = False
        val = []
        for (_, name, post_process), (success, result) in zip(jobs, results):
            if success:
                if post_process:
                    try:
                        post_process()
                    except Exception as err:  # pylint: disable=W0703
                        Log("Post process call for job %s failed: %s", name, err)
                        fail = True
                val.append(result)
            else:
                fail = True

        if fail:
            raise BurninFailure()

        return val
class Burner(JobHandler):
"""Burner class."""
def __init__(self):
"""Constructor."""
super(Burner, self).__init__()
self.url_opener = SimpleOpener()
self.nodes = []
self.instances = []
self.to_rem = []
self.disk_count = self.disk_growth = self.disk_size = None
self.hvp = self.bep = None
self.ParseOptions()
self.disk_nodes = {}
self.instance_nodes = {}
self.GetState()
self.confd_reply = None
def ParseOptions(self):
"""Parses the command line options.
In case of command line errors, it will show the usage and exit the
program.
"""
parser = optparse.OptionParser(usage="\n%s" % USAGE,
version=("%%prog (ganeti) %s" %
constants.RELEASE_VERSION),
option_list=OPTIONS)
options, args = parser.parse_args()
if len(args) < 1 or options.os is None:
Usage()
if options.mem_size:
options.maxmem_size = options.mem_size
options.minmem_size = options.mem_size
elif options.minmem_size > options.maxmem_size:
Err("Maximum memory lower than minimum memory")
if options.disk_template not in _SUPPORTED_DISK_TEMPLATES:
Err("Unknown or unsupported disk template '%s'" % options.disk_template)
if options.disk_template == constants.DT_DISKLESS:
disk_size = disk_growth = []
options.do_addremove_disks = False
else:
disk_size = [utils.ParseUnit(v) for v in options.disk_size.split(",")]
disk_growth = [utils.ParseUnit(v)
for v in options.disk_growth.split(",")]
if len(disk_growth) != len(disk_size):
Err("Wrong disk sizes/growth combination")
if ((disk_size and options.disk_template == constants.DT_DISKLESS) or
(not disk_size and options.disk_template != constants.DT_DISKLESS)):
Err("Wrong disk count/disk template combination")
self.disk_size = disk_size
self.disk_growth = disk_growth
self.disk_count = len(disk_size)
if options.nodes and options.iallocator:
Err("Give either the nodes option or the iallocator option, not both")
if options.http_check and not options.name_check:
Err("Can't enable HTTP checks without name checks")
self.opts = options
self.instances = args
self.bep = {
constants.BE_MINMEM: options.minmem_size,
constants.BE_MAXMEM: options.maxmem_size,
constants.BE_VCPUS: options.vcpu_count,
}
self.hypervisor = None
self.hvp = {}
if options.hypervisor:
self.hypervisor, self.hvp = options.hypervisor
if options.reboot_types is None:
options.reboot_types = constants.REBOOT_TYPES
else:
options.reboot_types = options.reboot_types.split(",")
rt_diff = set(options.reboot_types).difference(constants.REBOOT_TYPES)
if rt_diff:
Err("Invalid reboot types specified: %s" % utils.CommaJoin(rt_diff))
socket.setdefaulttimeout(options.net_timeout)
def GetState(self):
"""Read the cluster state from the master daemon."""
if self.opts.nodes:
names = self.opts.nodes.split(",")
else:
names = []
try:
qcl = GetClient()
result = qcl.QueryNodes(names, ["name", "offline", "drained"], False)
except errors.GenericError, err:
err_code, msg = cli.FormatError(err)
Err(msg, exit_code=err_code)
finally:
qcl.Close()
self.nodes = [data[0] for data in result if not (data[1] or data[2])]
op_diagnose = opcodes.OpOsDiagnose(output_fields=["name",
"variants",
"hidden"],
names=[])
result = self.ExecOp(True, op_diagnose)
if not result:
Err("Can't get the OS list")
found = False
for (name, variants, _) in result:
if self.opts.os in cli.CalculateOSNames(name, variants):
found = True
break
if not found:
Err("OS '%s' not found" % self.opts.os)
cluster_info = self.cl.QueryClusterInfo()
self.cluster_info = cluster_info
if not self.cluster_info:
Err("Can't get cluster info")
default_nic_params = self.cluster_info["nicparams"][constants.PP_DEFAULT]
self.cluster_default_nicparams = default_nic_params
if self.hypervisor is None:
self.hypervisor = self.cluster_info["default_hypervisor"]
self.hv_can_migrate = \
hypervisor.GetHypervisorClass(self.hypervisor).CAN_MIGRATE
def FindMatchingDisk(self, instance):
"""Find a disk whose nodes match the instance's disk nodes."""
instance_nodes = self.instance_nodes[instance]
for disk, disk_nodes in self.disk_nodes.iteritems():
if instance_nodes == disk_nodes:
# Erase that disk from the dictionary so that we don't pick it again.
del self.disk_nodes[disk]
return disk
Err("Couldn't find matching detached disk for instance %s" % instance)
@_DoCheckInstances
@_DoBatch(False)
def BurnCreateInstances(self):
"""Create the given instances.
"""
self.to_rem = []
mytor = izip(cycle(self.nodes),
islice(cycle(self.nodes), 1, None),
self.instances)
Log("Creating instances")
for pnode, snode, instance in mytor:
Log("instance %s", instance, indent=1)
if self.opts.iallocator:
pnode = snode = None
msg = "with iallocator %s" % self.opts.iallocator
elif self.opts.disk_template not in constants.DTS_INT_MIRROR:
snode = None
msg = "on %s" % pnode
else:
msg = "on %s, %s" % (pnode, snode)
Log(msg, indent=2)
op = opcodes.OpInstanceCreate(instance_name=instance,
disks=[{"size": size}
for size in self.disk_size],
disk_template=self.opts.disk_template,
nics=self.opts.nics,
mode=constants.INSTANCE_CREATE,
os_type=self.opts.os,
pnode=pnode,
snode=snode,
start=True,
ip_check=self.opts.ip_check,
name_check=self.opts.name_check,
wait_for_sync=True,
file_driver="loop",
file_storage_dir=None,
iallocator=self.opts.iallocator,
beparams=self.bep,
hvparams=self.hvp,
hypervisor=self.hypervisor,
osparams=self.opts.osparams,
)
remove_instance = lambda name: lambda: self.to_rem.append(name)
self.ExecOrQueue(instance, [op], post_process=remove_instance(instance))
@_DoBatch(False)
def BurnModifyRuntimeMemory(self):
"""Alter the runtime memory."""
Log("Setting instance runtime memory")
for instance in self.instances:
Log("instance %s", instance, indent=1)
tgt_mem = self.bep[constants.BE_MINMEM]
op = opcodes.OpInstanceSetParams(instance_name=instance,
runtime_mem=tgt_mem)
Log("Set memory to %s MB", tgt_mem, indent=2)
self.ExecOrQueue(instance, [op])
@_DoBatch(False)
def BurnGrowDisks(self):
"""Grow both the os and the swap disks by the requested amount, if any."""
Log("Growing disks")
for instance in self.instances:
Log("instance %s", instance, indent=1)
for idx, growth in enumerate(self.disk_growth):
if growth > 0:
op = opcodes.OpInstanceGrowDisk(instance_name=instance, disk=idx,
amount=growth, wait_for_sync=True,
ignore_ipolicy=True)
Log("increase disk/%s by %s MB", idx, growth, indent=2)
self.ExecOrQueue(instance, [op])
  @_DoBatch(True)
  def BurnReplaceDisks1D8(self):
    """Replace disks on primary and secondary for drbd8.

    For every instance, all disks are replaced in place: first on the
    secondary node, then on the primary node.

    """
    Log("Replacing disks on the same nodes")
    early_release = self.opts.early_release
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      ops = []
      # Replace on the secondary first, then on the primary.
      for mode in constants.REPLACE_DISK_SEC, constants.REPLACE_DISK_PRI:
        op = opcodes.OpInstanceReplaceDisks(instance_name=instance,
                                            mode=mode,
                                            disks=list(range(self.disk_count)),
                                            early_release=early_release)
        Log("run %s", mode, indent=2)
        ops.append(op)
      self.ExecOrQueue(instance, ops)
  @_DoBatch(True)
  def BurnReplaceDisks2(self):
    """Replace secondary node.

    Each instance's secondary is moved to the node two positions ahead
    in the round-robin node list, or to a node chosen by the
    iallocator if one was given.

    """
    Log("Changing the secondary node")
    mode = constants.REPLACE_DISK_CHG
    mytor = izip(islice(cycle(self.nodes), 2, None),
                 self.instances)
    for tnode, instance in mytor:
      Log("instance %s", instance, indent=1)
      if self.opts.iallocator:
        # Let the allocator pick the new secondary node.
        tnode = None
        msg = "with iallocator %s" % self.opts.iallocator
      else:
        msg = tnode
      op = opcodes.OpInstanceReplaceDisks(instance_name=instance,
                                          mode=mode,
                                          remote_node=tnode,
                                          iallocator=self.opts.iallocator,
                                          disks=[],
                                          early_release=self.opts.early_release)
      Log("run %s %s", mode, msg, indent=2)
      self.ExecOrQueue(instance, [op])
@_DoCheckInstances
@_DoBatch(False)
def BurnFailover(self):
"""Failover the instances."""
Log("Failing over instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op = opcodes.OpInstanceFailover(instance_name=instance,
ignore_consistency=False)
self.ExecOrQueue(instance, [op])
  @_DoCheckInstances
  @_DoBatch(False)
  def BurnMove(self):
    """Move the instances.

    Each instance is moved to the next node in the round-robin node
    list.

    """
    Log("Moving instances")
    mytor = izip(islice(cycle(self.nodes), 1, None),
                 self.instances)
    for tnode, instance in mytor:
      Log("instance %s", instance, indent=1)
      op = opcodes.OpInstanceMove(instance_name=instance,
                                  target_node=tnode)
      self.ExecOrQueue(instance, [op])
@_DoBatch(False)
def BurnMigrate(self):
"""Migrate the instances."""
Log("Migrating instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op1 = opcodes.OpInstanceMigrate(instance_name=instance, mode=None,
cleanup=False)
op2 = opcodes.OpInstanceMigrate(instance_name=instance, mode=None,
cleanup=True)
Log("migration and migration cleanup", indent=2)
self.ExecOrQueue(instance, [op1, op2])
  @_DoCheckInstances
  @_DoBatch(False)
  def BurnImportExport(self):
    """Export the instance, delete it, and import it back.

    A local export on a third node (enode) is used as the intermediate
    step; the export is removed again once the re-import succeeded.

    """
    Log("Exporting and re-importing instances")
    mytor = izip(cycle(self.nodes),
                 islice(cycle(self.nodes), 1, None),
                 islice(cycle(self.nodes), 2, None),
                 self.instances)
    qcl = GetClient()
    for pnode, snode, enode, instance in mytor:
      Log("instance %s", instance, indent=1)
      # read the full name of the instance
      ((full_name, ), ) = qcl.QueryInstances([instance], ["name"], False)
      if self.opts.iallocator:
        pnode = snode = None
        import_log_msg = ("import from %s"
                          " with iallocator %s" %
                          (enode, self.opts.iallocator))
      elif self.opts.disk_template not in constants.DTS_INT_MIRROR:
        snode = None
        import_log_msg = ("import from %s to %s" %
                          (enode, pnode))
      else:
        import_log_msg = ("import from %s to %s, %s" %
                          (enode, pnode, snode))
      # Export to the third node, then remove the instance ...
      exp_op = opcodes.OpBackupExport(instance_name=instance,
                                      target_node=enode,
                                      mode=constants.EXPORT_MODE_LOCAL,
                                      shutdown=True)
      rem_op = opcodes.OpInstanceRemove(instance_name=instance,
                                        ignore_failures=True)
      # ... re-import it from the export directory (keyed by full name) ...
      imp_dir = utils.PathJoin(pathutils.EXPORT_DIR, full_name)
      imp_op = opcodes.OpInstanceCreate(instance_name=instance,
                                        disks=[{"size": size}
                                               for size in self.disk_size],
                                        disk_template=self.opts.disk_template,
                                        nics=self.opts.nics,
                                        mode=constants.INSTANCE_IMPORT,
                                        src_node=enode,
                                        src_path=imp_dir,
                                        pnode=pnode,
                                        snode=snode,
                                        start=True,
                                        ip_check=self.opts.ip_check,
                                        name_check=self.opts.name_check,
                                        wait_for_sync=True,
                                        file_storage_dir=None,
                                        file_driver="loop",
                                        iallocator=self.opts.iallocator,
                                        beparams=self.bep,
                                        hvparams=self.hvp,
                                        osparams=self.opts.osparams,
                                        )
      # ... and finally drop the export itself.
      erem_op = opcodes.OpBackupRemove(instance_name=instance)
      Log("export to node %s", enode, indent=2)
      Log("remove instance", indent=2)
      Log(import_log_msg, indent=2)
      Log("remove export", indent=2)
      self.ExecOrQueue(instance, [exp_op, rem_op, imp_op, erem_op])
    qcl.Close()
  @staticmethod
  def StopInstanceOp(instance):
    """Stop given instance.

    Returns the shutdown opcode without executing it.

    """
    return opcodes.OpInstanceShutdown(instance_name=instance)
  @staticmethod
  def StartInstanceOp(instance):
    """Start given instance.

    Returns the (non-forced) startup opcode without executing it.

    """
    return opcodes.OpInstanceStartup(instance_name=instance, force=False)
  @staticmethod
  def RenameInstanceOp(instance, instance_new, name_check, ip_check):
    """Rename instance.

    Returns the opcode renaming instance to instance_new, with optional
    name/IP resolution checks, without executing it.

    """
    return opcodes.OpInstanceRename(instance_name=instance,
                                    new_name=instance_new,
                                    name_check=name_check,
                                    ip_check=ip_check)
@_DoCheckInstances
@_DoBatch(True)
def BurnStopStart(self):
"""Stop/start the instances."""
Log("Stopping and starting instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op1 = self.StopInstanceOp(instance)
op2 = self.StartInstanceOp(instance)
self.ExecOrQueue(instance, [op1, op2])
  @_DoBatch(False)
  def BurnRemove(self):
    """Remove the instances.

    Only instances recorded in to_rem (those created by this run) are
    removed; failures during removal are ignored.

    """
    Log("Removing instances")
    for instance in self.to_rem:
      Log("instance %s", instance, indent=1)
      op = opcodes.OpInstanceRemove(instance_name=instance,
                                    ignore_failures=True)
      self.ExecOrQueue(instance, [op])
  def BurnRename(self, name_check, ip_check):
    """Rename the instances.

    Note that this function will not execute in parallel, since we
    only have one target for rename.

    Each instance is stopped, renamed to the single rename target,
    started and checked, then renamed back and checked again.

    """
    Log("Renaming instances")
    rename = self.opts.rename
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      op_stop1 = self.StopInstanceOp(instance)
      op_stop2 = self.StopInstanceOp(rename)
      op_rename1 = self.RenameInstanceOp(instance, rename, name_check, ip_check)
      op_rename2 = self.RenameInstanceOp(rename, instance, name_check, ip_check)
      op_start1 = self.StartInstanceOp(rename)
      op_start2 = self.StartInstanceOp(instance)
      self.ExecOp(False, op_stop1, op_rename1, op_start1)
      self._CheckInstanceAlive(rename)
      self.ExecOp(False, op_stop2, op_rename2, op_start2)
      self._CheckInstanceAlive(instance)
  @_DoCheckInstances
  @_DoBatch(True)
  def BurnReinstall(self):
    """Reinstall the instances.

    Each instance is stopped, reinstalled twice (once with the default
    OS, once explicitly passing the configured OS) and started again.

    """
    Log("Reinstalling instances")
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      op1 = self.StopInstanceOp(instance)
      op2 = opcodes.OpInstanceReinstall(instance_name=instance)
      Log("reinstall without passing the OS", indent=2)
      op3 = opcodes.OpInstanceReinstall(instance_name=instance,
                                        os_type=self.opts.os)
      Log("reinstall specifying the OS", indent=2)
      op4 = self.StartInstanceOp(instance)
      self.ExecOrQueue(instance, [op1, op2, op3, op4])
@_DoCheckInstances
@_DoBatch(True)
def BurnReboot(self):
"""Reboot the instances."""
Log("Rebooting instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
ops = []
for reboot_type in self.opts.reboot_types:
op = opcodes.OpInstanceReboot(instance_name=instance,
reboot_type=reboot_type,
ignore_secondaries=False)
Log("reboot with type '%s'", reboot_type, indent=2)
ops.append(op)
self.ExecOrQueue(instance, ops)
  @_DoCheckInstances
  @_DoBatch(True)
  def BurnRenameSame(self, name_check, ip_check):
    """Rename the instances to their own name.

    This exercises the stop/rename/start path without needing a spare
    instance name.

    """
    Log("Renaming the instances to their own name")
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      op1 = self.StopInstanceOp(instance)
      op2 = self.RenameInstanceOp(instance, instance, name_check, ip_check)
      Log("rename to the same name", indent=2)
      op4 = self.StartInstanceOp(instance)
      self.ExecOrQueue(instance, [op1, op2, op4])
  @_DoCheckInstances
  @_DoBatch(True)
  def BurnActivateDisks(self):
    """Activate and deactivate disks of the instances.

    Per instance the sequence is: activate while running, stop,
    activate again while stopped, deactivate, start.

    """
    Log("Activating/deactivating disks")
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      op_start = self.StartInstanceOp(instance)
      op_act = opcodes.OpInstanceActivateDisks(instance_name=instance)
      op_deact = opcodes.OpInstanceDeactivateDisks(instance_name=instance)
      op_stop = self.StopInstanceOp(instance)
      Log("activate disks when online", indent=2)
      Log("activate disks when offline", indent=2)
      Log("deactivate disks (when offline)", indent=2)
      self.ExecOrQueue(instance, [op_act, op_stop, op_act, op_deact, op_start])
  @_DoBatch(False)
  def BurnAddRemoveNICs(self):
    """Add, change and remove an extra NIC for the instances."""
    Log("Adding and removing NICs")
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      # Add a NIC with default parameters ...
      op_add = opcodes.OpInstanceSetParams(
        instance_name=instance, nics=[(constants.DDM_ADD, {})])
      # ... regenerate the MAC address of the last NIC (index -1) ...
      op_chg = opcodes.OpInstanceSetParams(
        instance_name=instance, nics=[(constants.DDM_MODIFY,
                                       -1, {"mac": constants.VALUE_GENERATE})])
      # ... and remove the last NIC again.
      op_rem = opcodes.OpInstanceSetParams(
        instance_name=instance, nics=[(constants.DDM_REMOVE, {})])
      Log("adding a NIC", indent=2)
      Log("changing a NIC", indent=2)
      Log("removing last NIC", indent=2)
      self.ExecOrQueue(instance, [op_add, op_chg, op_rem])
  def ConfdCallback(self, reply):
    """Callback for confd queries

    Checks each reply's status and cross-validates the answer for the
    request types used by the burner; Err() is called on any mismatch.

    """
    if reply.type == confd_client.UPCALL_REPLY:
      if reply.server_reply.status != constants.CONFD_REPL_STATUS_OK:
        Err("Query %s gave non-ok status %s: %s" % (reply.orig_request,
                                                    reply.server_reply.status,
                                                    reply.server_reply))
      if reply.orig_request.type == constants.CONFD_REQ_PING:
        Log("Ping: OK", indent=1)
      elif reply.orig_request.type == constants.CONFD_REQ_CLUSTER_MASTER:
        # The answer must name the master we already know about.
        if reply.server_reply.answer == self.cluster_info["master"]:
          Log("Master: OK", indent=1)
        else:
          Err("Master: wrong: %s" % reply.server_reply.answer)
      elif reply.orig_request.type == constants.CONFD_REQ_NODE_ROLE_BYNAME:
        if reply.server_reply.answer == constants.CONFD_NODE_ROLE_MASTER:
          Log("Node role for master: OK", indent=1)
        else:
          Err("Node role for master: wrong: %s" % reply.server_reply.answer)
      elif reply.orig_request.type == constants.CONFD_REQ_INSTANCE_DISKS:
        # Store the disks answer for later use (see BurnAddDisks).
        self.confd_reply = reply.server_reply.answer
  def DoConfdRequestReply(self, req):
    """Send one confd request and wait until all replies arrived.

    Registers the request's salt with the counting callback, sends it
    synchronously and then pumps replies until all are accounted for;
    Err() is raised if replies stop arriving early.

    """
    self.confd_counting_callback.RegisterQuery(req.rsalt)
    self.confd_client.SendRequest(req, async=False)
    while not self.confd_counting_callback.AllAnswered():
      if not self.confd_client.ReceiveReply():
        Err("Did not receive all expected confd replies")
        break
  def BurnConfd(self):
    """Run confd queries for our instances.

    The following confd queries are tested:
      - CONFD_REQ_PING: simple ping
      - CONFD_REQ_CLUSTER_MASTER: cluster master
      - CONFD_REQ_NODE_ROLE_BYNAME: node role, for the master

    """
    Log("Checking confd results")
    # Wrap our callback with filtering and reply-counting layers.
    filter_callback = confd_client.ConfdFilterCallback(self.ConfdCallback)
    counting_callback = confd_client.ConfdCountingCallback(filter_callback)
    self.confd_counting_callback = counting_callback
    self.confd_client = confd_client.GetConfdClient(counting_callback)
    req = confd_client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
    self.DoConfdRequestReply(req)
    req = confd_client.ConfdClientRequest(
      type=constants.CONFD_REQ_CLUSTER_MASTER)
    self.DoConfdRequestReply(req)
    req = confd_client.ConfdClientRequest(
      type=constants.CONFD_REQ_NODE_ROLE_BYNAME,
      query=self.cluster_info["master"])
    self.DoConfdRequestReply(req)
  @_DoCheckInstances
  @_DoBatch(False)
  def BurnAddDisks(self):
    """Add an extra disk to every instance and then detach it."""
    Log("Adding and detaching disks")
    # Instantiate a Confd client
    filter_callback = confd_client.ConfdFilterCallback(self.ConfdCallback)
    counting_callback = confd_client.ConfdCountingCallback(filter_callback)
    self.confd_counting_callback = counting_callback
    self.confd_client = confd_client.GetConfdClient(counting_callback)
    # Iterate all instances, start them, add a disk with a unique name and
    # detach it. Do all disk operations with hotplugging (if possible).
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      # Fetch disk info for an instance from the confd. The result of the query
      # will be stored in the confd_reply attribute of Burner.
      req = (confd_client.ConfdClientRequest(
        type=constants.CONFD_REQ_INSTANCE_DISKS, query=instance))
      self.DoConfdRequestReply(req)
      disk_name = RandomString()
      # Record the union of all nodes holding the instance's disks, so
      # that BurnRemoveDisks can later find a matching detached disk.
      nodes = [set(disk["nodes"]) for disk in self.confd_reply]
      nodes = reduce(or_, nodes)
      self.instance_nodes[instance] = nodes
      self.disk_nodes[disk_name] = nodes
      op_stop = self.StopInstanceOp(instance)
      op_add = opcodes.OpInstanceSetParams(
        instance_name=instance, hotplug_if_possible=True,
        disks=[(constants.DDM_ADD, {"size": self.disk_size[0],
                                    "name": disk_name})])
      op_detach = opcodes.OpInstanceSetParams(
        instance_name=instance, hotplug_if_possible=True,
        disks=[(constants.DDM_DETACH, {})])
      op_start = self.StartInstanceOp(instance)
      Log("adding a disk with name %s" % disk_name, indent=2)
      Log("detaching last disk", indent=2)
      # Sequence: ensure started, add, detach, stop, start again.
      self.ExecOrQueue(instance, [op_start, op_add, op_detach, op_stop,
                                  op_start])
  @_DoCheckInstances
  @_DoBatch(False)
  def BurnRemoveDisks(self):
    """Attach a previously detached disk to an instance and then remove it."""
    Log("Attaching and removing disks")
    # Iterate all instances in random order, attach the detached disks, remove
    # them and then restart the instances. Do all disk operation with
    # hotplugging (if possible).
    instances_copy = list(self.instances)
    random.shuffle(instances_copy)
    for instance in instances_copy:
      Log("instance %s", instance, indent=1)
      # Pick the detached disk whose node set matches this instance.
      disk_name = self.FindMatchingDisk(instance)
      op_attach = opcodes.OpInstanceSetParams(
        instance_name=instance, hotplug_if_possible=True,
        disks=[(constants.DDM_ATTACH, {"name": disk_name})])
      op_rem = opcodes.OpInstanceSetParams(
        instance_name=instance, hotplug_if_possible=True,
        disks=[(constants.DDM_REMOVE, {})])
      op_stop = self.StopInstanceOp(instance)
      op_start = self.StartInstanceOp(instance)
      Log("attaching a disk with name %s" % disk_name, indent=2)
      Log("removing last disk", indent=2)
      self.ExecOrQueue(instance, [op_attach, op_rem, op_stop, op_start])
    # Disk nodes are useful only for this test.
    del self.disk_nodes
    del self.instance_nodes
  def _CheckInstanceAlive(self, instance):
    """Check if an instance is alive by doing http checks.

    This will try to retrieve the url on the instance /hostname.txt
    and check that it contains the hostname of the instance. Any
    IOError (connection refused, no route to host, ...) is retried
    once per second for up to net_timeout seconds; if the page still
    cannot be fetched, or its content does not match the instance
    name, InstanceDown is raised.

    """
    # Skip entirely when http checking is disabled.
    if not self.opts.http_check:
      return
    end_time = time.time() + self.opts.net_timeout
    url = None
    while time.time() < end_time and url is None:
      try:
        url = self.url_opener.open("http://%s/hostname.txt" % instance)
      except IOError:
        # here we can have connection refused, no route to host, etc.
        time.sleep(1)
    if url is None:
      raise InstanceDown(instance, "Cannot contact instance")
    hostname = url.read().strip()
    url.close()
    if hostname != instance:
      raise InstanceDown(instance, ("Hostname mismatch, expected %s, got %s" %
                                    (instance, hostname)))
  def BurninCluster(self):
    """Test a cluster intensively.

    This will create instances and then start/stop/failover them.
    It is safe for existing instances but could impact performance.

    Sub-tests are run in a fixed order, each guarded by its command
    line option and by the cluster/disk-template capabilities; on any
    error the collected opcode feedback is dumped and the created
    instances are removed (unless keep_instances is set).

    """
    Log("Testing global parameters")
    if (len(self.nodes) == 1 and
        self.opts.disk_template not in _SINGLE_NODE_DISK_TEMPLATES):
      Err("When one node is available/selected the disk template must"
          " be one of %s" % utils.CommaJoin(_SINGLE_NODE_DISK_TEMPLATES))
    # has_err stays True until the very end; the finally clause uses it
    # to decide whether to dump the opcode feedback buffer.
    has_err = True
    try:
      self.BurnCreateInstances()
      if self.opts.do_startstop:
        self.BurnStopStart()
      if self.bep[constants.BE_MINMEM] < self.bep[constants.BE_MAXMEM]:
        self.BurnModifyRuntimeMemory()
      if self.opts.do_replace1 and \
         self.opts.disk_template in constants.DTS_INT_MIRROR:
        self.BurnReplaceDisks1D8()
      if (self.opts.do_replace2 and len(self.nodes) > 2 and
          self.opts.disk_template in constants.DTS_INT_MIRROR):
        self.BurnReplaceDisks2()
      if (self.opts.disk_template in constants.DTS_GROWABLE and
          compat.any(n > 0 for n in self.disk_growth)):
        self.BurnGrowDisks()
      if self.opts.do_failover and \
         self.opts.disk_template in constants.DTS_MIRRORED:
        self.BurnFailover()
      if self.opts.do_migrate:
        if self.opts.disk_template not in constants.DTS_MIRRORED:
          Log("Skipping migration (disk template %s does not support it)",
              self.opts.disk_template)
        elif not self.hv_can_migrate:
          Log("Skipping migration (hypervisor %s does not support it)",
              self.hypervisor)
        else:
          self.BurnMigrate()
      if (self.opts.do_move and len(self.nodes) > 1 and
          self.opts.disk_template in [constants.DT_PLAIN, constants.DT_FILE]):
        self.BurnMove()
      if (self.opts.do_importexport and
          self.opts.disk_template in _IMPEXP_DISK_TEMPLATES):
        self.BurnImportExport()
      if self.opts.do_reinstall:
        self.BurnReinstall()
      if self.opts.do_reboot:
        self.BurnReboot()
      if self.opts.do_renamesame:
        self.BurnRenameSame(self.opts.name_check, self.opts.ip_check)
      if self.opts.do_confd_tests:
        self.BurnConfd()
      default_nic_mode = self.cluster_default_nicparams[constants.NIC_MODE]
      # Don't add/remove nics in routed mode, as we would need an ip to add
      # them with
      if self.opts.do_addremove_nics:
        if default_nic_mode == constants.NIC_MODE_BRIDGED:
          self.BurnAddRemoveNICs()
        else:
          Log("Skipping nic add/remove as the cluster is not in bridged mode")
      if self.opts.do_activate_disks:
        self.BurnActivateDisks()
      if self.opts.do_addremove_disks:
        self.BurnAddDisks()
        self.BurnRemoveDisks()
      if self.opts.rename:
        self.BurnRename(self.opts.name_check, self.opts.ip_check)
      has_err = False
    finally:
      if has_err:
        Log("Error detected: opcode buffer follows:\n\n")
        Log(self.GetFeedbackBuf())
        Log("\n\n")
      if not self.opts.keep_instances:
        try:
          self.BurnRemove()
        except Exception, err: # pylint: disable=W0703
          if has_err: # already detected errors, so errors in removal
                      # are quite expected
            Log("Note: error detected during instance remove: %s", err)
          else: # non-expected error
            raise
    return constants.EXIT_SUCCESS
def Main():
  """Main function.

  Sets up file and stderr logging, then runs the full burnin sequence
  and returns its exit code.

  """
  utils.SetupLogging(pathutils.LOG_BURNIN, sys.argv[0],
                     debug=False, stderr_logging=True)
  return Burner().BurninCluster()
| |
"""CardinalityMatching.py
Find maximum cardinality matchings in general undirected graphs.
D. Eppstein, UC Irvine, September 6, 2003.
"""
from .union_find import UnionFind
from .util import arbitrary_item
def matching(G, initial_matching=None):
    """Find a maximum cardinality matching in a graph G.

    G is represented in modified GvR form: iter(G) lists its vertices;
    iter(G[v]) lists the neighbors of v; w in G[v] tests adjacency.
    For maximal efficiency, G and G[v] should be dictionaries, so
    that adjacency tests take constant time each.

    The output is a dictionary mapping vertices to their matches;
    unmatched vertices are omitted from the dictionary.

    We use Edmonds' blossom-contraction algorithm, as described e.g.
    in Galil's 1986 Computing Surveys paper.
    """

    # Copy initial matching so we can use it nondestructively
    # and augment it greedily to reduce main loop iterations
    matching = greedy_matching(G, initial_matching)

    def augment():
        """Search for a single augmenting path.

        Returns true if the matching size was increased, false otherwise.
        """

        # Data structures for augmenting path search:
        #
        # leader: union-find structure; the leader of a blossom is one
        # of its vertices (not necessarily topmost), and leader[v] always
        # points to the leader of the largest blossom containing v
        #
        # S: dictionary of blossoms at even levels of the structure tree.
        # Dictionary keys are names of blossoms (as returned by the union-find
        # data structure) and values are the structure tree parent of the
        # blossom
        # (a T-node, or the top vertex if the blossom is a root of a structure
        # tree).
        #
        # T: dictionary of vertices at odd levels of the structure tree.
        # Dictionary keys are the vertices; T[x] is a vertex with an unmatched
        # edge to x. To find the parent in the structure tree, use
        # leader[T[x]].
        #
        # unexplored: collection of unexplored vertices within blossoms of S
        #
        # base: if x was originally a T-vertex, but becomes part of a blossom,
        # base[t] will be the pair (v,w) at the base of the blossom, where v
        # and t
        # are on the same side of the blossom and w is on the other side.

        leader = UnionFind()
        S = {}
        T = {}
        unexplored = []
        base = {}

        # Subroutines for augmenting path search.
        # Many of these are called only from one place, but are split out
        # as subroutines to improve modularization and readability.

        def blossom(v, w, a):
            """Create a new blossom from edge v-w with common ancestor a."""

            def find_side(v, w):
                # Walk from v up to the common ancestor a, recording the
                # blossom base for every T-node passed on the way and
                # scheduling those T-nodes for (re-)exploration as S-nodes.
                path = [leader[v]]
                b = (v, w) # new base for all T nodes found on the path
                while path[-1] != a:
                    tnode = S[path[-1]]
                    path.append(tnode)
                    base[tnode] = b
                    unexplored.append(tnode)
                    path.append(leader[T[tnode]])
                return path

            a = leader[a] # sanity check
            path1, path2 = find_side(v, w), find_side(w, v)
            leader.union(*path1)
            leader.union(*path2)
            S[leader[a]] = S[a] # update structure tree

        topless = object() # should be unequal to any graph vertex

        def alternating_path(start, goal=topless):
            """Return sequence of vertices on alternating path from start to goal.

            The goal must be a T node along the path from the start to
            the root of the structure tree. If goal is omitted, we find
            an alternating path to the structure tree root.
            """
            path = []
            while 1:
                # Expand blossoms encountered on the way: recurse into the
                # blossom side from its base, reversing the sub-path.
                while start in T:
                    v, w = base[start]
                    vs = alternating_path(v, start)
                    vs.reverse()
                    path += vs
                    start = w
                path.append(start)
                if start not in matching:
                    return path # reached top of structure tree, done!
                tnode = matching[start]
                path.append(tnode)
                if tnode == goal:
                    return path # finished recursive subpath
                start = T[tnode]

        def alternate(v):
            """Make v unmatched by alternating the path to the root of its
            structure tree.
            """
            path = alternating_path(v)
            path.reverse()
            # Re-match every other edge along the reversed path.
            for i in range(0, len(path) - 1, 2):
                matching[path[i]] = path[i + 1]
                matching[path[i + 1]] = path[i]

        def add_match(v, w):
            """Here with an S-S edge vw connecting vertices in different
            structure trees. Find the corresponding augmenting path and use it
            to augment the matching.
            """
            alternate(v)
            alternate(w)
            matching[v] = w
            matching[w] = v

        def ss(v, w):
            """Handle detection of an S-S edge in augmenting path search.

            Like augment(), returns true iff the matching size was increased.
            """
            if leader[v] == leader[w]:
                return False # self-loop within blossom, ignore

            # parallel search up two branches of structure tree
            # until we find a common ancestor of v and w
            path1, head1 = {}, v
            path2, head2 = {}, w

            def step(path, head):
                # Advance one level up the structure tree, recording the
                # visited nodes in path for the crossing tests below.
                head = leader[head]
                parent = leader[S[head]]
                if parent == head:
                    return head # found root of structure tree
                path[head] = parent
                path[parent] = leader[T[parent]]
                return path[parent]

            while 1:
                head1 = step(path1, head1)
                head2 = step(path2, head2)

                if head1 == head2:
                    # Same root reached simultaneously: vw closes a blossom.
                    blossom(v, w, head1)
                    return False

                if leader[S[head1]] == head1 and leader[S[head2]] == head2:
                    # Two distinct roots: vw joins two trees, augment.
                    add_match(v, w)
                    return True

                if head1 in path2:
                    blossom(v, w, head1)
                    return False

                if head2 in path1:
                    blossom(v, w, head2)
                    return False

        # Start of main augmenting path search code.
        for v in G:
            if v not in matching:
                S[v] = v
                unexplored.append(v)

        # index into unexplored, in FIFO order so we get short paths
        current = 0
        while current < len(unexplored):
            v = unexplored[current]
            current += 1
            for w in G[v]:
                if leader[w] in S: # S-S edge: blossom or augmenting path
                    if ss(v, w):
                        return True
                elif w not in T: # previously unexplored node, add as T-node
                    T[w] = v
                    u = matching[w]
                    if leader[u] not in S:
                        S[u] = w # and add its match as an S-node
                        unexplored.append(u)
        return False # ran out of graph without finding an augmenting path

    # augment the matching until it is maximum
    while augment():
        pass
    return matching
def greedy_matching(G, initial_matching=None):
    """Near-linear-time greedy heuristic for creating high-cardinality matching.

    If there is any vertex with one unmatched neighbor, we match it.
    Otherwise, if there is a vertex with two unmatched neighbors, we contract
    it away and store the contraction on a stack for later matching.
    If neither of these two cases applies, we match an arbitrary edge.

    Returns a dictionary mapping matched vertices to their partners
    (extending initial_matching, if given).
    """

    # Copy initial matching so we can use it nondestructively
    matching = {}
    if initial_matching:
        for x in initial_matching:
            matching[x] = initial_matching[x]

    # Copy graph to new subgraph of available edges
    # Representation: nested dictionary rep->rep->pair
    # where the reps are representative vertices for merged clusters
    # and the pair is an unmatched original pair of vertices
    avail = {}
    has_edge = False
    for v in G:
        if v not in matching:
            avail[v] = {}
            for w in G[v]:
                if w not in matching:
                    avail[v][w] = (v, w)
                    has_edge = True
            if not avail[v]:
                del avail[v]
    if not has_edge:
        # No edge joins two unmatched vertices; nothing to add.
        return matching

    # make sets of degree one and degree two vertices
    deg1 = {v for v in avail if len(avail[v]) == 1}
    deg2 = {v for v in avail if len(avail[v]) == 2}
    # Edges removed by degree-two contractions, revisited at the end.
    d2edges = []

    def update_degree(v):
        """Cluster degree changed, update sets."""
        if v in deg1:
            deg1.remove(v)
        elif v in deg2:
            deg2.remove(v)
        if len(avail[v]) == 0:
            del avail[v]
        elif len(avail[v]) == 1:
            deg1.add(v)
        elif len(avail[v]) == 2:
            deg2.add(v)

    def add_match(v, w):
        """Add edge connecting two given cluster reps, update avail."""
        p, q = avail[v][w]
        matching[p] = q
        matching[q] = p
        # Remove both endpoints' remaining edges from the subgraph.
        for x in avail[v].keys():
            if x != w:
                del avail[x][v]
                update_degree(x)
        for x in avail[w].keys():
            if x != v:
                del avail[x][w]
                update_degree(x)
        avail[v] = avail[w] = {}
        update_degree(v)
        update_degree(w)

    def contract(v):
        """Handle degree two vertex."""
        u, w = avail[v] # find reps for two neighbors
        d2edges.extend([avail[v][u], avail[v][w]])
        del avail[u][v]
        del avail[w][v]
        if len(avail[u]) > len(avail[w]):
            u, w = w, u # swap to preserve near-linear time bound
        # Merge u's remaining edges into w's cluster.
        for x in avail[u].keys():
            del avail[x][u]
            if x in avail[w]:
                update_degree(x)
            elif x != w:
                avail[x][w] = avail[w][x] = avail[u][x]
        avail[u] = avail[v] = {}
        update_degree(u)
        update_degree(v)
        update_degree(w)

    # loop adding edges or contracting deg2 clusters
    while avail:
        if deg1:
            v = arbitrary_item(deg1)
            w = arbitrary_item(avail[v])
            add_match(v, w)
        elif deg2:
            v = arbitrary_item(deg2)
            contract(v)
        else:
            v = arbitrary_item(avail)
            w = arbitrary_item(avail[v])
            add_match(v, w)

    # at this point the edges listed in d2edges form a matchable tree
    # repeat the degree one part of the algorithm only on those edges
    avail = {}
    d2edges = [(u, x)
               for u, x in d2edges if u not in matching and x not in matching]
    for u, v in d2edges:
        avail[u] = {}
        avail[v] = {}
    for u, v in d2edges:
        avail[u][v] = avail[v][u] = (u, v)
    deg1 = {v for v in avail if len(avail[v]) == 1}
    while deg1:
        v = arbitrary_item(deg1)
        w = arbitrary_item(avail[v])
        add_match(v, w)

    return matching
| |
# author Mauro Baresic
# email: mauro.baresic@outlook.com
# An efficient string matching algorithm with k differences
# for nucleotide and amino acid sequences
# algorithm authors: Gad M. Landau, Uzi Vishkin, Ruth Nussinov
import sys
# procedure for parsing file in FASTA format
def parseFastaFile(path):
    """Parse a FASTA file and return its first sequence as a string.

    Reads up to the first '>' header, concatenates the following
    sequence lines (stripping trailing whitespace), and stops at the
    next header. Blank lines and ';' comment lines are skipped.
    """
    sequence = ""
    readingSequence = False
    # 'with' guarantees the handle is closed even on parse errors.
    with open(path, 'r') as f:
        for row in f:
            if not row.strip():
                # Skip blank lines; the original code would abort (or crash
                # on a fully empty line) when it hit one.
                continue
            if row[0] == ';':
                # FASTA comment lines start with ';'. The original tested
                # for ',' which never occurs in real FASTA comments, so a
                # leading comment aborted parsing and returned "".
                continue
            elif row[0] == '>' and readingSequence == False:
                readingSequence = True
            elif row[0] != '>' and readingSequence == True:
                sequence += row.rstrip()
            else:
                # A second '>' header: only the first sequence is wanted.
                break
    return sequence
# class that implements Landau-Vishkin-Nussinov algorithm
class MinDifferenceEfficient():
    """Approximate string matching with at most k differences.

    Implements the Landau-Vishkin-Nussinov algorithm: for every start
    position i in the text B, diagonals d are extended with up to k
    edit differences. Pattern self-comparison data (MAXLENGTH) and
    previously matched text segments (sequences of (p, c, f) triples)
    are used to advance along a diagonal without re-comparing
    characters.

    NOTE(review): this class is Python 2 only (print statement,
    xrange, dict.has_key).
    """
    # pattern string
    R = ''
    # text string
    B = ''
    # length of pattern
    m = 0
    # length of text
    n = 0
    # maximal number of allowed differences
    k = 0
    # hashtable L[d,e] that denotes the largest
    # row i on diagonal d with e differences
    L = None
    # [m x m] matrix for pattern analysis
    MAXLENGTH = None
    # hash for all sequences of triples (p,c,f)
    T = None
    # current sequence of triples (p,c,f)
    Sij = None
    # pair (d,e) that denotes new current sequence
    newSij = None
    # constructor
    def __init__(self,R,B):
        """Store pattern R and text B and precompute the pattern analysis."""
        self.R = R
        self.B = B
        self.m = len(R)
        self.n = len(B)
        self.L = dict()
        self.T = dict()
        self.patternAnalysis()
    # procedure that computes pattern analysis
    def patternAnalysis(self): # O(m * m *m)
        """Fill MAXLENGTH[(i,j)] with the length of the longest common
        prefix of R[i:] and R[j:] (symmetric in i and j).

        Returns -1 for an empty pattern, 0 on success.
        """
        if ( len(self.R) < 1 ):
            return -1
        self.MAXLENGTH = dict()
        for i in xrange(self.m):
            for j in xrange(self.m):
                self.MAXLENGTH[(i,j)] = 0
        for i in xrange(self.m):
            for j in xrange(i,self.m):
                # Extend the common prefix of R[i:] and R[j:] until the
                # first mismatch or the end of the pattern.
                length = 0
                for p in xrange(self.m - j):
                    if (self.R[i + p] != self.R[j + p]):
                        break
                    length += 1
                self.MAXLENGTH[(i,j)] = length
                self.MAXLENGTH[(j,i)] = length
        return 0
    # procedure that finds covering of index h in sequence of triples (p,c,f)
    def findCovering(self, S, h, idx): #returns (int, (int, int, int))
        """Return (index, triple) for the triple in S covering text
        position h, scanning from position idx onwards.

        For a covering triple with f > 0, the returned triple is shifted
        so that its text position equals h and f counts the remaining
        covered length from h.
        """
        #S = [(20,3,1),(21,0,0),(22,5,2),(24,0,0),(25,7,3),(28,11,1),(29,0,0)]
        id_last = idx
        for c in xrange(id_last, len(S)):
            triple = S[c]
            if (h == triple[0] and h == triple[0] + triple[2]):
                id_last = c
                break
            elif (h >= triple[0] and h< triple[0] + triple[2]):
                id_last = c
                break
        if (S[id_last][2] == 0):
            return (id_last, S[id_last])
        else:
            difference = h - S[id_last][0]
            return (id_last,(difference + S[id_last][0], difference + S[id_last][1], S[id_last][2] - difference))
    # procedure for calculating Landau-Vishkin-Nussinov algorithm
    def calculate(self, k):
        """Report every start position i in B where the pattern occurs
        with at most k differences (one line printed per occurrence).

        Returns -1 if the pattern is empty, None otherwise.
        """
        self.k = k
        if (self.MAXLENGTH is None):
            if (self.patternAnalysis() == -1):
                return -1
        # j: rightmost text position matched so far (across all i);
        # positions up to j can be skipped via the stored triples.
        j = 0
        max_j = 0
        idx = 0
        for i in xrange(self.n - self.m + self.k + 1):
            next_i = False
            self.L = dict()
            self.T = dict()
            #1 Initialization
            # Seed boundary diagonals so that step #3 below can read
            # L[(d-1,e-1)], L[(d,e-1)] and L[(d+1,e-1)] unconditionally.
            for d in xrange(-(self.k + 1), (self.k + 1) + 1):
                self.L[(d,abs(d)-2)] = - float("inf")
                self.T[(d,abs(d)-2)] = list()
                if (d < 0):
                    self.L[(d,abs(d)-1)] = abs(d) - 1
                    self.T[(d,abs(d)-1)] = list()
                else:
                    self.L[(d,abs(d)-1)] = -1
                    self.T[(d,abs(d)-1)] = list()
            #2
            for e in xrange(self.k + 1):
                for d in xrange(- e, e +1):
                    #3
                    # Best row reachable with e differences on diagonal d:
                    # substitution, insertion or deletion from e-1 entries.
                    row = max((self.L[(d,e-1)] + 1, self.L[(d-1,e-1)], self.L[(d+1,e-1)] + 1))
                    row_old = row
                    if (row == self.L[(d-1,e-1)]):
                        idx = 0
                        self.T[(d,e)] = self.T[(d-1,e-1)][:]
                        self.T[(d,e)].append((i+row+d-1,0,0))
                    elif (row == self.L[(d,e-1)] + 1):
                        idx = 1
                        self.T[(d,e)] = self.T[(d,e-1)][:]
                        self.T[(d,e)].append((i+row+d-1,0,0))
                    elif (row == self.L[(d+1,e-1)] + 1):
                        idx = 2
                        self.T[(d,e)] = self.T[(d+1,e-1)][:]
                    #4 new
                    # Fast extension: while still inside already-covered
                    # text (<= j), reuse stored triples and MAXLENGTH to
                    # advance without comparing characters one by one.
                    GoTo = False
                    while ((row < self.m) and (i + row + d < self.n) and ( (i + row + d + 1) <= j)):
                        #4 new.1
                        c=0
                        f=0
                        if (self.Sij is not None):
                            last_id, triple = self.findCovering(self.Sij,i+row+d,0)
                            c = triple[1]
                            f = triple[2]
                        #4 new.2
                        if (f >= 1): #case a
                            #4 new.3
                            if (f != self.MAXLENGTH[(c,row)]): #case a1
                                row += min((f,self.MAXLENGTH[(c,row)]))
                                #GOTO 5
                                GoTo = True
                                break
                            else: #case a2
                                row += f
                        else: #case b
                            #4 new.4
                            if (self.B[i + row + d] != self.R[row]): #case b1
                                #GOTO 5
                                GoTo = True
                                break
                            else: #case b2
                                row += 1
                    if (False == GoTo):
                        #4 old
                        # Plain character-by-character extension beyond the
                        # already-covered region.
                        while ((row < self.m) and (i + row + d < self.n) and (self.R[row] == self.B[i + row + d])):
                            row += 1
                    #5
                    self.L[(d,e)] = row
                    if (row > row_old):
                        self.T[(d,e)].append((i + row_old + d, row_old, row - row_old))
                    if (row + d + i > max_j):
                        max_j = row + d + i
                        self.newSij = (d,e)
                    #6
                    if (row == self.m):
                        print "An occurence with <=",self.k,"differences of the pattern starts at",i
                        #GOTO 7
                        next_i = True
                        break
                #GOTO 7
                if (next_i == True):
                    break
            #7
            # Remember the triple sequence reaching furthest into the text
            # so the next start position can skip re-matched segments.
            if (max_j > j):
                j = max_j
                self.Sij = self.T[self.newSij]
            #self.outputScreen()
    # procedure that prints L to screen
    def outputScreen(self):
        """Print the populated entries of L to stdout, one row per line."""
        for i in xrange(- self.k, self.k +1):
            row = ''
            for j in xrange(- self.k, self.k +1):
                if self.L.has_key((i,j)):
                    row += "L(" + str(i) + ', '+ str(j) + ')=' + str(self.L[(i,j)]) + '\t'
            sys.stdout.write(row + '\n')
    # procedure that prints L to file
    def outputFile(self, path):
        """Write the populated entries of L to the given file path."""
        f = open(path,'w')
        for i in xrange(- self.k, self.k +1):
            row = ''
            for j in xrange(- self.k, self.k +1):
                if self.L.has_key((i,j)):
                    row += "L(" + str(i) + ', '+ str(j) + ')=' + str(self.L[(i,j)]) + '\t'
            f.write(row + '\n')
        f.close()
    # procedure that computes pattern analysis
    def computeMAXLENGTH(self):
        """Alternative O(m^2) dynamic-programming computation of MAXLENGTH.

        Fills the same table as patternAnalysis() by extending diagonals
        from the end of the pattern backwards. Not called by calculate().
        """
        self.MAXLENGTH = dict()
        for i in xrange(self.m):
            for j in xrange(self.m):
                self.MAXLENGTH[(i,j)] = 0
        for d in xrange(self.m):
            if (self.R[self.m-d-1] == self.R[self.m-1]):
                self.MAXLENGTH[(self.m-d-1,self.m-1)] = 1
                self.MAXLENGTH[(self.m-1,self.m-d-1)] = 1
            #else:
                #self.MAXLENGTH[(self.m-d-1,self.m-1)] = 0
        for d in xrange(self.m):
            for i in xrange(self.m - 2 -d, -1, -1):
                if (self.R[i] == self.R[i +d]):
                    self.MAXLENGTH[(i,i+d)] = 1 + self.MAXLENGTH[(i + 1,i + d +1)]
                    self.MAXLENGTH[(i+d,i)] = self.MAXLENGTH[(i,i+d)]
                #else:
                    #self.MAXLENGTH[(i,i+d)] = 0
                    #self.MAXLENGTH[(i+d,i)] = 0
if __name__ == "__main__":
    # Usage: python <script> <pattern.fasta> <text.fasta> <k>
    # Reports every text position where the pattern occurs with at most
    # k differences (see MinDifferenceEfficient.calculate above).
    patternPath = sys.argv[1]
    textPath = sys.argv[2]
    k = int(sys.argv[3])
    # parseFastaFile is defined earlier in this file; presumably it
    # returns the raw sequence string of a FASTA file -- TODO confirm.
    R = parseFastaFile(patternPath)
    B = parseFastaFile(textPath)
    mde = MinDifferenceEfficient(R,B)
    mde.calculate(k)
| |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kern import Kern
from ...core.parameterization import Param
from paramz.transformations import Logexp
import numpy as np
from .independent_outputs import index_to_slices
class ODE_st(Kern):
    """
    Kernel resulting from a first order ODE with OU driving GP.

    Covariance for the spatio-temporal model

        -a * d^2y/dx^2 + b * dy/dt + c * y = U(t, x)

    The last input column is an integer output index: 0 selects the
    output Y, 1 selects the latent forcing U.  The cross blocks and the
    U block are obtained by applying the ODE operator to the separable
    (space-time) RBF kernel on Y.

    :param input_dim: the number of input dimensions; has to be 3
        (time, space, output index)
    :type input_dim: int
    :param a: diffusion coefficient (used negated in the model)
    :param b: time-derivative coefficient
    :param c: reaction coefficient
    :param variance_Yt: temporal 'variance' of Y
    :type variance_Yt: float
    :param variance_Yx: spatial 'variance' of Y
    :type variance_Yx: float
    :param lengthscale_Yt: temporal lengthscale of Y (1/(2*lengthscale_Yt) is used internally)
    :type lengthscale_Yt: float
    :param lengthscale_Yx: spatial lengthscale of Y (1/(2*lengthscale_Yx) is used internally)
    :type lengthscale_Yx: float
    :rtype: kernel object
    """

    def __init__(self, input_dim, a=1.,b=1., c=1.,variance_Yx=3.,variance_Yt=1.5, lengthscale_Yx=1.5, lengthscale_Yt=1.5, active_dims=None, name='ode_st'):
        assert input_dim ==3, "only defined for 3 input dims"
        super(ODE_st, self).__init__(input_dim, active_dims, name)
        # All hyperparameters are constrained positive via Logexp.
        self.variance_Yt = Param('variance_Yt', variance_Yt, Logexp())
        self.variance_Yx = Param('variance_Yx', variance_Yx, Logexp())
        self.lengthscale_Yt = Param('lengthscale_Yt', lengthscale_Yt, Logexp())
        self.lengthscale_Yx = Param('lengthscale_Yx', lengthscale_Yx, Logexp())
        self.a = Param('a', a, Logexp())
        self.b = Param('b', b, Logexp())
        self.c = Param('c', c, Logexp())
        self.link_parameters(self.a, self.b, self.c, self.variance_Yt, self.variance_Yx, self.lengthscale_Yt,self.lengthscale_Yx)

    def K(self, X, X2=None):
        """Compute the covariance matrix between X and X2.

        The last column of X / X2 is the output index (0 -> Y, 1 -> U);
        blocks of the returned matrix are Kyy, Kyu, Kuy and Kuu.
        """
        # model : -a d^2y/dx^2 + b dy/dt + c * y = U
        # Kyy is a separable spatio-temporal RBF; the U blocks follow by
        # applying the differential operator to Kyy on either argument.
        X,slices = X[:,:-1],index_to_slices(X[:,-1])
        if X2 is None:
            X2,slices2 = X,slices
            K = np.zeros((X.shape[0], X.shape[0]))
        else:
            X2,slices2 = X2[:,:-1],index_to_slices(X2[:,-1])
            K = np.zeros((X.shape[0], X2.shape[0]))
        # squared temporal/spatial distances and the signed time lag
        tdist = (X[:,0][:,None] - X2[:,0][None,:])**2
        xdist = (X[:,1][:,None] - X2[:,1][None,:])**2
        ttdist = (X[:,0][:,None] - X2[:,0][None,:])
        vyt = self.variance_Yt
        vyx = self.variance_Yx
        lyt=1/(2*self.lengthscale_Yt)
        lyx=1/(2*self.lengthscale_Yx)
        a = self.a ## -a is used in the model, negative diffusion
        b = self.b
        c = self.c
        # base kernel and the factors produced by the ODE operator
        kyy = lambda tdist,xdist: np.exp(-lyt*(tdist) -lyx*(xdist))
        k1 = lambda tdist: (2*lyt - 4*lyt**2 * (tdist) )
        k2 = lambda xdist: ( 4*lyx**2 * (xdist) - 2*lyx )
        k3 = lambda xdist: ( 3*4*lyx**2 - 6*8*xdist*lyx**3 + 16*xdist**2*lyx**4 )
        k4 = lambda ttdist: 2*lyt*(ttdist)
        for i, s1 in enumerate(slices):
            for j, s2 in enumerate(slices2):
                for ss1 in s1:
                    for ss2 in s2:
                        if i==0 and j==0:
                            # Y-Y block: plain separable RBF
                            K[ss1,ss2] = vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                        elif i==0 and j==1:
                            # Y-U block: operator applied to the second argument
                            K[ss1,ss2] = (-a*k2(xdist[ss1,ss2]) + b*k4(ttdist[ss1,ss2]) + c)*vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                        elif i==1 and j==1:
                            # U-U block: operator applied to both arguments
                            K[ss1,ss2] = ( b**2*k1(tdist[ss1,ss2]) - 2*a*c*k2(xdist[ss1,ss2]) + a**2*k3(xdist[ss1,ss2]) + c**2 )* vyt*vyx* kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                        else:
                            # U-Y block: sign of the time-derivative term flips
                            K[ss1,ss2] = (-a*k2(xdist[ss1,ss2]) - b*k4(ttdist[ss1,ss2]) + c)*vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
        return K

    def Kdiag(self, X):
        """Compute the diagonal of the covariance matrix associated to X."""
        vyt = self.variance_Yt
        vyx = self.variance_Yx
        lyt = 1./(2*self.lengthscale_Yt)
        lyx = 1./(2*self.lengthscale_Yx)
        a = self.a
        b = self.b
        c = self.c
        # Operator factors evaluated at zero distance (tdist = xdist = 0,
        # kyy = 1); the vyt*vyx scale is folded in here.
        ## dk^2/dtdt'
        k1 = (2*lyt )*vyt*vyx
        ## dk^2/dx^2
        k2 = ( - 2*lyx )*vyt*vyx
        ## dk^4/dx^2dx'^2
        k3 = ( 4*3*lyx**2 )*vyt*vyx
        Kdiag = np.zeros(X.shape[0])
        slices = index_to_slices(X[:,-1])
        for i, ss1 in enumerate(slices):
            for s1 in ss1:
                if i==0:
                    # Y output: kyy(0, 0) scaled by the variances
                    Kdiag[s1]+= vyt*vyx
                elif i==1:
                    # U output: diagonal of the U-U block of K()
                    Kdiag[s1]+= b**2*k1 - 2*a*c*k2 + a**2*k3 + c**2*vyt*vyx
                else:
                    raise ValueError("invalid input/output index")
        return Kdiag

    def update_gradients_full(self, dL_dK, X, X2=None):
        """Derivative of the covariance matrix with respect to the parameters.

        Accumulates dL/dtheta = sum(dK/dtheta * dL_dK) into the
        .gradient attribute of every linked parameter.
        """
        X,slices = X[:,:-1],index_to_slices(X[:,-1])
        if X2 is None:
            X2,slices2 = X,slices
        else:
            X2,slices2 = X2[:,:-1],index_to_slices(X2[:,-1])
        vyt = self.variance_Yt
        vyx = self.variance_Yx
        lyt = 1./(2*self.lengthscale_Yt)
        lyx = 1./(2*self.lengthscale_Yx)
        a = self.a
        b = self.b
        c = self.c
        tdist = (X[:,0][:,None] - X2[:,0][None,:])**2
        xdist = (X[:,1][:,None] - X2[:,1][None,:])**2
        ttdist = (X[:,0][:,None] - X2[:,0][None,:])
        # The gradient buffers must match dL_dK's shape: previously they
        # were allocated square (n1, n1), which breaks the elementwise
        # products below whenever X2 has a different row count than X.
        nrow, ncol = tdist.shape
        dka = np.zeros([nrow, ncol])
        dkb = np.zeros([nrow, ncol])
        dkc = np.zeros([nrow, ncol])
        dkYdvart = np.zeros([nrow, ncol])
        dkYdvarx = np.zeros([nrow, ncol])
        dkYdlent = np.zeros([nrow, ncol])
        dkYdlenx = np.zeros([nrow, ncol])
        # Base kernel, operator factors, and their derivatives with
        # respect to the internal inverse lengthscales lyt / lyx.
        kyy = lambda tdist,xdist: np.exp(-lyt*(tdist) -lyx*(xdist))
        k1 = lambda tdist: (2*lyt - 4*lyt**2 * (tdist) )
        k2 = lambda xdist: ( 4*lyx**2 * (xdist) - 2*lyx )
        k3 = lambda xdist: ( 3*4*lyx**2 - 6*8*xdist*lyx**3 + 16*xdist**2*lyx**4 )
        k4 = lambda ttdist: 2*lyt*(ttdist)
        dkyydlyx = lambda tdist,xdist: kyy(tdist,xdist)*(-xdist)
        dkyydlyt = lambda tdist,xdist: kyy(tdist,xdist)*(-tdist)
        dk1dlyt = lambda tdist: 2. - 4*2.*lyt*tdist
        dk2dlyx = lambda xdist: (4.*2.*lyx*xdist -2.)
        dk3dlyx = lambda xdist: (6.*4.*lyx - 18.*8*xdist*lyx**2 + 4*16*xdist**2*lyx**3)
        dk4dlyt = lambda ttdist: 2*(ttdist)
        for i, s1 in enumerate(slices):
            for j, s2 in enumerate(slices2):
                for ss1 in s1:
                    for ss2 in s2:
                        if i==0 and j==0:
                            # Y-Y block: independent of a, b, c
                            dka[ss1,ss2] = 0
                            dkb[ss1,ss2] = 0
                            dkc[ss1,ss2] = 0
                            dkYdvart[ss1,ss2] = vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdvarx[ss1,ss2] = vyt*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdlenx[ss1,ss2] = vyt*vyx*dkyydlyx(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdlent[ss1,ss2] = vyt*vyx*dkyydlyt(tdist[ss1,ss2],xdist[ss1,ss2])
                        elif i==0 and j==1:
                            # Y-U block
                            dka[ss1,ss2] = -k2(xdist[ss1,ss2])*vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkb[ss1,ss2] = k4(ttdist[ss1,ss2])*vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkc[ss1,ss2] = vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdvart[ss1,ss2] = (-a*k2(xdist[ss1,ss2])+b*k4(ttdist[ss1,ss2])+c)*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdvarx[ss1,ss2] = (-a*k2(xdist[ss1,ss2])+b*k4(ttdist[ss1,ss2])+c)*vyt*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdlent[ss1,ss2] = vyt*vyx*dkyydlyt(tdist[ss1,ss2],xdist[ss1,ss2])* (-a*k2(xdist[ss1,ss2])+b*k4(ttdist[ss1,ss2])+c)+\
                                vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])*b*dk4dlyt(ttdist[ss1,ss2])
                            dkYdlenx[ss1,ss2] = vyt*vyx*dkyydlyx(tdist[ss1,ss2],xdist[ss1,ss2])*(-a*k2(xdist[ss1,ss2])+b*k4(ttdist[ss1,ss2])+c)+\
                                vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])*(-a*dk2dlyx(xdist[ss1,ss2]))
                        elif i==1 and j==1:
                            # U-U block (product rule over both operator factors)
                            dka[ss1,ss2] = (2*a*k3(xdist[ss1,ss2]) - 2*c*k2(xdist[ss1,ss2]))*vyt*vyx* kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkb[ss1,ss2] = 2*b*k1(tdist[ss1,ss2])*vyt*vyx* kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkc[ss1,ss2] = (-2*a*k2(xdist[ss1,ss2]) + 2*c )*vyt*vyx* kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdvart[ss1,ss2] = ( b**2*k1(tdist[ss1,ss2]) - 2*a*c*k2(xdist[ss1,ss2]) + a**2*k3(xdist[ss1,ss2]) + c**2 )*vyx* kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdvarx[ss1,ss2] = ( b**2*k1(tdist[ss1,ss2]) - 2*a*c*k2(xdist[ss1,ss2]) + a**2*k3(xdist[ss1,ss2]) + c**2 )*vyt* kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdlent[ss1,ss2] = vyt*vyx*dkyydlyt(tdist[ss1,ss2],xdist[ss1,ss2])*( b**2*k1(tdist[ss1,ss2]) - 2*a*c*k2(xdist[ss1,ss2]) + a**2*k3(xdist[ss1,ss2]) + c**2 ) +\
                                vyx*vyt*kyy(tdist[ss1,ss2],xdist[ss1,ss2])*b**2*dk1dlyt(tdist[ss1,ss2])
                            dkYdlenx[ss1,ss2] = vyt*vyx*dkyydlyx(tdist[ss1,ss2],xdist[ss1,ss2])*( b**2*k1(tdist[ss1,ss2]) - 2*a*c*k2(xdist[ss1,ss2]) + a**2*k3(xdist[ss1,ss2]) + c**2 ) +\
                                vyx*vyt*kyy(tdist[ss1,ss2],xdist[ss1,ss2])* (-2*a*c*dk2dlyx(xdist[ss1,ss2]) + a**2*dk3dlyx(xdist[ss1,ss2]) )
                        else:
                            # U-Y block: time-derivative term flips sign
                            dka[ss1,ss2] = -k2(xdist[ss1,ss2])*vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkb[ss1,ss2] = -k4(ttdist[ss1,ss2])*vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkc[ss1,ss2] = vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdvart[ss1,ss2] = (-a*k2(xdist[ss1,ss2])-b*k4(ttdist[ss1,ss2])+c)*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdvarx[ss1,ss2] = (-a*k2(xdist[ss1,ss2])-b*k4(ttdist[ss1,ss2])+c)*vyt*kyy(tdist[ss1,ss2],xdist[ss1,ss2])
                            dkYdlent[ss1,ss2] = vyt*vyx*dkyydlyt(tdist[ss1,ss2],xdist[ss1,ss2])* (-a*k2(xdist[ss1,ss2])-b*k4(ttdist[ss1,ss2])+c)+\
                                vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])*(-1)*b*dk4dlyt(ttdist[ss1,ss2])
                            dkYdlenx[ss1,ss2] = vyt*vyx*dkyydlyx(tdist[ss1,ss2],xdist[ss1,ss2])*(-a*k2(xdist[ss1,ss2])-b*k4(ttdist[ss1,ss2])+c)+\
                                vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])*(-a*dk2dlyx(xdist[ss1,ss2]))
        self.a.gradient = np.sum(dka * dL_dK)
        self.b.gradient = np.sum(dkb * dL_dK)
        self.c.gradient = np.sum(dkc * dL_dK)
        self.variance_Yt.gradient = np.sum(dkYdvart * dL_dK)
        self.variance_Yx.gradient = np.sum(dkYdvarx * dL_dK)
        # Chain rule: lyt = 1/(2*lengthscale_Yt)  =>  dlyt/dl = -0.5*l**-2
        self.lengthscale_Yt.gradient = np.sum(dkYdlent*(-0.5*self.lengthscale_Yt**(-2)) * dL_dK)
        self.lengthscale_Yx.gradient = np.sum(dkYdlenx*(-0.5*self.lengthscale_Yx**(-2)) * dL_dK)
| |
import boto
import os
import re
import urllib.parse
from boto.s3 import connection
from wal_e import log_help
from wal_e.exception import UserException
logger = log_help.WalELogger(__name__)
# Static fallback map of AWS region name -> S3 endpoint hostname, consulted
# by must_resolve() below.  It is refreshed from boto when possible.
_S3_REGIONS = {
    # See http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
    'ap-northeast-1': 's3-ap-northeast-1.amazonaws.com',
    'ap-southeast-1': 's3-ap-southeast-1.amazonaws.com',
    'ap-southeast-2': 's3-ap-southeast-2.amazonaws.com',
    'eu-central-1': 's3-eu-central-1.amazonaws.com',
    'eu-west-1': 's3-eu-west-1.amazonaws.com',
    'sa-east-1': 's3-sa-east-1.amazonaws.com',
    'us-east-1': 's3.amazonaws.com',
    'us-west-1': 's3-us-west-1.amazonaws.com',
    'us-west-2': 's3-us-west-2.amazonaws.com',
}
try:
    # Override the hard-coded region map with boto's mappings if
    # available.
    from boto.s3 import regions
    _S3_REGIONS.update(dict((r.name, str(r.endpoint)) for r in regions()))
except ImportError:
    # Older boto releases lack boto.s3.regions; the static map above is used.
    pass
def _is_ipv4_like(s):
"""Find if a string superficially looks like an IPv4 address.
AWS documentation plays it fast and loose with this; in other
regions, it seems like even non-valid IPv4 addresses (in
particular, ones that possess decimal numbers out of range for
IPv4) are rejected.
"""
parts = s.split('.')
if len(parts) != 4:
return False
for part in parts:
try:
int(part)
except ValueError:
return False
return True
def _is_mostly_subdomain_compatible(bucket_name):
"""Returns True if SubdomainCallingFormat can be used...mostly
This checks to make sure that putting aside certificate validation
issues that a bucket_name is able to use the
SubdomainCallingFormat.
"""
return (bucket_name.lower() == bucket_name and
len(bucket_name) >= 3 and
len(bucket_name) <= 63 and
'_' not in bucket_name and
'..' not in bucket_name and
'-.' not in bucket_name and
'.-' not in bucket_name and
not bucket_name.startswith('-') and
not bucket_name.endswith('-') and
not bucket_name.startswith('.') and
not bucket_name.endswith('.') and
not _is_ipv4_like(bucket_name))
def _connect_secureish(*args, **kwargs):
    """Connect using the safest available options.

    This turns on encryption (works in all supported boto versions)
    and certificate validation (in the subset of supported boto
    versions that can handle certificate validation, namely, those
    after 2.6.0).

    Versions below 2.6 don't support the validate_certs option to
    S3Connection, and enable it via configuration option just seems to
    cause an error.
    """
    kwargs['is_secure'] = True
    version_info = tuple(int(piece) for piece in boto.__version__.split('.'))
    if version_info >= (2, 6, 0):
        kwargs['validate_certs'] = True
    region_override = kwargs.pop('auth_region_name', None)
    s3_conn = connection.S3Connection(*args, **kwargs)
    if region_override:
        s3_conn.auth_region_name = region_override
    return s3_conn
def _s3connection_opts_from_uri(impl):
    """Parse a WALE_S3_ENDPOINT URI into S3Connection keyword options.

    'impl' should look like:

        <protocol>+<calling_format>://[user:pass]@<host>[:port]

    A concrete example:

        https+virtualhost://user:pass@localhost:1235

    Returns a dict holding 'is_secure' and 'calling_format', plus
    'host' and 'port' when present in the URI.  Raises UserException
    for a malformed scheme, embedded credentials, or a path/query
    component.
    """
    o = urllib.parse.urlparse(impl, allow_fragments=False)
    # urlparse yields an empty string (never None) for a missing scheme;
    # re.match against '' fails, so malformed input falls through to the
    # UserException below.
    proto_match = re.match(
        r'(?P<protocol>http|https)\+'
        r'(?P<format>virtualhost|path|subdomain)', o.scheme)
    if proto_match is None:
        raise UserException(
            msg='WALE_S3_ENDPOINT URI scheme is invalid',
            detail='The scheme defined is ' + repr(o.scheme),
            hint='An example of a valid scheme is https+virtualhost.')
    opts = {}
    if proto_match.group('protocol') == 'http':
        opts['is_secure'] = False
    else:
        # Constrained to 'https' by the regexp above.
        opts['is_secure'] = True
    f = proto_match.group('format')
    if f == 'virtualhost':
        opts['calling_format'] = connection.VHostCallingFormat()
    elif f == 'path':
        opts['calling_format'] = connection.OrdinaryCallingFormat()
    elif f == 'subdomain':
        opts['calling_format'] = connection.SubdomainCallingFormat()
    else:
        # Constrained by prior regexp.
        assert False
    if o.username is not None or o.password is not None:
        raise UserException(
            msg='WALE_S3_ENDPOINT does not support username or password')
    if o.hostname is not None:
        opts['host'] = o.hostname
    if o.port is not None:
        opts['port'] = o.port
    if o.path:
        raise UserException(
            msg='WALE_S3_ENDPOINT does not support a URI path',
            detail='Path is {0!r}'.format(o.path))
    if o.query:
        raise UserException(
            msg='WALE_S3_ENDPOINT does not support query parameters')
    return opts
class CallingInfo(object):
    """Encapsulate information used to produce a S3Connection."""

    def __init__(self, bucket_name=None, calling_format=None, region=None,
                 ordinary_endpoint=None):
        self.bucket_name = bucket_name
        self.calling_format = calling_format
        self.region = region
        self.ordinary_endpoint = ordinary_endpoint

    def __repr__(self):
        return ('CallingInfo({bucket_name}, {calling_format!r}, {region!r}, '
                '{ordinary_endpoint!r})'.format(**self.__dict__))

    def __str__(self):
        return repr(self)

    def connect(self, creds):
        """Return a boto S3Connection set up with great care.

        This includes TLS settings, calling format selection, and
        region detection.

        The credentials are applied by the caller because in many
        cases (instance-profile IAM) it is possible for those
        credentials to fluctuate rapidly.  By comparison, region
        fluctuations of a bucket name are not nearly so likely versus
        the gains of not looking up a bucket's region over and over.
        """
        def _conn_help(*args, **kwargs):
            return _connect_secureish(
                *args,
                provider=creds,
                calling_format=self.calling_format(),
                auth_region_name=self.region,
                **kwargs)

        # If WALE_S3_ENDPOINT is set, do not attempt to guess
        # the right calling conventions and instead honor the explicit
        # settings within WALE_S3_ENDPOINT.
        impl = os.getenv('WALE_S3_ENDPOINT')
        if impl:
            # Pass the caller-supplied credentials explicitly: omitting
            # provider here would silently fall back to boto's default
            # credential chain (the original code dropped creds on this
            # path).
            return connection.S3Connection(
                provider=creds, **_s3connection_opts_from_uri(impl))

        # Check if subdomain format compatible: if so, use the
        # BUCKETNAME.s3.amazonaws.com hostname to communicate with the
        # bucket.
        if self.calling_format is connection.SubdomainCallingFormat:
            return _conn_help(host='s3.amazonaws.com')

        # Check if OrdinaryCallingFormat compatible, but also see if
        # the endpoint has already been set, in which case only
        # setting the host= flag is necessary.
        assert self.calling_format is connection.OrdinaryCallingFormat
        assert self.ordinary_endpoint is not None
        return _conn_help(host=self.ordinary_endpoint)
def must_resolve(region):
    """Map an AWS region name to its S3 endpoint hostname.

    Raises UserException when the region is unknown to _S3_REGIONS.
    """
    try:
        return _S3_REGIONS[region]
    except KeyError:
        raise UserException(msg='Could not resolve host for AWS_REGION',
                            detail='AWS_REGION is set to "{0}".'
                            .format(region))
def from_store_name(bucket_name, region=None):
    """Construct a CallingInfo value from a bucket name.

    This is useful to encapsulate the ugliness of setting up S3
    connections, especially with regions and TLS certificates are
    involved.
    """
    # Late-bind `region` for the sake of tests that inject the
    # AWS_REGION environment variable.
    if region is None:
        region = os.getenv('AWS_REGION')
    if not _is_mostly_subdomain_compatible(bucket_name):
        # Names that break subdomain rules must use the path-style
        # (Ordinary) calling format against a resolved region endpoint.
        return CallingInfo(
            bucket_name=bucket_name,
            region=region,
            calling_format=connection.OrdinaryCallingFormat,
            ordinary_endpoint=must_resolve(region))
    if '.' in bucket_name:
        # The bucket_name might have been DNS compatible, but once
        # dots are involved TLS certificate validations will
        # certainly fail even if that's the case.
        return CallingInfo(
            bucket_name=bucket_name,
            calling_format=connection.OrdinaryCallingFormat,
            region=region,
            ordinary_endpoint=must_resolve(region))
    # If the bucket follows naming rules and has no dots in
    # the name, SubdomainCallingFormat can be used, with TLS,
    # world-wide.  (The original trailing `assert False` was
    # unreachable and has been removed.)
    return CallingInfo(
        bucket_name=bucket_name,
        calling_format=connection.SubdomainCallingFormat,
        region=region,
        ordinary_endpoint=None)
| |
#!/usr/bin/python
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a build from the given commands. A command is either an addition or
a subtraction. An addition is prefixed with a +; a subtraction is when
prefixed with a -. After the character, there is a name of a file or a @ sign
and the name of a build file.
Build files are the files found in build/types. These files are simply a
new-line separated list of commands to execute. So if the "+@complete" command
is given, it will open the complete file and run it (which may in turn open
other build files). Subtracting a build file will reverse all actions applied
by the given file. So "-@networking" will remove all the networking plugins.
The core library is always included so does not have to be listed. The default
is to use the name 'compiled'; if no commands are given, it will build the
complete build.
Examples:
# Equivalent to +@complete
build.py
build.py +@complete
build.py +@complete -@networking
build.py --name custom +@manifests +@networking +../my_plugin.js"""
import os
import re
import shakaBuildHelpers
import shutil
import subprocess
import sys
# Closure Compiler flags shared by every build invocation; buildRaw()
# appends per-build options (output paths, source maps) to this list.
closure_opts = [
  '--language_in', 'ECMASCRIPT5',
  '--language_out', 'ECMASCRIPT3',
  '--jscomp_error=*',
  # 'deprecatedAnnotations' controls complains about @expose, but the new
  # @nocollapse annotation does not do the same job for properties.
  # So since we can't use the new annotations, we have to ignore complaints
  # about the old one.
  '--jscomp_off=deprecatedAnnotations',
  # 'analyzerChecks' complains about countless instances of implicitly nullable
  # types, plus a few other issues.  Even the closure library doesn't pass
  # these checks, and the implicit nullability check in particular is over-
  # zealous and unhelpful.  So we disable the whole category of
  # 'analyzerChecks'.
  '--jscomp_off=analyzerChecks',
  '--extra_annotation_name=listens',
  '--extra_annotation_name=exportDoc',
  '--conformance_configs', '%s/build/conformance.textproto' % \
      shakaBuildHelpers.cygwinSafePath(shakaBuildHelpers.getSourceBase()),
  '-O', 'ADVANCED',
  '--generate_exports',
  '--output_wrapper_file=%s/build/wrapper.template.js' % \
      shakaBuildHelpers.cygwinSafePath(shakaBuildHelpers.getSourceBase()),
  '-D', 'COMPILED=true',
  '-D', 'goog.DEBUG=false',
  '-D', 'goog.STRICT_MODE_COMPATIBLE=true',
  '-D', 'goog.ENABLE_DEBUG_LOADER=false',
  '-D', 'goog.asserts.ENABLE_ASSERTS=false',
  '-D', 'shaka.log.MAX_LOG_LEVEL=0',
  '-D', 'GIT_VERSION="%s"' % shakaBuildHelpers.calculateVersion()
]
class Build:
  """Defines a build that has been parsed from a build file. This has
  exclude files even though it will not be used at the top-level. This allows
  combining builds. A file will only exist in at most one set.
  Members:
    include - A set of files to include.
    exclude - A set of files to remove.
  """
  def __init__(self, include=None, exclude=None):
    # `or set()` also replaces an explicitly-passed empty set with a fresh
    # one, which avoids sharing a mutable default between instances.
    self.include = include or set()
    self.exclude = exclude or set()
  def _getBuildFilePath(self, name, root):
    """Gets the full path to a build file, if it exists. Returns None if not.
    Arguments:
      name - The string name to check.
    Returns:
      The full path to the build file.
    """
    # A build file may live next to the referencing file (localPath) or in
    # build/types (buildPath); if both exist and differ, the name is
    # ambiguous and the build is aborted.
    sourceBase = shakaBuildHelpers.getSourceBase()
    localPath = os.path.join(root, name)
    buildPath = os.path.join(sourceBase, 'build', 'types', name)
    if (os.path.isfile(localPath) and os.path.isfile(buildPath)
        and localPath != buildPath):
      print >> sys.stderr, 'Build file "%s" is ambiguous' % name
      return None
    elif os.path.isfile(localPath):
      return localPath
    elif os.path.isfile(buildPath):
      return buildPath
    else:
      print >> sys.stderr, 'Build file not found: ' + name
      return None
  def _reverse(self):
    # Swapping include/exclude undoes every action this build applies.
    return Build(self.exclude, self.include)
  def _combine(self, other):
    # Merge another build into this one; a file ends up in at most one of
    # the two sets (exclusions cancel inclusions and vice versa).
    includeAll = self.include | other.include
    excludeAll = self.exclude | other.exclude
    self.include = includeAll - excludeAll
    self.exclude = excludeAll - includeAll
  def _addCore(self):
    """Adds the core library."""
    # Add externs and closure dependencies.
    sourceBase = shakaBuildHelpers.getSourceBase()
    match = re.compile(r'.*\.js$')
    self.include = self.include | set(
        shakaBuildHelpers.getAllFiles(
            os.path.join(sourceBase, 'externs'), match) +
        shakaBuildHelpers.getAllFiles(
            os.path.join(sourceBase, 'third_party', 'closure'), match))
    # Check that there are no files in 'core' that are removed
    coreBuild = Build()
    coreBuild.parseBuild(['+@core'], os.getcwd())
    coreFiles = coreBuild.include
    if len(self.exclude & coreFiles) > 0:
      # NOTE(review): this only warns; the excluded core files are still
      # unconditionally re-added below rather than failing the build.
      print >> sys.stderr, 'Cannot exclude files from core'
    self.include = self.include | coreFiles
  def parseBuild(self, lines, root):
    """Parses a Build object from the given lines of commands. This will
    recursively read and parse builds.
    Arguments:
      lines - An array of strings defining commands.
      root - The full path to the base directory.
    Returns:
      True on success, False otherwise.
    """
    for line in lines:
      # Strip comments
      try:
        line = line[:line.index('#')]
      except ValueError:
        pass
      # Strip whitespace and ignore empty lines.
      line = line.strip()
      if not line:
        continue
      isNeg = False
      if line[0] == '+':
        line = line[1:].strip()
      elif line[0] == '-':
        isNeg = True
        line = line[1:].strip()
      else:
        print >> sys.stderr, 'Operation (+/-) required'
        return False
      if line[0] == '@':
        line = line[1:].strip()
        buildPath = self._getBuildFilePath(line, root)
        if not buildPath:
          return False
        # NOTE(review): this rebinds the 'lines' parameter while the outer
        # 'for line in lines' loop is running; iteration is unaffected
        # because the loop holds its own iterator, but a distinct name
        # would be clearer.  The file handle is never explicitly closed
        # (CPython refcounting closes it promptly; 'with' would be safer).
        lines = open(buildPath).readlines()
        subRoot = os.path.dirname(buildPath)
        # If this is a build file, then recurse and combine the builds.
        subBuild = Build()
        if not subBuild.parseBuild(lines, subRoot):
          return False
        if isNeg:
          self._combine(subBuild._reverse())
        else:
          self._combine(subBuild)
      else:
        # Plain file command: resolve relative paths against `root`.
        if not os.path.isabs(line):
          line = os.path.abspath(os.path.join(root, line))
        if not os.path.isfile(line):
          print >> sys.stderr, 'Unable to find file ' + line
          return False
        if isNeg:
          self.include.discard(line)
          self.exclude.add(line)
        else:
          self.include.add(line)
          self.exclude.discard(line)
    return True
  def buildRaw(self, extra_opts):
    """Builds the files in |self.include| using the given extra Closure options.
    Arguments:
      extra_opts - An array of extra options to give to Closure.
    Returns:
      True on success; False on failure.
    """
    jar = os.path.join(shakaBuildHelpers.getSourceBase(),
                       'third_party', 'closure', 'compiler.jar')
    jar = shakaBuildHelpers.cygwinSafePath(jar)
    files = map(shakaBuildHelpers.cygwinSafePath, list(self.include))
    try:
      cmdLine = ['java', '-jar', jar] + closure_opts + extra_opts + files
      shakaBuildHelpers.printCmdLine(cmdLine)
      subprocess.check_call(cmdLine)
      return True
    except subprocess.CalledProcessError:
      # Closure's own diagnostics have already gone to the console.
      print >> sys.stderr, 'Build failed'
      return False
  def buildLibrary(self, name, rebuild):
    """Builds Shaka Player using the files in |self.include|.
    Arguments:
      name - The name of the build.
      rebuild - True to rebuild, False to ignore if no changes are detected.
    Returns:
      True on success; False on failure.
    """
    self._addCore()
    sourceBase = shakaBuildHelpers.getSourceBase()
    resultPrefix = shakaBuildHelpers.cygwinSafePath(
        os.path.join(sourceBase, 'dist', 'shaka-player.' + name))
    resultFile = resultPrefix + '.js'
    resultDebug = resultPrefix + '.debug.js'
    resultMap = resultPrefix + '.debug.map'
    # Detect changes to the library and only build if changes have been made.
    # The comparison is against the *complete* build's file list so that a
    # change to any library source invalidates this named build too.
    if not rebuild and os.path.isfile(resultFile):
      buildTime = os.path.getmtime(resultFile)
      completeBuild = Build()
      if completeBuild.parseBuild(['+@complete'], os.getcwd()):
        completeBuild._addCore()
        # Get a list of files modified since the build file was.
        editedFiles = filter(lambda x: os.path.getmtime(x) > buildTime,
                             completeBuild.include)
        if len(editedFiles) == 0:
          print 'No changes detected, not building. Use --force to override.'
          return True
    opts = ['--create_source_map', resultMap, '--js_output_file', resultDebug,
            '--source_map_location_mapping', sourceBase + '|..']
    if not self.buildRaw(opts):
      return False
    shutil.copyfile(resultDebug, resultFile)
    # Add a special source-mapping comment so that Chrome and Firefox can map
    # line and character numbers from the compiled library back to the original
    # source locations.
    with open(resultDebug, 'a') as f:
      f.write('//# sourceMappingURL=shaka-player.' + name + '.debug.map')
    return True
def usage():
  # Print the flag summary followed by the module docstring (which holds
  # the command syntax and examples).
  print 'Usage:', sys.argv[0], """[options] [commands]
Options:
  --force : Build the library even if no changes are detected.
  --help : Prints this help page.
  --name : Sets the name of the build, uses 'compiled' if not given.
"""
  print __doc__
def main(args):
  """Parses command-line arguments and runs the build.

  Arguments:
    args - Argument strings after the program name.
  Returns:
    0 on success, 1 on failure (suitable as a process exit code).
  """
  name = 'compiled'
  lines = []
  rebuild = False
  # Manual argument scan: flags start with '--'; everything else is
  # collected as a build command line for Build.parseBuild().
  i = 0
  while i < len(args):
    if args[i] == '--name':
      i = i + 1
      if i == len(args):
        print >> sys.stderr, '--name requires an argument'
        return 1
      name = args[i]
    elif args[i] == '--force':
      rebuild = True
    elif args[i] == '--help':
      usage()
      return 0
    elif args[i].startswith('--'):
      print >> sys.stderr, 'Unknown option', args[i]
      usage()
      return 1
    else:
      lines.append(args[i])
    i = i + 1
  # Default to the complete build when no commands were given.
  if len(lines) == 0:
    lines = ['+@complete']
  print 'Compiling the library...'
  customBuild = Build()
  if not customBuild.parseBuild(lines, os.getcwd()):
    return 1
  return 0 if customBuild.buildLibrary(name, rebuild) else 1
if __name__ == '__main__':
  # runMain wraps main() with the shared helper's argument/exit handling.
  shakaBuildHelpers.runMain(main)
| |
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTFeatureExtractor
class ViTFeatureExtractionTester(unittest.TestCase):
    """Holds the configuration knobs used to build ViT feature-extraction fixtures."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=18,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        settings = dict(
            parent=parent,
            batch_size=batch_size,
            num_channels=num_channels,
            image_size=image_size,
            min_resolution=min_resolution,
            max_resolution=max_resolution,
            do_resize=do_resize,
            size=size,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
        )
        for attr_name, attr_value in settings.items():
            setattr(self, attr_name, attr_value)

    def prepare_feat_extract_dict(self):
        """Return the kwargs dict used to instantiate the feature extractor."""
        return {
            key: getattr(self, key)
            for key in ("image_mean", "image_std", "do_normalize", "do_resize", "size")
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
        or a list of PyTorch tensors if one specifies torchify=True.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        def _random_image():
            # Channel-first uint8 image; either a fixed max-resolution shape
            # or a random resolution drawn from [min_resolution, max_resolution).
            if equal_resolution:
                shape = (self.num_channels, self.max_resolution, self.max_resolution)
            else:
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                shape = (self.num_channels, width, height)
            return np.random.randint(255, size=shape, dtype=np.uint8)

        image_inputs = [_random_image() for _ in range(self.batch_size)]

        if torchify:
            return [torch.from_numpy(arr) for arr in image_inputs]
        if numpify:
            return image_inputs
        # PIL expects the channel dimension as last dimension
        return [Image.fromarray(np.moveaxis(arr, 0, -1)) for arr in image_inputs]
@require_torch
@require_vision
class ViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Exercises ViTFeatureExtractor against PIL, numpy and torch inputs."""

    feature_extraction_class = ViTFeatureExtractor if is_vision_available() else None

    def setUp(self):
        self.feature_extract_tester = ViTFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        extractor = self.feature_extraction_class(**self.feat_extract_dict)
        for attr in ("image_mean", "image_std", "do_normalize", "do_resize", "size"):
            self.assertTrue(hasattr(extractor, attr))

    def test_batch_feature(self):
        pass

    def _check_call(self, image_inputs, expected_type):
        # Shared body for the PIL/numpy/torch variants: verify input types,
        # then the pixel_values shapes for single and batched encoding.
        tester = self.feature_extract_tester
        extractor = self.feature_extraction_class(**self.feat_extract_dict)
        for image in image_inputs:
            self.assertIsInstance(image, expected_type)
        # Test not batched input
        single = extractor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            single.shape,
            (1, tester.num_channels, tester.size, tester.size),
        )
        # Test batched
        batched = extractor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            batched.shape,
            (tester.batch_size, tester.num_channels, tester.size, tester.size),
        )

    def test_call_pil(self):
        # create random PIL images
        image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False)
        self._check_call(image_inputs, Image.Image)

    def test_call_numpy(self):
        # create random numpy tensors
        image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, numpify=True)
        self._check_call(image_inputs, np.ndarray)

    def test_call_pytorch(self):
        # create random PyTorch tensors
        image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, torchify=True)
        self._check_call(image_inputs, torch.Tensor)
| |
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import platform
import time
import traceback
import types
from oslo_log import log as logging
import six
import tabulate
from cinder.objects import volume
from cinder import version
from cinder.volume.drivers.dell_emc.powermax import utils
LOG = logging.getLogger(__name__)
# Masking-view related object keys used in volume metadata.
# NOTE(review): not referenced within this module's visible code — presumably
# consumed by callers; confirm before removing.
CLEANUP_LIST = ['masking_view', 'child_storage_group', 'parent_storage_group',
                'initiator_group', 'port_group', 'storage_group']
def debug_required(func):
    """Decorate *func* so it only executes when debug is enabled.

    The wrapped callable is expected to be a method: ``args[0]`` is the
    instance, whose ``is_debug`` attribute gates execution.  Any exception
    raised inside the wrapped call is logged and swallowed so that metadata
    gathering can never break the main volume operation.
    """
    def func_wrapper(*args, **kwargs):
        try:
            if not args[0].is_debug:
                return None
            return func(*args, **kwargs)
        except Exception:
            LOG.warning("Volume metadata logging failure. "
                        "Exception is %s.", traceback.format_exc())
    return func_wrapper
class PowerMaxVolumeMetadata(object):
"""Gathers PowerMax/VMAX specific volume information.
Also gathers Unisphere, Microcode OS/distribution and python versions.
"""
    def __init__(self, rest, version, is_debug):
        """Initialize the metadata gatherer.

        :param rest: REST client used to query Unisphere
        :param version: the PowerMax cinder driver version string
        :param is_debug: when False, every @debug_required method is a no-op
        """
        # Accumulates version/host info; filled lazily by gather_version_info.
        self.version_dict = {}
        self.rest = rest
        self.utils = utils.PowerMaxUtils()
        # One single-key dict per volume id, merged across operations.
        self.volume_trace_list = []
        self.is_debug = is_debug
        self.powermax_driver_version = version
def _update_platform(self):
"""Update the platform."""
try:
self.version_dict['openstack_platform'] = platform.platform()
except Exception as ex:
LOG.warning("Unable to determine the platform. "
"Exception is %s.", ex)
def _get_python_version(self):
"""Get the python version."""
try:
self.version_dict['python_version'] = platform.python_version()
except Exception as ex:
LOG.warning("Unable to determine the python version. "
"Exception is %s.", ex)
def _update_version_from_version_string(self):
"""Update the version from the version string."""
try:
self.version_dict['openstack_version'] = (
version.version_info.version_string())
except Exception as ex:
LOG.warning("Unable to determine the OS version. "
"Exception is %s.", ex)
def _update_release_from_release_string(self):
"""Update the release from the release string."""
try:
self.version_dict['openstack_release'] = (
version.version_info.release_string())
except Exception as ex:
LOG.warning("Unable to get release info. "
"Exception is %s.", ex)
    @staticmethod
    def _get_version_info_version():
        """Gets the version from cinder's version_info.

        :returns: string -- version
        """
        return version.version_info.version
def _update_info_from_version_info(self):
"""Update class variables from version info."""
try:
ver = self._get_version_info_version()
if ver:
self.version_dict['openstack_version'] = ver
except Exception as ex:
LOG.warning("Unable to get version info. "
"Exception is %s.", ex)
def _update_openstack_info(self):
"""Update openstack info."""
self._update_version_from_version_string()
self._update_release_from_release_string()
self._update_platform()
self._get_python_version()
# Some distributions override with more meaningful information
self._update_info_from_version_info()
def _update_array_info(self, serial_number):
"""Update PowerMax/VMAX info.
:param serial_number: the serial number of the array
"""
u4p_version_dict = (
self.rest.get_unisphere_version())
self.version_dict['unisphere_for_powermax_version'] = (
u4p_version_dict['version'])
self.version_dict['serial_number'] = serial_number
array_info_dict = self.rest.get_array_detail(serial_number)
self.version_dict['storage_firmware_version'] = (
array_info_dict['ucode'])
self.version_dict['storage_model'] = array_info_dict['model']
self.version_dict['powermax_cinder_driver_version'] = (
self.powermax_driver_version)
    @debug_required
    def gather_version_info(self, serial_number):
        """Gather info on the array and the host software stack.

        Populates self.version_dict (OpenStack, platform, python, Unisphere,
        array and driver versions) and logs it as a table.

        :param serial_number: the serial number of the array
        :returns: version_dict -- possibly partial if collection failed
        """
        try:
            self._update_openstack_info()
            self._update_array_info(serial_number)
            self.print_pretty_table(self.version_dict)
        except Exception as ex:
            LOG.warning("Unable to gather version info. "
                        "Exception is %s.", ex)
        return self.version_dict
    @debug_required
    def gather_volume_info(
            self, volume_id, successful_operation, append, **kwargs):
        """Gather volume information.

        Builds a trace dict from kwargs (stamped with the update time),
        consolidates it into self.volume_trace_list, and returns the entry
        for volume_id.  When append is False the matching entry is removed
        from the trace list before being returned.

        :param volume_id: the unique volume id key
        :param successful_operation: the operation e.g "create"
        :param append: append flag
        :param kwargs: variable length argument list
        :returns: datadict
        """
        volume_trace_dict = {}
        volume_key_value = {}
        datadict = {}
        try:
            volume_trace_dict = self._fill_volume_trace_dict(
                volume_id, successful_operation, append, **kwargs)
            # Stamp the trace with a human-readable update time.
            volume_trace_dict['volume_updated_time'] = (
                datetime.datetime.fromtimestamp(
                    int(time.time())).strftime('%Y-%m-%d %H:%M:%S'))
            volume_key_value[volume_id] = volume_trace_dict
            if not self.volume_trace_list:
                self.volume_trace_list.append(volume_key_value.copy())
            else:
                self._consolidate_volume_trace_list(
                    volume_id, volume_trace_dict, volume_key_value)
            # Iterate a copy: the list may be mutated inside the loop.
            for datadict in list(self.volume_trace_list):
                if volume_id in datadict:
                    if not append:
                        self.volume_trace_list.remove(datadict)
                    # Return the first (and expected only) matching entry.
                    return datadict
        except Exception as ex:
            LOG.warning("Exception in gather volume metadata. "
                        "Exception is %s.", ex)
        return datadict
    def _fill_volume_trace_dict(
            self, volume_id, successful_operation, append, **kwargs):
        """Populates a dictionary with key value pairs.

        kwargs entries passing _param_condition are copied as-is; 'mv_list'
        and 'sg_list' are expanded into numbered masking_view_N /
        storage_group_N keys.

        :param volume_id: the unique volume id key
        :param successful_operation: the operation e.g "create"
        :param append: append flag
        :param kwargs: variable length argument list
        :returns: my_volume_trace_dict
        """
        # locals() captures exactly the call parameters here; keep this as
        # the first statement so no other local leaks into the trace.
        param_dict = locals()
        my_volume_trace_dict = {}
        for k, v in param_dict.items():
            if self._param_condition(k, v):
                my_volume_trace_dict[k] = v
            if k == 'kwargs':
                for k2, v2 in v.items():
                    if self._param_condition(k2, v2):
                        my_volume_trace_dict[k2] = v2
                    elif k2 == 'mv_list' and v2:
                        for i, item in enumerate(v2, 1):
                            my_volume_trace_dict["masking_view_%d" % i] = item
                    elif k2 == 'sg_list' and v2:
                        for i, item in enumerate(v2, 1):
                            my_volume_trace_dict["storage_group_%d" % i] = item
        return my_volume_trace_dict
def _param_condition(self, key, value):
"""Determines condition for inclusion.
:param key: the key
:param value: the value
:returns: True or False
"""
exclude_list = ('self', 'append', 'mv_list', 'sg_list')
return (value is not None and key not in exclude_list and
not isinstance(value, (dict,
types.FunctionType,
type)))
@debug_required
def print_pretty_table(self, datadict):
"""Prints the data in the dict.
:param datadict: the data dictionary
"""
rows = []
for k, v in datadict.items():
if v is not None:
rows.append([k, v])
t = tabulate.tabulate(rows, headers=['Key', 'Value'], tablefmt='psql')
LOG.debug('\n%s\n', t)
    def _consolidate_volume_trace_list(
            self, volume_id, volume_trace_dict, volume_key_value):
        """Consolidate data into self.volume_trace_list.

        Merges the new trace data with any existing entry for volume_id
        (replacing that entry); otherwise appends a fresh entry.

        :param volume_id: the unique volume identifier
        :param volume_trace_dict: the new trace data for this operation
        :param volume_key_value: the volume id key and dict value
        """
        is_merged = False
        # Iterate a copy: entries are removed from the list while looping.
        for datadict in list(self.volume_trace_list):
            if volume_id in datadict:
                # Entries are built as single-key dicts ({volume_id: trace}),
                # so this inner loop runs once; new values win on conflict.
                for key, dict_value in datadict.items():
                    merged_dict = (
                        self.utils.merge_dicts(
                            volume_trace_dict, dict_value))
                self.volume_trace_list.remove(datadict)
                volume_key_value[volume_id] = merged_dict
                self.volume_trace_list.append(volume_key_value.copy())
                is_merged = True
        if not is_merged:
            self.volume_trace_list.append(volume_key_value.copy())
@debug_required
def update_volume_info_metadata(self, datadict, version_dict):
"""Get update volume metadata with volume info
:param datadict: volume info key value pairs
:param version_dict: version dictionary
:returns: volume_metadata
"""
return self.utils.merge_dicts(
version_dict, *datadict.values())
    @debug_required
    def capture_attach_info(
            self, volume, extra_specs, masking_view_dict, host,
            is_multipath, is_multiattach):
        """Captures attach info in volume metadata.

        :param volume: the volume object
        :param extra_specs: extra specifications
        :param masking_view_dict: masking view dict
        :param host: host
        :param is_multipath: is multipath flag
        :param is_multiattach: is multi attach
        """
        mv_list, sg_list = [], []
        child_storage_group, parent_storage_group = None, None
        initiator_group, port_group = None, None
        child_storage_group_tag_list = None
        if is_multiattach:
            # Multi-attach: record the full masking view / storage group
            # lists instead of the individual component names.
            successful_operation = 'multi_attach'
            mv_list = masking_view_dict['mv_list']
            sg_list = masking_view_dict['sg_list']
        else:
            successful_operation = 'attach'
            child_storage_group = masking_view_dict[utils.SG_NAME]
            child_storage_group_tag_list = (
                masking_view_dict.get(utils.TAG_LIST, None))
            parent_storage_group = masking_view_dict[utils.PARENT_SG_NAME]
            initiator_group = masking_view_dict[utils.IG_NAME]
            port_group = masking_view_dict[utils.PORTGROUPNAME]
        sl, wl = self.utils.get_service_level_workload(extra_specs)
        datadict = self.gather_volume_info(
            volume.id, successful_operation, False,
            serial_number=extra_specs[utils.ARRAY],
            service_level=sl, workload=wl, srp=extra_specs[utils.SRP],
            masking_view=masking_view_dict[utils.MV_NAME],
            child_storage_group=child_storage_group,
            parent_storage_group=parent_storage_group,
            initiator_group=initiator_group,
            port_group=port_group,
            host=host, used_host_name=masking_view_dict[utils.USED_HOST_NAME],
            is_multipath=is_multipath,
            identifier_name=self.utils.get_volume_element_name(volume.id),
            openstack_name=volume.display_name,
            mv_list=mv_list, sg_list=sg_list,
            child_storage_group_tag_list=child_storage_group_tag_list,
            array_tag_list=masking_view_dict.get('array_tag_list', None))
        volume_metadata = self.update_volume_info_metadata(
            datadict, self.version_dict)
        self.print_pretty_table(volume_metadata)
    @debug_required
    def capture_detach_info(
            self, volume, extra_specs, device_id, mv_list, sg_list):
        """Captures detach info in volume metadata.

        :param volume: the volume object
        :param extra_specs: extra specifications
        :param device_id: the PowerMax/VMAX device id
        :param mv_list: masking view list
        :param sg_list: storage group list
        """
        default_sg = self.utils.derive_default_sg_from_extra_specs(extra_specs)
        sl, wl = self.utils.get_service_level_workload(extra_specs)
        # append=False: the detach trace replaces earlier data; mv_list and
        # sg_list are expanded into numbered keys by _fill_volume_trace_dict.
        datadict = self.gather_volume_info(
            volume.id, 'detach', False, device_id=device_id,
            serial_number=extra_specs[utils.ARRAY],
            service_level=sl, workload=wl,
            srp=extra_specs[utils.SRP], default_sg_name=default_sg,
            identifier_name=self.utils.get_volume_element_name(volume.id),
            openstack_name=volume.display_name,
            mv_list=mv_list, sg_list=sg_list
        )
        volume_metadata = self.update_volume_info_metadata(
            datadict, self.version_dict)
        self.print_pretty_table(volume_metadata)
    @debug_required
    def capture_extend_info(
            self, volume, new_size, device_id, extra_specs, array):
        """Capture extend info in volume metadata.

        :param volume: the volume object
        :param new_size: the new volume size
        :param device_id: device id
        :param extra_specs: extra specifications
        :param array: array serial number
        """
        default_sg = self.utils.derive_default_sg_from_extra_specs(extra_specs)
        sl, wl = self.utils.get_service_level_workload(extra_specs)
        datadict = self.gather_volume_info(
            volume.id, 'extend', False, volume_size=new_size,
            device_id=device_id,
            default_sg_name=default_sg, serial_number=array,
            service_level=sl, workload=wl,
            srp=extra_specs[utils.SRP],
            identifier_name=self.utils.get_volume_element_name(volume.id),
            openstack_name=volume.display_name,
            is_compression_disabled=self.utils.is_compression_disabled(
                extra_specs))
        volume_metadata = self.update_volume_info_metadata(
            datadict, self.version_dict)
        self.print_pretty_table(volume_metadata)
@debug_required
def capture_snapshot_info(
self, source, extra_specs, successful_operation,
snapshot_metadata):
"""Captures snapshot info in volume metadata
:param source: the source volume object
:param extra_specs: extra specifications
:param successful_operation: snapshot operation
:param snapshot_metadata: snapshot metadata
"""
last_ss_name, snapshot_label, source_device_id = None, None, None
source_device_label, snap_ids, is_snap_id = None, None, None
if isinstance(source, volume.Volume):
if 'create' or 'manage' in successful_operation:
snapshot_count = six.text_type(len(source.snapshots))
if snapshot_metadata:
last_ss_name = snapshot_metadata.get('snap_display_name')
snapshot_label = snapshot_metadata.get('SnapshotLabel')
source_device_id = snapshot_metadata.get('SourceDeviceID')
source_device_label = snapshot_metadata.get(
'SourceDeviceLabel')
snap_ids = snapshot_metadata.get('SnapIdList')
is_snap_id = snapshot_metadata.get('is_snap_id')
else:
snapshot_count = six.text_type(len(source.snapshots) - 1)
default_sg = (
self.utils.derive_default_sg_from_extra_specs(extra_specs))
sl, wl = self.utils.get_service_level_workload(extra_specs)
datadict = self.gather_volume_info(
source.id, successful_operation, False,
volume_size=source.size,
default_sg_name=default_sg,
serial_number=extra_specs[utils.ARRAY],
service_level=sl, workload=wl,
srp=extra_specs[utils.SRP],
identifier_name=(
self.utils.get_volume_element_name(source.id)),
openstack_name=source.display_name,
snapshot_count=snapshot_count,
last_ss_name=last_ss_name,
snapshot_label=snapshot_label,
is_snap_id=is_snap_id,
snap_ids_or_gens=snap_ids,
source_device_id=source_device_id,
source_device_label=source_device_label)
volume_metadata = self.update_volume_info_metadata(
datadict, self.version_dict)
self.print_pretty_table(volume_metadata)
    @debug_required
    def capture_modify_group(
            self, group_name, group_id, add_vols, remove_volumes, array):
        """Captures group info after a modify operation.

        :param group_name: group name
        :param group_id: group id
        :param add_vols: list of volume objects added to the group
        :param remove_volumes: list of volume objects removed from the group
        :param array: array serial number
        """
        # Version info is gathered lazily the first time a group is modified.
        if not self.version_dict:
            self.version_dict = self.gather_version_info(array)
        for add_vol in add_vols:
            # append=True: merge with any existing trace data for the volume.
            datadict = self.gather_volume_info(
                add_vol.id, 'addToGroup', True,
                group_name=group_name, group_id=group_id)
            add_volume_metadata = self.update_volume_info_metadata(
                datadict, self.version_dict)
            self.print_pretty_table(add_volume_metadata)
        for remove_volume in remove_volumes:
            datadict = self.gather_volume_info(
                remove_volume.id, 'removeFromGroup', True,
                group_name='Removed from %s' % group_name,
                group_id='Removed from %s' % group_id)
            remove_volume_metadata = self.update_volume_info_metadata(
                datadict, self.version_dict)
            self.print_pretty_table(remove_volume_metadata)
    @debug_required
    def capture_create_volume(
            self, device_id, volume, group_name, group_id, extra_specs,
            rep_info_dict, successful_operation, source_snapshot_id=None,
            source_device_id=None, temporary_snapvx=None,
            array_tag_list=None):
        """Captures create volume info in volume metadata.

        :param device_id: device id
        :param volume: volume object
        :param group_name: group name
        :param group_id: group id
        :param extra_specs: additional info
        :param rep_info_dict: information gathered from replication
        :param successful_operation: the type of create operation
        :param source_snapshot_id: the source snapshot id
        :param source_device_id: the source device id
        :param temporary_snapvx: temporary snapVX
        :param array_tag_list: array tag list
        """
        # Replication attributes default to None for non-replicated volumes.
        rdf_group_no, target_name, remote_array, target_device_id = (
            None, None, None, None)
        rep_mode, replication_status, rdf_group_label, use_bias = (
            None, None, None, None)
        target_array_model, backend_id = None, None
        if rep_info_dict:
            rdf_group_no = rep_info_dict.get('rdf_group_no')
            target_name = rep_info_dict.get('target_name')
            remote_array = rep_info_dict.get('remote_array')
            target_device_id = rep_info_dict.get('target_device_id')
            rep_mode = rep_info_dict.get('rep_mode')
            replication_status = rep_info_dict.get('replication_status')
            rdf_group_label = rep_info_dict.get('rdf_group_label')
            backend_id = rep_info_dict.get('backend_id')
            if utils.METROBIAS in extra_specs:
                use_bias = extra_specs[utils.METROBIAS]
            target_array_model = rep_info_dict.get('target_array_model')
        default_sg = self.utils.derive_default_sg_from_extra_specs(
            extra_specs, rep_mode)
        sl, wl = self.utils.get_service_level_workload(extra_specs)
        # append=True so repeated operations on the same volume merge into
        # one trace entry.
        datadict = self.gather_volume_info(
            volume.id, successful_operation, True, volume_size=volume.size,
            device_id=device_id,
            default_sg_name=default_sg,
            serial_number=extra_specs[utils.ARRAY],
            service_level=sl, workload=wl,
            srp=extra_specs[utils.SRP],
            identifier_name=self.utils.get_volume_element_name(volume.id),
            openstack_name=volume.display_name,
            source_volid=volume.source_volid,
            group_name=group_name, group_id=group_id,
            rdf_group_no=rdf_group_no, backend_id=backend_id,
            target_name=target_name, remote_array=remote_array,
            target_device_id=target_device_id,
            source_snapshot_id=source_snapshot_id,
            rep_mode=rep_mode, replication_status=replication_status,
            rdf_group_label=rdf_group_label, use_bias=use_bias,
            is_compression_disabled=self.utils.is_compression_disabled(
                extra_specs),
            source_device_id=source_device_id,
            temporary_snapvx=temporary_snapvx,
            target_array_model=target_array_model,
            array_tag_list=array_tag_list)
        volume_metadata = self.update_volume_info_metadata(
            datadict, self.version_dict)
        self.print_pretty_table(volume_metadata)
@debug_required
def gather_replication_info(
self, volume_id, successful_operation, append, **kwargs):
"""Gathers replication information
:param volume_id: volume id
:param successful_operation: the successful operation type
:param append: boolean
:param **kwargs: variable length of arguments
:returns: rep_dict
"""
return self._fill_volume_trace_dict(
volume_id, successful_operation, append, **kwargs)
    @debug_required
    def capture_failover_volume(
            self, volume, target_device, remote_array, rdf_group, device_id,
            array, extra_specs, failover, vol_grp_name,
            replication_status, rep_mode):
        """Captures failover info in volume metadata.

        :param volume: volume object
        :param target_device: the device to failover to
        :param remote_array: the array to failover to
        :param rdf_group: the rdf group
        :param device_id: the device to failover from
        :param array: the array to failover from
        :param extra_specs: additional info
        :param failover: failover flag (False means failback)
        :param vol_grp_name: async group name
        :param replication_status: volume replication status
        :param rep_mode: replication mode
        """
        operation = "Failover" if failover else "Failback"
        sl, wl = self.utils.get_service_level_workload(extra_specs)
        datadict = self.gather_volume_info(
            volume.id, operation, True, volume_size=volume.size,
            device_id=target_device,
            serial_number=remote_array,
            service_level=sl, workload=wl,
            srp=extra_specs[utils.SRP],
            identifier_name=self.utils.get_volume_element_name(volume.id),
            openstack_name=volume.display_name,
            source_volid=volume.source_volid,
            rdf_group_no=rdf_group, remote_array=array,
            target_device_id=device_id, vol_grp_name=vol_grp_name,
            replication_status=replication_status, rep_mode=rep_mode
        )
        # Version info is re-gathered from the array we failed over to.
        self.version_dict = (
            self.gather_version_info(remote_array))
        volume_metadata = self.update_volume_info_metadata(
            datadict, self.version_dict)
        self.print_pretty_table(volume_metadata)
    @debug_required
    def capture_manage_existing(
            self, volume, rep_info_dict, device_id, extra_specs):
        """Captures manage existing info in volume metadata.

        :param volume: volume object
        :param rep_info_dict: information gathered from replication
        :param device_id: the PowerMax/VMAX device id
        :param extra_specs: the extra specs
        """
        successful_operation = "manage_existing_volume"
        rdf_group_no, target_name, remote_array, target_device_id = (
            None, None, None, None)
        rep_mode, replication_status, rdf_group_label, backend_id = (
            None, None, None, None)
        if rep_info_dict:
            # NOTE(review): keys are accessed directly here (KeyError if
            # absent), unlike capture_create_volume which uses .get() —
            # confirm rep_info_dict is always fully populated for manage.
            rdf_group_no = rep_info_dict['rdf_group_no']
            target_name = rep_info_dict['target_name']
            remote_array = rep_info_dict['remote_array']
            target_device_id = rep_info_dict['target_device_id']
            rep_mode = rep_info_dict['rep_mode']
            replication_status = rep_info_dict['replication_status']
            rdf_group_label = rep_info_dict['rdf_group_label']
            backend_id = rep_info_dict['backend_id']
        default_sg = self.utils.derive_default_sg_from_extra_specs(
            extra_specs, rep_mode)
        sl, wl = self.utils.get_service_level_workload(extra_specs)
        datadict = self.gather_volume_info(
            volume.id, successful_operation, True, volume_size=volume.size,
            device_id=device_id,
            default_sg_name=default_sg,
            serial_number=extra_specs[utils.ARRAY],
            service_level=sl, workload=wl,
            srp=extra_specs[utils.SRP],
            identifier_name=self.utils.get_volume_element_name(volume.id),
            openstack_name=volume.display_name,
            source_volid=volume.source_volid,
            rdf_group_no=rdf_group_no, backend_id=backend_id,
            target_name=target_name, remote_array=remote_array,
            target_device_id=target_device_id,
            rep_mode=rep_mode, replication_status=replication_status,
            rdf_group_label=rdf_group_label
        )
        volume_metadata = self.update_volume_info_metadata(
            datadict, self.version_dict)
        self.print_pretty_table(volume_metadata)
    @debug_required
    def capture_retype_info(
            self, volume, device_id, array, srp, target_slo,
            target_workload, target_sg_name, is_rep_enabled, rep_mode,
            is_compression_disabled, target_backend_id):
        """Captures retype info in volume metadata.

        :param volume: the volume object
        :param device_id: the PowerMax/VMAX device id
        :param array: the PowerMax/VMAX serial number
        :param srp: PowerMax/VMAX SRP
        :param target_slo: the target service level
        :param target_workload: the target workload
        :param target_sg_name: the target storage group name
        :param is_rep_enabled: replication enabled flag
        :param rep_mode: replication mode
        :param is_compression_disabled: compression disabled flag
        :param target_backend_id: target replication backend id
        """
        successful_operation = "retype"
        if not target_slo:
            target_slo, target_workload = 'None', 'None'
        datadict = self.gather_volume_info(
            volume.id, successful_operation, False, volume_size=volume.size,
            device_id=device_id,
            target_sg_name=target_sg_name,
            serial_number=array,
            target_service_level=target_slo,
            target_workload=target_workload,
            srp=srp,
            identifier_name=self.utils.get_volume_element_name(volume.id),
            openstack_name=volume.display_name,
            is_rep_enabled=('yes' if is_rep_enabled else 'no'),
            backend_id=target_backend_id, rep_mode=rep_mode,
            is_compression_disabled=(
                True if is_compression_disabled else False))
        if not is_rep_enabled:
            # Strip replication-only keys carried over from earlier trace
            # entries, and relabel the sg/service-level keys as source_*.
            delete_list = ['rdf_group_no', 'rep_mode', 'target_array_model',
                           'service_level', 'remote_array', 'target_device_id',
                           'replication_status', 'rdf_group_label',
                           'backend_id']
            self.utils.delete_values_from_dict(datadict, delete_list)
            update_list = [('default_sg_name', 'source_sg_name'),
                           ('service_level', 'source_service_level')]
            self.utils.update_values_in_dict(datadict, update_list)
        volume_metadata = self.update_volume_info_metadata(
            datadict, self.version_dict)
        self.print_pretty_table(volume_metadata)
@debug_required
def capture_delete_info(self, volume):
"""Captures delete info in volume metadata
:param volume: the volume object
"""
datadict = self.gather_volume_info(
volume.id, 'delete', False,
identifier_name=self.utils.get_volume_element_name(volume.id),
openstack_name=volume.display_name)
volume_metadata = self.update_volume_info_metadata(
datadict, self.version_dict)
self.print_pretty_table(volume_metadata)
| |
import argparse
import os
import sys
from snarkx import Config
from snarkx.io import IO_READERS, IO_WRITERS, walk_path, ParseFileError
__all__ = [] # all exports are forbidden from this script
# os.path.abspath(os.path.expanduser(os.path.normpath( <path> )))
# TODO: possible migration to `click`! http://click.pocoo.org/5/
#       but there are issues with Python 3 in version 5
# ================================================================================
_this_version = '1.0.2'
# Supported input/output format keys, taken from the registered readers and
# writers.  list(d) is the idiomatic form of [k for k in d.keys()].
_fmt_choices_read = list(IO_READERS)
_fmt_choices_write = list(IO_WRITERS)
# Default formats, used only when the canonical readers/writers are present.
_fmt_choices_read_default = 'g6' if 'g6' in _fmt_choices_read else None
_fmt_choices_write_default = 'ba' if 'ba' in _fmt_choices_write else None
def parse_cmdline():
    """Build the converter's command-line parser and parse sys.argv.

    :returns: argparse.Namespace with the parsed options
    """
    # ==================== Parse arguments
    parser = argparse.ArgumentParser(description='Snark files converter v{0}, SnarkX v{1}'.format(_this_version, Config.VERSION))
    # positional arguments
    parser.add_argument('source',
                        help='file(s) to be converted',
                        default=None,
                        nargs='*',
                        metavar='PATH')
    # -d / -m / -p choose mutually exclusive output placement strategies.
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-d', '--target-directory',
                        help='the folder where converted files will be stored',
                        type=str,
                        default='.',
                        metavar='DIRECTORY'
                        )
    group1.add_argument('-m', '--merge',
                        help='merge output to the file',
                        type=str,
                        default=None,
                        metavar='FILE')
    group1.add_argument('-p', '--preserve',
                        help='preserve input file location',
                        action='store_true')
    # format
    parser.add_argument('-s', '--source-format',
                        help='format of input file(s) (default: {0})'.format(_fmt_choices_read_default),
                        type=str,
                        choices=_fmt_choices_read,
                        default=_fmt_choices_read_default)
    parser.add_argument('-t', '--target-format',
                        help='format of output file(s) (default: {0})'.format(_fmt_choices_write_default),
                        type=str,
                        choices=_fmt_choices_write,
                        default=_fmt_choices_write_default)
    additional = parser.add_argument_group('additional arguments')
    # Specific arguments ???
    group2 = additional.add_mutually_exclusive_group()
    group2.add_argument('-r', '--recurse-dirs',
                        help='recursively traverse directories (in args)',
                        action='store_true')
    group2.add_argument('--skipdirs',
                        help='do not traverse directories in arguments at all',
                        action='store_true')
    # parser.add_argument('-y', '--dry-run',
    #                     help='dry run; do nothing',
    #                     action='store_true')
    additional.add_argument('--addext',
                            help="add default file extension on output(s) instead substituting the source file's one",
                            action='store_true')
    additional.add_argument('--nohint',
                            help='do not check file extensions on input(s)',
                            action='store_true')
    return parser.parse_args()
# ==================== End Parse arguments
def non_interactive_run(_args):
    """Convert the collected input files according to the parsed CLI options.

    Three mutually exclusive output modes:
      * --merge: all graphs from all inputs written into one output file;
      * --preserve: each output written next to its source file;
      * default: outputs written into --target-directory.
    Existing target files are never overwritten.
    """
    # print('DEBUG:\n{}\n'.format(_args), file=sys.stderr)
    reader_class = IO_READERS[_args.source_format]
    writer_class = IO_WRITERS[_args.target_format]
    # Collect input files, deduplicated, then sorted for deterministic order.
    _inputs_set = set()
    for _inpt in _args.source:
        # --skipdirs drops directory arguments entirely; otherwise walk_path
        # expands them (recursively with --recurse-dirs).
        if not _args.skipdirs or not (os.path.isdir(_inpt)):
            for _name in walk_path(os.path.abspath(os.path.expandvars(os.path.expanduser(_inpt))),
                                   recurse=_args.recurse_dirs,
                                   hint=(reader_class if not _args.nohint else None)):
                _inputs_set.add(_name)
    _inputs = list(_inputs_set)
    _inputs.sort()
    # print("DEBUG INPUTS: ")
    # for _elt in _inputs:
    #     print('\t{}'.format(_elt))
    if not (_args.merge is None):
        # --merge mode: everything goes into a single output file.
        _basepath, _mergepath = _prepare_path(os.path.abspath(os.path.expandvars(os.path.expanduser(_args.merge))),
                                              writer_class.default_extension(), _args.addext)
        #
        # print("DEBUG basepath: {}".format(_basepath))
        if os.access(_basepath, os.W_OK) is not True:
            print('error: snarkx_converter: folder does not exist ({})'.format(_basepath), file=sys.stderr)
            sys.exit(1)
        if (os.access(_mergepath, os.F_OK) or os.path.isdir(_mergepath)) is True:
            print('error: snarkx_converter: cannot rewrite target ({})'.format(_mergepath), file=sys.stderr)
            sys.exit(1)
        # print("DEBUG merge to: {}".format(_mergepath))
        _writer = writer_class(_mergepath)
        for _path in _inputs:
            try:
                _reader = reader_class(_path)
                for _gph in _reader:
                    _writer.write(_gph)
            except ParseFileError:
                # NOTE(review): unlike the other modes, parse errors are
                # skipped silently here (no warning) — confirm intended.
                continue
    else:
        _preserve = _args.preserve
        if _preserve:
            # --preserve mode: write each output alongside its input file.
            for _path in _inputs:
                try:
                    _reader = reader_class(_path)
                    _basepath, _outpath = _prepare_path(os.path.abspath(os.path.expandvars(os.path.expanduser(_path))),
                                                        writer_class.default_extension(), _args.addext)
                    # print("DEBUG From: {}".format(_path))
                    if os.access(_basepath, os.W_OK) \
                            and os.path.isdir(_basepath) \
                            and not (os.access(_outpath, os.F_OK)):
                        _writer = writer_class(_outpath)
                        # print("\tTo: {}".format(_outpath))
                        for _gph in _reader:
                            _writer.write(_gph)
                    else:
                        # print("DEBUG expect: {}".format(_outpath))
                        # print("DEBUG: Write fail")
                        continue
                except ParseFileError as e:
                    print('warning: snarkx_converter: {}'.format(str(e)))
                    continue
        else:
            # Default mode: write all outputs into --target-directory.
            _arg_tdir = os.path.abspath(os.path.expandvars(os.path.expanduser(_args.target_directory)))
            # print("DEBUG Args target dir: {}".format(_arg_tdir))
            if not (os.path.isdir(_arg_tdir) or os.access(_arg_tdir, os.W_OK)):
                print('error: snarkx_converter: the folder is not accessible ({})'.format(_arg_tdir), file=sys.stderr)
                sys.exit(1)
            for _path in _inputs:
                try:
                    _reader = reader_class(_path)
                    _filename = os.path.basename(_path)
                    _basepath, _outpath = _prepare_path(os.path.join(_arg_tdir, _filename),
                                                        writer_class.default_extension(), _args.addext)
                    # print("DEBUG From: {}".format(_path))
                    if not (os.access(_outpath, os.F_OK)):
                        _writer = writer_class(_outpath)
                        # print("\tTo: {}".format(_outpath))
                        for _gph in _reader:
                            _writer.write(_gph)
                    else:
                        # print("DEBUG expect: {}".format(_outpath))
                        # print("DEBUG: Write fail")
                        continue
                except ParseFileError as e:
                    print('warning: snarkx_converter: {}'.format(str(e)))
                    continue
# TODO: continue with per-file write logic; split out the (near-duplicated)
#       target-directory and preserve branches
def _prepare_path(the_path, df_ext='', preserve_ext=False):
    """Split *the_path* and derive the output path with extension *df_ext*.

    Returns ``(directory, output_path)``.  When the filename already carries
    *df_ext* it is kept unchanged; otherwise *df_ext* is appended to the
    whole filename (``preserve_ext=True``) or replaces the current extension
    (``preserve_ext=False``).
    """
    directory, filename = os.path.split(the_path)
    stem, ext = os.path.splitext(filename)
    if ext == df_ext:
        out_name = filename
    elif preserve_ext:
        out_name = filename + df_ext
    else:
        out_name = stem + df_ext
    return directory, os.path.join(directory, out_name)
def main():
    """Entry point: validate the argument list and run the converter."""
    args = parse_cmdline()
    if not args.source:
        print('error: snarkx_converter: no input files', file=sys.stderr)
        sys.exit(1)
    non_interactive_run(args)
# Script entry point.
if __name__ == '__main__':
    main()
# if args.dry_run:
# # print('DRY RUN:')
# print(repr(args))
# sys.exit(0)
# s = 'filename.g6'
# print(_prepare_path(s, '.g6', False))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.