gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import pretend
from warehouse import csp
class TestCSPTween:
    """Tests for csp.content_security_policy_tween_factory.

    The tween reads the per-request CSP policy via ``request.find_service``
    and serializes it into the ``Content-Security-Policy`` response header.
    """

    def test_csp_policy(self):
        """A configured policy is rendered into the CSP header."""
        response = pretend.stub(headers={})
        handler = pretend.call_recorder(lambda request: response)
        settings = {
            "csp": {"default-src": ["*"], "style-src": ["'self'", "example.net"]}
        }
        registry = pretend.stub(settings=settings)
        tween = csp.content_security_policy_tween_factory(handler, registry)
        request = pretend.stub(
            path="/project/foobar/",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings["csp"]),
        )
        assert tween(request) is response
        assert response.headers == {
            "Content-Security-Policy": "default-src *; style-src 'self' example.net"
        }

    def test_csp_policy_default(self):
        """No CSP header is set when the csp service lookup fails."""
        response = pretend.stub(headers={})
        handler = pretend.call_recorder(lambda request: response)
        registry = pretend.stub(settings={})
        tween = csp.content_security_policy_tween_factory(handler, registry)
        request = pretend.stub(
            path="/path/to/nowhere/", find_service=pretend.raiser(LookupError)
        )
        assert tween(request) is response
        assert response.headers == {}

    def test_csp_policy_debug_disables(self):
        """Requests under the debug-toolbar path get no CSP header."""
        response = pretend.stub(headers={})
        handler = pretend.call_recorder(lambda request: response)
        settings = {
            "csp": {"default-src": ["*"], "style-src": ["'self'", "example.net"]}
        }
        registry = pretend.stub(settings=settings)
        tween = csp.content_security_policy_tween_factory(handler, registry)
        request = pretend.stub(
            path="/_debug_toolbar/foo/",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings["csp"]),
        )
        assert tween(request) is response
        assert response.headers == {}

    def test_csp_policy_inject(self):
        """Directives appended by the handler appear in the final header."""
        response = pretend.stub(headers={})

        def handler(request):
            # The handler mutates the policy mid-request; the tween must
            # serialize the mutated policy, not a stale copy.
            request.find_service("csp")["default-src"].append("example.com")
            return response

        settings = {"csp": {"default-src": ["*"], "style-src": ["'self'"]}}
        registry = pretend.stub(settings=settings)
        tween = csp.content_security_policy_tween_factory(handler, registry)
        request = pretend.stub(
            path="/example",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings["csp"]),
        )
        assert tween(request) is response
        assert response.headers == {
            "Content-Security-Policy": "default-src * example.com; style-src 'self'"
        }

    def test_csp_policy_default_inject(self):
        """Handler-injected directives work even with empty settings."""
        settings = collections.defaultdict(list)
        response = pretend.stub(headers={})
        registry = pretend.stub(settings=settings)

        def handler(request):
            request.find_service("csp")["default-src"].append("example.com")
            return response

        tween = csp.content_security_policy_tween_factory(handler, registry)
        request = pretend.stub(
            path="/path/to/nowhere/",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings),
        )
        assert tween(request) is response
        assert response.headers == {
            "Content-Security-Policy": "default-src example.com"
        }

    def test_devel_csp(self):
        """``{request.scheme}``/``{request.host}`` placeholders are expanded."""
        settings = {"csp": {"script-src": ["{request.scheme}://{request.host}"]}}
        response = pretend.stub(headers={})
        registry = pretend.stub(settings=settings)
        handler = pretend.call_recorder(lambda request: response)
        tween = csp.content_security_policy_tween_factory(handler, registry)
        request = pretend.stub(
            scheme="https",
            host="example.com",
            path="/path/to/nowhere",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings["csp"]),
        )
        assert tween(request) is response
        assert response.headers == {
            "Content-Security-Policy": "script-src https://example.com"
        }

    def test_simple_csp(self):
        """Valueless directives like ``sandbox`` serialize correctly."""
        settings = {
            "csp": {"default-src": ["'none'"], "sandbox": ["allow-top-navigation"]}
        }
        response = pretend.stub(headers={})
        registry = pretend.stub(settings=settings)
        handler = pretend.call_recorder(lambda request: response)
        tween = csp.content_security_policy_tween_factory(handler, registry)
        request = pretend.stub(
            scheme="https",
            host="example.com",
            path="/simple/",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings["csp"]),
        )
        assert tween(request) is response
        assert response.headers == {
            "Content-Security-Policy": (
                "default-src 'none'; sandbox allow-top-navigation"
            )
        }
class TestCSPPolicy:
    """Tests for the CSPPolicy mapping type."""

    def test_create(self):
        """A CSPPolicy is a defaultdict under the hood."""
        instance = csp.CSPPolicy({"foo": ["bar"]})
        assert isinstance(instance, collections.defaultdict)

    def test_merge(self):
        """merge() extends existing directives and adds new ones."""
        instance = csp.CSPPolicy({"foo": ["bar"]})
        instance.merge({"foo": ["baz"], "something": ["else"]})
        expected = {"foo": ["bar", "baz"], "something": ["else"]}
        assert instance == expected
def test_includeme():
    """includeme registers the csp service factory, the CSP tween, and the
    full default Content-Security-Policy settings (with the camo and
    statuspage URLs interpolated from the existing registry settings)."""
    config = pretend.stub(
        register_service_factory=pretend.call_recorder(lambda fact, name: None),
        add_settings=pretend.call_recorder(lambda settings: None),
        add_tween=pretend.call_recorder(lambda tween: None),
        registry=pretend.stub(
            settings={
                "camo.url": "camo.url.value",
                "statuspage.url": "https://2p66nmmycsj3.statuspage.io",
            }
        ),
    )
    csp.includeme(config)
    assert config.register_service_factory.calls == [
        pretend.call(csp.csp_factory, name="csp")
    ]
    assert config.add_tween.calls == [
        pretend.call("warehouse.csp.content_security_policy_tween_factory")
    ]
    # The expected default policy, verbatim; camo.url.value and the
    # statuspage URL come from the registry settings above.
    assert config.add_settings.calls == [
        pretend.call(
            {
                "csp": {
                    "base-uri": ["'self'"],
                    "block-all-mixed-content": [],
                    "connect-src": [
                        "'self'",
                        "https://api.github.com/repos/",
                        "fastly-insights.com",
                        "*.fastly-insights.com",
                        "*.ethicalads.io",
                        "https://api.pwnedpasswords.com",
                        "https://2p66nmmycsj3.statuspage.io",
                    ],
                    "default-src": ["'none'"],
                    "font-src": ["'self'", "fonts.gstatic.com"],
                    "form-action": ["'self'"],
                    "frame-ancestors": ["'none'"],
                    "frame-src": ["'none'"],
                    "img-src": [
                        "'self'",
                        "camo.url.value",
                        "www.google-analytics.com",
                        "*.fastly-insights.com",
                        "*.ethicalads.io",
                    ],
                    "script-src": [
                        "'self'",
                        "www.googletagmanager.com",
                        "www.google-analytics.com",
                        "*.fastly-insights.com",
                        "*.ethicalads.io",
                        "'sha256-U3hKDidudIaxBDEzwGJApJgPEf2mWk6cfMWghrAa6i0='",
                    ],
                    "style-src": [
                        "'self'",
                        "fonts.googleapis.com",
                        "*.ethicalads.io",
                        "'sha256-2YHqZokjiizkHi1Zt+6ar0XJ0OeEy/egBnlm+MDMtrM='",
                        "'sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU='",
                    ],
                    "worker-src": ["*.fastly-insights.com"],
                }
            }
        )
    ]
class TestFactory:
    """Tests for csp.csp_factory."""

    def test_copy(self):
        """The factory returns an independent copy of the csp settings."""
        settings = {"csp": {"foo": "bar"}}
        fake_request = pretend.stub(registry=pretend.stub(settings=settings))
        policy = csp.csp_factory(None, fake_request)
        assert isinstance(policy, csp.CSPPolicy)
        assert policy == settings["csp"]
        # Mutating the factory result must not propagate back to the
        # registry settings.
        policy["baz"] = "foo"
        assert policy == {"foo": "bar", "baz": "foo"}
        assert settings == {"csp": {"foo": "bar"}}

    def test_default(self):
        """Without a 'csp' setting the factory yields an empty policy."""
        fake_request = pretend.stub(registry=pretend.stub(settings={}))
        policy = csp.csp_factory(None, fake_request)
        assert isinstance(policy, csp.CSPPolicy)
        assert policy == {}
| |
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo.config import cfg
from oslo.vmware import exceptions as vexc
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests.unit import matchers
from nova.tests.unit import utils
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
CONF = cfg.CONF
class VMwareVifTestCase(test.NoDBTestCase):
    """Unit tests for nova.virt.vmwareapi.vif helpers.

    Most tests use mox record/replay: the expected network_util/vm_util
    calls are recorded in order before ReplayAll(), so call order matters.
    """

    def setUp(self):
        super(VMwareVifTestCase, self).setUp()
        self.flags(vlan_interface='vmnet0', group='vmware')
        # A single VIF on a VLAN-tagged flat network, reused by most tests.
        network = network_model.Network(id=0,
                                        bridge='fa0',
                                        label='fake',
                                        vlan=3,
                                        bridge_interface='eth0',
                                        injected=True)
        self.vif = network_model.NetworkInfo([
            network_model.VIF(id=None,
                              address='DE:AD:BE:EF:00:00',
                              network=network,
                              type=None,
                              devname=None,
                              ovs_interfaceid=None,
                              rxtx_cap=3)
        ])[0]
        self.session = fake.FakeSession()
        self.cluster = None

    def tearDown(self):
        super(VMwareVifTestCase, self).tearDown()

    def test_ensure_vlan_bridge(self):
        """VLAN mode: the port group is created with the network's vlan id."""
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
                                 'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
                                 'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')
        network_util.get_network_with_the_name(self.session, 'fa0',
                                               self.cluster).AndReturn(None)
        network_util.get_vswitch_for_vlan_interface(
            self.session, 'vmnet0', self.cluster).AndReturn('vmnet0')
        network_util.check_if_vlan_interface_exists(
            self.session, 'vmnet0', self.cluster).AndReturn(True)
        network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
                                       self.cluster)
        network_util.get_network_with_the_name(self.session, 'fa0', None)
        self.mox.ReplayAll()
        vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)

    # FlatDHCP network mode without vlan - network doesn't exist with the host
    def test_ensure_vlan_bridge_without_vlan(self):
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
                                 'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
                                 'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')
        network_util.get_network_with_the_name(self.session, 'fa0',
                                               self.cluster).AndReturn(None)
        network_util.get_vswitch_for_vlan_interface(
            self.session, 'vmnet0', self.cluster).AndReturn('vmnet0')
        network_util.check_if_vlan_interface_exists(
            self.session, 'vmnet0', self.cluster).AndReturn(True)
        # vlan_id 0 is expected because create_vlan=False below.
        network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
                                       self.cluster)
        network_util.get_network_with_the_name(self.session, 'fa0', None)
        self.mox.ReplayAll()
        vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)

    # FlatDHCP network mode without vlan - network exists with the host
    # Get vswitch and check vlan interface should not be called
    def test_ensure_vlan_bridge_with_network(self):
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
                                 'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
                                 'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')
        vm_network = {'name': 'VM Network', 'type': 'Network'}
        network_util.get_network_with_the_name(
            self.session, 'fa0', self.cluster).AndReturn(vm_network)
        self.mox.ReplayAll()
        vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)

    # Flat network mode with DVS
    def test_ensure_vlan_bridge_with_existing_dvs(self):
        network_ref = {'dvpg': 'dvportgroup-2062',
                       'type': 'DistributedVirtualPortgroup'}
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
                                 'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
                                 'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')
        network_util.get_network_with_the_name(
            self.session, 'fa0', self.cluster).AndReturn(network_ref)
        self.mox.ReplayAll()
        ref = vif.ensure_vlan_bridge(self.session,
                                     self.vif,
                                     create_vlan=False)
        self.assertThat(ref, matchers.DictMatches(network_ref))

    def test_get_network_ref_neutron(self):
        """With is_neutron=True, get_network_ref delegates to neutron."""
        self.mox.StubOutWithMock(vif, 'get_neutron_network')
        vif.get_neutron_network(self.session, 'fa0', self.cluster, self.vif)
        self.mox.ReplayAll()
        vif.get_network_ref(self.session, self.cluster, self.vif, True)

    def test_get_network_ref_flat_dhcp(self):
        """Without neutron, get_network_ref delegates to ensure_vlan_bridge."""
        self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
        vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
                               create_vlan=False)
        self.mox.ReplayAll()
        vif.get_network_ref(self.session, self.cluster, self.vif, False)

    def test_get_network_ref_bridge(self):
        """should_create_vlan on the network triggers create_vlan=True."""
        self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
        vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
                               create_vlan=True)
        self.mox.ReplayAll()
        network = network_model.Network(id=0,
                                        bridge='fa0',
                                        label='fake',
                                        vlan=3,
                                        bridge_interface='eth0',
                                        injected=True,
                                        should_create_vlan=True)
        self.vif = network_model.NetworkInfo([
            network_model.VIF(id=None,
                              address='DE:AD:BE:EF:00:00',
                              network=network,
                              type=None,
                              devname=None,
                              ovs_interfaceid=None,
                              rxtx_cap=3)
        ])[0]
        vif.get_network_ref(self.session, self.cluster, self.vif, False)

    def test_get_network_ref_bridge_from_opaque(self):
        """An opaque network matching the bridge id is selected."""
        opaque_networks = [{'opaqueNetworkId': 'bridge_id',
                            'opaqueNetworkName': 'name',
                            'opaqueNetworkType': 'OpaqueNetwork'}]
        network_ref = vif._get_network_ref_from_opaque(opaque_networks,
            'integration_bridge', 'bridge_id')
        self.assertEqual('bridge_id', network_ref['network-id'])

    def test_get_network_ref_multiple_bridges_from_opaque(self):
        opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
                            'opaqueNetworkName': 'name1',
                            'opaqueNetworkType': 'OpaqueNetwork'},
                           {'opaqueNetworkId': 'bridge_id2',
                            'opaqueNetworkName': 'name2',
                            'opaqueNetworkType': 'OpaqueNetwork'}]
        network_ref = vif._get_network_ref_from_opaque(opaque_networks,
            'integration_bridge', 'bridge_id2')
        self.assertEqual('bridge_id2', network_ref['network-id'])

    def test_get_network_ref_integration(self):
        """The integration bridge is matched when no bridge id matches."""
        opaque_networks = [{'opaqueNetworkId': 'integration_bridge',
                            'opaqueNetworkName': 'name',
                            'opaqueNetworkType': 'OpaqueNetwork'}]
        network_ref = vif._get_network_ref_from_opaque(opaque_networks,
            'integration_bridge', 'bridge_id')
        self.assertEqual('integration_bridge', network_ref['network-id'])

    def test_get_network_ref_bridge_none(self):
        """No match at all yields None."""
        opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
                            'opaqueNetworkName': 'name1',
                            'opaqueNetworkType': 'OpaqueNetwork'},
                           {'opaqueNetworkId': 'bridge_id2',
                            'opaqueNetworkName': 'name2',
                            'opaqueNetworkType': 'OpaqueNetwork'}]
        network_ref = vif._get_network_ref_from_opaque(opaque_networks,
            'integration_bridge', 'bridge_id')
        self.assertIsNone(network_ref)

    def test_get_network_ref_integration_multiple(self):
        # NOTE(review): with several opaque networks and only the
        # integration bridge matching, the helper returns None here.
        opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
                            'opaqueNetworkName': 'name1',
                            'opaqueNetworkType': 'OpaqueNetwork'},
                           {'opaqueNetworkId': 'integration_bridge',
                            'opaqueNetworkName': 'name2',
                            'opaqueNetworkType': 'OpaqueNetwork'}]
        network_ref = vif._get_network_ref_from_opaque(opaque_networks,
            'integration_bridge', 'bridge_id')
        self.assertIsNone(network_ref)

    def test_get_neutron_network(self):
        self.mox.StubOutWithMock(vm_util, 'get_host_ref')
        self.mox.StubOutWithMock(self.session, '_call_method')
        self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
        vm_util.get_host_ref(self.session,
                             self.cluster).AndReturn('fake-host')
        opaque = fake.DataObject()
        opaque.HostOpaqueNetworkInfo = ['fake-network-info']
        self.session._call_method(vim_util, "get_dynamic_property",
                                  'fake-host', 'HostSystem',
                                  'config.network.opaqueNetwork').AndReturn(opaque)
        vif._get_network_ref_from_opaque(
            opaque.HostOpaqueNetworkInfo,
            CONF.vmware.integration_bridge,
            self.vif['network']['id']).AndReturn('fake-network-ref')
        self.mox.ReplayAll()
        network_ref = vif.get_neutron_network(self.session,
                                              self.vif['network']['id'],
                                              self.cluster,
                                              self.vif)
        self.assertEqual(network_ref, 'fake-network-ref')

    def test_get_neutron_network_opaque_network_not_found(self):
        self.mox.StubOutWithMock(vm_util, 'get_host_ref')
        self.mox.StubOutWithMock(self.session, '_call_method')
        self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
        vm_util.get_host_ref(self.session,
                             self.cluster).AndReturn('fake-host')
        opaque = fake.DataObject()
        opaque.HostOpaqueNetworkInfo = ['fake-network-info']
        self.session._call_method(vim_util, "get_dynamic_property",
                                  'fake-host', 'HostSystem',
                                  'config.network.opaqueNetwork').AndReturn(opaque)
        vif._get_network_ref_from_opaque(
            opaque.HostOpaqueNetworkInfo,
            CONF.vmware.integration_bridge,
            self.vif['network']['id']).AndReturn(None)
        self.mox.ReplayAll()
        self.assertRaises(exception.NetworkNotFoundForBridge,
                          vif.get_neutron_network, self.session,
                          self.vif['network']['id'], self.cluster, self.vif)

    def test_get_neutron_network_bridge_network_not_found(self):
        self.mox.StubOutWithMock(vm_util, 'get_host_ref')
        self.mox.StubOutWithMock(self.session, '_call_method')
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        vm_util.get_host_ref(self.session,
                             self.cluster).AndReturn('fake-host')
        opaque = fake.DataObject()
        opaque.HostOpaqueNetworkInfo = ['fake-network-info']
        # No opaque network info -> falls back to name lookup, which also
        # fails, so NetworkNotFoundForBridge is expected.
        self.session._call_method(vim_util, "get_dynamic_property",
                                  'fake-host', 'HostSystem',
                                  'config.network.opaqueNetwork').AndReturn(None)
        network_util.get_network_with_the_name(self.session, 0,
                                               self.cluster).AndReturn(None)
        self.mox.ReplayAll()
        self.assertRaises(exception.NetworkNotFoundForBridge,
                          vif.get_neutron_network, self.session,
                          self.vif['network']['id'], self.cluster, self.vif)

    def test_create_port_group_already_exists(self):
        """AlreadyExistsException from AddPortGroup is swallowed."""
        def fake_call_method(module, method, *args, **kwargs):
            if method == 'AddPortGroup':
                raise vexc.AlreadyExistsException()
        with contextlib.nested(
            mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
            mock.patch.object(vm_util, 'get_host_ref'),
            mock.patch.object(self.session, '_call_method',
                              fake_call_method)
        ) as (_add_vswitch, _get_host, _call_method):
            network_util.create_port_group(self.session, 'pg_name',
                                           'vswitch_name', vlan_id=0,
                                           cluster=None)

    def test_create_port_group_exception(self):
        """Other driver exceptions from AddPortGroup propagate."""
        def fake_call_method(module, method, *args, **kwargs):
            if method == 'AddPortGroup':
                raise vexc.VMwareDriverException()
        with contextlib.nested(
            mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
            mock.patch.object(vm_util, 'get_host_ref'),
            mock.patch.object(self.session, '_call_method',
                              fake_call_method)
        ) as (_add_vswitch, _get_host, _call_method):
            self.assertRaises(vexc.VMwareDriverException,
                              network_util.create_port_group,
                              self.session, 'pg_name',
                              'vswitch_name', vlan_id=0,
                              cluster=None)

    def test_get_neutron_network_invalid_property(self):
        """InvalidPropertyException during property lookup is tolerated."""
        def fake_call_method(module, method, *args, **kwargs):
            if method == 'get_dynamic_property':
                raise vexc.InvalidPropertyException()
        with contextlib.nested(
            mock.patch.object(vm_util, 'get_host_ref'),
            mock.patch.object(self.session, '_call_method',
                              fake_call_method),
            mock.patch.object(network_util, 'get_network_with_the_name')
        ) as (_get_host, _call_method, _get_name):
            vif.get_neutron_network(self.session, 'network_name',
                                    'cluster', self.vif)

    def test_get_vif_info_none(self):
        vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
                                    'is_neutron', 'fake_model', None)
        self.assertEqual([], vif_info)

    def test_get_vif_info_empty_list(self):
        vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
                                    'is_neutron', 'fake_model', [])
        self.assertEqual([], vif_info)

    @mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
    def test_get_vif_info(self, mock_get_network_ref):
        network_info = utils.get_test_network_info()
        vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
                                    'is_neutron', 'fake_model', network_info)
        expected = [{'iface_id': 'vif-xxx-yyy-zzz',
                     'mac_address': 'fake',
                     'network_name': 'fake',
                     'network_ref': 'fake_ref',
                     'vif_model': 'fake_model'}]
        self.assertEqual(expected, vif_info)
| |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Installs and configures Nova
"""
import os
import platform
import socket
from packstack.installer import basedefs
from packstack.installer import processors
from packstack.installer import utils
from packstack.installer import validators
from packstack.installer.exceptions import ScriptRuntimeError
from packstack.modules.shortcuts import get_mq
from packstack.modules.ospluginutils import appendManifestFile
from packstack.modules.ospluginutils import createFirewallResources
from packstack.modules.ospluginutils import getManifestTemplate
from packstack.modules.ospluginutils import manifestfiles
from packstack.modules.ospluginutils import NovaConfig
# ------------- Nova Packstack Plugin Initialization --------------
PLUGIN_NAME = "OS-Nova"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
    """Register all Nova-related configuration options with *controller*.

    Three option groups are added: NOVA (core credentials and scheduler
    ratios), NOVA_NETWORK (nova-network settings, only asked when neutron
    is not installed) and NOVA_NETWORK_VLAN (VLAN-manager specifics, only
    asked when the VLAN manager is selected).
    """
    # Fedora names its interfaces em1/em2; other distributions use ethN.
    if platform.linux_distribution()[0] == "Fedora":
        primary_netif = "em1"
        secondary_netif = "em2"
    else:
        primary_netif = "eth0"
        secondary_netif = "eth1"
    nova_params = {
        "NOVA": [
            {"CMD_OPTION": "nova-db-passwd",
             "USAGE": "The password to use for the Nova to access DB",
             "PROMPT": "Enter the password for the Nova DB access",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "PW_PLACEHOLDER",
             "PROCESSORS": [processors.process_password],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_NOVA_DB_PW",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},

            {"CMD_OPTION": "nova-ks-passwd",
             "USAGE": ("The password to use for the Nova to authenticate "
                       "with Keystone"),
             "PROMPT": "Enter the password for the Nova Keystone access",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "PW_PLACEHOLDER",
             "PROCESSORS": [processors.process_password],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_NOVA_KS_PW",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},

            {"CMD_OPTION": "novasched-cpu-allocation-ratio",
             "USAGE": ("The overcommitment ratio for virtual to physical CPUs."
                       " Set to 1.0 to disable CPU overcommitment"),
             "PROMPT": "Enter the CPU overcommitment ratio. Set to 1.0 to "
                       "disable CPU overcommitment",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_float],
             "DEFAULT_VALUE": 16.0,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "novasched-ram-allocation-ratio",
             "USAGE": ("The overcommitment ratio for virtual to physical RAM. "
                       "Set to 1.0 to disable RAM overcommitment"),
             "PROMPT": ("Enter the RAM overcommitment ratio. Set to 1.0 to "
                        "disable RAM overcommitment"),
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_float],
             "DEFAULT_VALUE": 1.5,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            # BUGFIX: "by defaul" -> "by default" in the user-facing help.
            {"CMD_OPTION": "novacompute-migrate-protocol",
             "USAGE": ("Protocol used for instance migration. Allowed values "
                       "are tcp and ssh. Note that by default nova user is "
                       "created with /sbin/nologin shell so that ssh protocol "
                       "won't be working. To make ssh protocol work you have "
                       "to fix nova user on compute hosts manually."),
             "PROMPT": ("Enter protocol which will be used for instance "
                        "migration"),
             "OPTION_LIST": ['tcp', 'ssh'],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": 'tcp',
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "nova-compute-manager",
             "USAGE": ("The manager that will run nova compute."),
             "PROMPT": ("Enter the compute manager for nova "
                        "migration"),
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "nova.compute.manager.ComputeManager",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_COMPUTE_MANAGER",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "NOVA_NETWORK": [
            {"CMD_OPTION": "novacompute-privif",
             "USAGE": ("Private interface for Flat DHCP on the Nova compute "
                       "servers"),
             "PROMPT": ("Enter the Private interface for Flat DHCP on the Nova"
                        " compute servers"),
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": secondary_netif,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_COMPUTE_PRIVIF",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "novanetwork-manager",
             "USAGE": "Nova network manager",
             "PROMPT": "Enter the Nova network manager",
             "OPTION_LIST": [r'^nova\.network\.manager\.\w+Manager$'],
             "VALIDATORS": [validators.validate_regexp],
             "DEFAULT_VALUE": "nova.network.manager.FlatDHCPManager",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_MANAGER",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "novanetwork-pubif",
             "USAGE": "Public interface on the Nova network server",
             "PROMPT": "Enter the Public interface on the Nova network server",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": primary_netif,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_PUBIF",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "novanetwork-privif",
             "USAGE": ("Private interface for network manager on the Nova "
                       "network server"),
             "PROMPT": ("Enter the Private interface for network manager on "
                        "the Nova network server"),
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": secondary_netif,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_PRIVIF",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "novanetwork-fixed-range",
             "USAGE": "IP Range for network manager",
             "PROMPT": "Enter the IP Range for network manager",
             "OPTION_LIST": ["^[\:\.\da-fA-f]+(\/\d+){0,1}$"],
             "PROCESSORS": [processors.process_cidr],
             "VALIDATORS": [validators.validate_regexp],
             "DEFAULT_VALUE": "192.168.32.0/22",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_FIXEDRANGE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "novanetwork-floating-range",
             "USAGE": "IP Range for Floating IP's",
             "PROMPT": "Enter the IP Range for Floating IP's",
             "OPTION_LIST": ["^[\:\.\da-fA-f]+(\/\d+){0,1}$"],
             "PROCESSORS": [processors.process_cidr],
             "VALIDATORS": [validators.validate_regexp],
             "DEFAULT_VALUE": "10.3.4.0/22",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_FLOATRANGE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "novanetwork-auto-assign-floating-ip",
             "USAGE": "Automatically assign a floating IP to new instances",
             "PROMPT": ("Should new instances automatically have a floating "
                        "IP assigned?"),
             "OPTION_LIST": ["y", "n"],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": "n",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "NOVA_NETWORK_VLAN": [
            {"CMD_OPTION": "novanetwork-vlan-start",
             "USAGE": "First VLAN for private networks",
             "PROMPT": "Enter first VLAN for private networks",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": 100,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_VLAN_START",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "novanetwork-num-networks",
             "USAGE": "Number of networks to support",
             "PROMPT": "How many networks should be supported",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": 1,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_NUMBER",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "novanetwork-network-size",
             "USAGE": "Number of addresses in each private subnet",
             "PROMPT": "How many addresses should be in each private subnet",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": 255,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_SIZE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],
    }

    def use_nova_network(config):
        # nova-network options only apply when nova is installed and
        # neutron is not.
        return (config['CONFIG_NOVA_INSTALL'] == 'y' and
                config['CONFIG_NEUTRON_INSTALL'] != 'y')

    def use_nova_network_vlan(config):
        # VLAN options additionally require the VlanManager to be chosen.
        manager = 'nova.network.manager.VlanManager'
        return (config['CONFIG_NOVA_INSTALL'] == 'y' and
                config['CONFIG_NEUTRON_INSTALL'] != 'y' and
                config['CONFIG_NOVA_NETWORK_MANAGER'] == manager)

    nova_groups = [
        {"GROUP_NAME": "NOVA",
         "DESCRIPTION": "Nova Options",
         "PRE_CONDITION": "CONFIG_NOVA_INSTALL",
         "PRE_CONDITION_MATCH": "y",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "NOVA_NETWORK",
         "DESCRIPTION": "Nova Network Options",
         "PRE_CONDITION": use_nova_network,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "NOVA_NETWORK_VLAN",
         "DESCRIPTION": "Nova Network VLAN Options",
         "PRE_CONDITION": use_nova_network_vlan,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
    ]
    for group in nova_groups:
        params = nova_params[group["GROUP_NAME"]]
        controller.addGroup(group, params)
def initSequences(controller):
    """Register the Nova installation step sequence with *controller*.

    Does nothing unless Nova installation was requested. The network step
    differs depending on whether neutron or nova-network is being set up.
    """
    if controller.CONF['CONFIG_NOVA_INSTALL'] != 'y':
        return
    if controller.CONF['CONFIG_NEUTRON_INSTALL'] == 'y':
        network_title = ('Adding OpenStack Network-related '
                         'Nova manifest entries')
        network_function = create_neutron_manifest
    else:
        network_title = 'Adding Nova Network manifest entries'
        network_function = create_network_manifest
    # Step order matters: the API step runs first and initializes the
    # module-level compute_hosts/network_hosts globals used by later steps.
    novaapisteps = [
        {'title': 'Adding Nova API manifest entries',
         'functions': [create_api_manifest]},
        {'title': 'Adding Nova Keystone manifest entries',
         'functions': [create_keystone_manifest]},
        {'title': 'Adding Nova Cert manifest entries',
         'functions': [create_cert_manifest]},
        {'title': 'Adding Nova Conductor manifest entries',
         'functions': [create_conductor_manifest]},
        {'title': 'Creating ssh keys for Nova migration',
         'functions': [create_ssh_keys]},
        {'title': 'Gathering ssh host keys for Nova migration',
         'functions': [gather_host_keys]},
        {'title': 'Adding Nova Compute manifest entries',
         'functions': [create_compute_manifest]},
        {'title': 'Adding Nova Scheduler manifest entries',
         'functions': [create_sched_manifest]},
        {'title': 'Adding Nova VNC Proxy manifest entries',
         'functions': [create_vncproxy_manifest]},
        {'title': network_title,
         'functions': [network_function]},
        {'title': 'Adding Nova Common manifest entries',
         'functions': [create_common_manifest]},
    ]
    controller.addSequence("Installing OpenStack Nova API", [], [],
                           novaapisteps)
# ------------------------- helper functions -------------------------
def check_ifcfg(host, device):
    """Verify that *device* exists on *host*.

    Raises ScriptRuntimeError if the given host does not have the device.
    """
    runner = utils.ScriptRunner(host)
    probe = "ip addr show dev %s || ( echo Device %s does not exist && exit 1 )"
    runner.append(probe % (device, device))
    runner.execute()
def bring_up_ifcfg(host, device):
    """Bring *device* up on *host* if it is currently down.

    Raises ScriptRuntimeError in case of failure.
    """
    runner = utils.ScriptRunner(host)
    runner.append('ip link show up | grep "%s"' % device)
    try:
        # If the interface is already listed as up there is nothing to do.
        runner.execute()
        return
    except ScriptRuntimeError:
        runner.clear()
    runner.append('ip link set dev %s up' % device)
    try:
        runner.execute()
    except ScriptRuntimeError:
        msg = ('Failed to bring up network interface %s on host %s.'
               ' Interface should be up so OpenStack can work'
               ' properly.' % (device, host))
        raise ScriptRuntimeError(msg)
# ------------------------ Step Functions -------------------------
def create_ssh_keys(config, messages):
    """Generates a passwordless RSA keypair used for nova live migration.

    Stores the key type, public part and private part in config.
    """
    migration_key = os.path.join(basedefs.VAR_DIR, 'nova_migration_key')
    # Generate the keypair locally (2048-bit RSA, empty passphrase).
    script = utils.ScriptRunner()
    script.append('ssh-keygen -t rsa -b 2048 -f "%s" -N ""' % migration_key)
    script.execute()
    with open(migration_key) as key_file:
        private_part = key_file.read().strip()
    with open('%s.pub' % migration_key) as key_file:
        public_part = key_file.read().strip()
    config['NOVA_MIGRATION_KEY_TYPE'] = 'ssh-rsa'
    # Public key file format is "<type> <base64 data> <comment>"; only the
    # base64 payload goes into the config.
    config['NOVA_MIGRATION_KEY_PUBLIC'] = public_part.split()[1]
    config['NOVA_MIGRATION_KEY_SECRET'] = private_part
def gather_host_keys(config, messages):
    """Collects the ssh host key of every compute host into config.

    The keys are later rendered into known-host resources so computes can
    connect to each other for live migration.
    """
    global compute_hosts
    for compute in compute_hosts:
        scanner = utils.ScriptRunner()
        scanner.append('ssh-keyscan %s' % compute)
        retcode, key_output = scanner.execute()
        config['HOST_KEYS_%s' % compute] = key_output
def create_api_manifest(config, messages):
    """Creates the nova-api puppet manifest for the controller host.

    Runs first in the sequence, so it also initializes the module-global
    compute_hosts and network_hosts sets shared with the other steps.
    """
    global compute_hosts, network_hosts
    compute_hosts = set(
        host.strip()
        for host in config.get("CONFIG_COMPUTE_HOSTS", "").split(",")
        if host.strip()
    )
    network_hosts = set(
        host.strip()
        for host in config.get("CONFIG_NETWORK_HOSTS", "").split(",")
        if host.strip()
    )
    # This is a hack around us needing to generate the neutron metadata
    # password, but the nova puppet plugin uses the existence of that
    # password to determine whether or not to configure neutron metadata
    # proxy support. So the nova_api.pp template needs unquoted 'undef'
    # to disable metadata support if neutron is not being installed.
    if config['CONFIG_NEUTRON_INSTALL'] == 'y':
        metadata_pw = "%s" % config['CONFIG_NEUTRON_METADATA_PW']
    else:
        metadata_pw = 'undef'
    config['CONFIG_NEUTRON_METADATA_PW_UNQUOTED'] = metadata_pw
    manifestfile = "%s_api_nova.pp" % config['CONFIG_CONTROLLER_HOST']
    manifestdata = getManifestTemplate("nova_api")
    # Firewall: open the EC2, OSAPI and metadata API ports to everyone.
    config['FIREWALL_NOVA_API_RULES'] = {
        "nova_api": {
            'host': "ALL",
            'service_name': "nova api",
            'chain': "INPUT",
            'ports': ['8773', '8774', '8775'],
            'proto': "tcp",
        },
    }
    manifestdata += createFirewallResources('FIREWALL_NOVA_API_RULES')
    appendManifestFile(manifestfile, manifestdata, 'novaapi')
def create_keystone_manifest(config, messages):
    """Appends the nova keystone entries to the controller's manifest."""
    controller = config['CONFIG_CONTROLLER_HOST']
    appendManifestFile("%s_keystone.pp" % controller,
                       getManifestTemplate("keystone_nova"))
def create_cert_manifest(config, messages):
    """Appends the nova-cert entries to the controller's nova manifest."""
    controller = config['CONFIG_CONTROLLER_HOST']
    appendManifestFile("%s_nova.pp" % controller,
                       getManifestTemplate("nova_cert"))
def create_conductor_manifest(config, messages):
    """Appends the nova-conductor entries to the controller's nova manifest."""
    controller = config['CONFIG_CONTROLLER_HOST']
    appendManifestFile("%s_nova.pp" % controller,
                       getManifestTemplate("nova_conductor"))
def create_compute_manifest(config, messages):
    """Builds and appends a nova-compute puppet manifest for every compute host.

    Computes the libvirt migration URL, renders known ssh host keys, selects
    the hypervisor backend template and opens the needed firewall ports.
    """
    global compute_hosts, network_hosts
    # Migration URL template; the %s placeholder is filled in later by the
    # puppet template with the peer hostname.
    migrate_protocol = config['CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL']
    if migrate_protocol == 'ssh':
        config['CONFIG_NOVA_COMPUTE_MIGRATE_URL'] = (
            'qemu+ssh://nova@%s/system?no_verify=1&'
            'keyfile=/etc/nova/ssh/nova_migration_key'
        )
    else:
        config['CONFIG_NOVA_COMPUTE_MIGRATE_URL'] = (
            'qemu+tcp://nova@%s/system'
        )
    # Turn the host keys gathered by gather_host_keys() into sshkey resources
    # so the compute hosts trust each other for live migration.
    ssh_hostkeys = ''
    ssh_keys_details = {}
    for host in compute_hosts:
        try:
            hostname, aliases, addrs = socket.gethostbyaddr(host)
        except socket.herror:
            # reverse lookup failed -- fall back to the configured name
            hostname, aliases, addrs = (host, [], [])
        for hostkey in config['HOST_KEYS_%s' % host].split('\n'):
            hostkey = hostkey.strip()
            if not hostkey:
                continue
            # ssh-keyscan output line: "<host> <key type> <key data>"
            _, host_key_type, host_key_data = hostkey.split()
            key = "%s.%s" % (host_key_type, hostname)
            ssh_keys_details.setdefault(key, {})
            ssh_keys_details[key]['ensure'] = 'present'
            ssh_keys_details[key]['host_aliases'] = aliases + addrs
            ssh_keys_details[key]['key'] = host_key_data
            ssh_keys_details[key]['type'] = host_key_type
    config['SSH_KEYS'] = ssh_keys_details
    ssh_hostkeys += getManifestTemplate("sshkey")
    for host in compute_hosts:
        if config['CONFIG_IRONIC_INSTALL'] == 'y':
            # Ironic needs the clustered compute manager instead of the default.
            cm = 'ironic.nova.compute.manager.ClusteredComputeManager'
            config['CONFIG_NOVA_COMPUTE_MANAGER'] = cm
        manifestdata = getManifestTemplate("nova_compute")
        fw_details = dict()
        cf_fw_qemu_mig_key = "FIREWALL_NOVA_QEMU_MIG_RULES_%s" % host
        # Allow qemu live-migration traffic from every other compute host.
        for c_host in compute_hosts:
            key = "nova_qemu_migration_%s_%s" % (host, c_host)
            fw_details.setdefault(key, {})
            fw_details[key]['host'] = "%s" % c_host
            fw_details[key]['service_name'] = "nova qemu migration"
            fw_details[key]['chain'] = "INPUT"
            fw_details[key]['ports'] = ['16509', '49152-49215']
            fw_details[key]['proto'] = "tcp"
        config[cf_fw_qemu_mig_key] = fw_details
        manifestdata += createFirewallResources(cf_fw_qemu_mig_key)
        # Pick the hypervisor backend template: vmware > ironic > libvirt.
        if config['CONFIG_VMWARE_BACKEND'] == 'y':
            manifestdata += getManifestTemplate("nova_compute_vmware.pp")
        elif config['CONFIG_IRONIC_INSTALL'] == 'y':
            manifestdata += getManifestTemplate("nova_compute_ironic.pp")
        else:
            manifestdata += getManifestTemplate("nova_compute_libvirt.pp")
        # Cinder volume backends that need extra compute-side configuration.
        if (config['CONFIG_VMWARE_BACKEND'] != 'y' and
                config['CONFIG_CINDER_INSTALL'] == 'y' and
                'gluster' in config['CONFIG_CINDER_BACKEND']):
            manifestdata += getManifestTemplate("nova_gluster")
        if (config['CONFIG_VMWARE_BACKEND'] != 'y' and
                config['CONFIG_CINDER_INSTALL'] == 'y' and
                'nfs' in config['CONFIG_CINDER_BACKEND']):
            manifestdata += getManifestTemplate("nova_nfs")
        manifestfile = "%s_nova.pp" % host
        nova_config_options = NovaConfig()
        if config['CONFIG_NEUTRON_INSTALL'] != 'y':
            # nova-network: the compute host needs the flat interface up
            # unless it is itself a network host.
            if host not in network_hosts:
                nova_config_options.addOption(
                    "DEFAULT/flat_interface",
                    config['CONFIG_NOVA_COMPUTE_PRIVIF']
                )
            check_ifcfg(host, config['CONFIG_NOVA_COMPUTE_PRIVIF'])
            try:
                bring_up_ifcfg(host, config['CONFIG_NOVA_COMPUTE_PRIVIF'])
            except ScriptRuntimeError as ex:
                # just warn user to do it by himself
                messages.append(str(ex))
        if config['CONFIG_CEILOMETER_INSTALL'] == 'y':
            # message-queue template first, then the ceilometer agent itself
            mq_template = get_mq(config, "nova_ceilometer")
            manifestdata += getManifestTemplate(mq_template)
            manifestdata += getManifestTemplate("nova_ceilometer")
        # Open VNC console ports to the controller only.
        fw_details = dict()
        key = "nova_compute"
        fw_details.setdefault(key, {})
        fw_details[key]['host'] = "%s" % config['CONFIG_CONTROLLER_HOST']
        fw_details[key]['service_name'] = "nova compute"
        fw_details[key]['chain'] = "INPUT"
        fw_details[key]['ports'] = ['5900-5999']
        fw_details[key]['proto'] = "tcp"
        config['FIREWALL_NOVA_COMPUTE_RULES'] = fw_details
        manifestdata += "\n" + createFirewallResources(
            'FIREWALL_NOVA_COMPUTE_RULES'
        )
        manifestdata += "\n" + nova_config_options.getManifestEntry()
        manifestdata += "\n" + ssh_hostkeys
        appendManifestFile(manifestfile, manifestdata)
def create_network_manifest(config, messages):
    """Appends nova-network manifest entries for every network host.

    No-op when neutron is being installed instead of nova-network.
    """
    global compute_hosts, network_hosts
    if config['CONFIG_NEUTRON_INSTALL'] == "y":
        return
    # set default values for VlanManager in case these values are not in config
    for key, value in [('CONFIG_NOVA_NETWORK_VLAN_START', 100),
                       ('CONFIG_NOVA_NETWORK_SIZE', 255),
                       ('CONFIG_NOVA_NETWORK_NUMBER', 1)]:
        config[key] = config.get(key, value)
    api_host = config['CONFIG_CONTROLLER_HOST']
    # multihost mode: more than one network host each runs its own services
    multihost = len(network_hosts) > 1
    config['CONFIG_NOVA_NETWORK_MULTIHOST'] = multihost and 'true' or 'false'
    for host in network_hosts:
        # both the private and public interface must exist and be up
        for i in ('CONFIG_NOVA_NETWORK_PRIVIF', 'CONFIG_NOVA_NETWORK_PUBIF'):
            check_ifcfg(host, config[i])
            try:
                bring_up_ifcfg(host, config[i])
            except ScriptRuntimeError as ex:
                # just warn user to do it by himself
                messages.append(str(ex))
        key = 'CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP'
        config[key] = config[key] == "y"
        # We need to explicitly set the network size from the fixed range's
        # routing prefix (e.g. /24 -> 256 addresses).
        routing_prefix = config['CONFIG_NOVA_NETWORK_FIXEDRANGE'].split('/')[1]
        net_size = 2 ** (32 - int(routing_prefix))
        config['CONFIG_NOVA_NETWORK_FIXEDSIZE'] = str(net_size)
        manifestfile = "%s_nova.pp" % host
        manifestdata = getManifestTemplate("nova_network")
        # Restart libvirt if we deploy nova network on compute
        if host in compute_hosts:
            manifestdata += getManifestTemplate("nova_network_libvirt")
        # in multihost mode each compute host runs nova-api-metadata
        if multihost and host != api_host and host in compute_hosts:
            manifestdata += getManifestTemplate("nova_metadata")
        appendManifestFile(manifestfile, manifestdata)
def create_sched_manifest(config, messages):
    """Appends nova-scheduler manifest entries to the controller manifest."""
    parts = []
    if config['CONFIG_IRONIC_INSTALL'] == 'y':
        parts.append(getManifestTemplate("nova_sched_ironic.pp"))
        # Ironic wants a 1.0 RAM allocation ratio; must be set before the
        # common scheduler template is rendered.
        config['CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO'] = '1.0'
    parts.append(getManifestTemplate("nova_sched.pp"))
    appendManifestFile("%s_nova.pp" % config['CONFIG_CONTROLLER_HOST'],
                       ''.join(parts))
def create_vncproxy_manifest(config, messages):
    """Appends the nova VNC proxy entries to the controller's nova manifest."""
    controller = config['CONFIG_CONTROLLER_HOST']
    appendManifestFile("%s_nova.pp" % controller,
                       getManifestTemplate("nova_vncproxy"))
def create_common_manifest(config, messages):
    """Appends common nova settings (DB connection, metadata host, MQ) to
    every previously generated ``*_nova.pp`` manifest.
    """
    global compute_hosts, network_hosts
    network_type = (config['CONFIG_NEUTRON_INSTALL'] == "y" and
                    'neutron' or 'nova')
    network_multi = len(network_hosts) > 1
    # hosts that legitimately need DB credentials in their manifest
    dbacces_hosts = set([config.get('CONFIG_CONTROLLER_HOST')])
    dbacces_hosts |= network_hosts
    for manifestfile, marker in manifestfiles.getFiles():
        pw_in_sqlconn = False
        if manifestfile.endswith("_nova.pp"):
            # manifest file names look like "<host>_nova.pp"
            host, manifest = manifestfile.split('_', 1)
            host = host.strip()
            if host in compute_hosts and host not in dbacces_hosts:
                # we should omit password in case we are installing only
                # nova-compute to the host
                perms = "nova"
                pw_in_sqlconn = False
            else:
                perms = "nova:%s" % config['CONFIG_NOVA_DB_PW']
                pw_in_sqlconn = True
            sqlconn = "mysql://%s@%s/nova" % (perms,
                                              config['CONFIG_MARIADB_HOST'])
            if pw_in_sqlconn:
                config['CONFIG_NOVA_SQL_CONN_PW'] = sqlconn
            else:
                config['CONFIG_NOVA_SQL_CONN_NOPW'] = sqlconn
            # for nova-network in multihost mode each compute host is metadata
            # host otherwise we use api host
            if (network_type == 'nova' and network_multi and
                    host in compute_hosts):
                metadata = host
            else:
                metadata = config['CONFIG_CONTROLLER_HOST']
            config['CONFIG_NOVA_METADATA_HOST'] = metadata
            data = getManifestTemplate(get_mq(config, "nova_common"))
            # pick the template variant matching the connection string above
            if pw_in_sqlconn:
                data += getManifestTemplate("nova_common_pw")
            else:
                data += getManifestTemplate("nova_common_nopw")
            appendManifestFile(os.path.split(manifestfile)[1], data)
def create_neutron_manifest(config, messages):
    """Appends nova<->neutron wiring entries to every nova manifest.

    No-op when neutron is not being installed.
    """
    if config['CONFIG_NEUTRON_INSTALL'] != "y":
        return
    # Ironic's nova driver requires the no-op firewall driver.
    if config['CONFIG_IRONIC_INSTALL'] == 'y':
        config['CONFIG_NOVA_LIBVIRT_VIF_DRIVER'] = (
            'nova.virt.firewall.NoopFirewallDriver')
    else:
        config['CONFIG_NOVA_LIBVIRT_VIF_DRIVER'] = (
            'nova.virt.libvirt.vif.LibvirtGenericVIFDriver')
    for manifestfile, marker in manifestfiles.getFiles():
        if manifestfile.endswith("_nova.pp"):
            appendManifestFile(os.path.split(manifestfile)[1],
                               getManifestTemplate("nova_neutron"))
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations:
"""PacketCapturesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _create_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        parameters: "_models.PacketCapture",
        **kwargs: Any
    ) -> "_models.PacketCaptureResult":
        """Issue the initial PUT that starts the create-packet-capture LRO."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the PacketCapture model as the request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PacketCapture')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Service replies 201 Created when the capture session is accepted.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    async def begin_create(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        parameters: "_models.PacketCapture",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PacketCaptureResult"]:
        """Create and start a packet capture on the specified VM.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :param parameters: Parameters that define the create packet capture operation.
        :type parameters: ~azure.mgmt.network.v2021_05_01.models.PacketCapture
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PacketCaptureResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_05_01.models.PacketCaptureResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial PUT when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; do not forward them
        # to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polling response into the result model.
            deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> "_models.PacketCaptureResult":
        """Gets a packet capture session by name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PacketCaptureResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2021_05_01.models.PacketCaptureResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 OK is expected for a successful GET.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial DELETE that starts the delete LRO."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 202 Accepted (async delete) or 204 No Content (already gone).
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified packet capture session.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial DELETE when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; do not forward to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body; only invoke the custom cls hook if provided.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    async def _stop_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial POST to the /stop action that starts the stop LRO."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"
        # Construct URL
        url = self._stop_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 OK (completed synchronously) or 202 Accepted (async stop).
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}  # type: ignore
    async def begin_stop(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Stops a specified packet capture session.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial POST when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._stop_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; do not forward to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Stop has no body; only invoke the custom cls hook if provided.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}  # type: ignore
    async def _get_status_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> "_models.PacketCaptureQueryStatusResult":
        """Issue the initial POST for the query-status long-running operation.

        Used internally by :meth:`begin_get_status`. Returns the deserialized
        ``PacketCaptureQueryStatusResult`` from the initial 200/202 response.

        :raises ~azure.core.exceptions.HttpResponseError: for any other status.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        # Default mapping of auth/conflict status codes to exception types;
        # callers may extend or override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"
        # Construct URL
        url = self._get_status_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both 200 and 202 carry a PacketCaptureQueryStatusResult body.
        if response.status_code == 200:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
    async def begin_get_status(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PacketCaptureQueryStatusResult"]:
        """Query the status of a running packet capture session.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param packet_capture_name: The name given to the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_05_01.models.PacketCaptureQueryStatusResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # No continuation token means this is a fresh operation: fire the
        # initial request now.  cls=lambda returns the raw PipelineResponse so
        # the poller can drive the LRO from it.
        if cont_token is None:
            raw_result = await self._get_status_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call and must not leak
        # into the polling method constructed below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polling response into the result model.
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # final-state-via=location: the terminal result is fetched from the
        # Location header, per this operation's LRO contract.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
    def list(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.PacketCaptureListResult"]:
        """Lists all packet capture sessions within the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_05_01.models.PacketCaptureListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the first-page request from the operation URL, or a
            # follow-up request from the service-provided next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds any query parameters.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # First element None: this listing is single-page (the result
            # model defines no next-link field).
            return None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'} # type: ignore
| |
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Generic image-like dataset able to be processed in macro batches.
"""
import logging
import numpy as np
import os
import sys
from threading import Thread
from neon.datasets.dataset import Dataset
from neon.util.param import opt_param, req_param
from neon.util.persist import deserialize
logger = logging.getLogger(__name__)
class MacrobatchDecodeThread(Thread):
    """
    Load and decode a macrobatch of images in a separate thread,
    double buffering.
    Hide the time to transpose and convert (astype).
    """
    def __init__(self, ds):
        # ds: the owning Imageset.  All decoded output is written back into
        # ds's double-buffered lists (img_mini_T, lbl_one_hot, tgt_macro),
        # at slot ds.macro_decode_buf_idx.
        Thread.__init__(self)
        self.ds = ds
    def run(self):
        import imgworker
        bsz = self.ds.batch_size
        b_idx = self.ds.macro_decode_buf_idx
        jdict = self.ds.get_macro_batch()
        betype = self.ds.backend_type
        # This macrobatch could be smaller than macro_size for last macrobatch
        mac_sz = len(jdict['data'])
        self.ds.tgt_macro[b_idx] = \
            jdict['targets'] if 'targets' in jdict else None
        lbl_macro = {k: jdict['labels'][k] for k in self.ds.label_list}
        img_macro = np.zeros((self.ds.macro_size, self.ds.npixels),
                             dtype=np.uint8)
        # Deterministic center crop (no flip) when predicting or when
        # transforms are disabled; otherwise allow random crop/flip.
        do_center = self.ds.predict or not self.ds.dotransforms
        do_flip = self.ds.dotransforms
        imgworker.decode_list(jpglist=jdict['data'],
                              tgt=img_macro[:mac_sz],
                              orig_size=self.ds.output_image_size,
                              crop_size=self.ds.cropped_image_size,
                              center=do_center, flip=do_flip,
                              rgb=self.ds.rgb,
                              nthreads=self.ds.num_workers)
        if mac_sz < self.ds.macro_size:
            img_macro[mac_sz:] = 0
        # Leave behind the partial minibatch
        # NOTE(review): relies on Python 2 integer division (this file also
        # uses raw_input) -- a trailing partial minibatch is dropped.
        self.ds.minis_per_macro[b_idx] = mac_sz / bsz
        self.ds.lbl_one_hot[b_idx] = \
            {lbl: [None for mini_idx in range(self.ds.minis_per_macro[b_idx])]
             for lbl in self.ds.label_list}
        self.ds.img_mini_T[b_idx] = \
            [None for mini_idx in range(self.ds.minis_per_macro[b_idx])]
        for mini_idx in range(self.ds.minis_per_macro[b_idx]):
            s_idx = mini_idx * bsz
            e_idx = (mini_idx + 1) * bsz
            # Transpose to (pixels, batch) and convert to the backend dtype
            # here so the consumer only has to do a device copy.
            self.ds.img_mini_T[b_idx][mini_idx] = \
                img_macro[s_idx:e_idx].T.astype(betype, order='C')
            if self.ds.img_mini_T[b_idx][mini_idx].shape[1] < bsz:
                # Partial minibatch: pad with zero rows up to batch_size.
                tmp = self.ds.img_mini_T[b_idx][mini_idx].shape[0]
                mb_residual = self.ds.img_mini_T[b_idx][mini_idx].shape[1]
                filledbatch = np.vstack((img_macro[s_idx:e_idx],
                                         np.zeros((bsz - mb_residual, tmp))))
                self.ds.img_mini_T[b_idx][mini_idx] = \
                    filledbatch.T.astype(betype, order='C')
            for lbl in self.ds.label_list:
                # One-hot encode labels via identity-matrix row lookup,
                # transposed to (classes, batch).
                hl = np.squeeze(lbl_macro[lbl][s_idx:e_idx])
                self.ds.lbl_one_hot[b_idx][lbl][mini_idx] = \
                    np.eye(self.ds.nclass[lbl])[hl].T.astype(betype, order='C')
        return
class Imageset(Dataset):
    """
    Sets up a macro batched imageset dataset.
    Assumes you have the data already partitioned and in macrobatch format
    Attributes:
        backend (neon.backends.Backend): backend used for this data
        inputs (dict): structure housing the loaded train/test/validation
                       input data
        targets (dict): structure housing the loaded train/test/validation
                        target data
    Kwargs:
        repo_path (str, optional): where to locally host this dataset on disk
    """
    def __init__(self, **kwargs):
        # Optional params get defaults first; anything passed via kwargs
        # (typically from the experiment yaml) overrides them below.
        opt_param(self, ['preprocess_done'], False)
        opt_param(self, ['dotransforms', 'square_crop'], False)
        opt_param(self, ['mean_norm', 'unit_norm'], False)
        opt_param(self, ['tdims'], 0)
        opt_param(self, ['label_list'], ['l_id'])
        opt_param(self, ['num_channels'], 3)
        opt_param(self, ['num_workers'], 6)
        opt_param(self, ['backend_type'], 'np.float32')
        self.__dict__.update(kwargs)
        # Normalize the string dtype spec into an actual numpy dtype.
        if self.backend_type in ['float16', 'np.float16', 'numpy.float16']:
            self.backend_type = np.float16
        elif self.backend_type in ['float32', 'np.float32', 'numpy.float32']:
            self.backend_type = np.float32
        else:
            raise ValueError('Datatype not understood')
        logger.warning("Imageset initialized with dtype %s", self.backend_type)
        req_param(self, ['cropped_image_size', 'output_image_size',
                         'imageset', 'save_dir', 'repo_path', 'macro_size'])
        opt_param(self, ['image_dir'], os.path.join(self.repo_path,
                                                    self.imageset))
        self.rgb = True if self.num_channels == 3 else False
        # Divisor applied in get_mini_batch when unit_norm is set.
        self.norm_factor = 128. if self.mean_norm else 256.
    def __getstate__(self):
        """
        Defines what and how we go about serializing an instance of this class.
        """
        # Thread objects are not picklable; drop the decoder thread handle.
        self.macro_decode_thread = None
        return self.__dict__
    def __setstate__(self, state):
        """
        Defines how we go about deserializing into an instance of this class.
        """
        self.__dict__.update(state)
    def load(self, backend=None, experiment=None):
        # Locate (or interactively create) the macrobatch cache, then merge
        # its metadata (nclass, means, start indices, ...) into this instance.
        bdir = os.path.expanduser(self.save_dir)
        cachefile = os.path.join(bdir, 'dataset_cache.pkl')
        if not os.path.exists(cachefile):
            logger.error("Batch dir cache not found in %s:", cachefile)
            # response = 'Y'
            # NOTE(review): raw_input is Python 2 only.
            response = raw_input("Press Y to create, otherwise exit: ")
            if response == 'Y':
                from neon.util.batch_writer import (BatchWriter,
                                                    BatchWriterImagenet)
                if self.imageset.startswith('I1K'):
                    self.bw = BatchWriterImagenet(**self.__dict__)
                else:
                    self.bw = BatchWriter(**self.__dict__)
                self.bw.run()
                logger.error('Done writing batches - please rerun to train.')
            else:
                logger.error('Exiting...')
                sys.exit()
        cstats = deserialize(cachefile, verbose=False)
        if cstats['macro_size'] != self.macro_size:
            raise NotImplementedError("Cached macro size %d different from "
                                      "specified %d, delete save_dir %s "
                                      "and try again.",
                                      cstats['macro_size'],
                                      self.macro_size,
                                      self.save_dir)
        # Set the max indexes of batches for each from the cache file
        self.maxval = cstats['nval'] + cstats['val_start'] - 1
        self.maxtrain = cstats['ntrain'] + cstats['train_start'] - 1
        # Make sure only those properties not by yaml are updated
        cstats.update(self.__dict__)
        self.__dict__.update(cstats)
        # Should also put (in addition to nclass), number of train/val images
        req_param(self, ['ntrain', 'nval', 'train_start', 'val_start',
                         'train_mean', 'val_mean', 'labels_dict'])
    def get_macro_batch(self):
        # Advance the macrobatch pointer cyclically within
        # [startb, startb + nmacros) and deserialize that batch file.
        self.macro_idx = (self.macro_idx + 1 - self.startb) \
            % self.nmacros + self.startb
        fname = os.path.join(self.save_dir,
                             'data_batch_{:d}'.format(self.macro_idx))
        return deserialize(os.path.expanduser(fname), verbose=False)
    def del_mini_batch_producer(self):
        # Wait for any in-flight decode before releasing device buffers.
        if self.macro_decode_thread is not None:
            self.macro_decode_thread.join()
        del self.inp_be
    def init_mini_batch_producer(self, batch_size, setname, predict=False):
        # Prepare host/device buffers and decode-thread bookkeeping for
        # iterating `setname`; returns the number of minibatches available.
        # local shortcuts
        sbe = self.backend.empty
        betype = self.backend_type
        sn = 'val' if (setname == 'validation') else setname
        osz = self.output_image_size
        csz = self.cropped_image_size
        self.npixels = csz * csz * self.num_channels
        self.startb = getattr(self, sn + '_start')
        self.nmacros = getattr(self, 'n' + sn)
        self.maxmacros = getattr(self, 'max' + sn)
        if self.startb + self.nmacros - 1 > self.maxmacros:
            self.nmacros = self.maxmacros - self.startb + 1
            logger.warning("Truncating n%s to %d", sn, self.nmacros)
        self.endb = self.startb + self.nmacros - 1
        # The final macrobatch of a partition may be partial; account for it.
        if self.endb == self.maxmacros:
            nrecs = getattr(self, sn + '_nrec') % self.macro_size + \
                (self.nmacros - 1) * self.macro_size
        else:
            nrecs = self.nmacros * self.macro_size
        num_batches = nrecs / batch_size
        # Center-crop the stored dataset mean image to the crop size and
        # stage it on the device for mean subtraction in get_mini_batch.
        self.mean_img = getattr(self, sn + '_mean')
        self.mean_img.shape = (self.num_channels, osz, osz)
        pad = (osz - csz) / 2
        self.mean_crop = self.mean_img[:, pad:(pad + csz), pad:(pad + csz)]
        self.mean_be = sbe((self.npixels, 1), dtype=betype)
        self.mean_be.copy_from(self.mean_crop.reshape(
            (self.npixels, 1)).astype(np.float32))
        # Control params for macrobatch decoding thread
        self.macro_active_buf_idx = 0
        self.macro_decode_buf_idx = 0
        self.macro_num_decode_buf = 2
        self.macro_decode_thread = None
        self.batch_size = batch_size
        self.predict = predict
        self.minis_per_macro = [self.macro_size / batch_size
                                for i in range(self.macro_num_decode_buf)]
        if self.macro_size % batch_size != 0:
            raise ValueError('self.macro_size not divisible by batch_size')
        # Start at endb so the first get_macro_batch() wraps around to startb.
        self.macro_idx = self.endb
        self.mini_idx = -1
        # Allocate space for host side image, targets and labels
        self.img_mini_T = [None for i in range(self.macro_num_decode_buf)]
        self.tgt_macro = [None for i in range(self.macro_num_decode_buf)]
        self.lbl_one_hot = [None for i in range(self.macro_num_decode_buf)]
        # Allocate space for device side buffers
        inp_shape = (self.npixels, self.batch_size)
        self.inp_be = sbe(inp_shape, dtype=betype)
        self.inp_be.name = "minibatch"
        lbl_shape = {lbl: (self.nclass[lbl], self.batch_size)
                     for lbl in self.label_list}
        self.lbl_be = {lbl: sbe(lbl_shape[lbl], dtype=betype)
                       for lbl in self.label_list}
        # Allocate space for device side targets if necessary
        tgt_shape = (self.tdims, self.batch_size)
        self.tgt_be = sbe(tgt_shape, dtype=betype) if self.tdims != 0 else None
        return num_batches
    def get_mini_batch(self, batch_idx):
        # Return the next (inputs, targets, labels) device tuple, kicking off
        # background decode of the next macrobatch whenever a new one starts.
        b_idx = self.macro_active_buf_idx
        self.mini_idx = (self.mini_idx + 1) % self.minis_per_macro[b_idx]
        # Decode macrobatches in a background thread,
        # except for the first one which blocks
        if self.mini_idx == 0:
            if self.macro_decode_thread is not None:
                # No-op unless all mini finish faster than one macro
                self.macro_decode_thread.join()
            else:
                # special case for first run through
                self.macro_decode_thread = MacrobatchDecodeThread(self)
                self.macro_decode_thread.start()
                self.macro_decode_thread.join()
            # usual case for kicking off a background macrobatch thread
            self.macro_active_buf_idx = self.macro_decode_buf_idx
            self.macro_decode_buf_idx = \
                (self.macro_decode_buf_idx + 1) % self.macro_num_decode_buf
            self.macro_decode_thread = MacrobatchDecodeThread(self)
            self.macro_decode_thread.start()
        # All minibatches except for the 0th just copy pre-prepared data
        b_idx = self.macro_active_buf_idx
        s_idx = self.mini_idx * self.batch_size
        e_idx = (self.mini_idx + 1) * self.batch_size
        # See if we are a partial minibatch
        self.inp_be.copy_from(self.img_mini_T[b_idx][self.mini_idx])
        # Try to avoid this if possible as it inhibits async stream copy
        if self.mean_norm:
            self.backend.subtract(self.inp_be, self.mean_be, self.inp_be)
        if self.unit_norm:
            self.backend.divide(self.inp_be, self.norm_factor, self.inp_be)
        for lbl in self.label_list:
            self.lbl_be[lbl].copy_from(
                self.lbl_one_hot[b_idx][lbl][self.mini_idx])
        if self.tgt_be is not None:
            self.tgt_be.copy_from(
                self.tgt_macro[b_idx][:, s_idx:e_idx]
                .astype(self.backend_type))
        return self.inp_be, self.tgt_be, self.lbl_be
    def has_set(self, setname):
        # Only train and validation partitions exist in macrobatch format.
        return True if (setname in ['train', 'validation']) else False
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import random
import importlib
from twisted.python import log
sys.path.append('.')
import regex2dfa
import fte.encoder
import fte.bit_ops
import marionette_tg.channel
EVENT_LOOP_FREQUENCY_S = 0.001
# the following varibles are reserved and shouldn't be passed down
# to spawned models.
RESERVED_LOCAL_VARS = ['party','model_instance_id','model_uuid']
class PIOA(object):
    """Probabilistic I/O automaton driving one marionette model instance.

    Walks a state machine (states_/actions_) over a network channel,
    choosing transitions with a seeded RNG that both parties keep in sync
    through a shared model_instance_id.
    """
    def __init__(self, party, first_sender):
        super(PIOA, self).__init__()
        self.actions_ = []
        self.channel_ = None
        self.channel_requested_ = False
        self.current_state_ = 'start'
        self.first_sender_ = first_sender
        self.next_state_ = None
        self.marionette_state_ = MarionetteSystemState()
        self.marionette_state_.set_local("party", party)
        self.party_ = party
        self.port_ = None
        self.transport_protocol_ = None
        self.rng_ = None
        # Number of transitions taken before the RNGs were synchronized;
        # replayed in check_rng_state() to catch up.
        self.history_len_ = 0
        self.states_ = {}
        self.success_ = False
        # The first sender generates the shared model instance id and can
        # seed its RNG immediately; the peer syncs later.
        if self.party_ == first_sender:
            self.marionette_state_.set_local(
                "model_instance_id", fte.bit_ops.bytes_to_long(os.urandom(4)))
            self.rng_ = random.Random()
            self.rng_.seed(
                self.marionette_state_.get_local("model_instance_id"))
    def do_precomputations(self):
        # Pre-build (and cache) FTE encoders for all fte send* actions so the
        # expensive DFA construction happens before traffic starts.
        for action in self.actions_:
            if action.get_module() == 'fte' and action.get_method().startswith('send'):
                [regex, msg_len] = action.get_args()
                self.marionette_state_.get_fte_obj(regex, msg_len)
    def execute(self, reactor):
        # One event-loop tick: attempt a transition, then reschedule until
        # the automaton reaches its terminal state.
        if self.isRunning():
            self.transition()
            reactor.callLater(EVENT_LOOP_FREQUENCY_S, self.execute, reactor)
        else:
            self.channel_.close()
    def check_channel_state(self):
        # Clients lazily open their channel (at most one request in flight);
        # for other parties the channel is expected to be set externally.
        if self.party_ == "client":
            if not self.channel_:
                if not self.channel_requested_:
                    marionette_tg.channel.open_new_channel(self.get_transport_protocol(),
                                                           self.get_port(), self.set_channel)
                    self.channel_requested_ = True
        return (self.channel_ != None)
    def set_channel(self, channel):
        # Callback invoked by marionette_tg.channel.open_new_channel.
        self.channel_ = channel
    def check_rng_state(self):
        # Once the shared model_instance_id is known, seed our RNG with it
        # and fast-forward through the transitions already taken while
        # unsynchronized so both parties agree on the current state.
        if self.marionette_state_.get_local("model_instance_id"):
            if not self.rng_:
                self.rng_ = random.Random()
                self.rng_.seed(
                    self.marionette_state_.get_local("model_instance_id"))
                self.current_state_ = 'start'
                for i in range(self.history_len_):
                    self.current_state_ = self.states_[
                        self.current_state_].transition(self.rng_)
                self.next_state_ = None
                # Reset history length once RNGs are sync'd
                self.history_len_ = 0
    def determine_action_block(self, src_state, dst_state):
        # Collect the actions applicable to the src->dst edge.
        # NOTE(review): action.execute() appears to act as an applicability
        # test here (None means "not applicable") -- confirm it is side-effect
        # free in this context.
        retval = []
        for action in self.actions_:
            action_name = self.states_[src_state].transitions_[dst_state][0]
            success = action.execute(self.party_, action_name)
            if success is not None:
                retval.append(action)
        return retval
    def get_potential_transitions(self):
        # With a synced RNG the next state is determined (and cached in
        # next_state_); otherwise any positive-probability successor is a
        # candidate.
        retval = []
        if self.rng_:
            if not self.next_state_:
                self.next_state_ = self.states_[
                    self.current_state_].transition(self.rng_)
            retval += [self.next_state_]
        else:
            for transition in \
                    self.states_[self.current_state_].transitions_.keys():
                if self.states_[self.current_state_].transitions_[transition][1]>0:
                    retval += [transition]
        return retval
    def advance_to_next_state(self):
        retval = False
        # get the list of possible transitions we could make
        potential_transitions = self.get_potential_transitions()
        assert len(potential_transitions) > 0
        # attempt to do a normal transition
        fatal = 0
        success = False
        for dst_state in potential_transitions:
            action_block = self.determine_action_block(self.current_state_, dst_state)
            try:
                success = self.eval_action_block(action_block)
            except Exception as e:
                log.msg("EXCEPTION: %s" % (str(e)))
                fatal += 1
            finally:
                if success:
                    break
        # if all potential transitions are fatal, attempt the error transition
        if not success and fatal == len(potential_transitions):
            src_state = self.current_state_
            dst_state = self.states_[self.current_state_].get_error_transition()
            if dst_state:
                action_block = self.determine_action_block(src_state, dst_state)
                success = self.eval_action_block(action_block)
        # if we have a successful transition, update our state info.
        if success:
            self.history_len_ += 1
            self.current_state_ = dst_state
            self.next_state_ = None
            retval = True
            if self.current_state_ == 'dead':
                self.success_ = True
        return retval
    def eval_action_block(self, action_block):
        # Evaluate actions in order until one succeeds.  An empty block is a
        # no-op transition and counts as success.
        retval = False
        if len(action_block)==0:
            retval = True
        elif len(action_block)>=1:
            for action_obj in action_block:
                if action_obj.get_regex_match_incoming():
                    # Guarded action: only run it when the peeked incoming
                    # buffer matches the expected pattern.
                    incoming_buffer = self.channel_.peek()
                    m = re.search(action_obj.get_regex_match_incoming(), incoming_buffer)
                    if m:
                        retval = self.eval_action(action_obj)
                else:
                    retval = self.eval_action(action_obj)
                if retval: break
        return retval
    def transition(self):
        # Advance one step if the channel is up, refreshing RNG sync first.
        success = False
        if self.check_channel_state():
            self.check_rng_state()
            success = self.advance_to_next_state()
        return success
    def replicate(self):
        # Clone the automaton for a new connection: structure (actions,
        # states, globals), model_uuid and endpoint settings are shared, but
        # per-connection runtime state (channel, RNG, history) starts fresh.
        retval = PIOA(self.party_,
                      self.first_sender_)
        retval.actions_ = self.actions_
        retval.states_ = self.states_
        retval.marionette_state_.global_ = self.marionette_state_.global_
        model_uuid = self.marionette_state_.get_local("model_uuid")
        retval.marionette_state_.set_local("model_uuid", model_uuid)
        retval.port_ = self.port_
        retval.transport_protocol_ = self.transport_protocol_
        return retval
    def isRunning(self):
        # 'dead' is the single terminal state of every model.
        return (self.current_state_ != "dead")
    def eval_action(self, action_obj):
        # Dispatch to the plugin function
        # marionette_tg.plugins._<module>.<method>(channel, state, args).
        module = action_obj.get_module()
        method = action_obj.get_method()
        args = action_obj.get_args()
        i = importlib.import_module("marionette_tg.plugins._" + module)
        method_obj = getattr(i, method)
        success = method_obj(self.channel_, self.marionette_state_, args)
        return success
    def add_state(self, name):
        # Idempotent: re-adding an existing state keeps its transitions.
        if not name in list(self.states_.keys()):
            self.states_[name] = PAState(name)
    def set_multiplexer_outgoing(self, multiplexer):
        self.marionette_state_.set_global("multiplexer_outgoing", multiplexer)
    def set_multiplexer_incoming(self, multiplexer):
        self.marionette_state_.set_global("multiplexer_incoming", multiplexer)
    def stop(self):
        self.current_state_ = "dead"
    def set_port(self, port):
        self.port_ = port
    def get_port(self):
        # port_ may be a literal int/str or the name of a local variable
        # holding the port number.
        retval = None
        try:
            retval = int(self.port_)
        except ValueError:
            retval = self.marionette_state_.get_local(self.port_)
        return retval
    def set_transport_protocol(self, transport_protocol):
        self.transport_protocol_ = transport_protocol
    def get_transport_protocol(self):
        return self.transport_protocol_
    def set_local(self, key, value):
        self.marionette_state_.set_local(key, value)
    def set_global(self, key, value):
        self.marionette_state_.set_global(key, value)
    def get_local(self, key):
        return self.marionette_state_.get_local(key)
    def get_global(self, key):
        return self.marionette_state_.get_global(key)
    def get_success(self):
        return self.success_
class PAState(object):
    """A single state of a probabilistic automaton.

    Holds outgoing transitions as ``dst_state -> [action_name, probability]``
    plus an optional error-transition target.
    """
    def __init__(self, name):
        self.name_ = name
        # dst state name -> [action_name, probability]
        self.transitions_ = {}
        self.format_type_ = None
        self.format_value_ = None
        # Fallback destination used when all normal transitions fail.
        self.error_state_ = None
    def add_transition(self, dst, action_name, probability):
        """Register (or replace) the transition to *dst*."""
        self.transitions_[dst] = [action_name, float(probability)]
    def set_error_transition(self, error_state):
        self.error_state_ = error_state
    def get_error_transition(self):
        return self.error_state_
    def transition(self, rng):
        """Pick the next state.

        With an *rng* and multiple transitions, samples proportionally to the
        stored probabilities (zero-probability edges are skipped); otherwise
        returns the single registered destination.
        """
        assert (rng or len(self.transitions_) == 1)
        if rng and len(self.transitions_) > 1:
            coin = rng.random()
            # Walk the CDF until it reaches the drawn value.
            # (renamed from `sum`, which shadowed the builtin)
            cumulative = 0
            for state in self.transitions_:
                if self.transitions_[state][1] == 0:
                    continue
                cumulative += self.transitions_[state][1]
                if cumulative >= coin:
                    break
        else:
            state = list(self.transitions_.keys())[0]
        return state
class MarionetteSystemState(object):
    """Key/value store for marionette model state.

    Keeps two independent namespaces: ``global_`` (shared across replicated
    models by assignment in PIOA.replicate) and ``local_`` (per instance).
    Missing keys read as ``None``.
    """
    def __init__(self):
        self.global_ = {}
        self.local_ = {}
    def set_global(self, key, val):
        self.global_[key] = val
    def get_global(self, key):
        return self.global_.get(key)
    def set_local(self, key, val):
        self.local_[key] = val
    def get_local(self, key):
        return self.local_.get(key)
    def get_fte_obj(self, regex, msg_len):
        """Return the cached DfaEncoder for (regex, msg_len), building it on
        first use and memoizing it in the global namespace."""
        cache_key = 'fte_obj-' + regex + str(msg_len)
        if not self.get_global(cache_key):
            dfa = regex2dfa.regex2dfa(regex)
            encoder = fte.encoder.DfaEncoder(dfa, msg_len)
            self.set_global(cache_key, encoder)
        return self.get_global(cache_key)
| |
# Copyright 2015 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE
from cinder.volume import driver
from cinder.volume.drivers.nexenta.nexentaedge import jsonrpc
from cinder.volume.drivers.nexenta import options
LOG = logging.getLogger(__name__)
class NexentaEdgeISCSIDriver(driver.ISCSIDriver):
"""Executes volume driver commands on NexentaEdge cluster.
Version history:
1.0.0 - Initial driver version.
1.0.1 - Moved opts to options.py.
"""
VERSION = '1.0.1'
    def __init__(self, *args, **kwargs):
        """Register NexentaEdge option groups and cache connection, iSCSI and
        dataset settings from the driver configuration."""
        super(NexentaEdgeISCSIDriver, self).__init__(*args, **kwargs)
        if self.configuration:
            self.configuration.append_config_values(
                options.NEXENTA_CONNECTION_OPTS)
            self.configuration.append_config_values(
                options.NEXENTA_ISCSI_OPTS)
            self.configuration.append_config_values(
                options.NEXENTA_DATASET_OPTS)
            self.configuration.append_config_values(
                options.NEXENTA_EDGE_OPTS)
        self.restapi_protocol = self.configuration.nexenta_rest_protocol
        self.restapi_host = self.configuration.nexenta_rest_address
        self.restapi_port = self.configuration.nexenta_rest_port
        self.restapi_user = self.configuration.nexenta_rest_user
        self.restapi_password = self.configuration.nexenta_rest_password
        self.iscsi_service = self.configuration.nexenta_iscsi_service
        self.bucket_path = self.configuration.nexenta_lun_container
        self.blocksize = self.configuration.nexenta_blocksize
        self.chunksize = self.configuration.nexenta_chunksize
        # nexenta_lun_container is expected in 'cluster/tenant/bucket' form.
        self.cluster, self.tenant, self.bucket = self.bucket_path.split('/')
        self.bucket_url = ('clusters/' + self.cluster + '/tenants/' +
                           self.tenant + '/buckets/' + self.bucket)
        self.iscsi_target_port = (self.configuration.
                                  nexenta_iscsi_target_portal_port)
        # Resolved in do_setup() from the service's X-VIPS data or from
        # nexenta_client_address.
        self.target_vip = None
@property
def backend_name(self):
backend_name = None
if self.configuration:
backend_name = self.configuration.safe_get('volume_backend_name')
if not backend_name:
backend_name = self.__class__.__name__
return backend_name
    def do_setup(self, context):
        """Connect to the NexentaEdge REST API and resolve the iSCSI target
        name and portal VIP for the configured service.

        :raises exception.VolumeBackendAPIException: if the service cannot be
            queried or no VIP / client address is configured.
        """
        if self.restapi_protocol == 'auto':
            # 'auto': start with plain http and let the proxy negotiate.
            protocol, auto = 'http', True
        else:
            protocol, auto = self.restapi_protocol, False
        try:
            self.restapi = jsonrpc.NexentaEdgeJSONProxy(
                protocol, self.restapi_host, self.restapi_port, '/',
                self.restapi_user, self.restapi_password, auto=auto)
            rsp = self.restapi.get('service/'
                                   + self.iscsi_service + '/iscsi/status')
            # Status payload is keyed by host; the target name is the third
            # whitespace-separated token on the first line of that value.
            data_keys = rsp['data'][list(rsp['data'].keys())[0]]
            self.target_name = data_keys.split('\n', 1)[0].split(' ')[2]
            rsp = self.restapi.get('service/' + self.iscsi_service)
            if 'X-VIPS' in rsp['data']:
                # X-VIPS is JSON: a list of VIP groups.  Pick the relevant
                # entry's 'ip' and strip any '/prefix' suffix.
                vips = json.loads(rsp['data']['X-VIPS'])
                if len(vips[0]) == 1:
                    self.target_vip = vips[0][0]['ip'].split('/', 1)[0]
                else:
                    self.target_vip = vips[0][1]['ip'].split('/', 1)[0]
            else:
                # No VIP advertised: fall back to an explicitly configured
                # client address.
                self.target_vip = self.configuration.safe_get(
                    'nexenta_client_address')
                if not self.target_vip:
                    LOG.error(_LE('No VIP configured for service %s'),
                              self.iscsi_service)
                    raise exception.VolumeBackendAPIException(
                        _('No service VIP configured and '
                          'no nexenta_client_address'))
        except exception.VolumeBackendAPIException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Error verifying iSCSI service %(serv)s on '
                                  'host %(hst)s'), {'serv': self.iscsi_service,
                                                    'hst': self.restapi_host})
def check_for_setup_error(self):
try:
self.restapi.get(self.bucket_url + '/objects/')
except exception.VolumeBackendAPIException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error verifying LUN container %(bkt)s'),
{'bkt': self.bucket_path})
def _get_lun_number(self, volname):
try:
rsp = self.restapi.put(
'service/' + self.iscsi_service + '/iscsi/number',
{
'objectPath': self.bucket_path + '/' + volname
})
except exception.VolumeBackendAPIException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error retrieving LUN %(vol)s number'),
{'vol': volname})
return rsp['data']
    def _get_target_address(self, volname):
        """Return the iSCSI portal address (service VIP).

        ``volname`` is unused: every LUN of the service is exposed through
        the single VIP resolved in :meth:`do_setup`.
        """
        return self.target_vip
def _get_provider_location(self, volume):
return '%(host)s:%(port)s,1 %(name)s %(number)s' % {
'host': self._get_target_address(volume['name']),
'port': self.iscsi_target_port,
'name': self.target_name,
'number': self._get_lun_number(volume['name'])
}
def create_volume(self, volume):
    """Create an iSCSI-exported volume object in the service bucket."""
    body = {
        'objectPath': self.bucket_path + '/' + volume['name'],
        'volSizeMB': int(volume['size']) * units.Ki,
        'blockSize': self.blocksize,
        'chunkSize': self.chunksize
    }
    try:
        self.restapi.post('service/' + self.iscsi_service + '/iscsi', body)
    except exception.VolumeBackendAPIException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Error creating volume'))
def delete_volume(self, volume):
    """Delete a volume object and its iSCSI export."""
    payload = {'objectPath': self.bucket_path + '/' + volume['name']}
    try:
        self.restapi.delete('service/' + self.iscsi_service + '/iscsi',
                            payload)
    except exception.VolumeBackendAPIException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Error deleting volume'))
def extend_volume(self, volume, new_size):
    """Resize an existing volume.

    :param volume: volume reference (dict-like; uses 'name')
    :param new_size: new size in GiB
    :raises exception.VolumeBackendAPIException: if the resize fails.
    """
    try:
        self.restapi.put(
            'service/' + self.iscsi_service + '/iscsi/resize',
            {'objectPath': self.bucket_path + '/' + volume['name'],
             # Cast like create_volume() does: if new_size arrived as a
             # string, '*' would silently repeat it instead of scaling.
             'newSizeMB': int(new_size) * units.Ki})
    except exception.VolumeBackendAPIException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Error extending volume'))
def create_volume_from_snapshot(self, volume, snapshot):
    """Create a new volume as a clone of an existing snapshot."""
    payload = {
        'objectPath': self.bucket_path + '/' + snapshot['volume_name'],
        'clonePath': self.bucket_path + '/' + volume['name'],
        'snapName': snapshot['name']
    }
    try:
        self.restapi.put(
            'service/' + self.iscsi_service + '/iscsi/snapshot/clone',
            payload)
    except exception.VolumeBackendAPIException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Error cloning volume'))
def create_snapshot(self, snapshot):
    """Create a snapshot of an existing volume object."""
    payload = {
        'objectPath': self.bucket_path + '/' + snapshot['volume_name'],
        'snapName': snapshot['name']
    }
    try:
        self.restapi.post(
            'service/' + self.iscsi_service + '/iscsi/snapshot', payload)
    except exception.VolumeBackendAPIException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Error creating snapshot'))
def delete_snapshot(self, snapshot):
    """Delete a snapshot of a volume object."""
    payload = {
        'objectPath': self.bucket_path + '/' + snapshot['volume_name'],
        'snapName': snapshot['name']
    }
    try:
        self.restapi.delete(
            'service/' + self.iscsi_service + '/iscsi/snapshot', payload)
    except exception.VolumeBackendAPIException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Error deleting snapshot'))
def create_cloned_volume(self, volume, src_vref):
    """Clone an existing volume object and expose the copy over iSCSI."""
    clone_url = (self.bucket_url + '/objects/' +
                 src_vref['name'] + '/clone')
    clone_spec = {
        'tenant_name': self.tenant,
        'bucket_name': self.bucket,
        'object_name': volume['name']
    }
    iscsi_spec = {
        'objectPath': self.bucket_path + '/' + volume['name'],
        'volSizeMB': int(src_vref['size']) * units.Ki,
        'blockSize': self.blocksize,
        'chunkSize': self.chunksize
    }
    try:
        # First copy the backing object, then register the copy as a LUN.
        self.restapi.post(clone_url, clone_spec)
        self.restapi.post('service/' + self.iscsi_service + '/iscsi',
                          iscsi_spec)
    except exception.VolumeBackendAPIException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Error creating cloned volume'))
def create_export(self, context, volume, connector=None):
    """Return export info (provider_location) for *volume*."""
    location = self._get_provider_location(volume)
    return {'provider_location': location}
def ensure_export(self, context, volume):
    """No-op: exports are maintained by the NexentaEdge service itself."""
    return None
def remove_export(self, context, volume):
    """No-op: nothing to tear down; the service owns the export."""
    return None
def local_path(self, volume):
    """This backend exposes volumes over iSCSI only; no local device path."""
    raise NotImplementedError
def get_volume_stats(self, refresh=False):
    """Report backend capabilities/statistics for the Cinder scheduler.

    Capacity is reported as 'unknown' because the cluster does not expose
    per-bucket capacity through this API.
    """
    location_info = '%(driver)s:%(host)s:%(bucket)s' % {
        'driver': self.__class__.__name__,
        'host': self._get_target_address(None),
        'bucket': self.bucket_path
    }
    stats = {
        'vendor_name': 'Nexenta',
        'driver_version': self.VERSION,
        'storage_protocol': 'iSCSI',
        'reserved_percentage': 0,
        'total_capacity_gb': 'unknown',
        'free_capacity_gb': 'unknown',
        'QoS_support': False,
        'volume_backend_name': self.backend_name,
        'location_info': location_info,
        'iscsi_target_portal_port': self.iscsi_target_port,
        'restapi_url': self.restapi.url
    }
    return stats
# ---- end of NexentaEdge driver fragment (concatenation artifact removed) ----
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
# Public API of the pyspark.ml.clustering module.
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
           'KMeans', 'KMeansModel',
           'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
           'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
    """
    .. note:: Experimental

    Summary of a clustering algorithm's results on a training dataset.

    .. versionadded:: 2.1.0
    """

    @property
    @since("2.1.0")
    def predictionCol(self):
        """
        Name of the column holding predicted cluster labels in `predictions`.
        """
        return self._call_java("predictionCol")

    @property
    @since("2.1.0")
    def predictions(self):
        """
        DataFrame output by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.1.0")
    def featuresCol(self):
        """
        Name of the input features column in `predictions`.
        """
        return self._call_java("featuresCol")

    @property
    @since("2.1.0")
    def k(self):
        """
        Number of clusters the model was trained with.
        """
        return self._call_java("k")

    @property
    @since("2.1.0")
    def cluster(self):
        """
        DataFrame of predicted cluster centers for each training data point.
        """
        return self._call_java("cluster")

    @property
    @since("2.1.0")
    def clusterSizes(self):
        """
        Number of training data points contained in each cluster.
        """
        return self._call_java("clusterSizes")
class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    Model produced by :py:class:`GaussianMixture`.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def weights(self):
        """
        Mixing weights of the k Gaussian components. Together they form a
        multinomial distribution: weights[i] is the weight of Gaussian i,
        and all weights sum to 1.
        """
        return self._call_java("weights")

    @property
    @since("2.0.0")
    def gaussiansDF(self):
        """
        The fitted Gaussian components as a DataFrame, one row per
        distribution, with two columns: mean (Vector) and cov (Matrix).
        """
        return self._call_java("gaussiansDF")

    @property
    @since("2.1.0")
    def hasSummary(self):
        """
        Whether a training summary is attached to this model instance.
        """
        return self._call_java("hasSummary")

    @property
    @since("2.1.0")
    def summary(self):
        """
        Training summary (e.g. cluster assignments, cluster sizes) for the
        training set.

        :raises RuntimeError: if no summary exists for this model.
        """
        if not self.hasSummary:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
        return GaussianMixtureSummary(self._call_java("summary"))
@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
                      HasProbabilityCol, JavaMLWritable, JavaMLReadable):
    """
    GaussianMixture clustering.
    This class performs expectation maximization for multivariate Gaussian
    Mixture Models (GMMs). A GMM represents a composite distribution of
    independent Gaussian distributions with associated "mixing" weights
    specifying each's contribution to the composite.

    Given a set of sample points, this class will maximize the log-likelihood
    for a mixture of k Gaussians, iterating until the log-likelihood changes by
    less than convergenceTol, or until it has reached the max number of iterations.
    While this process is generally guaranteed to converge, it is not guaranteed
    to find a global optimum.

    .. note:: For high-dimensional data (with many features), this algorithm may perform poorly.
        This is due to high-dimensional data (a) making it difficult to cluster at all
        (based on statistical/theoretical arguments) and (b) numerical issues with
        Gaussian distributions.

    >>> from pyspark.ml.linalg import Vectors

    >>> data = [(Vectors.dense([-0.1, -0.05 ]),),
    ...         (Vectors.dense([-0.01, -0.1]),),
    ...         (Vectors.dense([0.9, 0.8]),),
    ...         (Vectors.dense([0.75, 0.935]),),
    ...         (Vectors.dense([-0.83, -0.68]),),
    ...         (Vectors.dense([-0.91, -0.76]),)]
    >>> df = spark.createDataFrame(data, ["features"])
    >>> gm = GaussianMixture(k=3, tol=0.0001,
    ...                      maxIter=10, seed=10)
    >>> model = gm.fit(df)
    >>> model.hasSummary
    True
    >>> summary = model.summary
    >>> summary.k
    3
    >>> summary.clusterSizes
    [2, 2, 2]
    >>> summary.logLikelihood
    8.14636...
    >>> weights = model.weights
    >>> len(weights)
    3
    >>> model.gaussiansDF.select("mean").head()
    Row(mean=DenseVector([0.825, 0.8675]))
    >>> model.gaussiansDF.select("cov").head()
    Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
    >>> transformed = model.transform(df).select("features", "prediction")
    >>> rows = transformed.collect()
    >>> rows[4].prediction == rows[5].prediction
    True
    >>> rows[2].prediction == rows[3].prediction
    True
    >>> gmm_path = temp_path + "/gmm"
    >>> gm.save(gmm_path)
    >>> gm2 = GaussianMixture.load(gmm_path)
    >>> gm2.getK()
    3
    >>> model_path = temp_path + "/gmm_model"
    >>> model.save(model_path)
    >>> model2 = GaussianMixtureModel.load(model_path)
    >>> model2.hasSummary
    False
    >>> model2.weights == model.weights
    True
    >>> model2.gaussiansDF.select("mean").head()
    Row(mean=DenseVector([0.825, 0.8675]))
    >>> model2.gaussiansDF.select("cov").head()
    Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))

    .. versionadded:: 2.0.0
    """

    # Number of mixture components; mirrored to the underlying Scala param.
    k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
              "Must be > 1.", typeConverter=TypeConverters.toInt)

    @keyword_only
    def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
                 probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
        """
        __init__(self, featuresCol="features", predictionCol="prediction", k=2, \
                 probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
        """
        super(GaussianMixture, self).__init__()
        # Wrap the JVM-side estimator; params set here are transferred to it.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
                                            self.uid)
        self._setDefault(k=2, tol=0.01, maxIter=100)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return GaussianMixtureModel(java_model)

    @keyword_only
    @since("2.0.0")
    def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
                  probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
        """
        setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
                  probabilityCol="probability", tol=0.01, maxIter=100, seed=None)

        Sets params for GaussianMixture.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("2.0.0")
    def getK(self):
        """
        Gets the value of `k` or its default value.
        """
        return self.getOrDefault(self.k)
class GaussianMixtureSummary(ClusteringSummary):
    """
    .. note:: Experimental

    Summary of Gaussian mixture clustering on a training dataset.

    .. versionadded:: 2.1.0
    """

    @property
    @since("2.1.0")
    def probabilityCol(self):
        """
        Name of the column in `predictions` holding each cluster's
        predicted probability.
        """
        return self._call_java("probabilityCol")

    @property
    @since("2.1.0")
    def probability(self):
        """
        DataFrame of per-cluster probabilities for each training data point.
        """
        return self._call_java("probability")

    @property
    @since("2.2.0")
    def logLikelihood(self):
        """
        Total log-likelihood of this model on the given data.
        """
        return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
    """
    .. note:: Experimental

    Summary of a KMeans model over a training dataset.

    .. versionadded:: 2.1.0
    """

    @property
    @since("2.4.0")
    def trainingCost(self):
        """
        K-means cost: the sum of squared distances from each point in the
        training dataset to its nearest centroid (sklearn calls this inertia).
        """
        return self._call_java("trainingCost")
class KMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    Model produced by :py:class:`KMeans`.

    .. versionadded:: 1.5.0
    """

    @since("1.5.0")
    def clusterCenters(self):
        """Return the fitted cluster centers as a list of NumPy arrays."""
        centers = self._call_java("clusterCenters")
        return [center.toArray() for center in centers]

    @since("2.0.0")
    def computeCost(self, dataset):
        """
        Return the K-means cost (sum of squared distances of points to
        their nearest center) for this model on the given data.

        ..note:: Deprecated in 2.4.0. It will be removed in 3.0.0. Use
            ClusteringEvaluator instead. You can also get the cost on the
            training dataset in the summary.
        """
        warnings.warn("Deprecated in 2.4.0. It will be removed in 3.0.0. Use ClusteringEvaluator "
                      "instead. You can also get the cost on the training dataset in the summary.",
                      DeprecationWarning)
        return self._call_java("computeCost", dataset)

    @property
    @since("2.1.0")
    def hasSummary(self):
        """
        Whether a training summary is attached to this model instance.
        """
        return self._call_java("hasSummary")

    @property
    @since("2.1.0")
    def summary(self):
        """
        Training summary (e.g. cluster assignments, cluster sizes) for the
        training set.

        :raises RuntimeError: if no summary exists for this model.
        """
        if not self.hasSummary:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
        return KMeansSummary(self._call_java("summary"))
@inherit_doc
class KMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol, HasMaxIter,
             HasTol, HasSeed, JavaMLWritable, JavaMLReadable):
    """
    K-means clustering with a k-means++ like initialization mode
    (the k-means|| algorithm by Bahmani et al).

    >>> from pyspark.ml.linalg import Vectors
    >>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
    ...         (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
    >>> df = spark.createDataFrame(data, ["features"])
    >>> kmeans = KMeans(k=2, seed=1)
    >>> model = kmeans.fit(df)
    >>> centers = model.clusterCenters()
    >>> len(centers)
    2
    >>> model.computeCost(df)
    2.000...
    >>> transformed = model.transform(df).select("features", "prediction")
    >>> rows = transformed.collect()
    >>> rows[0].prediction == rows[1].prediction
    True
    >>> rows[2].prediction == rows[3].prediction
    True
    >>> model.hasSummary
    True
    >>> summary = model.summary
    >>> summary.k
    2
    >>> summary.clusterSizes
    [2, 2]
    >>> summary.trainingCost
    2.000...
    >>> kmeans_path = temp_path + "/kmeans"
    >>> kmeans.save(kmeans_path)
    >>> kmeans2 = KMeans.load(kmeans_path)
    >>> kmeans2.getK()
    2
    >>> model_path = temp_path + "/kmeans_model"
    >>> model.save(model_path)
    >>> model2 = KMeansModel.load(model_path)
    >>> model2.hasSummary
    False
    >>> model.clusterCenters()[0] == model2.clusterCenters()[0]
    array([ True,  True], dtype=bool)
    >>> model.clusterCenters()[1] == model2.clusterCenters()[1]
    array([ True,  True], dtype=bool)

    .. versionadded:: 1.5.0
    """

    # Mirrors of the Scala-side params; converters validate Python values.
    k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    initMode = Param(Params._dummy(), "initMode",
                     "The initialization algorithm. This can be either \"random\" to " +
                     "choose random points as initial cluster centers, or \"k-means||\" " +
                     "to use a parallel variant of k-means++",
                     typeConverter=TypeConverters.toString)
    initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
                      "initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)

    @keyword_only
    def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
                 initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
                 distanceMeasure="euclidean"):
        """
        __init__(self, featuresCol="features", predictionCol="prediction", k=2, \
                 initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
                 distanceMeasure="euclidean")
        """
        super(KMeans, self).__init__()
        # Wrap the JVM-side estimator; params set here are transferred to it.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
        self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
                         distanceMeasure="euclidean")
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return KMeansModel(java_model)

    @keyword_only
    @since("1.5.0")
    def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
                  initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
                  distanceMeasure="euclidean"):
        """
        setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
                  initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
                  distanceMeasure="euclidean")

        Sets params for KMeans.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("1.5.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("1.5.0")
    def getK(self):
        """
        Gets the value of `k` or its default value.
        """
        return self.getOrDefault(self.k)

    @since("1.5.0")
    def setInitMode(self, value):
        """
        Sets the value of :py:attr:`initMode`.
        """
        return self._set(initMode=value)

    @since("1.5.0")
    def getInitMode(self):
        """
        Gets the value of `initMode` or its default value.
        """
        return self.getOrDefault(self.initMode)

    @since("1.5.0")
    def setInitSteps(self, value):
        """
        Sets the value of :py:attr:`initSteps`.
        """
        return self._set(initSteps=value)

    @since("1.5.0")
    def getInitSteps(self):
        """
        Gets the value of `initSteps` or its default value.
        """
        return self.getOrDefault(self.initSteps)

    @since("2.4.0")
    def setDistanceMeasure(self, value):
        """
        Sets the value of :py:attr:`distanceMeasure`.
        """
        return self._set(distanceMeasure=value)

    @since("2.4.0")
    def getDistanceMeasure(self):
        """
        Gets the value of `distanceMeasure` or its default value.
        """
        return self.getOrDefault(self.distanceMeasure)
class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    Model produced by :py:class:`BisectingKMeans`.

    .. versionadded:: 2.0.0
    """

    @since("2.0.0")
    def clusterCenters(self):
        """Return the fitted cluster centers as a list of NumPy arrays."""
        centers = self._call_java("clusterCenters")
        return [center.toArray() for center in centers]

    @since("2.0.0")
    def computeCost(self, dataset):
        """
        Compute the sum of squared distances between the input points and
        their corresponding cluster centers.
        """
        return self._call_java("computeCost", dataset)

    @property
    @since("2.1.0")
    def hasSummary(self):
        """
        Whether a training summary is attached to this model instance.
        """
        return self._call_java("hasSummary")

    @property
    @since("2.1.0")
    def summary(self):
        """
        Training summary (e.g. cluster assignments, cluster sizes) for the
        training set.

        :raises RuntimeError: if no summary exists for this model.
        """
        if not self.hasSummary:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
        return BisectingKMeansSummary(self._call_java("summary"))
@inherit_doc
class BisectingKMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol,
                      HasMaxIter, HasSeed, JavaMLWritable, JavaMLReadable):
    """
    A bisecting k-means algorithm based on the paper "A comparison of document clustering
    techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
    The algorithm starts from a single cluster that contains all points.
    Iteratively it finds divisible clusters on the bottom level and bisects each of them using
    k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
    The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result more than `k` leaf
    clusters, larger clusters get higher priority.

    >>> from pyspark.ml.linalg import Vectors
    >>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
    ...         (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
    >>> df = spark.createDataFrame(data, ["features"])
    >>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
    >>> model = bkm.fit(df)
    >>> centers = model.clusterCenters()
    >>> len(centers)
    2
    >>> model.computeCost(df)
    2.000...
    >>> model.hasSummary
    True
    >>> summary = model.summary
    >>> summary.k
    2
    >>> summary.clusterSizes
    [2, 2]
    >>> transformed = model.transform(df).select("features", "prediction")
    >>> rows = transformed.collect()
    >>> rows[0].prediction == rows[1].prediction
    True
    >>> rows[2].prediction == rows[3].prediction
    True
    >>> bkm_path = temp_path + "/bkm"
    >>> bkm.save(bkm_path)
    >>> bkm2 = BisectingKMeans.load(bkm_path)
    >>> bkm2.getK()
    2
    >>> bkm2.getDistanceMeasure()
    'euclidean'
    >>> model_path = temp_path + "/bkm_model"
    >>> model.save(model_path)
    >>> model2 = BisectingKMeansModel.load(model_path)
    >>> model2.hasSummary
    False
    >>> model.clusterCenters()[0] == model2.clusterCenters()[0]
    array([ True,  True], dtype=bool)
    >>> model.clusterCenters()[1] == model2.clusterCenters()[1]
    array([ True,  True], dtype=bool)

    .. versionadded:: 2.0.0
    """

    # Mirrors of the Scala-side params; converters validate Python values.
    k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
                                    "The minimum number of points (if >= 1.0) or the minimum " +
                                    "proportion of points (if < 1.0) of a divisible cluster.",
                                    typeConverter=TypeConverters.toFloat)

    @keyword_only
    def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
                 seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
        """
        __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
                 seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
        """
        super(BisectingKMeans, self).__init__()
        # Wrap the JVM-side estimator; params set here are transferred to it.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
                                            self.uid)
        self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
                  seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
        """
        setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
                  seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")

        Sets params for BisectingKMeans.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("2.0.0")
    def getK(self):
        """
        Gets the value of `k` or its default value.
        """
        return self.getOrDefault(self.k)

    @since("2.0.0")
    def setMinDivisibleClusterSize(self, value):
        """
        Sets the value of :py:attr:`minDivisibleClusterSize`.
        """
        return self._set(minDivisibleClusterSize=value)

    @since("2.0.0")
    def getMinDivisibleClusterSize(self):
        """
        Gets the value of `minDivisibleClusterSize` or its default value.
        """
        return self.getOrDefault(self.minDivisibleClusterSize)

    @since("2.4.0")
    def setDistanceMeasure(self, value):
        """
        Sets the value of :py:attr:`distanceMeasure`.
        """
        return self._set(distanceMeasure=value)

    @since("2.4.0")
    def getDistanceMeasure(self):
        """
        Gets the value of `distanceMeasure` or its default value.
        """
        return self.getOrDefault(self.distanceMeasure)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
    """
    .. note:: Experimental

    Summary of a BisectingKMeans model over a training dataset; exposes
    only the generic :py:class:`ClusteringSummary` fields.

    .. versionadded:: 2.1.0
    """
    pass
@inherit_doc
class LDAModel(JavaModel):
    """
    Latent Dirichlet Allocation (LDA) model.

    Abstract parent of the local and distributed LDA model
    representations.

    .. versionadded:: 2.0.0
    """

    @since("2.0.0")
    def isDistributed(self):
        """
        True when this instance is of type DistributedLDAModel.
        """
        return self._call_java("isDistributed")

    @since("2.0.0")
    def vocabSize(self):
        """Vocabulary size (number of terms or words in the vocabulary)"""
        return self._call_java("vocabSize")

    @since("2.0.0")
    def topicsMatrix(self):
        """
        Inferred topics as a vocabSize x k matrix whose columns are the
        topics, each a distribution over terms. No guarantees are given
        about the ordering of the topics.

        WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
        the Expectation-Maximization ("em") `optimizer`, then this method could involve
        collecting a large amount of data to the driver (on the order of vocabSize x k).
        """
        return self._call_java("topicsMatrix")

    @since("2.0.0")
    def logLikelihood(self, dataset):
        """
        Calculate a lower bound on the log likelihood of the entire corpus;
        see Equation (16) in the Online LDA paper (Hoffman et al., 2010).

        WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
        :py:attr:`optimizer` is set to "em"), this involves collecting a large
        :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
        """
        return self._call_java("logLikelihood", dataset)

    @since("2.0.0")
    def logPerplexity(self, dataset):
        """
        Calculate an upper bound on perplexity. (Lower is better.)
        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).

        WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
        :py:attr:`optimizer` is set to "em"), this involves collecting a large
        :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
        """
        return self._call_java("logPerplexity", dataset)

    @since("2.0.0")
    def describeTopics(self, maxTermsPerTopic=10):
        """
        Return the topics described by their top-weighted terms.
        """
        return self._call_java("describeTopics", maxTermsPerTopic)

    @since("2.0.0")
    def estimatedDocConcentration(self):
        """
        Value for :py:attr:`LDA.docConcentration` estimated from the data.

        If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
        then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration`
        parameter.
        """
        return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    """
    Distributed model fitted by :py:class:`LDA`; currently produced only
    by the Expectation-Maximization (EM) optimizer.

    Stores the inferred topics, the full training dataset, and the topic
    distribution for each training document.

    .. versionadded:: 2.0.0
    """

    @since("2.0.0")
    def toLocal(self):
        """
        Convert this distributed model to a local representation. This discards info about the
        training dataset.

        WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
        """
        local_model = LocalLDAModel(self._call_java("toLocal"))
        # SPARK-10931: Temporary fix to be removed once LDAModel defines Params
        local_model._create_params_from_java()
        local_model._transfer_params_from_java()
        return local_model

    @since("2.0.0")
    def trainingLogLikelihood(self):
        """
        Log likelihood of the observed tokens in the training set,
        given the current parameter estimates:
        log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)

        Notes:
          - This excludes the prior; for that, use :py:func:`logPrior`.
          - Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
            the hyperparameters.
          - This is computed from the topic distributions computed during training. If you call
            :py:func:`logLikelihood` on the same training dataset, the topic distributions
            will be computed again, possibly giving different results.
        """
        return self._call_java("trainingLogLikelihood")

    @since("2.0.0")
    def logPrior(self):
        """
        Log probability of the current parameter estimate:
        log P(topics, topic distributions for docs | alpha, eta)
        """
        return self._call_java("logPrior")

    @since("2.0.0")
    def getCheckpointFiles(self):
        """
        If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
        be saved checkpoint files. This method is provided so that users can manage those files.

        .. note:: Removing the checkpoints can cause failures if a partition is lost and is needed
            by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
            the checkpoints when this model and derivative data go out of scope.

        :return: List of checkpoint files from training
        """
        return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    """
    Local (non-distributed) model fitted by :py:class:`LDA`; keeps only
    the inferred topics, with no information about the training dataset.

    .. versionadded:: 2.0.0
    """
    pass
@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> model = lda.fit(df)
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an"
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
             k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
             subsamplingRate=0.05, optimizeDocConcentration=True,
             docConcentration=None, topicConcentration=None,
             topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
    """
    __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
             k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
             subsamplingRate=0.05, optimizeDocConcentration=True,\
             docConcentration=None, topicConcentration=None,\
             topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
    """
    super(LDA, self).__init__()
    # Create the JVM-side LDA estimator that this Python object wraps.
    self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
    # Defaults registered here mirror the keyword defaults in the signature
    # above (seed/docConcentration/topicConcentration deliberately have none).
    self._setDefault(maxIter=20, checkpointInterval=10,
                     k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                     subsamplingRate=0.05, optimizeDocConcentration=True,
                     topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
    # @keyword_only stashes the caller's kwargs in _input_kwargs.
    kwargs = self._input_kwargs
    self.setParams(**kwargs)
def _create_model(self, java_model):
    # The EM optimizer produces a distributed model; any other optimizer
    # (i.e. "online") produces a local one.
    model_cls = DistributedLDAModel if self.getOptimizer() == "em" else LocalLDAModel
    return model_cls(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
              k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
              subsamplingRate=0.05, optimizeDocConcentration=True,
              docConcentration=None, topicConcentration=None,
              topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
    """
    setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
              k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
              subsamplingRate=0.05, optimizeDocConcentration=True,\
              docConcentration=None, topicConcentration=None,\
              topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
    Sets params for LDA.
    """
    # Only the kwargs the caller actually supplied are applied (@keyword_only).
    kwargs = self._input_kwargs
    return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
    """
    Sets the value of :py:attr:`k`.
    >>> algo = LDA().setK(10)
    >>> algo.getK()
    10
    """
    # value is coerced by the Param's TypeConverters.toInt.
    return self._set(k=value)
@since("2.0.0")
def getK(self):
    """
    Gets the value of :py:attr:`k` or its default value.
    """
    # Explicitly-set value if any, otherwise the default from _setDefault.
    return self.getOrDefault(self.k)
@since("2.0.0")
def setOptimizer(self, value):
    """
    Sets the value of :py:attr:`optimizer`.
    Currently only support 'em' and 'online'.
    >>> algo = LDA().setOptimizer("em")
    >>> algo.getOptimizer()
    'em'
    """
    # The chosen optimizer also decides the model class (see _create_model).
    return self._set(optimizer=value)
@since("2.0.0")
def getOptimizer(self):
    """
    Gets the value of :py:attr:`optimizer` or its default value.
    """
    # Default is "online" (registered in __init__).
    return self.getOrDefault(self.optimizer)
@since("2.0.0")
def setLearningOffset(self, value):
    """
    Sets the value of :py:attr:`learningOffset`.
    >>> algo = LDA().setLearningOffset(100)
    >>> algo.getLearningOffset()
    100.0
    """
    # value is coerced by TypeConverters.toFloat (int 100 -> 100.0 above).
    return self._set(learningOffset=value)
@since("2.0.0")
def getLearningOffset(self):
    """
    Gets the value of :py:attr:`learningOffset` or its default value.
    """
    # Default is 1024.0 (registered in __init__).
    return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def setLearningDecay(self, value):
    """
    Sets the value of :py:attr:`learningDecay`.
    >>> algo = LDA().setLearningDecay(0.1)
    >>> algo.getLearningDecay()
    0.1...
    """
    # value is coerced by TypeConverters.toFloat.
    return self._set(learningDecay=value)
@since("2.0.0")
def getLearningDecay(self):
    """
    Gets the value of :py:attr:`learningDecay` or its default value.
    """
    # Default is 0.51 (registered in __init__).
    return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def setSubsamplingRate(self, value):
    """
    Sets the value of :py:attr:`subsamplingRate`.
    >>> algo = LDA().setSubsamplingRate(0.1)
    >>> algo.getSubsamplingRate()
    0.1...
    """
    # value is coerced by TypeConverters.toFloat.
    return self._set(subsamplingRate=value)
@since("2.0.0")
def getSubsamplingRate(self):
    """
    Gets the value of :py:attr:`subsamplingRate` or its default value.
    """
    # Default is 0.05 (registered in __init__).
    return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
    """
    Sets the value of :py:attr:`optimizeDocConcentration`.
    >>> algo = LDA().setOptimizeDocConcentration(True)
    >>> algo.getOptimizeDocConcentration()
    True
    """
    # value is coerced by TypeConverters.toBoolean.
    return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def getOptimizeDocConcentration(self):
    """
    Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
    """
    # Default is True (registered in __init__).
    return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def setDocConcentration(self, value):
    """
    Sets the value of :py:attr:`docConcentration`.
    >>> algo = LDA().setDocConcentration([0.1, 0.2])
    >>> algo.getDocConcentration()
    [0.1..., 0.2...]
    """
    # value is coerced by TypeConverters.toListFloat.
    return self._set(docConcentration=value)
@since("2.0.0")
def getDocConcentration(self):
    """
    Gets the value of :py:attr:`docConcentration` or its default value.
    """
    # NOTE: no default is registered for this Param in __init__.
    return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def setTopicConcentration(self, value):
    """
    Sets the value of :py:attr:`topicConcentration`.
    >>> algo = LDA().setTopicConcentration(0.5)
    >>> algo.getTopicConcentration()
    0.5...
    """
    # value is coerced by TypeConverters.toFloat.
    return self._set(topicConcentration=value)
@since("2.0.0")
def getTopicConcentration(self):
    """
    Gets the value of :py:attr:`topicConcentration` or its default value.
    """
    # NOTE: no default is registered for this Param in __init__.
    return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def setTopicDistributionCol(self, value):
    """
    Sets the value of :py:attr:`topicDistributionCol`.
    >>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
    >>> algo.getTopicDistributionCol()
    'topicDistributionCol'
    """
    # value is coerced by TypeConverters.toString.
    return self._set(topicDistributionCol=value)
@since("2.0.0")
def getTopicDistributionCol(self):
    """
    Gets the value of :py:attr:`topicDistributionCol` or its default value.
    """
    # Default is "topicDistribution" (registered in __init__).
    return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
    """
    Sets the value of :py:attr:`keepLastCheckpoint`.
    >>> algo = LDA().setKeepLastCheckpoint(False)
    >>> algo.getKeepLastCheckpoint()
    False
    """
    # value is coerced by TypeConverters.toBoolean.
    return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def getKeepLastCheckpoint(self):
    """
    Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
    """
    # Default is True (registered in __init__).
    return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class PowerIterationClustering(HasMaxIter, HasWeightCol, JavaParams, JavaMLReadable,
                               JavaMLWritable):
    """
    .. note:: Experimental
    Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
    <a href=http://www.icml2010.org/papers/387.pdf>Lin and Cohen</a>. From the abstract:
    PIC finds a very low-dimensional embedding of a dataset using truncated power
    iteration on a normalized pair-wise similarity matrix of the data.
    This class is not yet an Estimator/Transformer, use :py:func:`assignClusters` method
    to run the PowerIterationClustering algorithm.
    .. seealso:: `Wikipedia on Spectral clustering \
    <http://en.wikipedia.org/wiki/Spectral_clustering>`_
    >>> data = [(1, 0, 0.5), \
    (2, 0, 0.5), (2, 1, 0.7), \
    (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9), \
    (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1), \
    (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
    >>> df = spark.createDataFrame(data).toDF("src", "dst", "weight")
    >>> pic = PowerIterationClustering(k=2, maxIter=40, weightCol="weight")
    >>> assignments = pic.assignClusters(df)
    >>> assignments.sort(assignments.id).show(truncate=False)
    +---+-------+
    |id |cluster|
    +---+-------+
    |0  |1      |
    |1  |1      |
    |2  |1      |
    |3  |1      |
    |4  |1      |
    |5  |0      |
    +---+-------+
    ...
    >>> pic_path = temp_path + "/pic"
    >>> pic.save(pic_path)
    >>> pic2 = PowerIterationClustering.load(pic_path)
    >>> pic2.getK()
    2
    >>> pic2.getMaxIter()
    40
    .. versionadded:: 2.4.0
    """

    # Param declarations; doc strings are user-visible via explainParams().
    k = Param(Params._dummy(), "k",
              "The number of clusters to create. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    initMode = Param(Params._dummy(), "initMode",
                     "The initialization algorithm. This can be either " +
                     "'random' to use a random vector as vertex properties, or 'degree' to use " +
                     "a normalized sum of similarities with other vertices. Supported options: " +
                     "'random' and 'degree'.",
                     typeConverter=TypeConverters.toString)
    srcCol = Param(Params._dummy(), "srcCol",
                   "Name of the input column for source vertex IDs.",
                   typeConverter=TypeConverters.toString)
    dstCol = Param(Params._dummy(), "dstCol",
                   "Name of the input column for destination vertex IDs.",
                   typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
                 weightCol=None):
        """
        __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
                 weightCol=None)
        """
        super(PowerIterationClustering, self).__init__()
        # Create the JVM-side implementation that this Python object wraps.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
        # Defaults mirror the keyword defaults above (weightCol has none).
        self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.4.0")
    def setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
                  weightCol=None):
        """
        setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
                  weightCol=None)
        Sets params for PowerIterationClustering.
        """
        # Only the kwargs the caller actually supplied are applied (@keyword_only).
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.4.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("2.4.0")
    def getK(self):
        """
        Gets the value of :py:attr:`k` or its default value.
        """
        return self.getOrDefault(self.k)

    @since("2.4.0")
    def setInitMode(self, value):
        """
        Sets the value of :py:attr:`initMode`.
        """
        return self._set(initMode=value)

    @since("2.4.0")
    def getInitMode(self):
        """
        Gets the value of :py:attr:`initMode` or its default value.
        """
        return self.getOrDefault(self.initMode)

    @since("2.4.0")
    def setSrcCol(self, value):
        """
        Sets the value of :py:attr:`srcCol`.
        """
        return self._set(srcCol=value)

    @since("2.4.0")
    def getSrcCol(self):
        """
        Gets the value of :py:attr:`srcCol` or its default value.
        """
        return self.getOrDefault(self.srcCol)

    @since("2.4.0")
    def setDstCol(self, value):
        """
        Sets the value of :py:attr:`dstCol`.
        """
        return self._set(dstCol=value)

    @since("2.4.0")
    def getDstCol(self):
        """
        Gets the value of :py:attr:`dstCol` or its default value.
        """
        return self.getOrDefault(self.dstCol)

    @since("2.4.0")
    def assignClusters(self, dataset):
        """
        Run the PIC algorithm and returns a cluster assignment for each input vertex.
        :param dataset:
          A dataset with columns src, dst, weight representing the affinity matrix,
          which is the matrix A in the PIC paper. Suppose the src column value is i,
          the dst column value is j, the weight column value is similarity s,,ij,,
          which must be nonnegative. This is a symmetric matrix and hence
          s,,ij,, = s,,ji,,. For any (i, j) with nonzero similarity, there should be
          either (i, j, s,,ij,,) or (j, i, s,,ji,,) in the input. Rows with i = j are
          ignored, because we assume s,,ij,, = 0.0.
        :return:
          A dataset that contains columns of vertex id and the corresponding cluster for
          the id. The schema of it will be:
          - id: Long
          - cluster: Int
        .. versionadded:: 2.4.0
        """
        # Push the Python-side params to the JVM object before invoking it.
        self._transfer_params_to_java()
        jdf = self._java_obj.assignClusters(dataset._jdf)
        # Wrap the returned Java DataFrame in the caller's SQL context.
        return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
    # Run the module doctests against a local SparkSession.
    import doctest
    import sys  # used for the failure exit code below; imported locally so this block is self-contained
    import numpy
    import pyspark.ml.clustering
    from pyspark.sql import SparkSession
    try:
        # Numpy 1.14+ changed its string format.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        # Older numpy has no `legacy` option; its output already matches.
        pass
    globs = pyspark.ml.clustering.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.clustering tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        # Always clean up the temp dir, even if the doctests raise.
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1)
| |
from __future__ import division
import itertools
import numpy as np
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer.links import Convolution2D
from chainercv.links import Conv2DBNActiv
from chainercv import utils
from chainercv.links.model.yolo.yolo_base import YOLOBase
def _leaky_relu(x):
    """Leaky ReLU with the fixed negative slope (0.1) used throughout YOLOv3."""
    slope = 0.1
    return F.leaky_relu(x, slope=slope)
def _upsample(x):
    """Double the spatial resolution via 2x2 unpooling (no padding rows/cols)."""
    factor = 2
    return F.unpooling_2d(x, factor, cover_all=False)
class ResidualBlock(chainer.ChainList):
    """ChainList with a residual connection."""

    def __init__(self, *links):
        super(ResidualBlock, self).__init__(*links)

    def forward(self, x):
        # Run the input through every child link in order, then add the
        # shortcut (identity) connection.
        out = x
        for child in self:
            out = child(out)
        out += x
        return out
class Darknet53Extractor(chainer.ChainList):
    """A Darknet53 based feature extractor for YOLOv3.
    This is a feature extractor for :class:`~chainercv.links.model.yolo.YOLOv3`
    """

    # Expected input image size and the three detection grid resolutions.
    insize = 416
    grids = (13, 26, 52)

    def __init__(self):
        super(Darknet53Extractor, self).__init__()

        # Darknet53
        self.append(Conv2DBNActiv(32, 3, pad=1, activ=_leaky_relu))
        for k, n_block in enumerate((1, 2, 8, 8, 4)):
            # Stride-2 downsampling conv, then n_block residual blocks.
            self.append(Conv2DBNActiv(
                32 << (k + 1), 3, stride=2, pad=1, activ=_leaky_relu))
            for _ in range(n_block):
                self.append(ResidualBlock(
                    Conv2DBNActiv(32 << k, 1, activ=_leaky_relu),
                    Conv2DBNActiv(32 << (k + 1), 3, pad=1, activ=_leaky_relu)))

        # additional links
        for i, n in enumerate((512, 256, 128)):
            if i > 0:
                # Extra 1x1 conv applied right after the upsample+concat
                # merge performed in forward().
                self.append(Conv2DBNActiv(n, 1, activ=_leaky_relu))
            self.append(Conv2DBNActiv(n, 1, activ=_leaky_relu))
            self.append(Conv2DBNActiv(n * 2, 3, pad=1, activ=_leaky_relu))
            self.append(Conv2DBNActiv(n, 1, activ=_leaky_relu))
            self.append(Conv2DBNActiv(n * 2, 3, pad=1, activ=_leaky_relu))
            self.append(Conv2DBNActiv(n, 1, activ=_leaky_relu))

    def forward(self, x):
        """Compute feature maps from a batch of images.
        This method extracts feature maps from 3 layers.
        Args:
            x (ndarray): An array holding a batch of images.
                The images should be resized to :math:`416\\times 416`.
        Returns:
            list of Variable:
            Each variable contains a feature map.
        """
        ys = []
        h = x
        hs = []
        # NOTE(review): the magic indices below are positions in the ChainList
        # built in __init__ — they must be updated if the layer layout changes.
        for i, link in enumerate(self):
            h = link(h)
            if i in {33, 39, 45}:
                # Output feature maps for the three detection scales.
                ys.append(h)
            elif i in {14, 23}:
                # Intermediate maps saved as skip connections.
                hs.append(h)
            elif i in {34, 40}:
                # Upsample and concatenate with the most recent skip map.
                h = F.concat((_upsample(h), hs.pop()))
        return ys
class YOLOv3(YOLOBase):
    """YOLOv3.
    This is a model of YOLOv3 [#]_.
    This model uses :class:`~chainercv.links.model.yolo.Darknet53Extractor` as
    its feature extractor.
    .. [#] Joseph Redmon, Ali Farhadi.
    YOLOv3: An Incremental Improvement. arXiv 2018.
    Args:
        n_fg_class (int): The number of classes excluding the background.
        pretrained_model (string): The weight file to be loaded.
            This can take :obj:`'voc0712'`, `filepath` or :obj:`None`.
            The default value is :obj:`None`.
            * :obj:`'voc0712'`: Load weights trained on trainval split of \
                PASCAL VOC 2007 and 2012. \
                The weight file is downloaded and cached automatically. \
                :obj:`n_fg_class` must be :obj:`20` or :obj:`None`. \
                These weights were converted from the darknet model. \
                The conversion code is \
                `chainercv/examples/yolo/darknet2npz.py`.
            * `filepath`: A path of npz file. In this case, :obj:`n_fg_class` \
                must be specified properly.
            * :obj:`None`: Do not load weights.
    """

    _models = {
        'voc0712': {
            'param': {'n_fg_class': 20},
            'url': 'https://chainercv-models.preferred.jp/'
            'yolo_v3_voc0712_converted_2018_05_01.npz',
            'cv2': True
        },
    }

    # Anchor box priors per detection scale, stored as (height, width) pairs
    # — NOTE(review): axis order appears to be (h, w); confirm against the
    # darknet2npz conversion if modifying.
    _anchors = (
        ((90, 116), (198, 156), (326, 373)),
        ((61, 30), (45, 62), (119, 59)),
        ((13, 10), (30, 16), (23, 33)))

    def __init__(self, n_fg_class=None, pretrained_model=None):
        super(YOLOv3, self).__init__()

        # Resolve n_fg_class / weight path from the pretrained-model registry.
        param, path = utils.prepare_pretrained_model(
            {'n_fg_class': n_fg_class}, pretrained_model, self._models)
        self.n_fg_class = param['n_fg_class']
        self.use_preset('visualize')

        with self.init_scope():
            self.extractor = Darknet53Extractor()
            # One small head per scale: 3x3 conv followed by a 1x1 conv that
            # emits (4 loc + 1 objectness + n_fg_class) values per anchor.
            self.subnet = chainer.ChainList()
        for i, n in enumerate((512, 256, 128)):
            self.subnet.append(chainer.Sequential(
                Conv2DBNActiv(n * 2, 3, pad=1, activ=_leaky_relu),
                Convolution2D(
                    len(self._anchors[i]) * (4 + 1 + self.n_fg_class), 1)))

        # Precompute the default (prior) boxes for every grid cell and anchor,
        # and the pixel step (stride) of each grid.
        default_bbox = []
        step = []
        for k, grid in enumerate(self.extractor.grids):
            for v, u in itertools.product(range(grid), repeat=2):
                for h, w in self._anchors[k]:
                    default_bbox.append((v, u, h, w))
                    step.append(self.insize / grid)
        self._default_bbox = np.array(default_bbox, dtype=np.float32)
        self._step = np.array(step, dtype=np.float32)

        if path:
            chainer.serializers.load_npz(path, self, strict=False)

    def to_cpu(self):
        # Keep the helper arrays on the same device as the parameters.
        super(YOLOv3, self).to_cpu()
        self._default_bbox = cuda.to_cpu(self._default_bbox)
        self._step = cuda.to_cpu(self._step)

    def to_gpu(self, device=None):
        # Keep the helper arrays on the same device as the parameters.
        super(YOLOv3, self).to_gpu(device)
        self._default_bbox = cuda.to_gpu(self._default_bbox, device)
        self._step = cuda.to_gpu(self._step, device)

    def forward(self, x):
        """Compute localization, objectness, and classification from a batch of images.
        This method computes three variables, :obj:`locs`, :obj:`objs`,
        and :obj:`confs`.
        :meth:`self._decode` converts these variables to bounding box
        coordinates and confidence scores.
        These variables are also used in training YOLOv3.
        Args:
            x (chainer.Variable): A variable holding a batch of images.
        Returns:
            tuple of chainer.Variable:
            This method returns three variables, :obj:`locs`,
            :obj:`objs`, and :obj:`confs`.
            * **locs**: A variable of float arrays of shape \
                :math:`(B, K, 4)`, \
                where :math:`B` is the number of samples in the batch and \
                :math:`K` is the number of default bounding boxes.
            * **objs**: A variable of float arrays of shape \
                :math:`(B, K)`.
            * **confs**: A variable of float arrays of shape \
                :math:`(B, K, n\_fg\_class)`.
        """
        ys = []
        for i, h in enumerate(self.extractor(x)):
            h = self.subnet[i](h)
            # (B, C, H, W) -> (B, H, W, C) -> (B, H*W*anchors, 4+1+n_fg_class)
            h = F.transpose(h, (0, 2, 3, 1))
            h = F.reshape(h, (h.shape[0], -1, 4 + 1 + self.n_fg_class))
            ys.append(h)
        y = F.concat(ys)
        # Split the last axis: 4 localization values, 1 objectness, the rest
        # are per-class confidences.
        locs = y[:, :, :4]
        objs = y[:, :, 4]
        confs = y[:, :, 5:]
        return locs, objs, confs

    def _decode(self, loc, obj, conf):
        # Decode raw network outputs into boxes for a single image.
        raw_bbox = self._default_bbox.copy()
        # Sigmoid-activated center offsets within each grid cell, scaled to pixels.
        raw_bbox[:, :2] += 1 / (1 + self.xp.exp(-loc[:, :2]))
        raw_bbox[:, :2] *= self._step[:, None]
        # Exponential size scaling relative to the anchor priors.
        raw_bbox[:, 2:] *= self.xp.exp(loc[:, 2:])
        # (center, size) -> (top-left, bottom-right) corner coordinates.
        raw_bbox[:, :2] -= raw_bbox[:, 2:] / 2
        raw_bbox[:, 2:] += raw_bbox[:, :2]

        # Sigmoid on objectness and class confidences; combined score per class.
        obj = 1 / (1 + self.xp.exp(-obj))
        conf = 1 / (1 + self.xp.exp(-conf))
        raw_score = obj[:, None] * conf

        # Per-class score threshold + non-maximum suppression.
        bbox = []
        label = []
        score = []
        for l in range(self.n_fg_class):
            bbox_l = raw_bbox
            score_l = raw_score[:, l]

            mask = score_l >= self.score_thresh
            bbox_l = bbox_l[mask]
            score_l = score_l[mask]

            indices = utils.non_maximum_suppression(
                bbox_l, self.nms_thresh, score_l)
            bbox_l = bbox_l[indices]
            score_l = score_l[indices]

            bbox.append(bbox_l)
            label.append(self.xp.array((l,) * len(bbox_l)))
            score.append(score_l)

        bbox = self.xp.vstack(bbox).astype(np.float32)
        label = self.xp.hstack(label).astype(np.int32)
        score = self.xp.hstack(score).astype(np.float32)
        return bbox, label, score
| |
# -*- coding: utf-8 -*-
"""
Read_edf
========
Reading data from Elmiko DigiTrack. Integrating time info from XML (.EVX file from digitrack) about time of first EEG sample
with sampling rate info (from .1 file from digitrack) to make timestamps for EEG signal. EEG signal needs to be exported to .edf
from digitrack, then it can be parsed here.
Use timestamps from experiment log file to cut slices from EEG around events. EEG and events need to be saved with respect to the same
clock, so best do experiment and recording on the same machine.
"""
from io import open
import pandas as pd
import xml.etree.ElementTree as etree
import pyedflib
import numpy as np
from datetime import datetime
import struct
import mne
import pandas as pd
import scipy.stats.mstats as sp
import glob
def MNE_Read_EDF(path):
    """Read .edf exported from digitrack using MNE library.
    Parameters
    ----------
    path: str
        directory of a folder containing the following files: sygnal.edf, unity_log.csv, digi_log.xml, digi_binary.1
        This is usually the folder with the subject name. ex: '/Users/rcetnarski/Desktop/Dane EEG/Pilot_Gabory/Maciek/experiment/'
    Returns
    -------
    raw_mne: mne.Raw object
        http://martinos.org/mne/dev/generated/mne.io.RawArray.html#mne.io.RawArray
    events: ndarray (n_events, 3)
        the first column is the sample number when index occured,
        second column is a mne requirement and has to be ignored,
        third column is the event code
    event_id: dict(event label : event code)
        dictionary with labels describing the event codes
    log: DataFrame
        the parsed unity_log.csv events table (also returned for convenience)
    """
    # Load the eeg data
    print(glob.glob(path + '*.edf'))
    paths = str(glob.glob(path + '*.edf'))
    assert len(glob.glob(path + '*.edf')) == 1, path + paths  # Only one edf in the directory
    edf_path = glob.glob(path + '*.edf')[0]  # from the sygnal.edf file
    raw_mne = mne.io.read_raw_edf(edf_path, stim_channel=None, preload=True)
    # Fix the sampling rate info saved in the original sygnal.edf. Our software does not
    # save it with high precision in the .edf file, so we will replace it manually.
    exact_sr = Get_Exact_Sampling_rate(path)  # Get the high precision sampling rate
    raw_mne.info.update({'sfreq': exact_sr})  # Update the mne info with the high precision sampling rate
    # Create a timestamp vector so event times can be expressed in sample number of eeg
    timestamp = exact_timestamp(path, raw_mne.n_times, exact_sr)
    # Read the events file
    log = pd.read_csv(path + 'unity_log.csv', parse_dates=True, index_col=0,
                      skiprows=1, skipfooter=1, engine='python')
    # Select the columns where the timestamp of the event was written
    event_time_columns = [col_name for col_name in log.columns
                          if 'time' in col_name and 'response' not in col_name
                          and 'psychopy' not in col_name and 'start_time' not in col_name]
    # Convert each event datetime to a sample number counted from the first EEG sample:
    # the index of the closest timestamp is the event's sample number.
    event_sample_indexes = {}
    for time_col in event_time_columns:
        event_sample_indexes[time_col] = []
        print(time_col)
        for event in log[time_col]:
            # Nulls should only happen when a time only appears in one condition,
            # e.g. response time does not appear in the control condition.
            if pd.notnull(event):
                event_index = np.argmin(np.abs(timestamp - np.datetime64(event)))
                event_sample_indexes[time_col].append(event_index)
    # IMPORTANT: event code cannot be zero - the stimulus channel default value
    # for "no event" is zero, hence the +1.
    event_id = {event_name: idx + 1 for idx, event_name in enumerate(event_time_columns)}
    # Build the MNE events array (ndarray with 3 int columns).
    # pd.concat replaces the DataFrame.append calls removed in pandas 2.0.
    frames = []
    for event_label, sample_numbers in event_sample_indexes.items():
        tmp = pd.DataFrame(sample_numbers, columns=['sample_nr'])
        tmp['code'] = event_id[event_label]
        frames.append(tmp)
    if frames:
        events = pd.concat(frames)
    else:
        events = pd.DataFrame(columns=['sample_nr', 'code'])
    # Sort events chronologically
    events = events.sort_values(by='sample_nr')
    # Change to numpy array of ints (.values works on every pandas version;
    # .as_matrix() was removed in pandas 1.0).
    events = events.values.astype('int')
    # MNE needs an extra column of zeros in the middle; it is unused but required.
    events = np.insert(events, 1, 0, axis=1)
    return raw_mne, events, event_id, log
def Combine_EDF_XML(path):
    """Creates a dictionary with eeg signals, timestamps and events.
    Reads edf file with eeg signal. Uses xml file to add timestamps to eeg. Reads unity_log with experiment events times.
    Parameters
    ----------
    path: str
        directory containing .edf, .xml, and .csv files.
    Returns
    -------
    signal_dict (dict of Objects): dict
        stores eeg channels, timestamps and events.
        Keys:
            "EEG <channel name>" : eeg signal
            "timestamp" : timestamps for eeg
            "events" : names and timestamps of events
    """
    # ---EEG SIGNAL PART---
    # Load the edf from the edf file generated by digitrack; each channel is
    # already stored as "EEG <name>" -> signal by Read_EDF.
    assert len(glob.glob(path + '*.edf')) == 1  # Only one edf in the directory
    signal_dict = Read_EDF(glob.glob(path + '*.edf')[0])  # from the sygnal.edf file

    # ---EVENT MARKERS PART---
    print(path)
    # Read the event markers from the experiment and store them under 'events'.
    log = pd.read_csv(path + 'unity_log.csv', parse_dates=True, index_col=0,
                      skiprows=1, skipfooter=1, engine='python')
    signal_dict['events'] = log

    # ---TIMESTAMP PART---
    # Number of EEG samples (taken from any one EEG channel)
    signal_dict['n_samples'] = next(len(value) for (key, value) in signal_dict.items() if 'EEG' in key)
    # Sampling rate from the digi_binary.1 file
    signal_dict['sr'] = Get_Exact_Sampling_rate(path)
    # Expand the timestamp using the first sample time (from digi_log.xml) and sampling rate
    signal_dict['timestamp'] = exact_timestamp(path, signal_dict['n_samples'], signal_dict['sr'])
    # List of all electrode names, useful for plotting functions
    signal_dict['eeg_names'] = [key for key in signal_dict.keys() if 'EEG' in key]
    # Keep track of subject name, also useful for plotting
    signal_dict['subject_name'] = path.split('/')[-3]
    # Time of first sample (for exporting to edf). The original code assigned
    # this twice; once is enough.
    signal_dict['first_sample_time'] = signal_dict['timestamp'][0]
    # Timestamp in ms from start of eeg recording, for edf
    signal_dict['timestamp_ms'] = (signal_dict['timestamp'] - signal_dict['first_sample_time']).astype('timedelta64[ms]').astype('float')
    return signal_dict
def Read_EDF(path):
    """Read .edf exported from digitrack and converts them to a dictionary.
    Parameters
    ----------
    path: str
        directory of .edf
    Returns
    -------
    signal_dict: dict(np.array)
        Keys are channel names
    """
    f = pyedflib.EdfReader(path)
    try:
        signal_labels = f.getSignalLabels()
        signal_dict = {}
        for idx, name in enumerate(signal_labels):
            # NOTE(review): labels are bytes in the pyedflib version this was
            # written against; newer versions return str — confirm on upgrade.
            signal_dict[name.decode("utf-8")] = f.readSignal(idx)
    finally:
        # Close the reader even if a readSignal call raises, so the file
        # handle is never leaked.
        f._close()
    return signal_dict
def exact_timestamp(path, n_samples, sampling_rate):
    """Build a nanosecond-precision timestamp vector for every EEG sample.
    Elmiko EEG amplifier 1042 calibrates to produce a sampling rate defined by the user.
    The calibrated sampling rate is slightly different from user definition.
    Calibrated sampling rate is saved in the header of digi_binary.1 file.
    Parameters
    ----------
    path: str
        folder containing the .evx file (first-sample time is read from it)
    n_samples: int
        number of EEG samples to generate timestamps for
    sampling_rate: float
        calibrated sampling rate in Hz
    Returns
    -------
    ndarray of datetime64[ns], shape (n_samples,)
    """
    # Sample duration in integer nanoseconds; int() truncates the remaining
    # decimal places, exactly as the original accumulation loop did.
    # NOTE: despite the name, exact_sr_ns is a sample *duration*, not a rate.
    exact_sr_ns = int(1000.0 / sampling_rate * 10**3 * 10**3)
    timestamp = np.empty(n_samples, dtype='datetime64[ns]')
    # First value comes from the first-sample time saved by digitrack.
    timestamp[0] = Read_XML(path)['DateTime'].iloc[0]
    # Vectorized equivalent of repeatedly adding the sample duration:
    # sample i is first_sample + i * duration (identical integer arithmetic,
    # but O(n) numpy work instead of a Python loop).
    timestamp[1:] = timestamp[0] + np.arange(1, n_samples) * np.timedelta64(exact_sr_ns, 'ns')
    return timestamp
def Read_XML(path):
    """Read the header for the signal from .EVX.
    Returns
    -------
    df: DataFrame
        Contains timestamp marking first EEG sample
    """
    assert len(glob.glob(path + '*.evx')) == 1  # Only one digi log file in the directory
    with open(glob.glob(path + '*.evx')[0], mode='r', encoding='utf-8') as xml_file:
        xml_tree = etree.parse(xml_file)
        root = xml_tree.getroot()
    # Get only the relevant fields
    # NOTE(review): if no 'Technical_ExamStart' element exists, `df` is never
    # bound and the return below raises NameError — confirm the file format
    # guarantees this element.
    for child_of_root in root:
        if(child_of_root.attrib['strId'] == 'Technical_ExamStart'):
            time_event = child_of_root.find('event')
            # Timestamp in unix time
            u_time = time_event.attrib['time']
            # Timestamp in DateTime
            dt_time = time_event.find('info').attrib['time']
            # store this information in a dataframe in a datetime/timestamp format
            df = pd.DataFrame()
            # HACK changing timezone by manually adding one hour
            # TODO make sure the timestamps will be possible to compare between tz (utc) naive and tz aware formats
            # The two-character hour offset after '+' is parsed out below.
            # NOTE(review): if the time string has no '+' offset, find()
            # returns -1 and the slice silently grabs the wrong characters.
            timezone_info = dt_time.find('+')  # There an offset from some annoying timezone is saved
            df['UNIXTIME'] = pd.to_datetime([u_time], unit='us').tz_localize('UTC') + pd.Timedelta(hours=int(dt_time[timezone_info + 1: dt_time.find('+') + 3]))
            df['DateTime'] = pd.to_datetime([dt_time], infer_datetime_format=True).tz_localize('UTC') + pd.Timedelta(hours=int(dt_time[timezone_info + 1: dt_time.find('+') + 3]))
    return df
def Get_Exact_Sampling_rate(path):
    """Read the calibrated sampling rate (float64, Hz) from the digitrack .1 binary header."""
    # Read the bytes from .1 file
    assert len(glob.glob(path + '*.1')) == 1  # Only one digi binary file in the directory
    with open(glob.glob(path + '*.1')[0], "rb") as binary_file:
        # Seek position and read N bytes.
        # NOTE(review): 490 + 89*64 is a magic header offset — confirm against
        # the digitrack file-format documentation before changing.
        binary_file.seek((490 + (89 * 64)))  # Go to byte nr
        couple_bytes = binary_file.read(8)
        # Little-endian IEEE-754 double per struct's "d" format.
        sr = struct.unpack("d", couple_bytes)
        print('!!!!!!!!!!!! REMEMBER EXG ETHERNET DIFFERENCE FOR SR !!!!!!!!!!!!!!')
        # Sanity check only — stripped under `python -O`.
        assert sr[0] > 100 and sr[0] < 10000
    return sr[0]
| |
# -*- coding: utf-8 -*-
"""
Testing the helper functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from .context import salientregions as sr
import unittest
import cv2
import os
import numpy as np
class HelpersEllipseTester(unittest.TestCase):
'''
Tests for the helper functions related to ellipses
'''
def setUp(self):
    '''
    Load the binary masks to make ellipses from, and create the ground truths.
    '''
    # testing region to ellipse conversion
    self.half_major_axis_len = 15
    self.half_minor_axis_len = 9
    self.theta = 0.52
    # Polynomial coefficients [A, B, C] corresponding to the axes/angle above.
    self.standard_coeff = [
        0.006395179230685,
        -0.003407029045900,
        0.010394944226105]
    # testing elliptic features
    testdata_path = os.path.normpath(
        os.path.join(
            os.path.dirname(
                os.path.abspath(__file__)),
            'images/Binary/'))
    self.ellipse1_mask = np.array(
        cv2.imread(
            os.path.join(
                testdata_path,
                'Binary_ellipse1.png'), cv2.IMREAD_GRAYSCALE))
    # NOTE(review): ground-truth vectors mirror the MATLAB implementation's
    # feature ordering — confirm against salientregions.helpers docs.
    self.features_standard_ellipse1 = np.array([200, 175, 34, 14, 0, 2])
    self.features_poly_ellipse1 = 100.00 * \
        np.array(
            [2.000000000000000,
             1.750000000000000,
             0.000008650519031,
             -0.000000000000000,
             0.000051020408163,
             0.020000000000000])
    self.ellipse2_mask = np.array(
        cv2.imread(
            os.path.join(
                testdata_path,
                'Binary_ellipse2.png'), cv2.IMREAD_GRAYSCALE))
    self.features_standard_ellipse2 = np.array([187, 38.5, 10, 5, 90, 2])
    self.features_poly_ellipse2 = 100.00 * np.array([1.870000000000000,
                                                     0.385000000000000,
                                                     0.000400000000000,
                                                     0.000000000000000,
                                                     0.000100000000000,
                                                     0.020000000000000])
    self.ellipse3_mask = np.array(
        cv2.imread(
            os.path.join(
                testdata_path,
                'Binary_ellipse3.png'), cv2.IMREAD_GRAYSCALE))
    self.features_standard_ellipse3 = np.array(
        [101.9, 90.4, 24, 21, -9.9, 2])
    self.features_poly_ellipse3 = 100.00 * \
        np.array(
            [1.019717800289436,
             0.904095513748191,
             0.000017518811785,
             -0.000000901804067,
             0.000022518036288,
             0.020000000000000])
    self.ellipse4_mask = np.array(
        cv2.imread(
            os.path.join(
                testdata_path,
                'Binary_ellipse4.png'), cv2.IMREAD_GRAYSCALE))
    self.features_standard_ellipse4 = np.array(
        [65.3, 186, 28, 13, 50.8, 2])
    self.features_poly_ellipse4 = 100.00 * np.array([0.653333333333333,
                                                     1.860687093779016,
                                                     0.000040675758984,
                                                     0.000022724787475,
                                                     0.000031250940690,
                                                     0.020000000000000])
    # NOTE: attribute name "connectivty" (sic) is used by the other tests,
    # so it must stay misspelled here.
    self.connectivty = 4
    self.rtol = 2  # default for np.allclose is 1e-05!!
    self.atol = 1e-02  # default for np.allclose is 1e-08
    # testing the saving and loading
    self.num_regions = 7
    self.num_holes = 1
    self.num_islands = 2
    self.num_indent = 3
    self.num_protr = 1
    self.features = {}
    features_testpath = os.path.normpath(
        os.path.join(
            os.path.dirname(
                os.path.abspath(__file__)),
            'features/'))
    self.features_filename = os.path.join(
        features_testpath,
        'ellipse_features.txt')
def test_standard2poly_ellipse(self):
    '''
    Test the function `standard2poly_ellipse`.
    '''
    # Convert the (half axes, angle) representation and compare the
    # polynomial coefficients against the precomputed ground truth.
    computed = list(sr.helpers.standard2poly_ellipse(
        self.half_major_axis_len, self.half_minor_axis_len, self.theta))
    assert sr.helpers.array_diff(self.standard_coeff, computed)
def test_poly2standard_ellipse(self):
'''
Test the function `poly2standard_ellipse`.
'''
params = sr.helpers.poly2standard_ellipse(
self.standard_coeff[0], self.standard_coeff[1], self.standard_coeff[2])
print("Parameters:", params)
# params = [half_major_axis_len, half_minor_axis_len, theta]
true_params = [
self.half_major_axis_len,
self.half_minor_axis_len,
self.theta]
print("True parameters:", true_params)
assert sr.helpers.array_diff(params, true_params, 1e-5, 1e-8)
def test_mask2features_poly_ellipse1(self):
'''
Test the function `binary_mask2ellipse_features_single` for test image 1.
'''
_, _, features = sr.helpers.binary_mask2ellipse_features_single(
self.ellipse1_mask, self.connectivty, 2, True)
print("MATLAB features:", self.features_poly_ellipse1)
print("Python features:", features)
print('Difference: ', features - self.features_poly_ellipse1)
print(
'Max abs. difference: ',
np.max(np.max(np.abs(features - self.features_poly_ellipse1))))
assert sr.helpers.array_diff(
self.features_poly_ellipse1,
features,
self.rtol,
self.atol)
def test_mask2features_poly_ellipse2(self):
'''
Test the function `binary_mask2ellipse_features_single` for test image 2.
'''
_, _, features = sr.helpers.binary_mask2ellipse_features_single(
self.ellipse2_mask, self.connectivty, 2, True)
print("MATLAB features:", self.features_poly_ellipse2)
print("Python features:", features)
print('Difference: ', features - self.features_poly_ellipse2)
print(
'Max abs.difference: ',
np.max(np.max(np.abs(features - self.features_poly_ellipse2))))
assert sr.helpers.array_diff(
self.features_poly_ellipse2,
features,
self.rtol,
self.atol)
def test_mask2features_poly_ellipse3(self):
'''
Test the function `binary_mask2ellipse_features_single` for test image 3.
'''
_, _, features_poly = sr.helpers.binary_mask2ellipse_features_single(
self.ellipse3_mask, self.connectivty, 2)
assert sr.helpers.array_diff(
self.features_poly_ellipse3,
features_poly,
self.rtol,
self.atol)
def test_mask2features_poly_ellipse4(self):
'''
Test the function `binary_mask2ellipse_features_single` for test image 4.
'''
_, _, features = sr.helpers.binary_mask2ellipse_features_single(
self.ellipse4_mask, self.connectivty, 2)
assert sr.helpers.array_diff(
self.features_poly_ellipse4,
features,
self.rtol,
self.atol)
| |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_bill_of_materials_table1073_all_of
except ImportError:
bt_bill_of_materials_table1073_all_of = sys.modules[
"onshape_client.oas.models.bt_bill_of_materials_table1073_all_of"
]
try:
from onshape_client.oas.models import bt_table1825
except ImportError:
bt_table1825 = sys.modules["onshape_client.oas.models.bt_table1825"]
try:
from onshape_client.oas.models import bt_table_column_info1222
except ImportError:
bt_table_column_info1222 = sys.modules[
"onshape_client.oas.models.bt_table_column_info1222"
]
try:
from onshape_client.oas.models import bt_table_row1054
except ImportError:
bt_table_row1054 = sys.modules["onshape_client.oas.models.bt_table_row1054"]
class BTBillOfMaterialsTable1073(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Composed (allOf) model for an Onshape bill-of-materials table; see
    `_composed_schemas` for the schemas it merges.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This model declares no enum-restricted and no validated properties,
    # hence both tables are empty.
    allowed_values = {}
    validations = {}
    # No types are declared for additional (undeclared) properties.
    additional_properties_type = None
    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "bt_type": (str,), # noqa: E501
            "failed_metadata_representative_occurrences": ([str],), # noqa: E501
            "indented": (bool,), # noqa: E501
            "showing_excluded": (bool,), # noqa: E501
            "all_row_values": ([[str]],), # noqa: E501
            "column_count": (int,), # noqa: E501
            "frozen_columns": (int,), # noqa: E501
            "node_id": (str,), # noqa: E501
            "read_only": (bool,), # noqa: E501
            "row_count": (int,), # noqa: E501
            "table_columns": (
                [bt_table_column_info1222.BTTableColumnInfo1222],
            ), # noqa: E501
            "table_id": (str,), # noqa: E501
            "table_rows": ([bt_table_row1054.BTTableRow1054],), # noqa: E501
            "title": (str,), # noqa: E501
        }
    @staticmethod
    def discriminator():
        # Discriminator-based polymorphic dispatch is not used by this model.
        return None
    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        "bt_type": "btType", # noqa: E501
        "failed_metadata_representative_occurrences": "failedMetadataRepresentativeOccurrences", # noqa: E501
        "indented": "indented", # noqa: E501
        "showing_excluded": "showingExcluded", # noqa: E501
        "all_row_values": "allRowValues", # noqa: E501
        "column_count": "columnCount", # noqa: E501
        "frozen_columns": "frozenColumns", # noqa: E501
        "node_id": "nodeId", # noqa: E501
        "read_only": "readOnly", # noqa: E501
        "row_count": "rowCount", # noqa: E501
        "table_columns": "tableColumns", # noqa: E501
        "table_id": "tableId", # noqa: E501
        "table_rows": "tableRows", # noqa: E501
        "title": "title", # noqa: E501
    }
    # Internal bookkeeping attributes set directly in __init__, as opposed
    # to API payload properties handled through the data store.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
            "_composed_instances",
            "_var_name_to_model_instances",
            "_additional_properties_model_instances",
        ]
    )
    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ): # noqa: E501
        """bt_bill_of_materials_table1073.BTBillOfMaterialsTable1073 - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            bt_type (str): [optional] # noqa: E501
            failed_metadata_representative_occurrences ([str]): [optional] # noqa: E501
            indented (bool): [optional] # noqa: E501
            showing_excluded (bool): [optional] # noqa: E501
            all_row_values ([[str]]): [optional] # noqa: E501
            column_count (int): [optional] # noqa: E501
            frozen_columns (int): [optional] # noqa: E501
            node_id (str): [optional] # noqa: E501
            read_only (bool): [optional] # noqa: E501
            row_count (int): [optional] # noqa: E501
            table_columns ([bt_table_column_info1222.BTTableColumnInfo1222]): [optional] # noqa: E501
            table_id (str): [optional] # noqa: E501
            table_rows ([bt_table_row1054.BTTableRow1054]): [optional] # noqa: E501
            title (str): [optional] # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        constant_args = {
            "_check_type": _check_type,
            "_path_to_item": _path_to_item,
            "_from_server": _from_server,
            "_configuration": _configuration,
        }
        required_args = {}
        # remove args whose value is Null because they are unset
        required_arg_names = list(required_args.keys())
        for required_arg_name in required_arg_names:
            if required_args[required_arg_name] is nulltype.Null:
                del required_args[required_arg_name]
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Validate the supplied values against the composed (allOf) schemas
        # and distribute them to the matching component model instances.
        composed_info = validate_get_composed_info(constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name in unused_args
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and not self._additional_properties_model_instances
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
    @staticmethod
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        return {
            "anyOf": [],
            "allOf": [
                bt_bill_of_materials_table1073_all_of.BTBillOfMaterialsTable1073AllOf,
                bt_table1825.BTTable1825,
            ],
            "oneOf": [],
        }
| |
#!/usr/bin/env python
import os

from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse
from django.core.validators import EMPTY_VALUES
from django.db.models import Model, get_model
from django.db.models.query import QuerySet
from django.forms.util import flatatt
from django.forms.widgets import TextInput
from django.utils.datastructures import MergeDict, MultiValueDict
from django.utils.encoding import StrAndUnicode, smart_unicode, force_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.simplejson import JSONEncoder
from django.utils.translation import ugettext_lazy as _

from formsfive import widgets
from formsfive.widgets import HTML5Input
__all__ = (
'ValidateLoginPassWidget', 'ValidateEmailWidget', 'PreviewWidget', 'AttachWidget',
'MultipleFocusWidget', 'FocusSelect', 'DatePickerWidget', 'AutoCompleteModelWidget',
'AutoCompleteTagWidget', 'USPhoneNumberMultiWidget', 'SSNInputWidget', 'GenericWidget',
'DisplayOnlyWidget'
)
class ValidateLoginPassWidget(widgets.MultiWidget):
    """
    Basic MultiWidget to validate passwords versus
    each other. This returns both values.
    """
    def __init__(self, attrs=None):
        formsfive_widgets = (
            widgets.PasswordInput(render_value=False, attrs={'placeholder': 'Insert your password', 'is_required': True}),
            widgets.PasswordInput(render_value=False, attrs={'placeholder': 'Confirm password', 'is_required': False}),
        )
        super(ValidateLoginPassWidget, self).__init__(formsfive_widgets, attrs)

    def format_output(self, widgets):
        # Markup glue inserted between the two rendered password inputs,
        # carrying the confirmation field's label.
        return u'</div>\n<div class="form-default"><label for="password1">Password (Validation): </label>'.join(widgets)

    def decompress(self, value):
        """Split the composite value into [password, confirmation].

        ``value`` is expected to be a dict keyed by 'password' and
        'password1'; anything else decompresses to two empty slots.
        """
        if isinstance(value, dict) and value:
            # BUG FIX: the original used attribute access (value.password),
            # which raises AttributeError on the dict it just checked for.
            return [value.get('password'), value.get('password1')]
        return [None, None]
class ValidateEmailWidget(widgets.MultiWidget):
    """
    Basic MultiWidget to validate email addresses versus
    each other. This returns both values.
    """
    def __init__(self, attrs=None):
        email_inputs = (
            widgets.TextInput(attrs={'placeholder': 'Email Address', 'is_required': True}),
            widgets.TextInput(attrs={'placeholder': 'Validate Address', 'is_required': False}),
        )
        super(ValidateEmailWidget, self).__init__(email_inputs, attrs)

    def format_output(self, widgets):
        # Markup glue inserted between the two rendered inputs, carrying
        # the validation field's label.
        return u'</div>\n<div class="multi"><label for="email1">Email Address (Validation):</label><br />'.join(widgets)

    def decompress(self, value):
        # Expose both the address and its confirmation to the sub-widgets.
        if not value:
            return [None, None]
        return [value.email, value.email1]
class PreviewWidget(widgets.FileInput):
    """
    File input that also renders a thumbnail of the currently attached
    image - think usability.

    The thumbnail URL is derived from the stored file name by inserting
    ``_thumbnail`` before the extension (``photo.jpg`` ->
    ``photo_thumbnail.jpg``).
    """
    def __init__(self, *args, **kwargs):
        super(PreviewWidget, self).__init__(*args, **kwargs)
        self.attrs = kwargs.get('attrs', {})
        # Base media location; can be overridden via attrs['setting'].
        self.setting = self.attrs.get('setting', settings.MEDIA_ROOT)

    def render(self, name, value, attrs=None):
        thumb_html = ''
        if value and hasattr(value, "url"):
            # Derive the thumbnail file name from the stored file name
            # ('ext' rather than 'format': avoid shadowing the builtin).
            basename, ext = str(value.name).rsplit('.', 1)
            thumb = '%s_thumbnail.%s' % (basename, ext)
            thumb_html = '<img src="%s/%s" class="thumb" /><br />' % (value.storage.base_url, thumb)
        return mark_safe("%s%s" % (thumb_html, super(PreviewWidget, self).render(name, value, attrs)))
class AttachWidget(widgets.FileInput):
    """
    File input that also prints the base name of the file currently
    attached to it.
    """
    def __init__(self):
        super(AttachWidget, self).__init__({})

    def render(self, name, value, attrs=None):
        parent_html = super(AttachWidget, self).render(name, value, attrs)
        if value in EMPTY_VALUES:
            return mark_safe(parent_html)
        # Show only the file's base name, not its full storage path.
        attached = os.path.basename(u'%s' % value)
        attach_html = '<div class=\"attached\" name=\"%s\" >File Attached: %s</div>' % (attached, attached)
        return mark_safe("%s%s" % (attach_html, parent_html))
class MultipleFocusWidget(widgets.SelectMultiple):
    '''
    This adds the necessary html and js in order to dynamically
    create this image gallery. The first appended <ul> performs
    the correct action functions (view, add, delete). The last
    appended element allows the addition of selected files to
    be viewed via a modal window.
    '''
    class Media:
        # Gallery behaviour wired in through Django's form-media framework.
        js = ( settings.THEIRRY_MEDIA_URL + "js/image-form.js",)
    def __init__(self, attrs=None, choices=(), *args, **kwargs):
        super(MultipleFocusWidget, self).__init__(attrs, choices, *args, **kwargs)
        '''
        Look for select attributes to override default
        '''
        # NOTE(review): 'attrs' is a named parameter above, so it can never
        # appear in **kwargs -- this lookup always falls back to {} and the
        # size override never takes effect. Confirm intent before changing.
        self.attrs = kwargs.get('attrs', {})
        self.size = self.attrs.get('size', 5)
    def render(self, name, value, attrs=None, choices=()):
        # add url
        # Modal endpoints used by the action toolbar appended below.
        add_url = reverse('generic_images_modal')
        search_url = reverse('search_interchange_modal')
        if attrs is None: attrs = {}
        attrs['class'] = 'multiple add'
        attrs['size'] = self.size
        output = [super(MultipleFocusWidget, self).render(name, value, attrs, choices)]
        # Action toolbar: view / pick from library / add / remove.
        output.append(u'''
        <ul id="file-action" class="multiplefilechooser">
            <li><a href="#id_image" id="view-file" name="view" class="form-elements file-view" title="View">View</a></li>
            <li><a href="%s" id="folder-file" name="remove" class="form-elements file-folder" title="From Theirry Library">File</a></li>
            <li><a href="%s" id="add-file" name="add" class="form-elements file-add" title="Add">Add</a></li>
            <li><a href="#id_image" id="remove-file" name="remove" class="form-elements file-remove" title="Remove">Remove</a></li>
        </ul>
        ''' % (search_url, add_url))
        # Hidden modal container that the gallery javascript populates.
        output.append(u'''
        <article id="image" style="z-index: 12; opacity: 1; display: none;" >
            <div id="image-close"><a href="#" class="simplemodal-close">x</a></div>
            <h1 id="modal-title"></h1>
            <div id="modal-area">
            </div>
        </article>
        ''')
        return mark_safe(u''.join(output))
    def render_option(self, selected_choices, option_value, option_label):
        # Renders each <option>, stuffing the related image URL into the
        # id attribute (read back by the gallery javascript).
        option_value = force_unicode(option_value)
        selected_html = (option_value in selected_choices) and u' selected="selected"' or ''
        if len(selected_choices) == 0:
            image = 'none'
        else:
            # TODO - this gives you the url to see that image
            uri = self.choices.queryset.get(pk=option_value)
            image = uri.image.image.url
        return u'<option value="%s"id="%s"%s>%s</option>' % (
            escape(option_value), image, selected_html,
            conditional_escape(force_unicode(option_label)))
class FocusSelect(widgets.Select): #TODO - change to (SelectMultiple)
    """
    A multiple-select widget that reports whether the submitted data
    differs from the initial values of the first pass.
    """
    def render(self, name, value, attrs=None, choices=()):
        selected = [] if value is None else value
        final_attrs = self.build_attrs(attrs, name=name)
        pieces = [u'<select multiple="multiple" class="focus"%s>' % flatatt(final_attrs)]
        rendered_options = self.render_options(choices, selected)
        if rendered_options:
            pieces.append(rendered_options)
        pieces.append('</select>')
        return mark_safe(u'\n'.join(pieces))

    def value_from_datadict(self, data, files, name):
        # Multi-valued sources (regular POST data) need getlist().
        if isinstance(data, (MultiValueDict, MergeDict)):
            return data.getlist(name)
        return data.get(name, None)

    def _has_changed(self, initial, data):
        # Treat a missing selection like an empty one on both sides.
        initial = initial if initial is not None else []
        data = data if data is not None else []
        if len(initial) != len(data):
            return True
        return any(
            force_unicode(first) != force_unicode(second)
            for first, second in zip(initial, data))
class DatePickerWidget(widgets.TextInput):
    '''
    Renders all the necessary code for the jQuery datetimepicker-addon
    field. This uses yepnope, an asynchronous conditional resource loader.
    If you're using this outside of the original application please:
    CHECK YOUR SETTING LOCATIONS
    '''
    class Media:
        # BUG FIX: 'css' used to be a bare class attribute; Django only
        # collects static assets declared on an inner Media class.
        css = {
            'all': (
                '%scss/theirry/jquery-ui-1.8.15.custom.css' % settings.THEIRRY_MEDIA_URL,
            )
        }

    def render(self, name, value, attrs=None):
        final_attrs = self.build_attrs(attrs)
        # Values interpolated into the inline <script> template below.
        self.options = {
            'id': str(final_attrs.get('id', None)), 'static': settings.THEIRRY_MEDIA_URL
        }
        output = [super(DatePickerWidget, self).render(name, value, attrs)]
        # BUG FIX: the 2nd and 3rd yepnope load entries were missing their
        # quotes, producing a javascript syntax error.
        output.append(
            u'''
            <script>
                yepnope({
                    load:[
                        '%(static)sjs/jquery.js', '%(static)sjs/jquery-ui-1.8.15.custom.js', '%(static)sjs/jquery-ui-timepicker-addon.js'
                    ],
                    complete: function(){
                        jQuery(function(){
                            var %(id)s = $('#%(id)s');
                            %(id)s.datetimepicker({
                                timeFormat: 'hh:mm:ss TT',
                                ampm: true,
                                hourMin: 0,
                                minuteMin: 0,
                                secondMin: 0,
                                stepMinute: 5,
                            });
                            $('#%(id)s_button').click(function(){
                                %(id)s.datetimepicker('setDate', (new Date()) );
                            });
                        });
                    }
                });
            </script>
            ''' % (self.options)
        )
        return mark_safe(u''.join(output))
class AutoCompleteModelWidget(widgets.TextInput):
    '''
    Text input with jQuery-UI autocompletion against an arbitrary lookup
    view (comma-separated multi-value completion).
    Usage:
    tags = TagField(widget=AutoCompleteTagWidget(attrs={'app': 'your application name', 'model': 'your model', 'source': 'tag_lookup', 'length': 2}), required=False)
    '''
    def __init__(self, options=None, *args, **kwargs):
        # BUG FIX: the default used to be a shared mutable dict
        # (options=dict()) which was even mutated below ('disabled').
        self.options = options if options is not None else {}
        super(AutoCompleteModelWidget, self).__init__(*args, **kwargs)
        if self.options.get('length') in EMPTY_VALUES:
            self.length = 2  # default to 2 letters
        else:
            self.length = self.options.get('length', 2)
        if isinstance(self.options.get('model'), Model):
            self.model = self.options.get('model')
        # BUG FIX: is_url/uri were left unset when 'uri' was missing or not
        # a string, crashing render(); default them before inspecting.
        self.is_url = False
        self.uri = '/'
        if 'uri' in self.options:
            if isinstance(self.options['uri'], basestring):
                self.is_url = True
                self.uri = self.options['uri']
        else:
            # No lookup endpoint at all -> render the input disabled.
            self.options['disabled'] = 'true'

    class Media:
        css = {'all': ('%scss/autocomplete.css' % settings.THEIRRY_MEDIA_URL,)}

    def render(self, name, value, attrs=None):
        if self.is_url and self.options.get('uri', '/').find('/') == -1:
            # The uri may be a named url pattern rather than a path; try
            # to reverse it and disable the search on failure.
            try:
                self.uri = reverse(self.options['uri'])
            except NoReverseMatch:
                self.uri = '/'
        self.options = {
            'id': attrs['id'], 'uri': self.uri,
            'length': self.length, 'static': settings.THEIRRY_MEDIA_URL,
        }
        # Normalize the bound value into a comma-separated string of tag
        # names; plain strings pass through unchanged.
        if value in EMPTY_VALUES or len(value) == 0:
            value = ''
        elif isinstance(value, QuerySet):
            value = ', '.join(v.tag.name for v in value)
        output = [super(AutoCompleteModelWidget, self).render(name, value, attrs)]
        output.append(u'''
        <script>
            yepnope({
                load:[
                    '%(static)sjs/jquery.js', '%(static)sjs/jquery-ui-1.8.15.custom.js'
                ],
                complete: function(){
                    jQuery(function(){
                        function split(val) {
                            return val.split(/\s*, \s*/);
                        }
                        function extractLast(term) {
                            return split(term).pop();
                        }
                        $('#%(id)s').autocomplete({
                            source: function(request, response) {
                                $.getJSON('%(uri)s', {
                                    term: extractLast(request.term)
                                }, response);
                            },
                            search: function() {
                                // custom minLength
                                var term = extractLast(this.value);
                                if (term.length < %(length)s) {
                                    return false;
                                }
                            },
                            focus: function() {
                                // prevent value inserted on focus
                                return false;
                            },
                            select: function(event, ui) {
                                var terms = split( this.value );
                                // remove the current input
                                terms.pop();
                                // add the selected item
                                terms.push( ui.item.value );
                                // add placeholder to get the comma-and-space at the end
                                terms.push("");
                                this.value = terms.join(", ");
                                return false;
                            }
                        });
                    });
                }
            });
        </script>
        ''' % (self.options))
        return mark_safe(u''.join(output))
class AutoCompleteTagWidget(widgets.TextInput):
    '''
    Text input with jQuery-UI autocompletion backed by the 'tag_lookup'
    url (comma-separated multi-tag completion).
    Usage:
    tags = TagField(widget=AutoCompleteTagWidget(attrs={'app': 'your application name', 'model': 'your model', 'length': 2}), required=False)
    '''
    def __init__(self, *args, **kwargs):
        super(AutoCompleteTagWidget, self).__init__(*args, **kwargs)
        self.attrs = kwargs.get('attrs', {})
        self.length = self.attrs.get('length', '2') # default to 2 letters
        # Resolve the tagged model when both 'app' and 'model' attrs are
        # supplied; otherwise no model is attached.
        if self.attrs.get('model') and self.attrs.get('app') not in EMPTY_VALUES:
            self.model = get_model(self.attrs.get('app'), self.attrs.get('model'))
        else:
            pass
    class Media:
        css = {'all': ('%scss/autocomplete.css' % settings.THEIRRY_MEDIA_URL,)}
    def render(self, name, value, attrs=None):
        # Values interpolated into the inline <script> template below.
        self.options = {
            'id': attrs['id'], 'tag_lookup': reverse('tag_lookup'),
            'length': self.length, 'static': settings.THEIRRY_MEDIA_URL,
        }
        # Normalize the bound value into a comma-separated string of tag
        # names (a QuerySet of tagged items becomes 'a, b, c').
        if value in EMPTY_VALUES or len(value) == 0:
            value = ''
        else:
            complete_list = list()
            if isinstance(value, QuerySet):
                for v in value:
                    complete_list.append(v.tag.name)
                value = ', '.join(complete_list)
            if isinstance(value, unicode):
                pass
        output = [super(AutoCompleteTagWidget, self).render(name, value, attrs)]
        # Attach the jQuery-UI autocomplete behaviour; scripts are loaded
        # lazily through yepnope.
        output.append(u'''
        <script>
            yepnope({
                load:[
                    '%(static)sjs/jquery.js', '%(static)sjs/jquery-ui-1.8.15.custom.js'
                ],
                complete: function(){
                    jQuery(function(){
                        function split(val) {
                            return val.split(/\s*, \s*/);
                        }
                        function extractLast(term) {
                            return split(term).pop();
                        }
                        $('#%(id)s').autocomplete({
                            source: function(request, response) {
                                $.getJSON('%(tag_lookup)s', {
                                    term: extractLast(request.term)
                                }, response);
                            },
                            search: function() {
                                // custom minLength
                                var term = extractLast(this.value);
                                if (term.length < %(length)s) {
                                    return false;
                                }
                            },
                            focus: function() {
                                // prevent value inserted on focus
                                return false;
                            },
                            select: function(event, ui) {
                                var terms = split( this.value );
                                // remove the current input
                                terms.pop();
                                // add the selected item
                                terms.push( ui.item.value );
                                // add placeholder to get the comma-and-space at the end
                                terms.push("");
                                this.value = terms.join(", ");
                                return false;
                            }
                        });
                    });
                }
            });
        </script>
        ''' % (self.options))
        return mark_safe(u''. join(output))
class USPhoneNumberMultiWidget(widgets.MultiWidget):
    """
    A Widget that splits US Phone number input into three boxes
    (area code, exchange, line number).
    """
    def __init__(self, attrs=None):
        # Consistency fix: use the formsfive widgets like the rest of this
        # module (the original referenced a bare 'TextInput' name).
        phone_widgets = (
            widgets.TextInput(attrs={'size': '3', 'maxlength': '3', 'class': 'phone'}),
            widgets.TextInput(attrs={'size': '3', 'maxlength': '3', 'class': 'phone'}),
            widgets.TextInput(attrs={'size': '4', 'maxlength': '4', 'class': 'phone'}),
        )
        super(USPhoneNumberMultiWidget, self).__init__(phone_widgets, attrs)

    def decompress(self, value):
        # 'xxx-xxx-xxxx' -> one chunk per sub-widget.
        if value:
            return value.split('-')
        return [None, None, None]

    def value_from_datadict(self, data, files, name):
        values = super(USPhoneNumberMultiWidget, self).value_from_datadict(data, files, name)
        # BUG FIX: %-formatting needs a tuple; the list returned by the
        # parent raised TypeError here.
        return u'%s-%s-%s' % tuple(values)
class SSNInputWidget(widgets.TextInput):
    '''Renders United States SSN as xxx-xx-xxxx'''
    def render(self, name, value, attrs=None):
        # Insert the dashes only for a bare 9-character value.
        if value and len(value) == 9:
            area, group, serial = value[:3], value[3:5], value[5:]
            value = "%s-%s-%s" % (area, group, serial)
        return super(SSNInputWidget, self).render(name, value, attrs)
class GenericWidget(widgets.HiddenInput):
    '''
    Hidden inputs carrying a generic relation (app label, module/model
    name and foreign key):
    generic = GenericField(widget=GenericWidget(attrs={'app': 'your application name', 'model': 'your module', 'pk': 'custom id function'}))
    '''
    def __init__(self, attrs=None, choices=(), *args, **kwargs):
        super(GenericWidget, self).__init__(attrs)
        # BUG FIX: attrs defaults to None, but .get() is used below;
        # normalize to a dict so omitting attrs no longer crashes.
        self.attrs = attrs if attrs is not None else {}
        if self.attrs.get('module') and self.attrs.get('app') not in EMPTY_VALUES:
            self.model = get_model(self.attrs.get('app'), self.attrs.get('module'))
        # NOTE(review): create_uuid is not defined/imported in this module
        # -- confirm where the default pk generator comes from.
        self.fk = self.attrs.get('pk', create_uuid())

    def render(self, name, value, attrs=None, choices=()):
        '''
        call initial to set the foreign key
        '''
        if value:
            self.fk = value
        self.options = {
            "app": self.model._meta.app_label, 'module': self.model._meta.module_name,
            'fk': self.fk
        }
        output = list()
        output.append(u'''
        <div style='display:none'><input type='hidden' id='gen_app' name='gen_app' value='%(app)s' />
        <input type='hidden' id='gen_module' name='gen_module' value='%(module)s' />
        <input type='hidden' id='gen_fk_app' name='gen_fk_app' value='%(fk)s' /></div>'''
        % (self.options))
        return mark_safe(u''.join(output))
class DisplayOnlyWidget(widgets.HiddenInput):
    '''
    This extends the HiddenWidget and displays
    the info (value) contained in the input as
    text or (html)
    '''
    def __init__(self, foreign_object, *args, **kwargs):
        self.foreign_object = foreign_object
        self.attrs = kwargs.get('attrs', {})
        # BUG FIX: the 'id' default used to be assigned first and then
        # immediately overwritten with attrs.get('field', None).
        self.field = self.attrs.get('field', 'id')
        super(DisplayOnlyWidget, self).__init__()

    def render(self, name, value, attrs=None):
        if self.foreign_object is not None:
            # Show the configured attribute of the related object next to
            # the hidden input.
            display = '%s' % self.foreign_object.__dict__[self.field]
            return super(DisplayOnlyWidget, self).render(name, value, attrs) + mark_safe(display)
        else:
            return "None"
| |
# -*- coding: utf-8 -*-
#
# escpos/helpers.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import time
from collections import namedtuple
from itertools import takewhile
from operator import attrgetter
from builtins import chr
from builtins import bytes
import six
from six.moves import zip_longest
from .exceptions import TimeoutException
# Lightweight records returned by find_implementations():
# _Model carries printer model metadata; Implementation pairs it with the
# implementation class and its fully qualified name.
_Model = namedtuple('_Model', 'name vendor')
Implementation = namedtuple('Implementation', 'model type fqname')
def find_implementations(sort_by=None):
    """
    Return a tuple of :class:`~escpos.helpers.Implementation` records, one
    for every known subclass of :class:`~escpos.impl.epson.GenericESCPOS`,
    each carrying the vendor/model names, the implementation class and its
    fully qualified name.

    Example, printing all vendor and model names sorted by vendor:

    .. sourcecode::

        for impl in find_implementations(sort_by='model.vendor'):
            print impl.model.vendor, ':', impl.model.name

    :param str sort_by: Attribute name to sort the resulting list (optional).

    :rtype: tuple
    """
    described = [_describe_impl(impl_type) for impl_type in _list_impls()]
    if sort_by:
        described.sort(key=attrgetter(sort_by))
    return tuple(described)
class TimeoutHelper(object):
    """Tracks elapsed wall-clock time against a timeout.

    A non-positive ``timeout`` disables checking entirely.
    """

    def __init__(self, timeout=1):
        # BUG FIX: the argument was ignored and the timeout hard-coded to 1.
        self.timeout = timeout
        self.set()

    def set(self):
        """(Re)start the timeout clock."""
        self._mark = time.time()

    def check(self):
        """Raise :exc:`TimeoutException` once the timeout has elapsed.

        Returns ``False`` while still inside the allowed window (or when
        checking is disabled).
        """
        if self.timeout > 0:
            if time.time() - self._mark > self.timeout:
                raise TimeoutException((
                        '{!r} seconds have passed'
                    ).format(self.timeout))
        return False
class ByteValue(object):
    """Integer value with convenient single-bit accessors."""

    def __init__(self):
        self._int_value = 0

    @property
    def byte(self):
        # Single-byte representation of the current value.
        return six.int2byte(self._int_value)

    @property
    def value(self):
        # Raw integer value.
        return self._int_value

    def get_bit(self, n):
        """Return True when bit *n* is set."""
        return bool((self._int_value >> n) & 1)

    def set_bit(self, n):
        """Turn bit *n* on."""
        self._int_value |= 1 << n

    def clear_bit(self, n):
        """Turn bit *n* off."""
        self._int_value &= ~(1 << n)
def chunks(iterable, size):
    """Yield *iterable* as successive ``bytearray`` chunks of *size* items.

    The final chunk may be shorter; the ``None`` padding produced by the
    internal zip_longest grouping is stripped from it.
    """
    slots = [iter(iterable)] * size
    for group in zip_longest(*slots, fillvalue=None):
        yield bytearray(takewhile(lambda item: item is not None, group))
def to_bytes(content, encoding='utf-8', errors='strict'):
    """Coerce *content* into a ``bytes`` object.

    Accepts bytes/bytearray/memoryview, text (encoded using *encoding*
    and *errors*), or any iterable of byte-sized integers.

    Borrowed from `PySerial <https://github.com/pyserial/pyserial>`_ since
    it is now optional.
    """
    if isinstance(content, bytes):
        # Python 2: isinstance('', bytes) is True
        return bytes(content)
    if isinstance(content, bytearray):
        return bytes(content)
    if isinstance(content, memoryview):
        return content.tobytes()
    if isinstance(content, six.string_types):
        return bytes(content.encode(encoding, errors))
    # handle list of integers and bytes (one or more items)
    # for Python 2 and 3
    return bytes(bytearray(content))
def hexdump(content, encoding='utf-8', errors='strict', eol='\n', panel_gap=2):
    """Render *content* as a classic two-panel (hex + printable) dump."""
    raw = to_bytes(content, encoding=encoding, errors=errors)
    hex_lines, char_lines = hexdump_bytes(raw)
    separator = ' ' * panel_gap
    rows = [
        '{}{}{}'.format(hex_line, separator, char_line)
        for hex_line, char_line in zip(hex_lines, char_lines)
    ]
    return eol.join(rows)
def hexdump_bytes(data, fill_last_line=True):
    """Split *data* into parallel hex and printable-character panels.

    Returns ``(hex_panel, char_panel)``: lists of 16-byte rows, the hex
    rows space-separated (``'xx xx ...'``), the char rows using ``'.'``
    for non-printable bytes. When *fill_last_line* is true both final
    rows are right-padded to full width (47 and 16 columns).
    """
    hex_cells = ['{:02x}'.format(b) for b in data]
    chr_cells = [chr(b) if 32 <= b <= 126 else '.' for b in data]

    hex_panel = [
        ' '.join(hex_cells[i:i + 16]) for i in range(0, len(hex_cells), 16)]
    char_panel = [
        ''.join(chr_cells[i:i + 16]) for i in range(0, len(chr_cells), 16)]

    if hex_panel and fill_last_line:
        hex_panel[-1] = hex_panel[-1].ljust(47)
    if char_panel and fill_last_line:
        char_panel[-1] = char_panel[-1].ljust(16)

    return hex_panel, char_panel
def is_value_in(constants_group, value):
    """Return whether *value* is one of the keys of *constants_group*,
    which must be a Django-like choices tuple of ``(key, label)`` pairs.
    """
    keys = [pair[0] for pair in constants_group]
    return value in keys
def _list_impls():
    # Imported lazily to avoid a circular import at module load time.
    from escpos.impl.epson import GenericESCPOS as _root
    return _impls_for(_root)
def _impls_for(t):
impls = [t]
for subcls in t.__subclasses__():
impls.extend(_impls_for(subcls))
return impls
def _describe_impl(t):
    """Build an :class:`Implementation` record for the class *t*."""
    model = _Model(name=t.model.name, vendor=t.model.vendor)
    return Implementation(model=model, type=t, fqname=_fqname(t))
def _fqname(t):
m = inspect.getmodule(t)
return '.'.join([m.__name__, t.__name__])
| |
from django.conf import settings
from django.utils import translation
from elasticsearch_dsl import F, query
from elasticsearch_dsl.filter import Bool
from rest_framework.filters import BaseFilterBackend
import mkt
from mkt.api.base import form_errors, get_region_from_request
from mkt.constants.applications import get_device_id
from mkt.features.utils import get_feature_profile
class SearchQueryFilter(BaseFilterBackend):
    """
    A django-rest-framework filter backend that scores the given ES queryset
    with a should query based on the search query found in the current
    request's query parameters.
    """

    def _get_locale_analyzer(self, lang):
        # Map the active language to its ES analyzer name; drop analyzers
        # that need an ES plugin when plugins are disabled in settings.
        analyzer = mkt.SEARCH_LANGUAGE_TO_ANALYZER.get(lang)
        if (analyzer in mkt.SEARCH_ANALYZER_PLUGINS and
                not settings.ES_USE_PLUGINS):
            analyzer = None
        return analyzer

    def filter_queryset(self, request, queryset, view):
        """Score ``queryset`` against the ``q`` query parameter using a
        `function_score` query built from a list of `should` clauses.

        Returns the queryset untouched when no query string is provided.
        """
        q = request.GET.get('q', '').lower()
        lang = translation.get_language()
        analyzer = self._get_locale_analyzer(lang)

        if not q:
            return queryset

        should = []
        # Each rule is (query class, kwargs) and is applied to every base
        # field below; boosts rank phrase > plain match > fuzzy > prefix.
        rules = [
            (query.Match, {'query': q, 'boost': 3, 'analyzer': 'standard'}),
            (query.Match, {'query': q, 'boost': 4, 'type': 'phrase',
                           'slop': 1}),
            (query.Prefix, {'value': q, 'boost': 1.5}),
        ]

        # Only add fuzzy queries if q is a single word. It doesn't make sense
        # to do a fuzzy query for multi-word queries.
        if ' ' not in q:
            rules.append(
                (query.Fuzzy, {'value': q, 'boost': 2, 'prefix_length': 1}))

        # Apply rules to search on few base fields. Some might not be present
        # in every document type / indexes.
        for k, v in rules:
            for field in ('app_slug', 'author', 'name', 'short_name', 'slug',
                          'title', 'url_tokenized'):
                should.append(k(**{field: v}))

        # Exact matches need to be queried against a non-analyzed field. Let's
        # do a term query on `name.raw` for an exact match against the item
        # name and give it a good boost since this is likely what the user
        # wants.
        # FIXME: we should also do that on translations and slug/app_slug, but
        # we don't store a raw version for them at the moment.
        should.append(query.Term(**{'name.raw': {'value': q, 'boost': 10}}))
        # Do the same for GUID searches.
        should.append(query.Term(**{'guid': {'value': q, 'boost': 10}}))

        # If query is numeric, check if it is an ID.
        if q.isnumeric():
            should.append(query.Term(**{'id': {'value': q, 'boost': 10}}))

        if analyzer:
            # Also search the language-specific analyzed name fields.
            should.append(
                query.Match(**{'name_l10n_%s' % analyzer: {'query': q,
                                                           'boost': 2.5}}))
            should.append(
                query.Match(**{'short_name_l10n_%s' % analyzer: {
                    'query': q,
                    'boost': 2.5}}))

        # Add searches on the description field.
        should.append(
            query.Match(description={'query': q, 'boost': 0.8,
                                     'type': 'phrase'}))

        if analyzer:
            # Stemmed languages use a dedicated '<lang>_analyzer' analyzer
            # for the localized description field.
            desc_field = 'description_l10n_%s' % analyzer
            desc_analyzer = ('%s_analyzer' % analyzer
                             if analyzer in mkt.STEMMER_MAP else analyzer)
            should.append(
                query.Match(
                    **{desc_field: {'query': q, 'boost': 0.6, 'type': 'phrase',
                                    'analyzer': desc_analyzer}}))

        # Add searches on tag field.
        should.append(query.Term(tags={'value': q}))
        if ' ' not in q:
            should.append(query.Fuzzy(tags={'value': q, 'prefix_length': 1}))

        # The list of functions applied to our `function_score` query.
        functions = [
            query.SF('field_value_factor', field='boost'),
        ]

        # Add a boost for the preferred region, if it exists.
        region = get_region_from_request(request)
        if region:
            functions.append({
                'filter': {'term': {'preferred_regions': region.id}},
                # TODO: When we upgrade to Elasticsearch 1.4, change this
                # to 'weight'.
                'boost_factor': 4,
            })

        return queryset.query('function_score',
                              query=query.Bool(should=should),
                              functions=functions)
class SearchFormFilter(BaseFilterBackend):
    """
    A django-rest-framework filter backend that filters the given queryset
    based on `self.form_class`.

    Subclasses declare `VALID_FILTERS`, the list of Elasticsearch field
    names they allow filtering on.
    """
    # A mapping of form fields to Elasticsearch fields for those that differ.
    FORM_TO_FIELD_MAP = {
        'author': 'author.raw',
        'cat': 'category',
        'has_info_request': 'latest_version.has_info_request',
        'has_editor_comment': 'latest_version.has_editor_comment',
        'languages': 'supported_locales',
        'offline': 'is_offline',
        'premium_types': 'premium_type',
        'tag': 'tags'
    }

    def filter_queryset(self, request, queryset, view):
        """Validate `request.GET` with `view.form_class` and apply the
        cleaned data as `must` term/terms filters on `queryset`.

        Raises the exception built by `form_errors()` when the form is
        invalid.
        """
        form = view.form_class(request.GET)
        if not form.is_valid():
            raise form_errors(form)
        # Keep the cleaned data around: subclasses (e.g. the reviewer
        # search) read it after calling super().
        self.form_data = form.cleaned_data

        # Translate form field names to their Elasticsearch counterparts.
        data = {}
        for k, v in self.form_data.items():
            data[self.FORM_TO_FIELD_MAP.get(k, k)] = v

        # Must filters: list values become `terms`, scalars become `term`.
        must = []
        for field in self.VALID_FILTERS:
            value = data.get(field)
            if value is not None:
                # isinstance() rather than an exact type comparison, so
                # list subclasses are also handled as multi-value filters.
                filter_type = 'terms' if isinstance(value, list) else 'term'
                must.append(F(filter_type, **{field: value}))

        if must:
            return queryset.filter(Bool(must=must))

        return queryset
class PublicSearchFormFilter(SearchFormFilter):
    # Elasticsearch fields the public search API may filter on; consumed
    # by SearchFormFilter.filter_queryset.
    VALID_FILTERS = ['app_type', 'author.raw', 'category', 'device', 'guid',
                     'installs_allowed_from', 'is_offline', 'manifest_url',
                     'premium_type', 'supported_locales', 'tags']
class ReviewerSearchFormFilter(SearchFormFilter):
    """Search form filter for the reviewer tools.

    Allows review-only fields in addition to the public ones and supports
    the virtual `is_tarako` flag.
    """
    VALID_FILTERS = ['app_type', 'author.raw', 'category', 'device',
                     'latest_version.has_editor_comment',
                     'latest_version.has_info_request',
                     'latest_version.status',
                     'installs_allowed_from', 'is_escalated', 'is_offline',
                     'manifest_url', 'premium_type', 'status',
                     'supported_locales', 'tags']

    def filter_queryset(self, request, queryset, view):
        queryset = super(ReviewerSearchFormFilter,
                         self).filter_queryset(request, queryset, view)

        # `is_tarako` is not a real document field: it is expressed as the
        # presence (True) or absence (False) of the 'tarako' tag.
        is_tarako = self.form_data.get('is_tarako')
        if is_tarako is None:
            return queryset

        tarako_tag = F('term', tags='tarako')
        if not is_tarako:
            tarako_tag = ~tarako_tag
        return queryset.filter(Bool(must=[tarako_tag]))
class WebsiteSearchFormFilter(SearchFormFilter):
    # Elasticsearch fields the public website search may filter on.
    VALID_FILTERS = ['keywords', 'category', 'device']
class ReviewerWebsiteSearchFormFilter(SearchFormFilter):
    # Same as WebsiteSearchFormFilter, plus reviewer-only status fields.
    VALID_FILTERS = ['keywords', 'category', 'device', 'status', 'is_disabled']
class ExtensionSearchFormFilter(SearchFormFilter):
    # Extensions currently only support filtering by author.
    VALID_FILTERS = ['author.raw']
class PublicContentFilter(BaseFilterBackend):
    """
    A django-rest-framework filter backend that filters only public items --
    those not deleted, with PUBLIC status and not disabled.
    """

    def filter_queryset(self, request, queryset, view):
        # Keep documents that are PUBLIC and not disabled, and drop those
        # flagged as deleted.
        # Note: only Extensions have is_deleted, for Webapps the status is
        # changed when deleted. That's why a must_not is used, it will be true
        # even if the field does not exist.
        return queryset.filter(
            Bool(must=[F('term', status=mkt.STATUS_PUBLIC),
                       F('term', is_disabled=False)],
                 must_not=[F('term', is_deleted=True)]))
class ValidAppsFilter(BaseFilterBackend):
    """
    A django-rest-framework filter backend that filters only valid items --
    those with any valid status and not disabled or deleted.
    """

    def filter_queryset(self, request, queryset, view):
        # Status must be one of the valid ones, and the item must not be
        # disabled.
        conditions = [
            F('terms', status=mkt.VALID_STATUSES),
            F('term', is_disabled=False),
        ]
        return queryset.filter(Bool(must=conditions))
class NotDeletedFilter(BaseFilterBackend):
    """
    A django-rest-framework filter backend that filters out soft-deleted items.
    """

    def filter_queryset(self, request, queryset, view):
        # A must_not clause is also true for documents that have no
        # is_deleted field at all.
        deleted = F('term', is_deleted=True)
        return queryset.filter(Bool(must_not=[deleted]))
class DeviceTypeFilter(BaseFilterBackend):
    """
    A django-rest-framework filter backend that filters based on the matching
    device type provided.
    """

    def filter_queryset(self, request, queryset, view):
        device_id = get_device_id(request)
        # Device capability flags set on the request upstream.
        # NOTE(review): 'tablet' is collected but not used below — confirm
        # whether it is still needed.
        data = {
            'gaia': getattr(request, 'GAIA', False),
            'mobile': getattr(request, 'MOBILE', False),
            'tablet': getattr(request, 'TABLET', False),
        }
        # Flash content is excluded for mobile/gaia devices.
        flash_incompatible = data['mobile'] or data['gaia']

        if device_id:
            queryset = queryset.filter(
                Bool(must=[F('term', device=device_id)]))
        if flash_incompatible:
            queryset = queryset.filter(
                Bool(must_not=[F('term', uses_flash=True)]))

        return queryset
class RegionFilter(BaseFilterBackend):
    """
    A django-rest-framework filter backend that filters based on the matching
    region provided.
    """

    def filter_queryset(self, request, queryset, view):
        region = get_region_from_request(request)
        if not region:
            return queryset
        # Exclude content explicitly excluded for this region.
        excluded_here = F('term', region_exclusions=region.id)
        return queryset.filter(Bool(must_not=[excluded_here]))
class ProfileFilter(BaseFilterBackend):
    """
    A django-rest-framework filter backend that filters based on the feature
    profile provided.
    """

    def filter_queryset(self, request, queryset, view):
        profile = get_feature_profile(request)
        if not profile:
            return queryset
        # Exclude items that require any feature the device profile lacks.
        must_not = [F('term', **{feature_key: True})
                    for feature_key
                    in profile.to_kwargs(prefix='features.has_')]
        if must_not:
            return queryset.filter(Bool(must_not=must_not))
        return queryset
class SortingFilter(BaseFilterBackend):
    """
    A django-rest-framework filter backend that applies sorting based on the
    form data provided.
    """

    # Maps public `?sort=` values to Elasticsearch sort expressions.
    DEFAULT_SORTING = {
        'popularity': '-popularity',
        'rating': '-bayesian_rating',
        'created': '-created',
        'reviewed': '-reviewed',
        'name': 'name_sort',
        'trending': '-trending',
    }

    def _get_regional_sort(self, region, field):
        """
        A helper method to return the sort field with region for mature
        regions, otherwise returns the field.
        """
        if region and not region.adolescent:
            # Mature regions have dedicated per-region popularity/trending
            # fields in the index.
            return ['-%s_%s' % (field, region.id)]
        return ['-%s' % field]

    def filter_queryset(self, request, queryset, view):
        region = get_region_from_request(request)
        search_query = request.GET.get('q')
        sort = request.GET.getlist('sort')

        # When querying (with `?q=`) we want to sort by relevance. If no query
        # is provided and no `?sort` is provided, i.e. we are only applying
        # filters which don't affect the relevance, we sort by popularity
        # descending.
        order_by = None
        if not search_query:
            order_by = self._get_regional_sort(region, 'popularity')

        if sort:
            if 'popularity' in sort:
                order_by = self._get_regional_sort(region, 'popularity')
            elif 'trending' in sort:
                order_by = self._get_regional_sort(region, 'trending')
            else:
                # Unknown sort names are silently ignored.
                order_by = [self.DEFAULT_SORTING[name] for name in sort
                            if name in self.DEFAULT_SORTING]

        if order_by:
            return queryset.sort(*order_by)

        return queryset
class OpenMobileACLFilter(BaseFilterBackend):
    """
    A django-rest-framework filter backend that finds apps using openmobile_acl
    feature flag.
    """

    def filter_queryset(self, request, queryset, view):
        uses_acl = F('term', **{'features.has_openmobileacl': True})
        return queryset.filter(Bool(must=[uses_acl]))
| |
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
class TemplateWriter(object):
  '''Abstract base class for writing policy templates in various formats.

  The methods of this class will be called by PolicyTemplateGenerator.
  '''

  def __init__(self, platforms, config):
    '''Initializes a TemplateWriter object.

    Args:
      platforms: List of platforms for which this writer can write policies.
      config: A dictionary of information required to generate the template.
        It contains some key-value pairs, including the following examples:
          'build': 'chrome' or 'chromium'
          'branding': 'Google Chrome' or 'Chromium'
          'mac_bundle_id': The Mac bundle id of Chrome. (Only set when
            building for Mac.)
      messages: List of all the message strings from the grd file. Most of
        them are also present in the policy data structures that are passed
        to methods. That is the preferred way of accessing them, this should
        only be used in exceptional cases. An example for its use is the
        IDS_POLICY_WIN_SUPPORTED_WINXPSP2 message in ADM files, because that
        cannot be associated with any policy or group.
    '''
    self.platforms = platforms
    self.config = config

  def IsDeprecatedPolicySupported(self, policy):
    '''Checks if the given deprecated policy is supported by the writer.

    Args:
      policy: The dictionary of the policy.

    Returns:
      True if the writer chooses to include the deprecated 'policy' in its
      output.
    '''
    return False

  def IsFuturePolicySupported(self, policy):
    '''Checks if the given future policy is supported by the writer.

    Args:
      policy: The dictionary of the policy.

    Returns:
      True if the writer chooses to include the future 'policy' in its
      output.
    '''
    return False

  def IsPolicySupported(self, policy):
    '''Checks if the given policy is supported by the writer.

    In other words, the set of platforms supported by the writer
    has a common subset with the set of platforms that support
    the policy.

    Args:
      policy: The dictionary of the policy.

    Returns:
      True if the writer chooses to include 'policy' in its output.
    '''
    # Deprecated/future policies are only written when the concrete writer
    # opts in via the Is*PolicySupported hooks.
    if (policy.get('deprecated') is True and
        not self.IsDeprecatedPolicySupported(policy)):
      return False

    if (policy.get('future') is True and
        not self.IsFuturePolicySupported(policy)):
      return False

    if '*' in self.platforms:
      # Currently chrome_os is only caught here.
      return True

    # Supported when any platform of any supported_on entry matches one of
    # this writer's platforms.
    return any(platform in self.platforms
               for supported_on in policy['supported_on']
               for platform in supported_on['platforms'])

  def _GetPoliciesForWriter(self, group):
    '''Filters the list of policies in the passed group that are supported by
    the writer.

    Args:
      group: The dictionary of the policy group.

    Returns: The list of policies of the policy group that are compatible
      with the writer.
    '''
    if 'policies' not in group:
      return []
    return [policy for policy in group['policies']
            if self.IsPolicySupported(policy)]

  def Init(self):
    '''Initializes the writer. If the WriteTemplate method is overridden, then
    this method must be called as first step of each template generation
    process.
    '''
    pass

  def WriteTemplate(self, template):
    '''Writes the given template definition.

    Args:
      template: Template definition to write.

    Returns:
      Generated output for the passed template definition.
    '''
    self.messages = template['messages']
    self.Init()
    template['policy_definitions'] = \
        self.PreprocessPolicies(template['policy_definitions'])
    self.BeginTemplate()
    for policy in template['policy_definitions']:
      if policy['type'] == 'group':
        child_policies = self._GetPoliciesForWriter(policy)
        if child_policies:
          # Only write nonempty groups.
          self.BeginPolicyGroup(policy)
          for child_policy in child_policies:
            # Nesting of groups is currently not supported.
            self.WritePolicy(child_policy)
          self.EndPolicyGroup()
      elif self.IsPolicySupported(policy):
        self.WritePolicy(policy)
    self.EndTemplate()
    return self.GetTemplateText()

  def PreprocessPolicies(self, policy_list):
    '''Preprocesses a list of policies according to a given writer's needs.
    Preprocessing steps include sorting policies and stripping unneeded
    information such as groups (for writers that ignore them).
    Subclasses are encouraged to override this method, overriding
    implementations may call one of the provided specialized implementations.
    The default behaviour is to use SortPoliciesGroupsFirst().

    Args:
      policy_list: A list containing the policies to sort.

    Returns:
      The sorted policy list.
    '''
    return self.SortPoliciesGroupsFirst(policy_list)

  def WritePolicy(self, policy):
    '''Appends the template text corresponding to a policy into the
    internal buffer.

    Args:
      policy: The policy as it is found in the JSON file.
    '''
    raise NotImplementedError()

  def BeginPolicyGroup(self, group):
    '''Appends the template text corresponding to the beginning of a
    policy group into the internal buffer.

    Args:
      group: The policy group as it is found in the JSON file.
    '''
    pass

  def EndPolicyGroup(self):
    '''Appends the template text corresponding to the end of a
    policy group into the internal buffer.
    '''
    pass

  def BeginTemplate(self):
    '''Appends the text corresponding to the beginning of the whole
    template into the internal buffer.
    '''
    raise NotImplementedError()

  def EndTemplate(self):
    '''Appends the text corresponding to the end of the whole
    template into the internal buffer.
    '''
    pass

  def GetTemplateText(self):
    '''Gets the content of the internal template buffer.

    Returns:
      The generated template from the the internal buffer as a string.
    '''
    raise NotImplementedError()

  def SortPoliciesGroupsFirst(self, policy_list):
    '''Sorts a list of policies alphabetically. The order is the
    following: first groups alphabetically by caption, then other policies
    alphabetically by name. The order of policies inside groups is unchanged.

    Args:
      policy_list: The list of policies to sort. Sub-lists in groups will not
        be sorted.
    '''
    policy_list.sort(key=self.GetPolicySortingKeyGroupsFirst)
    return policy_list

  def FlattenGroupsAndSortPolicies(self, policy_list, sorting_key=None):
    '''Sorts a list of policies according to |sorting_key|, defaulting
    to alphabetical sorting if no key is given. If |policy_list| contains
    policies with type="group", it is flattened first, i.e. any groups'
    contents are inserted into the list as first-class elements and the
    groups are then removed.
    '''
    new_list = []
    for policy in policy_list:
      if policy['type'] == 'group':
        # Flatten: group members become first-class list entries.
        new_list.extend(policy['policies'])
      else:
        new_list.append(policy)
    if sorting_key is None:
      sorting_key = self.GetPolicySortingKeyName
    new_list.sort(key=sorting_key)
    return new_list

  def GetPolicySortingKeyName(self, policy):
    '''Returns the policy name, for plain alphabetical sorting.'''
    return policy['name']

  def GetPolicySortingKeyGroupsFirst(self, policy):
    '''Extracts a sorting key from a policy. These keys can be used for
    list.sort() methods to sort policies.
    See TemplateWriter.SortPolicies for usage.
    '''
    is_group = policy['type'] == 'group'
    # Groups are sorted by caption, regular policies by name; the leading
    # boolean puts groups before regular policies.
    str_key = policy['caption'] if is_group else policy['name']
    return (not is_group, str_key)
| |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from selenium.common.exceptions import (
NoSuchElementException,
NoSuchWindowException,
TimeoutException,
WebDriverException)
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# --- Basic page, title, and element-lookup tests -------------------------
# All tests in this module use the `driver` and `pages` pytest fixtures
# (provided by the suite's conftest): `pages.load(...)` serves a known
# fixture page, and assertions check its well-known content.
def testGetTitle(driver, pages):
    pages.load("simpleTest.html")
    title = driver.title
    assert "Hello WebDriver" == title


def testGetCurrentUrl(driver, pages, webserver):
    pages.load("simpleTest.html")
    url = driver.current_url
    assert webserver.where_is('simpleTest.html') == url


def testFindElementsByXPath(driver, pages):
    pages.load("simpleTest.html")
    elem = driver.find_element_by_xpath("//h1")
    assert "Heading" == elem.text


def testFindElementByXpathThrowNoSuchElementException(driver, pages):
    pages.load("simpleTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element_by_xpath("//h4")


def testFindElementsByXpath(driver, pages):
    pages.load("nestedElements.html")
    elems = driver.find_elements_by_xpath("//option")
    # The fixture page contains exactly 48 <option> elements.
    assert 48 == len(elems)
    assert "One" == elems[0].get_attribute("value")


def testFindElementsByName(driver, pages):
    pages.load("xhtmlTest.html")
    elem = driver.find_element_by_name("windowOne")
    assert "Open new window" == elem.text


def testFindElementsByNameInElementContext(driver, pages):
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    sub_elem = elem.find_element_by_name("selectomatic")
    assert "2" == sub_elem.get_attribute("id")


def testFindElementsByLinkTextInElementContext(driver, pages):
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("div1")
    sub_elem = elem.find_element_by_link_text("hello world")
    assert "link1" == sub_elem.get_attribute("name")


def testFindElementByIdInElementContext(driver, pages):
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    sub_elem = elem.find_element_by_id("2")
    assert "selectomatic" == sub_elem.get_attribute("name")


def testFindElementByXpathInElementContext(driver, pages):
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    # Relative XPath: resolved against the element, not the document root.
    sub_elem = elem.find_element_by_xpath("select")
    assert "2" == sub_elem.get_attribute("id")


def testFindElementByXpathInElementContextNotFound(driver, pages):
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    with pytest.raises(NoSuchElementException):
        elem.find_element_by_xpath("div")


def testShouldBeAbleToEnterDataIntoFormFields(driver, pages):
    pages.load("xhtmlTest.html")
    elem = driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
    elem.clear()
    elem.send_keys("some text")
    # Re-find the element to read the value actually stored in the DOM.
    elem = driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
    assert "some text" == elem.get_attribute("value")


def testFindElementByTagName(driver, pages):
    pages.load("simpleTest.html")
    elems = driver.find_elements_by_tag_name("div")
    # Tag-name lookup must agree with the equivalent XPath query.
    num_by_xpath = len(driver.find_elements_by_xpath("//div"))
    assert num_by_xpath == len(elems)
    elems = driver.find_elements_by_tag_name("iframe")
    assert 0 == len(elems)


def testFindElementByTagNameWithinElement(driver, pages):
    pages.load("simpleTest.html")
    div = driver.find_element_by_id("multiline")
    elems = div.find_elements_by_tag_name("p")
    assert len(elems) == 1


@pytest.mark.xfail_marionette(
    reason="W3C implementations can't switch to a window by name",
    raises=TimeoutException,
    run=False)
def testSwitchToWindow(driver, pages):
    title_1 = "XHTML Test Page"
    title_2 = "We Arrive Here"
    switch_to_window_timeout = 5
    # Opening the window is asynchronous, so poll switch_to.window until the
    # "result" window exists, ignoring NoSuchWindowException meanwhile.
    wait = WebDriverWait(driver, switch_to_window_timeout, ignored_exceptions=[NoSuchWindowException])
    pages.load("xhtmlTest.html")
    driver.find_element_by_link_text("Open new window").click()
    assert title_1 == driver.title
    wait.until(lambda dr: dr.switch_to.window("result") is None)
    assert title_2 == driver.title


def testSwitchFrameByName(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("third"))
    checkbox = driver.find_element_by_id("checky")
    checkbox.click()
    checkbox.submit()
# --- Element state, navigation, and script-execution tests ----------------
def testIsEnabled(driver, pages):
    pages.load("formPage.html")
    elem = driver.find_element_by_xpath("//input[@id='working']")
    assert elem.is_enabled()
    elem = driver.find_element_by_xpath("//input[@id='notWorking']")
    assert not elem.is_enabled()


def testIsSelectedAndToggle(driver, pages):
    pages.load("formPage.html")
    elem = driver.find_element_by_id("multi")
    option_elems = elem.find_elements_by_xpath("option")
    # Clicking a selected option in a multi-select toggles it off and on.
    assert option_elems[0].is_selected()
    option_elems[0].click()
    assert not option_elems[0].is_selected()
    option_elems[0].click()
    assert option_elems[0].is_selected()
    assert option_elems[2].is_selected()


def testNavigate(driver, pages):
    pages.load("formPage.html")
    driver.find_element_by_id("imageButton").submit()
    WebDriverWait(driver, 3).until(EC.title_is("We Arrive Here"))
    driver.back()
    assert "We Leave From Here" == driver.title
    driver.forward()
    assert "We Arrive Here" == driver.title


def testGetAttribute(driver, pages):
    url = pages.url('xhtmlTest.html')
    driver.get(url)
    elem = driver.find_element_by_id("id1")
    # href="#" is resolved to the absolute page URL plus the fragment.
    attr = elem.get_attribute("href")
    assert '{0}#'.format(url) == attr


def testGetImplicitAttribute(driver, pages):
    pages.load("nestedElements.html")
    elems = driver.find_elements_by_xpath("//option")
    assert len(elems) >= 3
    # "index" is not an HTML attribute; it is read from the DOM property.
    for i, elem in enumerate(elems[:3]):
        assert i == int(elem.get_attribute("index"))


def testExecuteSimpleScript(driver, pages):
    pages.load("xhtmlTest.html")
    title = driver.execute_script("return document.title;")
    assert "XHTML Test Page" == title


def testExecuteScriptAndReturnElement(driver, pages):
    pages.load("xhtmlTest.html")
    # DOM nodes returned from scripts must be wrapped as WebElement objects.
    elem = driver.execute_script("return document.getElementById('id1');")
    assert "WebElement" in str(type(elem))


def testExecuteScriptWithArgs(driver, pages):
    pages.load("xhtmlTest.html")
    result = driver.execute_script("return arguments[0] == 'fish' ? 'fish' : 'not fish';", "fish")
    assert "fish" == result


def testExecuteScriptWithMultipleArgs(driver, pages):
    pages.load("xhtmlTest.html")
    result = driver.execute_script(
        "return arguments[0] + arguments[1]", 1, 2)
    assert 3 == result


def testExecuteScriptWithElementArgs(driver, pages):
    pages.load("javascriptPage.html")
    button = driver.find_element_by_id("plainButton")
    # WebElement arguments round-trip into the script as DOM nodes.
    result = driver.execute_script("arguments[0]['flibble'] = arguments[0].getAttribute('id'); return arguments[0]['flibble'];", button)
    assert "plainButton" == result


def testFindElementsByPartialLinkText(driver, pages):
    pages.load("xhtmlTest.html")
    elem = driver.find_element_by_partial_link_text("new window")
    elem.click()


def testIsElementDisplayed(driver, pages):
    pages.load("javascriptPage.html")
    visible = driver.find_element_by_id("displayed").is_displayed()
    not_visible = driver.find_element_by_id("hidden").is_displayed()
    assert visible
    assert not not_visible
# --- Window geometry and log retrieval tests ------------------------------
@pytest.mark.xfail_phantomjs(
    reason='https://github.com/detro/ghostdriver/issues/466')
def testMoveWindowPosition(driver, pages):
    pages.load("blank.html")
    loc = driver.get_window_position()
    # note can't test 0,0 since some OS's dont allow that location
    # because of system toolbars
    new_x = 50
    new_y = 50
    # Make sure the target differs from the current position so the move
    # is observable.
    if loc['x'] == new_x:
        new_x += 10
    if loc['y'] == new_y:
        new_y += 10
    driver.set_window_position(new_x, new_y)
    loc = driver.get_window_position()
    assert loc['x'] == new_x
    assert loc['y'] == new_y


def testChangeWindowSize(driver, pages):
    pages.load("blank.html")
    size = driver.get_window_size()
    newSize = [600, 600]
    # Pick a size that differs from the current one so the change is
    # observable.
    if size['width'] == 600:
        newSize[0] = 500
    if size['height'] == 600:
        newSize[1] = 500
    driver.set_window_size(newSize[0], newSize[1])
    size = driver.get_window_size()
    assert size['width'] == newSize[0]
    assert size['height'] == newSize[1]


@pytest.mark.xfail_marionette(
    raises=WebDriverException)
def testGetLogTypes(driver, pages):
    pages.load("blank.html")
    assert isinstance(driver.log_types, list)


@pytest.mark.xfail_marionette(
    raises=WebDriverException)
def testGetLog(driver, pages):
    pages.load("blank.html")
    for log_type in driver.log_types:
        log = driver.get_log(log_type)
        assert isinstance(log, list)
| |
# OnAir.py 19/05/2016 D.J.Whale
#
# A set of adaptors to allow device classes to interact with radio interfaces.
# At the moment, the main purpose of this abstraction is to prevent the need
# for device classes to know specifics about radio technology and focus
# on the device aspects only.
#
# In the future, this will be a useful point in the architecture to add
# an intelligent message scheduler, that learns the report patterns of
# specific devices, builds a timeslotted schedule, and schedules transmits
# into mostly free slots.
#
# NOTE: This also might include intelligent power level selection based
# on RSSI reports from different devices.
import time
try:
# Python 2
import OpenThings
import TwoBit
import radio
from lifecycle import *
except ImportError:
# Python 3
from . import OpenThings
from . import TwoBit
from . import radio
from .lifecycle import *
class OpenThingsAirInterface():
    """Air interface carrying OpenThings-encoded (FSK) payloads.

    Adapts device-level pydict payloads to the radio driver so device
    classes never deal with radio configuration directly.
    """
    def __init__(self):
        self.radio = radio # aids mocking later

        # Attribute-namespace classes holding the default radio parameters;
        # a radio_config object passed to send()/receive() can override
        # individual attributes.
        class RadioDefaults():
            frequency = 433.92
            modulation = radio.RADIO_MODULATION_FSK

        class TxDefaults(RadioDefaults):
            ##power_level = 0
            inner_times = 4
            outer_delay = 0
            outer_times = 1
        self.tx_defaults = TxDefaults()

        class RxDefaults(RadioDefaults):
            poll_rate = 100 #ms
            timeout = 1000 #ms
        self.rx_defaults = RxDefaults()

    ##@log_method
    def send(self, payload, radio_config=None):
        """Encode `payload` with OpenThings and transmit it over FSK."""
        # payload is a pydict suitable for OpenThings
        # radio_params is an overlay on top of radio tx defaults
        p = OpenThings.encode(payload)

        # Set radio defaults, if no override
        outer_times = self.tx_defaults.outer_times
        outer_delay = self.tx_defaults.outer_delay
        inner_times = self.tx_defaults.inner_times

        # Merge any wanted radio params, if provided
        if radio_config != None:
            try:
                outer_times = radio_config.outer_times
            except AttributeError: pass
            try:
                outer_delay = radio_config.outer_delay
            except AttributeError: pass
            try:
                inner_times = radio_config.inner_times
            except AttributeError: pass

        radio.transmitter(fsk=True)
        ##print("inner times %s" % inner_times)
        radio.transmit(p, outer_times=outer_times, inner_times=inner_times, outer_delay=outer_delay)
        # radio auto-returns to previous state after transmit completes
        return 0 # tx_silence remaining

    ##@log_method
    def receive(self, radio_config=None): # -> (radio_measurements, address or None, payload or None)
        """Block until an OpenThings payload is received and decoded.

        NOTE(review): the documented rx timeout is not implemented yet
        (see TODOs); this loop blocks until a payload arrives.
        """
        # radio_params is an overlay on top of radio rx defaults (e.g. poll rate, timeout, min payload, max payload)
        # radio_measurements might include rssi reading, short payload report, etc
        pass # TODO
        #TODO: set radio to receive mode
        #TODO: merge radio_params with self.tx_defaults
        #TODO: configure radio modulation based on merged params
        #TODO: poll radio at rate until timeout or received

        #TODO: start timeout timer
        payload = None
        radio.receiver(fsk=True)
        while True: # timer not expired
            if radio.is_receive_waiting():
                payload = radio.receive() #TODO: payload, radio_measurements = radio.receive()
                now = time.time()
                p = OpenThings.decode(payload, receive_timestamp=now)
                #TODO: if crc failure, report it, but keep trying
                #if crc check passes...
                break
            #TODO: inter-try delay

        #TODO: return radio to state it was before receiver (e.g. standby) - radio needs a pop() on this too?
        if payload == None: # nothing received in timeout
            return (None, None, None) # (radio_measurements, address, payload) #TODO: might be measurements, average min max?

        #TODO: extract addresses: header_manufacturerid, header_productid header_deviceid -> (m, p, d)
        m, p, d = None, None, None
        radio_measurements = None #TODO: get from radio.receive()
        address = (m, p, d)
        return (radio_measurements, address, payload)
class TwoBitAirInterface():
    """Air interface carrying TwoBit-encoded (OOK) switch payloads.

    Adds simple transmit pacing: a configurable silence period between
    transmissions, with optional deferral instead of blocking.
    """
    def __init__(self):
        self.radio = radio # aids mocking later
        # Timestamp of the last completed transmit; None until first send.
        self._last_tx_time = None
        # Longest time send() may block waiting; longer waits are deferred.
        self._blocking_max = None

        # Attribute-namespace classes holding default radio parameters;
        # a radio_config overlay can override individual attributes.
        class RadioDefaults():
            frequency = 433.92
            modulation = radio.RADIO_MODULATION_OOK

        class TxDefaults(RadioDefaults):
            power_level = 0
            inner_times = 8
            outer_delay = 0
            outer_times = 1
        self.tx_defaults = TxDefaults()

        class RxDefaults(RadioDefaults):
            poll_rate = 100 #ms
            timeout = 1000 #ms
        self.rx_defaults = RxDefaults()

    def send(self, payload, radio_config=None):
        """Decide if it is safe to send a payload, wait, send or return"""
        # if there is a time restriction, process it
        if self._last_tx_time is not None:
            if radio_config is not None and hasattr(radio_config, "tx_pre_silence"):
                # Earliest moment the next transmit is allowed.
                next_tx = self._last_tx_time + radio_config.tx_pre_silence
                now = time.time()
                if now < next_tx:
                    # wait or defer
                    print("<<<TwoBit.NOTYET")
                    rem = next_tx - now
                    if self._blocking_max is not None:
                        if rem >= self._blocking_max:
                            # Too long to block: tell the caller to retry in
                            # `rem` seconds instead of sleeping here.
                            print("<<<TwoBit.DEFER(%f)" % rem)
                            return rem # defer for rem seconds
                    # wait
                    print("<<<TwoBit.WAIT(%f)" % rem)
                    time.sleep(rem)

        # actually send the device payload to this device
        print("<<<TwoBit.SEND")
        self._send2(payload, radio_config)
        self._last_tx_time = time.time()
        return 0 # We did actually transmit this time round

    def _send2(self, payload, radio_config=None):
        """Actually send a payload"""
        # payload is just a list of bytes, or a byte buffer
        # radio_config is an overlay on top of radio tx defaults
        house_address = payload["house_address"]
        device_index = payload["device_index"]
        state = payload["on"]

        # NOTE(review): `bytes` shadows the builtin of the same name within
        # this method.
        bytes = TwoBit.encode_switch_message(state, device_index, house_address)
        radio.modulation(ook=True)

        # Set radio defaults, if no override
        outer_times = self.tx_defaults.outer_times
        outer_delay = self.tx_defaults.outer_delay
        inner_times = self.tx_defaults.inner_times

        # Merge any wanted radio params, if provided
        if radio_config != None:
            try:
                outer_times = radio_config.outer_times
            except AttributeError: pass
            try:
                outer_delay = radio_config.outer_delay
            except AttributeError: pass
            try:
                inner_times = radio_config.inner_times
            except AttributeError: pass

        ##print("inner times %s" % inner_times)
        radio.transmit(bytes, outer_times=outer_times, inner_times=inner_times, outer_delay=outer_delay)
        # radio auto-pops to state before transmit

    ##@log_method
    def receive(self, radio_config=None): # -> (radio_measurements, address or None, payload or None)
        """Block until a 12-byte OOK payload is received and decoded.

        NOTE(review): the documented rx timeout is not implemented yet
        (see TODOs); this loop blocks until a payload arrives.
        """
        # radio_params is an overlay on top of radio rx defaults (e.g. poll rate, timeout, min payload, max payload)
        # radio_measurements might include rssi reading, short payload report, etc
        #TODO: merge radio_params with self.tx_defaults
        #TODO: configure radio modulation based on merged params
        #TODO: poll radio at rate until timeout or received

        #TODO: start timeout timer
        payload = None
        radio.receiver(ook=True)
        while True: # timer not expired
            if radio.is_receive_waiting():
                #TODO: radio config should set receive preamble 4 bytes to prevent false triggers
                payload = radio.receive(size=12) #TODO: payload, radio_measurements = radio.receive()
                p = TwoBit.decode(payload)
                #TODO: if failure, report it, but keep trying
                #if check passes...
                break
            #TODO: inter-try delay

        #TODO: return radio to state it was before receiver (e.g. standby) - radio needs a pop() on this too?
        if payload == None: # nothing received in timeout
            return (None, None, None) # (radio_measurements, address, payload) #TODO: might be measurements, average min max?

        #TODO: extract addresses (house_address, device_index)
        radio_measurements = None #TODO: return this from radio.receive()
        h = 0xC8C8C #TODO: Get house address from TwoBit.decode()[:10]
        d = 0xEE #TODO: Get device command from TwoBit.decode()[11:12]
        address = (h, d)
        return (radio_measurements, address, payload)
# END
| |
# download_run_fs.py
#
# Author: Daniel Clark, 2015
'''
This module downloads anatomical data from S3 and runs freesurfer's
recon-all -all command on it
Usage:
    python download_run_fs.py <index> <local_dir>
'''
# Setup log file
def setup_logger(logger_name, log_file, level, to_screen=False):
    '''
    Function to initialize and configure a logger that can write to file
    and (optionally) the screen.
    Parameters
    ----------
    logger_name : string
        name of the logger
    log_file : string
        file path to the log file on disk
    level : integer
        indicates the level at which the logger should log; this is
        controlled by integers that come with the python logging
        package. (e.g. logging.INFO=20, logging.DEBUG=10)
    to_screen : boolean (optional)
        flag to indicate whether to enable logging to the screen
    Returns
    -------
    logger : logging.Logger object
        Python logging.Logger object which is capable of logging run-
        time information about the program to file and/or screen
    '''
    # Import packages
    import logging
    # Init logger and formatter
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    formatter = logging.Formatter('%(asctime)s : %(message)s')
    # Only attach handlers the first time this logger name is configured;
    # logging.getLogger returns the same object on repeat calls, so
    # re-adding handlers would duplicate every log record.
    if not logger.handlers:
        # Write logs to file
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        # Write to screen, if desired
        if to_screen:
            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(formatter)
            logger.addHandler(stream_handler)
    # Return the logger
    return logger
# Form list of anatomical s3 keys
def return_anat_dict(bucket, prefix):
    '''
    Function to create and return a dictionary from an S3 bucket
    prefix, where the key is the subject unique id and the value is the
    S3 key filepath
    Parameters
    ----------
    bucket : boto.s3.bucket.Bucket instance
        an instance of the boto S3 bucket class to download from
    prefix : string
        S3 bucket prefix to parse for anatomical data in
    Returns
    -------
    key_dict : dictionary
        dictionary of unique subject id's as keys and S3 key filepaths
        as values
    '''
    # Init variables
    key_list = []
    key_dict = {}
    # Ensure trailing slash so stripping the prefix below leaves a clean
    # subject-relative path
    if not prefix.endswith('/'):
        prefix = prefix + '/'
    # Gather all anatomical files
    for key in bucket.list(prefix=prefix):
        key_name = str(key.name)
        if 'anat' in key_name:
            key_list.append(key_name)
            # print() call form works under both Python 2 and Python 3
            print('Adding %s to list...' % key_name)
    # Create subject dictionary; the list index is not needed, so iterate
    # the names directly instead of enumerate()
    for key_name in key_list:
        # Grab unique subj/session as id
        key_suffix = key_name.replace(prefix, '')
        subj_id = '-'.join(key_suffix.split('/')[:2])
        # Add key, val to dictionary (later duplicates overwrite earlier)
        key_dict[subj_id] = key_name
    # Return dictionary
    return key_dict
# Main routine
def main(index, local_dir):
    '''
    Function to download an anatomical dataset from S3 and process it
    through Freesurfer's recon-all command, then upload the data back
    to S3
    Parameters
    ----------
    index : integer
        the index of the subject to process
    local_dir : string
        filepath to the local directory to store the input and
        processed outputs
    '''
    # Import packages
    import boto
    import logging
    import os
    import subprocess
    from CPAC.AWS import aws_utils, fetch_creds
    # Init variables
    # NOTE(review): credentials path and bucket/prefix are hard-coded to
    # this deployment's layout
    creds_path = '/home/ubuntu/secure-creds/aws-keys/fcp-indi-keys2.csv'
    bucket = fetch_creds.return_bucket(creds_path, 'fcp-indi')
    prefix = 'data/Projects/CORR/RawData/IBA_TRT/'
    dl_dir = os.path.join(local_dir, 'inputs')
    # NOTE(review): presumably recon-all writes here via the SUBJECTS_DIR
    # environment variable set outside this script -- confirm
    subjects_dir = os.path.join(local_dir, 'subjects')
    # Setup logger
    fs_log_path = os.path.join(local_dir, 'download_run_fs_%d.log' % index)
    fs_log = setup_logger('fs_log', fs_log_path, logging.INFO, to_screen=True)
    # Make input and subject dirs
    if not os.path.exists(dl_dir):
        os.makedirs(dl_dir)
    if not os.path.exists(subjects_dir):
        os.makedirs(subjects_dir)
    # Get S3 anatomical paths dictionary
    anat_dict = return_anat_dict(bucket, prefix)
    # Get list of unique subject ids to download; sorted so a given index
    # maps to the same subject on every run
    key_list = sorted(anat_dict.keys())
    # Extract subject of interest
    subj_id = key_list[index]
    s3_path = anat_dict[subj_id]
    # Download data
    fs_log.info('Downloading %s...' % s3_path)
    s3_key = bucket.get_key(s3_path)
    s3_filename = os.path.basename(s3_path)
    dl_filename = os.path.join(dl_dir, subj_id, s3_filename)
    # Make folders if need be
    dl_dirs = os.path.dirname(dl_filename)
    if not os.path.exists(dl_dirs):
        os.makedirs(dl_dirs)
    s3_key.get_contents_to_filename(dl_filename)
    # Execute recon-all (4 OpenMP threads, full pipeline plus qcache)
    cmd_list = ['recon-all', '-openmp', '4', '-i', dl_filename,
                '-subjid', subj_id, '-qcache', '-all']
    cmd_str = ' '.join(cmd_list)
    fs_log.info('Executing %s...' % cmd_str)
    # Use subprocess to send command and communicate outputs
    proc = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Stream recon-all output into the log line-by-line while it runs
    while proc.poll() is None:
        stdout_line = proc.stdout.readline()
        fs_log.info(stdout_line)
    proc.wait()
    # Gather processed data: every file under this subject's output dir
    fs_log.info('Gathering outputs for upload to S3...')
    upl_list = []
    subj_dir = os.path.join(subjects_dir, subj_id)
    for root, dirs, files in os.walk(subj_dir):
        if files:
            upl_list.extend([os.path.join(root, fl) for fl in files])
    # Update log with upload info
    fs_log.info('Gathered %d files for upload to S3' % len(upl_list))
    # Build upload list: mirror local paths under the Outputs/freesurfer
    # prefix in S3
    upl_prefix = os.path.join(prefix.replace('RawData', 'Outputs'),
                              'freesurfer', subj_id)
    s3_upl_list = [upl.replace(subj_dir, upl_prefix) for upl in upl_list]
    # Upload to S3
    aws_utils.s3_upload(bucket, upl_list, s3_upl_list, overwrite=True, make_public=True)
# Make executable
if __name__ == '__main__':
    # Import packages
    import sys
    # Init variables
    # Command-line index is 1-based (e.g. a cluster task id); convert to
    # the 0-based list index main() expects
    index = int(sys.argv[1])-1
    local_dir = sys.argv[2]
    main(index, local_dir)
| |
""" Test cases for Series.plot """
from datetime import datetime
from itertools import chain
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.plotting as plotting
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
    def setup_method(self, method):
        # Reset matplotlib rcParams and build the shared fixtures: a
        # datetime-indexed ts, a string-indexed series, a period-indexed
        # series.
        TestPlotBase.setup_method(self, method)
        import matplotlib as mpl
        mpl.rcdefaults()
        self.ts = tm.makeTimeSeries()
        self.ts.name = "ts"
        self.series = tm.makeStringSeries()
        self.series.name = "series"
        self.iseries = tm.makePeriodSeries()
        self.iseries.name = "iseries"
    @pytest.mark.slow
    def test_plot(self):
        # Broad smoke test of Series.plot across kinds, log scales,
        # rotation, colors and subplots layouts.
        _check_plot_works(self.ts.plot, label="foo")
        _check_plot_works(self.ts.plot, use_index=False)
        axes = _check_plot_works(self.ts.plot, rot=0)
        self._check_ticks_props(axes, xrot=0)
        ax = _check_plot_works(self.ts.plot, style=".", logy=True)
        self._check_ax_scales(ax, yaxis="log")
        ax = _check_plot_works(self.ts.plot, style=".", logx=True)
        self._check_ax_scales(ax, xaxis="log")
        ax = _check_plot_works(self.ts.plot, style=".", loglog=True)
        self._check_ax_scales(ax, xaxis="log", yaxis="log")
        _check_plot_works(self.ts[:10].plot.bar)
        _check_plot_works(self.ts.plot.area, stacked=False)
        _check_plot_works(self.iseries.plot)
        for kind in ["line", "bar", "barh", "kde", "hist", "box"]:
            _check_plot_works(self.series[:5].plot, kind=kind)
        _check_plot_works(self.series[:10].plot.barh)
        ax = _check_plot_works(Series(randn(10)).plot.bar, color="black")
        self._check_colors([ax.patches[0]], facecolors=["black"])
        # GH 6951
        ax = _check_plot_works(self.ts.plot, subplots=True)
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
        ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
        ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
    @pytest.mark.slow
    def test_plot_figsize_and_title(self):
        # figsize and title kwargs should propagate to the axes/figure
        _, ax = self.plt.subplots()
        ax = self.series.plot(title="Test", figsize=(16, 8), ax=ax)
        self._check_text_labels(ax.title, "Test")
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
    def test_dont_modify_rcParams(self):
        # GH 8242: plotting must not mutate the global color cycle
        key = "axes.prop_cycle"
        colors = self.plt.rcParams[key]
        _, ax = self.plt.subplots()
        Series([1, 2, 3]).plot(ax=ax)
        assert colors == self.plt.rcParams[key]
    def test_ts_line_lim(self):
        # x-limits should enclose the plotted data, for primary and
        # secondary y-axis line plots of a time series
        fig, ax = self.plt.subplots()
        ax = self.ts.plot(ax=ax)
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        assert xmin <= lines[0].get_data(orig=False)[0][0]
        assert xmax >= lines[0].get_data(orig=False)[0][-1]
        tm.close()
        ax = self.ts.plot(secondary_y=True, ax=ax)
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        assert xmin <= lines[0].get_data(orig=False)[0][0]
        assert xmax >= lines[0].get_data(orig=False)[0][-1]
    def test_ts_area_lim(self):
        # x-limits must enclose the data for area plots, including
        # x_compat mode, tz-aware indexes and secondary_y (GH 7471)
        _, ax = self.plt.subplots()
        ax = self.ts.plot.area(stacked=False, ax=ax)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        assert xmin <= line[0]
        assert xmax >= line[-1]
        tm.close()
        # GH 7471
        _, ax = self.plt.subplots()
        ax = self.ts.plot.area(stacked=False, x_compat=True, ax=ax)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        assert xmin <= line[0]
        assert xmax >= line[-1]
        tm.close()
        tz_ts = self.ts.copy()
        tz_ts.index = tz_ts.tz_localize("GMT").tz_convert("CET")
        _, ax = self.plt.subplots()
        ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        assert xmin <= line[0]
        assert xmax >= line[-1]
        tm.close()
        _, ax = self.plt.subplots()
        ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        assert xmin <= line[0]
        assert xmax >= line[-1]
    def test_label(self):
        # Legend label precedence: explicit label > series name > "None"
        s = Series([1, 2])
        _, ax = self.plt.subplots()
        ax = s.plot(label="LABEL", legend=True, ax=ax)
        self._check_legend_labels(ax, labels=["LABEL"])
        self.plt.close()
        _, ax = self.plt.subplots()
        ax = s.plot(legend=True, ax=ax)
        self._check_legend_labels(ax, labels=["None"])
        self.plt.close()
        # get name from index
        s.name = "NAME"
        _, ax = self.plt.subplots()
        ax = s.plot(legend=True, ax=ax)
        self._check_legend_labels(ax, labels=["NAME"])
        self.plt.close()
        # override the default
        _, ax = self.plt.subplots()
        ax = s.plot(legend=True, label="LABEL", ax=ax)
        self._check_legend_labels(ax, labels=["LABEL"])
        self.plt.close()
        # Add label info, but don't draw
        _, ax = self.plt.subplots()
        ax = s.plot(legend=False, label="LABEL", ax=ax)
        assert ax.get_legend() is None  # Hasn't been drawn
        ax.legend()  # draw it
        self._check_legend_labels(ax, labels=["LABEL"])
    def test_boolean(self):
        # GH 23719: boolean data plots only with include_bool=True
        s = Series([False, False, True])
        _check_plot_works(s.plot, include_bool=True)
        msg = "no numeric data to plot"
        with pytest.raises(TypeError, match=msg):
            _check_plot_works(s.plot)
    def test_line_area_nan_series(self):
        # NaNs should be masked in line plots and filled with 0 when
        # stacking / area plotting
        values = [1, 2, np.nan, 3]
        s = Series(values)
        ts = Series(values, index=tm.makeDateIndex(k=4))
        for d in [s, ts]:
            ax = _check_plot_works(d.plot)
            masked = ax.lines[0].get_ydata()
            # remove nan for comparison purpose
            exp = np.array([1, 2, 3], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
            tm.assert_numpy_array_equal(
                masked.mask, np.array([False, False, True, False])
            )
            expected = np.array([1, 2, 0, 3], dtype=np.float64)
            ax = _check_plot_works(d.plot, stacked=True)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
            ax = _check_plot_works(d.plot.area)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
            ax = _check_plot_works(d.plot.area, stacked=False)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
    def test_line_use_index_false(self):
        # use_index=False should suppress the index-name x-label
        s = Series([1, 2, 3], index=["a", "b", "c"])
        s.index.name = "The Index"
        _, ax = self.plt.subplots()
        ax = s.plot(use_index=False, ax=ax)
        label = ax.get_xlabel()
        assert label == ""
        _, ax = self.plt.subplots()
        ax2 = s.plot.bar(use_index=False, ax=ax)
        label2 = ax2.get_xlabel()
        assert label2 == ""
    @pytest.mark.slow
    def test_bar_log(self):
        # log-scale bar/barh tick locations and axis limits (GH 9905)
        expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4])
        _, ax = self.plt.subplots()
        ax = Series([200, 500]).plot.bar(log=True, ax=ax)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
        tm.close()
        _, ax = self.plt.subplots()
        ax = Series([200, 500]).plot.barh(log=True, ax=ax)
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
        tm.close()
        # GH 9905
        expected = np.array([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1])
        _, ax = self.plt.subplots()
        ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind="bar", ax=ax)
        ymin = 0.0007943282347242822
        ymax = 0.12589254117941673
        res = ax.get_ylim()
        tm.assert_almost_equal(res[0], ymin)
        tm.assert_almost_equal(res[1], ymax)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
        tm.close()
        _, ax = self.plt.subplots()
        ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind="barh", ax=ax)
        res = ax.get_xlim()
        tm.assert_almost_equal(res[0], ymin)
        tm.assert_almost_equal(res[1], ymax)
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
    @pytest.mark.slow
    def test_bar_ignore_index(self):
        # use_index=False should label bars with positional indices
        df = Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
        _, ax = self.plt.subplots()
        ax = df.plot.bar(use_index=False, ax=ax)
        self._check_text_labels(ax.get_xticklabels(), ["0", "1", "2", "3"])
    def test_bar_user_colors(self):
        # per-bar colors passed as a list should map onto the patches
        s = Series([1, 2, 3, 4])
        ax = s.plot.bar(color=["red", "blue", "blue", "red"])
        result = [p.get_facecolor() for p in ax.patches]
        expected = [
            (1.0, 0.0, 0.0, 1.0),
            (0.0, 0.0, 1.0, 1.0),
            (0.0, 0.0, 1.0, 1.0),
            (1.0, 0.0, 0.0, 1.0),
        ]
        assert result == expected
    def test_rotation(self):
        # xtick label rotation: default 0 and explicit rot=30
        df = DataFrame(randn(5, 5))
        # Default rot 0
        _, ax = self.plt.subplots()
        axes = df.plot(ax=ax)
        self._check_ticks_props(axes, xrot=0)
        _, ax = self.plt.subplots()
        axes = df.plot(rot=30, ax=ax)
        self._check_ticks_props(axes, xrot=30)
    def test_irregular_datetime(self):
        # set_xlim with date strings should work on an irregular
        # datetime-indexed series
        rng = date_range("1/1/2000", "3/1/2000")
        rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
        ser = Series(randn(len(rng)), rng)
        _, ax = self.plt.subplots()
        ax = ser.plot(ax=ax)
        xp = datetime(1999, 1, 1).toordinal()
        ax.set_xlim("1/1/1999", "1/1/2001")
        assert xp == ax.get_xlim()[0]
    def test_unsorted_index_xlim(self):
        # x-limits must enclose the data even with an unsorted index
        # containing NaNs and duplicates
        ser = Series(
            [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0],
            index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0],
        )
        _, ax = self.plt.subplots()
        ax = ser.plot(ax=ax)
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        assert xmin <= np.nanmin(lines[0].get_data(orig=False)[0])
        assert xmax >= np.nanmax(lines[0].get_data(orig=False)[0])
    @pytest.mark.slow
    def test_pie_series(self):
        # Pie plot labels, color cycling, autopct formatting, and error
        # handling for negative values and NaNs.
        # if sum of values is less than 1.0, pie handle them as rate and draw
        # semicircle.
        series = Series(
            np.random.randint(1, 5), index=["a", "b", "c", "d", "e"], name="YLABEL"
        )
        ax = _check_plot_works(series.plot.pie)
        self._check_text_labels(ax.texts, series.index)
        assert ax.get_ylabel() == "YLABEL"
        # without wedge labels
        ax = _check_plot_works(series.plot.pie, labels=None)
        self._check_text_labels(ax.texts, [""] * 5)
        # with less colors than elements
        color_args = ["r", "g", "b"]
        ax = _check_plot_works(series.plot.pie, colors=color_args)
        color_expected = ["r", "g", "b", "r", "g"]
        self._check_colors(ax.patches, facecolors=color_expected)
        # with labels and colors
        labels = ["A", "B", "C", "D", "E"]
        color_args = ["r", "g", "b", "c", "m"]
        ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args)
        self._check_text_labels(ax.texts, labels)
        self._check_colors(ax.patches, facecolors=color_args)
        # with autopct and fontsize
        ax = _check_plot_works(
            series.plot.pie, colors=color_args, autopct="%.2f", fontsize=7
        )
        pcts = [f"{s*100:.2f}" for s in series.values / float(series.sum())]
        expected_texts = list(chain.from_iterable(zip(series.index, pcts)))
        self._check_text_labels(ax.texts, expected_texts)
        for t in ax.texts:
            assert t.get_fontsize() == 7
        # includes negative value
        with pytest.raises(ValueError):
            series = Series([1, 2, 0, 4, -1], index=["a", "b", "c", "d", "e"])
            series.plot.pie()
        # includes nan
        series = Series([1, 2, np.nan, 4], index=["a", "b", "c", "d"], name="YLABEL")
        ax = _check_plot_works(series.plot.pie)
        self._check_text_labels(ax.texts, ["a", "b", "", "d"])
    def test_pie_nan(self):
        # NaN wedges get an empty label in the pie legend/texts
        s = Series([1, np.nan, 1, 1])
        _, ax = self.plt.subplots()
        ax = s.plot.pie(legend=True, ax=ax)
        expected = ["0", "", "2", "3"]
        result = [x.get_text() for x in ax.texts]
        assert result == expected
    @pytest.mark.slow
    def test_hist_df_kwargs(self):
        # bins kwarg: 2 columns x 5 bins -> 10 patches
        df = DataFrame(np.random.randn(10, 2))
        _, ax = self.plt.subplots()
        ax = df.plot.hist(bins=5, ax=ax)
        assert len(ax.patches) == 10
    @pytest.mark.slow
    def test_hist_df_with_nonnumerics(self):
        # GH 9853: non-numeric columns are dropped from histograms
        with tm.RNGContext(1):
            df = DataFrame(np.random.randn(10, 4), columns=["A", "B", "C", "D"])
        df["E"] = ["x", "y"] * 5
        _, ax = self.plt.subplots()
        ax = df.plot.hist(bins=5, ax=ax)
        assert len(ax.patches) == 20
        _, ax = self.plt.subplots()
        ax = df.plot.hist(ax=ax)  # bins=10
        assert len(ax.patches) == 40
    @pytest.mark.slow
    def test_hist_legacy(self):
        # Series.hist with grid/figsize/by/ax/figure combinations;
        # mixing by= and figure= must raise
        _check_plot_works(self.ts.hist)
        _check_plot_works(self.ts.hist, grid=False)
        _check_plot_works(self.ts.hist, figsize=(8, 10))
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(self.ts.hist, by=self.ts.index.month)
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)
        fig, ax = self.plt.subplots(1, 1)
        _check_plot_works(self.ts.hist, ax=ax)
        _check_plot_works(self.ts.hist, ax=ax, figure=fig)
        _check_plot_works(self.ts.hist, figure=fig)
        tm.close()
        fig, (ax1, ax2) = self.plt.subplots(1, 2)
        _check_plot_works(self.ts.hist, figure=fig, ax=ax1)
        _check_plot_works(self.ts.hist, figure=fig, ax=ax2)
        with pytest.raises(ValueError):
            self.ts.hist(by=self.ts.index, figure=fig)
    @pytest.mark.slow
    def test_hist_bins_legacy(self):
        # bins kwarg propagates through DataFrame.hist
        df = DataFrame(np.random.randn(10, 2))
        ax = df.hist(bins=2)[0][0]
        assert len(ax.patches) == 2
    @pytest.mark.slow
    def test_hist_layout(self):
        # layout kwarg is invalid without a by= grouping
        df = self.hist_df
        with pytest.raises(ValueError):
            df.height.hist(layout=(1, 1))
        with pytest.raises(ValueError):
            df.height.hist(layout=[1, 1])
    @pytest.mark.slow
    def test_hist_layout_with_by(self):
        # layout tuples (including -1 wildcards) combined with by=
        df = self.hist_df
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
        self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1))
        self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1))
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1))
        self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1))
        self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4))
        self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2))
        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
        axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
        self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
    @pytest.mark.slow
    def test_hist_no_overlap(self):
        # two hists on separate subplots must not share axes
        from matplotlib.pyplot import subplot, gcf
        x = Series(randn(2))
        y = Series(randn(2))
        subplot(121)
        x.hist()
        subplot(122)
        y.hist()
        fig = gcf()
        axes = fig.axes
        assert len(axes) == 2
    @pytest.mark.slow
    def test_hist_secondary_legend(self):
        # GH 9610: legend labels and axis visibility when mixing
        # primary/secondary y-axis histograms
        df = DataFrame(np.random.randn(30, 4), columns=list("abcd"))
        # primary -> secondary
        _, ax = self.plt.subplots()
        ax = df["a"].plot.hist(legend=True, ax=ax)
        df["b"].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left and right axis must be visible
        self._check_legend_labels(ax, labels=["a", "b (right)"])
        assert ax.get_yaxis().get_visible()
        assert ax.right_ax.get_yaxis().get_visible()
        tm.close()
        # secondary -> secondary
        _, ax = self.plt.subplots()
        ax = df["a"].plot.hist(legend=True, secondary_y=True, ax=ax)
        df["b"].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left axis must be invisible, right axis must be visible
        self._check_legend_labels(ax.left_ax, labels=["a (right)", "b (right)"])
        assert not ax.left_ax.get_yaxis().get_visible()
        assert ax.get_yaxis().get_visible()
        tm.close()
        # secondary -> primary
        _, ax = self.plt.subplots()
        ax = df["a"].plot.hist(legend=True, secondary_y=True, ax=ax)
        # right axes is returned
        df["b"].plot.hist(ax=ax, legend=True)
        # both legends are drawn on left ax
        # left and right axis must be visible
        self._check_legend_labels(ax.left_ax, labels=["a (right)", "b"])
        assert ax.left_ax.get_yaxis().get_visible()
        assert ax.get_yaxis().get_visible()
        tm.close()
    @pytest.mark.slow
    def test_df_series_secondary_legend(self):
        # GH 9779: legend labels and axis visibility when a Series is
        # plotted on top of a DataFrame with secondary_y combinations
        df = DataFrame(np.random.randn(30, 3), columns=list("abc"))
        s = Series(np.random.randn(30), name="x")
        # primary -> secondary (without passing ax)
        _, ax = self.plt.subplots()
        ax = df.plot(ax=ax)
        s.plot(legend=True, secondary_y=True, ax=ax)
        # both legends are drawn on left ax
        # left and right axis must be visible
        self._check_legend_labels(ax, labels=["a", "b", "c", "x (right)"])
        assert ax.get_yaxis().get_visible()
        assert ax.right_ax.get_yaxis().get_visible()
        tm.close()
        # primary -> secondary (with passing ax)
        _, ax = self.plt.subplots()
        ax = df.plot(ax=ax)
        s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left and right axis must be visible
        self._check_legend_labels(ax, labels=["a", "b", "c", "x (right)"])
        assert ax.get_yaxis().get_visible()
        assert ax.right_ax.get_yaxis().get_visible()
        tm.close()
        # secondary -> secondary (without passing ax)
        _, ax = self.plt.subplots()
        ax = df.plot(secondary_y=True, ax=ax)
        s.plot(legend=True, secondary_y=True, ax=ax)
        # both legends are drawn on left ax
        # left axis must be invisible and right axis must be visible
        expected = ["a (right)", "b (right)", "c (right)", "x (right)"]
        self._check_legend_labels(ax.left_ax, labels=expected)
        assert not ax.left_ax.get_yaxis().get_visible()
        assert ax.get_yaxis().get_visible()
        tm.close()
        # secondary -> secondary (with passing ax)
        _, ax = self.plt.subplots()
        ax = df.plot(secondary_y=True, ax=ax)
        s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left axis must be invisible and right axis must be visible
        expected = ["a (right)", "b (right)", "c (right)", "x (right)"]
        self._check_legend_labels(ax.left_ax, expected)
        assert not ax.left_ax.get_yaxis().get_visible()
        assert ax.get_yaxis().get_visible()
        tm.close()
        # secondary -> secondary (with passing ax)
        _, ax = self.plt.subplots()
        ax = df.plot(secondary_y=True, mark_right=False, ax=ax)
        s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left axis must be invisible and right axis must be visible
        expected = ["a", "b", "c", "x (right)"]
        self._check_legend_labels(ax.left_ax, expected)
        assert not ax.left_ax.get_yaxis().get_visible()
        assert ax.get_yaxis().get_visible()
        tm.close()
    @pytest.mark.slow
    @pytest.mark.parametrize(
        "input_logy, expected_scale", [(True, "log"), ("sym", "symlog")]
    )
    def test_secondary_logy(self, input_logy, expected_scale):
        # GH 25545: logy / logy="sym" works on both primary and secondary
        s1 = Series(np.random.randn(30))
        s2 = Series(np.random.randn(30))
        # GH 24980
        ax1 = s1.plot(logy=input_logy)
        ax2 = s2.plot(secondary_y=True, logy=input_logy)
        assert ax1.get_yscale() == expected_scale
        assert ax2.get_yscale() == expected_scale
    @pytest.mark.slow
    def test_plot_fails_with_dupe_color_and_style(self):
        # color in both style string and color kwarg must raise
        x = Series(randn(2))
        with pytest.raises(ValueError):
            _, ax = self.plt.subplots()
            x.plot(style="k--", color="k", ax=ax)
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_hist_kde(self):
        # hist and kde with logy: log scale set, tick labels blank
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(logy=True, ax=ax)
        self._check_ax_scales(ax, yaxis="log")
        xlabels = ax.get_xticklabels()
        # ticks are values, thus ticklabels are blank
        self._check_text_labels(xlabels, [""] * len(xlabels))
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [""] * len(ylabels))
        _check_plot_works(self.ts.plot.kde)
        _check_plot_works(self.ts.plot.density)
        _, ax = self.plt.subplots()
        ax = self.ts.plot.kde(logy=True, ax=ax)
        self._check_ax_scales(ax, yaxis="log")
        xlabels = ax.get_xticklabels()
        self._check_text_labels(xlabels, [""] * len(xlabels))
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [""] * len(ylabels))
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_kwargs(self):
sample_points = np.linspace(-100, 100, 20)
_check_plot_works(self.ts.plot.kde, bw_method="scott", ind=20)
_check_plot_works(self.ts.plot.kde, bw_method=None, ind=20)
_check_plot_works(self.ts.plot.kde, bw_method=None, ind=np.int(20))
_check_plot_works(self.ts.plot.kde, bw_method=0.5, ind=sample_points)
_check_plot_works(self.ts.plot.density, bw_method=0.5, ind=sample_points)
_, ax = self.plt.subplots()
ax = self.ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax)
self._check_ax_scales(ax, yaxis="log")
self._check_text_labels(ax.yaxis.get_label(), "Density")
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_kde_missing_vals(self):
        # gh-14821: kde should drop NaNs rather than propagate them
        s = Series(np.random.uniform(size=50))
        s[0] = np.nan
        axes = _check_plot_works(s.plot.kde)
        # gh-14821: check if the values have any missing values
        assert any(~np.isnan(axes.lines[0].get_xdata()))
    @pytest.mark.slow
    def test_hist_kwargs(self):
        # bins, orientation and matplotlib pass-through kwargs for hist
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(bins=5, ax=ax)
        assert len(ax.patches) == 5
        self._check_text_labels(ax.yaxis.get_label(), "Frequency")
        tm.close()
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(orientation="horizontal", ax=ax)
        self._check_text_labels(ax.xaxis.get_label(), "Frequency")
        tm.close()
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(align="left", stacked=True, ax=ax)
        tm.close()
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_hist_kde_color(self):
        # single-color kwarg applied to all hist patches / the kde line
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(logy=True, bins=10, color="b", ax=ax)
        self._check_ax_scales(ax, yaxis="log")
        assert len(ax.patches) == 10
        self._check_colors(ax.patches, facecolors=["b"] * 10)
        _, ax = self.plt.subplots()
        ax = self.ts.plot.kde(logy=True, color="r", ax=ax)
        self._check_ax_scales(ax, yaxis="log")
        lines = ax.get_lines()
        assert len(lines) == 1
        self._check_colors(lines, ["r"])
    @pytest.mark.slow
    def test_boxplot_series(self):
        # box plot with logy: scale, series-name tick label, blank ylabels
        _, ax = self.plt.subplots()
        ax = self.ts.plot.box(logy=True, ax=ax)
        self._check_ax_scales(ax, yaxis="log")
        xlabels = ax.get_xticklabels()
        self._check_text_labels(xlabels, [self.ts.name])
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [""] * len(ylabels))
    @pytest.mark.slow
    def test_kind_both_ways(self):
        # every kind works via kind= kwarg and via s.plot.<kind>()
        s = Series(range(3))
        kinds = (
            plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds
        )
        _, ax = self.plt.subplots()
        for kind in kinds:
            s.plot(kind=kind, ax=ax)
            getattr(s.plot, kind)()
    @pytest.mark.slow
    def test_invalid_plot_data(self):
        # all-string data must raise TypeError for every common kind
        s = Series(list("abcd"))
        _, ax = self.plt.subplots()
        for kind in plotting.PlotAccessor._common_kinds:
            msg = "no numeric data to plot"
            with pytest.raises(TypeError, match=msg):
                s.plot(kind=kind, ax=ax)
    @pytest.mark.slow
    def test_valid_object_plot(self):
        # numeric data with object dtype should still plot
        s = Series(range(10), dtype=object)
        for kind in plotting.PlotAccessor._common_kinds:
            _check_plot_works(s.plot, kind=kind)
    def test_partially_invalid_plot_data(self):
        # mixed strings and numbers (object dtype) must raise
        s = Series(["a", "b", 1.0, 2])
        _, ax = self.plt.subplots()
        for kind in plotting.PlotAccessor._common_kinds:
            msg = "no numeric data to plot"
            with pytest.raises(TypeError, match=msg):
                s.plot(kind=kind, ax=ax)
    def test_invalid_kind(self):
        # unknown kind string must raise ValueError
        s = Series([1, 2])
        with pytest.raises(ValueError):
            s.plot(kind="aasdf")
    @pytest.mark.slow
    def test_dup_datetime_index_plot(self):
        # overlapping date ranges (duplicate index entries) should plot
        dr1 = date_range("1/1/2009", periods=4)
        dr2 = date_range("1/2/2009", periods=4)
        index = dr1.append(dr2)
        values = randn(index.size)
        s = Series(values, index=index)
        _check_plot_works(s.plot)
    @pytest.mark.slow
    def test_errorbar_plot(self):
        # yerr/xerr as Series, ndarray, list, DataFrame and scalar, for
        # line/bar and time-series plots; bad lengths/types must raise
        s = Series(np.arange(10), name="x")
        s_err = np.random.randn(10)
        d_err = DataFrame(randn(10, 2), index=s.index, columns=["x", "y"])
        # test line and bar plots
        kinds = ["line", "bar"]
        for kind in kinds:
            ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
            self._check_has_errorbars(ax, xerr=1, yerr=1)
        ax = _check_plot_works(s.plot, xerr=s_err)
        self._check_has_errorbars(ax, xerr=1, yerr=0)
        # test time series plotting
        ix = date_range("1/1/2000", "1/1/2001", freq="M")
        ts = Series(np.arange(12), index=ix, name="x")
        ts_err = Series(np.random.randn(12), index=ix)
        td_err = DataFrame(randn(12, 2), index=ix, columns=["x", "y"])
        ax = _check_plot_works(ts.plot, yerr=ts_err)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(ts.plot, yerr=td_err)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        # check incorrect lengths and types
        with pytest.raises(ValueError):
            s.plot(yerr=np.arange(11))
        s_err = ["zzz"] * 10
        with pytest.raises(TypeError):
            s.plot(yerr=s_err)
    def test_table(self):
        # table=True and table=<data> both render a table under the plot
        _check_plot_works(self.series.plot, table=True)
        _check_plot_works(self.series.plot, table=self.series)
    @pytest.mark.slow
    def test_series_grid_settings(self):
        # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
        self._check_grid_settings(
            Series([1, 2, 3]),
            plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds,
        )
    @pytest.mark.slow
    def test_standard_colors(self):
        # _get_standard_colors cycles a scalar or single-item color list
        from pandas.plotting._matplotlib.style import _get_standard_colors
        for c in ["r", "red", "green", "#FF0000"]:
            result = _get_standard_colors(1, color=c)
            assert result == [c]
            result = _get_standard_colors(1, color=[c])
            assert result == [c]
            result = _get_standard_colors(3, color=c)
            assert result == [c] * 3
            result = _get_standard_colors(3, color=[c])
            assert result == [c] * 3
    @pytest.mark.slow
    def test_standard_colors_all(self):
        # exhaustive check over all named and single-letter mpl colors
        import matplotlib.colors as colors
        from pandas.plotting._matplotlib.style import _get_standard_colors
        # multiple colors like mediumaquamarine
        for c in colors.cnames:
            result = _get_standard_colors(num_colors=1, color=c)
            assert result == [c]
            result = _get_standard_colors(num_colors=1, color=[c])
            assert result == [c]
            result = _get_standard_colors(num_colors=3, color=c)
            assert result == [c] * 3
            result = _get_standard_colors(num_colors=3, color=[c])
            assert result == [c] * 3
        # single letter colors like k
        for c in colors.ColorConverter.colors:
            result = _get_standard_colors(num_colors=1, color=c)
            assert result == [c]
            result = _get_standard_colors(num_colors=1, color=[c])
            assert result == [c]
            result = _get_standard_colors(num_colors=3, color=c)
            assert result == [c] * 3
            result = _get_standard_colors(num_colors=3, color=[c])
            assert result == [c] * 3
def test_series_plot_color_kwargs(self):
# GH1890
_, ax = self.plt.subplots()
ax = Series(np.arange(12) + 1).plot(color="green", ax=ax)
self._check_colors(ax.get_lines(), linecolors=["green"])
def test_time_series_plot_color_kwargs(self):
# #1890
_, ax = self.plt.subplots()
ax = Series(np.arange(12) + 1, index=date_range("1/1/2000", periods=12)).plot(
color="green", ax=ax
)
self._check_colors(ax.get_lines(), linecolors=["green"])
def test_time_series_plot_color_with_empty_kwargs(self):
import matplotlib as mpl
def_colors = self._unpack_cycler(mpl.rcParams)
index = date_range("1/1/2000", periods=12)
s = Series(np.arange(1, 13), index=index)
ncolors = 3
_, ax = self.plt.subplots()
for i in range(ncolors):
ax = s.plot(ax=ax)
self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
def test_xticklabels(self):
# GH11529
s = Series(np.arange(10), index=[f"P{i:02d}" for i in range(10)])
_, ax = self.plt.subplots()
ax = s.plot(xticks=[0, 3, 5, 9], ax=ax)
exp = [f"P{i:02d}" for i in [0, 3, 5, 9]]
self._check_text_labels(ax.get_xticklabels(), exp)
def test_xtick_barPlot(self):
# GH28172
s = pd.Series(range(10), index=[f"P{i:02d}" for i in range(10)])
ax = s.plot.bar(xticks=range(0, 11, 2))
exp = np.array(list(range(0, 11, 2)))
tm.assert_numpy_array_equal(exp, ax.get_xticks())
def test_custom_business_day_freq(self):
# GH7222
from pandas.tseries.offsets import CustomBusinessDay
s = Series(
range(100, 121),
index=pd.bdate_range(
start="2014-05-01",
end="2014-06-01",
freq=CustomBusinessDay(holidays=["2014-05-26"]),
),
)
_check_plot_works(s.plot)
    @pytest.mark.xfail
    def test_plot_accessor_updates_on_inplace(self):
        # Tick locations captured from a plot made BEFORE an inplace drop are
        # compared against a fresh plot made AFTER it; the test is marked xfail,
        # so the equality below is currently expected NOT to hold.
        s = Series([1, 2, 3, 4])
        _, ax = self.plt.subplots()
        ax = s.plot(ax=ax)
        before = ax.xaxis.get_ticklocs()
        s.drop([0, 1], inplace=True)
        _, ax = self.plt.subplots()
        after = ax.xaxis.get_ticklocs()
        tm.assert_numpy_array_equal(before, after)
@pytest.mark.parametrize("kind", ["line", "area"])
def test_plot_xlim_for_series(self, kind):
# test if xlim is also correctly plotted in Series for line and area
# GH 27686
s = Series([2, 3])
_, ax = self.plt.subplots()
s.plot(kind=kind, ax=ax)
xlims = ax.get_xlim()
assert xlims[0] < 0
assert xlims[1] > 1
def test_plot_no_rows(self):
# GH 27758
df = pd.Series(dtype=int)
assert df.empty
ax = df.plot()
assert len(ax.get_lines()) == 1
line = ax.get_lines()[0]
assert len(line.get_xdata()) == 0
assert len(line.get_ydata()) == 0
def test_plot_no_numeric_data(self):
df = pd.Series(["a", "b", "c"])
with pytest.raises(TypeError):
df.plot()
def test_style_single_ok(self):
s = pd.Series([1, 2])
ax = s.plot(style="s", color="C3")
assert ax.lines[0].get_color() == ["C3"]
| |
"""Trusted Networks auth provider.
It shows list of users if access from trusted network.
Abort login flow if not access from trusted network.
"""
from ipaddress import IPv4Address, IPv4Network, IPv6Address, IPv6Network, ip_network
from typing import Any, Dict, List, Optional, Union, cast
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow
from ..models import Credentials, UserMeta
# Addresses and networks may be either IPv4 or IPv6.
IPAddress = Union[IPv4Address, IPv6Address]
IPNetwork = Union[IPv4Network, IPv6Network]
# Configuration keys accepted by this auth provider.
CONF_TRUSTED_NETWORKS = "trusted_networks"
CONF_TRUSTED_USERS = "trusted_users"
CONF_GROUP = "group"
CONF_ALLOW_BYPASS_LOGIN = "allow_bypass_login"
# trusted_users maps a network to a list whose entries are either a user id
# (uuid4 hex string) or a {"group": group_id} dict; only the id format is
# validated here, existence is checked at login time.
CONFIG_SCHEMA = AUTH_PROVIDER_SCHEMA.extend(
    {
        vol.Required(CONF_TRUSTED_NETWORKS): vol.All(cv.ensure_list, [ip_network]),
        vol.Optional(CONF_TRUSTED_USERS, default={}): vol.Schema(
            # we only validate the format of user_id or group_id
            {
                ip_network: vol.All(
                    cv.ensure_list,
                    [
                        vol.Or(
                            cv.uuid4_hex,
                            vol.Schema({vol.Required(CONF_GROUP): cv.uuid4_hex}),
                        )
                    ],
                )
            }
        ),
        vol.Optional(CONF_ALLOW_BYPASS_LOGIN, default=False): cv.boolean,
    },
    extra=vol.PREVENT_EXTRA,
)
class InvalidAuthError(HomeAssistantError):
    """Error raised for access attempts originating outside the trusted networks."""
class InvalidUserError(HomeAssistantError):
    """Error raised when a login is attempted as a non-existing or inactive user."""
@AUTH_PROVIDERS.register("trusted_networks")
class TrustedNetworksAuthProvider(AuthProvider):
    """Trusted Networks auth provider.

    Allow passwordless access from trusted network.
    """

    DEFAULT_TITLE = "Trusted Networks"

    @property
    def trusted_networks(self) -> List[IPNetwork]:
        """Return trusted networks."""
        return cast(List[IPNetwork], self.config[CONF_TRUSTED_NETWORKS])

    @property
    def trusted_users(self) -> Dict[IPNetwork, Any]:
        """Return trusted users per network."""
        return cast(Dict[IPNetwork, Any], self.config[CONF_TRUSTED_USERS])

    @property
    def support_mfa(self) -> bool:
        """Trusted Networks auth provider does not support MFA."""
        return False

    async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
        """Return a flow to login.

        The flow offers every active, non-system user, narrowed down to the
        users/groups configured for the first trusted_users network that
        contains the caller's IP address.
        """
        assert context is not None
        ip_addr = cast(IPAddress, context.get("ip_address"))
        users = await self.store.async_get_users()
        available_users = [
            user for user in users if not user.system_generated and user.is_active
        ]
        for ip_net, user_or_group_list in self.trusted_users.items():
            if ip_addr not in ip_net:
                continue
            # Per CONFIG_SCHEMA each entry is either a user id (str) or a
            # {"group": group_id} dict.
            user_list = [
                user_id for user_id in user_or_group_list if isinstance(user_id, str)
            ]
            group_list = [
                group[CONF_GROUP]
                for group in user_or_group_list
                if isinstance(group, dict)
            ]
            # BUGFIX: the previous code flattened group_list one level, but its
            # elements are group-id *strings*, so flattening yielded single
            # characters and group-based trust could never match a user's
            # group.id. Compare against the group ids directly instead.
            available_users = [
                user
                for user in available_users
                if user.id in user_list
                or any(group.id in group_list for group in user.groups)
            ]
            break
        return TrustedNetworksLoginFlow(
            self,
            ip_addr,
            {user.id: user.name for user in available_users},
            self.config[CONF_ALLOW_BYPASS_LOGIN],
        )

    async def async_get_or_create_credentials(
        self, flow_result: Dict[str, str]
    ) -> Credentials:
        """Get credentials based on the flow result.

        Reuses an existing credential for the chosen user if one exists,
        otherwise creates and links a new one. Raises InvalidUserError if the
        chosen user is unknown, inactive or system generated.
        """
        user_id = flow_result["user"]
        users = await self.store.async_get_users()
        for user in users:
            if not user.system_generated and user.is_active and user.id == user_id:
                for credential in await self.async_credentials():
                    if credential.data["user_id"] == user_id:
                        return credential
                cred = self.async_create_credentials({"user_id": user_id})
                await self.store.async_link_user(user, cred)
                return cred

        # We only allow login as exist user
        raise InvalidUserError

    async def async_user_meta_for_credentials(
        self, credentials: Credentials
    ) -> UserMeta:
        """Return extra user metadata for credentials.

        Trusted network auth provider should never create new user.
        """
        raise NotImplementedError

    @callback
    def async_validate_access(self, ip_addr: IPAddress) -> None:
        """Make sure the access from trusted networks.

        Raise InvalidAuthError if not.
        Raise InvalidAuthError if trusted_networks is not configured.
        """
        if not self.trusted_networks:
            raise InvalidAuthError("trusted_networks is not configured")

        if not any(
            ip_addr in trusted_network for trusted_network in self.trusted_networks
        ):
            raise InvalidAuthError("Not in trusted_networks")
class TrustedNetworksLoginFlow(LoginFlow):
    """Handler for the login flow."""

    def __init__(
        self,
        auth_provider: TrustedNetworksAuthProvider,
        ip_addr: IPAddress,
        available_users: Dict[str, Optional[str]],
        allow_bypass_login: bool,
    ) -> None:
        """Initialize the login flow."""
        super().__init__(auth_provider)
        self._available_users = available_users
        self._ip_address = ip_addr
        self._allow_bypass_login = allow_bypass_login

    async def async_step_init(
        self, user_input: Optional[Dict[str, str]] = None
    ) -> Dict[str, Any]:
        """Handle the step of the form."""
        provider = cast(TrustedNetworksAuthProvider, self._auth_provider)
        try:
            provider.async_validate_access(self._ip_address)
        except InvalidAuthError:
            return self.async_abort(reason="not_whitelisted")

        if user_input is not None:
            return await self.async_finish(user_input)

        # Skip the form entirely when bypass is allowed and exactly one
        # candidate user exists.
        if self._allow_bypass_login and len(self._available_users) == 1:
            only_user = next(iter(self._available_users))
            return await self.async_finish({"user": only_user})

        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema({"user": vol.In(self._available_users)}),
        )
| |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import status_params
import utils
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import os
import re
from ambari_commons.os_check import OSCheck
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_stack_version, get_major_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions import get_klist_path
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.get_architecture import get_architecture
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
# Full command JSON from the Ambari server and the agent's scratch directory.
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
architecture = get_architecture()
stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
# Only populated while a stack upgrade/downgrade is in progress.
upgrade_direction = default("/commandParams/upgrade_direction", None)
rolling_restart = default("/commandParams/rolling_restart", False)
rolling_restart_safemode_exit_timeout = default("/configurations/cluster-env/namenode_rolling_restart_safemode_exit_timeout", None)
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
# there is a stack upgrade which has not yet been finalized; it's currently suspended
# NOTE(review): every other default() lookup uses a leading slash
# ("/roleParams/..."); confirm the missing slash here is intentional.
upgrade_suspended = default("roleParams/upgrade_suspended", False)
# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)
# The desired role is only available during a Non-Rolling Upgrade in HA.
# The server calculates which of the two NameNodes will be the active, and the other the standby since they
# are started using different commands.
desired_namenode_role = default("/commandParams/desired_namenode_role", None)
command_timeout = default("/commandParams/command_timeout", 900)
# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user = status_params.hdfs_user
root_user = "root"
hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
namenode_pid_file = status_params.namenode_pid_file
zkfc_pid_file = status_params.zkfc_pid_file
datanode_pid_file = status_params.datanode_pid_file
# Some datanode settings
dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
# Recomputed below once datanode ports are known (secure clusters only).
secure_dn_ports_are_in_use = False
hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
namenode_backup_dir = default("/configurations/hadoop-env/namenode_backup_dir", "/tmp/upgrades")
# hadoop default parameters
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_bin = stack_select.get_hadoop_dir("sbin")
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_home = stack_select.get_hadoop_dir("home")
hadoop_secure_dn_user = hdfs_user
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")
# hadoop parameters for stacks that support rolling_upgrade
if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
  mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
if not security_enabled:
  hadoop_secure_dn_user = '""'
else:
  dfs_dn_port = utils.get_port(dfs_dn_addr)
  dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
  dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
  # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
  if dfs_http_policy == "HTTPS_ONLY":
    secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
  elif dfs_http_policy == "HTTP_AND_HTTPS":
    secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
  else: # params.dfs_http_policy == "HTTP_ONLY" or not defined:
    secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
  # Privileged (<1024) ports force the datanode to start through jsvc as root;
  # an empty quoted user disables the secure-datanode startup path.
  if secure_dn_ports_are_in_use:
    hadoop_secure_dn_user = hdfs_user
  else:
    hadoop_secure_dn_user = '""'
ambari_libs_dir = "/var/lib/ambari-agent/lib"
limits_conf_dir = "/etc/security/limits.d"
hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
create_lib_snappy_symlinks = check_stack_feature(StackFeature.SNAPPY, stack_version_formatted)
jsvc_path = "/usr/lib/bigtop-utils"
execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
ulimit_cmd = "ulimit -c unlimited ; "
# Source and target locations for the 32/64-bit snappy native library symlinks.
snappy_so = "libsnappy.so"
so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
so_src_dir_x86 = format("{hadoop_home}/lib")
so_src_dir_x64 = format("{hadoop_home}/lib64")
so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
#security params
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
falcon_user = config['configurations']['falcon-env']['falcon_user']
#exclude file
# Hosts being decommissioned, and the dfs.hosts.exclude file they are written to.
hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
hdfs_include_file = None
manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
if include_file_path and manage_include_files:
  hdfs_include_file = slave_hosts
update_files_only = default("/commandParams/update_files_only",False)
command_phase = default("/commandParams/phase","")
# Resolve kerberos client binaries from the configured search paths.
klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
#hosts
hostname = config["hostname"]
public_hostname = config["public_hostname"]
rm_host = default("/clusterHostInfo/rm_host", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/hs_host", [])
jtnode_host = default("/clusterHostInfo/jtnode_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
nm_host = default("/clusterHostInfo/nm_host", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
# Presence flags derived from the host lists above.
has_ganglia_server = not len(ganglia_server_hosts) == 0
has_namenodes = not len(namenode_host) == 0
has_jobtracker = not len(jtnode_host) == 0
has_resourcemanager = not len(rm_host) == 0
# NOTE(review): "histroryserver" is a typo, but this is a public module
# attribute that other scripts may read via params.* — do not rename casually.
has_histroryserver = not len(hs_host) == 0
has_hbase_masters = not len(hbase_master_hosts) == 0
has_slaves = not len(slave_hosts) == 0
has_oozie_server = not len(oozie_servers) == 0
has_hcat_server_host = not len(hcat_server_hosts) == 0
has_hive_server_host = not len(hive_server_host) == 0
has_journalnode_hosts = not len(journalnode_hosts) == 0
has_zkfc_hosts = not len(zkfc_hosts) == 0
has_falcon_host = not len(falcon_host) == 0
# Role flags for the host this script is running on.
is_namenode_master = hostname in namenode_host
is_jtnode_master = hostname in jtnode_host
is_rmnode_master = hostname in rm_host
is_hsnode_master = hostname in hs_host
is_hbase_master = hostname in hbase_master_hosts
is_slave = hostname in slave_hosts
if has_ganglia_server:
  ganglia_server_host = ganglia_server_hosts[0]
#users and groups
yarn_user = config['configurations']['yarn-env']['yarn_user']
hbase_user = config['configurations']['hbase-env']['hbase_user']
oozie_user = config['configurations']['oozie-env']['oozie_user']
webhcat_user = config['configurations']['hive-env']['webhcat_user']
hive_user = config['configurations']['hive-env']['hive_user']
smoke_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
user_group = config['configurations']['cluster-env']['user_group']
root_group = "root"
proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group']
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
nfs_file_dump_dir = config['configurations']['hdfs-site']['nfs.file.dump.dir']
dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
hdfs_log_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
namenode_dirs_created_stub_dir = hdfs_log_dir
namenode_dirs_stub_filename = "namenode_dirs_created"
smoke_hdfs_user_dir = format("/user/{smoke_user}")
# Python 2 octal literal (rwxrwx---); would be 0o770 under Python 3.
smoke_hdfs_user_mode = 0770
hdfs_namenode_format_disabled = default("/configurations/cluster-env/hdfs_namenode_format_disabled", False)
# Marker directories recording that the NameNode was formatted/bootstrapped,
# so repeated start commands do not re-format an existing namespace.
hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
hdfs_namenode_bootstrapped_mark_suffix = "/namenode-bootstrapped/"
namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted",
  format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
  "/var/lib/hdfs/namenode/formatted"]
dfs_name_dirs = dfs_name_dir.split(",")
namenode_formatted_mark_dirs = []
namenode_bootstrapped_mark_dirs = []
for dn_dir in dfs_name_dirs:
 tmp_format_mark_dir = format("{dn_dir}{hdfs_namenode_formatted_mark_suffix}")
 tmp_bootstrap_mark_dir = format("{dn_dir}{hdfs_namenode_bootstrapped_mark_suffix}")
 namenode_formatted_mark_dirs.append(tmp_format_mark_dir)
 namenode_bootstrapped_mark_dirs.append(tmp_bootstrap_mark_dir)
# Use the namenode RPC address if configured, otherwise, fallback to the default file system
namenode_address = None
if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
  namenode_rpcaddress = config['configurations']['hdfs-site']['dfs.namenode.rpc-address']
  namenode_address = format("hdfs://{namenode_rpcaddress}")
else:
  namenode_address = config['configurations']['core-site']['fs.defaultFS']
fs_checkpoint_dirs = default("/configurations/hdfs-site/dfs.namenode.checkpoint.dir", "").split(',')
dfs_data_dirs = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
# HDFS High Availability properties
dfs_ha_enabled = False
# Prefer dfs.internal.nameservices; fall back to dfs.nameservices.
dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
if dfs_ha_nameservices is None:
  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
# hostname of the active HDFS HA Namenode (only used when HA is enabled)
dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
# hostname of the standby HDFS HA Namenode (only used when HA is enabled)
dfs_ha_namenode_standby = default("/configurations/hadoop-env/dfs_ha_initial_namenode_standby", None)
ha_zookeeper_quorum = config['configurations']['core-site']['ha.zookeeper.quorum']
jaas_file = os.path.join(hadoop_conf_secure_dir, 'hdfs_jaas.conf')
zk_namespace = default('/configurations/hdfs-site/ha.zookeeper.parent-znode', '/hadoop-ha')
# Values for the current Host
namenode_id = None
namenode_rpc = None
dfs_ha_namemodes_ids_list = []
other_namenode_id = None
# HA is considered enabled only when more than one namenode id is declared.
if dfs_ha_namenode_ids:
  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
  if dfs_ha_namenode_ids_array_len > 1:
    dfs_ha_enabled = True
if dfs_ha_enabled:
  # Find which namenode id belongs to this host (by private or public name).
  for nn_id in dfs_ha_namemodes_ids_list:
    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
    if hostname.lower() in nn_host.lower():
      namenode_id = nn_id
      namenode_rpc = nn_host
    elif public_hostname.lower() in nn_host.lower():
      namenode_id = nn_id
      namenode_rpc = nn_host
  # With HA enabled namenode_address is recomputed
  namenode_address = format('hdfs://{dfs_ha_nameservices}')
  # Calculate the namenode id of the other namenode. This is needed during RU to initiate an HA failover using ZKFC.
  if namenode_id is not None and len(dfs_ha_namemodes_ids_list) == 2:
    other_namenode_id = list(set(dfs_ha_namemodes_ids_list) - set([namenode_id]))[0]
if dfs_http_policy is not None and dfs_http_policy.upper() == "HTTPS_ONLY":
  https_only = True
  journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.https-address', None)
else:
  https_only = False
  journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
if journalnode_address:
  journalnode_port = journalnode_address.split(":")[1]
# kinit commands for the datanode/namenode/hdfs principals (empty when
# security is off, so callers can always prepend them unconditionally).
if security_enabled:
  dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
  dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
  dn_principal_name = dn_principal_name.replace('_HOST',hostname.lower())
  dn_kinit_cmd = format("{kinit_path_local} -kt {dn_keytab} {dn_principal_name};")
  nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
  nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
  nn_principal_name = nn_principal_name.replace('_HOST',hostname.lower())
  nn_kinit_cmd = format("{kinit_path_local} -kt {nn_keytab} {nn_principal_name};")
  jn_principal_name = default("/configurations/hdfs-site/dfs.journalnode.kerberos.principal", None)
  if jn_principal_name:
    jn_principal_name = jn_principal_name.replace('_HOST', hostname.lower())
  jn_keytab = default("/configurations/hdfs-site/dfs.journalnode.keytab.file", None)
  hdfs_kinit_cmd = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
else:
  dn_kinit_cmd = ""
  nn_kinit_cmd = ""
  hdfs_kinit_cmd = ""
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
# NOTE: this deliberately shadows the imported HdfsResource class with a
# pre-configured partial bound to this cluster's settings.
HdfsResource = functools.partial(
  HdfsResource,
  user=hdfs_user,
  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
  security_enabled = security_enabled,
  keytab = hdfs_user_keytab,
  kinit_path_local = kinit_path_local,
  hadoop_bin_dir = hadoop_bin_dir,
  hadoop_conf_dir = hadoop_conf_dir,
  principal_name = hdfs_principal_name,
  hdfs_site = hdfs_site,
  default_fs = default_fs,
  immutable_paths = get_not_managed_resources(),
  dfs_type = dfs_type
)
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
lzo_packages = get_lzo_packages(stack_version_unformatted)
name_node_params = default("/commandParams/namenode", None)
java_home = config['hostLevelParams']['java_home']
java_version = expect("/hostLevelParams/java_version", int)
java_exec = format("{java_home}/bin/java")
# JVM heap/GC sizing for the HDFS daemons.
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
hadoop_security_authorization = config['configurations']['core-site']['hadoop.security.authorization']
fs_default_name = config['configurations']['core-site']['fs.defaultFS']
hadoop_security_auth_to_local = config['configurations']['core-site']['hadoop.security.auth_to_local']
if security_enabled:
  sn_principal_name = default("/configurations/hdfs-site/dfs.secondary.namenode.kerberos.principal", "nn/_HOST@EXAMPLE.COM")
  sn_principal_name = sn_principal_name.replace('_HOST',hostname.lower())
# for curl command in ranger plugin to get db connector
jdk_location = config['hostLevelParams']['jdk_location']
java_share_dir = '/usr/share/java'
is_https_enabled = is_https_enabled_in_hdfs(config['configurations']['hdfs-site']['dfs.http.policy'],
                                            config['configurations']['hdfs-site']['dfs.https.enable'])
# ranger hdfs plugin section start
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature
xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
# ambari-server hostname
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
# ranger hdfs plugin enabled property
enable_ranger_hdfs = default("/configurations/ranger-hdfs-plugin-properties/ranger-hdfs-plugin-enabled", "No")
enable_ranger_hdfs = True if enable_ranger_hdfs.lower() == 'yes' else False
# get ranger hdfs properties if enable_ranger_hdfs is True
if enable_ranger_hdfs:
  # ranger policy url
  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
  if xml_configurations_supported:
    policymgr_mgr_url = config['configurations']['ranger-hdfs-security']['ranger.plugin.hdfs.policy.rest.url']
  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
  # ranger audit db user
  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
  # ranger hdfs service name
  repo_name = str(config['clusterName']) + '_hadoop'
  repo_name_value = config['configurations']['ranger-hdfs-security']['ranger.plugin.hdfs.service.name']
  # "{{repo_name}}" is the unrendered template placeholder — ignore it.
  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
    repo_name = repo_name_value
  hadoop_rpc_protection = config['configurations']['ranger-hdfs-plugin-properties']['hadoop.rpc.protection']
  common_name_for_certificate = config['configurations']['ranger-hdfs-plugin-properties']['common.name.for.certificate']
  repo_config_username = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
  # ranger-env config
  ranger_env = config['configurations']['ranger-env']
  # create ranger-env config having external ranger credential properties
  if not has_ranger_admin and enable_ranger_hdfs:
    external_admin_username = default('/configurations/ranger-hdfs-plugin-properties/external_admin_username', 'admin')
    external_admin_password = default('/configurations/ranger-hdfs-plugin-properties/external_admin_password', 'admin')
    external_ranger_admin_username = default('/configurations/ranger-hdfs-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
    external_ranger_admin_password = default('/configurations/ranger-hdfs-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
    ranger_env = {}
    ranger_env['admin_username'] = external_admin_username
    ranger_env['admin_password'] = external_admin_password
    ranger_env['ranger_admin_username'] = external_ranger_admin_username
    ranger_env['ranger_admin_password'] = external_ranger_admin_password
  ranger_plugin_properties = config['configurations']['ranger-hdfs-plugin-properties']
  policy_user = config['configurations']['ranger-hdfs-plugin-properties']['policy_user']
  repo_config_password = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
  xa_audit_db_password = ''
  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
  downloaded_custom_connector = None
  previous_jdbc_jar_name = None
  driver_curl_source = None
  driver_curl_target = None
  previous_jdbc_jar = None
  # to get db connector related properties
  if has_ranger_admin and stack_supports_ranger_audit_db:
    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
    driver_curl_target = format("{hadoop_lib_home}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
    previous_jdbc_jar = format("{hadoop_lib_home}/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
  sql_connector_jar = ''
hdfs_ranger_plugin_config = {
'username': repo_config_username,
'password': repo_config_password,
'hadoop.security.authentication': hadoop_security_authentication,
'hadoop.security.authorization': hadoop_security_authorization,
'fs.default.name': fs_default_name,
'hadoop.security.auth_to_local': hadoop_security_auth_to_local,
'hadoop.rpc.protection': hadoop_rpc_protection,
'commonNameForCertificate': common_name_for_certificate,
'dfs.datanode.kerberos.principal': dn_principal_name if security_enabled else '',
'dfs.namenode.kerberos.principal': nn_principal_name if security_enabled else '',
'dfs.secondary.namenode.kerberos.principal': sn_principal_name if security_enabled else ''
}
hdfs_ranger_plugin_repo = {
'isActive': 'true',
'config': json.dumps(hdfs_ranger_plugin_config),
'description': 'hdfs repo',
'name': repo_name,
'repositoryType': 'hdfs',
'assetType': '1'
}
custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
if len(custom_ranger_service_config) > 0:
hdfs_ranger_plugin_config.update(custom_ranger_service_config)
if stack_supports_ranger_kerberos and security_enabled:
hdfs_ranger_plugin_config['policy.download.auth.users'] = hdfs_user
hdfs_ranger_plugin_config['tag.download.auth.users'] = hdfs_user
if stack_supports_ranger_kerberos:
hdfs_ranger_plugin_config['ambari.service.check.user'] = policy_user
hdfs_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': hdfs_ranger_plugin_config,
'description': 'hdfs repo',
'name': repo_name,
'type': 'hdfs'
}
xa_audit_db_is_enabled = False
if xml_configurations_supported and stack_supports_ranger_audit_db:
xa_audit_db_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.db']
xa_audit_hdfs_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
ssl_keystore_password = config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
ssl_truststore_password = config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
# for SQLA explicitly disable audit to DB for Ranger
if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
xa_audit_db_is_enabled = False
# need this to capture cluster name from where ranger hdfs plugin is enabled
cluster_name = config['clusterName']
# ranger hdfs plugin section end
| |
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
    """Plain-text greeting endpoint; honors an optional ?name= argument."""

    def get(self):
        who = self.get_argument("name", "world")
        self.set_header("Content-Type", "text/plain")
        self.finish("Hello %s!" % who)
class PostHandler(RequestHandler):
    """Echoes back the two form arguments of a POST request."""

    def post(self):
        first = self.get_argument("arg1")
        second = self.get_argument("arg2")
        self.finish("Post arg1: %s, arg2: %s" % (first, second))
class PutHandler(RequestHandler):
    """Echoes the raw PUT body back to the client, prefixed with a label."""

    def put(self):
        payload = self.request.body
        self.write("Put body: ")
        self.write(payload)
class RedirectHandler(RequestHandler):
    """Redirects every request to ?url=, using ?status= (default 302)."""

    def prepare(self):
        target = self.get_argument("url")
        code = int(self.get_argument("status", "302"))
        self.redirect(target, status=code)
class ChunkHandler(RequestHandler):
    """Sends the response in two chunks: "asdf" (flushed eagerly), then "qwer"."""

    def get(self):
        for piece in ("asdf", "qwer"):
            self.write(piece)
            if piece == "asdf":
                # Force the first chunk onto the wire; finish() flushes the rest.
                self.flush()
class AuthHandler(RequestHandler):
    """Echoes the request's Authorization header in the response body."""

    def get(self):
        credentials = self.request.headers["Authorization"]
        self.finish(credentials)
class CountdownHandler(RequestHandler):
    """Redirects /countdown/N to /countdown/N-1 until zero, then writes "Zero"."""

    def get(self, count):
        remaining = int(count)
        if remaining <= 0:
            self.write("Zero")
        else:
            self.redirect(self.reverse_url("countdown", remaining - 1))
class EchoPostHandler(RequestHandler):
    """Returns a POST body unchanged."""

    def post(self):
        payload = self.request.body
        self.write(payload)
class UserAgentHandler(RequestHandler):
    """Reports the request's User-Agent header (or a placeholder when absent)."""

    def get(self):
        agent = self.request.headers.get('User-Agent', 'User agent not set')
        self.write(agent)
class ContentLength304Handler(RequestHandler):
    """Produces a 304 that (non-standardly) still carries Content-Length."""

    def get(self):
        self.set_status(304)
        self.set_header('Content-Length', 42)

    def _clear_headers_for_304(self):
        # Tornado normally drops entity headers on 304 responses; override the
        # hook with a no-op so we can imitate servers that keep them anyway.
        pass
class PatchHandler(RequestHandler):
    """Echoes a PATCH payload so tests can verify it survived the round trip."""

    def patch(self):
        payload = self.request.body
        self.write(payload)
class AllMethodsHandler(RequestHandler):
    # Extend the standard verb set with a nonstandard "OTHER" method so the
    # client's allow_nonstandard_methods option can be exercised.
    SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)

    def method(self):
        """Write the request's HTTP method name as the response body."""
        self.write(self.request.method)
    # Every supported verb routes to the same echo implementation.
    get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
    """Behavioral tests shared by every HTTPClient implementation."""

    def get_app(self):
        """Application wiring one handler per client feature under test."""
        return Application([
            url("/hello", HelloWorldHandler),
            url("/post", PostHandler),
            url("/put", PutHandler),
            url("/redirect", RedirectHandler),
            url("/chunk", ChunkHandler),
            url("/auth", AuthHandler),
            url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
            url("/echopost", EchoPostHandler),
            url("/user_agent", UserAgentHandler),
            url("/304_with_content_length", ContentLength304Handler),
            url("/all_methods", AllMethodsHandler),
            url('/patch', PatchHandler),
        ], gzip=True)

    def test_patch_receives_payload(self):
        body = b"some patch data"
        response = self.fetch("/patch", method='PATCH', body=body)
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, body)

    @skipOnTravis
    def test_hello_world(self):
        response = self.fetch("/hello")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.headers["Content-Type"], "text/plain")
        self.assertEqual(response.body, b"Hello world!")
        # A local round trip should complete in well under a second.
        self.assertEqual(int(response.request_time), 0)

        response = self.fetch("/hello?name=Ben")
        self.assertEqual(response.body, b"Hello Ben!")

    def test_streaming_callback(self):
        # streaming_callback is also tested in test_chunked
        chunks = []
        response = self.fetch("/hello",
                              streaming_callback=chunks.append)
        # with streaming_callback, data goes to the callback and not response.body
        self.assertEqual(chunks, [b"Hello world!"])
        self.assertFalse(response.body)

    def test_post(self):
        response = self.fetch("/post", method="POST",
                              body="arg1=foo&arg2=bar")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")

    def test_chunked(self):
        response = self.fetch("/chunk")
        self.assertEqual(response.body, b"asdfqwer")

        chunks = []
        response = self.fetch("/chunk",
                              streaming_callback=chunks.append)
        self.assertEqual(chunks, [b"asdf", b"qwer"])
        self.assertFalse(response.body)

    def test_chunked_close(self):
        # test case in which chunks spread read-callback processing
        # over several ioloop iterations, but the connection is already closed.
        sock, port = bind_unused_port()
        with closing(sock):
            def write_response(stream, request_data):
                # Hand-rolled chunked response: two 1-byte chunks ("1", "2"),
                # terminator chunk, then immediate close.
                stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1
1
2
0

""".replace(b"\n", b"\r\n"), callback=stream.close)

            def accept_callback(conn, address):
                # fake an HTTP server using chunked encoding where the final chunks
                # and connection close all happen at once
                stream = IOStream(conn, io_loop=self.io_loop)
                stream.read_until(b"\r\n\r\n",
                                  functools.partial(write_response, stream))
            netutil.add_accept_handler(sock, accept_callback, self.io_loop)
            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
            resp = self.wait()
            resp.rethrow()
            self.assertEqual(resp.body, b"12")
            self.io_loop.remove_handler(sock.fileno())

    def test_streaming_stack_context(self):
        chunks = []
        exc_info = []

        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True

        def streaming_cb(chunk):
            chunks.append(chunk)
            if chunk == b'qwer':
                1 / 0

        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', streaming_callback=streaming_cb)

        self.assertEqual(chunks, [b'asdf', b'qwer'])
        self.assertEqual(1, len(exc_info))
        self.assertIs(exc_info[0][0], ZeroDivisionError)

    def test_basic_auth(self):
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")

    def test_basic_auth_explicit_mode(self):
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame",
                                    auth_mode="basic").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")

    def test_unsupported_auth_mode(self):
        # curl and simple clients handle errors a bit differently; the
        # important thing is that they don't fall back to basic auth
        # on an unknown mode.
        with ExpectLog(gen_log, "uncaught exception", required=False):
            with self.assertRaises((ValueError, HTTPError)):
                response = self.fetch("/auth", auth_username="Aladdin",
                                      auth_password="open sesame",
                                      auth_mode="asdf")
                response.rethrow()

    def test_follow_redirect(self):
        response = self.fetch("/countdown/2", follow_redirects=False)
        self.assertEqual(302, response.code)
        self.assertTrue(response.headers["Location"].endswith("/countdown/1"))

        response = self.fetch("/countdown/2")
        self.assertEqual(200, response.code)
        self.assertTrue(response.effective_url.endswith("/countdown/0"))
        self.assertEqual(b"Zero", response.body)

    def test_credentials_in_url(self):
        url = self.get_url("/auth").replace("http://", "http://me:secret@")
        self.http_client.fetch(url, self.stop)
        response = self.wait()
        self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
                         response.body)

    def test_body_encoding(self):
        unicode_body = u("\xe9")
        byte_body = binascii.a2b_hex(b"e9")

        # unicode string in body gets converted to utf8
        response = self.fetch("/echopost", method="POST", body=unicode_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "2")
        self.assertEqual(response.body, utf8(unicode_body))

        # byte strings pass through directly
        response = self.fetch("/echopost", method="POST",
                              body=byte_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)

        # Mixing unicode in headers and byte string bodies shouldn't
        # break anything
        response = self.fetch("/echopost", method="POST", body=byte_body,
                              headers={"Content-Type": "application/blah"},
                              user_agent=u("foo"))
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)

    def test_types(self):
        response = self.fetch("/hello")
        self.assertEqual(type(response.body), bytes)
        self.assertEqual(type(response.headers["Content-Type"]), str)
        self.assertEqual(type(response.code), int)
        self.assertEqual(type(response.effective_url), str)

    def test_header_callback(self):
        first_line = []
        headers = {}
        chunks = []

        def header_callback(header_line):
            if header_line.startswith('HTTP/'):
                first_line.append(header_line)
            elif header_line != '\r\n':
                k, v = header_line.split(':', 1)
                headers[k] = v.strip()

        def streaming_callback(chunk):
            # All header callbacks are run before any streaming callbacks,
            # so the header data is available to process the data as it
            # comes in.
            self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
            chunks.append(chunk)

        self.fetch('/chunk', header_callback=header_callback,
                   streaming_callback=streaming_callback)
        self.assertEqual(len(first_line), 1)
        self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
        self.assertEqual(chunks, [b'asdf', b'qwer'])

    def test_header_callback_stack_context(self):
        exc_info = []

        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True

        def header_callback(header_line):
            if header_line.startswith('Content-Type:'):
                1 / 0

        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', header_callback=header_callback)
        self.assertEqual(len(exc_info), 1)
        self.assertIs(exc_info[0][0], ZeroDivisionError)

    def test_configure_defaults(self):
        defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
        # Construct a new instance of the configured client class
        client = self.http_client.__class__(self.io_loop, force_instance=True,
                                            defaults=defaults)
        try:
            client.fetch(self.get_url('/user_agent'), callback=self.stop)
            response = self.wait()
            self.assertEqual(response.body, b'TestDefaultUserAgent')
        finally:
            client.close()

    def test_header_types(self):
        # Header values may be passed as character or utf8 byte strings,
        # in a plain dictionary or an HTTPHeaders object.
        # Keys must always be the native str type.
        # All combinations should have the same results on the wire.
        for value in [u("MyUserAgent"), b"MyUserAgent"]:
            for container in [dict, HTTPHeaders]:
                headers = container()
                headers['User-Agent'] = value
                resp = self.fetch('/user_agent', headers=headers)
                self.assertEqual(
                    resp.body, b"MyUserAgent",
                    "response=%r, value=%r, container=%r" %
                    (resp.body, value, container))

    def test_304_with_content_length(self):
        # According to the spec 304 responses SHOULD NOT include
        # Content-Length or other entity headers, but some servers do it
        # anyway.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
        response = self.fetch('/304_with_content_length')
        self.assertEqual(response.code, 304)
        self.assertEqual(response.headers['Content-Length'], '42')

    def test_final_callback_stack_context(self):
        # The final callback should be run outside of the httpclient's
        # stack_context.  We want to ensure that there is not stack_context
        # between the user's callback and the IOLoop, so monkey-patch
        # IOLoop.handle_callback_exception and disable the test harness's
        # context with a NullContext.
        # Note that this does not apply to secondary callbacks (header
        # and streaming_callback), as errors there must be seen as errors
        # by the http client so it can clean up the connection.
        exc_info = []

        def handle_callback_exception(callback):
            exc_info.append(sys.exc_info())
            self.stop()
        self.io_loop.handle_callback_exception = handle_callback_exception
        with NullContext():
            self.http_client.fetch(self.get_url('/hello'),
                                   lambda response: 1 / 0)
        self.wait()
        self.assertEqual(exc_info[0][0], ZeroDivisionError)

    @gen_test
    def test_future_interface(self):
        response = yield self.http_client.fetch(self.get_url('/hello'))
        self.assertEqual(response.body, b'Hello world!')

    @gen_test
    def test_future_http_error(self):
        with self.assertRaises(HTTPError) as context:
            yield self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(context.exception.code, 404)
        self.assertEqual(context.exception.response.code, 404)

    @gen_test
    def test_future_http_error_no_raise(self):
        response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
        self.assertEqual(response.code, 404)

    @gen_test
    def test_reuse_request_from_response(self):
        # The response.request attribute should be an HTTPRequest, not
        # a _RequestProxy.
        # This test uses self.http_client.fetch because self.fetch calls
        # self.get_url on the input unconditionally.
        url = self.get_url('/hello')
        response = yield self.http_client.fetch(url)
        self.assertEqual(response.request.url, url)
        self.assertTrue(isinstance(response.request, HTTPRequest))
        response2 = yield self.http_client.fetch(response.request)
        self.assertEqual(response2.body, b'Hello world!')

    def test_all_methods(self):
        for method in ['GET', 'DELETE', 'OPTIONS']:
            response = self.fetch('/all_methods', method=method)
            self.assertEqual(response.body, utf8(method))
        for method in ['POST', 'PUT', 'PATCH']:
            response = self.fetch('/all_methods', method=method, body=b'')
            self.assertEqual(response.body, utf8(method))
        response = self.fetch('/all_methods', method='HEAD')
        self.assertEqual(response.body, b'')
        response = self.fetch('/all_methods', method='OTHER',
                              allow_nonstandard_methods=True)
        self.assertEqual(response.body, b'OTHER')

    @gen_test
    def test_body_sanity_checks(self):
        hello_url = self.get_url('/hello')
        with self.assertRaises(ValueError) as context:
            yield self.http_client.fetch(hello_url, body='data')

        self.assertTrue('must be None' in str(context.exception))

        with self.assertRaises(ValueError) as context:
            yield self.http_client.fetch(hello_url, method='POST')

        self.assertTrue('must not be None' in str(context.exception))

    # This test causes odd failures with the combination of
    # curl_httpclient (at least with the version of libcurl available
    # on ubuntu 12.04), TwistedIOLoop, and epoll.  For POST (but not PUT),
    # curl decides the response came back too soon and closes the connection
    # to start again.  It does this *before* telling the socket callback to
    # unregister the FD.  Some IOLoop implementations have special kernel
    # integration to discover this immediately.  Tornado's IOLoops
    # ignore errors on remove_handler to accommodate this behavior, but
    # Twisted's reactor does not.  The removeReader call fails and so
    # do all future removeAll calls (which our tests do at cleanup).
    #
    #def test_post_307(self):
    #    response = self.fetch("/redirect?status=307&url=/post",
    #                          method="POST", body=b"arg1=foo&arg2=bar")
    #    self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")

    def test_put_307(self):
        response = self.fetch("/redirect?status=307&url=/put",
                              method="PUT", body=b"hello")
        response.rethrow()
        self.assertEqual(response.body, b"Put body: hello")
class RequestProxyTest(unittest.TestCase):
    """Tests for _RequestProxy's request-value vs. default-value precedence."""

    def test_request_set(self):
        # A value set on the request is visible through the proxy.
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          user_agent='foo'),
                              dict())
        self.assertEqual(proxy.user_agent, 'foo')

    def test_default_set(self):
        # A default fills in attributes the request did not set.
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict(network_interface='foo'))
        self.assertEqual(proxy.network_interface, 'foo')

    def test_both_set(self):
        # The request value takes precedence over a conflicting default.
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          proxy_host='foo'),
                              dict(proxy_host='bar'))
        self.assertEqual(proxy.proxy_host, 'foo')

    def test_neither_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        self.assertIs(proxy.auth_username, None)

    def test_bad_attribute(self):
        # Unknown attributes raise rather than silently returning None.
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        with self.assertRaises(AttributeError):
            proxy.foo

    def test_defaults_none(self):
        # A None defaults mapping behaves like an empty one.
        proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
        self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
    """Tests for the HTTPResponse object itself."""

    def test_str(self):
        # str() should produce a readable summary including the status code.
        response = HTTPResponse(HTTPRequest('http://example.com'),
                                200, headers={}, buffer=BytesIO())
        s = str(response)
        self.assertTrue(s.startswith('HTTPResponse('))
        self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
    """Exercises the blocking HTTPClient against a server on another thread."""

    def setUp(self):
        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                  'AsyncIOMainLoop'):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop or '
                'AsyncIOMainLoop')
        # The server runs on its own IOLoop in a background thread so the
        # synchronous client on this thread has something to talk to.
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            self.server_ioloop.stop()
        # Shut down on the server's own loop, then reclaim its resources.
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        """Absolute URL for *path* on the background test server."""
        return 'http://localhost:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
    """Tests for HTTPRequest attribute normalization."""

    def test_headers(self):
        request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
        self.assertEqual(request.headers, {'foo': 'bar'})

    def test_headers_setter(self):
        request = HTTPRequest('http://example.com')
        request.headers = {'bar': 'baz'}
        self.assertEqual(request.headers, {'bar': 'baz'})

    def test_null_headers_setter(self):
        # Assigning None resets headers to an empty mapping.
        request = HTTPRequest('http://example.com')
        request.headers = None
        self.assertEqual(request.headers, {})

    def test_body(self):
        # Character-string bodies are encoded to utf8 bytes.
        request = HTTPRequest('http://example.com', body='foo')
        self.assertEqual(request.body, utf8('foo'))

    def test_body_setter(self):
        request = HTTPRequest('http://example.com')
        request.body = 'foo'
        self.assertEqual(request.body, utf8('foo'))

    def test_if_modified_since(self):
        # A datetime if_modified_since becomes a formatted header value.
        http_date = datetime.datetime.utcnow()
        request = HTTPRequest('http://example.com', if_modified_since=http_date)
        self.assertEqual(request.headers,
                         {'If-Modified-Since': format_timestamp(http_date)})
| |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# ># IOOS System Test: [Extreme Events Theme:](https://github.com/ioos/system-test/wiki/Development-of-Test-Themes#theme-2-extreme-events) Inundation
# <markdowncell>
# ### Can we estimate the return period of a water level by comparing modeled and/or observed water levels with NOAA exceedance probability plots?
# <markdowncell>
# Methodology:
#
# * Define temporal and spatial bounds of interest, as well as parameters of interest
# * Search for available service endpoints in the NGDC CSW catalog, then inform the user of the DAP (model) and SOS (observation) service endpoints available
# * Obtain the stations in the spatial boundaries, and process them to obtain observation data within the temporal constraints, identifying the yearly max
# * Plot observation stations on a map and indicate to the user if the minimum number of years has been met for extreme value analysis (red marker if condition is false)
# * Using DAP (model) endpoints find all available models data sets that fall in the area of interest, for the specified time range, and extract a model grid cell closest to all the given station locations (<b>Still in Development</b>)
# * Plot the extracted model grid cell from each available model on to the map
# * Plot the annual max for each station as a timeseries plot
# * Perform extreme value analysis for a selected station identifying the return period and compare to NOAA tides and currents plot for one of the same stations
#
# Estimated Time To Process Notebook: --.--
# <headingcell level=4>
# import required libraries
# <codecell>
import matplotlib.pyplot as plt
from pylab import *
import sys
import csv
import json
from scipy.stats import genextreme
import scipy.stats as ss
import numpy as np
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import random
import netCDF4
import pandas as pd
import datetime as dt
from pyoos.collectors.coops.coops_sos import CoopsSos
import cStringIO
import iris
import urllib2
import parser
from lxml import etree #TODO suggest using bs4 instead for ease of access to XML objects
#generated for csw interface
#from fes_date_filter_formatter import fes_date_filter #date formatter (R.Signell)
import requests #required for the processing of requests
from utilities import *
from IPython.display import HTML, Image
from shapely.geometry import Polygon,Point,LineString #used for lat lon points
import folium #required for leaflet mapping
from pydap.client import open_url #pypdap
import datetime as dt
from datetime import datetime
from datetime import timedelta
%matplotlib inline
# <markdowncell>
# some functions from [Rich Signell Notebook](http://nbviewer.ipython.org/github/rsignell-usgs/notebook/blob/fef9438303b49a923024892db1ef3115e34d8271/CSW/IOOS_inundation.ipynb)
# <headingcell level=4>
# Specify Temporal and Spatial conditions
# <codecell>
# bounding box of interest: [bottom right [lat, lon], top left [lat, lon]]
# NOTE(review): the literal values read as [lon, lat] pairs — confirm ordering
# against the fes.BBox call that consumes them further down.
bounding_box_type = "box"
bounding_box = [[-75.94,38.67],[-66.94,41.5]]
# temporal range of interest (formatted for the SOS/CSW queries below)
start_date = dt.datetime(1980,5,1).strftime('%Y-%m-%d %H:00')
end_date = dt.datetime(2014,5,1).strftime('%Y-%m-%d %H:00')
time_date_range = [start_date,end_date] #start_date_end_date
print start_date,'to',end_date
#number of years required for analysis, obs and model data
num_years_required = 30
# <codecell>
# Candidate variable names for "water level" used to search catalog records.
name_list=['water_surface_height_above_reference_datum',
           'sea_surface_height_above_geoid','sea_surface_elevation',
           'sea_surface_height_above_reference_ellipsoid','sea_surface_height_above_sea_level',
           'sea_surface_height','water level']
# Variable name used for the SOS (observation) requests below.
sos_name = 'water_surface_height_above_reference_datum'
# <codecell>
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw' # NGDC Geoportal
csw = CatalogueServiceWeb(endpoint,timeout=60)
# Show the ISO queryables supported by the catalog's GetRecords operation.
for oper in csw.operations:
    if oper.name == 'GetRecords':
        print '\nISO Queryables:\n',oper.constraints['SupportedISOQueryables']['values']
        #pass
#put the names in a dict for ease of access
data_dict = {}
data_dict["water"] = {"names":['water_surface_height_above_reference_datum',
                               'sea_surface_height_above_geoid','sea_surface_elevation',
                               'sea_surface_height_above_reference_ellipsoid','sea_surface_height_above_sea_level',
                               'sea_surface_height','water level'], "sos_name":['water_surface_height_above_reference_datum']}
# <codecell>
def fes_date_filter(start_date='1900-01-01', stop_date='2100-01-01', constraint='overlaps'):
    """Build a pair of FES temporal filters for a CSW query.

    start_date, stop_date: ISO date strings bounding the window of interest.
    constraint: 'overlaps' matches records whose extent overlaps the window;
                'within' matches records entirely inside the window.
    Returns (start_filter, stop_filter) suitable for combining with fes.And.
    Raises ValueError for an unrecognized constraint (previously an unknown
    value fell through and produced a NameError on the return statement).
    """
    if constraint == 'overlaps':
        # Record begins before the window ends AND ends after the window begins.
        start = fes.PropertyIsLessThanOrEqualTo(propertyname='apiso:TempExtent_begin', literal=stop_date)
        stop = fes.PropertyIsGreaterThanOrEqualTo(propertyname='apiso:TempExtent_end', literal=start_date)
    elif constraint == 'within':
        # Record begins and ends inside the window.
        start = fes.PropertyIsGreaterThanOrEqualTo(propertyname='apiso:TempExtent_begin', literal=start_date)
        stop = fes.PropertyIsLessThanOrEqualTo(propertyname='apiso:TempExtent_end', literal=stop_date)
    else:
        raise ValueError("constraint must be 'overlaps' or 'within', got %r" % (constraint,))
    return start, stop
# <codecell>
# convert User Input into FES filters
start,stop = fes_date_filter(start_date,end_date)
# Flatten the two bounding-box corners into a flat list for fes.BBox.
box = []
box.append(bounding_box[0][0])
box.append(bounding_box[0][1])
box.append(bounding_box[1][0])
box.append(bounding_box[1][1])
bbox = fes.BBox(box)
# Match records mentioning any of the candidate water-level variable names.
or_filt = fes.Or([fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
                                     escapeChar='\\',wildCard='*',singleChar='?') for val in name_list])
# Exclude records describing averaged products.
val = 'Averages'
not_filt = fes.Not([fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
                                       escapeChar='\\',wildCard='*',singleChar='?')])
# <codecell>
filter_list = [fes.And([ bbox, start, stop, or_filt, not_filt]) ]
# connect to CSW, explore its properties
# try request using multiple filters "and" syntax: [[filter1,filter2]]
csw.getrecords2(constraints=filter_list,maxrecords=1000,esn='full')
# <codecell>
def service_urls(records, service_string='urn:x-esri:specification:ServiceType:odp:url'):
    """Extract service URLs of a specific type (DAP, SOS) from CSW records.

    records: mapping of record id -> CSW record; each record exposes a
        ``references`` list of {'scheme': ..., 'url': ...} dicts.
    service_string: the reference scheme to select.
    Returns a list with at most one URL per record (the first match).
    """
    urls = []
    # dict.items() works on both Python 2 and 3; the original used
    # dict.iteritems(), which was removed in Python 3.
    for key, rec in records.items():
        # create a generator object, and iterate through it until the match is
        # found; if not found, fall back to the default (here None)
        url = next((d['url'] for d in rec.references if d['scheme'] == service_string), None)
        if url is not None:
            urls.append(url)
    return urls
# <codecell>
#print how many records the filtered CSW query returned
print "number of datasets available: ",len(csw.records.keys())
# <markdowncell>
# Print all the records (should you want to)
# <codecell>
#print "\n".join(csw.records)
# <markdowncell>
# Dap URLS
# <codecell>
dap_urls = service_urls(csw.records,service_string='urn:x-esri:specification:ServiceType:odp:url')
#remove duplicates and organize
dap_urls = sorted(set(dap_urls))
print "Total DAP:",len(dap_urls)
# print all of them (the [:] slice takes the full list)
print "\n".join(dap_urls[:])
# <markdowncell>
# SOS URLs
# <codecell>
sos_urls = service_urls(csw.records,service_string='urn:x-esri:specification:ServiceType:sos:url')
#remove duplicates and organize
sos_urls = sorted(set(sos_urls))
print "Total SOS:",len(sos_urls)
print "\n".join(sos_urls)
# <markdowncell>
# ### SOS Requirements
# #### Use Pyoos SOS collector to obtain Observation data from COOPS
# <codecell>
#use the get caps to get station start and get time
# <codecell>
# Parse the user-supplied date strings back into datetimes for the collector.
start_time = dt.datetime.strptime(start_date,'%Y-%m-%d %H:%M')
end_time = dt.datetime.strptime(end_date,'%Y-%m-%d %H:%M')
# <codecell>
iso_start = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
iso_end = end_time.strftime('%Y-%m-%dT%H:%M:%SZ')
# Pyoos collector for NOAA CO-OPS SOS, using the NAVD vertical datum.
collector = CoopsSos()
collector.set_datum('NAVD')
collector.server.identification.title
collector.start_time = start_time
collector.end_time = end_time
collector.variables = [data_dict["water"]["sos_name"]]
# <codecell>
print "Date: ",iso_start," to ", iso_end
box_str=','.join(str(e) for e in box)
print "Lat/Lon Box: ",box_str
#grab the sos url and use it for the service
url=(sos_urls[0].split("?")[0]+'?'
     'service=SOS&request=GetObservation&version=1.0.0&'
     'observedProperty=%s&offering=urn:ioos:network:NOAA.NOS.CO-OPS:WaterLevelActive&'
     'featureOfInterest=BBOX:%s&responseFormat=text/tab-separated-values&eventTime=%s') % (sos_name,box_str,iso_end)
r = requests.get(url)
data = r.text
#get the headers for the cols (first line of the TSV response)
data = data.split("\n")
headers = data[0]
station_list_dict = dict()
#parse the headers so i can create a dict mapping field name -> column index
c = 0
for h in headers.split("\t"):
    # Column names look like "field:qualifier (units)"; keep only the bare field.
    field = h.split(":")[0].split(" ")[0]
    station_list_dict[field] = {"id":c}
    c+=1
# <codecell>
def get_coops_longName(sta):
    """
    get longName for specific station from COOPS SOS using DescribeSensor request
    """
    # Build a DescribeSensor request against the same SOS endpoint used above.
    url=(sos_urls[0].split("?")[0]+'?service=SOS&'
         'request=DescribeSensor&version=1.0.0&outputFormat=text/xml;subtype="sensorML/1.0.1"&'
         'procedure=%s') % sta
    tree = etree.parse(urllib2.urlopen(url))
    root = tree.getroot()
    # XPath into the SensorML identification block for the human-readable name.
    longName=root.xpath("//sml:identifier[@name='longName']/sml:Term/sml:value/text()", namespaces={'sml':"http://www.opengis.net/sensorML/1.0.1"})
    # NOTE(review): this returns the xpath result (a list of text nodes,
    # possibly empty), not a plain string — callers must index into it.
    return longName
# <codecell>
#finds the max value given a list of {"t": timestamp, "v": value} records
def findMaxVal(data):
    """Return (max value, sample count, timestamp of the max) for *data*.

    data: iterable of dicts with keys "t" (timestamp) and "v" (numeric string
    or number), as returned by the CO-OPS datagetter JSON API.
    """
    dates_array = []
    vals_array = []
    for x in data:
        dates_array.append(str(x["t"]))
        vals_array.append(x["v"])
    # dtype=float replaces dtype=np.float: the np.float alias was deprecated in
    # NumPy 1.20 and removed in 1.24 (it was always just the builtin float).
    p = np.array(vals_array, dtype=float)
    max_val = np.amax(p)
    max_idx = np.argmax(p)
    return (max_val, len(p), dates_array[max_idx])
# <markdowncell>
# #### Extract the Observation Data from the collector
# <codecell>
def coops2data(collector,station_id,sos_name):
    """Fetch yearly observation maxima for one CO-OPS station.

    Queries the tidesandcurrents.noaa.gov data getter once per calendar year
    between ``collector.start_time`` and ``collector.end_time`` (inclusive)
    and records each year's maximum value.

    Parameters
    ----------
    collector : CoopsSos
        Supplies the year range; NOTE: it is also mutated — ``features`` and
        ``variables`` are overwritten below.
    station_id : str
        CO-OPS station number, e.g. '8449130'.
    sos_name : str
        Data getter product name, e.g. 'high_low'.

    Returns
    -------
    dict
        {year string: {"max", "num_samples", "date_string", "raw"}} for each
        year that returned data. Great Lakes stations are skipped entirely.
    """
    collector.features = [station_id]
    collector.variables = [sos_name]
    station_data = dict()
    #loop through the years and get the data needed
    for year_station in range(int(collector.start_time.year),collector.end_time.year+1):
        link = "http://tidesandcurrents.noaa.gov/api/datagetter?product="+sos_name+"&application=NOS.COOPS.TAC.WL&"
        date1 = "begin_date="+str(year_station)+"0101"
        date2 = "&end_date="+str(year_station)+"1231"
        # NOTE(review): the request carries both units=metric and (via
        # station_request) units=english — confirm which one the API honors.
        datum = "&datum=MHHW"
        units = "&units=metric"
        station_request = "&station="+station_id+"&time_zone=GMT&units=english&format=json"
        http_request = link+date1+date2+units+datum+station_request
        #print http_request
        d_r = requests.get(http_request,timeout=20)
        if "Great Lake station" in d_r.text:
            pass
        else:
            key_list = d_r.json().keys()
            if "data" in key_list:
                data = d_r.json()['data']
                max_value,num_samples,date_string = findMaxVal(data)
                station_data[str(year_station)] = {"max":max_value,"num_samples":num_samples,"date_string":date_string,"raw":data}
                #print "\tyear:",year_station," MaxValue:",max_value
    return station_data
# <codecell>
#create dict of stations
# Each TSV data row becomes a dict keyed by the column names discovered in
# station_list_dict; every station parsed here is an observed ("obs") station.
station_list = []
for i in range(1,len(data)):
    station_info = data[i].split("\t")
    station = dict()
    for field in station_list_dict.keys():
        col = station_list_dict[field]["id"]
        # Guard against short/ragged rows (e.g. the trailing empty line).
        if col < len(station_info):
            station[field] = station_info[col]
    station["type"] = "obs"
    station_list.append(station)
# <codecell>
def add_invalid_marker(map,s,popup_string):
    """Drop a large red circle on ``map`` marking station ``s`` as unusable.

    ``s`` must provide "latitude" and "longitude" keys; ``popup_string`` is
    the HTML shown when the marker is clicked.
    """
    map.circle_marker(location=[s["latitude"],s["longitude"]], popup=popup_string, fill_color='#ff0000', radius=10000, line_color='#ff0000')
# <markdowncell>
# TODO: Add check before extracting the data to see if the required number of years will be met, i.e use SOS GetCaps and begin and end time
# <codecell>
def does_station_have_enough_times():
    """Placeholder for the pre-download temporal-coverage check.

    Always reports True for now; the TODO above describes the intended
    GetCapabilities-based begin/end time comparison.
    """
    return True
# <codecell>
#Embeds the HTML source of the map directly into the IPython notebook.
def inline_map(map):
    """Render a folium map inline by embedding its HTML in an iframe."""
    map._build_map()
    # BUG FIX: the replace() previously substituted '"' with itself (a no-op),
    # so raw double quotes inside the map HTML terminated the srcdoc="..."
    # attribute early. Escape them as &quot; instead.
    return HTML('<iframe srcdoc="{srcdoc}" style="width: 100%; height: 500px; border: none"></iframe>'.format(srcdoc=map.HTML.replace('"', '&quot;')))
#print bounding_box[0]
# Build the results map centered on the first bounding-box vertex; stations
# with enough years of data get a default marker, the rest a red circle.
map = folium.Map(location=[bounding_box[0][1], bounding_box[0][0]], zoom_start=6)
station_yearly_max = []
for s in station_list:
    # BUG FIX: was `s["type"] is "obs"`, which compares object identity and
    # only worked by the accident of CPython string interning; use equality.
    if s["type"] == "obs": #if its an obs station
        #get the long name
        s["long_name"] =get_coops_longName(s['station_id'])
        s["station_num"] = str(s['station_id']).split(':')[-1]
        #this is different than sos name, hourly height is hourly water level
        s["data"] = coops2data(collector,s["station_num"],"high_low")
        #verifies that there is the required amount of data at the station
        if "latitude" in s:
            if len(s["data"].keys()) >= num_years_required:
                popup_string = '<b>Station:</b><br>'+str(s['station_id']) + "<br><b>Long Name:</b><br>"+str(s["long_name"])
                map.simple_marker([s["latitude"],s["longitude"]],popup=popup_string)
            else:
                popup_string = '<b>Not Enough Station Data for number of years requested</b><br><br>Num requested:'+str(num_years_required)+'<br>Num Available:'+str(len(s["data"].keys()))+'<br><b>Station:</b><br>'+str(s['station_id']) + "<br><b>Long Name:</b><br>"+str(s["long_name"])
                add_invalid_marker(map,s,popup_string)
    else: #if its a model station
        if "latitude" in s:
            popup_string = '<b>Station:</b><br>'+str(s['station_id']) + "<br><b>Long Name:</b><br>"+str(s["long_name"])
            map.simple_marker([s["latitude"],s["longitude"]],popup=popup_string)
# Create the map and add the bounding box line
map.line(get_coordinates(bounding_box,bounding_box_type), line_color='#FF0000', line_weight=5)
#show map of results
inline_map(map)
# <markdowncell>
# ### Creates a time series plot only showing those stations that have enough data
# <codecell>
import prettyplotlib as ppl
# Set the random seed for consistency
np.random.seed(12)
fig, ax = plt.subplots(1)
# Show the whole color range
# One scatter series per qualifying station: x = year, y = that year's
# maximum water level.
for s in station_list:
    if "data" in s:
        years = s["data"].keys()
        #only show the stations with enough data
        if len(s["data"].keys()) >= num_years_required:
            xx = []
            yx = []
            for y in years:
                xx.append(int(y))
                val = s["data"][y]["max"]
                yx.append(val)
            ax.scatter(xx,yx,marker='o')
            ppl.scatter(ax, xx, yx, alpha=0.8, edgecolor='black', linewidth=0.15, label=str(s["station_num"]))
            #ax.scatter(xx, yx, label=str(s["station_num"]))
ppl.legend(ax, loc='right', ncol=1)
#legend = ax.legend(loc='best')
# The frame is matplotlib.patches.Rectangle instance surrounding the legend.
#frame = legend.get_frame()
# NOTE(review): `s` is whatever station the loop ended on, so this title is
# built from the last station only (and is unused below).
title = s["long_name"][0] + ' water level'
ax.set_xlabel('Year')
ax.set_ylabel('water level (m)')
ax.set_title("Stations exceeding "+str(num_years_required)+ " years worth of water level data (MHHW)")
fig.set_size_inches(14,8)
# <markdowncell>
# ### Number of stations available by number of years
# <codecell>
fig, ax = plt.subplots(1)
# Histogram of how many years of data each station has available.
year_list_map = []
for s in station_list:
    if "data" in s:
        years = s["data"].keys()
        year_list_map.append(len(years))
ppl.hist(ax,np.array(year_list_map), grid='y')
# Red vertical line marks the minimum number of years the user requested.
plt.plot([num_years_required, num_years_required], [0, 8], 'r-', lw=2)
ax.set_ylabel("Number of Stations")
ax.set_xlabel("Number of Years Available")
ax.set_title("Number of available stations vs available years\n(for bounding box) - red is minimum requested years")
#
# <markdowncell>
# ### Get Model Data, uses the netcdf4 library to get the model data (<b>Still in Development</b>)
# #### Obtains the model data from a given dap url, for a given location
# #### TODO: Temporal extraction based on temporal constraints
# <codecell>
#### IJ GRID READER
#use the simple grid to find the data requested
#lat_var,lon_var are pointers to the data
def find_closest_pts_ij(lat_var,lon_var,f_lat,f_lon):
    """Search an i,j (lat x lon) grid for the cell nearest a target point.

    Brute-force scan over every (lat[i], lon[j]) combination, keeping the
    pair with the smallest planar (Euclidean, not great-circle) distance to
    (f_lat, f_lon).

    Returns [lat, lon, i_index, j_index] of the winning cell.
    """
    lats = lat_var[:]
    lons = lon_var[:]
    target = Point(f_lat, f_lon)
    best = None  # (distance, i, j) of the closest cell seen so far
    for i, grid_lat in enumerate(lats):
        for j, grid_lon in enumerate(lons):
            separation = Point(grid_lat, grid_lon).distance(target)
            if best is None or separation < best[0]:
                best = (separation, i, j)
    _, xidx, yidx = best
    #lat lon index of point
    return [lats[xidx], lons[yidx], xidx, yidx]
#### NCELL GRID READER
#use the simple grid to find the data requested
#lat_var,lon_var are pointers to the data
def find_closest_pts_ncell(map1,lat_var,lon_var,f_lat,f_lon,spacing):
    """Two-pass nearest-node search for unstructured (ncell) grids.

    First scans every ``spacing``-th node for a coarse winner, then rescans
    a +/- 2*spacing window around it at full resolution.

    Returns [lat, lon, window_index, distance]; note the index is relative
    to the refined window slice, not to the full node array.
    """
    # Coarse pass over the subsampled nodes.
    x = lat_var[::spacing]
    y = lon_var[::spacing]
    idx = get_dist(x,y,f_lat,f_lon,'#666699','#666699',map1,False)
    #find the idx that is closest
    print spacing," :index: ",idx
    # Convert the subsampled index back to a full-resolution node index.
    idx = idx[0]*spacing
    st_idx = idx-(2*spacing)
    ed_idx = idx+(2*spacing)
    # Fine pass over the window surrounding the coarse winner.
    x = lat_var[st_idx:ed_idx]
    y = lon_var[st_idx:ed_idx]
    ret = get_dist(x,y,f_lat,f_lon,'#00FFFF','#33CCFF',map1,False)
    lat = x[ret[0]]
    lon = y[ret[0]]
    #lat lon index of point distance between points
    vals = [lat,lon,ret[0],ret[1]]
    return vals
def get_dist(x,y,f_lat,f_lon,color1,color2,map1,show_pts):
    """Return [index, distance] of the (x, y) pair nearest (f_lat, f_lon).

    Distance is planar (shapely Point.distance). When ``show_pts`` is true,
    every candidate is also drawn on ``map1`` as a small circle marker.
    """
    target = Point(f_lat, f_lon)
    best_idx = -1
    best_dist = -1
    for i, (px, py) in enumerate(zip(x, y)):
        separation = Point(px, py).distance(target)
        # -1 is the "nothing seen yet" sentinel used by the callers.
        if best_dist == -1 or separation < best_dist:
            best_dist = separation
            best_idx = i
        if show_pts:
            map1.circle_marker(location=[px, py], radius=500,popup="idx:"+str(i), line_color=color2,fill_color=color1, fill_opacity=0.3)
    return [best_idx, best_dist]
#### VERIFIES THAT THE GRID IS VALID
def check_grid_is_valid(time_var,lat_var,lon_var,interest_var):
    """Classify a model grid as "ncell", "ij", or None (unusable).

    The decision uses the lat/lon array ranks and the dimension names of the
    variable of interest ("time"/"node" vs "time"/"lat"/"lon").
    """
    grid_type = None
    # there is data with the fields of interest, now lets check the fields for validity
    valid_grid = False
    #they are both the same length
    if len(lon_var.shape) == len(lat_var.shape):
        if lon_var.shape[0] == lat_var.shape[0]:
            #both the same size
            #print "gridded data..."
            valid_grid = True
        else:
            #both different, possibly meaning i,j grid field
            #print "gridded data..."
            valid_grid = True
    else:
        print "shapes are different?...moving on..."
        valid_grid = False
    if valid_grid:
        #find out what the grid is structured
        if (len(interest_var.dimensions) == 2) and (interest_var.dimensions[0] == "time") and (interest_var.dimensions[1] == "node"):
            #ncell
            grid_type = "ncell"
            pass
        elif (len(interest_var.dimensions) == 3) and (interest_var.dimensions[0] == "time") and (interest_var.dimensions[1] == "lat") and (interest_var.dimensions[2] == "lon"):
            #ij
            grid_type = "ij"
            pass
        else:
            #make sure it stays none
            grid_type = None
        if grid_type is not None:
            #can be used to print some info
            #print "dims: ",interest_var.dimensions
            #print "lat: ", lat_var.shape
            #print "lon: ", lon_var.shape
            pass
    return grid_type
def is_model_in_time_range(time_var):
    """Placeholder temporal filter for model datasets.

    Currently accepts every dataset; a real implementation would compare
    ``time_var``'s coverage against the requested window.
    """
    # TODO: implement the actual time-range comparison.
    return True
# use only data where the standard deviation of the time series exceeds 0.01 m (1 cm)
# this eliminates flat line model time series that come from land points that
# should have had missing values.
# min_var_value = 0.01
def data_min_value_met(min_var_value,data):
    """Sanity-check a model time series before plotting it.

    Rejects series whose std dev is inf/NaN or <= ``min_var_value`` (flat
    lines from land points), whose max is below ``min_var_value``, or whose
    max exceeds 999 (fill values). Returns True only for plausible data.
    """
    std_value = np.std(data)
    if np.isinf(std_value):
        print "... value is inf"
        return False
    if np.isnan(std_value):
        print "... value is nan"
        return False
    if np.amax(data) < min_var_value:
        print "...max value to low"
        return False
    if np.amax(data) >999:
        print "...max value to high"
        return False
    if std_value > min_var_value:
        return True
    else:
        print "...std value to low"
        return False
    # Unreachable: every branch above already returned.
    return False
def get_model_data(map1,dap_urls,st_lat,st_lon,start_dt,end_dt,name_list):
    """Scan OPeNDAP endpoints for water-level data near a point.

    For each URL in ``dap_urls``: open the dataset, identify the lon/lat/time
    variables and the variable of interest by units / long_name /
    standard_name, classify the grid, and — for ncell grids whose nearest
    node is close enough to (st_lat, st_lon) — plot the time series and mark
    the node on ``map1``. All per-URL failures are swallowed so one bad
    endpoint cannot abort the scan.

    NOTE(review): ``start_dt``/``end_dt`` are currently unused (temporal
    filtering is still TODO), and nothing is ever appended to
    ``model_data_store``, so the function returns None.
    """
    # use only data within 0.04 degrees (about 4 km)
    max_dist=0.04
    min_var_value = 0.01
    # set the lat,lon and time fields
    lon_list =["degrees_east"]
    lat_list = ["degrees_north"]
    time_list = ["time"]
    model_data_store = []
    for url in dap_urls:
        try:
            #open the url
            nc = netCDF4.Dataset(url, 'r')
            #get the list of variables
            lon_var = None
            lat_var = None
            time_var = None
            interest_var = None
            #get the var
            var_list = nc.variables.keys()
            # Identify the coordinate/time/interest variables by attribute;
            # variables missing an attribute raise and are simply skipped.
            for var in var_list:
                v = nc.variables[var]
                try:
                    #lon
                    if (v.units in lon_list or v.long_name in lon_list) and "zonal" not in v.long_name:
                        lon_var = v
                    #lat
                    elif (v.units in lat_list or v.long_name in lat_list) and "zonal" not in v.long_name:
                        lat_var = v
                    #make sure there is time in there
                    elif v.long_name in time_list or v.standard_name in time_list:
                        time_var = v
                    #get the data of interest
                    elif v.long_name in name_list or v.standard_name in name_list:
                        interest_var = v
                    #it was something else i dont know or care about
                    else:
                        pass
                except Exception, e:
                    #print "\t", e
                    pass
            #is time in range?
            if is_model_in_time_range(time_var):
                #all the variables should be set
                if (lon_var is None) and (lat_var is None) and (time_var is None) and (interest_var is None):
                    pass
                else:
                    #check the grid is valid and of a type
                    grid_type = check_grid_is_valid(time_var,lat_var,lon_var,interest_var)
                    try:
                        if grid_type == "ncell":
                            #
                            #usually ncell grids are massive so lets slice the grid
                            #
                            print "processing the grid..."
                            spacing = 10
                            '''
                            the distance is the Euclidean Distance
                            or Linear distance between two points on a plane
                            and not the Great-circle distance between two points on a sphere
                            TODO convert dist to m
                            see (http://gis.stackexchange.com/questions/80881/what-is-the-unit-the-shapely-length-attribute)
                            '''
                            # vals = lat lon index of point distance between points
                            vals = find_closest_pts_ncell(map1,lat_var,lon_var,st_lat,st_lon,spacing)
                            if vals[3] < 1:
                                #if the dist to the cell is small enough
                                time_vals = time_var[:]
                                data = interest_var[:,vals[2]]
                                data = np.array(data)
                                # Reject flat-line / fill-value series.
                                bool_a = data_min_value_met(min_var_value,data)
                                print bool_a
                                if bool_a:
                                    #add a marker
                                    map1.circle_marker(location=[vals[0], vals[1]], radius=500,popup="dist:"+str(vals[3]), line_color='#33CC33',fill_color='#00FF00', fill_opacity=0.6)
                                    print vals
                                    print url
                                    print "distance To Station:",vals[3]
                                    print "num time values:",len(time_vals)
                                    print "units: ",interest_var.units
                                    x = np.arange(len(time_vals))
                                    plt.figure()
                                    plt.plot(x, data)
                                    plt.title('Water Level');
                                    plt.xlabel('time index')
                                    plt.ylabel(interest_var.units)
                                    #set maxs
                                    plt.ylim([np.amin(data),np.amax(data)])
                                    plt.show()
                                    print "---------------------"
                            pass
                        elif grid_type == "ij":
                            #
                            # IJ
                            #
                            pass
                    except Exception, e:
                        print e
            else:
                print "model not in time range..."
        #something went wrong trying to access the grids
        except RuntimeError, e:
            print "possible connection error for url"
            pass
        except:
            pass
def inline_map(map1):
    """Render a folium map inline in the notebook as a 95%-width iframe."""
    map1._build_map()
    # BUG FIX: the replace() previously substituted '"' with itself (a no-op),
    # leaving raw double quotes that terminate the srcdoc="..." attribute
    # early. Escape them as &quot; so the embedded HTML survives.
    return HTML('<iframe srcdoc="{srcdoc}" style="width: 95%; height: 550px; border: none"></iframe>'.format(srcdoc=map1.HTML.replace('"', '&quot;')))
# Demo: look for model water-level data near one fixed test point.
pt_lat = 41.501
pt_lon = -71
map1 = folium.Map(location=[pt_lat, pt_lon], zoom_start=9)
map1.simple_marker([pt_lat, pt_lon],popup="")
#EXAMPLE get model data for a station
start_time = dt.datetime(2008, 9, 10, 5, 1, 1)
end_time = dt.datetime(2008, 9, 11, 5, 1, 1)
# NOTE(review): get_model_data currently returns None (see its docstring),
# so sample_data is None here.
sample_data = get_model_data(map1,dap_urls,pt_lat,pt_lon,start_time,end_time,data_dict["water"]["names"])
# <markdowncell>
# #### Show model results on a map
# <codecell>
inline_map(map1)
# <headingcell level=3>
# Extreme Value Analysis:
# <codecell>
# Show the whole color range
# Re-walk the stations and capture the yearly maxima (xx = years,
# yx = maxima) of the FIRST station meeting the year requirement — note the
# break below.
for s in station_list:
    if "data" in s:
        years = s["data"].keys()
        #only show the stations with enough data
        if len(s["data"].keys()) >= num_years_required:
            xx = []
            yx = []
            for y in years:
                xx.append(int(y))
                val = s["data"][y]["max"]
                yx.append(val)
            break
# <codecell>
# Annual-maximum series used by the extreme-value analysis below.
annual_max_levels = yx
# <headingcell level=4>
# Fit data to GEV distribution
# <codecell>
def sea_levels_gev_pdf(x):
    """GEV probability density at ``x`` using the module-level fitted
    parameters ``xi`` (shape), ``mu`` (location) and ``sigma`` (scale)."""
    return genextreme.pdf(x, xi, loc=mu, scale=sigma)
# <codecell>
# Maximum-likelihood GEV fit of the annual maxima; genextreme.fit returns
# (shape, location, scale).
mle = genextreme.fit(sorted(annual_max_levels), 0)
mu = mle[1]
sigma = mle[2]
xi = mle[0]
print "The mean, sigma, and shape parameters are %s, %s, and %s, resp." % (mu, sigma, xi)
# <headingcell level=4>
# Probability Density Plot
# <codecell>
# Plot the fitted GEV density over a normalized histogram of the annual
# maxima, padded half a meter either side of the observed range.
min_x = min(annual_max_levels)-0.5
max_x = max(annual_max_levels)+0.5
x = np.linspace(min_x, max_x, num=100)
y = [sea_levels_gev_pdf(z) for z in x]
fig = plt.figure(figsize=(12,6))
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
# `s` is the station left over from the earlier station loop.
xlabel = (s["long_name"][0] + " - Annual max water level (m)")
axes.set_title("Probability Density & Normalized Histogram")
axes.set_xlabel(xlabel)
axes.plot(x, y, color='Red')
axes.hist(annual_max_levels, bins=arange(min_x, max_x, abs((max_x-min_x)/10)), normed=1, color='Yellow')
#
# <headingcell level=4>
# Return Value Plot
# <markdowncell>
# This plot should match NOAA's [Annual Exceedance Probability Curves for station 8449130](http://tidesandcurrents.noaa.gov/est/curves.shtml?stnid=8449130)
# <codecell>
# Reference plots published by NOAA, for visual comparison with the
# return-value curve computed below.
noaa_station_id = 8449130
Image(url='http://tidesandcurrents.noaa.gov/est/curves/high/'+str(noaa_station_id)+'.png')
# <codecell>
Image(url='http://tidesandcurrents.noaa.gov/est/images/color_legend.png')
# <markdowncell>
# <script type="text/javascript">
# $('div.input').show();
# </script>
# <codecell>
# Return-period curve: water level vs recurrence interval from the GEV fit,
# with the observed annual maxima overlaid at their empirical return periods.
fig = plt.figure(figsize=(20,6))
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
T=np.r_[1:250]
sT = genextreme.isf(1./T, 0, mu, sigma)
axes.semilogx(T, sT, 'r'), hold
N=np.r_[1:len(annual_max_levels)+1];
Nmax=max(N);
# Empirical return period of the i-th largest observation is Nmax/i.
axes.plot(Nmax/N, sorted(annual_max_levels)[::-1], 'bo')
title = s["long_name"][0]
axes.set_title(title)
axes.set_xlabel('Return Period (yrs)')
axes.set_ylabel('Meters above MHHW')
axes.set_xticklabels([0,1,10,100,1000])
axes.set_xlim([0,260])
axes.set_ylim([0,1.8])
axes.grid(True)
# <markdowncell>
# This plot does not match exactly. NOAA's curves were calculated using the Extremes Toolkit software package in R whereas this notebook uses scipy. There is a python package based on the Extremes Toolkit called pywafo but this is experimental and isn't building properly on Mac OS X
| |
"""
Define @jit and related decorators.
"""
import sys
import warnings
import inspect
import logging
from numba.core.errors import DeprecationError, NumbaDeprecationWarning
from numba.stencils.stencil import stencil
from numba.core import config, extending, sigutils, registry
_logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Decorators
_msg_deprecated_signature_arg = ("Deprecated keyword argument `{0}`. "
"Signatures should be passed as the first "
"positional argument.")
def jit(signature_or_function=None, locals=None, cache=False,
        pipeline_class=None, boundscheck=None, **options):
    """
    This decorator is used to compile a Python function into native code.

    Args
    -----
    signature_or_function:
        The (optional) signature or list of signatures to be compiled.
        If not passed, required signatures will be compiled when the
        decorated function is called, depending on the argument values.
        As a convenience, you can directly pass the function to be compiled
        instead.

    locals: dict
        Mapping of local variable names to Numba types. Used to override the
        types deduced by Numba's type inference engine.

    pipeline_class: type numba.compiler.CompilerBase
        The compiler pipeline type for customizing the compilation stages.

    options:
        For a cpu target, valid options are:

        nopython: bool
            Set to True to disable the use of PyObjects and Python API
            calls. The default behavior is to allow the use of PyObjects
            and Python API. Default value is False.

        forceobj: bool
            Set to True to force the use of PyObjects for every value.
            Default value is False.

        looplift: bool
            Set to True to enable jitting loops in nopython mode while
            leaving surrounding code in object mode. This allows functions
            to allocate NumPy arrays and use Python objects, while the
            tight loops in the function can still be compiled in nopython
            mode. Any arrays that the tight loop uses should be created
            before the loop is entered. Default value is True.

        error_model: str
            The error-model affects divide-by-zero behavior.
            Valid values are 'python' and 'numpy'. The 'python' model
            raises exception. The 'numpy' model sets the result to
            *+/-inf* or *nan*. Default value is 'python'.

        inline: str or callable
            The inline option will determine whether a function is inlined
            at into its caller if called. String options are 'never'
            (default) which will never inline, and 'always', which will
            always inline. If a callable is provided it will be called with
            the call expression node that is requesting inlining, the
            caller's IR and callee's IR as arguments, it is expected to
            return Truthy as to whether to inline.
            NOTE: This inlining is performed at the Numba IR level and is in
            no way related to LLVM inlining.

        boundscheck: bool or None
            Set to True to enable bounds checking for array indices. Out
            of bounds accesses will raise IndexError. The default is to
            not do bounds checking. If False, bounds checking is disabled,
            out of bounds accesses can produce garbage results or segfaults.
            However, enabling bounds checking will slow down typical
            functions, so it is recommended to only use this flag for
            debugging. You can also set the NUMBA_BOUNDSCHECK environment
            variable to 0 or 1 to globally override this flag. The default
            value is None, which under normal execution equates to False,
            but if debug is set to True then bounds checking will be
            enabled.

    Returns
    --------
    A callable usable as a compiled function. Actual compiling will be
    done lazily if no explicit signatures are passed.

    Examples
    --------
    The function can be used in the following ways:

    1) jit(signatures, **targetoptions) -> jit(function)

        Equivalent to:

            d = dispatcher(function, targetoptions)
            for signature in signatures:
                d.compile(signature)

        Create a dispatcher object for a python function. Then, compile
        the function with the given signature(s).

        Example:

            @jit("int32(int32, int32)")
            def foo(x, y):
                return x + y

            @jit(["int32(int32, int32)", "float32(float32, float32)"])
            def bar(x, y):
                return x + y

    2) jit(function, **targetoptions) -> dispatcher

        Create a dispatcher function object that specializes at call site.

        Examples:

            @jit
            def foo(x, y):
                return x + y

            @jit(nopython=True)
            def bar(x, y):
                return x + y

    """
    # Fail fast on keyword arguments from the deprecated pre-signature API.
    if 'argtypes' in options:
        raise DeprecationError(_msg_deprecated_signature_arg.format('argtypes'))
    if 'restype' in options:
        raise DeprecationError(_msg_deprecated_signature_arg.format('restype'))
    if options.get('nopython', False) and options.get('forceobj', False):
        raise ValueError("Only one of 'nopython' or 'forceobj' can be True.")

    # FIX: `locals` previously defaulted to a mutable `{}` shared between all
    # calls of jit(); normalize a None sentinel to a fresh dict instead.
    if locals is None:
        locals = {}

    if "_target" in options:
        # Set the "target_backend" option if "_target" is defined.
        options['target_backend'] = options['_target']
    target = options.pop('_target', 'cpu')

    options['boundscheck'] = boundscheck

    # Handle signature: decide whether we were given nothing, signature(s),
    # or the function itself (bare-decorator form).
    if signature_or_function is None:
        # No signature, no function
        pyfunc = None
        sigs = None
    elif isinstance(signature_or_function, list):
        # A list of signatures is passed
        pyfunc = None
        sigs = signature_or_function
    elif sigutils.is_signature(signature_or_function):
        # A single signature is passed
        pyfunc = None
        sigs = [signature_or_function]
    else:
        # A function is passed
        pyfunc = signature_or_function
        sigs = None

    dispatcher_args = {}
    if pipeline_class is not None:
        dispatcher_args['pipeline_class'] = pipeline_class
    wrapper = _jit(sigs, locals=locals, target=target, cache=cache,
                   targetoptions=options, **dispatcher_args)
    if pyfunc is not None:
        return wrapper(pyfunc)
    else:
        return wrapper
def _jit(sigs, locals, target, cache, targetoptions, **dispatcher_args):
    """Shared implementation behind the jit-family decorators: build the
    decorator that turns a Python function into a Dispatcher for ``target``,
    optionally pre-compiling the explicit signatures in ``sigs``."""
    from numba.core.target_extension import resolve_dispatcher_from_str
    # Look up the Dispatcher class registered for this target string.
    dispatcher = resolve_dispatcher_from_str(target)

    def wrapper(func):
        # Reject double-jitting and non-function objects up front.
        if extending.is_jitted(func):
            raise TypeError(
                "A jit decorator was called on an already jitted function "
                f"{func}. If trying to access the original python "
                f"function, use the {func}.py_func attribute."
            )
        if not inspect.isfunction(func):
            raise TypeError(
                "The decorated object is not a function (got type "
                f"{type(func)})."
            )
        # The CUDA simulator and NUMBA_DISABLE_JIT bypass real compilation.
        if config.ENABLE_CUDASIM and target == 'cuda':
            from numba import cuda
            return cuda.jit(func)
        if config.DISABLE_JIT and not target == 'npyufunc':
            return func
        disp = dispatcher(py_func=func, locals=locals,
                          targetoptions=targetoptions,
                          **dispatcher_args)
        if cache:
            disp.enable_caching()
        if sigs is not None:
            # Register the Dispatcher to the type inference mechanism,
            # even though the decorator hasn't returned yet.
            from numba.core import typeinfer
            with typeinfer.register_dispatcher(disp):
                for sig in sigs:
                    disp.compile(sig)
            # Explicit signatures were given: freeze the specialization set.
            disp.disable_compile()
        return disp

    return wrapper
def generated_jit(function=None, cache=False,
                  pipeline_class=None, **options):
    """
    This decorator allows flexible type-based compilation
    of a jitted function. It works as `@jit`, except that the decorated
    function is called at compile-time with the *types* of the arguments
    and should return an implementation function for those types.
    """
    extra_args = (
        {'pipeline_class': pipeline_class} if pipeline_class is not None else {}
    )
    decorate = _jit(sigs=None, locals={}, target='cpu', cache=cache,
                    targetoptions=options, impl_kind='generated',
                    **extra_args)
    # Support both the bare @generated_jit and @generated_jit(...) forms.
    return decorate if function is None else decorate(function)
def njit(*args, **kws):
    """
    Equivalent to jit(nopython=True)

    See documentation for jit function/decorator for full description.
    """
    # nopython/forceobj make no sense here: njit *is* nopython mode.
    if 'nopython' in kws:
        warnings.warn('nopython is set for njit and is ignored', RuntimeWarning)
    if 'forceobj' in kws:
        warnings.warn('forceobj is set for njit and is ignored', RuntimeWarning)
        kws.pop('forceobj')
    kws['nopython'] = True
    return jit(*args, **kws)
def cfunc(sig, locals=None, cache=False, pipeline_class=None, **options):
    """
    This decorator is used to compile a Python function into a C callback
    usable with foreign C libraries.

    Usage::
        @cfunc("float64(float64, float64)", nopython=True, cache=True)
        def add(a, b):
            return a + b
    """
    # FIX: avoid a mutable `{}` default shared across every call (same
    # normalization as jit()).
    if locals is None:
        locals = {}
    sig = sigutils.normalize_signature(sig)

    def wrapper(func):
        from numba.core.ccallback import CFunc
        additional_args = {}
        if pipeline_class is not None:
            additional_args['pipeline_class'] = pipeline_class
        res = CFunc(func, sig, locals=locals, options=options, **additional_args)
        if cache:
            res.enable_caching()
        # Unlike jit, cfunc compiles eagerly for its single signature.
        res.compile()
        return res
    return wrapper
def jit_module(**kwargs):
    """ Automatically ``jit``-wraps functions defined in a Python module

    Note that ``jit_module`` should only be called at the end of the module to
    be jitted. In addition, only functions which are defined in the module
    ``jit_module`` is called from are considered for automatic jit-wrapping.
    See the Numba documentation for more information about what can/cannot be
    jitted.

    :param kwargs: Keyword arguments to pass to ``jit`` such as ``nopython``
                   or ``error_model``.
    """
    # Get the module jit_module is being called from
    frame = inspect.stack()[1]
    module = inspect.getmodule(frame[0])
    # Replace functions in module with jit-wrapped versions
    # (only existing keys are reassigned, so the dict size never changes
    # while .items() is being iterated)
    for name, obj in module.__dict__.items():
        if inspect.isfunction(obj) and inspect.getmodule(obj) == module:
            _logger.debug("Auto decorating function {} from module {} with jit "
                          "and options: {}".format(obj, module.__name__, kwargs))
            module.__dict__[name] = jit(obj, **kwargs)
| |
import os
import sys
import textwrap
import glob
from os.path import join, curdir, pardir
import pytest
from pip import pep425tags
from pip.utils import appdirs, rmtree
from tests.lib import (pyversion, pyversion_tuple,
_create_test_package, _create_svn_repo, path_to_url,
requirements_file)
from tests.lib.local_repos import local_checkout
from tests.lib.path import Path
def test_without_setuptools(script, data):
    """Installing an sdist with setuptools absent must fail with a clear hint."""
    script.run("pip", "uninstall", "setuptools", "-y")
    # --no-binary=:all: forces a source build, which needs setuptools.
    result = script.run(
        "python", "-c",
        "import pip; pip.main(["
        "'install', "
        "'INITools==0.2', "
        "'-f', '%s', "
        "'--no-binary=:all:'])" % data.packages,
        expect_error=True,
    )
    assert (
        "Could not import setuptools which is required to install from a "
        "source distribution."
        in result.stderr
    )
    assert "Please install setuptools" in result.stderr
def test_with_setuptools_and_import_error(script, data):
    """A broken setuptools must surface its own ImportError traceback."""
    # Make sure we get an ImportError while importing setuptools
    setuptools_init_path = script.site_packages_path.join(
        "setuptools", "__init__.py")
    with open(setuptools_init_path, 'a') as f:
        f.write('\nraise ImportError("toto")')
    result = script.run(
        "python", "-c",
        "import pip; pip.main(["
        "'install', "
        "'INITools==0.2', "
        "'-f', '%s', "
        "'--no-binary=:all:'])" % data.packages,
        expect_error=True,
    )
    assert (
        "Could not import setuptools which is required to install from a "
        "source distribution."
        in result.stderr
    )
    # The original ImportError must be reported, not swallowed.
    assert "Traceback " in result.stderr
    assert "ImportError: toto" in result.stderr
def test_pip_second_command_line_interface_works(script, data):
    """
    Check if ``pip<PYVERSION>`` commands behaves equally
    """
    # On old versions of Python, urllib3/requests will raise a warning about
    # the lack of an SSLContext.
    kwargs = {}
    if pyversion_tuple < (2, 7, 9):
        kwargs['expect_stderr'] = True
    # Invoke the version-suffixed console script, e.g. ``pip2.7``.
    args = ['pip%s' % pyversion]
    args.extend(['install', 'INITools==0.2'])
    args.extend(['-f', data.packages])
    result = script.run(*args, **kwargs)
    # Both the egg-info metadata and the package directory must be created.
    egg_info_folder = (
        script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion
    )
    initools_folder = script.site_packages / 'initools'
    assert egg_info_folder in result.files_created, str(result)
    assert initools_folder in result.files_created, str(result)
@pytest.mark.network
def test_install_from_pypi(script):
    """
    Test installing a package from PyPI.
    """
    result = script.pip('install', '-vvv', 'INITools==0.2')
    # Both the egg-info metadata and the package directory must be created.
    egg_info_folder = (
        script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion
    )
    initools_folder = script.site_packages / 'initools'
    assert egg_info_folder in result.files_created, str(result)
    assert initools_folder in result.files_created, str(result)
def test_editable_install(script):
    """
    Test editable installation.
    """
    # A bare requirement specifier is not a valid editable target.
    result = script.pip('install', '-e', 'INITools==0.2', expect_error=True)
    expected_message = (
        "INITools==0.2 should either be a path to a local project or a VCS url"
    )
    assert expected_message in result.stderr
    assert not result.files_created
    assert not result.files_updated
def test_install_editable_from_svn(script):
    """
    Test checking out from svn.
    """
    # Build a local package, publish it to a throwaway svn repo, then
    # editable-install straight from that repo.
    checkout_path = _create_test_package(script)
    repo_url = _create_svn_repo(script, checkout_path)
    result = script.pip(
        'install',
        '-e', 'svn+' + repo_url + '#egg=version-pkg'
    )
    result.assert_installed('version-pkg', with_files=['.svn'])
@pytest.mark.network
def test_download_editable_to_custom_path(script, tmpdir):
    """
    Test downloading an editable using a relative custom src folder.
    """
    script.scratch_path.join("customdl").mkdir()
    result = script.pip(
        'install',
        '-e',
        '%s#egg=initools-dev' %
        local_checkout(
            'svn+http://svn.colorstudy.com/INITools/trunk',
            tmpdir.join("cache")
        ),
        '--src',
        'customsrc',
        '--download',
        'customdl',
        expect_stderr=True
    )
    # The editable checkout must land under the custom --src directory...
    customsrc = Path('scratch') / 'customsrc' / 'initools'
    assert customsrc in result.files_created, (
        sorted(result.files_created.keys())
    )
    assert customsrc / 'setup.py' in result.files_created, (
        sorted(result.files_created.keys())
    )
    # ...and the downloaded archive under the custom --download directory.
    customdl = Path('scratch') / 'customdl' / 'initools'
    customdl_files_created = [
        filename for filename in result.files_created
        if filename.startswith(customdl)
    ]
    assert customdl_files_created
    # --download is deprecated, so the deprecation notice must be printed.
    assert ('DEPRECATION: pip install --download has been deprecated and will '
            'be removed in the future. Pip now has a download command that '
            'should be used instead.') in result.stderr
@pytest.mark.network
def test_install_dev_version_from_pypi(script):
    """
    Test using package==dev.
    """
    # The arbitrary-equality specifier (===) should match the 'dev' version.
    result = script.pip('install', 'INITools===dev', expect_error=True)
    assert (script.site_packages / 'initools') in result.files_created, (
        str(result.stdout)
    )
def _test_install_editable_from_git(script, tmpdir, wheel):
    """Test cloning from Git."""
    # Shared helper for the with-wheel / without-wheel variants below.
    if wheel:
        script.pip('install', 'wheel')
    pkg_path = _create_test_package(script, name='testpackage', vcs='git')
    args = ['install', '-e', 'git+%s#egg=testpackage' % path_to_url(pkg_path)]
    result = script.pip(*args, **{"expect_error": True})
    result.assert_installed('testpackage', with_files=['.git'])
def test_install_editable_from_git(script, tmpdir):
    # Editable git install without the wheel package available.
    _test_install_editable_from_git(script, tmpdir, False)
def test_install_editable_from_git_autobuild_wheel(script, tmpdir):
    # Editable git install with the wheel package installed first.
    _test_install_editable_from_git(script, tmpdir, True)
def test_install_editable_from_hg(script, tmpdir):
    """Test cloning from Mercurial."""
    repo_path = _create_test_package(script, name='testpackage', vcs='hg')
    requirement = 'hg+%s#egg=testpackage' % path_to_url(repo_path)
    result = script.pip('install', '-e', requirement, expect_error=True)
    result.assert_installed('testpackage', with_files=['.hg'])
def test_vcs_url_final_slash_normalization(script, tmpdir):
    """
    Test that presence or absence of final slash in VCS URL is normalized.
    """
    repo_path = _create_test_package(script, name='testpackage', vcs='hg')
    # Note the trailing slash before the fragment.
    requirement = 'hg+%s/#egg=testpackage' % path_to_url(repo_path)
    result = script.pip('install', '-e', requirement, expect_error=True)
    result.assert_installed('testpackage', with_files=['.hg'])
def test_install_editable_from_bazaar(script, tmpdir):
    """Test checking out from Bazaar."""
    repo_path = _create_test_package(script, name='testpackage', vcs='bazaar')
    requirement = 'bzr+%s/#egg=testpackage' % path_to_url(repo_path)
    result = script.pip('install', '-e', requirement, expect_error=True)
    result.assert_installed('testpackage', with_files=['.bzr'])
@pytest.mark.network
def test_vcs_url_urlquote_normalization(script, tmpdir):
    """
    Test that urlquoted characters are normalized for repo URL comparison.
    """
    # %7E is the urlquoted form of '~'; the install succeeds only if pip
    # treats the quoted and unquoted URLs as the same repository.
    script.pip(
        'install', '-e',
        '%s/#egg=django-wikiapp' %
        local_checkout(
            'bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp'
            '/release-0.1',
            tmpdir.join("cache"),
        ),
    )
def test_install_from_local_directory(script, data):
    """
    Test installing from a local directory.
    """
    to_install = data.packages.join("FSPkg")
    result = script.pip('install', to_install, expect_error=False)
    # Both the package and its egg-info metadata must be created.
    fspkg_folder = script.site_packages / 'fspkg'
    egg_info_folder = (
        script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
    )
    assert fspkg_folder in result.files_created, str(result.stdout)
    assert egg_info_folder in result.files_created, str(result)
def test_hashed_install_success(script, data, tmpdir):
    """
    Test that installing various sorts of requirements with correct hashes
    works.

    Test file URLs and index packages (which become HTTP URLs behind the
    scenes).
    """
    file_url = path_to_url(
        (data.packages / 'simple-1.0.tar.gz').abspath)
    # One requirement from the test index, one from a file:// URL, each
    # pinned with its correct sha256 hash.
    with requirements_file(
        'simple2==1.0 --hash=sha256:9336af72ca661e6336eb87bc7de3e8844d853e'
        '3848c2b9bbd2e8bf01db88c2c7\n'
        '{simple} --hash=sha256:393043e672415891885c9a2a0929b1af95fb866d6c'
        'a016b42d2e6ce53619b653'.format(simple=file_url),
        tmpdir) as reqs_file:
        script.pip_install_local('-r', reqs_file.abspath, expect_error=False)
def test_hashed_install_failure(script, data, tmpdir):
    """Test that wrong hashes stop installation.
    This makes sure prepare_files() is called in the course of installation
    and so has the opportunity to halt if hashes are wrong. Checks on various
    kinds of hashes are in test_req.py.
    """
    # The sha256 below is deliberately wrong (truncated) for simple2-1.0.
    with requirements_file('simple2==1.0 --hash=sha256:9336af72ca661e6336eb87b'
                           'c7de3e8844d853e3848c2b9bbd2e8bf01db88c2c\n',
                           tmpdir) as reqs_file:
        result = script.pip_install_local('-r',
                                          reqs_file.abspath,
                                          expect_error=True)
    # Nothing at all may be installed on a hash mismatch.
    assert len(result.files_created) == 0
def test_install_from_local_directory_with_symlinks_to_directories(
        script, data):
    """
    Test installing from a local directory containing symlinks to directories.
    """
    result = script.pip('install', data.packages.join("symlinks"),
                        expect_error=False)
    # Symlinked package dirs must be followed and installed normally.
    pkg_dir = script.site_packages / 'symlinks'
    egg_info_dir = script.site_packages / (
        'symlinks-0.1.dev0-py%s.egg-info' % pyversion)
    assert pkg_dir in result.files_created, str(result.stdout)
    assert egg_info_dir in result.files_created, str(result)
def test_install_from_local_directory_with_no_setup_py(script, data):
    """
    Test installing from a local directory with no 'setup.py'.
    """
    err = script.pip('install', data.root, expect_error=True)
    # Nothing may be installed, and pip must explain what is missing.
    assert not err.files_created
    msg = "is not installable. File 'setup.py' not found."
    assert msg in err.stderr
def test_editable_install_from_local_directory_with_no_setup_py(script, data):
    """
    Test editable-installing from a local directory with no 'setup.py'.
    """
    err = script.pip('install', '-e', data.root, expect_error=True)
    assert not err.files_created
    msg = "is not installable. File 'setup.py' not found."
    assert msg in err.stderr
def test_install_as_egg(script, data):
    """
    Test installing as egg, instead of flat install.
    """
    to_install = data.packages.join("FSPkg")
    result = script.pip('install', to_install, '--egg', expect_error=False)
    fspkg_folder = script.site_packages / 'fspkg'
    egg_folder = script.site_packages / 'FSPkg-0.1.dev0-py%s.egg' % pyversion
    # With --egg the package lives inside the .egg directory, not flat in
    # site-packages.
    assert fspkg_folder not in result.files_created, str(result.stdout)
    assert egg_folder in result.files_created, str(result)
    assert join(egg_folder, 'fspkg') in result.files_created, str(result)
def test_install_curdir(script, data):
    """
    Test installing current directory ('.').
    """
    run_from = data.packages.join("FSPkg")
    # Python 2.4 Windows balks if this exists already
    egg_info = join(run_from, "FSPkg.egg-info")
    if os.path.isdir(egg_info):
        rmtree(egg_info)
    # '.' resolves against cwd=run_from, i.e. the FSPkg source tree.
    result = script.pip('install', curdir, cwd=run_from, expect_error=False)
    fspkg_folder = script.site_packages / 'fspkg'
    egg_info_folder = (
        script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
    )
    assert fspkg_folder in result.files_created, str(result.stdout)
    assert egg_info_folder in result.files_created, str(result)
def test_install_pardir(script, data):
    """
    Test installing parent directory ('..').
    """
    # Run pip from a subdirectory so '..' resolves to FSPkg's source root.
    run_from = data.packages.join("FSPkg", "fspkg")
    result = script.pip('install', pardir, cwd=run_from, expect_error=False)
    expected = (
        script.site_packages / 'fspkg',
        script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion,
    )
    for created in expected:
        assert created in result.files_created, str(result)
@pytest.mark.network
def test_install_global_option(script):
    """
    Test using global distutils options.
    (In particular those that disable the actual install action)
    """
    # --global-option=--version just prints the version, so nothing is
    # actually installed.
    result = script.pip(
        'install', '--global-option=--version', "INITools==0.1",
        expect_stderr=True)
    assert '0.1\n' in result.stdout
def test_install_with_pax_header(script, data):
    """
    test installing from a tarball with pax header for python<2.6
    """
    # The tarball lives in the shared packages directory; installing it must
    # not choke on the PAX extended header.
    packages_dir = data.packages
    script.pip('install', 'paxpkg.tar.bz2', cwd=packages_dir)
def test_install_with_hacked_egg_info(script, data):
    """
    test installing a package which defines its own egg_info class
    """
    # The package overrides setuptools' egg_info command; install must still
    # succeed.
    run_from = data.packages.join("HackedEggInfo")
    result = script.pip('install', '.', cwd=run_from)
    assert 'Successfully installed hackedegginfo-0.0.0\n' in result.stdout
@pytest.mark.network
def test_install_using_install_option_and_editable(script, tmpdir):
    """
    Test installing a tool using -e and --install-option
    """
    folder = 'script_folder'
    script.scratch_path.join(folder).mkdir()
    url = 'git+git://github.com/pypa/pip-test-package'
    result = script.pip(
        'install', '-e', '%s#egg=pip-test-package' %
        local_checkout(url, tmpdir.join("cache")),
        '--install-option=--script-dir=%s' % folder,
        expect_stderr=True)
    # The console script must land in the custom --script-dir inside the
    # editable checkout, not in the venv's bin directory.
    script_file = (
        script.venv / 'src' / 'pip-test-package' /
        folder / 'pip-test-package' + script.exe
    )
    assert script_file in result.files_created
@pytest.mark.network
def test_install_global_option_using_editable(script, tmpdir):
    """
    Test using global distutils options, but in an editable installation
    """
    url = 'hg+http://bitbucket.org/runeh/anyjson'
    # --global-option must be forwarded to the editable (setup.py develop)
    # invocation as well.
    result = script.pip(
        'install', '--global-option=--version', '-e',
        '%s@0.2.5#egg=anyjson' % local_checkout(url, tmpdir.join("cache")),
        expect_stderr=True)
    assert 'Successfully installed anyjson' in result.stdout
@pytest.mark.network
def test_install_package_with_same_name_in_curdir(script):
    """
    Test installing a package with the same name of a local folder
    """
    # A local directory literally named "mock==0.6" must not shadow the
    # requirement string "mock==0.6".
    script.scratch_path.join("mock==0.6").mkdir()
    result = script.pip('install', 'mock==0.6')
    egg_folder = script.site_packages / 'mock-0.6.0-py%s.egg-info' % pyversion
    assert egg_folder in result.files_created, str(result)
mock100_setup_py = textwrap.dedent('''\
from setuptools import setup
setup(name='mock',
version='100.1')''')
def test_install_folder_using_dot_slash(script):
    """
    Test installing a folder using pip install ./foldername
    """
    script.scratch_path.join("mock").mkdir()
    project_dir = script.scratch_path / 'mock'
    project_dir.join("setup.py").write(mock100_setup_py)
    # The explicit './' prefix marks this as a path, not a project name.
    result = script.pip('install', './mock')
    egg_info = script.site_packages / ('mock-100.1-py%s.egg-info' % pyversion)
    assert egg_info in result.files_created, str(result)
def test_install_folder_using_slash_in_the_end(script):
    r"""
    Test installing a folder using pip install foldername/ or foldername\
    """
    script.scratch_path.join("mock").mkdir()
    pkg_path = script.scratch_path / 'mock'
    pkg_path.join("setup.py").write(mock100_setup_py)
    # os.path.sep makes this exercise "mock/" on POSIX and "mock\" on Windows.
    result = script.pip('install', 'mock' + os.path.sep)
    egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
    assert egg_folder in result.files_created, str(result)
def test_install_folder_using_relative_path(script):
    """
    Test installing a folder using pip install folder1/folder2
    """
    script.scratch_path.join("initools").mkdir()
    script.scratch_path.join("initools", "mock").mkdir()
    pkg_path = script.scratch_path / 'initools' / 'mock'
    pkg_path.join("setup.py").write(mock100_setup_py)
    # A relative multi-segment path must be recognised as a directory install.
    result = script.pip('install', Path('initools') / 'mock')
    egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
    assert egg_folder in result.files_created, str(result)
@pytest.mark.network
def test_install_package_which_contains_dev_in_name(script):
    """
    Test installing package from pypi which contains 'dev' in name
    """
    # 'dev' inside the project name must not be mistaken for a dev-release
    # version marker.
    result = script.pip('install', 'django-devserver==0.0.4')
    devserver_folder = script.site_packages / 'devserver'
    egg_info_folder = (
        script.site_packages / 'django_devserver-0.0.4-py%s.egg-info' %
        pyversion
    )
    assert devserver_folder in result.files_created, str(result.stdout)
    assert egg_info_folder in result.files_created, str(result)
def test_install_package_with_target(script):
    """
    Test installing a package using pip install --target

    Covers: initial install, no-op re-install, --upgrade of a package dir,
    and install/upgrade of a single-module package.
    """
    target_dir = script.scratch_path / 'target'
    result = script.pip_install_local('-t', target_dir, "simple==1.0")
    assert Path('scratch') / 'target' / 'simple' in result.files_created, (
        str(result)
    )

    # Test repeated call without --upgrade, no files should have changed
    result = script.pip_install_local(
        '-t', target_dir, "simple==1.0", expect_stderr=True,
    )
    # Idiom fix: `x not in y` instead of `not x in y` (same semantics).
    assert Path('scratch') / 'target' / 'simple' not in result.files_updated

    # Test upgrade call, check that new version is installed
    result = script.pip_install_local('--upgrade', '-t',
                                      target_dir, "simple==2.0")
    assert Path('scratch') / 'target' / 'simple' in result.files_updated, (
        str(result)
    )
    egg_folder = (
        Path('scratch') / 'target' / 'simple-2.0-py%s.egg-info' % pyversion)
    assert egg_folder in result.files_created, (
        str(result)
    )

    # Test install and upgrade of single-module package
    result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0')
    singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py'
    assert singlemodule_py in result.files_created, str(result)
    result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1',
                                      '--upgrade')
    assert singlemodule_py in result.files_updated, str(result)
def test_install_package_with_root(script, data):
    """
    Test installing a package using pip install --root
    """
    root_dir = script.scratch_path / 'root'
    result = script.pip(
        'install', '--root', root_dir, '-f', data.find_links, '--no-index',
        'simple==1.0',
    )
    # Where the egg-info would live in a normal (non-rooted) install.
    normal_install_path = (
        script.base_path / script.site_packages / 'simple-1.0-py%s.egg-info' %
        pyversion
    )
    # use distutils to change the root exactly how the --root option does it
    from distutils.util import change_root
    root_path = change_root(
        os.path.join(script.scratch, 'root'),
        normal_install_path
    )
    assert root_path in result.files_created, str(result)
def test_install_package_with_prefix(script, data):
    """
    Test installing a package using pip install --prefix
    """
    prefix_path = script.scratch_path / 'prefix'
    result = script.pip(
        'install', '--prefix', prefix_path, '-f', data.find_links,
        '--no-binary', 'simple', '--no-index', 'simple==1.0',
    )
    # PyPy installs directly under the prefix; CPython adds lib/pythonX.Y.
    if hasattr(sys, "pypy_version_info"):
        path = script.scratch / 'prefix'
    else:
        path = script.scratch / 'prefix' / 'lib' / 'python{0}'.format(pyversion)  # noqa
    install_path = (
        path / 'site-packages' / 'simple-1.0-py{0}.egg-info'.format(pyversion)
    )
    assert install_path in result.files_created, str(result)
def test_install_editable_with_prefix(script):
    """Editable install honors --prefix (egg-link under prefix's site-packages)."""
    # make a dummy project
    pkga_path = script.scratch_path / 'pkga'
    pkga_path.mkdir()
    pkga_path.join("setup.py").write(textwrap.dedent("""
        from setuptools import setup
        setup(name='pkga',
              version='0.1')
    """))
    site_packages = os.path.join(
        'prefix', 'lib', 'python{0}'.format(pyversion), 'site-packages')
    # make sure target path is in PYTHONPATH
    pythonpath = script.scratch_path / site_packages
    pythonpath.makedirs()
    script.environ["PYTHONPATH"] = pythonpath
    # install pkga package into the absolute prefix directory
    prefix_path = script.scratch_path / 'prefix'
    result = script.pip(
        'install', '--editable', pkga_path, '--prefix', prefix_path)
    # assert pkga is installed at correct location
    install_path = script.scratch / site_packages / 'pkga.egg-link'
    assert install_path in result.files_created, str(result)
def test_install_package_conflict_prefix_and_user(script, data):
    """
    Test installing a package using pip install --prefix --user errors out
    """
    # --user and --prefix are mutually exclusive; pip must refuse the combo.
    prefix_arg = script.scratch_path / 'prefix'
    result = script.pip(
        'install', '-f', data.find_links, '--no-index', '--user',
        '--prefix', prefix_arg, 'simple==1.0',
        expect_error=True, quiet=True,
    )
    assert "Can not combine '--user' and '--prefix'" in result.stderr
# skip on win/py3 for now, see issue #782
@pytest.mark.skipif("sys.platform == 'win32' and sys.version_info >= (3,)")
def test_install_package_that_emits_unicode(script, data):
    """
    Install a package with a setup.py that emits UTF-8 output and then fails.
    Refs https://github.com/pypa/pip/issues/326
    """
    to_install = data.packages.join("BrokenEmitsUTF8")
    result = script.pip(
        'install', to_install, expect_error=True, expect_temp=True, quiet=True,
    )
    # The package's own failure must surface; the non-ASCII build output must
    # not trigger a UnicodeDecodeError inside pip itself.
    assert (
        'FakeError: this package designed to fail on install' in result.stdout
    )
    assert 'UnicodeDecodeError' not in result.stdout
def test_install_package_with_utf8_setup(script, data):
    """Install a package with a setup.py that declares a utf-8 encoding."""
    script.pip('install', data.packages.join("SetupPyUTF8"))
def test_install_package_with_latin1_setup(script, data):
    """Install a package with a setup.py that declares a latin-1 encoding."""
    script.pip('install', data.packages.join("SetupPyLatin1"))
def test_url_req_case_mismatch_no_index(script, data):
    """
    tar ball url requirements (with no egg fragment), that happen to have upper
    case project names, should be considered equal to later requirements that
    reference the project name using lower case.
    tests/data/packages contains Upper-1.0.tar.gz and Upper-2.0.tar.gz
    'requiresupper' has install_requires = ['upper']
    """
    Upper = os.path.join(data.find_links, 'Upper-1.0.tar.gz')
    result = script.pip(
        'install', '--no-index', '-f', data.find_links, Upper, 'requiresupper'
    )
    # only Upper-1.0.tar.gz should get installed.
    egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion
    assert egg_folder in result.files_created, str(result)
    # 2.0 would only appear if the lowercase dependency were (wrongly) treated
    # as a different project from the URL requirement.
    egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion
    assert egg_folder not in result.files_created, str(result)
def test_url_req_case_mismatch_file_index(script, data):
    """
    tar ball url requirements (with no egg fragment), that happen to have upper
    case project names, should be considered equal to later requirements that
    reference the project name using lower case.
    tests/data/packages3 contains Dinner-1.0.tar.gz and Dinner-2.0.tar.gz
    'requiredinner' has install_requires = ['dinner']
    This test is similar to test_url_req_case_mismatch_no_index; that test
    tests behaviour when using "--no-index -f", while this one does the same
    test when using "--index-url". Unfortunately this requires a different
    set of packages as it requires a prepared index.html file and
    subdirectory-per-package structure.
    """
    Dinner = os.path.join(data.find_links3, 'dinner', 'Dinner-1.0.tar.gz')
    result = script.pip(
        'install', '--index-url', data.find_links3, Dinner, 'requiredinner'
    )
    # only Dinner-1.0.tar.gz should get installed.
    egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion
    assert egg_folder in result.files_created, str(result)
    egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion
    assert egg_folder not in result.files_created, str(result)
def test_url_incorrect_case_no_index(script, data):
    """
    Same as test_url_req_case_mismatch_no_index, except testing for the case
    where the incorrect case is given in the name of the package to install
    rather than in a requirements file.
    """
    result = script.pip(
        'install', '--no-index', '-f', data.find_links, "upper",
    )
    # only Upper-2.0.tar.gz should get installed (latest version wins when
    # installing by name).
    egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion
    assert egg_folder not in result.files_created, str(result)
    egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion
    assert egg_folder in result.files_created, str(result)
def test_url_incorrect_case_file_index(script, data):
    """
    Same as test_url_req_case_mismatch_file_index, except testing for the case
    where the incorrect case is given in the name of the package to install
    rather than in a requirements file.
    """
    result = script.pip(
        'install', '--index-url', data.find_links3, "dinner",
        expect_stderr=True,
    )
    # only Dinner-2.0.tar.gz should get installed (latest version wins when
    # installing by name).
    egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion
    assert egg_folder not in result.files_created, str(result)
    egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion
    assert egg_folder in result.files_created, str(result)
@pytest.mark.network
def test_compiles_pyc(script):
    """
    Test installing with --compile on
    """
    # The test environment sets PYTHONDONTWRITEBYTECODE; drop it so byte
    # compilation at install time is actually possible.
    del script.environ["PYTHONDONTWRITEBYTECODE"]
    script.pip("install", "--compile", "--no-binary=:all:", "INITools==0.2")
    # There are many locations for the __init__.pyc file so attempt to find
    # any of them
    exists = [
        os.path.exists(script.site_packages_path / "initools/__init__.pyc"),
    ]
    exists += glob.glob(
        script.site_packages_path / "initools/__pycache__/__init__*.pyc"
    )
    assert any(exists)
@pytest.mark.network
def test_no_compiles_pyc(script, data):
    """
    Test that installing with --no-compile skips byte-compilation
    """
    # Drop PYTHONDONTWRITEBYTECODE so the only thing preventing .pyc creation
    # is the --no-compile flag itself.
    del script.environ["PYTHONDONTWRITEBYTECODE"]
    script.pip("install", "--no-compile", "--no-binary=:all:", "INITools==0.2")
    # There are many locations for the __init__.pyc file so attempt to find
    # any of them
    exists = [
        os.path.exists(script.site_packages_path / "initools/__init__.pyc"),
    ]
    exists += glob.glob(
        script.site_packages_path / "initools/__pycache__/__init__*.pyc"
    )
    assert not any(exists)
def test_install_upgrade_editable_depending_on_other_editable(script):
    """Upgrading an editable that requires another editable keeps both installed."""
    script.scratch_path.join("pkga").mkdir()
    pkga_path = script.scratch_path / 'pkga'
    pkga_path.join("setup.py").write(textwrap.dedent("""
        from setuptools import setup
        setup(name='pkga',
              version='0.1')
    """))
    script.pip('install', '--editable', pkga_path)
    result = script.pip('list')
    assert "pkga" in result.stdout
    script.scratch_path.join("pkgb").mkdir()
    pkgb_path = script.scratch_path / 'pkgb'
    # pkgb depends on the already-editable-installed pkga.
    pkgb_path.join("setup.py").write(textwrap.dedent("""
        from setuptools import setup
        setup(name='pkgb',
              version='0.1',
              install_requires=['pkga'])
    """))
    script.pip('install', '--upgrade', '--editable', pkgb_path)
    result = script.pip('list')
    assert "pkgb" in result.stdout
def test_install_topological_sort(script, data):
    """Packages are installed in dependency (topological) order."""
    res = str(script.pip('install', 'TopoRequires4', '-f', data.packages,
                         expect_error=False))
    # TopoRequires2 and TopoRequires3 are independent, so either relative
    # order is a valid topological sort.
    valid_orders = (
        'TopoRequires, TopoRequires2, TopoRequires3, TopoRequires4',
        'TopoRequires, TopoRequires3, TopoRequires2, TopoRequires4',
    )
    assert any(order in res for order in valid_orders), res
def test_install_wheel_broken(script, data):
    """A failing wheel build still falls back to a successful sdist install."""
    script.pip('install', 'wheel')
    res = script.pip('install', '--no-index', '-f', data.find_links,
                     'wheelbroken', expect_stderr=True)
    assert "Successfully installed wheelbroken-0.1" in str(res), str(res)
def test_cleanup_after_failed_wheel(script, data):
    """After a failed wheel build, pip must run `setup.py clean` so the
    fallback sdist install produces working scripts."""
    script.pip('install', 'wheel')
    res = script.pip(
        'install', '--no-index', '-f', data.find_links, 'wheelbrokenafter',
        expect_stderr=True)
    # One of the effects of not cleaning up is broken scripts:
    script_py = script.bin_path / "script.py"
    # BUG FIX: `script_py.exists` is a bound method and therefore always
    # truthy, making the original assert vacuous; it must be *called*.
    assert script_py.exists(), script_py
    shebang = open(script_py, 'r').readline().strip()
    assert shebang != '#!python', shebang
    # OK, assert that we *said* we were cleaning up:
    assert "Running setup.py clean for wheelbrokenafter" in str(res), str(res)
def test_install_builds_wheels(script, data):
    # NB This incidentally tests a local tree + tarball inputs
    # see test_install_editable_from_git_autobuild_wheel for editable
    # vcs coverage.
    script.pip('install', 'wheel')
    to_install = data.packages.join('requires_wheelbroken_upper')
    res = script.pip(
        'install', '--no-index', '-f', data.find_links,
        to_install, expect_stderr=True)
    expected = ("Successfully installed requires-wheelbroken-upper-0"
                " upper-2.0 wheelbroken-0.1")
    # Must have installed it all
    assert expected in str(res), str(res)
    root = appdirs.user_cache_dir('pip')
    wheels = []
    # Collect every wheel file that ended up in pip's wheel cache.
    for top, dirs, files in os.walk(os.path.join(root, "wheels")):
        wheels.extend(files)
    # and built wheels for upper and wheelbroken
    assert "Running setup.py bdist_wheel for upper" in str(res), str(res)
    assert "Running setup.py bdist_wheel for wheelb" in str(res), str(res)
    # But not requires_wheel... which is a local dir and thus uncachable.
    assert "Running setup.py bdist_wheel for requir" not in str(res), str(res)
    # At least one wheel was actually written into the cache.
    assert wheels != [], str(res)
    # and installed from the wheel
    assert "Running setup.py install for upper" not in str(res), str(res)
    # the local tree can't build a wheel (because we can't assume that every
    # build will have a suitable unique key to cache on).
    assert "Running setup.py install for requires-wheel" in str(res), str(res)
    # wheelbroken has to run install
    assert "Running setup.py install for wheelb" in str(res), str(res)
    # We want to make sure we used the correct implementation tag
    assert wheels == [
        "Upper-2.0-{0}-none-any.whl".format(pep425tags.implementation_tag),
    ]
def test_install_no_binary_disables_building_wheels(script, data):
    """--no-binary=<pkg> must skip the wheel build for that package only."""
    script.pip('install', 'wheel')
    to_install = data.packages.join('requires_wheelbroken_upper')
    res = script.pip(
        'install', '--no-index', '--no-binary=upper', '-f', data.find_links,
        to_install, expect_stderr=True)
    expected = ("Successfully installed requires-wheelbroken-upper-0"
                " upper-2.0 wheelbroken-0.1")
    # Must have installed it all
    assert expected in str(res), str(res)
    root = appdirs.user_cache_dir('pip')
    wheels = []
    # Collect all files in the wheel cache.
    for top, dirs, files in os.walk(root):
        wheels.extend(files)
    # and built wheels for wheelbroken only
    assert "Running setup.py bdist_wheel for wheelb" in str(res), str(res)
    # But not requires_wheel... which is a local dir and thus uncachable.
    assert "Running setup.py bdist_wheel for requir" not in str(res), str(res)
    # Nor upper, which was blacklisted
    assert "Running setup.py bdist_wheel for upper" not in str(res), str(res)
    # Something was still written into the cache (wheelbroken's attempt).
    assert wheels != [], str(res)
    # the local tree can't build a wheel (because we can't assume that every
    # build will have a suitable unique key to cache on).
    assert "Running setup.py install for requires-wheel" in str(res), str(res)
    # And these two fell back to sdist based installed.
    assert "Running setup.py install for wheelb" in str(res), str(res)
    assert "Running setup.py install for upper" in str(res), str(res)
def test_install_no_binary_disables_cached_wheels(script, data):
    """--no-binary=:all: must bypass even an already-cached wheel."""
    script.pip('install', 'wheel')
    # Seed the cache
    script.pip(
        'install', '--no-index', '-f', data.find_links,
        'upper')
    script.pip('uninstall', 'upper', '-y')
    res = script.pip(
        'install', '--no-index', '--no-binary=:all:', '-f', data.find_links,
        'upper', expect_stderr=True)
    assert "Successfully installed upper-2.0" in str(res), str(res)
    # No wheel building for upper, which was blacklisted
    assert "Running setup.py bdist_wheel for upper" not in str(res), str(res)
    # Must have used source, not a cached wheel to install upper.
    assert "Running setup.py install for upper" in str(res), str(res)
def test_install_editable_with_wrong_egg_name(script):
    """A #egg= name disagreeing with the project's metadata is reported."""
    script.scratch_path.join("pkga").mkdir()
    pkga_path = script.scratch_path / 'pkga'
    pkga_path.join("setup.py").write(textwrap.dedent("""
        from setuptools import setup
        setup(name='pkga',
              version='0.1')
    """))
    # The URL claims the project is 'pkgb' but setup.py says 'pkga'.
    result = script.pip(
        'install', '--editable', 'file://%s#egg=pkgb' % pkga_path,
        expect_error=True)
    assert ("egg_info for package pkgb produced metadata "
            "for project name pkga") in result.stderr
| |
# This file should be kept compatible with both Python 2.6 and Python >= 3.0.
from __future__ import division
from __future__ import print_function
"""
ccbench, a Python concurrency benchmark.
"""
import time
import os
import sys
import functools
import itertools
import threading
import subprocess
import socket
from optparse import OptionParser, SUPPRESS_HELP
import platform
# Compatibility
try:
xrange
except NameError:
xrange = range
try:
map = itertools.imap
except AttributeError:
pass
THROUGHPUT_DURATION = 2.0
LATENCY_PING_INTERVAL = 0.1
LATENCY_DURATION = 2.0
BANDWIDTH_PACKET_SIZE = 1024
BANDWIDTH_DURATION = 2.0
def task_pidigits():
    """Pi calculation (Python)"""
    # Unbounded-spigot pi digit generator; pure-Python CPU load.
    lazy_map = map
    count_from = itertools.count
    take = itertools.islice
    def calc_ndigits(n):
        # From http://shootout.alioth.debian.org/
        def lfts():
            # Stream of linear fractional transformations (q, r, s, t).
            return lazy_map(lambda k: (k, 4*k + 2, 0, 2*k + 1), count_from(1))
        def compose(a, b):
            # 2x2 matrix product of two transformations.
            aq, ar, as_, at = a
            bq, br, bs, bt = b
            return (
                aq * bq,
                aq * br + ar * bt,
                as_ * bq + at * bs,
                as_ * br + at * bt,
            )
        def extract(z, j):
            q, r, s, t = z
            return (q * j + r) // (s * j + t)
        def digit_stream():
            z = (1, 0, 0, 1)
            terms = lfts()
            while True:
                y = extract(z, 3)
                # Absorb terms until the next digit is unambiguous.
                while y != extract(z, 4):
                    z = compose(z, next(terms))
                    y = extract(z, 3)
                z = compose((10, -10 * y, 0, 1), z)
                yield y
        return list(take(digit_stream(), n))
    return calc_ndigits, (50, )
def task_regex():
    """regular expression (C)"""
    # XXX this task gives horrendous latency results.
    import re
    # Taken from the `inspect` module
    pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)', re.MULTILINE)
    # Benchmark input: the first 2000 characters of this very file.
    with open(__file__, "r") as f:
        arg = f.read(2000)
    # NOTE: a dead debugging wrapper (`findall`, which timed and printed each
    # call but was never returned or used) has been removed.
    return pat.findall, (arg, )
def task_sort():
    """list sorting (C)"""
    def list_sort(l):
        # Sort a reversed copy; the caller's list is never mutated.
        scratch = l[::-1]
        scratch.sort()
    return list_sort, (list(range(1000)), )
def task_compress_zlib():
    """zlib compression (C)"""
    import zlib
    # Benchmark input: a chunk of this file, repeated.
    with open(__file__, "rb") as f:
        arg = f.read(5000) * 3
    def compress(payload):
        # Round-trip compress/decompress; the result is discarded.
        blob = zlib.compress(payload, 5)
        zlib.decompress(blob)
    return compress, (arg, )
def task_compress_bz2():
    """bz2 compression (C)"""
    import bz2
    # Benchmark input: a chunk of this file, repeated.
    with open(__file__, "rb") as f:
        arg = f.read(3000) * 2
    def compress(payload):
        # Compress for the CPU cost only; output is thrown away.
        bz2.compress(payload)
    return compress, (arg, )
def task_hashing():
    """SHA1 hashing (C)"""
    import hashlib
    # Benchmark input: a chunk of this file, repeated many times.
    with open(__file__, "rb") as f:
        arg = f.read(5000) * 30
    def compute(payload):
        # The digest is computed purely for its cost and discarded.
        hashlib.sha1(payload).digest()
    return compute, (arg, )
# Task lists for each benchmark; bz2/hashlib are imported dynamically so the
# script still runs on builds where either module is missing.
throughput_tasks = [task_pidigits, task_regex]
for mod in 'bz2', 'hashlib':
    try:
        globals()[mod] = __import__(mod)
    except ImportError:
        globals()[mod] = None
# For whatever reasons, zlib gives irregular results, so we prefer bz2 or
# hashlib if available.
# (NOTE: hashlib releases the GIL from 2.7 and 3.1 onwards)
if bz2 is not None:
    throughput_tasks.append(task_compress_bz2)
elif hashlib is not None:
    throughput_tasks.append(task_hashing)
else:
    throughput_tasks.append(task_compress_zlib)
latency_tasks = throughput_tasks
bandwidth_tasks = [task_pidigits]
class TimedLoop:
    """Callable that runs func(*args) in a tight loop and measures throughput."""
    def __init__(self, func, args):
        self.func = func
        self.args = args
    def __call__(self, start_time, min_duration, end_event, do_yield=False):
        """Loop until min_duration elapses or another loop has finished.

        Returns (niters, duration).  `end_event` is a shared list used as a
        cheap one-shot event: the first loop to reach min_duration appends to
        it, telling all other loops to stop and report their *previous*
        (still complete) measurement.
        """
        step = 20
        niters = 0
        duration = 0.0
        # Bind hot names to locals to minimize lookup overhead in the loop.
        _time = time.time
        _sleep = time.sleep
        _func = self.func
        _args = self.args
        t1 = start_time
        while True:
            for i in range(step):
                _func(*_args)
            t2 = _time()
            # If another thread terminated, the current measurement is invalid
            # => return the previous one.
            if end_event:
                return niters, duration
            niters += step
            duration = t2 - start_time
            if duration >= min_duration:
                end_event.append(None)
                return niters, duration
            if t2 - t1 < 0.01:
                # Minimize interference of measurement on overall runtime
                step = step * 3 // 2
            elif do_yield:
                # OS scheduling of Python threads is sometimes so bad that we
                # have to force thread switching ourselves, otherwise we get
                # completely useless results.
                _sleep(0.0001)
            t1 = t2
def run_throughput_test(func, args, nthreads):
    """Run `nthreads` TimedLoops concurrently; return their (niters, duration)s."""
    assert nthreads >= 1
    # Warm up
    func(*args)
    results = []
    loop = TimedLoop(func, args)
    end_event = []
    if nthreads == 1:
        # Pure single-threaded performance, without any switching or
        # synchronization overhead.
        start_time = time.time()
        results.append(loop(start_time, THROUGHPUT_DURATION,
                            end_event, do_yield=False))
        return results
    started = False
    ready_cond = threading.Condition()
    start_cond = threading.Condition()
    ready = []
    def run():
        # Signal readiness, then block until the main thread fires the start.
        with ready_cond:
            ready.append(None)
            ready_cond.notify()
        with start_cond:
            while not started:
                start_cond.wait()
        # `start_time` is read from the enclosing scope; it is assigned by
        # the main thread before `started` is set, under start_cond.
        results.append(loop(start_time, THROUGHPUT_DURATION,
                            end_event, do_yield=True))
    threads = []
    for i in range(nthreads):
        threads.append(threading.Thread(target=run))
    for t in threads:
        t.setDaemon(True)
        t.start()
    # We don't want measurements to include thread startup overhead,
    # so we arrange for timing to start after all threads are ready.
    with ready_cond:
        while len(ready) < nthreads:
            ready_cond.wait()
    with start_cond:
        start_time = time.time()
        started = True
        start_cond.notify(nthreads)
    for t in threads:
        t.join()
    return results
def run_throughput_tests(max_threads):
    """For each task, print iterations/s for 1..max_threads threads."""
    for task in throughput_tasks:
        print(task.__doc__)
        print()
        func, args = task()
        nthreads = 1
        baseline_speed = None
        while nthreads <= max_threads:
            results = run_throughput_test(func, args, nthreads)
            # Taking the max duration rather than average gives pessimistic
            # results rather than optimistic.
            speed = sum(r[0] for r in results) / max(r[1] for r in results)
            print("threads=%d: %d" % (nthreads, speed), end="")
            if baseline_speed is None:
                print(" iterations/s.")
                baseline_speed = speed
            else:
                # Multi-threaded speed as a percentage of the 1-thread run.
                print(" ( %d %%)" % (speed / baseline_speed * 100))
            nthreads += 1
        print()
LAT_END = "END"
def _sendto(sock, s, addr):
sock.sendto(s.encode('ascii'), addr)
def _recv(sock, n):
return sock.recv(n).decode('ascii')
def latency_client(addr, nb_pings, interval):
    """Child-process entry point: send timestamped UDP pings to `addr`."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        _time = time.time
        _sleep = time.sleep
        def _ping():
            # Payload is the send timestamp, repr()'d for eval() on the
            # parent side.
            _sendto(sock, "%r\n" % _time(), addr)
        # The first ping signals the parent process that we are ready.
        _ping()
        # We give the parent a bit of time to notice.
        _sleep(1.0)
        for i in range(nb_pings):
            _sleep(interval)
            _ping()
        # Sentinel telling the parent that no more pings will come.
        _sendto(sock, LAT_END + "\n", addr)
    finally:
        sock.close()
def run_latency_client(**kwargs):
    """Spawn this same script as a subprocess in --latclient mode."""
    # -E ignores PYTHON* env vars so the child runs with a clean interpreter.
    cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
    cmd_line.extend(['--latclient', repr(kwargs)])
    return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
                            #stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_latency_test(func, args, nthreads):
    """Measure UDP ping latency from a child process while `nthreads`
    background CPU threads run func(*args).

    Returns a list of (send_time, recv_time) pairs.
    """
    # Create a listening socket to receive the pings. We use UDP which should
    # be painlessly cross-platform.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("127.0.0.1", 0))
    addr = sock.getsockname()
    interval = LATENCY_PING_INTERVAL
    duration = LATENCY_DURATION
    nb_pings = int(duration / interval)
    results = []
    threads = []
    end_event = []
    start_cond = threading.Condition()
    started = False
    if nthreads > 0:
        # Warm up
        func(*args)
        results = []
        loop = TimedLoop(func, args)
        ready = []
        ready_cond = threading.Condition()
        def run():
            # Signal readiness, wait for the coordinated start, then burn CPU
            # for 1.5x the ping duration (the pings finish first).
            with ready_cond:
                ready.append(None)
                ready_cond.notify()
            with start_cond:
                while not started:
                    start_cond.wait()
            loop(start_time, duration * 1.5, end_event, do_yield=False)
        for i in range(nthreads):
            threads.append(threading.Thread(target=run))
        for t in threads:
            t.setDaemon(True)
            t.start()
        # Wait for threads to be ready
        with ready_cond:
            while len(ready) < nthreads:
                ready_cond.wait()
    # Run the client and wait for the first ping(s) to arrive before
    # unblocking the background threads.
    chunks = []
    process = run_latency_client(addr=sock.getsockname(),
                                 nb_pings=nb_pings, interval=interval)
    s = _recv(sock, 4096)
    _time = time.time
    with start_cond:
        start_time = _time()
        started = True
        start_cond.notify(nthreads)
    while LAT_END not in s:
        s = _recv(sock, 4096)
        t = _time()
        chunks.append((t, s))
    # Tell the background threads to stop.
    end_event.append(None)
    for t in threads:
        t.join()
    process.wait()
    sock.close()
    for recv_time, chunk in chunks:
        # NOTE: it is assumed that a line sent by a client wasn't received
        # in two chunks because the lines are very small.
        for line in chunk.splitlines():
            line = line.strip()
            if line and line != LAT_END:
                # NOTE(review): eval() of data read from the socket — safe
                # only because the peer is our own child process on loopback.
                send_time = eval(line)
                assert isinstance(send_time, float)
                results.append((send_time, recv_time))
    return results
def run_latency_tests(max_threads):
    """For each task, print average/stddev ping latency for 0..max_threads
    background CPU threads."""
    for task in latency_tasks:
        print("Background CPU task:", task.__doc__)
        print()
        func, args = task()
        nthreads = 0
        while nthreads <= max_threads:
            results = run_latency_test(func, args, nthreads)
            n = len(results)
            # We print out milliseconds
            lats = [1000 * (t2 - t1) for (t1, t2) in results]
            #print(list(map(int, lats)))
            avg = sum(lats) / n
            # Population standard deviation of the latencies.
            dev = (sum((x - avg) ** 2 for x in lats) / n) ** 0.5
            print("CPU threads=%d: %d ms. (std dev: %d ms.)" % (nthreads, avg, dev), end="")
            print()
            #print(" [... from %d samples]" % n)
            nthreads += 1
        print()
BW_END = "END"
def bandwidth_client(addr, packet_size, duration):
    """Child-process entry point: echo fixed-size UDP packets with `addr`."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("127.0.0.1", 0))
    local_addr = sock.getsockname()
    _time = time.time
    _sleep = time.sleep
    def _send_chunk(msg):
        # Each packet carries our own address (so the parent can reply) and
        # is right-justified to exactly packet_size bytes.
        _sendto(sock, ("%r#%s\n" % (local_addr, msg)).rjust(packet_size), addr)
    # We give the parent some time to be ready.
    _sleep(1.0)
    try:
        start_time = _time()
        # 2x duration so the child always outlives the parent's measurement.
        end_time = start_time + duration * 2.0
        i = 0
        while _time() < end_time:
            _send_chunk(str(i))
            s = _recv(sock, packet_size)
            assert len(s) == packet_size
            i += 1
        _send_chunk(BW_END)
    finally:
        sock.close()
def run_bandwidth_client(**kwargs):
    """Spawn this same script as a subprocess in --bwclient mode."""
    # -E ignores PYTHON* env vars so the child runs with a clean interpreter.
    cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
    cmd_line.extend(['--bwclient', repr(kwargs)])
    return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
                            #stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_bandwidth_test(func, args, nthreads):
    """Measure UDP echo throughput while `nthreads` background threads run
    `func(*args)` in a loop; returns the measured rate in packets per second.
    """
    # Create a listening socket to receive the packets. We use UDP which should
    # be painlessly cross-platform.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("127.0.0.1", 0))
    addr = sock.getsockname()
    duration = BANDWIDTH_DURATION
    packet_size = BANDWIDTH_PACKET_SIZE
    results = []
    threads = []
    # Shared stop flag between threads: appending an item signals the end.
    end_event = []
    start_cond = threading.Condition()
    started = False
    if nthreads > 0:
        # Warm up
        func(*args)
        results = []
        loop = TimedLoop(func, args)
        ready = []
        ready_cond = threading.Condition()
        def run():
            # Announce readiness, then block until the main thread has
            # received the first client packet and flips `started`.
            with ready_cond:
                ready.append(None)
                ready_cond.notify()
            with start_cond:
                while not started:
                    start_cond.wait()
            loop(start_time, duration * 1.5, end_event, do_yield=False)
        for i in range(nthreads):
            threads.append(threading.Thread(target=run))
        for t in threads:
            t.setDaemon(True)
            t.start()
        # Wait for threads to be ready
        with ready_cond:
            while len(ready) < nthreads:
                ready_cond.wait()
    # Run the client and wait for the first packet to arrive before
    # unblocking the background threads.
    process = run_bandwidth_client(addr=addr,
                                   packet_size=packet_size,
                                   duration=duration)
    _time = time.time
    # This will also wait for the parent to be ready
    s = _recv(sock, packet_size)
    # The packet prefix (before '#') is the repr of the client's address;
    # eval() here only ever sees data produced by our own child process.
    remote_addr = eval(s.partition('#')[0])
    with start_cond:
        start_time = _time()
        started = True
        start_cond.notify(nthreads)
    n = 0
    first_time = None
    while not end_event and BW_END not in s:
        # Echo the packet back and count the round-trips.
        _sendto(sock, s, remote_addr)
        s = _recv(sock, packet_size)
        if first_time is None:
            first_time = _time()
        n += 1
    end_time = _time()
    end_event.append(None)
    for t in threads:
        t.join()
    process.kill()
    # The first packet only starts the clock, hence n - 1 completed intervals.
    return (n - 1) / (end_time - first_time)
def run_bandwidth_tests(max_threads):
    """Run the UDP bandwidth benchmark for every bandwidth task, sweeping the
    number of background CPU threads from 0 to *max_threads*, and print each
    speed both absolutely and relative to the 0-thread baseline."""
    for task in bandwidth_tasks:
        print("Background CPU task:", task.__doc__)
        print()
        func, args = task()
        baseline_speed = None
        for nthreads in range(max_threads + 1):
            speed = run_bandwidth_test(func, args, nthreads)
            print("CPU threads=%d: %.1f" % (nthreads, speed), end="")
            if baseline_speed is None:
                # First measurement (no background threads) is the baseline.
                print(" packets/s.")
                baseline_speed = speed
            else:
                print(" ( %d %%)" % (speed / baseline_speed * 100))
        print()
def main():
    """Parse command-line options and run the selected benchmark suites.

    The script also re-executes itself with the hidden --latclient/--bwclient
    options to act as the child process of the latency/bandwidth tests.
    """
    usage = "usage: %prog [-h|--help] [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("-t", "--throughput",
                      action="store_true", dest="throughput", default=False,
                      help="run throughput tests")
    parser.add_option("-l", "--latency",
                      action="store_true", dest="latency", default=False,
                      help="run latency tests")
    parser.add_option("-b", "--bandwidth",
                      action="store_true", dest="bandwidth", default=False,
                      help="run I/O bandwidth tests")
    parser.add_option("-i", "--interval",
                      action="store", type="int", dest="check_interval", default=None,
                      help="sys.setcheckinterval() value")
    parser.add_option("-I", "--switch-interval",
                      action="store", type="float", dest="switch_interval", default=None,
                      help="sys.setswitchinterval() value")
    parser.add_option("-n", "--num-threads",
                      action="store", type="int", dest="nthreads", default=4,
                      help="max number of threads in tests")
    # Hidden option to run the pinging and bandwidth clients
    parser.add_option("", "--latclient",
                      action="store", dest="latclient", default=None,
                      help=SUPPRESS_HELP)
    parser.add_option("", "--bwclient",
                      action="store", dest="bwclient", default=None,
                      help=SUPPRESS_HELP)
    options, args = parser.parse_args()
    if args:
        parser.error("unexpected arguments")
    # Client modes: the option value is a repr() of a kwargs dict produced by
    # this same script, so eval() here only ever sees our own data.
    if options.latclient:
        kwargs = eval(options.latclient)
        latency_client(**kwargs)
        return
    if options.bwclient:
        kwargs = eval(options.bwclient)
        bandwidth_client(**kwargs)
        return
    # With no explicit selection, run all three suites.
    if not options.throughput and not options.latency and not options.bandwidth:
        options.throughput = options.latency = options.bandwidth = True
    if options.check_interval:
        # NOTE(review): sys.setcheckinterval() was removed in Python 3.9;
        # this path presumably only works on older interpreters — confirm.
        sys.setcheckinterval(options.check_interval)
    if options.switch_interval:
        sys.setswitchinterval(options.switch_interval)
    print("== %s %s (%s) ==" % (
        platform.python_implementation(),
        platform.python_version(),
        platform.python_build()[0],
    ))
    # Processor identification often has repeated spaces
    cpu = ' '.join(platform.processor().split())
    print("== %s %s on '%s' ==" % (
        platform.machine(),
        platform.system(),
        cpu,
    ))
    print()
    if options.throughput:
        print("--- Throughput ---")
        print()
        run_throughput_tests(options.nthreads)
    if options.latency:
        print("--- Latency ---")
        print()
        run_latency_tests(options.nthreads)
    if options.bandwidth:
        print("--- I/O bandwidth ---")
        print()
        run_bandwidth_tests(options.nthreads)
if __name__ == "__main__":
    # Entry point; note the script also re-executes itself as a subprocess
    # (see run_bandwidth_client and the --latclient/--bwclient options).
    main()
| |
from collections import Counter
from typing import Dict, List, Union
from tensorflow.keras.callbacks import Callback
from ray import tune
import os
class TuneCallback(Callback):
    """Base class for Tune's Keras callbacks.

    Subclasses implement ``_handle``; it is invoked for every Keras hook
    whose name (without the ``on_`` prefix) was listed in ``on``.
    """

    # Keras hook names (``on_`` prefix stripped) accepted as triggers.
    _allowed = [
        "batch_begin",
        "batch_end",
        "epoch_begin",
        "epoch_end",
        "train_batch_begin",
        "train_batch_end",
        "test_batch_begin",
        "test_batch_end",
        "predict_batch_begin",
        "predict_batch_end",
        "train_begin",
        "train_end",
        "test_begin",
        "test_end",
        "predict_begin",
        "predict_end",
    ]

    def __init__(self, on: Union[str, List[str]] = "validation_end"):
        super(TuneCallback, self).__init__()
        if not isinstance(on, list):
            on = [on]
        if any(hook not in self._allowed for hook in on):
            raise ValueError(
                "Invalid trigger time selected: {}. Must be one of {}".format(
                    on, self._allowed))
        self._on = on

    def _handle(self, logs: Dict, when: str):
        raise NotImplementedError

    def _dispatch(self, when: str, logs: Dict):
        # Forward to _handle only for hooks the user registered in ``on``.
        if when in self._on:
            self._handle(logs, when)

    def on_batch_begin(self, batch, logs=None):
        self._dispatch("batch_begin", logs)

    def on_batch_end(self, batch, logs=None):
        self._dispatch("batch_end", logs)

    def on_epoch_begin(self, epoch, logs=None):
        self._dispatch("epoch_begin", logs)

    def on_epoch_end(self, epoch, logs=None):
        self._dispatch("epoch_end", logs)

    def on_train_batch_begin(self, batch, logs=None):
        self._dispatch("train_batch_begin", logs)

    def on_train_batch_end(self, batch, logs=None):
        self._dispatch("train_batch_end", logs)

    def on_test_batch_begin(self, batch, logs=None):
        self._dispatch("test_batch_begin", logs)

    def on_test_batch_end(self, batch, logs=None):
        self._dispatch("test_batch_end", logs)

    def on_predict_batch_begin(self, batch, logs=None):
        self._dispatch("predict_batch_begin", logs)

    def on_predict_batch_end(self, batch, logs=None):
        self._dispatch("predict_batch_end", logs)

    def on_train_begin(self, logs=None):
        self._dispatch("train_begin", logs)

    def on_train_end(self, logs=None):
        self._dispatch("train_end", logs)

    def on_test_begin(self, logs=None):
        self._dispatch("test_begin", logs)

    def on_test_end(self, logs=None):
        self._dispatch("test_end", logs)

    def on_predict_begin(self, logs=None):
        self._dispatch("predict_begin", logs)

    def on_predict_end(self, logs=None):
        self._dispatch("predict_end", logs)
class TuneReportCallback(TuneCallback):
    """Keras to Ray Tune reporting callback

    Reports metrics to Ray Tune on every triggered hook.

    Args:
        metrics (str|list|dict): Metrics to report to Tune. A list names the
            metric keys reported by Keras, forwarded to Tune under the same
            names. A dict maps the name reported to Tune to the metric key
            reported by Keras. If None, all Keras logs are reported.
        on (str|list): When to trigger checkpoint creations. Must be one of
            the Keras event hooks (less the ``on_``), e.g.
            "train_start", or "predict_end". Defaults to "epoch_end".

    Example:
        .. code-block:: python

            from ray.tune.integration.keras import TuneReportCallback

            # Report accuracy to Tune after each epoch:
            model.fit(
                x_train,
                y_train,
                batch_size=batch_size,
                epochs=epochs,
                verbose=0,
                validation_data=(x_test, y_test),
                callbacks=[TuneReportCallback(
                    {"mean_accuracy": "accuracy"}, on="epoch_end")])
    """

    def __init__(self,
                 metrics: Union[None, str, List[str], Dict[str, str]] = None,
                 on: Union[str, List[str]] = "epoch_end"):
        super(TuneReportCallback, self).__init__(on)
        # Normalize a single metric name to a one-element list.
        self._metrics = [metrics] if isinstance(metrics, str) else metrics

    def _handle(self, logs: Dict, when: str = None):
        if not self._metrics:
            # No explicit selection: forward all Keras logs as-is.
            report_dict = logs
        elif isinstance(self._metrics, dict):
            # dict maps Tune-facing name -> Keras log key.
            report_dict = {
                tune_name: logs[keras_key]
                for tune_name, keras_key in self._metrics.items()
            }
        else:
            report_dict = {key: logs[key] for key in self._metrics}
        tune.report(**report_dict)
class _TuneCheckpointCallback(TuneCallback):
    """Keras checkpoint callback

    Saves checkpoints after each validation step.

    Checkpoints are currently not registered if no ``tune.report()`` call
    is made afterwards. Consider using ``TuneReportCheckpointCallback``
    instead.

    Args:
        filename (str): Filename of the checkpoint within the checkpoint
            directory. Defaults to "checkpoint".
        frequency (int|list): Checkpoint frequency. If this is an integer `n`,
            checkpoints are saved every `n` times each hook was called. If
            this is a list, it specifies the checkpoint frequencies for each
            hook individually.
        on (str|list): When to trigger checkpoint creations. Must be one of
            the Keras event hooks (less the ``on_``), e.g.
            "train_start", or "predict_end". Defaults to "epoch_end".
    """

    def __init__(self,
                 filename: str = "checkpoint",
                 frequency: Union[int, List[int]] = 1,
                 on: Union[str, List[str]] = "epoch_end"):
        if isinstance(frequency, list):
            # Per-hook frequencies only make sense with a matching hook list.
            if not isinstance(on, list) or len(frequency) != len(on):
                raise ValueError(
                    "If you pass a list for checkpoint frequencies, the `on` "
                    "parameter has to be a list with the same length.")
        self._frequency = frequency
        super(_TuneCheckpointCallback, self).__init__(on)
        self._filename = filename
        self._counter = Counter()        # invocation count per hook
        self._cp_count = 0  # Has to be monotonically increasing

    def _handle(self, logs: Dict, when: str = None):
        self._counter[when] += 1
        frequency = (self._frequency[self._on.index(when)]
                     if isinstance(self._frequency, list)
                     else self._frequency)
        if self._counter[when] % frequency:
            # Not a checkpoint step for this hook yet.
            return
        with tune.checkpoint_dir(step=self._cp_count) as checkpoint_dir:
            self.model.save(
                os.path.join(checkpoint_dir, self._filename),
                overwrite=True)
        self._cp_count += 1
class TuneReportCheckpointCallback(TuneCallback):
    """Keras report and checkpoint callback

    Saves a checkpoint on each triggered hook and immediately reports
    metrics to Tune, which is needed for checkpoint registration.

    Use this callback to register saved checkpoints with Ray Tune. This means
    that checkpoints will be managed by the `CheckpointManager` and can be
    used for advanced scheduling and search algorithms, like
    Population Based Training.

    The ``tf.keras.callbacks.ModelCheckpoint`` callback also saves
    checkpoints, but doesn't register them with Ray Tune.

    Args:
        metrics (str|list|dict): Metrics to report to Tune. A list names the
            metric keys reported by Keras, forwarded to Tune under the same
            names. A dict maps the name reported to Tune to the metric key
            reported by Keras. If None, all Keras logs are reported.
        filename (str): Filename of the checkpoint within the checkpoint
            directory. Defaults to "checkpoint".
        frequency (int|list): Checkpoint frequency. If this is an integer `n`,
            checkpoints are saved every `n` times each hook was called. If
            this is a list, it specifies the checkpoint frequencies for each
            hook individually.
        on (str|list): When to trigger checkpoint creations. Must be one of
            the Keras event hooks (less the ``on_``), e.g.
            "train_start", or "predict_end". Defaults to "epoch_end".

    Example:
        .. code-block:: python

            from ray.tune.integration.keras import TuneReportCheckpointCallback

            # Save checkpoint and report accuracy to Tune after each epoch:
            model.fit(
                x_train,
                y_train,
                batch_size=batch_size,
                epochs=epochs,
                verbose=0,
                validation_data=(x_test, y_test),
                callbacks=[TuneReportCheckpointCallback(
                    metrics={"mean_accuracy": "accuracy"},
                    filename="model",
                    on="epoch_end")])
    """

    def __init__(self,
                 metrics: Union[None, str, List[str], Dict[str, str]] = None,
                 filename: str = "checkpoint",
                 frequency: Union[int, List[int]] = 1,
                 on: Union[str, List[str]] = "epoch_end"):
        super(TuneReportCheckpointCallback, self).__init__(on)
        # Compose the two single-purpose callbacks: checkpoint first, then
        # report, so the subsequent tune.report() registers the checkpoint.
        self._checkpoint = _TuneCheckpointCallback(filename, frequency, on)
        self._report = TuneReportCallback(metrics, on)

    def _handle(self, logs: Dict, when: str = None):
        self._checkpoint._handle(logs, when)
        self._report._handle(logs, when)

    def set_model(self, model):
        # Keras only sets the model on this callback; forward it to both
        # delegates so the checkpoint callback can call model.save().
        self._checkpoint.set_model(model)
        self._report.set_model(model)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import unittest
import os
import numpy as np
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal
from common import with_seed, assertRaises, xfail_when_nonstandard_decimal_separator
from copy import deepcopy
import pytest
def dict_equ(a, b):
    """Assert that two dicts of NDArray-like values have identical key sets
    and element-wise equal contents (compared via ``asnumpy()``)."""
    assert set(a) == set(b)
    for key, value in a.items():
        assert (value.asnumpy() == b[key].asnumpy()).all()
@with_seed()
def test_multi_trainer():
    """A row_sparse Parameter may be owned by at most one Trainer at a time."""
    x = gluon.Parameter('x', shape=(10,), stype='row_sparse')
    x.initialize()
    # test set trainer
    trainer0 = gluon.Trainer([x], 'sgd')
    # NOTE(review): `_trainer` is called here (weakref-style) but compared
    # directly against None below — relies on Parameter internals; confirm.
    assert(x._trainer() is trainer0)
    # test unset trainer
    x._set_trainer(None)
    assert(x._trainer is None)
    x._set_trainer(trainer0)
    with pytest.raises(RuntimeError):
        # multiple trainers for a sparse Parameter is not allowed
        trainer1 = gluon.Trainer([x], 'sgd')
@with_seed()
def test_trainer_with_sparse_grad_on_single_context():
    """With a single device and row_sparse gradients, the Trainer must not
    create a kvstore, and SGD with momentum must still update the weights."""
    param = gluon.Parameter('x', shape=(10,), grad_stype='row_sparse')
    param.initialize(ctx=[mx.cpu(0)], init='zeros')
    trainer = gluon.Trainer([param], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5})
    with mx.autograd.record():
        for w in param.list_data():
            (w + 1).backward()
    trainer.step(1)
    assert trainer._update_on_kvstore is None
    assert trainer._kvstore is None  # No kvstore created for single-device training
    # One SGD step with lr=1 on an all-zero weight and unit gradient -> -1.
    assert (param.data(mx.cpu(0)).asnumpy() == -1).all()
@with_seed()
def test_trainer_with_teststore():
    """Trainer on the 'teststore' kvstore: updates run on the workers, and
    requesting update_on_kvstore=True must be rejected."""
    param = gluon.Parameter('x', shape=(10,))
    param.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    kv = mx.kv.create('teststore')
    trainer = gluon.Trainer([param], 'sgd',
                            {'learning_rate': 1.0, 'momentum': 0.5}, kvstore=kv)
    with mx.autograd.record():
        for w in param.list_data():
            (w + 1).backward()
    trainer.step(1)
    assert trainer._update_on_kvstore == False
    assert (param.data(mx.cpu(1)).asnumpy() == -2).all()
    # Expect exceptions if update_on_kvstore is set to True,
    # because TestStore does not support that
    invalid_trainer = gluon.Trainer([param], 'sgd', kvstore=kv, update_on_kvstore=True)
    pytest.raises(ValueError, invalid_trainer._init_kvstore)
@with_seed()
def test_trainer():
    """End-to-end Trainer checks: SGD-with-momentum updates across two CPU
    contexts, lr_mult scaling, state save/load round-trip, and manual
    allreduce_grads/update when update_on_kvstore=False."""
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    # NOTE(review): this assertion compares an expression to itself and is
    # always true — it cannot fail. Presumably it was meant to compare the
    # trainer's optimizer param_dict to the updater's; confirm the intent.
    assert trainer._optimizer.param_dict == trainer._optimizer.param_dict
    assert (x.data(mx.cpu(1)).asnumpy() == -2).all()
    x.lr_mult = 0.5
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    # lr_mult halves the effective learning rate for this parameter.
    assert (x.data(mx.cpu(1)).asnumpy() == -4).all()
    trainer.save_states('test_trainer.states')
    states = deepcopy(trainer._kvstore._updater.states) if trainer._update_on_kvstore \
        else deepcopy(trainer._updaters[0].states)
    trainer.load_states('test_trainer.states')
    if trainer._update_on_kvstore:
        dict_equ(trainer._kvstore._updater.states, states)
        assert trainer._optimizer == trainer._kvstore._updater.optimizer
        # invalid usage of update and allreduce_grads if update_on_kvstore
        pytest.raises(AssertionError, trainer.update, 1)
        pytest.raises(AssertionError, trainer.allreduce_grads)
    else:
        for updater in trainer._updaters:
            dict_equ(updater.states, states)
        assert trainer._optimizer == trainer._updaters[0].optimizer
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer2 = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5},
                             update_on_kvstore=False)
    with mx.autograd.record():
        # Different gradient per device (scaled by the device index) so that
        # allreduce_grads has a visible effect.
        for i, w in enumerate(x.list_data()):
            y = i*w
            y.backward()
    assert (x.grad(mx.cpu(0)).asnumpy() != x.grad(mx.cpu(1)).asnumpy()).all()
    trainer2.allreduce_grads()
    assert (x.grad(mx.cpu(0)).asnumpy() == x.grad(mx.cpu(1)).asnumpy()).all()
    trainer2.update(1)
    assert (x.data(mx.cpu(1)).asnumpy() == -1).all(), x.data(mx.cpu(1)).asnumpy()
@with_seed()
def test_trainer_save_load():
    """Trainer state save/load round-trip: after load_states() the optimizer
    must still be associated with the live parameter dict, so a later
    lr_mult change is reflected by the updater's learning rate."""
    # Force updates onto the kvstore for this test and restore the previous
    # setting afterwards. os.environ is used instead of os.putenv because
    # os.putenv does not update os.environ, so the os.getenv-based
    # save/restore would otherwise be inconsistent within this process.
    previous_update_on_kvstore = os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")
    os.environ['MXNET_UPDATE_ON_KVSTORE'] = '1'
    try:
        x = gluon.Parameter('x', shape=(10,), lr_mult=1.0)
        x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 0.1})
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.1
        trainer.save_states('test_trainer_save_load.states')
        trainer.load_states('test_trainer_save_load.states')
        x.lr_mult = 2.0
        # check if parameter dict is correctly associated with optimizer after load_state
        assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2
    finally:
        # Restore even if an assertion above fails, so later tests are not
        # affected by the forced setting.
        os.environ['MXNET_UPDATE_ON_KVSTORE'] = previous_update_on_kvstore
@with_seed()
def test_trainer_sparse_save_load():
    """save_states/load_states must keep the optimizer tied to the live
    parameter dict for a row_sparse parameter (lr_mult change is honored)."""
    param = gluon.Parameter('x', shape=(10, 1), lr_mult=1.0,
                            stype='row_sparse', grad_stype='row_sparse')
    param.initialize(ctx=[mx.cpu(0)], init='zeros')
    trainer = gluon.Trainer([param], 'sgd', {'learning_rate': 0.1})
    all_rows = mx.nd.arange(0, 10, ctx=mx.cpu(0))
    with mx.autograd.record():
        for w in param.list_row_sparse_data(all_rows):
            (w * 1).backward()
    trainer.step(1)
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.1
    trainer.save_states('test_trainer_sparse_save_load.states')
    trainer.load_states('test_trainer_sparse_save_load.states')
    param.lr_mult = 2.0
    # The reloaded optimizer must see the new lr_mult through the parameter dict.
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2
@with_seed()
def test_trainer_multi_layer_init():
    """Deferred initialization: a network mixing a row_sparse parameter and a
    dense hybrid block must be fully initialized after the first step, on
    one or two devices."""
    class Net(gluon.Block):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            # sparse param
            self.embed_weight = gluon.Parameter('embed_weight', stype='row_sparse',
                                                shape=(4,3), grad_stype='row_sparse')
            # dense param from a hybrid block
            self.dense0 = nn.Dense(2)

        def forward(self, x):
            embed_weight = self.embed_weight.row_sparse_data(x)
            embed = mx.nd.Embedding(data=x, weight=embed_weight,
                                    input_dim=4, output_dim=3, sparse_grad=True)
            return self.dense0(embed)

    def check_init(ctxes):
        net = Net()
        net.initialize(mx.init.One(), ctx=ctxes)
        trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 1})
        data = mx.nd.array([[0,2], [1,2]])
        xs = gluon.utils.split_and_load(data, ctxes)
        ys = []
        with mx.autograd.record():
            for x in xs:
                y = net(x)
                ys.append(y)
        for y in ys:
            y.backward()
        trainer.step(1)
        # all parameters should be initialized
        assert not trainer._params_to_init
        all_rows = mx.nd.arange(0, 4, ctx=mx.cpu(1))
        # check the updated weights
        weight = net.embed_weight.row_sparse_data(all_rows).asnumpy()
        # In the batch [[0,2],[1,2]] rows 0 and 1 occur once, row 2 twice and
        # row 3 never — hence the different expected values per row.
        assert (weight[0] == -1).all()
        assert (weight[1] == -1).all()
        assert (weight[2] == -3).all()
        assert (weight[3] == 1).all()

    check_init([mx.cpu(1), mx.cpu(2)])
    check_init([mx.cpu(1)])
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_trainer_reset_kv():
    """Loading new parameter values must reset the trainer's kvstore state,
    and subsequent steps must update from the loaded checkpoint."""
    def check_trainer_reset_kv(kv):
        x = gluon.Parameter('x', shape=(10,), lr_mult=1.0)
        params = {'x': x}
        x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer(params, 'sgd', {'learning_rate': 0.1}, kvstore=kv)
        # Snapshot the zero-initialized parameters before training.
        mx.nd.save('test_trainer_reset_kv.params', {k: v._reduce() for k, v in params.items()})
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        assert trainer._kvstore.type == kv
        # load would reset kvstore
        mx.nd.waitall()
        params = mx.nd.load('test_trainer_reset_kv.params')
        x._load_init(params['x'], None)
        if trainer._update_on_kvstore:
            # drop kvstore state if new parameters are loaded
            assert trainer._kvstore is None
            assert trainer._kv_initialized is False
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        # the updated parameter should be based on the loaded checkpoint
        assert (x.data(mx.cpu()) == -0.2).asnumpy().all()
    kvs = ['local', 'device']
    for kv in kvs:
        check_trainer_reset_kv(kv)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_trainer_sparse_kv():
    """Matrix of storage type / gradient type / update_on_kvstore settings:
    verifies either the resulting _update_on_kvstore flag or that the
    expected exception type is raised."""
    def check_trainer_sparse_kv(kv, stype, grad_stype, update_on_kv, expected):
        # `expected` is dual-use: a bool (the expected _update_on_kvstore
        # value) or an exception class the combination should raise.
        x = mx.gluon.Parameter('x', shape=(10,1), lr_mult=1.0, stype=stype, grad_stype=grad_stype)
        x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 0.1},
                                kvstore=kv, update_on_kvstore=update_on_kv)
        all_rows = mx.nd.arange(0, 10, ctx=mx.cpu(0))
        try:
            ws = x.list_data() if stype == 'default' else x.list_row_sparse_data(all_rows)
            with mx.autograd.record():
                for w in ws:
                    y = w + 1
                    y.backward()
            trainer.step(1)
            assert trainer._kvstore.type == kv
            assert trainer._kv_initialized
            assert trainer._update_on_kvstore is expected
            # the updated parameter should be based on the loaded checkpoint
            mx.nd.waitall()
            updated_w = x.data(mx.cpu(0)) if stype == 'default' else x.row_sparse_data(all_rows)
            assert (updated_w == -0.2).asnumpy().all(), updated_w
        except Exception as err:
            # NOTE(review): if `expected` is a bool, isinstance() raises a
            # TypeError here, masking any unexpected error — confirm intent.
            assert isinstance(err, expected)
    kvs = ['local', 'device']
    global_update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
    for kv in kvs:
        check_trainer_sparse_kv(kv, 'default', 'default', True, True)
        check_trainer_sparse_kv(kv, 'default', 'default', False, False)
        check_trainer_sparse_kv(kv, 'default', 'default', None, global_update_on_kvstore)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', None, False)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', True, True)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', False, False)
        check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', None, True)
        check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', False, ValueError)
@with_seed()
def test_trainer_lr_sched():
    """FactorScheduler-driven learning rate must decay by `factor` every
    `freq` steps, both with the default kvstore updates and with
    update_on_kvstore=False. The two previously duplicated scenarios are
    folded into one nested helper."""
    def check_lr_sched(update_on_kvstore):
        # update_on_kvstore=None means "don't pass the kwarg" (library default).
        x = gluon.Parameter('x', shape=(10,))
        x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        freq = 2
        factor = 0.1
        lr = 1
        lr_sched = mx.lr_scheduler.FactorScheduler(freq, factor=factor, base_lr=lr)
        opt_params = {'learning_rate': lr, 'lr_scheduler': lr_sched}
        if update_on_kvstore is None:
            trainer = gluon.Trainer([x], 'sgd', opt_params)
        else:
            trainer = gluon.Trainer([x], 'sgd', opt_params,
                                    update_on_kvstore=update_on_kvstore)
        for i in range(10):
            with mx.autograd.record():
                for w in x.list_data():
                    y = w + 1
                    y.backward()
            trainer.step(1)
            # The scheduler decays the rate every `freq` update counts.
            if i % freq == 0:
                assert trainer.learning_rate == lr, (lr, trainer.learning_rate, i)
                lr *= factor
        mx.nd.waitall()

    check_lr_sched(None)    # default: update on kvstore
    check_lr_sched(False)   # updates performed on the workers
@with_seed()
def test_gluon_trainer_param_order():
    """Trainer must keep parameters in the order they were added to the
    network, regardless of the insertion order seen by each worker."""
    net = mx.gluon.nn.Sequential()
    # layers may be added in a random order for all workers
    layers = {'ones_': 1, 'zeros_': 0}
    for init_value in layers.values():
        net.add(mx.gluon.nn.Dense(10, in_units=10,
                                  weight_initializer=mx.init.Constant(init_value),
                                  use_bias=False))
    net.initialize()
    params = net.collect_params()
    trainer = gluon.Trainer(params, 'sgd')
    for layer_name in layers:
        idx = 0 if layer_name == 'ones_' else 1
        key = '{}.weight'.format(idx)
        assert trainer._params[idx].name == params[key].name
def test_trainer_allreduce_hybridsequential():
    """Smoke test: allreduce_grads across two CPU contexts on a hybridized
    8-layer network with update_on_kvstore=False."""
    ctxs = [mx.cpu(0), mx.cpu(1)]
    net = mx.gluon.nn.HybridSequential()
    for _ in range(8):  # Create a network with 8 layers
        net.add(mx.gluon.nn.Dense(1, weight_initializer='ones',
                                  bias_initializer='ones'))
    net.initialize(ctx=ctxs)
    net.hybridize()
    trainer = mx.gluon.Trainer(net.collect_params(), 'sgd', update_on_kvstore=False)
    for ctx in ctxs:
        with mx.autograd.record():
            out = net(mx.nd.ones((1, 1), ctx=ctx))
            out.backward()
    trainer.allreduce_grads()
def test_trainer_share_parameters():
    """Two Dense blocks sharing the same parameters must stay identical
    after a Trainer step (the shared weight receives both gradients)."""
    class Net(gluon.Block):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            self.dense1 = gluon.nn.Dense(5, in_units=2, use_bias=False)
            params = self.dense1.collect_params()
            # dense2 reuses dense1's weight via share_parameters().
            self.dense2 = gluon.nn.Dense(5, in_units=2,
                                         use_bias=False).share_parameters(params)
            self.dense3 = gluon.nn.Dense(5, in_units=5, use_bias=False)

        def forward(self, x):
            hidden = self.dense1(x) + self.dense2(x)
            out = self.dense3(hidden)
            return out

    net = Net()
    ctxes = [mx.cpu(0), mx.cpu(1)]
    net.initialize(mx.init.One(), ctx=ctxes)
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 1})
    data = mx.nd.array([[1, 1], [1, 1]])
    xs = gluon.utils.split_and_load(data, ctxes)
    ys = []
    with mx.autograd.record():
        for x in xs:
            y = net(x)
            ys.append(y)
    for y in ys:
        y.backward()
    trainer.step(1)
    params = net.collect_params()
    shared_params = []
    # The shared dense1/dense2 weight is the one with input width 2.
    for param in params.values():
        p = param.data(mx.cpu(0)).asnumpy()
        if p.shape[1] == 2:
            shared_params.append(p)
    assert((shared_params[0] == shared_params[1]).all())
| |
#!/usr/bin/env python
from translate.convert import pot2po
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import po
import warnings
class TestPOT2PO:
def setup_method(self, method):
    # Start each test with the default warning-filter state.
    warnings.resetwarnings()
def teardown_method(self, method):
    # Restore the default warning filters after each test.
    warnings.resetwarnings()
def convertpot(self, potsource, posource=None):
    """helper that converts pot source to po source without requiring files"""
    potfile = wStringIO.StringIO(potsource)
    # An existing PO file is optional; pot2po merges into it when present.
    pofile = wStringIO.StringIO(posource) if posource else None
    outputfile = wStringIO.StringIO()
    pot2po.convertpot(potfile, outputfile, pofile)
    outputfile.seek(0)
    return po.pofile(outputfile.read())
def singleunit(self, pofile):
    """checks that the pofile contains a single non-header unit, and returns it"""
    # Exactly two units expected: the PO header plus one real entry.
    assert len(pofile.units) == 2
    assert pofile.units[0].isheader()
    # Python 2 print statement: dumps the unit for debugging on failure.
    print pofile.units[1]
    return pofile.units[1]
def test_convertpot_blank(self):
    """checks that the convertpot function is working for a simple file initialisation"""
    # po.lsep joins the label and accesskey references on one "#:" line.
    potsource = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
    newpo = self.convertpot(potsource)
    assert str(self.singleunit(newpo)) == potsource
def test_convertpot_blank_plurals(self):
    """checks that the convertpot function is working for initialising plurals correctly"""
    # NOTE(review): blank separator lines inside these raw strings may have
    # been lost in transit — confirm against the upstream test file.
    potsource = r'''msgid ""
msgstr""
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] ""
msgstr[1] ""
'''
    # The PO header declares a single plural form, so only msgstr[0] remains.
    posource = r'''msgid ""
msgstr""
"Plural-Forms: nplurals=1; plural=0;\n"
'''
    poexpected = r'''msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] ""
'''
    newpo = self.convertpot(potsource, posource)
    assert str(self.singleunit(newpo)) == poexpected
def test_merging_simple(self):
    """checks that the convertpot function is working for a simple merge"""
    potsource = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
    posource = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
    # The existing translation must be carried over unchanged.
    newpo = self.convertpot(potsource, posource)
    assert str(self.singleunit(newpo)) == posource
def test_merging_messages_marked_fuzzy(self):
    """test that when we merge PO files with a fuzzy message that it remains fuzzy"""
    potsource = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
    posource = '''#: simple.label%ssimple.accesskey\n#, fuzzy\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
    # The "#, fuzzy" flag must survive the merge.
    newpo = self.convertpot(potsource, posource)
    assert str(self.singleunit(newpo)) == posource
def test_merging_plurals_with_fuzzy_matching(self):
    """test that when we merge PO files with a fuzzy message that it remains fuzzy"""
    potsource = r'''#: file.cpp:2
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] ""
msgstr[1] ""
'''
    posource = r'''#: file.cpp:3
#, fuzzy
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] "%d handleiding."
msgstr[1] "%d handleidings."
'''
    # The #: comment and msgid's are different between the pot and the po
    # The merged unit takes the POT's location but keeps the fuzzy translation.
    poexpected = r'''#: file.cpp:2
#, fuzzy
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] "%d handleiding."
msgstr[1] "%d handleidings."
'''
    newpo = self.convertpot(potsource, posource)
    assert str(self.singleunit(newpo)) == poexpected
def xtest_merging_msgid_change(self):
    """tests that if the msgid changes but the location stays the same that we merge"""
    # The 'x' prefix keeps pytest from collecting this test — deliberately disabled.
    potsource = '''#: simple.label\n#: simple.accesskey\nmsgid "Its &hard coding a newline.\\n"\nmsgstr ""\n'''
    posource = '''#: simple.label\n#: simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n'''
    # Same location, changed msgid: old translation kept but marked fuzzy.
    poexpected = '''#: simple.label\n#: simple.accesskey\n#, fuzzy\nmsgid "Its &hard coding a newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n'''
    newpo = self.convertpot(potsource, posource)
    print newpo
    assert str(self.singleunit(newpo)) == poexpected
def test_merging_location_change(self):
    """tests that if the location changes but the msgid stays the same that we merge"""
    potsource = '''#: new_simple.label%snew_simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
    posource = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
    # Identical msgid: the translation merges cleanly (no fuzzy flag) and
    # the new location from the POT wins.
    poexpected = '''#: new_simple.label%snew_simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
    newpo = self.convertpot(potsource, posource)
    print newpo
    assert str(self.singleunit(newpo)) == poexpected
def test_merging_location_and_whitespace_change(self):
    """test that even if the location changes that if the msgid only has whitespace changes we can still merge"""
    potsource = '''#: singlespace.label%ssinglespace.accesskey\nmsgid "&We have spaces"\nmsgstr ""\n''' % po.lsep
    posource = '''#: doublespace.label%sdoublespace.accesskey\nmsgid "&We have spaces"\nmsgstr "&One het spasies"\n''' % po.lsep
    # Whitespace-only msgid difference: merge succeeds but is flagged fuzzy.
    poexpected = '''#: singlespace.label%ssinglespace.accesskey\n#, fuzzy\nmsgid "&We have spaces"\nmsgstr "&One het spasies"\n''' % po.lsep
    newpo = self.convertpot(potsource, posource)
    print newpo
    assert str(self.singleunit(newpo)) == poexpected
def test_merging_location_ambiguous_with_disambiguous(self):
    """test that when we have a PO in ambiguous (Gettext form) and merge with disamabiguous (KDE comment form)
    that we don't duplicate the location #: comments"""
    # POT uses KDE-style "_: location" disambiguation comments embedded in
    # the msgid; the PO uses plain Gettext form with two #: locations.
    potsource = '''#: location.c:1\nmsgid ""\n"_: location.c:1\\n"\n"Source"\nmsgstr ""\n\n''' + \
        '''#: location.c:10\nmsgid ""\n"_: location.c:10\\n"\n"Source"\nmsgstr ""\n'''
    posource = '''#: location.c:1\n#: location.c:10\nmsgid "Source"\nmsgstr "Target"\n\n'''
    # Each disambiguated unit keeps a single #: comment and goes fuzzy.
    poexpected1 = '''#: location.c:1\n#, fuzzy\nmsgid ""\n"_: location.c:1\\n"\n"Source"\nmsgstr "Target"\n'''
    poexpected2 = '''#: location.c:10\n#, fuzzy\nmsgid ""\n"_: location.c:10\\n"\n"Source"\nmsgstr "Target"\n'''
    newpo = self.convertpot(potsource, posource)
    print "Expected:\n", poexpected1, "Actual:\n", newpo.units[1]
    assert str(newpo.units[1]) == poexpected1
    assert str(newpo.units[2]) == poexpected2
def wtest_merging_accelerator_changes(self):
    """test that a change in the accelerator location still allows merging"""
    # NOTE(review): the "wtest_" prefix keeps this test out of collection --
    # presumably the merge does not yet handle a moved "&" accelerator.
    potsource = '''#: someline.c\nmsgid "A&bout"\nmsgstr ""\n'''
    posource = '''#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
    poexpected = '''#: someline.c\nmsgid "A&bout"\nmsgstr "&Info"\n'''
    newpo = self.convertpot(potsource, posource)
    print newpo
    assert str(self.singleunit(newpo)) == poexpected
def xtest_lines_cut_differently(self):
    """Checks that the correct formatting is preserved when pot and po lines differ."""
    # NOTE(review): the "xtest_" prefix keeps this test out of collection --
    # line-wrap preservation is apparently not supported yet.
    potsource = '''#: simple.label\nmsgid "Line split "\n"differently"\nmsgstr ""\n'''
    posource = '''#: simple.label\nmsgid "Line"\n" split differently"\nmsgstr "Lyne verskillend gesny"\n'''
    newpo = self.convertpot(potsource, posource)
    newpounit = self.singleunit(newpo)
    # The PO file's own line-wrapping should win over the template's.
    assert str(newpounit) == posource
def test_merging_automatic_comments_dont_duplicate(self):
    """Merging identical #. automatic comments must not duplicate them."""
    pot = '''#. Row 35\nmsgid "&About"\nmsgstr ""\n'''
    translated = '''#. Row 35\nmsgid "&About"\nmsgstr "&Info"\n'''
    merged_unit = self.singleunit(self.convertpot(pot, translated))
    assert str(merged_unit) == translated
def test_merging_automatic_comments_new_overides_old(self):
    """The template's #. automatic comment replaces the stale one from the translation."""
    pot = '''#. new comment\n#: someline.c\nmsgid "&About"\nmsgstr ""\n'''
    translated = '''#. old comment\n#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
    expected = '''#. new comment\n#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
    merged = self.convertpot(pot, translated)
    assert str(self.singleunit(merged)) == expected
def test_merging_comments_with_blank_comment_lines(self):
    """A blank "#" line inside a translator comment block survives the merge."""
    pot = '''#: someline.c\nmsgid "About"\nmsgstr ""\n'''
    translated = '''# comment1\n#\n# comment2\n#: someline.c\nmsgid "About"\nmsgstr "Omtrent"\n'''
    merged = self.convertpot(pot, translated)
    # The merged unit must be byte-identical to the translated input.
    assert str(self.singleunit(merged)) == translated
def test_empty_commentlines(self):
    """Check that comment blocks containing empty "# -" style lines merge unchanged."""
    potsource = '''#: paneSecurity.title
msgid "Security"
msgstr ""
'''
    posource = '''# - Contributor(s):
# -
# - Alternatively, the
# -
#: paneSecurity.title
msgid "Security"
msgstr "Sekuriteit"
'''
    # The translator comment block (including the bare "# -" lines) must
    # survive the merge untouched.
    poexpected = posource
    newpo = self.convertpot(potsource, posource)
    newpounit = self.singleunit(newpo)
    print "expected"
    print poexpected
    print "got:"
    print str(newpounit)
    assert str(newpounit) == poexpected
def test_merging_msgidcomments(self):
    """ensure that we can merge msgidcomments messages"""
    # KDE-style "_: comment\n" msgid comments must match up during the merge.
    potsource = r'''#: window.width
msgid ""
"_: Do not translate this.\n"
"36em"
msgstr ""
'''
    posource = r'''#: window.width
msgid ""
"_: Do not translate this.\n"
"36em"
msgstr "36em"
'''
    newpo = self.convertpot(potsource, posource)
    newpounit = self.singleunit(newpo)
    assert str(newpounit) == posource
def test_merging_msgid_with_msgidcomment(self):
    """test that we can merge an otherwise identical string that has a different msgid"""
    # Two units share the text "Certificates" but differ in their KDE-style
    # "_: ..." msgid comments, so they are distinct messages.
    potsource = r'''#: pref.certs.title
msgid ""
"_: pref.certs.title\n"
"Certificates"
msgstr ""

#: certs.label
msgid ""
"_: certs.label\n"
"Certificates"
msgstr ""
'''
    posource = r'''#: pref.certs.title
msgid ""
"_: pref.certs.title\n"
"Certificates"
msgstr ""

#: certs.label
msgid ""
"_: certs.label\n"
"Certificates"
msgstr "Sertifikate"
'''
    # The untranslated unit borrows the sibling's translation as a fuzzy match.
    expected = r'''#: pref.certs.title
#, fuzzy
msgid ""
"_: pref.certs.title\n"
"Certificates"
msgstr "Sertifikate"
'''
    newpo = self.convertpot(potsource, posource)
    newpounit = newpo.units[1]
    assert str(newpounit) == expected
def test_merging_plurals(self):
"""ensure that we can merge plural messages"""
potsource = '''msgid "One"\nmsgid_plural "Two"\nmsgstr[0] ""\nmsgstr[1] ""\n'''
posource = '''msgid "One"\nmsgid_plural "Two"\nmsgstr[0] "Een"\nmsgstr[1] "Twee"\nmsgstr[2] "Drie"\n'''
newpo = self.convertpot(potsource, posource)
print newpo
newpounit = self.singleunit(newpo)
assert str(newpounit) == posource
def test_merging_obsoleting_messages(self):
    """check that we obsolete messages no longer present in the new file"""
    # add empty msgid line to help factory identify format
    potsource = 'msgid ""\nmsgstr ""\n'
    posource = '# Some comment\n#. Extracted comment\n#: obsoleteme:10\nmsgid "One"\nmsgstr "Een"\n'
    # Translator (#) comments survive obsoleting; extracted (#.) and
    # location (#:) comments are dropped.
    expected = '# Some comment\n#~ msgid "One"\n#~ msgstr "Een"\n'
    newpo = self.convertpot(potsource, posource)
    print str(newpo)
    newpounit = self.singleunit(newpo)
    assert str(newpounit) == expected
def test_not_obsoleting_empty_messages(self):
"""check that we don't obsolete (and keep) untranslated messages"""
#add emtpy msgid line to help factory identify format
potsource = 'msgid ""\nmsgstr ""\n'
posource = '#: obsoleteme:10\nmsgid "One"\nmsgstr ""\n'
newpo = self.convertpot(potsource, posource)
print str(newpo)
# We should only have the header
assert len(newpo.units) == 1
def test_merging_new_before_obsolete(self):
    """test to check that we place new blank message before obsolete messages"""
    potsource = '''#: newline.c\nmsgid "&About"\nmsgstr ""\n'''
    posource = '''#~ msgid "Old"\n#~ msgstr "Oud"\n'''
    newpo = self.convertpot(potsource, posource)
    assert len(newpo.units) == 3
    assert newpo.units[0].isheader()
    assert newpo.units[2].isobsolete()
    assert str(newpo.units[1]) == potsource
    assert str(newpo.units[2]) == posource
    # Now test with real units present in posource
    posource2 = '''msgid "Old"\nmsgstr "Oud"\n'''
    # BUG FIX: this second conversion previously passed posource again, so
    # the live-unit variant (posource2) was never actually exercised.
    newpo = self.convertpot(potsource, posource2)
    assert len(newpo.units) == 3
    assert newpo.units[0].isheader()
    assert newpo.units[2].isobsolete()
    assert str(newpo.units[1]) == potsource
    # The live "Old" unit is obsoleted, which renders exactly as posource.
    assert str(newpo.units[2]) == posource
def test_merging_resurect_obsolete_messages(self):
    """check that we can reuse old obsolete messages if the message comes back"""
    potsource = '''#: resurect.c\nmsgid "&About"\nmsgstr ""\n'''
    posource = '''#~ msgid "&About"\n#~ msgstr "&Omtrent"\n'''
    # The obsolete translation is revived as a live, non-fuzzy unit with the
    # template's location comment.
    expected = '''#: resurect.c\nmsgid "&About"\nmsgstr "&Omtrent"\n'''
    newpo = self.convertpot(potsource, posource)
    print newpo
    assert len(newpo.units) == 2
    assert newpo.units[0].isheader()
    newpounit = self.singleunit(newpo)
    assert str(newpounit) == expected
def test_merging_resurect_obsolete_messages_into_msgidcomment(self):
    """check that we can reuse old obsolete messages even if the recipient has a msgidcomment"""
    potsource = '''#: resurect1.c\nmsgid "About"\nmsgstr ""\n\n''' + \
                '''#: resurect2.c\nmsgid ""\n"_: resurect2.c\\n"\n"About"\nmsgstr ""\n'''
    posource = '''#~ msgid "About"\n#~ msgstr "Omtrent"\n'''
    # Plain-msgid recipient: exact revival, not fuzzy.
    expected1 = '''#: resurect1.c\nmsgid "About"\nmsgstr "Omtrent"\n'''
    # msgidcomment recipient: revived as a fuzzy match.
    expected2 = '''#: resurect2.c\n#, fuzzy\nmsgid ""\n"_: resurect2.c\\n"\n"About"\nmsgstr "Omtrent"\n'''
    newpo = self.convertpot(potsource, posource)
    print newpo
    assert len(newpo.units) == 3
    assert newpo.units[0].isheader()
    assert str(newpo.units[1]) == expected1
    assert str(newpo.units[2]) == expected2
def test_header_initialisation(self):
    """test to check that we initialise the header correctly"""
    # Template header: the merged output must take Report-Msgid-Bugs-To,
    # POT-Creation-Date and X-Generator from here.
    potsource = r'''#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: new@example.com\n"
"POT-Creation-Date: 2006-11-11 11:11+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\n"
"X-Generator: Translate Toolkit 0.10rc2\n"
'''
    # Existing translation header: translation-specific fields
    # (Project-Id-Version, PO-Revision-Date, Last-Translator, Language-Team,
    # Plural-Forms) must be preserved from here.
    posource = r'''msgid ""
msgstr ""
"Project-Id-Version: Pootle 0.10\n"
"Report-Msgid-Bugs-To: old@example.com\n"
"POT-Creation-Date: 2006-01-01 01:01+0100\n"
"PO-Revision-Date: 2006-09-09 09:09+0900\n"
"Last-Translator: Joe Translate <joe@example.com>\n"
"Language-Team: Pig Latin <piglatin@example.com>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"X-Generator: Translate Toolkit 0.9\n"
'''
    expected = r'''msgid ""
msgstr ""
"Project-Id-Version: Pootle 0.10\n"
"Report-Msgid-Bugs-To: new@example.com\n"
"POT-Creation-Date: 2006-11-11 11:11+0000\n"
"PO-Revision-Date: 2006-09-09 09:09+0900\n"
"Last-Translator: Joe Translate <joe@example.com>\n"
"Language-Team: Pig Latin <piglatin@example.com>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"X-Generator: Translate Toolkit 0.10rc2\n"
'''
    newpo = self.convertpot(potsource, posource)
    print 'Output Header:\n%s' % newpo
    print 'Expected Header:\n%s' % expected
    assert str(newpo) == expected
def test_merging_comments(self):
"""Test that we can merge comments correctly"""
potsource = '''#. Don't do it!\n#: file.py:1\nmsgid "One"\nmsgstr ""\n'''
posource = '''#. Don't do it!\n#: file.py:2\nmsgid "One"\nmsgstr "Een"\n'''
poexpected = '''#. Don't do it!\n#: file.py:1\nmsgid "One"\nmsgstr "Een"\n'''
newpo = self.convertpot(potsource, posource)
print newpo
newpounit = self.singleunit(newpo)
assert str(newpounit) == poexpected
def test_merging_typecomments(self):
    """Test that we can merge with typecomments"""
    potsource = '''#: file.c:1\n#, c-format\nmsgid "%d pipes"\nmsgstr ""\n'''
    posource = '''#: file.c:2\nmsgid "%d pipes"\nmsgstr "%d pype"\n'''
    # The template's "#, c-format" flag is carried into the merged unit.
    poexpected = '''#: file.c:1\n#, c-format\nmsgid "%d pipes"\nmsgstr "%d pype"\n'''
    newpo = self.convertpot(potsource, posource)
    newpounit = self.singleunit(newpo)
    print newpounit
    assert str(newpounit) == poexpected
    # A changed msgid (%s -> %d) must fuzzy-match and keep both the fuzzy
    # and the c-format flags.
    potsource = '''#: file.c:1\n#, c-format\nmsgid "%d computers"\nmsgstr ""\n'''
    posource = '''#: file.c:2\n#, c-format\nmsgid "%s computers "\nmsgstr "%s-rekenaars"\n'''
    poexpected = '''#: file.c:1\n#, fuzzy, c-format\nmsgid "%d computers"\nmsgstr "%s-rekenaars"\n'''
    newpo = self.convertpot(potsource, posource)
    newpounit = self.singleunit(newpo)
    assert newpounit.isfuzzy()
    assert newpounit.hastypecomment("c-format")
def test_msgctxt(self):
    """Test that msgctxt is migrated correctly"""
    potsource = """
#: something.h:5
msgctxt "context1"
msgid "text"
msgstr ""

#: something.h:6
msgctxt "context2"
msgid "text"
msgstr ""
"""
    posource = """
#: something.h:3
msgctxt "context0"
msgid "text"
msgstr "teks"

#: something.h:4
msgctxt "context1"
msgid "text"
msgstr "sms"
"""
    # context1 matches exactly; context2 only fuzzy-matches against the
    # dropped context0 unit's translation.
    poexpected = """
#: something.h:5
msgctxt "context1"
msgid "text"
msgstr "sms"

#: something.h:6
#, fuzzy
msgctxt "context2"
msgid "text"
msgstr "teks"
"""
    newpo = self.convertpot(potsource, posource)
    print newpo
    assert poexpected in str(newpo)
def test_empty_msgid(self):
    """Test that we handle empty msgids correctly."""
    #TODO: this test will fail if we don't have the gettext location
    # comment in the pot file
    potsource = '#: file:1\nmsgctxt "bla"\nmsgid ""\nmsgstr ""\n'
    posource = r"""
msgid ""
"Project-Id-Version: Pootle 0.10\n"
msgstr ""

msgctxt "bla"
msgid ""
msgstr "trans"
"""
    newpo = self.convertpot(potsource, posource)
    print newpo
    assert len(newpo.units) == 2
    assert newpo.units[0].isheader()
    unit = newpo.units[1]
    assert unit.source == u""
    # getid() joins msgctxt and msgid with the gettext EOT separator (\x04).
    assert unit.getid() == u"bla\04"
    assert unit.target == "trans"
    assert not unit.isfuzzy()
def test_migrate_msgidcomment_to_msgctxt(self):
    """Test that we migrate correctly from msgidcomments to msgctxt.

    This is needed for our move away from using msgidcomments for mozilla."""
    potsource = '#: bla\nmsgctxt "bla"\nmsgid ""\nmsgstr ""'
    # The old translation disambiguates via a KDE-style "_: bla\n"
    # msgidcomment rather than msgctxt.
    posource = r"""
msgid ""
"Project-Id-Version: Pootle 0.10\n"
msgstr ""

#: bla
msgid ""
"_: bla\n"
msgstr "trans"
"""
    newpo = self.convertpot(potsource, posource)
    print newpo
    assert len(newpo.units) == 2
    assert newpo.units[0].isheader()
    unit = newpo.units[1]
    assert unit.source == u""
    # The msgidcomment translation lands on the msgctxt unit (\x04 is the
    # gettext context/id separator).
    assert unit.getid() == u"bla\04"
    assert unit.target == "trans"
    assert not unit.isfuzzy()
def test_obsolete_msgctxt(self):
    """Test that obsolete units' msgctxt is preserved."""
    potsource = 'msgctxt "newContext"\nmsgid "First unit"\nmsgstr ""'
    posource = """
msgctxt "newContext"
msgid "First unit"
msgstr "Eerste eenheid"

#~ msgctxt "context"
#~ msgid "Old unit"
#~ msgstr "Ou eenheid1"

#~ msgctxt "context2"
#~ msgid "Old unit"
#~ msgstr "Ou eenheid2"

#~ msgid "Old unit"
#~ msgstr "Ou eenheid3"
"""
    newpo = self.convertpot(potsource, posource)
    print newpo
    # header + live unit + three obsolete units
    assert len(newpo.units) == 5
    assert newpo.units[1].getcontext() == 'newContext'
    # Search in unit string, because obsolete units can't return a context
    assert 'msgctxt "context"' in str(newpo.units[2])
    assert 'msgctxt "context2"' in str(newpo.units[3])
def test_small_strings(self):
    """Test that units with small source strings are not incorrectly
    populated by means of fuzzy matching."""
    # Both files contain a one-character accesskey unit "R"; the fuzzy
    # matcher must not transplant the old "S" translation onto the new unit.
    potsource = r'''#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: new@example.com\n"
"POT-Creation-Date: 2006-11-11 11:11+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\n"
"X-Generator: Translate Toolkit 0.10rc2\n"

#: new_disassociated_mozilla_accesskey
msgid "R"
msgstr ""
'''
    posource = r'''msgid ""
msgstr ""
"Project-Id-Version: Pootle 0.10\n"
"Report-Msgid-Bugs-To: old@example.com\n"
"POT-Creation-Date: 2006-01-01 01:01+0100\n"
"PO-Revision-Date: 2006-09-09 09:09+0900\n"
"Last-Translator: Joe Translate <joe@example.com>\n"
"Language-Team: Pig Latin <piglatin@example.com>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"X-Generator: Translate Toolkit 0.9\n"

#: old_disassociated_mozilla_accesskey
msgid "R"
msgstr "S"
'''
    # The new unit must come through untranslated.
    expected = r'''msgid ""
msgstr ""
"Project-Id-Version: Pootle 0.10\n"
"Report-Msgid-Bugs-To: new@example.com\n"
"POT-Creation-Date: 2006-11-11 11:11+0000\n"
"PO-Revision-Date: 2006-09-09 09:09+0900\n"
"Last-Translator: Joe Translate <joe@example.com>\n"
"Language-Team: Pig Latin <piglatin@example.com>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"X-Generator: Translate Toolkit 0.10rc2\n"

#: new_disassociated_mozilla_accesskey
msgid "R"
msgstr ""
'''
    newpo = self.convertpot(potsource, posource)
    print 'Output:\n%s' % newpo
    print 'Expected:\n%s' % expected
    assert str(newpo) == expected
class TestPOT2POCommand(test_convert.TestConvertCommand, TestPOT2PO):
    """Tests running actual pot2po commands on files"""
    # Module exercised by the TestConvertCommand machinery.
    convertmodule = pot2po

    def test_help(self):
        """tests getting help"""
        # Each help_check asserts that the given option appears in --help
        # output and removes it from the remaining options string.
        options = test_convert.TestConvertCommand.test_help(self)
        options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
        options = self.help_check(options, "-P, --pot")
        options = self.help_check(options, "--tm")
        options = self.help_check(options, "-s MIN_SIMILARITY, --similarity=MIN_SIMILARITY")
        options = self.help_check(options, "--nofuzzymatching", last=True)
| |
# Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import glob
import os
import time
import urllib2
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common.gettextutils import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova import paths
from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as virtutils
LOG = logging.getLogger(__name__)
# Configuration options for the libvirt volume drivers, registered under the
# [libvirt] group (with deprecated fallbacks in DEFAULT).
volume_opts = [
    cfg.IntOpt('num_iscsi_scan_tries',
               default=5,
               help='Number of times to rescan iSCSI target to find volume',
               deprecated_group='DEFAULT'),
    cfg.IntOpt('num_iser_scan_tries',
               default=5,
               help='Number of times to rescan iSER target to find volume',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('rbd_user',
               help='The RADOS client name for accessing rbd volumes',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('rbd_secret_uuid',
               # BUG FIX: added the missing space so the concatenated help
               # text no longer reads "rbd_uservolumes".
               help='The libvirt UUID of the secret for the rbd_user '
                    'volumes',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('nfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the NFS volume is mounted on the'
                    ' compute node',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('nfs_mount_options',
               # BUG FIX: typo "passedf" -> "passed".
               help='Mount options passed to the NFS client. See section '
                    'of the nfs man page for details',
               deprecated_group='DEFAULT'),
    cfg.IntOpt('num_aoe_discover_tries',
               default=3,
               help='Number of times to rediscover AoE target to find volume',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('glusterfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the glusterfs volume is mounted on the '
                    'compute node',
               deprecated_group='DEFAULT'),
    cfg.BoolOpt('iscsi_use_multipath',
                default=False,
                help='Use multipath connection of the iSCSI volume',
                deprecated_group='DEFAULT',
                deprecated_name='libvirt_iscsi_use_multipath'),
    cfg.BoolOpt('iser_use_multipath',
                default=False,
                help='Use multipath connection of the iSER volume',
                deprecated_group='DEFAULT',
                deprecated_name='libvirt_iser_use_multipath'),
    cfg.StrOpt('scality_sofs_config',
               help='Path or URL to Scality SOFS configuration file',
               deprecated_group='DEFAULT'),
    cfg.StrOpt('scality_sofs_mount_point',
               default='$state_path/scality',
               help='Base dir where Scality SOFS shall be mounted',
               deprecated_group='DEFAULT'),
    cfg.ListOpt('qemu_allowed_storage_drivers',
                default=[],
                help='Protocols listed here will be accessed directly '
                     'from QEMU. Currently supported protocols: [gluster]',
                deprecated_group='DEFAULT')
]

CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
    """Base class for volume drivers."""

    def __init__(self, connection, is_block_dev):
        # connection: the libvirt driver/connection object; is_block_dev
        # influences which disk driver name virtutils picks.
        self.connection = connection
        self.is_block_dev = is_block_dev

    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.driver_name = virtutils.pick_disk_driver_name(
            self.connection.get_hypervisor_version(),
            self.is_block_dev
        )
        conf.source_device = disk_info['type']
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = disk_info['dev']
        conf.target_bus = disk_info['bus']
        conf.serial = connection_info.get('serial')

        # Support for block size tuning
        data = {}
        if 'data' in connection_info:
            data = connection_info['data']
        if 'logical_block_size' in data:
            conf.logical_block_size = data['logical_block_size']
        if 'physical_block_size' in data:
            conf.physical_block_size = data['physical_block_size']

        # Extract rate_limit control parameters
        if 'qos_specs' in data and data['qos_specs']:
            tune_opts = ['total_bytes_sec', 'read_bytes_sec',
                         'write_bytes_sec', 'total_iops_sec',
                         'read_iops_sec', 'write_iops_sec']
            specs = data['qos_specs']
            if isinstance(specs, dict):
                # Recognized keys become disk_* attributes on the config;
                # unknown keys are silently skipped.
                for k, v in specs.iteritems():
                    if k in tune_opts:
                        new_key = 'disk_' + k
                        setattr(conf, new_key, v)
            else:
                LOG.warn(_('Unknown content in connection_info/'
                           'qos_specs: %s') % specs)

        # Extract access_mode control parameters
        if 'access_mode' in data and data['access_mode']:
            access_mode = data['access_mode']
            if access_mode in ('ro', 'rw'):
                conf.readonly = access_mode == 'ro'
            else:
                # Anything other than ro/rw is rejected outright.
                msg = (_('Unknown content in connection_info/access_mode: %s')
                       % access_mode)
                LOG.error(msg)
                raise exception.InvalidVolumeAccessMode(
                    access_mode=access_mode)

        return conf

    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        # Base implementation has nothing to tear down.
        pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
    """Class for volumes backed by local file."""

    def __init__(self, connection):
        super(LibvirtVolumeDriver, self).__init__(connection,
                                                  is_block_dev=True)

    def connect_volume(self, connection_info, disk_info):
        """Connect the volume to a local device."""
        conf = super(LibvirtVolumeDriver, self).connect_volume(
            connection_info, disk_info)
        # Expose the host block device directly to the guest.
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach fake volumes to libvirt."""

    def __init__(self, connection):
        super(LibvirtFakeVolumeDriver, self).__init__(connection,
                                                      is_block_dev=True)

    def connect_volume(self, connection_info, disk_info):
        """Connect the volume to a fake device."""
        conf = super(LibvirtFakeVolumeDriver, self).connect_volume(
            connection_info, disk_info)
        # Present a network disk with the placeholder "fake" protocol/name.
        conf.source_type = "network"
        conf.source_protocol = "fake"
        conf.source_name = "fake"
        return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach Network volumes to libvirt."""

    def __init__(self, connection):
        super(LibvirtNetVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def connect_volume(self, connection_info, disk_info):
        # Build a network-disk config; protocol comes straight from the
        # connection's driver_volume_type (e.g. 'rbd').
        conf = super(LibvirtNetVolumeDriver,
                     self).connect_volume(connection_info,
                                          disk_info)
        netdisk_properties = connection_info['data']
        conf.source_type = "network"
        conf.source_protocol = connection_info['driver_volume_type']
        conf.source_name = netdisk_properties.get('name')
        conf.source_hosts = netdisk_properties.get('hosts', [])
        conf.source_ports = netdisk_properties.get('ports', [])
        auth_enabled = netdisk_properties.get('auth_enabled')
        if (conf.source_protocol == 'rbd' and
                CONF.libvirt.rbd_secret_uuid):
            # Local nova config overrides/forces rbd authentication.
            conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
            auth_enabled = True  # Force authentication locally
            if CONF.libvirt.rbd_user:
                conf.auth_username = CONF.libvirt.rbd_user
        if auth_enabled:
            # Fall back to auth data supplied by the volume service for any
            # field not already set from local config.
            conf.auth_username = (conf.auth_username or
                                  netdisk_properties['auth_username'])
            conf.auth_secret_type = netdisk_properties['secret_type']
            conf.auth_secret_uuid = (conf.auth_secret_uuid or
                                     netdisk_properties['secret_uuid'])
        return conf
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
    super(LibvirtISCSIVolumeDriver, self).__init__(connection,
                                                   is_block_dev=True)
    # Cache the iSCSI tunables from nova.conf ([libvirt] group).
    self.num_scan_tries = CONF.libvirt.num_iscsi_scan_tries
    self.use_multipath = CONF.libvirt.iscsi_use_multipath
def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
    """Run iscsiadm in node mode against the target described by
    iscsi_properties, returning (stdout, stderr).

    check_exit_code may be passed via kwargs (default 0).
    """
    check_exit_code = kwargs.pop('check_exit_code', 0)
    (out, err) = utils.execute('iscsiadm', '-m', 'node', '-T',
                               iscsi_properties['target_iqn'],
                               '-p', iscsi_properties['target_portal'],
                               *iscsi_command, run_as_root=True,
                               check_exit_code=check_exit_code)
    LOG.debug(_("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s"),
              {'command': iscsi_command, 'out': out, 'err': err})
    return (out, err)
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
                     **kwargs):
    """Update a single node record property via "iscsiadm --op update"."""
    update_command = ('--op', 'update',
                      '-n', property_key,
                      '-v', property_value)
    return self._run_iscsiadm(iscsi_properties, update_command, **kwargs)
def _get_target_portals_from_iscsiadm_output(self, output):
    """Split iscsiadm discovery output into [portal, iqn] pairs, one per line."""
    # return both portals and iqns
    return [entry.split() for entry in output.splitlines()]
@utils.synchronized('connect_volume')
def connect_volume(self, connection_info, disk_info):
    """Attach the volume to instance_name.

    Logs in to the iSCSI portal(s), waits for the /dev/disk/by-path node
    to appear (rescanning between attempts), and returns the libvirt disk
    config pointing at the resulting block device (or its multipath
    device when multipath is enabled).
    """
    conf = super(LibvirtISCSIVolumeDriver,
                 self).connect_volume(connection_info,
                                      disk_info)

    iscsi_properties = connection_info['data']

    if self.use_multipath:
        #multipath installed, discovering other targets if available
        #multipath should be configured on the nova-compute node,
        #in order to fit storage vendor
        out = self._run_iscsiadm_bare(['-m',
                                       'discovery',
                                       '-t',
                                       'sendtargets',
                                       '-p',
                                       iscsi_properties['target_portal']],
                                      check_exit_code=[0, 255])[0] \
            or ""

        # Log in to every discovered portal/iqn path of this target.
        for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
            props = iscsi_properties.copy()
            props['target_portal'] = ip
            props['target_iqn'] = iqn
            self._connect_to_iscsi_portal(props)

        self._rescan_iscsi()
    else:
        self._connect_to_iscsi_portal(iscsi_properties)

        # Detect new/resized LUNs for existing sessions
        self._run_iscsiadm(iscsi_properties, ("--rescan",))

    host_device = self._get_host_device(iscsi_properties)

    # The /dev/disk/by-path/... node is not always present immediately
    # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
    tries = 0
    disk_dev = disk_info['dev']
    while not os.path.exists(host_device):
        if tries >= self.num_scan_tries:
            raise exception.NovaException(_("iSCSI device not found at %s")
                                          % (host_device))

        LOG.warn(_("ISCSI volume not yet found at: %(disk_dev)s. "
                   "Will rescan & retry. Try number: %(tries)s"),
                 {'disk_dev': disk_dev,
                  'tries': tries})

        # The rescan isn't documented as being necessary(?), but it helps
        self._run_iscsiadm(iscsi_properties, ("--rescan",))

        tries = tries + 1
        if not os.path.exists(host_device):
            # Quadratic backoff between rescans.
            time.sleep(tries ** 2)

    if tries != 0:
        LOG.debug(_("Found iSCSI node %(disk_dev)s "
                    "(after %(tries)s rescans)"),
                  {'disk_dev': disk_dev,
                   'tries': tries})

    if self.use_multipath:
        #we use the multipath device instead of the single path device
        self._rescan_multipath()
        multipath_device = self._get_multipath_device_name(host_device)
        if multipath_device is not None:
            host_device = multipath_device

    conf.source_type = "block"
    conf.source_path = host_device
    return conf
@utils.synchronized('connect_volume')
def disconnect_volume(self, connection_info, disk_dev):
    """Detach the volume from instance_name.

    Only logs out of the target when no other LUNs from that target are
    still in use on this host.
    """
    iscsi_properties = connection_info['data']
    host_device = self._get_host_device(iscsi_properties)
    multipath_device = None
    if self.use_multipath:
        multipath_device = self._get_multipath_device_name(host_device)

    super(LibvirtISCSIVolumeDriver,
          self).disconnect_volume(connection_info, disk_dev)

    if self.use_multipath and multipath_device:
        # Multipath teardown has its own, more involved flow.
        return self._disconnect_volume_multipath_iscsi(iscsi_properties,
                                                       multipath_device)

    # NOTE(vish): Only disconnect from the target if no luns from the
    #             target are in use.
    device_prefix = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-" %
                     (iscsi_properties['target_portal'],
                      iscsi_properties['target_iqn']))
    devices = self.connection.get_all_block_devices()
    devices = [dev for dev in devices if dev.startswith(device_prefix)]
    if not devices:
        self._disconnect_from_iscsi_portal(iscsi_properties)
    elif host_device not in devices:
        # Delete device if LUN is not in use by another instance
        self._delete_device(host_device)
def _delete_device(self, device_path):
    """Remove a SCSI device from the host via its sysfs delete control."""
    device_name = os.path.basename(os.path.realpath(device_path))
    delete_control = '/sys/block/' + device_name + '/device/delete'
    if os.path.exists(delete_control):
        # Copy '1' from stdin to the device delete control file
        utils.execute('cp', '/dev/stdin', delete_control,
                      process_input='1', run_as_root=True)
    else:
        # No sysfs delete knob for this device; log and carry on.
        LOG.warn(_("Unable to delete volume device %s"), device_name)
def _remove_multipath_device_descriptor(self, disk_descriptor):
    """Flush the device-mapper entry for a /dev/mapper/... multipath device.

    Failures are logged and swallowed: not every cinder backend needs the
    mapping removed.
    """
    disk_descriptor = disk_descriptor.replace('/dev/mapper/', '')
    try:
        self._run_multipath(['-f', disk_descriptor],
                            check_exit_code=[0, 1])
    # BUG FIX: _run_multipath uses utils.execute, which raises
    # processutils.ProcessExecutionError (the type caught elsewhere in this
    # module); catching exception.ProcessExecutionError here meant the
    # handler could never fire and the error propagated.
    except processutils.ProcessExecutionError as exc:
        # Because not all cinder drivers need to remove the dev mapper,
        # here just logs a warning to avoid affecting those drivers in
        # exceptional cases.
        LOG.warn(_('Failed to remove multipath device descriptor '
                   '%(dev_mapper)s. Exception message: %(msg)s')
                 % {'dev_mapper': disk_descriptor,
                    'msg': exc.message})
def _disconnect_volume_multipath_iscsi(self, iscsi_properties,
                                       multipath_device):
    """Tear down a multipath iSCSI volume.

    Logs out of the target's portals only when no other attached
    multipath device shares an iqn with it; otherwise just cleans up the
    unused paths and the device-mapper descriptor.
    """
    self._rescan_iscsi()
    self._rescan_multipath()
    block_devices = self.connection.get_all_block_devices()
    # Normalize every in-use block device to its multipath device name.
    devices = []
    for dev in block_devices:
        if "/mapper/" in dev:
            devices.append(dev)
        else:
            mpdev = self._get_multipath_device_name(dev)
            if mpdev:
                devices.append(mpdev)

    # Do a discovery to find all targets.
    # Targets for multiple paths for the same multipath device
    # may not be the same.
    out = self._run_iscsiadm_bare(['-m',
                                   'discovery',
                                   '-t',
                                   'sendtargets',
                                   '-p',
                                   iscsi_properties['target_portal']],
                                  check_exit_code=[0, 255])[0] \
        or ""

    ips_iqns = self._get_target_portals_from_iscsiadm_output(out)

    if not devices:
        # disconnect if no other multipath devices
        self._disconnect_mpath(iscsi_properties, ips_iqns)
        return

    # Get a target for all other multipath devices
    other_iqns = [self._get_multipath_iqn(device)
                  for device in devices]
    # Get all the targets for the current multipath device
    current_iqns = [iqn for ip, iqn in ips_iqns]

    in_use = False
    for current in current_iqns:
        if current in other_iqns:
            in_use = True
            break

    # If no other multipath device attached has the same iqn
    # as the current device
    if not in_use:
        # disconnect if no other multipath devices with same iqn
        self._disconnect_mpath(iscsi_properties, ips_iqns)
        return
    elif multipath_device not in devices:
        # delete the devices associated w/ the unused multipath
        self._delete_mpath(iscsi_properties, multipath_device, ips_iqns)

    # else do not disconnect iscsi portals,
    # as they are used for other luns,
    # just remove multipath mapping device descriptor
    self._remove_multipath_device_descriptor(multipath_device)
    return
def _connect_to_iscsi_portal(self, iscsi_properties):
    """Ensure a node record exists, set auth, and log in if not already.

    Successful logins are also marked node.startup=automatic so the
    session is restored on reboot.
    """
    # NOTE(vish): If we are on the same host as nova volume, the
    #             discovery makes the target so we don't need to
    #             run --op new. Therefore, we check to see if the
    #             target exists, and if we get 255 (Not Found), then
    #             we run --op new. This will also happen if another
    #             volume is using the same target.
    try:
        self._run_iscsiadm(iscsi_properties, ())
    except processutils.ProcessExecutionError as exc:
        # iscsiadm returns 21 for "No records found" after version 2.0-871
        if exc.exit_code in [21, 255]:
            self._reconnect(iscsi_properties)
        else:
            raise

    if iscsi_properties.get('auth_method'):
        # Push CHAP credentials into the node record before login.
        self._iscsiadm_update(iscsi_properties,
                              "node.session.auth.authmethod",
                              iscsi_properties['auth_method'])
        self._iscsiadm_update(iscsi_properties,
                              "node.session.auth.username",
                              iscsi_properties['auth_username'])
        self._iscsiadm_update(iscsi_properties,
                              "node.session.auth.password",
                              iscsi_properties['auth_password'])

    #duplicate logins crash iscsiadm after load,
    #so we scan active sessions to see if the node is logged in.
    out = self._run_iscsiadm_bare(["-m", "session"],
                                  run_as_root=True,
                                  check_exit_code=[0, 1, 21])[0] or ""

    # Session lines look like "tcp: [sid] portal,tpgt iqn".
    portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
               for p in out.splitlines() if p.startswith("tcp:")]

    stripped_portal = iscsi_properties['target_portal'].split(",")[0]
    if len(portals) == 0 or len([s for s in portals
                                 if stripped_portal ==
                                 s['portal'].split(",")[0]
                                 and
                                 s['iqn'] ==
                                 iscsi_properties['target_iqn']]
                                ) == 0:
        try:
            self._run_iscsiadm(iscsi_properties,
                               ("--login",),
                               check_exit_code=[0, 255])
        except processutils.ProcessExecutionError as err:
            #as this might be one of many paths,
            #only set successful logins to startup automatically
            if err.exit_code in [15]:
                # 15: session already exists -- treat as logged in.
                self._iscsiadm_update(iscsi_properties,
                                      "node.startup",
                                      "automatic")
                return

        self._iscsiadm_update(iscsi_properties,
                              "node.startup",
                              "automatic")
def _disconnect_from_iscsi_portal(self, iscsi_properties):
    """Log out of the target and delete its node record.

    Exit codes 21 (no records) and 255 (not found) are tolerated at every
    step so an already-gone session is not an error.
    """
    # Order matters: disable auto-startup, log out, then delete the record.
    self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
                          check_exit_code=[0, 21, 255])
    self._run_iscsiadm(iscsi_properties, ("--logout",),
                       check_exit_code=[0, 21, 255])
    self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
                       check_exit_code=[0, 21, 255])
def _get_multipath_device_name(self, single_path_device):
    """Return the /dev/mapper/... multipath device for a single path
    device, or None when no mapping exists.
    """
    device = os.path.realpath(single_path_device)
    out = self._run_multipath(['-ll',
                               device],
                              check_exit_code=[0, 1])[0]
    mpath_line = [line for line in out.splitlines()
                  if "scsi_id" not in line]  # ignore udev errors
    if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
        # First token of the first real output line is the map name.
        return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]

    return None
def _get_iscsi_devices(self):
try:
devices = list(os.walk('/dev/disk/by-path'))[0][-1]
except IndexError:
return []
return [entry for entry in devices if entry.startswith("ip-")]
def _delete_mpath(self, iscsi_properties, multipath_device, ips_iqns):
entries = self._get_iscsi_devices()
# Loop through ips_iqns to construct all paths
iqn_luns = []
for ip, iqn in ips_iqns:
iqn_lun = '%s-lun-%s' % (iqn,
iscsi_properties.get('target_lun', 0))
iqn_luns.append(iqn_lun)
for dev in ['/dev/disk/by-path/%s' % dev for dev in entries]:
for iqn_lun in iqn_luns:
if iqn_lun in dev:
self._delete_device(dev)
self._rescan_multipath()
def _disconnect_mpath(self, iscsi_properties, ips_iqns):
for ip, iqn in ips_iqns:
props = iscsi_properties.copy()
props['target_portal'] = ip
props['target_iqn'] = iqn
self._disconnect_from_iscsi_portal(props)
self._rescan_multipath()
def _get_multipath_iqn(self, multipath_device):
entries = self._get_iscsi_devices()
for entry in entries:
entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
entry_multipath = self._get_multipath_device_name(entry_real_path)
if entry_multipath == multipath_device:
return entry.split("iscsi-")[1].split("-lun")[0]
return None
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('iscsiadm',
*iscsi_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug(_("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s"),
{'command': iscsi_command, 'out': out, 'err': err})
return (out, err)
def _run_multipath(self, multipath_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('multipath',
*multipath_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug(_("multipath %(command)s: stdout=%(out)s stderr=%(err)s"),
{'command': multipath_command, 'out': out, 'err': err})
return (out, err)
def _rescan_iscsi(self):
self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
check_exit_code=[0, 1, 21, 255])
self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
check_exit_code=[0, 1, 21, 255])
def _rescan_multipath(self):
self._run_multipath('-r', check_exit_code=[0, 1, 21])
def _get_host_device(self, iscsi_properties):
return ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
(iscsi_properties['target_portal'],
iscsi_properties['target_iqn'],
iscsi_properties.get('target_lun', 0)))
    def _reconnect(self, iscsi_properties):
        # Recreate the iscsiadm node record so the portal can be logged
        # into again ("--op new" re-adds the node to the iscsiadm db).
        self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
    """Driver to attach Network volumes to libvirt over iSER (RDMA)."""

    def __init__(self, connection):
        super(LibvirtISERVolumeDriver, self).__init__(connection)
        # iSER has its own retry and multipath configuration knobs.
        self.num_scan_tries = CONF.libvirt.num_iser_scan_tries
        self.use_multipath = CONF.libvirt.iser_use_multipath

    def _get_multipath_iqn(self, multipath_device):
        """Find the IQN of an iSER path backing a multipath device."""
        for entry in self._get_iscsi_devices():
            real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
            if self._get_multipath_device_name(real_path) == multipath_device:
                # Entry names look like "ip-...-iser-<iqn>-lun-<n>".
                return entry.split("iser-")[1].split("-lun")[0]
        return None

    def _get_host_device(self, iser_properties):
        """Locate the by-path device node for an iSER volume, if present."""
        # Give udev a moment to create the symlink before globbing.
        time.sleep(1)
        suffix = ("ip-%s-iscsi-%s-lun-%s" %
                  (iser_properties['target_portal'],
                   iser_properties['target_iqn'],
                   iser_properties.get('target_lun', 0)))
        # Glob with a leading wildcard because the transport prefix of
        # the entry differs from the plain iSCSI form.
        matches = glob.glob('/dev/disk/by-path/*%s' % suffix)
        return matches[0] if matches else None

    def _reconnect(self, iser_properties):
        """Recreate the node record, forcing the iSER transport."""
        self._run_iscsiadm(iser_properties,
                           ('--interface', 'iser', '--op', 'new'))
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
    """Class implements libvirt part of volume driver for NFS."""
    def __init__(self, connection):
        """Create back-end to nfs."""
        # NFS volumes are plain files on a mounted share, not block devices.
        super(LibvirtNFSVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        conf = super(LibvirtNFSVolumeDriver,
                     self).connect_volume(connection_info,
                                          disk_info)
        options = connection_info['data'].get('options')
        # Mount the export (if not already mounted) and point libvirt at
        # the image file inside the mount point.
        path = self._ensure_mounted(connection_info['data']['export'], options)
        path = os.path.join(path, connection_info['data']['name'])
        conf.source_type = 'file'
        conf.source_path = path
        conf.driver_format = connection_info['data'].get('format', 'raw')
        return conf
    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        export = connection_info['data']['export']
        # The mount point name is a hash of the export string (matching
        # _ensure_mounted), so it can be recomputed here.
        mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
                                  utils.get_hash_str(export))
        try:
            utils.execute('umount', mount_path, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # A busy share just means another instance still uses it —
            # that is expected, so only log at debug level.
            if 'target is busy' in exc.message:
                LOG.debug(_("The NFS share %s is still in use."), export)
            else:
                LOG.exception(_("Couldn't unmount the NFS share %s"), export)
    def _ensure_mounted(self, nfs_export, options=None):
        """@type nfs_export: string
        @type options: string
        """
        # Returns the local mount point for the export, mounting it first
        # if it is not already mounted.
        mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
                                  utils.get_hash_str(nfs_export))
        if not virtutils.is_mounted(mount_path, nfs_export):
            self._mount_nfs(mount_path, nfs_export, options, ensure=True)
        return mount_path
    def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
        """Mount nfs export to mount path."""
        utils.execute('mkdir', '-p', mount_path)
        # Construct the NFS mount command.
        nfs_cmd = ['mount', '-t', 'nfs']
        if CONF.libvirt.nfs_mount_options is not None:
            nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
        if options is not None:
            nfs_cmd.extend(options.split(' '))
        nfs_cmd.extend([nfs_share, mount_path])
        try:
            utils.execute(*nfs_cmd, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # With ensure=True an already-mounted share is not an error;
            # anything else is re-raised to the caller.
            if ensure and 'already mounted' in exc.message:
                LOG.warn(_("%s is already mounted"), nfs_share)
            else:
                raise
class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach AoE volumes to libvirt."""
    def __init__(self, connection):
        # AoE devices appear as block devices under /dev/etherd.
        super(LibvirtAOEVolumeDriver,
              self).__init__(connection, is_block_dev=True)
    def _aoe_discover(self):
        """Call aoe-discover (aoe-tools) AoE Discover."""
        (out, err) = utils.execute('aoe-discover',
                                   run_as_root=True, check_exit_code=0)
        return (out, err)
    def _aoe_revalidate(self, aoedev):
        """Revalidate the LUN Geometry (When an AoE ID is reused)."""
        (out, err) = utils.execute('aoe-revalidate', aoedev,
                                   run_as_root=True, check_exit_code=0)
        return (out, err)
    def connect_volume(self, connection_info, mount_device):
        """Discover the AoE device and return libvirt config for it."""
        shelf = connection_info['data']['target_shelf']
        lun = connection_info['data']['target_lun']
        # AoE device names have the form e<shelf>.<lun>.
        aoedev = 'e%s.%s' % (shelf, lun)
        aoedevpath = '/dev/etherd/%s' % (aoedev)
        if os.path.exists(aoedevpath):
            # NOTE(jbr_): If aoedevpath already exists, revalidate the LUN.
            self._aoe_revalidate(aoedev)
        else:
            # NOTE(jbr_): If aoedevpath does not exist, do a discover.
            self._aoe_discover()
        #NOTE(jbr_): Device path is not always present immediately
        def _wait_for_device_discovery(aoedevpath, mount_device):
            # Polled by the looping call below; re-runs discovery until the
            # device node appears or the configured retry limit is hit.
            tries = self.tries
            if os.path.exists(aoedevpath):
                raise loopingcall.LoopingCallDone()
            if self.tries >= CONF.libvirt.num_aoe_discover_tries:
                raise exception.NovaException(_("AoE device not found at %s") %
                                              (aoedevpath))
            LOG.warn(_("AoE volume not yet found at: %(aoedevpath)s. "
                       "Try number: %(tries)s"),
                     {'aoedevpath': aoedevpath,
                      'tries': tries})
            self._aoe_discover()
            self.tries = self.tries + 1
        # Retry state lives on self so the nested function can update it.
        self.tries = 0
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_device_discovery, aoedevpath, mount_device)
        timer.start(interval=2).wait()
        tries = self.tries
        if tries != 0:
            LOG.debug(_("Found AoE device %(aoedevpath)s "
                        "(after %(tries)s rediscover)"),
                      {'aoedevpath': aoedevpath,
                       'tries': tries})
        conf = super(LibvirtAOEVolumeDriver,
                     self).connect_volume(connection_info, mount_device)
        conf.source_type = "block"
        conf.source_path = aoedevpath
        return conf
class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
    """Class implements libvirt part of volume driver for GlusterFS."""
    def __init__(self, connection):
        """Create back-end to glusterfs."""
        super(LibvirtGlusterfsVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def connect_volume(self, connection_info, mount_device):
        """Connect the volume. Returns xml for libvirt."""
        conf = super(LibvirtGlusterfsVolumeDriver,
                     self).connect_volume(connection_info, mount_device)
        data = connection_info['data']
        if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
            # qemu can talk to gluster directly via libgfapi; export
            # strings look like "<host>:/<volume>", so split out the host
            # (dropping the trailing ':') and the volume name.
            vol_name = data['export'].split('/')[1]
            source_host = data['export'].split('/')[0][:-1]
            conf.source_ports = ['24007']
            conf.source_type = 'network'
            conf.source_protocol = 'gluster'
            conf.source_hosts = [source_host]
            conf.source_name = '%s/%s' % (vol_name, data['name'])
        else:
            # Fall back to a FUSE mount and treat the volume as a file.
            path = self._ensure_mounted(data['export'], data.get('options'))
            path = os.path.join(path, data['name'])
            conf.source_type = 'file'
            conf.source_path = path
        conf.driver_format = connection_info['data'].get('format', 'raw')
        return conf
    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
            # Nothing was mounted for the libgfapi path.
            return
        export = connection_info['data']['export']
        mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                                  utils.get_hash_str(export))
        try:
            utils.execute('umount', mount_path, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # A busy share just means another instance still uses it.
            if 'target is busy' in exc.message:
                LOG.debug(_("The GlusterFS share %s is still in use."), export)
            else:
                LOG.exception(_("Couldn't unmount the GlusterFS share %s"),
                              export)
    def _ensure_mounted(self, glusterfs_export, options=None):
        """@type glusterfs_export: string
        @type options: string
        """
        # Returns the local mount point, mounting the export first if it
        # is not already mounted.
        mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                                  utils.get_hash_str(glusterfs_export))
        if not virtutils.is_mounted(mount_path, glusterfs_export):
            self._mount_glusterfs(mount_path, glusterfs_export,
                                  options, ensure=True)
        return mount_path
    def _mount_glusterfs(self, mount_path, glusterfs_share,
                         options=None, ensure=False):
        """Mount glusterfs export to mount path."""
        utils.execute('mkdir', '-p', mount_path)
        gluster_cmd = ['mount', '-t', 'glusterfs']
        if options is not None:
            gluster_cmd.extend(options.split(' '))
        gluster_cmd.extend([glusterfs_share, mount_path])
        try:
            utils.execute(*gluster_cmd, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # With ensure=True an already-mounted share is not an error.
            if ensure and 'already mounted' in exc.message:
                LOG.warn(_("%s is already mounted"), glusterfs_share)
            else:
                raise
class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach Fibre Channel Network volumes to libvirt."""
    def __init__(self, connection):
        super(LibvirtFibreChannelVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def _get_pci_num(self, hba):
        """Return the PCI address segment of an HBA device path, or None."""
        # NOTE(walter-boring)
        # device path is in format of
        # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
        # sometimes an extra entry exists before the host2 value
        # we always want the value prior to the host2 value
        pci_num = None
        if hba is not None:
            if "device_path" in hba:
                index = 0
                device_path = hba['device_path'].split('/')
                for value in device_path:
                    if value.startswith('host'):
                        break
                    index = index + 1
                if index > 0:
                    pci_num = device_path[index - 1]
        return pci_num
    @utils.synchronized('connect_volume')
    def connect_volume(self, connection_info, disk_info):
        """Attach the volume to instance_name."""
        fc_properties = connection_info['data']
        mount_device = disk_info["dev"]
        ports = fc_properties['target_wwn']
        wwns = []
        # we support a list of wwns or a single wwn
        if isinstance(ports, list):
            for wwn in ports:
                wwns.append(str(wwn))
        elif isinstance(ports, six.string_types):
            wwns.append(str(ports))
        # We need to look for wwns on every hba
        # because we don't know ahead of time
        # where they will show up.
        hbas = virtutils.get_fc_hbas_info()
        host_devices = []
        # Build every candidate by-path node: one per (HBA, WWN) pair.
        for hba in hbas:
            pci_num = self._get_pci_num(hba)
            if pci_num is not None:
                for wwn in wwns:
                    target_wwn = "0x%s" % wwn.lower()
                    host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
                                   (pci_num,
                                    target_wwn,
                                    fc_properties.get('target_lun', 0)))
                    host_devices.append(host_device)
        if len(host_devices) == 0:
            # this is empty because we don't have any FC HBAs
            msg = _("We are unable to locate any Fibre Channel devices")
            raise exception.NovaException(msg)
        # The /dev/disk/by-path/... node is not always present immediately
        # We only need to find the first device. Once we see the first device
        # multipath will have any others.
        def _wait_for_device_discovery(host_devices, mount_device):
            # Polled by the looping call below; rescans the HBAs until one
            # of the candidate device nodes appears or retries run out.
            tries = self.tries
            for device in host_devices:
                LOG.debug(_("Looking for Fibre Channel dev %(device)s"),
                          {'device': device})
                if os.path.exists(device):
                    self.host_device = device
                    # get the /dev/sdX device. This is used
                    # to find the multipath device.
                    self.device_name = os.path.realpath(device)
                    raise loopingcall.LoopingCallDone()
            if self.tries >= CONF.libvirt.num_iscsi_scan_tries:
                msg = _("Fibre Channel device not found.")
                raise exception.NovaException(msg)
            LOG.warn(_("Fibre volume not yet found at: %(mount_device)s. "
                       "Will rescan & retry. Try number: %(tries)s"),
                     {'mount_device': mount_device,
                      'tries': tries})
            linuxscsi.rescan_hosts(hbas)
            self.tries = self.tries + 1
        # Discovery state lives on self so the nested function can set it.
        self.host_device = None
        self.device_name = None
        self.tries = 0
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_device_discovery, host_devices, mount_device)
        timer.start(interval=2).wait()
        tries = self.tries
        if self.host_device is not None and self.device_name is not None:
            LOG.debug(_("Found Fibre Channel volume %(mount_device)s "
                        "(after %(tries)s rescans)"),
                      {'mount_device': mount_device,
                       'tries': tries})
        # see if the new drive is part of a multipath
        # device. If so, we'll use the multipath device.
        mdev_info = linuxscsi.find_multipath_device(self.device_name)
        if mdev_info is not None:
            LOG.debug(_("Multipath device discovered %(device)s")
                      % {'device': mdev_info['device']})
            device_path = mdev_info['device']
            # Record the component devices so disconnect can remove them.
            connection_info['data']['devices'] = mdev_info['devices']
            connection_info['data']['multipath_id'] = mdev_info['id']
        else:
            # we didn't find a multipath device.
            # so we assume the kernel only sees 1 device
            device_path = self.host_device
            device_info = linuxscsi.get_device_info(self.device_name)
            connection_info['data']['devices'] = [device_info]
        conf = super(LibvirtFibreChannelVolumeDriver,
                     self).connect_volume(connection_info, disk_info)
        conf.source_type = "block"
        conf.source_path = device_path
        return conf
    @utils.synchronized('connect_volume')
    def disconnect_volume(self, connection_info, mount_device):
        """Detach the volume from instance_name."""
        super(LibvirtFibreChannelVolumeDriver,
              self).disconnect_volume(connection_info, mount_device)
        # If this is a multipath device, we need to search again
        # and make sure we remove all the devices. Some of them
        # might not have shown up at attach time.
        if 'multipath_id' in connection_info['data']:
            multipath_id = connection_info['data']['multipath_id']
            mdev_info = linuxscsi.find_multipath_device(multipath_id)
            devices = mdev_info['devices']
            LOG.debug(_("devices to remove = %s"), devices)
        else:
            # only needed when multipath-tools work improperly
            devices = connection_info['data'].get('devices', [])
            LOG.warn(_LW("multipath-tools probably work improperly. "
                         "devices to remove = %s.") % devices)
        # There may have been more than 1 device mounted
        # by the kernel for this volume. We have to remove
        # all of them
        for device in devices:
            linuxscsi.remove_device(device)
class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
    """Scality SOFS Nova driver. Provide hypervisors with access
    to sparse files on SOFS.
    """
    def __init__(self, connection):
        """Create back-end to SOFS and check connection."""
        super(LibvirtScalityVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        self._check_prerequisites()
        self._mount_sofs()
        conf = super(LibvirtScalityVolumeDriver,
                     self).connect_volume(connection_info, disk_info)
        path = os.path.join(CONF.libvirt.scality_sofs_mount_point,
                            connection_info['data']['sofs_path'])
        conf.source_type = 'file'
        conf.source_path = path
        # The default driver cache policy is 'none', and this causes
        # qemu/kvm to open the volume file with O_DIRECT, which is
        # rejected by FUSE (on kernels older than 3.3). Scality SOFS
        # is FUSE based, so we must provide a more sensible default.
        conf.driver_cache = 'writethrough'
        return conf
    def _check_prerequisites(self):
        """Sanity checks before attempting to mount SOFS."""
        # config is mandatory
        config = CONF.libvirt.scality_sofs_config
        if not config:
            msg = _("Value required for 'scality_sofs_config'")
            LOG.warn(msg)
            raise exception.NovaException(msg)
        # config can be a file path or a URL, check it
        if urlparse.urlparse(config).scheme == '':
            # turn local path into URL
            config = 'file://%s' % config
        try:
            # A quick open/close is enough to prove the config is reachable.
            urllib2.urlopen(config, timeout=5).close()
        except urllib2.URLError as e:
            msg = _("Cannot access 'scality_sofs_config': %s") % e
            LOG.warn(msg)
            raise exception.NovaException(msg)
        # mount.sofs must be installed
        if not os.access('/sbin/mount.sofs', os.X_OK):
            msg = _("Cannot execute /sbin/mount.sofs")
            LOG.warn(msg)
            raise exception.NovaException(msg)
    def _mount_sofs(self):
        config = CONF.libvirt.scality_sofs_config
        mount_path = CONF.libvirt.scality_sofs_mount_point
        sysdir = os.path.join(mount_path, 'sys')
        # The presence of <mount_path>/sys indicates SOFS is mounted.
        if not os.path.isdir(mount_path):
            utils.execute('mkdir', '-p', mount_path)
        if not os.path.isdir(sysdir):
            utils.execute('mount', '-t', 'sofs', config, mount_path,
                          run_as_root=True)
        if not os.path.isdir(sysdir):
            msg = _("Cannot mount Scality SOFS, check syslog for errors")
            LOG.warn(msg)
            raise exception.NovaException(msg)
| |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
# import columns
# NOTE(review): the original Think Bayes code shipped a local "columns"
# module whose read_csv(filename, cls) builds one cls instance per row.
# Aliasing pandas keeps the name, but pandas.read_csv has a different
# signature (it does not accept a row class) — confirm ReadHockeyData
# still works with this alias.
import pandas as columns
import thinkbayes2
import thinkplot
# Toggle between the summary-statistics prior and the pair-wise prior.
USE_SUMMARY_DATA = True
class Hockey(thinkbayes2.Suite):
    """Suite of hypotheses about a team's goal-scoring rate."""

    def __init__(self, label=None):
        """Builds the prior distribution over scoring rates.

        label: string used to identify this suite in plots
        """
        if USE_SUMMARY_DATA:
            # Prior estimated from each team's average goals scored.
            mu, sigma = 2.8, 0.3
        else:
            # Prior estimated from each pair-wise match-up.
            mu, sigma = 2.8, 0.85
        prior = thinkbayes2.MakeNormalPmf(mu, sigma, 4)
        thinkbayes2.Suite.__init__(self, prior, label=label)

    def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under the hypothesis.

        Evaluates the Poisson PMF for lambda and k.

        hypo: goal scoring rate (lambda) in goals per game
        data: goals scored in one game
        """
        return thinkbayes2.EvalPoissonPmf(data, hypo)
def MakeGoalPmf(suite, high=10):
    """Makes the distribution of goals scored.

    Mixes one Poisson PMF per hypothetical scoring rate, weighted by the
    probability of that rate under the suite.

    suite: distribution of the goal-scoring rate
    high: upper bound on goals per game

    returns: Pmf of goals per game
    """
    metapmf = thinkbayes2.Pmf()
    for rate, weight in suite.Items():
        metapmf.Set(thinkbayes2.MakePoissonPmf(rate, high), weight)
    return thinkbayes2.MakeMixture(metapmf, label=suite.label)
def MakeGoalTimePmf(suite):
    """Makes the distribution of time until the first goal.

    Mixes one exponential PMF per hypothetical scoring rate, weighted by
    the probability of that rate under the suite.

    suite: distribution of the goal-scoring rate

    returns: Pmf of time (in games) until the first goal
    """
    metapmf = thinkbayes2.Pmf()
    for rate, weight in suite.Items():
        metapmf.Set(thinkbayes2.MakeExponentialPmf(rate, high=2, n=2001),
                    weight)
    return thinkbayes2.MakeMixture(metapmf, label=suite.label)
class Game(object):
    """One game record.

    Attributes are attached dynamically by columns.read_csv.
    """

    # Per-column conversion functions used by columns.read_csv.
    convert = dict()

    def clean(self):
        """Computes total goals as the sum of the three period scores."""
        self.goals = self.pd1 + self.pd2 + self.pd3
def ReadHockeyData(filename='ThinkBayes2/code/hockey_data.csv'):
    """Read game scores from the data file.

    Builds pair-wise and team-wise score summaries for the 2011 season.

    filename: string path to the CSV of game records
    """
    # NOTE(review): columns.read_csv is expected to build Game objects per
    # row; "columns" is aliased to pandas above, whose read_csv has a
    # different signature — confirm this call still works.
    game_list = columns.read_csv(filename, Game)
    # map from gameID to list of two games
    games = {}
    for game in game_list:
        if game.season != 2011:
            continue
        key = game.game
        games.setdefault(key, []).append(game)
    # map from (team1, team2) to (score1, score2)
    pairs = {}
    # dict.items(): iteritems() is Python 2 only and this file targets
    # Python 3 (see the shebang).
    for key, pair in games.items():
        t1, t2 = pair
        key = t1.team, t2.team
        entry = t1.total, t2.total
        pairs.setdefault(key, []).append(entry)
    ProcessScoresTeamwise(pairs)
    ProcessScoresPairwise(pairs)
def ProcessScoresPairwise(pairs):
    """Average number of goals for each team against each opponent.

    Plots the CDF of per-matchup scoring averages and prints summary
    statistics.

    pairs: map from (team1, team2) to list of (score1, score2)
    """
    # map from (team1, team2) to list of goals scored
    goals_scored = {}
    # items(): iteritems() is Python 2 only; this file targets Python 3.
    for key, entries in pairs.items():
        t1, t2 = key
        for entry in entries:
            g1, g2 = entry
            goals_scored.setdefault((t1, t2), []).append(g1)
            goals_scored.setdefault((t2, t1), []).append(g2)
    # make a list of average goals scored, skipping sparse matchups
    lams = []
    for key, goals in goals_scored.items():
        if len(goals) < 3:
            continue
        lam = thinkbayes2.Mean(goals)
        lams.append(lam)
    # make the distribution of average goals scored
    cdf = thinkbayes2.MakeCdfFromList(lams)
    thinkplot.Cdf(cdf)
    thinkplot.Show()
    mu, var = thinkbayes2.MeanVar(lams)
    print('mu, sig', mu, math.sqrt(var))
    print('BOS v VAN', pairs['BOS', 'VAN'])
def ProcessScoresTeamwise(pairs):
    """Average number of goals for each team.

    Plots the CDF of per-team scoring averages and prints summary
    statistics.

    pairs: map from (team1, team2) to list of (score1, score2)
    """
    # map from team to list of goals scored
    goals_scored = {}
    # items(): iteritems() is Python 2 only; this file targets Python 3.
    for key, entries in pairs.items():
        t1, t2 = key
        for entry in entries:
            g1, g2 = entry
            goals_scored.setdefault(t1, []).append(g1)
            goals_scored.setdefault(t2, []).append(g2)
    # make a list of average goals scored
    lams = []
    for key, goals in goals_scored.items():
        lam = thinkbayes2.Mean(goals)
        lams.append(lam)
    # make the distribution of average goals scored
    cdf = thinkbayes2.MakeCdfFromList(lams)
    thinkplot.Cdf(cdf)
    thinkplot.Show()
    mu, var = thinkbayes2.MeanVar(lams)
    print('mu, sig', mu, math.sqrt(var))
def main():
    """Runs the Boston/Vancouver hockey analysis and saves the plots."""
    #ReadHockeyData()
    #return
    formats = ['png']
    # Prior distributions for the two teams' scoring rates.
    suite1 = Hockey('bruins')
    suite2 = Hockey('canucks')
    thinkplot.Clf()
    thinkplot.PrePlot(num=2)
    thinkplot.Pmf(suite1)
    thinkplot.Pmf(suite2)
    thinkplot.Save(root='hockey0',
                   xlabel='Goals per game',
                   ylabel='Probability',
                   formats=formats)
    # Update each prior with the goals scored in four games.
    suite1.UpdateSet([0, 2, 8, 4])
    suite2.UpdateSet([1, 3, 1, 0])
    thinkplot.Clf()
    thinkplot.PrePlot(num=2)
    thinkplot.Pmf(suite1)
    thinkplot.Pmf(suite2)
    thinkplot.Save(root='hockey1',
                   xlabel='Goals per game',
                   ylabel='Probability',
                   formats=formats)
    # Posterior predictive distributions of goals per game.
    goal_dist1 = MakeGoalPmf(suite1)
    goal_dist2 = MakeGoalPmf(suite2)
    thinkplot.Clf()
    thinkplot.PrePlot(num=2)
    thinkplot.Pmf(goal_dist1)
    thinkplot.Pmf(goal_dist2)
    thinkplot.Save(root='hockey2',
                   xlabel='Goals',
                   ylabel='Probability',
                   formats=formats)
    # Distributions of time (in games) until the first goal.
    time_dist1 = MakeGoalTimePmf(suite1)
    time_dist2 = MakeGoalTimePmf(suite2)
    print('MLE bruins', suite1.MaximumLikelihood())
    print('MLE canucks', suite2.MaximumLikelihood())
    thinkplot.Clf()
    thinkplot.PrePlot(num=2)
    thinkplot.Pmf(time_dist1)
    thinkplot.Pmf(time_dist2)
    thinkplot.Save(root='hockey3',
                   xlabel='Games until goal',
                   ylabel='Probability',
                   formats=formats)
    # Probability that team 1 wins / loses / ties a single game.
    diff = goal_dist1 - goal_dist2
    p_win = diff.ProbGreater(0)
    p_loss = diff.ProbLess(0)
    p_tie = diff.Prob(0)
    print(p_win, p_loss, p_tie)
    # In overtime the first goal wins, so compare times to first goal;
    # exact ties in the time distributions are split evenly.
    p_overtime = thinkbayes2.PmfProbLess(time_dist1, time_dist2)
    p_adjust = thinkbayes2.PmfProbEqual(time_dist1, time_dist2)
    p_overtime += p_adjust / 2
    print('p_overtime', p_overtime)
    print(p_overtime * p_tie)
    p_win += p_overtime * p_tie
    print('p_win', p_win)
    # win the next two
    p_series = p_win**2
    # split the next two, win the third
    p_series += 2 * p_win * (1-p_win) * p_win
    print('p_series', p_series)
# Run the full analysis when executed as a script.
if __name__ == '__main__':
    main()
| |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration files for benchmarks.
Each benchmark has a default configuration defined inside its module.
The configuration is written in YAML (www.yaml.org) and specifies what
resources are needed to run the benchmark. Users can write their own
config files, which will be merged with the default configuration. These
config files specify overrides to the default configuration. Users can also
specify which benchmarks to run in the same config file.
Valid top level keys:
benchmarks: A YAML array of dictionaries mapping benchmark names to their
configs. This also determines which benchmarks to run.
*any_benchmark_name*: If the 'benchmarks' key is not specified, then
specifying a benchmark name mapped to a config will override
that benchmark's default configuration in the event that that
benchmark is run.
Valid config keys:
vm_groups: A YAML dictionary mapping the names of VM groups to the groups
themselves. These names can be any string.
description: A description of the benchmark.
flags: A YAML dictionary with overrides for default flag values.
Valid VM group keys:
vm_spec: A YAML dictionary mapping names of clouds (e.g. AWS) to the
actual VM spec.
disk_spec: A YAML dictionary mapping names of clouds to the actual
disk spec.
vm_count: The number of VMs to create in this group. If this key isn't
specified, it defaults to 1.
disk_count: The number of disks to attach to VMs of this group. If this key
isn't specified, it defaults to 1.
cloud: The name of the cloud to create the group in. This is used for
multi-cloud configurations.
os_type: The OS type of the VMs to create (see the flag of the same name for
more information). This is used if you want to run a benchmark using VMs
with different OS types (e.g. Debian and RHEL).
static_vms: A YAML array of Static VM specs. These VMs will be used before
any Cloud VMs are created. The total number of VMs will still add up to
the number specified by the 'vm_count' key.
For valid VM spec keys, see virtual_machine.BaseVmSpec and derived classes.
For valid disk spec keys, see disk.BaseDiskSpec and derived classes.
See configs.spec.BaseSpec for more information about adding additional keys to
VM specs, disk specs, or any component of the benchmark configuration
dictionary.
"""
import copy
import logging
import yaml
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
FLAGS = flags.FLAGS
# YAML file of shared anchors prepended to every default benchmark config
# (see LoadMinimalConfig).
CONFIG_CONSTANTS = 'default_config_constants.yaml'
flags.DEFINE_string('benchmark_config_file', None,
                    'The file path to the user config file which will '
                    'override benchmark defaults. This should either be '
                    'a path relative to the current working directory, '
                    'an absolute path, or just the name of a file in the '
                    'configs/ directory.')
# May be repeated; each value is one fully.qualified.key=value override
# applied on top of the user config (see _GetConfigFromOverrides).
flags.DEFINE_multistring(
    'config_override', None,
    'This flag can be used to override any config value. It is applied after '
    'the user config (specified via --benchmark_config_file_path), so it has '
    'a higher priority than that config. The value of the flag should be '
    'fully.qualified.key=value (e.g. --config_override=cluster_boot.vm_groups.'
    'default.vm_count=4).')
def _LoadUserConfig(path):
    """Loads a user config from the supplied path."""
    # NOTE: yaml.load uses the full loader, which can construct arbitrary
    # Python objects; user config files are assumed to be trusted input.
    with open(data.ResourcePath(path)) as fp:
        contents = fp.read()
    return yaml.load(contents)
def _GetConfigFromOverrides(overrides):
    """Converts a list of --config_override values into a config dict.

    Each override takes the form fully.qualified.key=value. Only the first
    '=' is treated as the separator, so values may themselves contain '='
    (the previous exact-count check rejected such overrides).

    Args:
      overrides: list of strings, the raw override flag values.

    Returns:
      dict. The nested config produced by merging all overrides.

    Raises:
      ValueError: if an override has no '=' separator at all.
    """
    config = {}
    for override in overrides:
        if '=' not in override:
            raise ValueError('--config_override flag value has incorrect number of '
                             '"=" characters. The value must take the form '
                             'fully.qualified.key=value.')
        # Split on the first '=' only so values may contain '='.
        full_key, value = override.split('=', 1)
        keys = full_key.split('.')
        # Build the nested dict from the innermost key outwards.
        new_config = {keys.pop(): yaml.load(value)}
        while keys:
            new_config = {keys.pop(): new_config}
        config = MergeConfigs(config, new_config)
    return config
def GetUserConfig():
    """Returns the user config with any overrides applied.

    Loads the config from --benchmark_config_file, then merges in any
    overrides given via --config_override and returns the result.

    Returns:
      dict. The result of merging the loaded config from the
      --benchmark_config_file flag with the config generated from the
      --config override flag.
    """
    config = {}
    try:
        if FLAGS.benchmark_config_file:
            config = _LoadUserConfig(FLAGS.benchmark_config_file)
        if FLAGS.config_override:
            config = MergeConfigs(
                config, _GetConfigFromOverrides(FLAGS.config_override))
    except yaml.parser.ParserError as e:
        raise errors.Config.ParseError(
            'Encountered a problem loading config. Please ensure that the config '
            'is valid YAML. Error received:\n%s' % e)
    except yaml.composer.ComposerError as e:
        raise errors.Config.ParseError(
            'Encountered a problem loading config. Please ensure that all '
            'references are defined. Error received:\n%s' % e)
    return config
def MergeConfigs(default_config, override_config, warn_new_key=False):
    """Merges the override config into the default config.

    Recursively merges two nested dicts. Leaf key/value pairs present in
    both dicts take their value from override_config; keys present in only
    one side are kept. Neither input dict is mutated.

    Uses dict.items() rather than the Python 2-only iteritems() so the
    function works on both Python 2 and Python 3.

    Args:
      default_config: The dict which will have its values overridden.
      override_config: The dict which contains the overrides.
      warn_new_key: Determines whether we warn the user if the override config
        has a key that the default config did not have.

    Returns:
      A dict containing the values from the default_config merged with those
      from the override_config.
    """
    def _Merge(base, extra):
        merged_dict = copy.deepcopy(base)
        for key, value in extra.items():
            if key not in base:
                merged_dict[key] = copy.deepcopy(value)
                if warn_new_key:
                    logging.warning('The key "%s" was not in the default config, '
                                    'but was in user overrides. This may indicate '
                                    'a typo.' % key)
            elif isinstance(base[key], dict) and isinstance(value, dict):
                merged_dict[key] = _Merge(base[key], value)
            else:
                merged_dict[key] = value
        return merged_dict
    if override_config:
        return _Merge(default_config, override_config)
    else:
        return default_config
def LoadMinimalConfig(benchmark_config, benchmark_name):
    """Loads a benchmark config without using any flags in the process.

    Prepends configs/default_config_constants.yaml to the benchmark config
    before parsing, so the config may reference anchors defined in the
    constants file.

    Args:
      benchmark_config: str. The default config in YAML format.
      benchmark_name: str. The name of the benchmark.

    Returns:
      dict. The loaded config.
    """
    with open(data.ResourcePath(CONFIG_CONSTANTS, False)) as fp:
        constants_yaml = fp.read()
    combined_yaml = '\n'.join([constants_yaml, benchmark_config])
    try:
        config = yaml.load(combined_yaml)
    except yaml.parser.ParserError as e:
        raise errors.Config.ParseError(
            'Encountered a problem loading the default benchmark config. Please '
            'ensure that the config is valid YAML. Error received:\n%s' % e)
    except yaml.composer.ComposerError as e:
        raise errors.Config.ParseError(
            'Encountered a problem loading the default benchmark config. Please '
            'ensure that all references are defined. Error received:\n%s' % e)
    return config[benchmark_name]
def LoadConfig(benchmark_config, user_config, benchmark_name):
    """Loads a benchmark configuration.

    Parses the benchmark's default YAML configuration, then applies the
    user's overrides on top of it. The result is what the
    benchmark_spec.BenchmarkSpec constructor receives.

    Args:
      benchmark_config: str. The default configuration in YAML format.
      user_config: dict. The loaded user config for the benchmark.
      benchmark_name: str. The name of the benchmark.

    Returns:
      dict. The loaded config.
    """
    defaults = LoadMinimalConfig(benchmark_config, benchmark_name)
    return MergeConfigs(defaults, user_config, warn_new_key=True)
| |
import threading
import unohelper
from com.sun.star.awt import XWindowListener, XKeyHandler, \
XFocusListener, XActionListener, XMouseListener
from com.sun.star.awt.grid import XGridSelectionListener, XGridRowSelection
from com.sun.star.view.SelectionType import SINGLE as ST_SINGLE
from com.sun.star.style.HorizontalAlignment import RIGHT as HA_RIGHT
from com.sun.star.awt import Rectangle
from com.sun.star.awt.MouseButton import LEFT as MB_LEFT, RIGHT as MB_RIGHT
from com.sun.star.awt.Key import RETURN as K_RETURN, \
UP as K_UP, DOWN as K_DOWN, HOME as K_HOME, END as K_END, \
DELETE as K_DELETE, CONTEXTMENU as K_CONTEXTMENU
from com.sun.star.awt.PosSize import X as PS_X, Y as PS_Y, \
WIDTH as PS_WIDTH, HEIGHT as PS_HEIGHT, SIZE as PS_SIZE
from com.sun.star.awt.MenuItemStyle import CHECKABLE as MIS_CHECKABLE
from com.sun.star.awt.MessageBoxButtons import \
BUTTONS_OK_CANCEL as MBB_BUTTONS_OK_CANCEL, \
DEFAULT_BUTTON_CANCEL as MBB_DEFAULT_BUTTON_CANCEL
from com.sun.star.beans import PropertyValue
from com.sun.star.beans.PropertyState import DIRECT_VALUE as PS_DIRECT_VALUE
from pyww.settings import Settings
from pyww.helper import create_control, create_container, create_controls, \
PopupMenuWrapper, MenuEntry, messagebox
import pyww.resource
class WatchingWindowView(object):
    """ Watching window view.

    Builds and manages the Watching Window panel: a row of toolbar buttons,
    an optional formula input line, and a grid listing watched cells.
    User interaction (buttons, grid, keyboard, context menus) is routed to
    ``self.model`` (the watches model) and ``self.frame`` (document frame).
    """
    # Layout constants, in pixels.
    LEFT_MARGIN = 3
    RIGHT_MARGIN = 3
    TOP_MARGIN = 3
    BUTTON_SEP = 2
    BUTTON_WIDTH = 28
    BUTTON_HEIGHT = 28
    INPUT_LINE_HEIGHT = 23
    def __init__(self, ctx, model, frame, parent):
        # ctx: UNO component context; model: watches model; frame: document
        # frame; parent: window in which the view's controls are created.
        self.ctx = ctx
        self.frame = frame
        self.res = pyww.resource.CurrentStringResource.get(ctx)
        self.model = model
        self.grid = None
        self._context_menu = None  # created lazily by context_menu()
        self.input_line_shown = Settings(ctx).get("InputLine")
        self._key_handler = self.KeyHandler(self)
        self._grid_key_handler = self.GridKeyHandler(self)
        self._focus_listener = self.FocusListener(self, False)
        self._create_view(ctx, parent, self.res,
                          model.get_data_model(), self.input_line_shown)
        parent.addWindowListener(self.WindowListener(self))
        self.update_buttons_state()
    def focus_to_doc(self):
        """ Move focus to current document. """
        self.frame.getContainerWindow().setFocus()
    # Grid functions
    def grid_select_entry(self, index):
        """ Select specific row by index. """
        if 0 <= index < self.model.get_watches_count():
            self.grid.selectRow(index)
    def grid_is_entry_selected(self):
        """ Check any entry is selected. """
        return self.grid.hasSelectedRows()
    def get_selected_entry_index(self):
        """ Get selected entry index (-1 when nothing is selected). """
        if self.grid.hasSelectedRows():
            return self.grid.getSelectedRows()[0]
        return -1
    def get_selected_entry_heading(self):
        """ Get row heading (cell address) of selected entry, or "". """
        index = self.get_selected_entry_index()
        if 0 <= index < self.model.get_watches_count():
            return self.model.get_address(index)
        return ""
    def grid_select_current(self):
        """ Select cursor row. """
        self.grid_select_entry(self.grid.getCurrentRow())
    def grid_deselect_all(self):
        # Clear the grid selection entirely.
        self.grid.deselectAllRows()
    def grid_goto_row(self, index):
        # Move the grid cursor to the first cell of the given row.
        self.grid.goToCell(0, index)
    def is_selected_entry_moveable(self, up):
        """ Check selected entry is moveable in the given direction. """
        i = self.get_selected_entry_index()
        if up and i > 0:
            return True
        elif not up and i < (self.model.get_watches_count() - 1):
            return True
        return False
    def dispose(self):
        # Drop references so the view and its controls can be collected.
        self.cont = None
        self.model = None
        self.grid = None
        self.res = None
        self._context_menu = None
    class ListenerBase(unohelper.Base):
        # Common base for the UNO listeners below: keeps a back-reference
        # to the view ("act") and releases it on disposal.
        def __init__(self, act):
            self.act = act
        # XEventListener
        def disposing(self, ev):
            self.act = None
    class MouseListener(ListenerBase, XMouseListener):
        # Grid mouse handling: right-click opens the context menu,
        # left double-click jumps to the watched cell.
        def mouseEntered(self, ev): pass
        def mouseExited(self, ev): pass
        def mousePressed(self, ev):
            if ev.Buttons == MB_RIGHT and ev.ClickCount == 1:
                self.act.context_menu(ev.X, ev.Y)
        def mouseReleased(self, ev):
            if ev.Buttons == MB_LEFT and ev.ClickCount == 2:
                self.act.cmd_goto()
    class ButtonListener(ListenerBase, XActionListener):
        # Forwards toolbar button presses to execute_cmd().
        def actionPerformed(self, ev):
            self.act.execute_cmd(ev.ActionCommand)
    # Command processing
    def execute_cmd(self, command):
        # Dispatch command name "foo" to method self.cmd_foo().
        try:
            getattr(self, "cmd_" + command)()
        except:
            # NOTE(review): bare except silently swallows any error raised
            # by a command handler, which makes failures hard to diagnose.
            pass
    def cmd_add(self):
        # Refuse to add a watch when the selection is unreasonably large.
        if 1000 < self.model.get_cells_count():
            self.errorbox(self.res["Too many cells are selected."],
                          self.res["Watching Window"])
            return
        self.model.add_entry()
    def cmd_delete(self):
        # Remove the selected watch, keeping the cursor row selected.
        index = self.get_selected_entry_index()
        if 0 <= index:
            self.model.remove_entry(index)
            self.grid_select_current()
    def cmd_update(self):
        # Recalculate all watched values.
        self.model.update_all()
    def cmd_goto(self, addr=None):
        # Jump to addr; when addr is None, use the selected entry's address.
        if addr is None:
            index = self.get_selected_entry_index()
            if index < 0: return
            try:
                addr = self.model.get_address(index)
            except:
                # NOTE(review): bare except; on failure addr stays None and
                # the goto is silently skipped.
                pass
        if addr:
            self.goto_point(addr)
    def cmd_up(self):
        # Move the selected watch one row up.
        index = self.get_selected_entry_index()
        if 0 <= index:
            self.model.move_entry(index, True)
    def cmd_down(self):
        # Move the selected watch one row down.
        index = self.get_selected_entry_index()
        if 0 <= index:
            self.model.move_entry(index, False)
    def cmd_clear(self):
        # Remove all watches.
        self.model.remove_all_entries()
    def cmd_settings(self):
        # Open the extension settings dialog.
        from pyww.settings import Settings
        try:
            Settings(self.ctx).configure(self.res)
        except Exception as e:
            print(e)
    def cmd_switch_inputline(self):
        # Toggle the input line visibility.
        self.switch_input_line()
    def cmd_option(self):
        # Show the option popup just below the option button.
        ps = self.cont.getControl("btn_option").getPosSize()
        self.option_popup(ps.X, ps.Y + ps.Height)
    def cmd_switch_store(self):
        # Toggle whether watches are stored in the document.
        self.model.switch_store_state()
    def cmd_about(self):
        # Show the about dialog with package name, version and license text.
        from pyww import EXT_ID, EXT_DIR
        from pyww.dialogs import AboutDialog
        from pyww.helper import get_package_info, get_text_content
        name, version = get_package_info(self.ctx, EXT_ID)
        text = get_text_content(self.ctx, EXT_DIR + "LICENSE")
        # Skip the first 20 lines (license preamble).
        text = "\n".join(text.split("\n")[20:])
        AboutDialog(self.ctx, self.res,
                    name=name, version=version, text=text,
                    ).execute()
    def goto_point(self, desc):
        """ move cursor to the specified address. """
        self.dispatch(
            ".uno:GoToCell",
            (PropertyValue("ToPoint", 0, desc, PS_DIRECT_VALUE),))
        self.frame.getComponentWindow().setFocus()
    def dispatch(self, cmd, args):
        """ dispatch with arguments. """
        self.ctx.getServiceManager().createInstanceWithContext(
            "com.sun.star.frame.DispatchHelper", self.ctx).\
            executeDispatch(self.frame, cmd, "_self", 0, args)
    class GridSelectionListener(ListenerBase, XGridSelectionListener):
        # Keeps buttons and input line in sync with the grid selection.
        def selectionChanged(self, ev):
            try:
                self.act.update_buttons_state()
                self.act.update_input_line()
            except Exception as e:
                print(e)
    class FocusListener(ListenerBase, XFocusListener):
        # Installs/removes a key handler while the input line (is_grid is
        # False) or the grid (is_grid is True) owns the focus.
        def __init__(self, act, is_grid):
            self.act = act
            self._is_grid = is_grid
        def focusGained(self, ev):
            self.act.focus_gained(self._is_grid)
        def focusLost(self, ev):
            self.act.focus_lost(self._is_grid)
    def focus_gained(self, is_grid):
        """ Set key handler to the toolkit when the focus gained into
        input field or grid field.
        This handler is required to consume some key events. """
        self.frame.getContainerWindow().getToolkit().addKeyHandler(
            self._grid_key_handler if is_grid else self._key_handler)
    def focus_lost(self, is_grid):
        """ Remove key handler has been set by focus gained event. """
        self.frame.getContainerWindow().getToolkit().removeKeyHandler(
            self._grid_key_handler if is_grid else self._key_handler)
    class KeyHandler(ListenerBase, XKeyHandler):
        # Key handler for the input line: Return commits the typed formula.
        RETURN = K_RETURN
        def keyPressed(self, ev):
            if ev.KeyCode == self.__class__.RETURN:
                self.act.update_row_formula()
                return True
            return False
        def keyReleased(self, ev):
            return True
    def update_row_formula(self):
        """ Update cell formula from input line. """
        index = self.get_selected_entry_index()
        if 0 <= index:
            self.model.update_row(index, self.get_input_text())
    class GridKeyHandler(ListenerBase, XKeyHandler):
        # Key handler for the grid: Return jumps to the cell, cursor keys
        # move the selection, and the context-menu key opens the popup.
        RETURN = K_RETURN
        def keyPressed(self, ev):
            code = ev.KeyCode
            if code == K_RETURN:
                self.act.cmd_goto()
                return True
            elif code in (K_UP, K_DOWN, K_HOME, K_END):
                self.act.grid_cmd_cursor(code, ev.Modifiers & 0b11)
                return True
            return False
        def keyReleased(self, ev):
            if ev.KeyCode == K_CONTEXTMENU:
                self.act.grid_cmd_contextmenu()
            return True
    def grid_cmd_contextmenu(self):
        # ToDo current selection is not shown
        index = self.get_selected_entry_index()
        if 0 <= index:
            # ToDo calculate better location
            self.context_menu(0, 0)
            # How to move focus to the popup menu? it seems no way
    def grid_cmd_cursor(self, key, mod):
        """ Move cursor by key event. """
        index = self.get_selected_entry_index()
        if key == K_UP:
            index -= 1
        elif key == K_DOWN:
            index += 1
        elif key == K_HOME:
            index = 0
        elif key == K_END:
            index = self.model.get_watches_count() - 1
        # Ignore moves that would leave the valid row range.
        if index < 0 or self.model.get_watches_count() <= index:
            return
        self.grid_deselect_all()
        self.grid_goto_row(index)
        self.grid_select_current()
    def update_buttons_state(self):
        """ Update state of buttons by current situation. """
        ubs = self.update_button_state
        delete_state = False
        goto_state = False
        update_state = False
        up_state = False
        down_state = False
        if self.model.get_watches_count() == 0:
            # No watches: everything except "add"/"option" stays disabled.
            pass
        else:
            if self.grid_is_entry_selected():
                delete_state = True
                goto_state = True
                up_state = self.is_selected_entry_moveable(True)
                down_state = self.is_selected_entry_moveable(False)
            update_state = True
        ubs("btn_delete", delete_state)
        ubs("btn_goto", goto_state)
        ubs("btn_update", update_state)
        ubs("btn_up", up_state)
        ubs("btn_down", down_state)
    def update_input_line(self):
        # Mirror the selected entry's formula into the input line.
        addr = self.get_selected_entry_heading()
        if addr:
            self.set_input_line(self.model.get_formula(addr))
    def set_input_line(self, text):
        """ Set text to input line. """
        self.cont.getControl("edit_input").getModel().Text = text
    def get_input_text(self):
        """ Get text from input line. """
        return self.cont.getControl("edit_input").getModel().Text
    def enable_add_watch(self, state):
        """ Request to change state of add button. """
        self.update_button_state("btn_add", state)
    def context_menu(self, x, y):
        """ Show context menu at the coordinate. """
        index = self.get_selected_entry_index()
        if index < 0: return
        _ = self.res.get
        popup = self._context_menu
        if popup is None:
            # Build the menu once and cache it. Item ids: 4=goto,
            # 6="Go to" submenu, 8=delete, 10=up, 11=down.
            popup = PopupMenuWrapper(self.ctx,
                (
                    MenuEntry(_("Go to Cell"), 4, 0, "goto"),
                    MenuEntry(_("Go to"), 6, 1, "gotocell"),
                    MenuEntry(_("Remove"), 8, 2, "delete"),
                    MenuEntry("", -1, 3),
                    MenuEntry(_("Up"), 10, 4, "up"),
                    MenuEntry(_("Down"), 11, 5, "down")
                ), True)
            self._context_menu = popup
        if popup:
            addr = self.get_selected_entry_heading()
            state = False
            if addr:
                # Fill the "Go to" submenu with cells referenced by the
                # watched formula; submenu entries get ids >= 1000.
                refs = self.model.get_cell_references(addr)
                if refs:
                    popup.setPopupMenu(
                        6,
                        PopupMenuWrapper(
                            self.ctx,
                            [MenuEntry(ref, i + 1000, i, "")
                             for i, ref in enumerate(refs)],
                            False
                        )
                    )
                    state = True
            popup.enableItem(6, state)
            popup.enableItem(10, self.is_selected_entry_moveable(True))
            popup.enableItem(11, self.is_selected_entry_moveable(False))
            ps = self.grid.getPosSize()
            n = popup.execute(self.cont.getPeer(), x + ps.X, y + ps.Y)
            if n > 0 and n < 1000:
                self.execute_cmd(popup.getCommand(n))
            elif n >= 1000:
                # Submenu entry chosen: jump to the referenced cell.
                addr = refs[n - 1000]
                self.cmd_goto(addr)
    def option_popup(self, x, y):
        """ Show popup menu for option button. """
        _ = self.res.get
        popup = PopupMenuWrapper(self.ctx,
            (
                MenuEntry(_("Clear"), 32, 0, "clear"),
                MenuEntry("", -1, 1, ""),
                MenuEntry(_("Input line"), 1024, 2, "switch_inputline", style=MIS_CHECKABLE),
                MenuEntry(_("Store watches"), 2048, 3, "switch_store", style=MIS_CHECKABLE),
                MenuEntry("", -1, 4, ""),
                MenuEntry(_("Settings..."), 512, 5, "settings"),
                MenuEntry(_("About"), 4096, 6, "about"),
            ), True)
        # Reflect current toggle states in the checkable items.
        popup.checkItem(1024, self.input_line_shown)
        popup.checkItem(2048, self.model.store_watches)
        n = popup.execute(self.cont.getPeer(), x, y)
        if n > 0:
            self.execute_cmd(popup.getCommand(n))
    def _messagebox(self, message, title, message_type, buttons):
        """ Show message in message box. """
        return messagebox(self.ctx, self.frame.getContainerWindow(),
                          message, title, message_type, buttons)
    def message(self, message, title):
        """ Shows message with title. """
        return self._messagebox(message, title, "messbox", 1)
    def errorbox(self, message, title):
        """ Shows error message with title. """
        return self._messagebox(message, title, "errorbox", 1)
    def update_button_state(self, name, state):
        """ Update state of specific button. """
        ctrl = self.cont.getControl(name)
        if ctrl.isEnabled() != state:
            ctrl.setEnable(state)
    def switch_input_line(self, new_state=None):
        """ Switch to show/hide input line. """
        height = self.cont.getPosSize().Height
        btn_height = self.BUTTON_HEIGHT
        if new_state is None:
            new_state = not self.input_line_shown
        if new_state:
            # Showing the input line: push the grid down and shrink it,
            # and track the input line's focus to install the key handler.
            self.grid.setPosSize(
                0, self.TOP_MARGIN * 3 + btn_height + self.INPUT_LINE_HEIGHT,
                0, height - (self.TOP_MARGIN * 3 + btn_height + self.INPUT_LINE_HEIGHT),
                PS_Y + PS_HEIGHT)
            self.cont.getControl("edit_input").addFocusListener(self._focus_listener)
        else:
            self.grid.setPosSize(0, self.TOP_MARGIN * 2 + btn_height,
                0, height - (self.TOP_MARGIN * 2 + btn_height), PS_Y + PS_HEIGHT)
            self.cont.getControl("edit_input").removeFocusListener(self._focus_listener)
        self.cont.getControl("edit_input").setVisible(new_state)
        self.input_line_shown = new_state
    class WindowListener(ListenerBase, XWindowListener):
        # Relays parent-window resize events to the view's layout code.
        def windowMoved(self, ev): pass
        def windowHidden(self, ev):
            pass#self.model.stop_watching()
        def windowShown(self, ev):
            pass#self.model.start_watching()
        def windowResized(self, ev):
            self.act.window_resized(ev.Width, ev.Height)
    def window_resized(self, width, height):
        # Re-layout container, right-aligned buttons, grid and input line
        # after the parent window changed size.
        gc = self.cont.getControl
        btn_width = self.BUTTON_WIDTH
        btn_height = self.BUTTON_HEIGHT
        right_margin = self.RIGHT_MARGIN
        self.cont.setPosSize(0, 0, width, height, PS_SIZE)
        gc("btn_option").setPosSize(
            width - btn_width - right_margin, 0, 0, 0, PS_X)
        gc("btn_update").setPosSize(
            width - btn_width * 2 - right_margin - self.BUTTON_SEP, 0, 0, 0, PS_X)
        if self.input_line_shown:
            gc("grid").setPosSize(
                0, 0, width,
                height - btn_height - self.TOP_MARGIN * 3 - self.INPUT_LINE_HEIGHT,
                PS_SIZE)
        else:
            gc("grid").setPosSize(
                0, 0, width,
                height - btn_height - self.TOP_MARGIN * 2, PS_SIZE)
        gc("edit_input").setPosSize(
            0, 0, width - self.LEFT_MARGIN - right_margin, 0, PS_WIDTH)
    def _create_view(self, ctx, parent, res, data_model, show_input_line=False):
        # Create all child controls: the toolbar buttons, the input line
        # and the watch grid, then apply the initial input-line state.
        from pyww import ICONS_DIR
        LEFT_MARGIN = self.LEFT_MARGIN
        RIGHT_MARGIN = self.RIGHT_MARGIN
        TOP_MARGIN = self.TOP_MARGIN
        BUTTON_SEP = self.BUTTON_SEP
        BUTTON_WIDTH = self.BUTTON_WIDTH
        BUTTON_HEIGHT = self.BUTTON_HEIGHT
        INPUT_LINE_HEIGHT = self.INPUT_LINE_HEIGHT
        cont = create_container(ctx, parent, (), ())
        self.cont = cont
        background_color = cont.StyleSettings.DialogColor
        button_listener = self.ButtonListener(self)
        ps = parent.getPosSize()
        # Each tuple: type, name, x, y, width, height, property names,
        # property values, and optional extra attributes.
        create_controls(ctx, cont,
            (
                ("Button", "btn_add",
                 LEFT_MARGIN, TOP_MARGIN, BUTTON_WIDTH, BUTTON_HEIGHT,
                 ("FocusOnClick", "HelpText", "HelpURL", "ImageURL"),
                 (False, res["Add"], "", ICONS_DIR + "add_16.png"),
                 {"ActionCommand": "add", "ActionListener": button_listener}),
                ("Button", "btn_delete",
                 LEFT_MARGIN + BUTTON_SEP + BUTTON_WIDTH, TOP_MARGIN,
                 BUTTON_WIDTH, BUTTON_HEIGHT,
                 ("FocusOnClick", "HelpText", "HelpURL", "ImageURL"),
                 (False, res["Remove"], "", ICONS_DIR + "delete_16.png"),
                 {"ActionCommand": "delete", "ActionListener": button_listener}),
                ("Button", "btn_goto",
                 LEFT_MARGIN + (BUTTON_SEP + BUTTON_WIDTH) * 2, TOP_MARGIN,
                 BUTTON_WIDTH, BUTTON_HEIGHT,
                 ("FocusOnClick", "HelpText", "HelpURL", "ImageURL"),
                 (False, res["Go to Cell"], "", ICONS_DIR + "goto_16.png"),
                 {"ActionCommand": "goto", "ActionListener": button_listener}),
                ("Button", "btn_up",
                 LEFT_MARGIN + (BUTTON_SEP + BUTTON_WIDTH) * 3, TOP_MARGIN,
                 BUTTON_WIDTH, BUTTON_HEIGHT,
                 ("FocusOnClick", "HelpText", "HelpURL", "ImageURL"),
                 (False, res["Up"], "", ICONS_DIR + "up_16.png"),
                 {"ActionCommand": "up", "ActionListener": button_listener}),
                ("Button", "btn_down",
                 LEFT_MARGIN + (BUTTON_SEP + BUTTON_WIDTH) * 4, TOP_MARGIN,
                 BUTTON_WIDTH, BUTTON_HEIGHT,
                 ("FocusOnClick", "HelpText", "HelpURL", "ImageURL"),
                 (False, res["Down"], "", ICONS_DIR + "down_16.png"),
                 {"ActionCommand": "down", "ActionListener": button_listener}),
                ("Button", "btn_update",
                 LEFT_MARGIN + (BUTTON_SEP + BUTTON_WIDTH) * 5, TOP_MARGIN,
                 BUTTON_WIDTH, BUTTON_HEIGHT,
                 ("FocusOnClick", "HelpText", "HelpURL", "ImageURL"),
                 (False, res["Update All"], "", ICONS_DIR + "update_16.png"),
                 {"ActionCommand": "update", "ActionListener": button_listener}),
                ("Button", "btn_option",
                 LEFT_MARGIN + (BUTTON_WIDTH + BUTTON_SEP) * 6, TOP_MARGIN,
                 BUTTON_WIDTH, BUTTON_HEIGHT,
                 ("FocusOnClick", "HelpText", "HelpURL", "ImageURL"),
                 (False, res["Option"], "", ICONS_DIR + "tune_16.png"),
                 {"ActionCommand": "option", "ActionListener": button_listener}),
                ("Edit", "edit_input",
                 LEFT_MARGIN, TOP_MARGIN * 2 + BUTTON_HEIGHT,
                 ps.Width, INPUT_LINE_HEIGHT,
                 ("HelpText", "HelpURL"),
                 (res["Input line"], ""))
            )
        )
        # Grid starts below the buttons (and below the input line when it
        # is shown) and fills the rest of the parent.
        grid_y = TOP_MARGIN + BUTTON_HEIGHT + TOP_MARGIN + \
            ((TOP_MARGIN + INPUT_LINE_HEIGHT) if show_input_line else 0)
        grid = create_control(ctx, "grid.UnoControlGrid",
            0, grid_y, ps.Width, ps.Height - grid_y,
            ("BackgroundColor", "Border", "GridDataModel", "EvenRowBackgroundColor",
             "HScroll", "SelectionModel", "ShowColumnHeader",
             "ShowRowHeader", "VScroll"),
            (background_color, 0, data_model, 0xeeeeee,
             False, ST_SINGLE, True, False, False))
        grid_model = grid.getModel()
        self.grid = grid
        column_model = grid_model.ColumnModel
        for title in [res[_title]
                      for _title in ("Sheet", "Cell", "Value", "Formula")]:
            column = column_model.createColumn()
            column.Title = title
            column_model.addColumn(column)
        # Right-align the "Value" column (index 2).
        column_model.getColumn(2).HorizontalAlign = HA_RIGHT
        cont.addControl("grid", grid)
        grid.addMouseListener(self.MouseListener(self))
        grid.addSelectionListener(self.GridSelectionListener(self))
        grid.addFocusListener(self.FocusListener(self, True))
        edit_input = cont.getControl("edit_input")  # NOTE(review): unused local
        self.switch_input_line(show_input_line)
| |
# Originally generated by list_pcode: procedure_hierarchy
# Copyright (c) 2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
hierarchy = {(b'MAINLIB ', 78): (b'MAINLIB ', 27),
(b'MAINLIB ', 80): (b'MAINLIB ', 36),
(b'MAINLIB ', 81): (b'MAINLIB ', 15),
(b'MAINLIB ', 82): (b'MAINLIB ', 53),
(b'MAINLIB ', 84): (b'MAINLIB ', 73),
(b'SHIPLIB ', 27): (b'SHIPLIB ', 16),
(b'SHIPLIB ', 28): (b'SHIPLIB ', 27),
(b'SHIPLIB ', 36): (b'SHIPLIB ', 35),
(b'SHIPLIB ', 37): (b'SHIPLIB ', 35),
(b'WINDOWLI', 23): (b'WINDOWLI', 8),
(b'WINDOWLI', 24): (b'WINDOWLI', 23),
(b'WINDOWLI', 28): (b'WINDOWLI', 27),
(b'WINDOWLI', 29): (b'WINDOWLI', 21),
(b'XDOCOMBA', 2): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 3): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 4): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 5): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 6): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 7): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 8): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 9): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 10): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 11): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 12): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 13): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 14): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 15): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 16): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 17): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 18): (b'XDOCOMBA', 17),
(b'XDOFIGHT', 2): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 3): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 4): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 5): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 6): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 7): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 8): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 9): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 10): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 11): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 12): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 13): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 14): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 15): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 16): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 17): (b'XDOFIGHT', 16),
(b'XDOFIGHT', 18): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 19): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 20): (b'XDOFIGHT', 19),
(b'XDOFIGHT', 21): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 22): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 23): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 24): (b'XDOFIGHT', 1),
(b'XDOINTER', 2): (b'XDOINTER', 1),
(b'XDOINTER', 3): (b'XDOINTER', 1),
(b'XDOINTER', 4): (b'XDOINTER', 1),
(b'XDOINTER', 5): (b'XDOINTER', 4),
(b'XDOINTER', 6): (b'XDOINTER', 1),
(b'XDOINTER', 7): (b'XDOINTER', 6),
(b'XDOINTER', 8): (b'XDOINTER', 6),
(b'XDOINTER', 9): (b'XDOINTER', 1),
(b'XDOINTER', 10): (b'XDOINTER', 1),
(b'XDOINTER', 11): (b'XDOINTER', 1),
(b'XDOINTER', 12): (b'XDOINTER', 11),
(b'XDOINTER', 13): (b'XDOINTER', 11),
(b'XDOINTER', 14): (b'XDOINTER', 1),
(b'XDOINTER', 15): (b'XDOINTER', 1),
(b'XDOINTER', 16): (b'XDOINTER', 1),
(b'XDOINTER', 17): (b'XDOINTER', 16),
(b'XDOINTER', 18): (b'XDOINTER', 16),
(b'XDOINTER', 19): (b'XDOINTER', 16),
(b'XDOINTER', 20): (b'XDOINTER', 16),
(b'XDOINTER', 21): (b'XDOINTER', 17),
(b'XDOINTER', 22): (b'XDOINTER', 1),
(b'XDOINTER', 23): (b'XDOINTER', 22),
(b'XDOINTER', 24): (b'XDOINTER', 23),
(b'XDOINTER', 25): (b'XDOINTER', 22),
(b'XDOINTER', 26): (b'XDOINTER', 22),
(b'XDOINTER', 27): (b'XDOINTER', 22),
(b'XDOINTER', 28): (b'XDOINTER', 22),
(b'XDOINTER', 29): (b'XDOINTER', 1),
(b'XDOINTER', 30): (b'XDOINTER', 1),
(b'XDOINTER', 31): (b'XDOINTER', 1),
(b'XDOINTER', 32): (b'XDOINTER', 31),
(b'XDOINTER', 33): (b'XDOINTER', 31),
(b'XDOINTER', 34): (b'XDOINTER', 31),
(b'XDOINTER', 35): (b'XDOINTER', 1),
(b'XDOINTER', 36): (b'XDOINTER', 1),
(b'XDOINTER', 37): (b'XDOINTER', 36),
(b'XDOINTER', 38): (b'XDOINTER', 36),
(b'XDOINTER', 39): (b'XDOINTER', 1),
(b'XDOINTER', 40): (b'XDOINTER', 39),
(b'XDOINTER', 41): (b'XDOINTER', 39),
(b'XDOINTER', 42): (b'XDOINTER', 39),
(b'XDOINTER', 43): (b'XDOINTER', 1),
(b'XDOINTER', 44): (b'XDOINTER', 1),
(b'XDOINTER', 45): (b'XDOINTER', 1),
(b'XDOREPAI', 2): (b'XDOREPAI', 1),
(b'XDOREPAI', 3): (b'XDOREPAI', 1),
(b'XDOREPAI', 4): (b'XDOREPAI', 1),
(b'XDOREPAI', 5): (b'XDOREPAI', 1),
(b'XDOREPAI', 6): (b'XDOREPAI', 1),
(b'XDOREPAI', 7): (b'XDOREPAI', 1),
(b'XDOREPAI', 8): (b'XDOREPAI', 1),
(b'XDOREPAI', 9): (b'XDOREPAI', 8),
(b'XDOREPAI', 10): (b'XDOREPAI', 1),
(b'XDOREPAI', 11): (b'XDOREPAI', 1),
(b'XDOTRADI', 2): (b'XDOTRADI', 1),
(b'XDOTRADI', 3): (b'XDOTRADI', 1),
(b'XDOTRADI', 4): (b'XDOTRADI', 1),
(b'XDOTRADI', 5): (b'XDOTRADI', 1),
(b'XDOTRADI', 6): (b'XDOTRADI', 1),
(b'XDOTRADI', 7): (b'XDOTRADI', 1),
(b'XDOTRADI', 8): (b'XDOTRADI', 7),
(b'XDOTRADI', 9): (b'XDOTRADI', 7),
(b'XDOTRADI', 10): (b'XDOTRADI', 1),
(b'XDOTRADI', 11): (b'XDOTRADI', 1),
(b'XDOTRADI', 12): (b'XDOTRADI', 1),
(b'XDOTRADI', 13): (b'XDOTRADI', 1),
(b'XDOTRADI', 14): (b'XDOTRADI', 1),
(b'XDOTRADI', 15): (b'XDOTRADI', 1),
(b'XDOTRADI', 16): (b'XDOTRADI', 1),
(b'XDOTRADI', 17): (b'XDOTRADI', 16),
(b'XDOTRADI', 18): (b'XDOTRADI', 1),
(b'XDOTRADI', 19): (b'XDOTRADI', 18),
(b'XDOTRADI', 20): (b'XDOTRADI', 19),
(b'XDOTRADI', 21): (b'XDOTRADI', 18),
(b'XDOTRADI', 22): (b'XDOTRADI', 1),
(b'XDOTRADI', 23): (b'XDOTRADI', 1),
(b'XDOTRADI', 24): (b'XDOTRADI', 23),
(b'XDOTRADI', 25): (b'XDOTRADI', 1),
(b'XDOTRADI', 26): (b'XDOTRADI', 1),
(b'XDOTRADI', 27): (b'XDOTRADI', 1),
(b'XDOTRADI', 28): (b'XDOTRADI', 1),
(b'XDOTRADI', 29): (b'XDOTRADI', 1),
(b'XDOTRADI', 30): (b'XDOTRADI', 1),
(b'XDOTRADI', 31): (b'XDOTRADI', 30),
(b'XDOTRADI', 32): (b'XDOTRADI', 30),
(b'XDOTRADI', 33): (b'XDOTRADI', 30),
(b'XDOTRADI', 34): (b'XDOTRADI', 1),
(b'XDOUNITE', 2): (b'XDOUNITE', 1),
(b'XDOUNITE', 3): (b'XDOUNITE', 1),
(b'XDOUNITE', 4): (b'XDOUNITE', 1),
(b'XDOUNITE', 5): (b'XDOUNITE', 1),
(b'XDOUNITE', 6): (b'XDOUNITE', 1),
(b'XDOUNITE', 7): (b'XDOUNITE', 1),
(b'XDOUNITE', 8): (b'XDOUNITE', 1),
(b'XDOUNITE', 9): (b'XDOUNITE', 8),
(b'XDOUNITE', 10): (b'XDOUNITE', 9),
(b'XDOUNITE', 11): (b'XDOUNITE', 1),
(b'XDOUNITE', 12): (b'XDOUNITE', 11),
(b'XDOUNITE', 13): (b'XDOUNITE', 11),
(b'XDOUNITE', 14): (b'XDOUNITE', 11),
(b'XDOUNITE', 15): (b'XDOUNITE', 1),
(b'XDOUNITE', 16): (b'XDOUNITE', 15),
(b'XDOUNITE', 17): (b'XDOUNITE', 16),
(b'XDOUNITE', 18): (b'XDOUNITE', 15),
(b'XDOUNITE', 19): (b'XDOUNITE', 15),
(b'XDOUNITE', 20): (b'XDOUNITE', 19),
(b'XDOUNITE', 21): (b'XDOUNITE', 20),
(b'XDOUNITE', 22): (b'XDOUNITE', 15),
(b'XDOUNITE', 23): (b'XDOUNITE', 15),
(b'XDOUSERM', 2): (b'XDOUSERM', 1),
(b'XDOUSERM', 3): (b'XDOUSERM', 1),
(b'XDOUSERM', 4): (b'XDOUSERM', 1),
(b'XDOUSERM', 5): (b'XDOUSERM', 4),
(b'XDOUSERM', 6): (b'XDOUSERM', 1),
(b'XDOUSERM', 7): (b'XDOUSERM', 1),
(b'XDOUSERM', 8): (b'XDOUSERM', 1),
(b'XDOUSERM', 9): (b'XDOUSERM', 1),
(b'XDOUSERM', 10): (b'XDOUSERM', 1),
(b'XDOUSERM', 11): (b'XDOUSERM', 1),
(b'XDOUSERM', 12): (b'XDOUSERM', 1),
(b'XDOUSERM', 13): (b'XDOUSERM', 1),
(b'XDOUSERM', 14): (b'XDOUSERM', 1),
(b'XDOUSERM', 15): (b'XDOUSERM', 1),
(b'XDOUSERM', 16): (b'XDOUSERM', 1),
(b'XDOUSERM', 17): (b'XDOUSERM', 1),
(b'XLANDFX ', 2): (b'XLANDFX ', 1),
(b'XLANDFX ', 3): (b'XLANDFX ', 1),
(b'XLANDFX ', 4): (b'XLANDFX ', 1),
(b'XLANDFX ', 5): (b'XLANDFX ', 1),
(b'XLANDFX ', 6): (b'XLANDFX ', 1),
(b'XLANDFX ', 7): (b'XLANDFX ', 1),
(b'XLANDFX ', 8): (b'XLANDFX ', 1),
(b'XLANDFX ', 9): (b'XLANDFX ', 1),
(b'XLANDFX ', 10): (b'XLANDFX ', 1),
(b'XLANDFX ', 11): (b'XLANDFX ', 1),
(b'XLANDFX ', 12): (b'XLANDFX ', 1),
(b'XLANDFX ', 13): (b'XLANDFX ', 1),
(b'XLANDFX ', 14): (b'XLANDFX ', 1),
(b'XMOVEINB', 2): (b'XMOVEINB', 1),
(b'XMOVEINB', 3): (b'XMOVEINB', 1),
(b'XMOVEINB', 4): (b'XMOVEINB', 1),
(b'XMOVEINB', 5): (b'XMOVEINB', 1),
(b'XMOVEINB', 6): (b'XMOVEINB', 1),
(b'XMOVEINB', 7): (b'XMOVEINB', 1),
(b'XMOVEINB', 8): (b'XMOVEINB', 7),
(b'XMOVEINB', 9): (b'XMOVEINB', 1),
(b'XMOVEINB', 10): (b'XMOVEINB', 1),
(b'XMOVEINB', 11): (b'XMOVEINB', 1),
(b'XMOVEINB', 12): (b'XMOVEINB', 1),
(b'XMOVEINB', 13): (b'XMOVEINB', 1),
(b'XMOVEINB', 14): (b'XMOVEINB', 1),
(b'XMOVEINB', 15): (b'XMOVEINB', 1),
(b'XMOVEINB', 16): (b'XMOVEINB', 1),
(b'XMOVEINB', 17): (b'XMOVEINB', 1),
(b'XMOVEINB', 18): (b'XMOVEINB', 1),
(b'XMOVEINB', 19): (b'XMOVEINB', 1),
(b'XMOVEINB', 20): (b'XMOVEINB', 1),
(b'XMOVEINB', 21): (b'XMOVEINB', 1),
(b'XMOVEINB', 22): (b'XMOVEINB', 1),
(b'XMOVEINB', 23): (b'XMOVEINB', 1),
(b'XMOVEINB', 24): (b'XMOVEINB', 1),
(b'XMOVEINB', 25): (b'XMOVEINB', 1),
(b'XMOVEINB', 26): (b'XMOVEINB', 1),
(b'XMOVEINB', 27): (b'XMOVEINB', 1),
(b'XMOVEINB', 28): (b'XMOVEINB', 1),
(b'XMOVEONG', 2): (b'XMOVEONG', 1),
(b'XMOVEONG', 3): (b'XMOVEONG', 1),
(b'XMOVEONG', 4): (b'XMOVEONG', 3),
(b'XMOVEONG', 5): (b'XMOVEONG', 3),
(b'XMOVEONG', 6): (b'XMOVEONG', 1),
(b'XMOVEONG', 7): (b'XMOVEONG', 1),
(b'XMOVEONG', 8): (b'XMOVEONG', 1),
(b'XMOVEONG', 9): (b'XMOVEONG', 1),
(b'XMOVEONG', 10): (b'XMOVEONG', 9),
(b'XMOVEONG', 11): (b'XMOVEONG', 1),
(b'XMOVEONG', 12): (b'XMOVEONG', 11),
(b'XMOVEONG', 13): (b'XMOVEONG', 11),
(b'XMOVEONG', 14): (b'XMOVEONG', 11),
(b'XMOVEONG', 15): (b'XMOVEONG', 11),
(b'XMOVEONG', 16): (b'XMOVEONG', 11),
(b'XMOVEONG', 17): (b'XMOVEONG', 1),
(b'XMOVEONG', 18): (b'XMOVEONG', 1),
(b'XMOVEONG', 19): (b'XMOVEONG', 1),
(b'XMOVEONG', 20): (b'XMOVEONG', 1),
(b'XMOVEONG', 21): (b'XMOVEONG', 1),
(b'XMOVEONG', 22): (b'XMOVEONG', 1),
(b'XMOVEONG', 23): (b'XMOVEONG', 1),
(b'XMOVEONG', 24): (b'XMOVEONG', 1),
(b'XMOVEONG', 25): (b'XMOVEONG', 1),
(b'XMOVEONG', 26): (b'XMOVEONG', 25),
(b'XMOVEONG', 27): (b'XMOVEONG', 25),
(b'XMOVEONG', 28): (b'XMOVEONG', 1),
(b'XMOVEONG', 29): (b'XMOVEONG', 1),
(b'XMOVEONG', 30): (b'XMOVEONG', 1),
(b'XMOVEONG', 31): (b'XMOVEONG', 30),
(b'XMOVEONG', 32): (b'XMOVEONG', 1),
(b'XMOVEONG', 33): (b'XMOVEONG', 1),
(b'XMOVEONG', 34): (b'XMOVEONG', 1),
(b'XMOVEONG', 35): (b'XMOVEONG', 1),
(b'XMOVEONG', 36): (b'XMOVEONG', 1),
(b'XMOVEONG', 37): (b'XMOVEONG', 36),
(b'XMOVEONG', 38): (b'XMOVEONG', 37),
(b'XMOVEONG', 39): (b'XMOVEONG', 1),
(b'XMOVEONG', 40): (b'XMOVEONG', 1),
(b'XMOVEONG', 41): (b'XMOVEONG', 40),
(b'XMOVEONG', 42): (b'XMOVEONG', 40),
(b'XMOVEONG', 43): (b'XMOVEONG', 1),
(b'XMOVEONG', 44): (b'XMOVEONG', 43),
(b'XMOVEONG', 45): (b'XMOVEONG', 44),
(b'XMOVEONG', 46): (b'XMOVEONG', 43),
(b'XMOVEONG', 47): (b'XMOVEONG', 46),
(b'XMOVEONG', 48): (b'XMOVEONG', 43),
(b'XMOVEONG', 49): (b'XMOVEONG', 43),
(b'XMOVEONG', 50): (b'XMOVEONG', 43),
(b'XMOVEONG', 51): (b'XMOVEONG', 1),
(b'XMOVEONG', 52): (b'XMOVEONG', 51),
(b'XMOVEONG', 53): (b'XMOVEONG', 52),
(b'XMOVEONG', 54): (b'XMOVEONG', 52),
(b'XMOVEONG', 55): (b'XMOVEONG', 1),
(b'XMOVEONS', 2): (b'XMOVEONS', 1),
(b'XMOVEONS', 3): (b'XMOVEONS', 1),
(b'XMOVEONS', 4): (b'XMOVEONS', 1),
(b'XMOVEONS', 5): (b'XMOVEONS', 1),
(b'XMOVEONS', 6): (b'XMOVEONS', 1),
(b'XMOVEONS', 7): (b'XMOVEONS', 1),
(b'XMOVEONS', 8): (b'XMOVEONS', 1),
(b'XMOVEONS', 9): (b'XMOVEONS', 1),
(b'XMOVEONS', 10): (b'XMOVEONS', 1),
(b'XMOVEONS', 11): (b'XMOVEONS', 1),
(b'XMOVEONS', 12): (b'XMOVEONS', 1),
(b'XPILOTAG', 2): (b'XPILOTAG', 1),
(b'XPILOTAG', 3): (b'XPILOTAG', 1),
(b'XPILOTAG', 4): (b'XPILOTAG', 1),
(b'XPILOTAG', 5): (b'XPILOTAG', 1),
(b'XPILOTAG', 6): (b'XPILOTAG', 1),
(b'XPILOTAG', 7): (b'XPILOTAG', 6),
(b'XPILOTAG', 8): (b'XPILOTAG', 6),
(b'XPILOTAG', 9): (b'XPILOTAG', 1),
(b'XPILOTAG', 10): (b'XPILOTAG', 1),
(b'XPILOTAG', 11): (b'XPILOTAG', 1),
(b'XREADMAP', 2): (b'XREADMAP', 1),
(b'XREADMAP', 3): (b'XREADMAP', 2),
(b'XREADMAP', 4): (b'XREADMAP', 3),
(b'XREADMAP', 5): (b'XREADMAP', 2),
(b'XREADMAP', 6): (b'XREADMAP', 2),
(b'XREADMAP', 7): (b'XREADMAP', 6),
(b'XREADMAP', 8): (b'XREADMAP', 1),
(b'XREADMAP', 9): (b'XREADMAP', 8),
(b'XREADMAP', 10): (b'XREADMAP', 9),
(b'XREADMAP', 11): (b'XREADMAP', 9),
(b'XREADMAP', 12): (b'XREADMAP', 11),
(b'XREADMAP', 13): (b'XREADMAP', 9),
(b'XREADMAP', 14): (b'XREADMAP', 8),
(b'XREADMAP', 15): (b'XREADMAP', 8),
(b'XREADMAP', 16): (b'XREADMAP', 8),
(b'XSHOWMOV', 2): (b'XSHOWMOV', 1),
(b'XSHOWMOV', 3): (b'XSHOWMOV', 1),
(b'XSHOWMOV', 4): (b'XSHOWMOV', 1),
(b'XSHOWMOV', 5): (b'XSHOWMOV', 4),
(b'XSHOWMOV', 6): (b'XSHOWMOV', 1),
(b'XSHOWMOV', 7): (b'XSHOWMOV', 6),
(b'XSHOWMOV', 8): (b'XSHOWMOV', 1),
(b'XSHOWMOV', 9): (b'XSHOWMOV', 1),
(b'XSHOWMOV', 10): (b'XSHOWMOV', 1),
(b'XSHOWMOV', 11): (b'XSHOWMOV', 10),
(b'XSHOWMOV', 12): (b'XSHOWMOV', 1),
(b'XSHOWMOV', 13): (b'XSHOWMOV', 1),
(b'XSHOWMOV', 14): (b'XSHOWMOV', 13),
(b'XSLOTS ', 2): (b'XSLOTS ', 1),
(b'XSLOTS ', 3): (b'XSLOTS ', 1),
(b'XSLOTS ', 4): (b'XSLOTS ', 1),
(b'XSLOTS ', 5): (b'XSLOTS ', 1),
(b'XSLOTS ', 6): (b'XSLOTS ', 1),
(b'XSLOTS ', 7): (b'XSLOTS ', 1),
(b'XSLOTS ', 8): (b'XSLOTS ', 7),
(b'XSLOTS ', 9): (b'XSLOTS ', 1),
(b'XSLOTS ', 10): (b'XSLOTS ', 1),
(b'XSLOTS ', 11): (b'XSLOTS ', 1),
(b'XSLOTS ', 12): (b'XSLOTS ', 1),
(b'XSLOTS ', 13): (b'XSLOTS ', 12),
(b'XSTARTUP', 2): (b'XSTARTUP', 1),
(b'XSTARTUP', 3): (b'XSTARTUP', 1),
(b'XSTARTUP', 4): (b'XSTARTUP', 3),
(b'XSTARTUP', 5): (b'XSTARTUP', 3),
(b'XSTARTUP', 6): (b'XSTARTUP', 1),
(b'XSTARTUP', 7): (b'XSTARTUP', 1),
(b'XSTARTUP', 8): (b'XSTARTUP', 1),
(b'XSTARTUP', 9): (b'XSTARTUP', 1),
(b'XSTARTUP', 10): (b'XSTARTUP', 1),
(b'XSTARTUP', 11): (b'XSTARTUP', 1),
(b'XSTARTUP', 12): (b'XSTARTUP', 1),
(b'XSTARTUP', 13): (b'XSTARTUP', 1),
(b'XSTARTUP', 14): (b'XSTARTUP', 13),
(b'XSTARTUP', 15): (b'XSTARTUP', 1),
(b'XSTARTUP', 16): (b'XSTARTUP', 1),
(b'XSTARTUP', 17): (b'XSTARTUP', 16),
(b'XSTARTUP', 18): (b'XSTARTUP', 16),
(b'XSTARTUP', 19): (b'XSTARTUP', 18),
(b'XSTARTUP', 20): (b'XSTARTUP', 16),
(b'XSTARTUP', 21): (b'XSTARTUP', 16),
(b'XSTARTUP', 22): (b'XSTARTUP', 16),
(b'XSTARTUP', 23): (b'XSTARTUP', 16),
(b'XSTARTUP', 24): (b'XSTARTUP', 16),
(b'XSTARTUP', 25): (b'XSTARTUP', 16),
(b'XSTARTUP', 26): (b'XSTARTUP', 25),
(b'XSTARTUP', 27): (b'XSTARTUP', 25),
(b'XSTARTUP', 28): (b'XSTARTUP', 25),
(b'XSTARTUP', 29): (b'XSTARTUP', 25),
(b'XSTARTUP', 30): (b'XSTARTUP', 29),
(b'XSTARTUP', 31): (b'XSTARTUP', 25),
(b'XSTARTUP', 32): (b'XSTARTUP', 25),
(b'XSTARTUP', 33): (b'XSTARTUP', 32),
(b'XSTARTUP', 34): (b'XSTARTUP', 33),
(b'XSTARTUP', 35): (b'XSTARTUP', 33),
(b'XSTARTUP', 36): (b'XSTARTUP', 25),
(b'XSTARTUP', 37): (b'XSTARTUP', 16),
(b'XSTARTUP', 38): (b'XSTARTUP', 37),
(b'XSTARTUP', 39): (b'XSTARTUP', 38),
(b'XSTARTUP', 40): (b'XSTARTUP', 38),
(b'XSTARTUP', 41): (b'XSTARTUP', 37),
(b'XSTARTUP', 42): (b'XSTARTUP', 37),
(b'XSTARTUP', 43): (b'XSTARTUP', 42),
(b'XSTARTUP', 44): (b'XSTARTUP', 42),
(b'XSTARTUP', 45): (b'XSTARTUP', 37),
(b'XSTARTUP', 46): (b'XSTARTUP', 37),
(b'XSTARTUP', 47): (b'XSTARTUP', 37),
(b'XSTARTUP', 48): (b'XSTARTUP', 37),
(b'XSTARTUP', 49): (b'XSTARTUP', 48),
(b'XSTARTUP', 50): (b'XSTARTUP', 48),
(b'XSTARTUP', 51): (b'XSTARTUP', 16),
(b'XSTARTUP', 52): (b'XSTARTUP', 51),
(b'XSTARTUP', 53): (b'XSTARTUP', 51),
(b'XSTARTUP', 54): (b'XSTARTUP', 51),
(b'XSTARTUP', 55): (b'XSTARTUP', 51),
(b'XSTARTUP', 56): (b'XSTARTUP', 51),
(b'XSTARTUP', 57): (b'XSTARTUP', 51),
(b'XSTARTUP', 58): (b'XSTARTUP', 51),
(b'XSTARTUP', 59): (b'XSTARTUP', 51),
(b'XSTARTUP', 60): (b'XSTARTUP', 16),
(b'XSTARTUP', 61): (b'XSTARTUP', 60),
(b'XSTARTUP', 62): (b'XSTARTUP', 60),
(b'XSTARTUP', 63): (b'XSTARTUP', 60),
(b'XSTARTUP', 64): (b'XSTARTUP', 60),
(b'XSTARTUP', 65): (b'XSTARTUP', 60),
(b'XSTARTUP', 66): (b'XSTARTUP', 60),
(b'XSTARTUP', 67): (b'XSTARTUP', 60),
(b'XSTARTUP', 68): (b'XSTARTUP', 60),
(b'XSTARTUP', 69): (b'XSTARTUP', 16),
(b'XSTARTUP', 70): (b'XSTARTUP', 1),
(b'XSTARTUP', 71): (b'XSTARTUP', 70),
(b'XSTARTUP', 72): (b'XSTARTUP', 70),
(b'XSTARTUP', 73): (b'XSTARTUP', 1),
(b'XSTARTUP', 74): (b'XSTARTUP', 73),
(b'XSTARTUP', 75): (b'XSTARTUP', 74),
(b'XSTARTUP', 76): (b'XSTARTUP', 74),
(b'XSTARTUP', 77): (b'XSTARTUP', 73)}
| |
#!/usr/bin/python3
"""
REVISION
--------
Added revisions for overwatcher. This has nothing to do with actual versions of the code and is used to track major
changes to force rechecks of old tests and maybe keeping them up to date as new modifiers and options appear. New
revisions are kinda subjective, but for example new modifiers should mean a new revision (maybe the old tests can be
simplified with these) or big changes to the code flow.
Revision history (latest on top):
- 20191025 (REVISION NOT CHANGED) - added the possibility to run commands on the local PC as part of the test. This is
done with a new modifier, LOCAL. All commands after this modifier are run on the local PC and when the command set is
finished, it automatically reverts to running commands on the device. The revision is not changed because this does not
impact older tests, it just improves the newer ones.
- 20181012 : Added new modifiers - NOPRWAIT and NOTSTRICT which help with reboot parts. Old tests should be updated.
Also prompt waits block now and the timeout part is used to recover. Major changes to read and write parts.
"""
# Current overwatcher revision (see the revision history in the module
# docstring). Bump this when changes invalidate existing tests.
revision = 20181012
import socket
import random
import time
import datetime
import queue
import threading
import argparse
import yaml
import os
import subprocess
class Overwatcher():
"""
TEST AUTOMATION BASED ON SERIAL CONSOLE CONTROL AND OUTPUT.
"""
"""
-------------------------MAIN SETUP FUNCTION. Reads the test file. Can be overloaded
"""
def setup_test(self, test):
"""
Function used to setup all test configurations.
NOTE: defaults are set before this is called, so only set what you need.
NOTE: for backwards compatibility, this should be kept
"""
self.name = os.path.splitext(os.path.basename(test))[0] #Used for log file, get only the name
self.full_name = os.path.abspath(test) #Also save the full file path in the logs, because you never know
tf = open(test, "r")
elems = list(yaml.safe_load_all(tf))[0]
#Thanks to YAML this was easy
self.info = dict(elems['info'])
self.markers = dict(elems['markers'])
self.prompts = list(elems['prompts'])
self.triggers = dict(elems['triggers'])
self.actions = dict(elems['actions'])
self.config_seq = list(elems['initconfig'])
self.test_seq = list(elems['test'])
#What we need to worry about are the options
for opt in elems['options']:
setattr(self, opt, elems['options'][opt])
"""
-------------------------TEST RESULT FUNCTIONS, called on test ending. Can be overloaded.
"""
def mytest_timeout(self):
"""
Trying to improve the timeout problem. Sometimes the socket fluctuates and
overwatcher misses some output. This should be solved with a CR.
"""
if self.counter["test_timeouts"] == 0:
self.setResult("timeout")
else:
self.counter["test_timeouts"] -= 1
self.log("GOT A TIMEOUT, giving it another try...we have", self.counter["test_timeouts"], "left")
self.mainTimer = self.timer_startTimer(self.mainTimer)
if self.telnetTest is False:
#On telnet this does not help
self.sendDeviceCmd("") #Send a CR
    def mytest_failed(self):
        # Record a hard failure as the final test result.
        self.setResult("failed")
    def mytest_ok(self):
        # Record success; called when the whole test sequence completed.
        self.setResult("ok")
"""
-------------------------INIT FUNCTIONS
"""
def config_device(self):
"""
General device configuration
"""
self.log("\n\/ \/ \/ \/ STARTED CONFIG!\/ \/ \/ \/\n")
last_state = self.onetime_ConfigureDevice()
self.log("\n/\ /\ /\ /\ ENDED CONFIG!/\ /\ /\ /\ \n\n")
def setup_test_defaults(self):
#In case setup_test is overloaded, set these here
#NOTE: most likely will be overwritten in setup_test
self.name = type(self).__name__
self.full_name = type(self).__name__
self.timeout = 300.0 #seconds
self.largeCommand = 50 #what command should be sent into parts
self.strictStates = True #by default, enforce
self.config_seq = []
self.test_seq = []
self.actions = {}
self.triggers = {}
self.markers = {}
self.markers_cfg = {}
self.user_inp = {}
self.prompts = []
#Various test information
self.info = {}
def setup_modifiers_defaults(self):
self.opt_RunTriggers = True
self.opt_IgnoreStates = False
self.opt_RandomExec = False
self.opt_TimeCmd = False
self.mod_PromptWait = True
self.mod_RunLocal = False
self.modifiers ={ # Quick modifier set
"IGNORE_STATES" : self.e_IgnoreStates,
"WATCH_STATES" : self.d_IgnoreStates,
"TRIGGER_START" : self.e_RunTriggers,
"TRIGGER_STOP" : self.d_RunTriggers,
"SLEEP_RANDOM" : self.sleepRandom,
"RANDOM_START" : self.e_RandomExecution,
"RANDOM_STOP" : self.d_RandomExecution,
"COUNT" : self.countTrigger,
"TIMECMD" : self.timeCommand,
"NOTSTRICT" : self.notStrict,
"NOPRWAIT" : self.d_PromptWait,
"LOCAL" : self.e_runLocal
}
#What we need to run even if states are ignored and triggers disabled
self.critical_modifiers = ["WATCH_STATES", "TRIGGER_START"]
self.retval = {
"config failed": 3,
"timeout" : 2,
"failed" : 1,
"ok": 0
}
    def __init__(self, test, server='169.168.56.254', port=23200, runAsTelnetTest=False, endr=False):
        """
        Class init. KISS
        NOTE: keeping default for backwards compatibility...for now

        :param test: path to the YAML test file (see setup_test).
        :param server: host/IP of the console server to connect to.
        :param port: TCP port of the console server.
        :param runAsTelnetTest: True when talking to a telnet console
            instead of a serial console server.
        :param endr: True to terminate commands with CRLF instead of LF.

        NOTE(review): this constructor runs the whole test and calls exit()
        with the result code, so it never returns to the caller.
        """
        #Connection stuff
        self.server = server
        self.port = port
        if endr is False:
            self.sendendr = 'noendr'
        else:
            self.sendendr = 'endr'
        #Add support for infinite running tests - this can be set in setup_test
        #NOTE: timeout still occurs!
        self.infiniteTest = False
        #Add support for running the tests over telnet
        self.telnetTest = runAsTelnetTest
        #For telnet we need to send just a '\r', adding a dict to make things easier
        if self.telnetTest is False:
            self.eol= { 'endr': "\r\n", 'noendr': "\n"}
        else:
            self.eol= { 'endr': "\r", 'noendr': "\r" }
        #Add support for random sleep amounts - this can be set in setup_test
        self.sleep_min = 30 #seconds
        self.sleep_max = 120 #seconds
        self.test_max_timeouts = 2 #How many timeouts can occur per test or per loop
        #Store counts for various triggers
        self.counter = {}
        self.counter["test_loop"] = 1
        self.counter["test_timeouts"] = self.test_max_timeouts
        # Queues decouple the reader/writer/state-watcher/test threads.
        self.queue_state = queue.Queue()
        self.queue_result = queue.Queue()
        self.queue_serread = queue.Queue()
        self.queue_serwrite = queue.Queue()
        #Start with defaults
        self.setup_test_defaults()
        self.setup_modifiers_defaults()
        #Use one main timer for all for now - note: needs default timeout value
        self.mainTimer = self.timer_startTimer(None)
        #Load the user setup
        self.setup_test(test)
        #Open the log file and print everything (line-buffered so tail works)
        self.file_test = open(self.name + "_testresults.log", "w", buffering=1)
        self.print_test()
        self.sleep_sockWait = 0 #Just for startup
        self.mainSocket = self.sock_create()
        self.sleep_sockWait = 30 #seconds
        #For the config phase also use the cfg only markers
        self.statewatcher_markers = dict(self.markers_cfg)
        self.statewatcher_markers.update(self.markers)
        #Prepare the threads
        self.run = {}
        self.th = {}
        self.run["recv"] = True #receiver loop - used to get out of large commands
        self.th["recv"] = threading.Thread(target=self.thread_SerialRead, daemon=True)
        self.th["recv"].start()
        self.run["send"] = True #sender loop run flag
        self.th["send"] = threading.Thread(target=self.thread_SerialWrite, daemon=True)
        self.th["send"].start()
        self.run["state_watcher"] = True
        self.th["state_watcher"] = threading.Thread(target=self.thread_StateWatcher, daemon=True)
        self.th["state_watcher"].start()
        #Configure the device
        self.config_device()
        #For the normal run, revert back to the normal markers
        self.statewatcher_markers = dict(self.markers)
        #See if the config failed (non-blocking: None means "no result yet")
        res = self.getResult(block=False)
        if res is not None:
            self.cleanAll()
            exit(res)
        #Start the TEST thread
        self.run["test"] = True
        self.th["test"] = threading.Thread(target=self.thread_MyTest, daemon=True)
        self.th["test"].start()
        # Block until the test thread (or a timeout) produces a result.
        res = self.getResult(block=True)
        self.cleanAll()
        exit(res)
"""
-------------------------DEVICE CONFIGURATION
"""
def onetime_ConfigureDevice(self):
conf_len = len(self.config_seq)
#Quick detour
if conf_len == 0:
return
conf_idx = 0
while(conf_idx < conf_len):
#Look for the state
req_state = self.config_seq[conf_idx]
#
## See if we need to run some actions
###
try:
self.log("RUNNING ACTIONS:", req_state, "=", self.actions[req_state])
for elem in self.actions[req_state]:
self.sendDeviceCmd(elem)
self.waitDevicePrompt(elem)
conf_idx += 1
continue
except KeyError:
pass
self.log("Looking for:", self.config_seq[conf_idx]) #idx might change
current_state = self.getDeviceState()
if current_state == "":
break
# If the required state is found
if req_state == current_state:
self.log("MOVED TO STATE=", req_state)
conf_idx += 1
#Restart timer
self.mainTimer = self.timer_startTimer(self.mainTimer)
self.mainTimer = self.timer_stopTimer(self.mainTimer)
return current_state
"""
-------------------------THREADS
"""
    def thread_SerialRead(self):
        """
        Receiver thread.
        Job: parses serial out and forms things in sentences. Does not interpret the information, except the line
        endings to form lines.  Complete, stripped lines are pushed to
        queue_serread; the socket is transparently reopened on errors.
        """
        while self.run["recv"] is True:
            serout = ""
            while self.run["recv"] is True:
                #Why do the timeout: the login screen displays "User:" and no endline.
                #How do you know that the device is waiting for something in this case?
                try:
                    # Byte-at-a-time read; the socket has a 1s timeout (set
                    # in sock_create) so a quiet line still flushes out.
                    x = self.mainSocket.recv(1)
                except socket.timeout:
                    x = self.eol[self.sendendr].encode() #same line ending
                    break
                except OSError:
                    self.log("Reopening socket")
                    self.mainSocket = self.sock_create()
                    break #restart reading
                if not x:
                    # Empty read means the peer closed the connection.
                    self.log("Socket closed, reopening")
                    self.mainSocket = self.sock_create()
                    break #restart reading
                try:
                    serout += x.decode('ascii')
                except UnicodeDecodeError:
                    # Non-ASCII noise on the line is silently dropped.
                    pass
                #Doing this to make sure we match correctly everytime
                #and to take into account the \r\n situation
                if x == self.eol[self.sendendr][0].encode():
                    break
            tmp = serout.strip() #to log the device output unmodified
            if(len(tmp) != 0):
                self.log("DEV", repr(serout))
                self.queue_serread.put(tmp)
        # Loop ended (run["recv"] cleared) - release the socket.
        self.sock_close(self.mainSocket)
def thread_SerialWrite(self):
"""
Sender thread.
JOB: Sends commands to the device. Breaks large commands into pieces to not have problems with missing parts.
"""
while self.run["send"] is True:
cmd = self.queue_serwrite.get(block=True)
cmd = str(cmd) #in case someone writes numbers in yml
if cmd is None:
break
else:
lcmd = len(cmd)
#Skip endline for y/n stuff
#NOTE: also works for 0 len cmds for sending an CR
if lcmd != 1:
cmd += self.eol[self.sendendr]
while True:
try:
#Improve handling of large commands sent to the device
if lcmd > self.largeCommand:
lim = int((lcmd/2)-1)
self.mainSocket.sendall(cmd[0:lim].encode())
time.sleep(0.25)
self.mainSocket.sendall(cmd[lim:].encode())
else:
self.mainSocket.sendall(cmd.encode())
break #Exit loop
except OSError:
#Loop until socket is back
self.log("Waiting for socket to send stuff")
time.sleep(1)
continue
self.log("SENT", repr(cmd))
    def thread_StateWatcher(self):
        """
        STATE WATCHER: looks for the current state of the device.
        Matches each received line against statewatcher_markers, publishes
        the matched state on the state queue and runs the state's triggers.
        """
        while(self.run["state_watcher"] is True):
            serout = self.getDeviceOutput()
            #Speed things up a bit
            if serout == "":
                continue
            for marker in self.statewatcher_markers:
                match = False
                if self.statewatcher_markers[marker] not in self.prompts:
                    #If marker is not a prompt, just look for it in the output
                    if marker in serout:
                        match = True
                else:
                    #If the marker is a prompt, we need to make sure we don't also
                    #consider it when it is part of a command sent to the device. So
                    #we try to see if there is something after it.
                    try:
                        if len(serout.strip().split(marker)[1]) == 0:
                            match = True
                    except IndexError:
                        # Marker not present at all in this line.
                        continue
                if match is True:
                    current_state = self.statewatcher_markers[marker]
                    self.log("FOUND", current_state, "state in", serout)
                    #Run the critical modifiers, if any are present for the state
                    # (these must run even with triggers disabled - e.g.
                    # WATCH_STATES / TRIGGER_START re-enable the machinery)
                    try:
                        actions = self.triggers[current_state]
                        for opt in actions:
                            if opt in self.critical_modifiers:
                                self.modifiers[opt](current_state)
                    except KeyError:
                        pass
                    #Notify everyone of the new state
                    self.updateDeviceState(current_state)
                    #Run the triggers of the state
                    if self.opt_RunTriggers is True:
                        try:
                            for act in self.triggers[current_state]:
                                if act not in self.modifiers.keys():
                                    # Plain trigger entries are commands.
                                    self.sendDeviceCmd(act)
                                elif act not in self.critical_modifiers:
                                    #Run the rest of the normal modifiers, in order
                                    self.modifiers[act](current_state)
                        except KeyError:
                            pass
    def thread_MyTest(self):
        """
        ACTUAL TEST thread. Looks for states and executes stuff.
        Each test_seq entry is resolved in priority order: user-input wait,
        action set, modifier, and finally a device state to wait for.
        Calls mytest_ok() when the whole sequence completed.
        """
        test_len = len(self.test_seq)
        test_idx = 0
        while self.run["test"] is True:
            if test_idx == test_len:
                if self.infiniteTest is True:
                    self.counter["test_loop"] += 1
                    self.counter["test_timeouts"] = self.test_max_timeouts #Reset the timeouts possible
                    self.log("GOT TO LOOP.....", self.counter["test_loop"])
                    test_idx = 0
                else:
                    break
            required_state = self.test_seq[test_idx]
            #
            ## See if we need to wait for some user input
            ###
            try:
                # KeyError here (entry not in user_inp) falls through below.
                self.log("\n\n\n", self.user_inp[required_state], "\n\n\n")
                #NOTE: stop timer while waiting for user input
                self.mainTimer = self.timer_stopTimer(self.mainTimer)
                input("EXECUTE ACTION AND PRESS ENTER")
                print("\nCONTINUING\n")
                test_idx += 1
                #Restart timer
                self.mainTimer = self.timer_startTimer(self.mainTimer)
                continue
            except KeyError:
                pass
            #
            ## See if we need to run some actions
            ###
            try:
                #Handle RANDOM actions
                # (tossCoin is always True unless RANDOM_START is active;
                # the actions[...] lookup raises KeyError for non-actions)
                if self.tossCoin() is True:
                    self.log("RUNNING ACTIONS:", required_state, "=", self.actions[required_state])
                    for elem in self.actions[required_state]:
                        #Run any modifiers in actions
                        try:
                            self.modifiers[elem](required_state)
                            continue
                        except KeyError:
                            pass
                        if self.mod_RunLocal is False:
                            self.sendDeviceCmd(elem)
                            self.waitDevicePrompt(elem)
                        else:
                            self.runLocalCommand(elem)
                # NOTE(review): when the coin toss skips, the entry is still
                # advanced past without checking it is an action - confirm
                # this is the intended RANDOM semantics.
                test_idx += 1
                # Revert back to defaults
                self.e_PromptWait(required_state)
                self.d_runLocal(required_state)
                continue
            except KeyError:
                pass
            #
            ## See if we have any modifiers
            ###
            try:
                self.log("FOUND MODIFIER:", self.modifiers[required_state], "in state", required_state)
                #Needed for sleep option
                self.mainTimer = self.timer_stopTimer(self.mainTimer)
                self.modifiers[required_state](required_state)
                test_idx += 1
                #Restart timer
                self.mainTimer = self.timer_startTimer(self.mainTimer)
                continue
            except KeyError:
                pass
            self.log("Looking for:", self.test_seq[test_idx]) #idx might change
            current_state = self.getDeviceState()
            if self.opt_IgnoreStates is True:
                self.log("IGNORED STATE", current_state)
                continue
            # If the required state is found
            if required_state == current_state:
                self.log("MOVED TO STATE=", required_state)
                test_idx += 1
            # State changed and it isn't what we expect
            else:
                ignore = False
                try:
                    if self.triggers[current_state][0] == "NOTSTRICT":
                        ignore = True
                except KeyError:
                    pass
                if self.strictStates is False or ignore is True:
                    self.log("STATE", current_state, "unexpected, but welcomed")
                elif ignore is False:
                    self.log("FOUND=", current_state, ", BUT WAS LOOKING FOR:", required_state)
                    self.mytest_failed()
            #TIMEOUT until next state
            self.mainTimer = self.timer_startTimer(self.mainTimer)
        self.mytest_ok()
"""
-----------------------------------------INTERNAL APIs
"""
    def e_RunTriggers(self, state):
        # TRIGGER_START modifier: (re-)enable running state triggers.
        #Already set, no need to do it again
        if self.opt_RunTriggers is True:
            return
        self.log("ENABLING TRIGGERS")
        self.opt_RunTriggers = True
    def d_RunTriggers(self, state):
        # TRIGGER_STOP modifier: stop running state triggers.
        #Already set, no need to do it again
        if self.opt_RunTriggers is False:
            return
        self.log("DISABLING TRIGGERS")
        self.opt_RunTriggers = False
    def e_IgnoreStates(self, state):
        # IGNORE_STATES modifier: stop acting on device states (typically
        # used around reboots where output is noisy).
        #Already set, no need to do it again
        if self.opt_IgnoreStates is True:
            return
        self.log("IGNORING STATES")
        self.opt_IgnoreStates = True
        if self.telnetTest is True:
            #Only on telnet, close the socket now, as this is probably a reboot
            self.sock_close(self.mainSocket)
    def d_IgnoreStates (self, state):
        # WATCH_STATES modifier: resume acting on device states.
        #Already set, no need to do it again
        if self.opt_IgnoreStates is False:
            return
        self.log("WATCHING STATES")
        self.opt_IgnoreStates = False
    def e_RandomExecution(self, state):
        # RANDOM_START modifier: actions run only on a coin toss (tossCoin).
        self.log("RANDOM EXECUTION")
        self.opt_RandomExec = True
    def d_RandomExecution(self, state):
        # RANDOM_STOP modifier: actions always run again.
        self.log("STOP RANDOM EXECUTION")
        self.opt_RandomExec = False
    def e_PromptWait(self, state):
        # Re-enable waiting for a prompt after each command (default).
        if self.mod_PromptWait is not True:
            self.log("WAITING FOR PROMPT AGAIN!")
            self.mod_PromptWait = True
    def d_PromptWait(self, state):
        # NOPRWAIT modifier: send commands without waiting for a prompt.
        self.log("SENDING COMMANDS WITHOUT PROMPT WAIT!")
        self.mod_PromptWait = False
    def e_runLocal(self, state):
        # LOCAL modifier: run subsequent commands on the local PC.
        self.log("RUNNING ON LOCAL PC!")
        self.mod_RunLocal = True
    def d_runLocal(self, state):
        # Revert to running commands on the device (default).
        self.log("RUNNING ON DEVICE")
        self.mod_RunLocal = False
def runLocalCommand(self, command):
res = subprocess.call(command, shell=True)
#TODO: retain full command output
self.log("Command" + command + " return status " + str(res))
def countTrigger(self, state):
try:
self.counter[state] += 1
except KeyError:
self.counter[state] = 1
self.log("COUNTING for \'" + state + "\'...got to ", self.counter[state])
#Display all counting stats everytime:
for elem in self.counter:
self.log("COUNT FOR", elem, "is", self.counter[elem])
    def timeCommand(self, state):
        # TIMECMD modifier: arm one-shot timing; waitDevicePrompt measures
        # and logs the next command's duration, then clears the flag.
        self.log("TIMING NEXT COMMAND")
        self.opt_TimeCmd = True
    def notStrict(self, state):
        # NOTSTRICT modifier: marker only - the actual tolerance check is
        # done in thread_MyTest by inspecting triggers[state][0].
        self.log("State", state, "treated as NOT STRICT!")
    def sleepRandom(self, state):
        # SLEEP_RANDOM modifier: sleep a random amount within the
        # [sleep_min, sleep_max] bounds (settable from the test file).
        duration = random.randint(self.sleep_min, self.sleep_max)
        self.log("ZzzzZZzzzzzzZzzzz....(", duration, "seconds )....")
        time.sleep(duration)
        self.log("....WAKE UP!")
    def tossCoin(self):
        # Returns True unless RANDOM execution is active, in which case the
        # outcome is a fair coin toss (and it is logged).
        if self.opt_RandomExec is False:
            return True
        else:
            ret = random.choice([True, False])
            self.log("Random coin toss showed", ret)
            return ret
def getDeviceOutput(self):
"""
Wrapper over serial receive queue. Blocks until data is available.
Returns "" if queue is closing.
"""
serout = self.queue_serread.get(block=True)
self.queue_serread.task_done()
if serout is None:
return ""
else:
return serout
    def sendDeviceCmd(self, cmd):
        """
        Wrapper over serial send queue.  The sender thread
        (thread_SerialWrite) picks the command up and transmits it.
        """
        self.queue_serwrite.put(cmd)
    def getDeviceState(self):
        """
        Wrapper over state queue. Blocks until data is available.
        Returns "" if queue is closing (None sentinel).
        """
        state = self.queue_state.get(block=True)
        self.queue_state.task_done()
        if state is None:
            return ""
        else:
            return state
    def waitDevicePrompt(self, cmd):
        """
        Wait until we see something defined as a device prompt. All other states
        are ignored and put back in the queue. Prompts are consumed.
        This now blocks until it sees a prompt. If the timeout is triggered we
        try a recovery and wait again, which should also help this. If it does
        not, something bad happened.

        With NOPRWAIT active (mod_PromptWait False) this only sleeps 1s.
        With TIMECMD armed, the time spent waiting is logged for *cmd*.
        """
        if self.mod_PromptWait is True:
            self.log("Waiting for prompt for elem", cmd)
        else:
            # NOPRWAIT: give the device a moment, but do not block.
            time.sleep(1)
            return
        #Here we time the command from start
        if self.opt_TimeCmd is True:
            startOfPromptWait = datetime.datetime.now()
        while self.opt_IgnoreStates is False:
            #Look just for prompts, put everything else back
            state = self.getDeviceState()
            if state in self.prompts:
                self.log("Found prompt!")
                break
            else:
                # Not a prompt: re-queue it for the other consumers.
                self.updateDeviceState(state)
                time.sleep(0.2)
        #Until the prompt wait is over
        if self.opt_TimeCmd is True:
            # TIMECMD is one-shot: clear the flag and report the duration.
            self.opt_TimeCmd = False
            endOfPromptWait = datetime.datetime.now()
            self.log("Command", repr(cmd), "took", str(endOfPromptWait - startOfPromptWait))
    def updateDeviceState(self, state):
        """
        Wrapper over state queue.  Publishes (or re-queues) a state for the
        consumers (thread_MyTest / waitDevicePrompt / config walk).
        """
        self.queue_state.put(state)
def getResult(self, block=True):
"""
Wrapper over result queue. Blocks until data is available.
"""
ret = None
try:
res = self.queue_result.get(block)
except queue.Empty:
res = None
if res is not None:
self.queue_result.task_done()
self.log("GOT RESULT:", res)
try:
ret = self.retval[res]
self.log("RETURNING:", ret)
except KeyError:
self.log("RET VALUE UNKNOWN! Update retval option!")
ret = -98
elif block is True:
self.log("RESULT QUEUE FAILED! GENERIC ERROR!")
ret = -99
return ret
def setResult(self, res):
"""
Wrapper over result queue. Does some filtering of the final message.
"""
try:
self.queue_result.put_nowait(res)
except queue.QueueFull:
print("FAILED TO SET RESULT")
pass
    def timer_startTimer(self, timer):
        """
        Starts or restarts a timer using the class options (timeout and mytest_timeout).
        Returns the new threading.Timer, or None when self.timeout == 0
        (test configured without a timeout).
        """
        if self.timeout == 0:
            self.log("Test has no timeout!")
            return None
        try:
            if timer is not None:
                timer.cancel()
            del timer
            timer = threading.Timer(self.timeout, self.mytest_timeout)
            timer.start()
        except UnboundLocalError:
            # NOTE(review): this except looks unreachable (timer is always
            # bound as a parameter) - kept for safety.
            self.log("ERROR starting timer!")
            timer = None
        return timer
    def timer_stopTimer(self, timer):
        """
        Just stops a timer.  Always returns None so callers can rebind
        their reference (self.mainTimer = self.timer_stopTimer(...)).
        """
        try:
            if timer is not None:
                timer.cancel()
            del timer
        except UnboundLocalError:
            self.log("ERROR stopping timer!")
            timer = None
            pass
        return None
    def sock_create(self):
        """
        (Re)open the TCP connection to the console server, retrying until a
        first byte is received.  Returns the connected socket with a 1s
        receive timeout (relied upon by thread_SerialRead).
        """
        if self.telnetTest is True and self.sleep_sockWait != 0:
            #On telnet it might close before the IGNORE STATES part
            self.e_IgnoreStates(None)
            self.d_RunTriggers(None)
        time.sleep(self.sleep_sockWait) #wait a bit before restarting connection
        self.log("Opening socket")
        connected = False
        while not connected:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((self.server, self.port))
            time.sleep(2)
            if self.telnetTest is False:
                #on serial, send an endl when creating the socket
                s.sendall(self.eol[self.sendendr].encode())
            # Treat the first received byte as proof the link is alive.
            connected = s.recv(1)
        self.log("Socket online")
        s.setblocking(0)
        s.settimeout(1) #seconds
        #We might have missed something on serial
        #On telnet this is important
        self.opt_IgnoreStates = False
        self.opt_RunTriggers = True
        return s
def sock_close(self, s):
if s is not None:
self.log("Closing socket")
s.close()
s = None
def logNoPrint(self, *args):
outtext = ""
for elem in args:
outtext += str(elem)
outtext += " "
try:
self.file_test.write(str(datetime.datetime.now()) + ' - ' + outtext + "\n")
return outtext
except ValueError:
return ""
    def log(self, *args):
        # Write to the test log (via logNoPrint) and echo the same line,
        # prefixed with "+++>", to stdout with a fresh timestamp.
        print(str(datetime.datetime.now()), self.logNoPrint("+++>", *args))
    def print_test(self):
        """
        Verify the test's declared overwatcher revision against this file's
        and dump the full parsed test description into the log file.
        Prompts the operator (blocking input()) when the revision is missing
        or mismatched.
        """
        ## First let's check the test. This is here to also handle the case
        ## where the setup function is overwritten. Added global revision to
        ## see it better at start of file.
        global revision
        ask = False
        try:
            if self.info['overwatcher revision required'] != revision:
                print("\nOverwatcher revision mismatch! Please check the test!\n")
                ask = True
        except KeyError:
            print("\nNo revision information in test. Please add info!\n")
            ask = True
        if ask is True:
            input("\n\nTest should be checked before running!")
            input("Press CTRL-C to stop or ENTER to continue!")
        self.file_test.write(self.name + "\n\n")
        self.file_test.write(self.full_name + "\n\n")
        for elem in self.info:
            if elem == "version":
                # NOTE(review): "version" is assumed to be a list; only its
                # first element is logged - confirm against test files.
                self.file_test.write(elem + " - " + str(self.info[elem][0]) + "\n")
            else:
                self.file_test.write(elem + " - " + str(self.info[elem]) + "\n")
        self.file_test.write("\n\n")
        self.file_test.write("MARKERS:\n")
        self.file_test.write(str(self.markers) + "\n")
        self.file_test.write("MARKERS CFG:\n")
        self.file_test.write(str(self.markers_cfg) + "\n")
        self.file_test.write("TRIGGERS:\n")
        self.file_test.write(str(self.triggers) + "\n")
        self.file_test.write("CONF SEQ:\n")
        self.file_test.write(str(self.config_seq) + "\n")
        self.file_test.write("TEST SEQ:\n")
        self.file_test.write(str(self.test_seq) + "\n")
        self.file_test.write("USER_INP\n")
        self.file_test.write(str(self.user_inp) + "\n")
        self.file_test.write("ACTIONS:\n")
        self.file_test.write(str(self.actions) + "\n")
        self.file_test.write("RUN TRIGGERS=" + str(self.opt_RunTriggers) + "\n")
        self.file_test.write("IGNORE STATES=" + str(self.opt_IgnoreStates) + "\n")
        self.file_test.write("\n\nTEST START:\n\n")
    def cleanAll(self):
        """
        Shut everything down: clear the per-thread run flags, push None
        shutdown sentinels into the queues to unblock waiting threads, join
        all worker threads and close the log file.
        """
        print(self.run)
        for elem in self.run:
            self.run[elem] = False
            print("Ended", elem)
        # None sentinels wake up the blocking queue.get() calls.
        self.queue_state.put(None)
        self.queue_serread.put(None)
        self.queue_serwrite.put(None)
        print(self.th)
        #NOTE: result watcher is not in list!
        for thread in self.th:
            print("Joining with", thread)
            self.th[thread].join()
            print("Joined with", thread)
        print("CLOSING FILE")
        self.file_test.close()
        print("CLOSED FILE")
if __name__ == "__main__":
    # Command-line entry point.  Note: instantiating Overwatcher runs the
    # whole test and exits with its result code (see __init__).
    parser = argparse.ArgumentParser(description="Ultra-light test framework")
    parser.add_argument('test', help='YAML test file to run')
    parser.add_argument('--server', help='IP to telnet to',
                        default='localhost')
    parser.add_argument('--port', help='Port to telnet to',
                        type=int, default=3000)
    parser.add_argument('--telnet', help='Run test over telnet to device',
                        action='store_true')
    # BUG FIX: raw string so the help text shows the literal characters
    # "\r\n" instead of embedding real CR/LF control characters.
    parser.add_argument('--endr', help=r'Send a \r\n instead of just \n',
                        action='store_true')
    args = parser.parse_args()
    test = Overwatcher(args.test, server=args.server, port=args.port, runAsTelnetTest=args.telnet, endr=args.endr)
| |
"""Functions copypasted from newer versions of numpy.
"""
from __future__ import division, print_function, absolute_import
import warnings
import sys
from warnings import WarningMessage
import re
from functools import wraps
import numpy as np
from scipy._lib._version import NumpyVersion
# Use numpy's assert_warns when available; otherwise fall back to a local
# copy (older numpy returned None instead of the callable's result).
if NumpyVersion(np.__version__) > '1.7.0.dev':
    _assert_warns = np.testing.assert_warns
else:
    def _assert_warns(warning_class, func, *args, **kw):
        r"""
        Fail unless the given callable throws the specified warning.
        This definition is copypasted from numpy 1.9.0.dev.
        The version in earlier numpy returns None.
        Parameters
        ----------
        warning_class : class
            The class defining the warning that `func` is expected to throw.
        func : callable
            The callable to test.
        *args : Arguments
            Arguments passed to `func`.
        **kwargs : Kwargs
            Keyword arguments passed to `func`.
        Returns
        -------
        The value returned by `func`.
        """
        with warnings.catch_warnings(record=True) as l:
            warnings.simplefilter('always')
            result = func(*args, **kw)
            # At least one warning must have been emitted...
            if not len(l) > 0:
                raise AssertionError("No warning raised when calling %s"
                        % func.__name__)
            # ...and the first one must be exactly the expected class.
            if not l[0].category is warning_class:
                raise AssertionError("First warning for %s is not a "
                        "%s( is %s)" % (func.__name__, warning_class, l[0]))
        return result
# Use numpy's broadcast_to when available (added in 1.10.0); otherwise use
# the vendored definition below.
if NumpyVersion(np.__version__) >= '1.10.0':
    from numpy import broadcast_to
else:
    # Definition of `broadcast_to` from numpy 1.10.0.
    def _maybe_view_as_subclass(original_array, new_array):
        # Preserve ndarray subclasses: view the broadcast result as the
        # input's type and give it a chance to finalize itself.
        if type(original_array) is not type(new_array):
            # if input was an ndarray subclass and subclasses were OK,
            # then view the result as that subclass.
            new_array = new_array.view(type=type(original_array))
        # Since we have done something akin to a view from original_array, we
        # should let the subclass finalize (if it has it implemented, i.e., is
        # not None).
        if new_array.__array_finalize__:
            new_array.__array_finalize__(original_array)
        return new_array
    def _broadcast_to(array, shape, subok, readonly):
        # Normalize a scalar shape to a 1-tuple.
        shape = tuple(shape) if np.iterable(shape) else (shape,)
        array = np.array(array, copy=False, subok=subok)
        if not shape and array.shape:
            raise ValueError('cannot broadcast a non-scalar to a scalar array')
        if any(size < 0 for size in shape):
            raise ValueError('all elements of broadcast shape must be non-'
                             'negative')
        # nditer with itershape produces a zero-copy broadcast view.
        broadcast = np.nditer(
            (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],
            op_flags=['readonly'], itershape=shape, order='C').itviews[0]
        result = _maybe_view_as_subclass(array, broadcast)
        if not readonly and array.flags.writeable:
            result.flags.writeable = True
        return result
    def broadcast_to(array, shape, subok=False):
        return _broadcast_to(array, shape, subok=subok, readonly=True)
if NumpyVersion(np.__version__) >= '1.9.0':
from numpy import unique
else:
# the return_counts keyword was added in 1.9.0
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements: the indices of the input array
that give the unique values, the indices of the unique array that
reconstruct the input array, and the number of times each unique value
comes up in the input array.
Parameters
----------
ar : array_like
Input array. This will be flattened if it is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` that result in the unique
array.
return_inverse : bool, optional
If True, also return the indices of the unique array that can be used
to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique value comes up
in `ar`.
.. versionadded:: 1.9.0
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
(flattened) original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the (flattened) original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
.. versionadded:: 1.9.0
Notes
-----
Taken over from numpy 1.12.0-dev (c8408bf9c). Omitted examples,
see numpy documentation for those.
"""
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
optional_returns = optional_indices or return_counts
if ar.size == 0:
if not optional_returns:
ret = ar
else:
ret = (ar,)
if return_index:
ret += (np.empty(0, np.bool),)
if return_inverse:
ret += (np.empty(0, np.bool),)
if return_counts:
ret += (np.empty(0, np.intp),)
return ret
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if not optional_returns:
ret = aux[flag]
else:
ret = (aux[flag],)
if return_index:
ret += (perm[flag],)
if return_inverse:
iflag = np.cumsum(flag) - 1
inv_idx = np.empty(ar.shape, dtype=np.intp)
inv_idx[perm] = iflag
ret += (inv_idx,)
if return_counts:
idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
ret += (np.diff(idx),)
return ret
# Use numpy's polyvalfromroots when available (added in 1.12.0.dev).
if NumpyVersion(np.__version__) > '1.12.0.dev':
    polyvalfromroots = np.polynomial.polynomial.polyvalfromroots
else:
    def polyvalfromroots(x, r, tensor=True):
        r"""
        Evaluate a polynomial specified by its roots at points x.
        This function is copypasted from numpy 1.12.0.dev.
        If `r` is of length `N`, this function returns the value
        .. math:: p(x) = \prod_{n=1}^{N} (x - r_n)
        The parameter `x` is converted to an array only if it is a tuple or a
        list, otherwise it is treated as a scalar. In either case, either `x`
        or its elements must support multiplication and addition both with
        themselves and with the elements of `r`.
        If `r` is a 1-D array, then `p(x)` will have the same shape as `x`.  If
        `r` is multidimensional, then the shape of the result depends on the
        value of `tensor`. If `tensor is ``True`` the shape will be r.shape[1:]
        + x.shape; that is, each polynomial is evaluated at every value of `x`.
        If `tensor` is ``False``, the shape will be r.shape[1:]; that is, each
        polynomial is evaluated only for the corresponding broadcast value of
        `x`. Note that scalars have shape (,).
        Parameters
        ----------
        x : array_like, compatible object
            If `x` is a list or tuple, it is converted to an ndarray, otherwise
            it is left unchanged and treated as a scalar. In either case, `x`
            or its elements must support addition and multiplication with with
            themselves and with the elements of `r`.
        r : array_like
            Array of roots. If `r` is multidimensional the first index is the
            root index, while the remaining indices enumerate multiple
            polynomials. For instance, in the two dimensional case the roots of
            each polynomial may be thought of as stored in the columns of `r`.
        tensor : boolean, optional
            If True, the shape of the roots array is extended with ones on the
            right, one for each dimension of `x`. Scalars have dimension 0 for
            this action. The result is that every column of coefficients in `r`
            is evaluated for every element of `x`. If False, `x` is broadcast
            over the columns of `r` for the evaluation.  This keyword is useful
            when `r` is multidimensional. The default value is True.
        Returns
        -------
        values : ndarray, compatible object
            The shape of the returned array is described above.
        See Also
        --------
        polyroots, polyfromroots, polyval
        Examples
        --------
        >>> from numpy.polynomial.polynomial import polyvalfromroots
        >>> polyvalfromroots(1, [1,2,3])
        0.0
        >>> a = np.arange(4).reshape(2,2)
        >>> a
        array([[0, 1],
               [2, 3]])
        >>> polyvalfromroots(a, [-1, 0, 1])
        array([[ -0.,   0.],
               [  6.,  24.]])
        >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients
        >>> r # each column of r defines one polynomial
        array([[-2, -1],
               [ 0,  1]])
        >>> b = [-2, 1]
        >>> polyvalfromroots(b, r, tensor=True)
        array([[-0.,  3.],
               [ 3., 0.]])
        >>> polyvalfromroots(b, r, tensor=False)
        array([-0.,  0.])
        """
        # Ensure r is at least 1-D without copying when possible.
        r = np.array(r, ndmin=1, copy=0)
        # Promote bool/integer roots to double so the product is float.
        if r.dtype.char in '?bBhHiIlLqQpP':
            r = r.astype(np.double)
        if isinstance(x, (tuple, list)):
            x = np.asarray(x)
        if isinstance(x, np.ndarray):
            if tensor:
                # Append one broadcast axis per dimension of x.
                r = r.reshape(r.shape + (1,)*x.ndim)
            elif x.ndim >= r.ndim:
                raise ValueError("x.ndim must be < r.ndim when tensor == "
                                 "False")
        # p(x) = prod over roots of (x - r_n), reduced along the root axis.
        return np.prod(x - r, axis=0)
# Prefer the real implementation from numpy.testing (available in newer
# numpy releases); fall back to this vendored copy otherwise.
# NOTE(review): the fallback body references `warnings`, `re`, `wraps`
# and `WarningMessage`, which must be imported earlier in this file —
# confirm those imports exist above this chunk.
try:
    from numpy.testing import suppress_warnings
except ImportError:
    class suppress_warnings(object):
        """
        Context manager and decorator doing much the same as
        ``warnings.catch_warnings``.
        However, it also provides a filter mechanism to work around
        https://bugs.python.org/issue4180.
        This bug causes Python before 3.4 to not reliably show warnings again
        after they have been ignored once (even within catch_warnings). It
        means that no "ignore" filter can be used easily, since following
        tests might need to see the warning. Additionally it allows easier
        specificity for testing warnings and can be nested.
        Parameters
        ----------
        forwarding_rule : str, optional
            One of "always", "once", "module", or "location". Analogous to
            the usual warnings module filter mode, it is useful to reduce
            noise mostly on the outmost level. Unsuppressed and unrecorded
            warnings will be forwarded based on this rule. Defaults to "always".
            "location" is equivalent to the warnings "default", match by exact
            location the warning warning originated from.
        Notes
        -----
        Filters added inside the context manager will be discarded again
        when leaving it. Upon entering all filters defined outside a
        context will be applied automatically.
        When a recording filter is added, matching warnings are stored in the
        ``log`` attribute as well as in the list returned by ``record``.
        If filters are added and the ``module`` keyword is given, the
        warning registry of this module will additionally be cleared when
        applying it, entering the context, or exiting it. This could cause
        warnings to appear a second time after leaving the context if they
        were configured to be printed once (default) and were already
        printed before the context was entered.
        Nesting this context manager will work as expected when the
        forwarding rule is "always" (default). Unfiltered and unrecorded
        warnings will be passed out and be matched by the outer level.
        On the outmost level they will be printed (or caught by another
        warnings context). The forwarding rule argument can modify this
        behaviour.
        Like ``catch_warnings`` this context manager is not threadsafe.
        Examples
        --------
        >>> with suppress_warnings() as sup:
        ...     sup.filter(DeprecationWarning, "Some text")
        ...     sup.filter(module=np.ma.core)
        ...     log = sup.record(FutureWarning, "Does this occur?")
        ...     command_giving_warnings()
        ...     # The FutureWarning was given once, the filtered warnings were
        ...     # ignored. All other warnings abide outside settings (may be
        ...     # printed/error)
        ...     assert_(len(log) == 1)
        ...     assert_(len(sup.log) == 1)  # also stored in log attribute
        Or as a decorator:
        >>> sup = suppress_warnings()
        >>> sup.filter(module=np.ma.core)  # module must match exact
        >>> @sup
        >>> def some_function():
        ...     # do something which causes a warning in np.ma.core
        ...     pass
        """
        def __init__(self, forwarding_rule="always"):
            # `_entered` guards against re-entrant use; see __enter__.
            self._entered = False
            # Suppressions are either instance or defined inside one with block:
            self._suppressions = []
            if forwarding_rule not in {"always", "module", "once", "location"}:
                raise ValueError("unsupported forwarding rule.")
            self._forwarding_rule = forwarding_rule
        def _clear_registries(self):
            """Invalidate per-module __warningregistry__ caches so filters apply."""
            if hasattr(warnings, "_filters_mutated"):
                # clearing the registry should not be necessary on new pythons,
                # instead the filters should be mutated.
                warnings._filters_mutated()
                return
            # Simply clear the registry, this should normally be harmless,
            # note that on new pythons it would be invalidated anyway.
            for module in self._tmp_modules:
                if hasattr(module, "__warningregistry__"):
                    module.__warningregistry__.clear()
        def _filter(self, category=Warning, message="", module=None, record=False):
            """Shared implementation behind filter() and record()."""
            if record:
                record = []  # The log where to store warnings
            else:
                record = None
            if self._entered:
                # Already active: install the warnings filter immediately and
                # remember it as a temporary (context-local) suppression.
                if module is None:
                    warnings.filterwarnings(
                        "always", category=category, message=message)
                else:
                    module_regex = module.__name__.replace('.', r'\.') + '$'
                    warnings.filterwarnings(
                        "always", category=category, message=message,
                        module=module_regex)
                    self._tmp_modules.add(module)
                    self._clear_registries()
                self._tmp_suppressions.append(
                    (category, message, re.compile(message, re.I), module, record))
            else:
                # Not entered yet: stash the suppression; __enter__ applies it.
                self._suppressions.append(
                    (category, message, re.compile(message, re.I), module, record))
            return record
        def filter(self, category=Warning, message="", module=None):
            """
            Add a new suppressing filter or apply it if the state is entered.
            Parameters
            ----------
            category : class, optional
                Warning class to filter
            message : string, optional
                Regular expression matching the warning message.
            module : module, optional
                Module to filter for. Note that the module (and its file)
                must match exactly and cannot be a submodule. This may make
                it unreliable for external modules.
            Notes
            -----
            When added within a context, filters are only added inside
            the context and will be forgotten when the context is exited.
            """
            self._filter(category=category, message=message, module=module,
                         record=False)
        def record(self, category=Warning, message="", module=None):
            """
            Append a new recording filter or apply it if the state is entered.
            All warnings matching will be appended to the ``log`` attribute.
            Parameters
            ----------
            category : class, optional
                Warning class to filter
            message : string, optional
                Regular expression matching the warning message.
            module : module, optional
                Module to filter for. Note that the module (and its file)
                must match exactly and cannot be a submodule. This may make
                it unreliable for external modules.
            Returns
            -------
            log : list
                A list which will be filled with all matched warnings.
            Notes
            -----
            When added within a context, filters are only added inside
            the context and will be forgotten when the context is exited.
            """
            return self._filter(category=category, message=message, module=module,
                                record=True)
        def __enter__(self):
            """Save warnings state, install all pending filters, and hook showwarning."""
            if self._entered:
                raise RuntimeError("cannot enter suppress_warnings twice.")
            # Save originals so __exit__ can restore them exactly.
            self._orig_show = warnings.showwarning
            self._filters = warnings.filters
            warnings.filters = self._filters[:]
            self._entered = True
            self._tmp_suppressions = []
            self._tmp_modules = set()
            self._forwarded = set()
            self.log = []  # reset global log (no need to keep same list)
            for cat, mess, _, mod, log in self._suppressions:
                if log is not None:
                    del log[:]  # clear the log
                if mod is None:
                    warnings.filterwarnings(
                        "always", category=cat, message=mess)
                else:
                    module_regex = mod.__name__.replace('.', r'\.') + '$'
                    warnings.filterwarnings(
                        "always", category=cat, message=mess,
                        module=module_regex)
                    self._tmp_modules.add(mod)
            warnings.showwarning = self._showwarning
            self._clear_registries()
            return self
        def __exit__(self, *exc_info):
            """Restore the original showwarning hook and filter list."""
            warnings.showwarning = self._orig_show
            warnings.filters = self._filters
            self._clear_registries()
            self._entered = False
            del self._orig_show
            del self._filters
        def _showwarning(self, message, category, filename, lineno,
                         *args, **kwargs):
            """Replacement warnings.showwarning: suppress/record matches, forward the rest."""
            use_warnmsg = kwargs.pop("use_warnmsg", None)
            # Later-added suppressions take precedence, hence the reversed scan.
            for cat, _, pattern, mod, rec in (
                    self._suppressions + self._tmp_suppressions)[::-1]:
                if (issubclass(category, cat) and
                        pattern.match(message.args[0]) is not None):
                    if mod is None:
                        # Message and category match, either recorded or ignored
                        if rec is not None:
                            msg = WarningMessage(message, category, filename,
                                                 lineno, **kwargs)
                            self.log.append(msg)
                            rec.append(msg)
                        return
                    # Use startswith, because warnings strips the c or o from
                    # .pyc/.pyo files.
                    elif mod.__file__.startswith(filename):
                        # The message and module (filename) match
                        if rec is not None:
                            msg = WarningMessage(message, category, filename,
                                                 lineno, **kwargs)
                            self.log.append(msg)
                            rec.append(msg)
                        return
            # There is no filter in place, so pass to the outside handler
            # unless we should only pass it once
            if self._forwarding_rule == "always":
                if use_warnmsg is None:
                    self._orig_show(message, category, filename, lineno,
                                    *args, **kwargs)
                else:
                    # NOTE(review): `_orig_showmsg` is never assigned in this
                    # vendored copy — this branch would raise AttributeError
                    # if a caller passed use_warnmsg. Confirm against upstream.
                    self._orig_showmsg(use_warnmsg)
                return
            # Deduplicate forwarded warnings according to the forwarding rule.
            if self._forwarding_rule == "once":
                signature = (message.args, category)
            elif self._forwarding_rule == "module":
                signature = (message.args, category, filename)
            elif self._forwarding_rule == "location":
                signature = (message.args, category, filename, lineno)
            if signature in self._forwarded:
                return
            self._forwarded.add(signature)
            if use_warnmsg is None:
                self._orig_show(message, category, filename, lineno, *args,
                                **kwargs)
            else:
                self._orig_showmsg(use_warnmsg)
        def __call__(self, func):
            """
            Function decorator to apply certain suppressions to a whole
            function.
            """
            @wraps(func)
            def new_func(*args, **kwargs):
                with self:
                    return func(*args, **kwargs)
            return new_func
| |
import argparse
import sys
import time
import math
import unittest
import contextlib
import numpy as np
import six
import paddle.fluid as fluid
import paddle
import net
import utils
def parse_args():
    """Build and parse command-line arguments for word2vec inference.

    Returns:
        argparse.Namespace with the options below. Fixes vs. the original:
        int-typed options now use int defaults (not strings), and the
        copy-pasted help texts for --start_batch/--end_batch/--emb_size
        have been corrected.
    """
    parser = argparse.ArgumentParser("PaddlePaddle Word2vec infer example")
    parser.add_argument(
        '--dict_path',
        type=str,
        default='./data/data_c/1-billion_dict_word_to_id_',
        help="The path of the word-to-id dict")
    parser.add_argument(
        '--infer_epoch',
        action='store_true',
        required=False,
        default=False,
        help='infer by epoch')
    parser.add_argument(
        '--infer_step',
        action='store_true',
        required=False,
        default=False,
        help='infer by step')
    parser.add_argument(
        '--test_dir', type=str, default='test_data', help='test file address')
    parser.add_argument(
        '--print_step', type=int, default=500000, help='print step')
    parser.add_argument(
        '--start_index', type=int, default=0, help='first epoch index')
    parser.add_argument(
        '--start_batch', type=int, default=1, help='first batch id')
    parser.add_argument(
        '--end_batch', type=int, default=13, help='last batch id (exclusive)')
    parser.add_argument(
        '--last_index', type=int, default=100, help='last epoch index')
    parser.add_argument(
        '--model_dir', type=str, default='model', help='model dir')
    parser.add_argument(
        '--use_cuda', type=int, default=0, help='whether use cuda')
    parser.add_argument(
        '--batch_size', type=int, default=5, help='batch_size')
    parser.add_argument(
        '--emb_size', type=int, default=64, help='embedding size')
    args = parser.parse_args()
    return args
def infer_epoch(args, vocab_size, test_reader, use_cuda, i2w):
    """Evaluate word-analogy accuracy once per saved epoch checkpoint.

    Loads ``model_dir + "/pass-<epoch>"`` for each epoch in
    ``[start_index, last_index]`` and prints top-4 analogy accuracy.
    NOTE(review): ``start_index``, ``last_index`` and ``model_dir`` are
    read as module-level globals set in ``__main__`` — confirm no other
    caller exists. ``i2w`` is accepted but unused here.
    """
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    emb_size = args.emb_size
    batch_size = args.batch_size  # NOTE(review): unused in this function
    with fluid.scope_guard(fluid.core.Scope()):
        main_program = fluid.Program()
        with fluid.program_guard(main_program):
            # Build the inference graph once; clone per checkpoint below so
            # each epoch's parameters load into a fresh copy.
            values, pred = net.infer_network(vocab_size, emb_size)
            for epoch in range(start_index, last_index + 1):
                copy_program = main_program.clone()
                model_path = model_dir + "/pass-" + str(epoch)
                fluid.io.load_params(
                    executor=exe, dirname=model_path, main_program=copy_program)
                accum_num = 0
                accum_num_sum = 0.0
                t0 = time.time()  # NOTE(review): timing captured but never reported
                step_id = 0
                for data in test_reader():
                    step_id += 1
                    b_size = len([dat[0] for dat in data])
                    # Analogy query a : b :: c : ? — columns 0..2 of each row.
                    wa = np.array(
                        [dat[0] for dat in data]).astype("int64").reshape(
                            b_size, 1)
                    wb = np.array(
                        [dat[1] for dat in data]).astype("int64").reshape(
                            b_size, 1)
                    wc = np.array(
                        [dat[2] for dat in data]).astype("int64").reshape(
                            b_size, 1)
                    label = [dat[3] for dat in data]
                    input_word = [dat[4] for dat in data]
                    para = exe.run(
                        copy_program,
                        feed={
                            "analogy_a": wa,
                            "analogy_b": wb,
                            "analogy_c": wc,
                            "all_label":
                            np.arange(vocab_size).reshape(vocab_size, 1),
                        },
                        fetch_list=[pred.name, values],
                        return_numpy=False)
                    pre = np.array(para[0])
                    val = np.array(para[1])  # NOTE(review): unused
                    for ii in range(len(label)):
                        top4 = pre[ii]
                        accum_num_sum += 1
                        for idx in top4:
                            # Skip predictions that merely echo a query word.
                            if int(idx) in input_word[ii]:
                                continue
                            # First non-query candidate decides hit/miss.
                            if int(idx) == int(label[ii][0]):
                                accum_num += 1
                            break
                    # NOTE(review): `% 1` is always 0, so this prints every
                    # step — presumably args.print_step was intended; confirm.
                    if step_id % 1 == 0:
                        print("step:%d %d " % (step_id, accum_num))
                print("epoch:%d \t acc:%.3f " %
                      (epoch, 1.0 * accum_num / accum_num_sum))
def infer_step(args, vocab_size, test_reader, use_cuda, i2w):
    """Evaluate word-analogy accuracy per intra-epoch batch checkpoint.

    Like infer_epoch, but loads checkpoints saved every
    ``args.print_step`` batches under
    ``model_dir + "/pass-<epoch>/batch-<n>"``.
    NOTE(review): ``start_index``, ``last_index`` and ``model_dir`` are
    module-level globals set in ``__main__``; ``i2w`` is unused.
    """
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    emb_size = args.emb_size
    batch_size = args.batch_size  # NOTE(review): unused in this function
    with fluid.scope_guard(fluid.core.Scope()):
        main_program = fluid.Program()
        with fluid.program_guard(main_program):
            # Build the inference graph once; clone per checkpoint below.
            values, pred = net.infer_network(vocab_size, emb_size)
            for epoch in range(start_index, last_index + 1):
                for batchid in range(args.start_batch, args.end_batch):
                    copy_program = main_program.clone()
                    model_path = model_dir + "/pass-" + str(epoch) + (
                        '/batch-' + str(batchid * args.print_step))
                    fluid.io.load_params(
                        executor=exe,
                        dirname=model_path,
                        main_program=copy_program)
                    accum_num = 0
                    accum_num_sum = 0.0
                    t0 = time.time()  # NOTE(review): unused
                    step_id = 0
                    for data in test_reader():
                        step_id += 1
                        b_size = len([dat[0] for dat in data])
                        # Analogy query a : b :: c : ? — columns 0..2.
                        wa = np.array(
                            [dat[0] for dat in data]).astype("int64").reshape(
                                b_size, 1)
                        wb = np.array(
                            [dat[1] for dat in data]).astype("int64").reshape(
                                b_size, 1)
                        wc = np.array(
                            [dat[2] for dat in data]).astype("int64").reshape(
                                b_size, 1)
                        label = [dat[3] for dat in data]
                        input_word = [dat[4] for dat in data]
                        para = exe.run(
                            copy_program,
                            feed={
                                "analogy_a": wa,
                                "analogy_b": wb,
                                "analogy_c": wc,
                                "all_label":
                                np.arange(vocab_size).reshape(vocab_size, 1),
                            },
                            fetch_list=[pred.name, values],
                            return_numpy=False)
                        pre = np.array(para[0])
                        val = np.array(para[1])  # NOTE(review): unused
                        for ii in range(len(label)):
                            top4 = pre[ii]
                            accum_num_sum += 1
                            for idx in top4:
                                # Skip predictions echoing a query word.
                                if int(idx) in input_word[ii]:
                                    continue
                                if int(idx) == int(label[ii][0]):
                                    accum_num += 1
                                break
                        # NOTE(review): `% 1` is always 0 — prints every step;
                        # presumably args.print_step was intended. Confirm.
                        if step_id % 1 == 0:
                            print("step:%d %d " % (step_id, accum_num))
                    print("epoch:%d \t acc:%.3f " %
                          (epoch, 1.0 * accum_num / accum_num_sum))
        t1 = time.time()  # NOTE(review): unused
if __name__ == "__main__":
    args = parse_args()
    # These module-level names are read as globals by infer_epoch/infer_step.
    start_index = args.start_index
    last_index = args.last_index
    test_dir = args.test_dir
    model_dir = args.model_dir
    batch_size = args.batch_size
    dict_path = args.dict_path
    # bool() replaces the redundant `True if args.use_cuda else False`.
    use_cuda = bool(args.use_cuda)
    print("start index: ", start_index, " last_index:", last_index)
    vocab_size, test_reader, id2word = utils.prepare_data(
        test_dir, dict_path, batch_size=batch_size)
    print("vocab_size:", vocab_size)
    # --infer_step selects per-batch checkpoints; default is per-epoch.
    if args.infer_step:
        infer_step(
            args,
            vocab_size,
            test_reader=test_reader,
            use_cuda=use_cuda,
            i2w=id2word)
    else:
        infer_epoch(
            args,
            vocab_size,
            test_reader=test_reader,
            use_cuda=use_cuda,
            i2w=id2word)
| |
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import imp
import os
import platform
import re
import subprocess
import sys
from . import config_option
from . import prompt
from digits import device_query
from digits.utils import parse_version
class CaffeOption(config_option.FrameworkOption):
    """Config option that locates a Caffe install (binary + pycaffe).

    Accepts either the sentinel '<PATHS>' (search PATH/PYTHONPATH) or a
    caffe source/build directory, validating the NVIDIA-fork version.
    """
    @staticmethod
    def config_file_key():
        # Key under which this option is stored in the config file.
        return 'caffe_root'
    @classmethod
    def prompt_title(cls):
        return 'Caffe'
    @classmethod
    def prompt_message(cls):
        return 'Where is caffe installed?'
    def optional(self):
        #TODO: make this optional
        return False
    def suggestions(self):
        # Offer validated CAFFE_ROOT / CAFFE_HOME env values, then a
        # PATH/PYTHONPATH-based lookup as the fallback suggestion.
        suggestions = []
        if 'CAFFE_ROOT' in os.environ:
            d = os.environ['CAFFE_ROOT']
            try:
                suggestions.append(prompt.Suggestion(
                    self.validate(d), 'R',
                    desc='CAFFE_ROOT', default=True))
            except config_option.BadValue:
                # Invalid env value: silently skip this suggestion.
                pass
        if 'CAFFE_HOME' in os.environ:
            d = os.environ['CAFFE_HOME']
            try:
                default = True
                if len(suggestions) > 0:
                    # A valid CAFFE_ROOT already claimed the default slot.
                    default = False
                suggestions.append(prompt.Suggestion(
                    self.validate(d), 'H',
                    desc='CAFFE_HOME', default=default))
            except config_option.BadValue:
                pass
        suggestions.append(prompt.Suggestion('<PATHS>', 'P',
                                             desc='PATH/PYTHONPATH', default=True))
        return suggestions
    @staticmethod
    def is_path():
        # Tells the config machinery this value is filesystem-path-like.
        return True
    @classmethod
    def validate(cls, value):
        """Validate a candidate value; raises config_option.BadValue on failure."""
        if not value:
            return value
        if value == '<PATHS>':
            # Find the executable
            executable = cls.find_executable('caffe')
            if not executable:
                executable = cls.find_executable('caffe.exe')
            if not executable:
                raise config_option.BadValue('caffe binary not found in PATH')
            cls.validate_version(executable)
            # Find the python module
            try:
                imp.find_module('caffe')
            except ImportError:
                raise config_option.BadValue('caffe python package not found in PYTHONPATH')
            return value
        else:
            # Find the executable
            value = os.path.abspath(value)
            if not os.path.isdir(value):
                raise config_option.BadValue('"%s" is not a directory' % value)
            expected_path = os.path.join(value, 'build', 'tools', 'caffe')
            if not os.path.exists(expected_path):
                raise config_option.BadValue('caffe binary not found at "%s"' % value)
            cls.validate_version(expected_path)
            # Find the python module
            pythonpath = os.path.join(value, 'python')
            sys.path.insert(0, pythonpath)
            try:
                imp.find_module('caffe')
            except ImportError as e:
                # NOTE(review): e.message is Python 2 only.
                raise config_option.BadValue('Error while importing caffe from "%s": %s' % (
                    pythonpath, e.message))
            finally:
                # Don't actually add this until apply() is called
                sys.path.pop(0)
            return value
    @staticmethod
    def find_executable(program):
        """
        Finds an executable by searching through PATH
        Returns the path to the executable or None
        """
        for path in os.environ['PATH'].split(os.pathsep):
            path = path.strip('"')
            executable = os.path.join(path, program)
            if os.path.isfile(executable) and os.access(executable, os.X_OK):
                return executable
        return None
    @classmethod
    def validate_version(cls, executable):
        """
        Utility for checking the caffe version from within validate()
        Throws BadValue
        Arguments:
        executable -- path to a caffe executable
        """
        minimum_version = parse_version(0,11,0)
        version = cls.get_version(executable)
        if version is None:
            raise config_option.BadValue('Could not get version information from caffe at "%s". Are you using the NVIDIA fork?'
                                         % executable)
        elif minimum_version > version:
            raise config_option.BadValue('Required version "%s" is greater than "%s". Upgrade your installation.'
                                         % (
                                             '.'.join(str(n) for n in minimum_version),
                                             '.'.join(str(n) for n in version)
                                         ))
        else:
            return True
    @staticmethod
    def get_version(executable):
        """
        Returns the caffe version as a (MAJOR, MINOR, PATCH) tuple or None
        Arguments:
        executable -- path to a caffe executable
        """
        # TODO: check `caffe --version` when it's implemented
        NVIDIA_SUFFIX = '-nv'
        if platform.system() == 'Linux':
            # Infer the version from the libcaffe soname listed by ldd.
            p = subprocess.Popen(['ldd', executable],
                                 stdout = subprocess.PIPE,
                                 stderr = subprocess.PIPE)
            if p.wait():
                raise config_option.BadValue(p.stderr.read().strip())
            else:
                libname = 'libcaffe'
                caffe_line = None
                # Search output for caffe library
                for line in p.stdout:
                    if libname in line:
                        caffe_line = line
                        break
                if caffe_line is None:
                    raise config_option.BadValue('%s not found in ldd output' % libname)
                # Read the symlink for libcaffe from ldd output
                symlink = caffe_line.split()[2]
                filename = os.path.basename(os.path.realpath(symlink))
                # Check for the nvidia suffix
                if NVIDIA_SUFFIX not in filename:
                    raise config_option.BadValue('Library at "%s" does not have expected suffix "%s". Are you using the NVIDIA/caffe fork?'
                                                 % (filename, NVIDIA_SUFFIX))
                # parse the version string
                match = re.match(r'%s%s\.so\.(\S+)$'
                                 % (libname, NVIDIA_SUFFIX), filename)
                if match:
                    version_str = match.group(1)
                    return parse_version(version_str)
                else:
                    return None
        elif platform.system() == 'Darwin':
            # XXX: guess and let the user figure out errors later
            return parse_version(0,11,0)
        elif platform.system() == 'Windows':
            # XXX: guess and let the user figure out errors later
            return parse_version(0,11,0)
        else:
            print 'WARNING: platform "%s" not supported' % platform.system()
            return None
    def _set_config_dict_value(self, value):
        # Resolve the stored value into executable path + capability flags.
        if not value:
            self._config_dict_value = None
        else:
            if value == '<PATHS>':
                executable = self.find_executable('caffe')
                if not executable:
                    executable = self.find_executable('caffe.exe')
            else:
                executable = os.path.join(value, 'build', 'tools', 'caffe')
            version = self.get_version(executable)
            # Multi-GPU training arrived in NVIDIA/caffe 0.12.
            if version >= parse_version(0,12):
                multi_gpu = True
            else:
                multi_gpu = False
            # TODO: ask caffe for this information
            cuda_enabled = len(device_query.get_devices()) > 0
            self._config_dict_value = {
                'executable': executable,
                'version': version,
                'multi_gpu': multi_gpu,
                'cuda_enabled': cuda_enabled,
            }
    def apply(self):
        """Make the configured caffe importable (sys.path / PYTHONPATH / GLOG)."""
        if self._config_file_value:
            # Suppress GLOG output for python bindings
            GLOG_minloglevel = os.environ.pop('GLOG_minloglevel', None)
            # Show only "ERROR" and "FATAL"
            os.environ['GLOG_minloglevel'] = '2'
            if self._config_file_value != '<PATHS>':
                # Add caffe/python to PATH
                p = os.path.join(self._config_file_value, 'python')
                sys.path.insert(0, p)
                # Add caffe/python to PYTHONPATH
                # so that build/tools/caffe is aware of python layers there
                os.environ['PYTHONPATH'] = '%s:%s' % (p, os.environ.get('PYTHONPATH'))
            # for Windows environment, loading h5py before caffe solves the issue mentioned in
            # https://github.com/NVIDIA/DIGITS/issues/47#issuecomment-206292824
            import h5py
            try:
                import caffe
            except ImportError:
                print 'Did you forget to "make pycaffe"?'
                raise
            # Strange issue with protocol buffers and pickle - see issue #32
            sys.path.insert(0, os.path.join(
                os.path.dirname(caffe.__file__), 'proto'))
            # Turn GLOG output back on for subprocess calls
            if GLOG_minloglevel is None:
                del os.environ['GLOG_minloglevel']
            else:
                os.environ['GLOG_minloglevel'] = GLOG_minloglevel
| |
import modular_core.libfundamental as lfu
from modular_core.libfundamental import modular_object_qt as modular_object
import modular_core.libsimcomponents as lsc
import modular_core.libmath as lm
import modular_core.libgeometry as lgeo
import modular_core.libfitroutine as lfr
import modular_core.libpostprocess as lpp
import modular_core.libcriterion as lc
import modular_core.libmodcomponents as lmc
import stringchemical as chemfast
import stringchemical_timeout as chemfast_timeout
import sys, time, types, random, traceback
import numpy as np
from math import log as log
import cStringIO as sio
import pdb
# Module-level cache of the encoded system; populated per p-space
# location by run_params_to_location and read by sim_system.encode.
_system_string_ = '__initialized_system_string__'
# Only bind the GUI pack when imported as the chemicallite script module.
if __name__ == 'chemical.scripts.chemicallite':
    if lfu.gui_pack is None: lfu.find_gui_pack()
    lgm = lfu.gui_pack.lgm
    lgd = lfu.gui_pack.lgd
    lgb = lfu.gui_pack.lgb
if __name__ == '__main__':
    print 'this is the chemical lite module library!'
module_name = 'chemical'
# Parameter sections this module adds on top of the shared ones.
run_param_keys = lmc.run_param_keys +\
    ['Variables', 'Functions', 'Reactions', 'Species']
# this is a handle to set param specific info per p-space location
def run_params_to_location(ensem):
    """Encode the ensemble's current run parameters into the module-level
    system string consumed by the stringchemical simulator
    (see sim_system.encode)."""
    global _system_string_
    def make_rxn_string(rxn):
        # One reaction as "(n)A+(m)B->rate->(p)C..." style text.
        used = '+'.join([''.join(['(', str(agent[1]), ')',
            agent[0]]) for agent in rxn.used])
        prod = '+'.join([''.join(['(', str(agent[1]), ')',
            agent[0]]) for agent in rxn.produced])
        return '->'.join([used, str(rxn.rate), prod])
    def int_fix(cnt):
        # Clamp sub-unity initial counts to zero.
        if float(cnt) < 1: return 0
        else: return cnt
    params = ensem.run_params.partition['system']
    sub_spec = [':'.join([spec.label, str(int_fix(spec.initial_count))])
        for spec in params['species'].values()]
    spec_string = '<species>' + ','.join(sub_spec)
    sub_var = [':'.join([key, str(var.value)]) for key, var in
        params['variables'].items()]
    variable_string = '<variables>' + ','.join(sub_var)
    def check_ext(afunc):
        # Inline every external_signal(<file>) reference: the file's
        # two csv columns are spliced into the function string as
        # '$'-separated x values, an '@', then '$'-separated y values.
        extcnt = afunc.count('external_signal')
        fixed = []
        for exts in range(extcnt):
            leads = afunc.find('external_signal(')
            subfunc = afunc[leads+16:]
            presig = afunc[:leads+16]
            # '&' stands in for ',' (see sub_func below), so it
            # terminates the filename argument.
            postsig = subfunc[subfunc.find('&'):]
            filename = subfunc[:subfunc.find(postsig)]
            #filename = subfunc[:subfunc.find('&')]
            with open(filename,'r') as handle:
                extlines = handle.readlines()
            extstrx = sio.StringIO()
            extstry = sio.StringIO()
            for eline in extlines:
                eline = eline.strip()
                # Skip lines without a comma (blank/garbage rows).
                if not eline.count(',') > 0: continue
                elx,ely = eline.split(',')
                extstrx.write(str(elx))
                extstrx.write('$')
                extstry.write(str(ely))
                extstry.write('$')
            fixhash = '%#%'
            extstrx.write('@')
            fixval = extstrx.getvalue() + extstry.getvalue()
            # Substitute a placeholder now; expand after the scan so the
            # file contents cannot confuse subsequent find() calls.
            fixed.append((fixhash,fixval))
            afunc = presig + fixhash + postsig
        for fix in fixed: afunc = afunc.replace(fix[0],fix[1])
        #for fix in fixed: afunc = afunc.replace(fix[0],'---')
        return afunc
    # ',' separates sections, so commas inside function bodies become '&'.
    sub_func = ['='.join([key, fu.func_statement.replace(',', '&')])
        for key, fu in params['functions'].items()]
    sub_func = [check_ext(sf) for sf in sub_func]
    function_string = '<functions>' + ','.join(sub_func)
    sub_rxn = ','.join([make_rxn_string(rxn)
        for rxn in params['reactions']])
    reaction_string = '<reactions>' + sub_rxn
    sub_end = lc.read_criteria(params['end_criteria'], '')
    end_string = '<end>' + sub_end
    sub_capt = lc.read_criteria(params['capture_criteria'], '')
    capture_string = '<capture>' + sub_capt
    targs = params['plot_targets']
    # NOTE(review): the first three targets are rotated to the end —
    # presumably the ordering the simulator expects; confirm.
    sub_targ = ','.join(targs[3:] + targs[:3])
    target_string = '<targets>' + sub_targ + '||'
    system_string = spec_string + variable_string +\
        function_string + reaction_string + end_string +\
        capture_string + target_string
    _system_string_ = system_string
def generate_gui_templates_qt(window, ensemble):
    """Build the Qt panel template lookup for the chemical module:
    the standard panels plus add/remove/select boxes for variables,
    functions, reactions, and species."""
    set_module_memory_(ensemble)
    run_params = ensemble.run_params
    plot_target_labels = ['iteration', 'time'] +\
        run_params['species'].keys() +\
        run_params['variables'].keys() +\
        run_params['functions'].keys()
    panel_template_lookup =\
        lmc.generate_panel_template_lookup_standard(
            window, ensemble, plot_target_labels)
    memory = ensemble._module_memory_[0]
    # (run_params key, button noun, handle prefix, item class, maker handle)
    panel_specs = [
        ('variables', 'Variable', 'variable', variable, None),
        ('functions', 'Function', 'function', function_cont, None),
        ('reactions', 'Reaction', 'reaction', reaction, '_make_rxn_'),
        ('species', 'Species', 'species', species, '_make_spec_')]
    for key, noun, prefix, item_class, maker in panel_specs:
        box_kwargs = {
            'window': window,
            'key': key,
            'labels': ['Add ' + noun, 'Remove ' + noun],
            'wheres': [ensemble._children_, run_params[key]],
            'parent': ensemble,
            'selector_handle': (memory, prefix + '_selector'),
            'memory_handle': (memory, prefix + '_selected_memory'),
            'base_class': item_class}
        # Reactions and species route creation through an ensemble hook.
        if maker is not None:
            box_kwargs['function_handles'] =\
                [[(ensemble, maker)], None, None]
        panel_template_lookup.append((key,
            lgm.generate_add_remove_select_inspect_box_template(
                **box_kwargs)))
    return lmc.generate_gui_templates_qt(window,
        ensemble, lookup = panel_template_lookup)
def set_module_memory_(ensem):
    """Attach a fresh GUI-selection-state container to the ensemble."""
    memory = lfu.data_container(
        output_plan_selected_memory = 'Simulation',
        variable_selected_memory = 'None',
        function_selected_memory = 'None',
        reaction_selected_memory = 'None',
        species_selected_memory = 'None')
    ensem._module_memory_ = [memory]
def set_parameters(ensem):
    """Reset the ensemble's run parameters to this module's defaults,
    destroying any previously configured criteria, params, and
    post-processes first so their resources are released."""
    set_module_memory_(ensem)
    if 'end_criteria' in ensem.run_params.keys():
        for crit in ensem.run_params['end_criteria']:
            crit._destroy_()
    if 'capture_criteria' in ensem.run_params.keys():
        for crit in ensem.run_params['capture_criteria']:
            crit._destroy_()
    if 'variables' in ensem.run_params.keys():
        for key, val in ensem.run_params['variables'].items():
            ensem.run_params['variables'][key]._destroy_()
    if 'species' in ensem.run_params.keys():
        for key, val in ensem.run_params['species'].items():
            ensem.run_params['species'][key]._destroy_()
    if 'reactions' in ensem.run_params.keys():
        for val in ensem.run_params['reactions']: val._destroy_()
    if 'functions' in ensem.run_params.keys():
        for key, val in ensem.run_params['functions'].items():
            ensem.run_params['functions'][key]._destroy_()
    if ensem.postprocess_plan.post_processes:
        for proc in ensem.postprocess_plan.post_processes:
            proc._destroy_()
    ensem.simulation_plan.reset_criteria_lists()
    # Fresh default containers: dicts for keyed params, list for reactions.
    ensem.run_params['variables'] = {}
    ensem.run_params['species'] = {}
    ensem.run_params['reactions'] = []
    ensem.run_params['functions'] = {}
    ensem.run_params['plot_targets'] = ['iteration', 'time']
    ensem.postprocess_plan.reset_process_list()
    output_plan = ensem.run_params['output_plans']['Simulation']
    output_plan.targeted = ['iteration', 'time']
    for dex in range(len(output_plan.outputs)):
        output_plan.outputs[dex] = ['iteration', 'time']
    # Partitions must be rebuilt after the containers above are replaced.
    ensem.run_params.create_partition('system',
        [ 'variables', 'species', 'reactions', 'functions',
            'end_criteria', 'capture_criteria', 'plot_targets' ])
    ensem.cartographer_plan.parameter_space_mobjs =\
        ensem.run_params.partition['system']
    ensem.run_params.create_partition('template owners',
        ['variables', 'functions', 'reactions', 'species'])
def parse_variable_line(*args):
    """Parse one mcfg 'name : value' line into a (name, variable) pair.

    args[0] is the raw line; args[1] is the ensemble (unused, kept for
    parser-signature parity with the other section parsers)."""
    raw, _ensem = args[0], args[1]
    fields = [field.strip() for field in raw.split(':')]
    var_name = fields[0]
    var_value = fields[1]
    return var_name, variable(label = var_name, value = var_value)
def parse_function_line(*args):
    """Parse one mcfg 'name : statement' line into a (name, function_cont)
    pair.

    args[0] is the raw line; args[1] is the ensemble (unused, kept for
    parser-signature parity with the other section parsers)."""
    raw, _ensem = args[0], args[1]
    fields = [field.strip() for field in raw.split(':')]
    func_name = fields[0]
    statement = fields[1]
    return func_name, function_cont(label = func_name,
        func_statement = statement)
def parse_reaction_line(*args):
data = args[0]
ensem = args[1]
def left_right_process(left, right):
left = [(left[k + 1], int(left[k])) for k in
[num*2 for num in range(len(left)/2)]]
right = [(right[k + 1], int(right[k])) for k in
[num*2 for num in range(len(right)/2)]]
return left, right
data = data.split(' ')
try: label = ' '.join(data[data.index(':') + 1:])
except ValueError: label = 'a reaction'
try: divider = [item.find('-') for item in data].index(0)
except:
try: divider = [item.find('-') for item in data].index(1)
except:
print 'cant interpret divider in reaction'
return []
if data[divider] == '<->':
rates = [data[divider - 1], data[divider + 1]]
left = [item for item in data[:divider - 1] if item != '+']
try: right = [item for item in data[divider + 2:data.index(':')] if item != '+']
except ValueError: right = [item for item in data[divider + 2:] if item != '+']
left, right = left_right_process(left, right)
rxn1 = reaction(label, rates[0],
propensity_scheme = 'classic', used = left,
produced = right, parent = ensem)
rxn2 = reaction(label, rates[1],
propensity_scheme = 'classic', used = right,
produced = left, parent = ensem)
ensem._children_.extend([rxn1, rxn2])
return [rxn1, rxn2]
elif data[divider] == '->':
rates = [data[divider - 1]]
left = [item for item in data[:divider - 1] if item != '+']
try:
right = [item for item in
data[divider + 1:data.index(':')]
if item != '+']
except ValueError:
right = [item for item in data[divider + 1:] if item != '+']
left, right = left_right_process(left, right)
rxn = reaction(label, rates[0], propensity_scheme = 'classic',
used = left, produced = right, parent = ensem)
ensem._children_.append(rxn)
return [rxn]
elif data[divider] == '<-':
rates = [data[divider + 1]]
left = [item for item in data[:divider] if item != '+']
try: right = [item for item in data[divider + 2:data.index(':')] if item != '+']
except ValueError:
right = [item for item in data[divider + 2:] if item != '+']
left, right = left_right_process(left, right)
rxn = reaction(label, rates[0], propensity_scheme = 'classic',
used = right, produced = left, parent = ensem)
ensem._children_.append(rxn)
return [rxn]
if data != ['']:
print 'unable to parse reaction: ' + str(data)
pdb.set_trace()
return []
def parse_species_line(*args):
    """Parse one mcfg 'name : count' line into a (name, species) pair
    whose initial and current counts are both seeded with the count."""
    raw, ensem = args[0], args[1]
    fields = [tok.strip() for tok in raw.split(':')]
    name = fields[0]
    count = int(fields[1])
    return name, species(name, parent = ensem,
        initial_count = count,
        current_count = count)
def parse_mcfg(lines, *args):
    """Delegate mcfg parsing to the shared parser, wiring this module's
    four section names to their line-parser functions."""
    section_names = ['variables', 'functions', 'reactions', 'species']
    section_parsers = [parse_variable_line, parse_function_line,
                       parse_reaction_line, parse_species_line]
    lmc.parse_mcfg(lines, args[0], args[1],
        [section_names, section_parsers])
def write_mcfg(*args):
    """Serialize the run params' four chemical sections to mcfg lines and
    hand off to the shared writer. args[0] is run_params, args[1] the
    ensemble."""
    run_params, ensem = args[0], args[1]
    lines = ['']
    for section in ('variables', 'functions', 'reactions', 'species'):
        lmc.params_to_lines(run_params, section, lines)
    return lmc.write_mcfg(run_params, ensem, lines)
class sim_system(lsc.sim_system_external):
    """Simulation system driven by the stringchemical C extensions; the
    system is exchanged with the extension as a single encoded string."""
    def encode(self):
        # The string is prepared per p-space location by
        # run_params_to_location; just grab the module-level copy.
        global _system_string_
        self.system_string = _system_string_
        return
    # this can simply grab the global info thats set at a p-space
    # location specific time
    '''#
    def make_rxn_string(rxn):
        used = '+'.join([''.join(['(', str(agent[1]), ')',
            agent[0]]) for agent in rxn.used])
        prod = '+'.join([''.join(['(', str(agent[1]), ')',
            agent[0]]) for agent in rxn.produced])
        return '->'.join([used, str(rxn.rate), prod])
    def int_fix(cnt):
        if float(cnt) < 1: return 0
        else: return cnt
    sub_spec = [':'.join([spec.label,
        str(int_fix(spec.initial_count))])
        for spec in self.params['species'].values()]
    spec_string = '<species>' + ','.join(sub_spec)
    sub_var = [':'.join([key, str(var.value)]) for key, var in
        self.params['variables'].items()]
    variable_string = '<variables>' + ','.join(sub_var)
    sub_func = ['='.join([key, fu.func_statement.replace(',', '&')])
        for key, fu in self.params['functions'].items()]
    function_string = '<functions>' + ','.join(sub_func)
    sub_rxn = ','.join([make_rxn_string(rxn) for rxn in
        self.params['reactions']])
    reaction_string = '<reactions>' + sub_rxn
    sub_end = self.read_criteria(
        self.params['end_criteria'], '')
    end_string = '<end>' + sub_end
    sub_capt = self.read_criteria(
        self.params['capture_criteria'], '')
    capture_string = '<capture>' + sub_capt
    targs = self.params['plot_targets']
    sub_targ = ','.join(targs[3:] + targs[:3])
    target_string = '<targets>' + sub_targ + '||'
    self.system_string = spec_string + variable_string +\
        function_string + reaction_string + end_string +\
        capture_string + target_string
    '''#
    def iterate(self):
        # Run one simulation via the C extension; the timeout-capable
        # variant is used when self.timeout is set/truthy.
        try:
            #seed = int(time.time()) + self.identifier
            if self.timeout:
                self.data = self.finalize_data(
                    *chemfast_timeout.simulate(
                        self.system_string, self.timeout))
                    #self.system_string, seed, self.timeout))
            else:
                self.data = self.finalize_data(
                    *chemfast.simulate(self.system_string))
                    #*chemfast.simulate(self.system_string, seed))
        except ValueError:
            traceback.print_exc(file=sys.stdout)
            print 'simulation failed; aborting'
            self.bAbort = True
            # NOTE(review): a bare `raise` would preserve the original
            # traceback/message; re-raising the class discards them.
            raise ValueError
        except:
            # NOTE(review): bare except swallows all other failures,
            # flagging abort without re-raising — confirm this best-effort
            # behavior is intended.
            traceback.print_exc(file=sys.stdout)
            print 'simulation failed; aborting'
            self.bAbort = True
class variable(modular_object):
    """A named numeric run parameter exposed as a parameter-space axis."""
    def _set_label_(self, value):
        # relabeling must rekey the ensemble's variables dict
        before = self._label
        if modular_object._set_label_(self, value):
            del self.parent.run_params['variables'][before]
            self.parent.run_params['variables'][self._label] = self
    def __init__(self, *args, **kwargs):
        if 'label' not in kwargs.keys(): kwargs['label'] = 'variable'
        self.impose_default('value', 1.0, **kwargs)
        # brand_new marks the first set_settables call, which performs
        # one-time registration with the ensemble's output plans
        self.brand_new = True
        parameter_space_templates =\
            [lgeo.interface_template_p_space_axis(instance = self,
                #p_sp_bounds = ['-10e64', '10e64'],
                p_sp_bounds = [0.0, sys.float_info.max],
                parent = self, key = 'value')]
        modular_object.__init__(self, *args,
            parameter_space_templates =\
                parameter_space_templates, **kwargs)
    def to_string(self):
        """Render as an mcfg line: tab, label, ' : ', value."""
        self.ensem = None
        return '\t' + self.label + ' : ' + str(self.value)
    def set_pspace_settables(self, *args, **kwargs):
        """Rebuild the parameter-space template for the value axis."""
        self.parameter_space_templates =\
            [lgeo.interface_template_p_space_axis(parent = self,
                p_sp_bounds = self._p_sp_bounds_[0],
                instance = self, key = 'value')]
        lfu.modular_object_qt.set_pspace_settables(self, *args, **kwargs)
    def set_settables(self, *args, **kwargs):
        """Build the GUI widget templates for editing this variable.

        args[0] is the window, args[1] the owning ensemble.
        """
        ensem = args[1]
        self.parent = ensem
        window = args[0]
        if self.brand_new:
            # first-time setup: expose this variable as a plottable target
            ensem.run_params['plot_targets'].append(self.label)
            plan = ensem.run_params['output_plans']['Simulation']
            plan.targeted.append(self.label)
            plan.rewidget(True)
            for subtargeted in plan.outputs:
                subtargeted.append(self.label)
            self.brand_new = not self.brand_new
        #dictionary_support = lgm.dictionary_support_mason(window)
        where_reference = ensem.run_params['variables']
        cartographer_support = lgm.cartographer_mason(window)
        self.handle_widget_inheritance(*args, **kwargs)
        self.set_pspace_settables(*args, **kwargs)
        self.widg_templates.append(
            lgm.interface_template_gui(
                widgets = ['spin'],
                doubles = [[True]],
                initials = [[float(self.value)]],
                instances = [[self]],
                keys = [['value']],
                box_labels = ['Variable Value'],
                mason = cartographer_support,
                parameter_space_templates =\
                    [self.parameter_space_templates[0]]))
        self.widg_templates.append(
            lgm.interface_template_gui(
                widgets = ['text'],
                #mason = dictionary_support,
                #wheres = [[where_reference]],
                keys = [['label']],
                instances = [[self]],
                initials = [[self.label]],
                box_labels = ['Variable Name']))
        lfu.modular_object_qt.set_settables(
            self, *args, from_sub = True)
class function_cont(modular_object):
    """A named function parameter editable from the GUI.

    Holds a function statement string and, on first configuration,
    registers its label as a plot/output target on the ensemble.
    """
    def __init__(self, *args, **kwargs):
        if 'label' not in kwargs.keys(): kwargs['label'] = 'function'
        self.impose_default('func_statement', '', **kwargs)
        # brand_new marks the first set_settables call, which performs
        # one-time registration with the ensemble's output plans
        self.brand_new = True
        modular_object.__init__(self, *args, **kwargs)
    def to_string(self):
        """Render as an mcfg line: tab, label, ' : ', statement."""
        return '\t' + self.label + ' : ' + self.func_statement
    def set_settables(self, *args, **kwargs):
        """Build the GUI widget templates for editing this function.

        args[0] is the window/frame, args[1] the owning ensemble.
        """
        ensem = args[1]
        if self.brand_new:
            # first-time setup: expose this function as a plottable target
            ensem.run_params['plot_targets'].append(self.label)
            plan = ensem.run_params['output_plans']['Simulation']
            plan.targeted.append(self.label)
            plan.rewidget(True)
            for subtargeted in plan.outputs:
                subtargeted.append(self.label)
            self.brand_new = not self.brand_new
        # bug fix: handle_widget_inheritance was invoked twice in a row
        # (copy-paste duplication); the sibling variable and species
        # classes call it exactly once
        self.handle_widget_inheritance(*args, **kwargs)
        where_reference = ensem.run_params['functions']
        self.widg_templates.append(
            lgm.interface_template_gui(
                keys = [['func_statement']],
                instances = [[self]],
                widgets = ['text'],
                minimum_sizes = [[(200, 75)]],
                box_labels = ['Function Statement'],
                initials = [[self.func_statement]]))
        self.widg_templates.append(
            lgm.interface_template_gui(
                widgets = ['text'],
                wheres = [[where_reference]],
                keys = [['label']],
                instances = [[self]],
                initials = [[self.label]],
                box_labels = ['Function Name']))
        lfu.modular_object_qt.set_settables(
            self, *args, from_sub = True)
class reaction(modular_object):
def __init__(self, label = 'another reaction',
rate = float(10.0), propensity_scheme = 'classic',
propensity_function_maker = None, parent = None,
occurrences = [], used = None, produced = None,
visible_attributes = ['label', 'propensity_scheme',
'rate', 'used', 'produced']):
self.system = None
self.rate = rate
if occurrences is None: self.occurrences = []
else: self.occurrences = occurrences
#instances of these are somehow coupled unless a
# used and produced list is passed to init,
# EVEN an empty list will do (used = [] does not!)
if used is None: self.used = []
else: self.used = used
if produced is None: self.produced = []
else: self.produced = produced
self.propensity = 1.0
self.propensity_minimum = 1e-30
self.propensity_scheme = propensity_scheme
parameter_space_templates =\
[lgeo.interface_template_p_space_axis(parent = self,
p_sp_bounds = [0.0000001, 1000.0],
instance = self, key = 'rate')]
modular_object.__init__(self, label = label, parent = parent,
visible_attributes = visible_attributes,
parameter_space_templates = parameter_space_templates)
def to_string(self):
def agents_to_line(agents):
if agents:
_line = [str(pair[1]) + ' ' + pair[0] for pair in agents]
_line = ' '.join(_line)
else: _line = 'nothing'
return _line
used_line = agents_to_line(self.used)
produced_line = agents_to_line(self.produced)
rxn_string = ' '.join([used_line, str(self.rate),
'->', produced_line])
rxn_string = '\t' + rxn_string + ' : ' + self.label
return rxn_string
def react(self, system):
enough_agent = [system.species[agent[0]].current_count >=\
agent[1] for agent in self.used]
try:
enough_agent.index(False)
return False
except ValueError:
for agent in self.used:
system.species[agent[0]].current_count -= int(agent[1])
for agent in self.produced:
system.species[agent[0]].current_count += int(agent[1])
try:
self.occurrences.append((system.iteration, system.time[-1]))
except IndexError:
self.occurrences.append((0, 0))
return True
return False
def determine_propensity(self, system):
try:
return self.propensity_scheme(system)
except TypeError:
if self.propensity_scheme == 'classic':
self.propensity_scheme =\
self.classic_propensity
return self.determine_propensity(system)
elif self.propensity_scheme == '':
self.revert_to_classic_propensity()
def revert_to_classic_propensity(self):
print '\nreaction propensity function failed'
print 'reverting to classic propensity scheme for:'
print '\t', self.label, '\n'
self.propensity_scheme = self.classic_propensity
def classic_propensity(self, system):
population = 1.0
for agent in self.used:
for k in range(int(agent[1])):
population *= float(system.species[
agent[0]].current_count - k)
self.propensity = population * self.rate
if self.propensity > self.propensity_minimum:
return self.propensity
else:
return 0.0
def verify_agents(self, spec_list):
def clean_list(agents):
agents = [agent for agent in agents if agent[0] in spec_list]
return agents
self.used = clean_list(self.used)
self.produced = clean_list(self.produced)
def set_pspace_settables(self, *args, **kwargs):
self.parameter_space_templates =\
[lgeo.interface_template_p_space_axis(parent = self,
p_sp_bounds = self._p_sp_bounds_[0],
instance = self, key = 'rate')]
lfu.modular_object_qt.set_pspace_settables(self, *args, **kwargs)
def set_settables(self, *args, **kwargs):
ensem = args[1]
window = args[0]
spec_list = ensem.run_params['species'].keys()
cartographer_support = lgm.cartographer_mason(window)
self.handle_widget_inheritance(*args, **kwargs)
self.set_pspace_settables(*args, **kwargs)
#self.parameter_space_templates =\
# [lgeo.interface_template_p_space_axis(parent = self,
# p_sp_bounds = self._p_sp_bounds_[0],
# instance = self, key = 'rate')]
#self.parameter_space_templates[0].set_settables(*args, **kwargs)
left_template = lgm.interface_template_gui(
panel_position = (0, 2),
mason = cartographer_support,
layout = 'vertical',
keys = [['label'], ['rate']],
instances = [[self], [self]],
widgets = ['text', 'text'],
minimum_sizes = [[(400, 100)], [(100, 100)]],
box_labels = ['Reaction Name', 'Reaction Rate'],
initials = [[self.label], [self.rate]],
parameter_space_templates = [None,
self.parameter_space_templates[0]])
self.verify_agents(spec_list)
agents_template = lgm.interface_template_gui(
panel_position = (0, 0),
layout = 'horizontal',
widgets = ['check_spin_list', 'check_spin_list'],
keys = [['used'], ['produced']],
instances = [[self], [self]],
labels = [spec_list, spec_list],
box_labels = ['Reagents', 'Products'])
self.widg_templates.append(
lgm.interface_template_gui(
widgets = ['splitter'],
orientations = [['horizontal']],
templates = [[left_template, agents_template]]))
modular_object.set_settables(self, *args, from_sub = True)
class species(modular_object):
    """A chemical species with an initial and a current molecule count."""
    def _set_label_(self, value):
        # relabeling must rekey the ensemble's species dict
        before = self._label
        if modular_object._set_label_(self, value):
            del self.parent.run_params['species'][before]
            self.parent.run_params['species'][self._label] = self
    def __init__(self, label = 'another species', initial_count = 0,
            current_count = None, visible_attributes = \
            ['label', 'initial_count'], parent = None):
        self.system = None
        self.initial_count = initial_count
        # NOTE(review): '== None' should idiomatically be 'is None'
        if current_count == None: self.current_count = initial_count
        else: self.current_count = current_count
        # brand_new marks the first set_settables call, which performs
        # one-time registration with the ensemble's output plans
        self.brand_new = True
        parameter_space_templates =\
            [lgeo.interface_template_p_space_axis(instance = self,
                key = 'initial_count', p_sp_bounds = [0, 1000000],
                p_sp_increment = 1, p_sp_continuous = False,
                parent = self)]
        modular_object.__init__(self, label = label,
            visible_attributes = visible_attributes,
            parameter_space_templates = parameter_space_templates)
    def to_string(self):
        """Render as an mcfg line: tab, label, ' : ', initial_count."""
        self.ensem = None
        return '\t' + self.label + ' : ' + str(self.initial_count)
    def set_pspace_settables(self, *args, **kwargs):
        """Rebuild the discrete parameter-space template for the count."""
        self.parameter_space_templates =\
            [lgeo.interface_template_p_space_axis(instance = self,
                key = 'initial_count', parent = self,
                p_sp_bounds = self._p_sp_bounds_[0],
                p_sp_increment = self._p_sp_increments_[0],
                p_sp_continuous = False)]
        lfu.modular_object_qt.set_pspace_settables(self, *args, **kwargs)
    def set_settables(self, *args, **kwargs):
        """Build GUI widget templates and label-change dependance links.

        args[0] is the window, args[1] the owning ensemble.
        """
        window = args[0]
        self.parent = args[1]
        ensem = self.parent
        cartographer_support = lgm.cartographer_mason(window)
        self.handle_widget_inheritance(*args, **kwargs)
        self.set_pspace_settables(*args, **kwargs)
        if self.brand_new:
            # first-time setup: expose this species as a plottable target
            ensem.run_params['plot_targets'].append(self.label)
            plan = ensem.run_params['output_plans']['Simulation']
            plan.targeted.append(self.label)
            plan.rewidget(True)
            for subtargeted in plan.outputs:
                subtargeted.append(self.label)
            self.brand_new = not self.brand_new
        # dependance links propagate a rename of this species to every
        # place its label appears: plot targets, output plans, and the
        # used/produced agent lists of every reaction
        label_data_links = [lfu.interface_template_dependance(
            (self, 'label', self.label),
            linkages = [(ensem.run_params,
                'plot_targets', True,
                'append_list')])]
        #this will cause a bug if a propensity_function_maker class
        # with a .key attribute which should not be linked to the
        # species name exists - so do not use one with this module
        #on the other hand, a subclass which uses .key exploits this
        # this bug can be fixed with name mangling on the base class!!
        label_data_links.extend([lfu.interface_template_dependance(
            (self, 'label', self.label), linkages =\
                [(ensem.run_params['output_plans']['Simulation'],
                'targeted', True, 'append_list')])])
        [label_data_links.extend([lfu.interface_template_dependance(
            (self, 'label', self.label), linkages =\
                [(ensem.run_params['output_plans']['Simulation'],
                'outputs', True, 'append_list_nested', dex)])])
            for dex in range(4)]
        label_data_links.extend(
            [lfu.interface_template_dependance(
                (self, 'label', self.label),
                linkages = [(rxn, 'used', True,
                    'append_tuples_list', 0, 0)])
                for rxn in ensem.run_params['reactions']])
        label_data_links.extend(
            [lfu.interface_template_dependance(
                (self, 'label', self.label),
                linkages = [(rxn, 'produced', True,
                    'append_tuples_list', 0, 0)])
                for rxn in ensem.run_params['reactions']])
        # NOTE(review): rebinding window to args[1] (the ensemble) looks
        # accidental; window is not used after this point — confirm
        window = args[1]
        #self.handle_widget_inheritance(*args, **kwargs)
        self.widg_templates.append(
            lgm.interface_template_gui(
                mason = cartographer_support,
                widgets = ['spin'],
                instances = [[self]],
                keys = [['initial_count']],
                minimum_values = [[0]],
                # NOTE(review): sys.maxint exists only on Python 2
                maximum_values = [[sys.maxint]],
                initials = [[self.initial_count]],
                box_labels = ['Initial Count'],
                parameter_space_templates =\
                    [self.parameter_space_templates[0]]))
        self.widg_templates.append(
            lgm.interface_template_gui(
                keys = [['label']],
                minimum_sizes = [[(150, 50)]],
                instances = [[self]],
                widgets = ['text'],
                box_labels = ['Species Name']))
        modular_object.set_settables(self, *args, from_sub = True)
| |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import logging.handlers
import signal
import simplejson
import socket
import sys, traceback
import time
import threading
import zookeeper
from optparse import OptionParser
from Runner import Runner
logger = logging.getLogger()
options = None
args = None
ZOOKEEPER_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
timeout = 10
connected = False
conn_cv = threading.Condition()
session_time = 100000
class ZooKeeperCommunicator(threading.Thread):
    """Thread that watches this host's ZooKeeper action queue and runs
    the queued commands, writing work logs and status back to znodes."""
    # root znode under which one child per cluster, then one znode per
    # hostname, is expected to exist
    hmsZkPrefix = "/clusters"
    def __init__(self, zkservers, credential):
        """zkservers: comma-separated host:port list; credential: digest
        credential string for add_auth, or None for no authentication."""
        threading.Thread.__init__(self)
        logger.debug('Initializing ZooKeeperCommunicator thread.')
        zookeeper.set_debug_level(zookeeper.LOG_LEVEL_DEBUG)
        self.zh = None
        self.zkservers = zkservers
        self.lock = threading.Lock()
        self.acl = [ZOOKEEPER_OPEN_ACL_UNSAFE]
        # safeMode=True means "not attached to our action queue";
        # run() keeps reconnecting and relocating until it clears
        self.safeMode = True
        self.credential = credential
    def auth_callback(self, zh, result_code):
        # wake the caller blocked in start() waiting for add_auth
        conn_cv.acquire()
        conn_cv.notify()
        conn_cv.release()
    def conn_callback(self, zh, *args):
        # wake the caller blocked waiting for session establishment
        conn_cv.acquire()
        conn_cv.notify()
        conn_cv.release()
    def start(self):
        """Open the ZooKeeper session (and authenticate if a credential
        was given).

        NOTE(review): this overrides threading.Thread.start without
        calling it, so run() is never scheduled on a new thread and a
        later join() targets an unstarted thread — confirm intended.
        """
        conn_cv.acquire()
        self.zh = zookeeper.init(self.zkservers, self.conn_callback, session_time)
        conn_cv.wait()
        conn_cv.release()
        if self.credential!=None:
            print "credential: "+self.credential
            conn_cv.acquire()
            zookeeper.add_auth(self.zh, "digest", self.credential, self.auth_callback)
            conn_cv.wait()
            conn_cv.release()
        logger.info("ZooKeeper connection established.")
    def __del__(self):
        # best-effort session close on garbage collection
        zookeeper.close(self.zh)
    def locate(self):
        """Find this host's znode under any cluster and arm the action
        queue watch; on any failure remain in safe mode."""
        hostname = socket.gethostname()
        try:
            children = sorted(zookeeper.get_children(self.zh, self.hmsZkPrefix))
            for child in children:
                znode = self.hmsZkPrefix + '/' + child + '/' + hostname
                if zookeeper.exists(self.zh, znode, None)!=None:
                    self.znode = znode
                    self.actionNode = znode + '/action'
                    self.statusNode = '/status'
                    # reuse the status node's ACL for our own writes
                    stat, acl = zookeeper.get_acl(self.zh, self.statusNode)
                    self.acl = acl
                    if zookeeper.OK == self.aget():
                        self.safeMode = False
                    break
        except:
            # NOTE(review): bare except hides the underlying error;
            # every failure silently becomes safe mode
            self.safeMode = True
        if self.safeMode != False:
            logger.warn("Can not locate " + hostname + " in zookeeper, sleep " + str(timeout) + " seconds.")
        if self.lock.locked():
            self.lock.release()
    def update(self, zh, node, object):
        """Overwrite node with the JSON form of object (delete+create)."""
        buffer = simplejson.dumps(object)
        if zookeeper.exists(zh, node, None) != None:
            zookeeper.delete(zh, node, 0)
        zookeeper.create(zh, node, buffer, self.acl, 0)
    def enqueue(self, zh, node, object):
        """Append object as a new sequential child znode under node."""
        buffer = simplejson.dumps(object)
        zookeeper.create(zh, node, buffer, self.acl, zookeeper.SEQUENCE)
    def launch(self, zh, workLogNode, actionNode, statusNode):
        """Execute the command stored at actionNode via Runner, recording
        STARTING then SUCCEEDED/FAILED in the work log and status queue."""
        state = {}
        data = zookeeper.get(zh, actionNode, 0)
        jsonp = simplejson.loads(data[0])
        state['cmdPath'] = jsonp['cmdPath']
        state['actionPath'] = actionNode
        state['actionId'] = jsonp['actionId']
        state['host'] = self.znode
        state['status']='STARTING'
        # write STARTING before running so a crash mid-run is detectable
        self.update(zh, workLogNode, state)
        logger.info("Launch: "+simplejson.dumps(jsonp))
        dispatcher = Runner()
        try:
            result = dispatcher.run(jsonp)
            logger.info("Result: "+simplejson.dumps(result))
            if "exit_code" in result and result['exit_code']==0:
                state['status']='SUCCEEDED'
            else:
                state['status']='FAILED'
        except:
            logger.exception('Execution error: '+actionNode)
            state['status']='FAILED'
        self.update(zh, workLogNode, state)
        self.enqueue(zh, statusNode, state)
    def aget(self):
        """(Re)arm the async child watch on this host's action queue."""
        return zookeeper.aget_children(self.zh, self.actionNode, self.queue_watcher, self.queue_callback)
    def queue_watcher(self, zh, event, state, path):
        # watches are one-shot: re-arm, falling back to safe mode on error
        if zookeeper.OK != self.aget():
            logger.error('Fail to monitor action queue for: '+self.actionNode)
            self.safeMode = True
    def queue_callback(self, zh, rc, data):
        """Process queued actions in order: launch new ones, relaunch
        those that died mid-run (STARTING), and publish completed ones."""
        if zookeeper.OK == rc:
            try:
                for child in sorted(data):
                    action = self.actionNode + '/' + child
                    workLog = self.actionNode + '/' + child + '/worklog'
                    statusLog = self.statusNode + '/status-'
                    """ Launch the task if the task has not been executed """
                    if zookeeper.exists(zh, workLog, None) == None:
                        self.launch(zh, workLog, action, statusLog)
                    else:
                        """ If task has been previous launched, check for partial execution """
                        buffer = zookeeper.get(zh, workLog, 0)
                        state = simplejson.loads(buffer[0])
                        """ If task is incompleted in execution, launch again """
                        if 'status' in state and state['status'] == 'STARTING':
                            logger.info('Relaunch '+child)
                            self.launch(zh, workLog, action, statusLog)
                        else:
                            """ If the task has been launched, and completed, update status queue """
                            if zookeeper.exists(zh, statusLog, None) == None:
                                logger.info('Update status.')
                                self.update(zh, statusLog, state)
            # NOTE(review): NoNodeException is not defined anywhere in
            # this module (presumably zookeeper.NoNodeException was
            # meant); matching this clause raises NameError — confirm
            except NoNodeException, err:
                """ Skip no node exception """
            except Exception, err:
                logger.exception(err)
        else:
            if zookeeper.NONODE == rc:
                self.safeMode = True
        if self.lock.locked():
            self.lock.release()
    def run(self):
        """Thread body: while in safe mode, periodically reconnect and
        relocate; otherwise keep the action-queue watch armed."""
        self.locate()
        while True:
            try:
                # lock is released by locate()/queue_callback, pacing
                # this loop against callback completion
                self.lock.acquire()
                if self.safeMode == True:
                    time.sleep(timeout)
                    zookeeper.close(self.zh)
                    conn_cv.acquire()
                    self.zh = zookeeper.init(self.zkservers, self.conn_callback, session_time)
                    conn_cv.wait()
                    conn_cv.release()
                    self.locate()
                if self.safeMode == False:
                    if zookeeper.OK != zookeeper.aget_children(self.zh, self.actionNode, self.queue_watcher, None):
                        logger.error('Fail to monitor action queue for: '+self.actionNode+', activate safe mode.')
                        self.safeMode = True
            except Exception, err:
                logger.exception(err)
def main(argv=None):
    """Parse CLI options, configure logging, and run the communicator."""
    # Allow Ctrl-C
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    parser = OptionParser()
    parser.add_option('-v', '--verbose',
        dest='verbose',
        default=False,
        action='store_true',
        help='Verbose logging. (default: %default)')
    parser.add_option('--zkservers',
        dest='zkservers',
        default='localhost:2181',
        help='Comma-separated list of host:port pairs. (default: %default)')
    global options
    global args
    (options, args) = parser.parse_args()
    if options.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - %(message)s")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    logger.info('Starting Zookeeper python example: %s' % ' '.join(sys.argv))
    # bug fix: the communicator was constructed with a hard-coded
    # "localhost:2181" (ignoring --zkservers) and without the required
    # credential argument, which raised TypeError at startup; honor the
    # parsed option and pass credential=None (no authentication)
    zc = ZooKeeperCommunicator(options.zkservers, None)
    zc.start()
    zc.join()
if __name__ == '__main__':
    main()
| |
# Copyright (c) 2015-2017 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import pytest
import sh
from molecule import config
from molecule.verifier import testinfra
from molecule.verifier.lint import flake8
@pytest.fixture
def molecule_verifier_section_data():
    """Verifier config exercising options, extra files/dirs, env and lint."""
    return {
        'verifier': {
            'name': 'testinfra',
            'options': {
                'foo': 'bar',
                'vvv': True,
                'verbose': True,
            },
            'additional_files_or_dirs': [
                '../foo.py',
                '../bar.py',
                '../baz',
            ],
            'env': {
                'foo': 'bar',
            },
            'lint': {
                'name': 'flake8',
            },
        }
    }
@pytest.fixture
def testinfra_instance(molecule_verifier_section_data, config_instance):
    """Testinfra verifier built on the merged section data."""
    config_instance.merge_dicts(config_instance.config,
                                molecule_verifier_section_data)
    return testinfra.Testinfra(config_instance)
@pytest.fixture
def inventory_file(testinfra_instance):
    """Path to the provisioner's inventory file."""
    return testinfra_instance._config.provisioner.inventory_file
def test_config_private_member(testinfra_instance):
    """The verifier holds a reference to the molecule Config object."""
    assert isinstance(testinfra_instance._config, config.Config)
def test_default_options_property(inventory_file, testinfra_instance):
    """Defaults are the ansible connection plus the inventory path."""
    x = {'connection': 'ansible', 'ansible-inventory': inventory_file}
    assert x == testinfra_instance.default_options
def test_default_options_property_updates_debug(inventory_file,
                                                testinfra_instance):
    """A debug CLI arg is folded into the default options."""
    testinfra_instance._config.args = {'debug': True}
    x = {
        'connection': 'ansible',
        'ansible-inventory': inventory_file,
        'debug': True
    }
    assert x == testinfra_instance.default_options
def test_default_options_property_updates_sudo(
        inventory_file, testinfra_instance, patched_testinfra_get_tests):
    """A sudo CLI arg is folded into the default options."""
    testinfra_instance._config.args = {'sudo': True}
    x = {
        'connection': 'ansible',
        'ansible-inventory': inventory_file,
        'sudo': True
    }
    assert x == testinfra_instance.default_options
def test_default_env_property(testinfra_instance):
    """The default env exposes the expected MOLECULE_* variables."""
    assert 'MOLECULE_FILE' in testinfra_instance.default_env
    assert 'MOLECULE_INVENTORY_FILE' in testinfra_instance.default_env
    assert 'MOLECULE_SCENARIO_DIRECTORY' in testinfra_instance.default_env
    assert 'MOLECULE_INSTANCE_CONFIG' in testinfra_instance.default_env
def test_additional_files_or_dirs_property(testinfra_instance):
    """Configured additional files/dirs are exposed unchanged."""
    x = [
        '../foo.py',
        '../bar.py',
        '../baz',
    ]
    assert x == testinfra_instance.additional_files_or_dirs
def test_env_property(testinfra_instance):
    """Configured env vars merge with the ansible-related defaults."""
    assert 'bar' == testinfra_instance.env['foo']
    assert 'ANSIBLE_CONFIG' in testinfra_instance.env
    assert 'ANSIBLE_ROLES_PATH' in testinfra_instance.env
    assert 'ANSIBLE_LIBRARY' in testinfra_instance.env
    assert 'ANSIBLE_FILTER_PLUGINS' in testinfra_instance.env
def test_lint_property(testinfra_instance):
    """The configured lint name resolves to a Flake8 instance."""
    assert isinstance(testinfra_instance.lint, flake8.Flake8)
@pytest.fixture
def molecule_verifier_lint_invalid_section_data():
    """Verifier config naming a lint tool that does not exist."""
    return {
        'verifier': {
            'name': 'testinfra',
            'lint': {
                'name': 'invalid',
            },
        }
    }
def test_lint_property_raises(molecule_verifier_lint_invalid_section_data,
                              patched_logger_critical, testinfra_instance):
    """An unknown lint name exits with code 1 and logs critically."""
    testinfra_instance._config.merge_dicts(
        testinfra_instance._config.config,
        molecule_verifier_lint_invalid_section_data)
    with pytest.raises(SystemExit) as e:
        testinfra_instance.lint
    assert 1 == e.value.code
    msg = "Invalid lint named 'invalid' configured."
    patched_logger_critical.assert_called_once_with(msg)
def test_name_property(testinfra_instance):
    """The verifier reports its name as 'testinfra'."""
    assert 'testinfra' == testinfra_instance.name
def test_enabled_property(testinfra_instance):
    """The verifier is enabled by default."""
    assert testinfra_instance.enabled
def test_directory_property(testinfra_instance):
    """Default test directory ends in molecule/default/tests."""
    parts = testinfra_instance.directory.split(os.path.sep)
    assert ['molecule', 'default', 'tests'] == parts[-3:]
@pytest.fixture
def molecule_verifier_directory_section_data():
    """Verifier config overriding the tests directory."""
    return {
        'verifier': {
            'name': 'testinfra',
            'directory': '/tmp/foo/bar'
        },
    }
def test_directory_property_overriden(
        testinfra_instance, molecule_verifier_directory_section_data):
    """A configured directory overrides the computed default."""
    testinfra_instance._config.merge_dicts(
        testinfra_instance._config.config,
        molecule_verifier_directory_section_data)
    assert '/tmp/foo/bar' == testinfra_instance.directory
def test_options_property(inventory_file, testinfra_instance):
    """Options merge the configured values over the defaults."""
    x = {
        'connection': 'ansible',
        'ansible-inventory': inventory_file,
        'foo': 'bar',
        'vvv': True,
        'verbose': True,
    }
    assert x == testinfra_instance.options
def test_options_property_handles_cli_args(inventory_file, testinfra_instance):
    """CLI args (e.g. debug) are folded into the merged options."""
    testinfra_instance._config.args = {'debug': True}
    x = {
        'connection': 'ansible',
        'ansible-inventory': inventory_file,
        'foo': 'bar',
        'debug': True,
        'vvv': True,
        'verbose': True,
    }
    assert x == testinfra_instance.options
def test_bake(patched_testinfra_get_tests, inventory_file, testinfra_instance):
    """bake() assembles the testinfra command: options plus test files."""
    testinfra_instance.bake()
    x = [
        str(sh.testinfra),
        '--ansible-inventory={}'.format(inventory_file),
        '--connection=ansible',
        '-vvv',
        '--foo=bar',
        'foo.py',
        'bar.py',
        '../foo.py',
        '../bar.py',
        '../baz',
    ]
    # argument order is not guaranteed; compare as sorted token lists
    result = str(testinfra_instance._testinfra_command).split()
    assert sorted(x) == sorted(result)
def test_execute(patched_logger_info, patched_run_command,
                 patched_testinfra_get_tests, patched_logger_success,
                 testinfra_instance):
    """execute() runs the baked command and logs start/success messages."""
    testinfra_instance._testinfra_command = 'patched-command'
    testinfra_instance.execute()
    patched_run_command.assert_called_once_with('patched-command', debug=False)
    msg = 'Executing Testinfra tests found in {}/...'.format(
        testinfra_instance.directory)
    patched_logger_info.assert_called_once_with(msg)
    msg = 'Verifier completed successfully.'
    patched_logger_success.assert_called_once_with(msg)
def test_execute_does_not_execute(patched_run_command, patched_logger_warn,
                                  testinfra_instance):
    """A disabled verifier skips execution and warns."""
    testinfra_instance._config.config['verifier']['enabled'] = False
    testinfra_instance.execute()
    assert not patched_run_command.called
    msg = 'Skipping, verifier is disabled.'
    patched_logger_warn.assert_called_once_with(msg)
def test_does_not_execute_without_tests(
        patched_run_command, patched_logger_warn, testinfra_instance):
    """With no discovered tests, execution is skipped with a warning."""
    testinfra_instance.execute()
    assert not patched_run_command.called
    msg = 'Skipping, no tests found.'
    patched_logger_warn.assert_called_once_with(msg)
def test_execute_bakes(patched_run_command, patched_testinfra_get_tests,
                       testinfra_instance):
    """execute() bakes a command implicitly when none was prepared."""
    testinfra_instance.execute()
    assert testinfra_instance._testinfra_command is not None
    assert 1 == patched_run_command.call_count
def test_executes_catches_and_exits_return_code(
        patched_run_command, patched_testinfra_get_tests, testinfra_instance):
    """A failing testinfra run exits with the command's return code."""
    patched_run_command.side_effect = sh.ErrorReturnCode_1(
        sh.testinfra, b'', b'')
    with pytest.raises(SystemExit) as e:
        testinfra_instance.execute()
    assert 1 == e.value.code
| |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from awx.main.models import ActivityStream, JobTemplate, Job, NotificationTemplate
@pytest.mark.django_db
def test_create_job_template(run_module, admin_user, project, inventory):
    """Creating a job template sets extra_vars and FK links correctly."""
    module_args = {
        'name': 'foo', 'playbook': 'helloworld.yml',
        'project': project.name, 'inventory': inventory.name,
        'extra_vars': {'foo': 'bar'},
        'job_type': 'run',
        'state': 'present'
    }
    result = run_module('tower_job_template', module_args, admin_user)
    jt = JobTemplate.objects.get(name='foo')
    # extra_vars are stored on the model as a JSON string
    assert jt.extra_vars == '{"foo": "bar"}'
    assert result == {
        "name": "foo",
        "id": jt.id,
        "changed": True,
        "invocation": {
            "module_args": module_args
        }
    }
    assert jt.project_id == project.id
    assert jt.inventory_id == inventory.id
@pytest.mark.django_db
def test_job_launch_with_prompting(run_module, admin_user, project, inventory, machine_credential):
    """Launching against an ask-on-launch JT applies prompted values."""
    JobTemplate.objects.create(
        name='foo',
        project=project,
        playbook='helloworld.yml',
        ask_variables_on_launch=True,
        ask_inventory_on_launch=True,
        ask_credential_on_launch=True
    )
    result = run_module('tower_job_launch', dict(
        job_template='foo',
        inventory=inventory.name,
        credential=machine_credential.name,
        extra_vars={"var1": "My First Variable",
                    "var2": "My Second Variable",
                    "var3": "My Third Variable"
                    }
    ), admin_user)
    assert result.pop('changed', None), result
    job = Job.objects.get(id=result['id'])
    # prompted extra_vars are serialized onto the spawned job
    assert job.extra_vars == '{"var1": "My First Variable", "var2": "My Second Variable", "var3": "My Third Variable"}'
    assert job.inventory == inventory
    assert [cred.id for cred in job.credentials.all()] == [machine_credential.id]
@pytest.mark.django_db
def test_job_template_with_new_credentials(
        run_module, admin_user, project, inventory,
        machine_credential, vault_credential):
    """Attaching credentials works and a repeat run is a no-op."""
    result = run_module('tower_job_template', dict(
        name='foo',
        playbook='helloworld.yml',
        project=project.name,
        inventory=inventory.name,
        credentials=[machine_credential.name, vault_credential.name]
    ), admin_user)
    assert not result.get('failed', False), result.get('msg', result)
    assert result.get('changed', False), result
    jt = JobTemplate.objects.get(pk=result['id'])
    assert set([machine_credential.id, vault_credential.id]) == set([
        cred.pk for cred in jt.credentials.all()])
    # idempotency check: same args again must report no change and
    # leave no new activity stream entries
    prior_ct = ActivityStream.objects.count()
    result = run_module('tower_job_template', dict(
        name='foo',
        playbook='helloworld.yml',
        project=project.name,
        inventory=inventory.name,
        credentials=[machine_credential.name, vault_credential.name]
    ), admin_user)
    assert not result.get('failed', False), result.get('msg', result)
    assert not result.get('changed', True), result
    jt.refresh_from_db()
    assert result['id'] == jt.id
    assert set([machine_credential.id, vault_credential.id]) == set([
        cred.pk for cred in jt.credentials.all()])
    assert ActivityStream.objects.count() == prior_ct
@pytest.mark.django_db
def test_job_template_with_survey_spec(run_module, admin_user, project, inventory, survey_spec):
    """Setting a survey spec works and a repeat run is a no-op."""
    result = run_module('tower_job_template', dict(
        name='foo',
        playbook='helloworld.yml',
        project=project.name,
        inventory=inventory.name,
        survey_spec=survey_spec,
        survey_enabled=True
    ), admin_user)
    assert not result.get('failed', False), result.get('msg', result)
    assert result.get('changed', False), result
    jt = JobTemplate.objects.get(pk=result['id'])
    assert jt.survey_spec == survey_spec
    # idempotency check: same args again must report no change and
    # leave no new activity stream entries
    prior_ct = ActivityStream.objects.count()
    result = run_module('tower_job_template', dict(
        name='foo',
        playbook='helloworld.yml',
        project=project.name,
        inventory=inventory.name,
        survey_spec=survey_spec,
        survey_enabled=True
    ), admin_user)
    assert not result.get('failed', False), result.get('msg', result)
    assert not result.get('changed', True), result
    jt.refresh_from_db()
    assert result['id'] == jt.id
    assert jt.survey_spec == survey_spec
    assert ActivityStream.objects.count() == prior_ct
@pytest.mark.django_db
def test_job_template_with_survey_encrypted_default(run_module, admin_user, project, inventory, silence_warning):
    """An encrypted (password-type) survey default cannot be diffed, so the
    module should warn and currently reports the task as changed every run."""
    spec = {
        "spec": [
            {
                "index": 0,
                "question_name": "my question?",
                "default": "very_secret_value",
                "variable": "myvar",
                "type": "password",
                "required": False
            }
        ],
        "description": "test",
        "name": "test"
    }
    result = None
    for _ in range(2):
        result = run_module('tower_job_template', dict(
            name='foo',
            playbook='helloworld.yml',
            project=project.name,
            inventory=inventory.name,
            survey_spec=spec,
            survey_enabled=True
        ), admin_user)
        assert not result.get('failed', False), result.get('msg', result)
        # Not actually desired behavior, but asserted for sanity: the module
        # cannot compare against the stored encrypted default.
        assert result.get('changed', False), result
    silence_warning.assert_called_once_with(
        "The field survey_spec of job_template {0} has encrypted data and "
        "may inaccurately report task is changed.".format(result['id']))
@pytest.mark.django_db
def test_associate_only_on_success(run_module, admin_user, organization, project):
    """Editing the success-notification list must not disturb the
    error-notification list already attached to the job template."""
    jt = JobTemplate.objects.create(
        name='foo',
        project=project,
        playbook='helloworld.yml',
        ask_inventory_on_launch=True,
    )
    nt_kwargs = dict(
        notification_configuration={
            'url': 'http://www.example.com/hook',
            'headers': {
                'X-Custom-Header': 'value123'
            },
            'password': 'bar'
        },
        notification_type='webhook',
        organization=organization
    )
    nt1 = NotificationTemplate.objects.create(name='nt1', **nt_kwargs)
    nt2 = NotificationTemplate.objects.create(name='nt2', **nt_kwargs)
    jt.notification_templates_error.add(nt1)

    def set_success_list(templates):
        # Run the module with only the success list supplied; it must be
        # treated as a targeted update, not a wholesale replacement.
        res = run_module('tower_job_template', dict(
            name='foo',
            playbook='helloworld.yml',
            project=project.name,
            notification_templates_success=templates
        ), admin_user)
        assert not res.get('failed', False), res.get('msg', res)
        assert res.get('changed', True), res

    # Adding a success NT preserves the pre-existing error NT.
    set_success_list(['nt2'])
    assert list(jt.notification_templates_success.values_list('id', flat=True)) == [nt2.id]
    assert list(jt.notification_templates_error.values_list('id', flat=True)) == [nt1.id]

    # Emptying the success list also preserves the error NT.
    set_success_list([])
    assert list(jt.notification_templates_success.values_list('id', flat=True)) == []
    assert list(jt.notification_templates_error.values_list('id', flat=True)) == [nt1.id]
| |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
FLOATING_IP_OPTIONAL_ATTRS = ['fixed_ip']
# TODO(berrange): Remove NovaObjectDictCompat
@obj_base.NovaObjectRegistry.register
class FloatingIP(obj_base.NovaPersistentObject, obj_base.NovaObject,
                 obj_base.NovaObjectDictCompat):
    """Versioned object wrapping a single floating IP database record."""
    # Version 1.0: Initial version
    # Version 1.1: Added _get_addresses_by_instance_uuid()
    # Version 1.2: FixedIP <= version 1.2
    # Version 1.3: FixedIP <= version 1.3
    # Version 1.4: FixedIP <= version 1.4
    # Version 1.5: FixedIP <= version 1.5
    # Version 1.6: FixedIP <= version 1.6
    # Version 1.7: FixedIP <= version 1.11
    # Version 1.8: FixedIP <= version 1.12
    VERSION = '1.8'

    fields = {
        'id': fields.IntegerField(),
        'address': fields.IPAddressField(),
        'fixed_ip_id': fields.IntegerField(nullable=True),
        'project_id': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'auto_assigned': fields.BooleanField(),
        'pool': fields.StringField(nullable=True),
        'interface': fields.StringField(nullable=True),
        'fixed_ip': fields.ObjectField('FixedIP', nullable=True),
    }

    # Maps this object's version to the maximum FixedIP version the
    # 'fixed_ip' field may carry when backleveling for older clients.
    obj_relationships = {
        'fixed_ip': [('1.0', '1.1'), ('1.2', '1.2'), ('1.3', '1.3'),
                     ('1.4', '1.4'), ('1.5', '1.5'), ('1.6', '1.6'),
                     ('1.7', '1.11'), ('1.8', '1.12')],
    }

    @staticmethod
    def _from_db_object(context, floatingip, db_floatingip,
                        expected_attrs=None):
        """Populate a FloatingIP object from a DB record.

        The optional 'fixed_ip' attribute is only set when it is listed in
        expected_attrs and the DB row actually joined a fixed IP.
        """
        if expected_attrs is None:
            expected_attrs = []
        for field in floatingip.fields:
            if field not in FLOATING_IP_OPTIONAL_ATTRS:
                floatingip[field] = db_floatingip[field]
        if ('fixed_ip' in expected_attrs and
                db_floatingip['fixed_ip'] is not None):
            floatingip.fixed_ip = objects.FixedIP._from_db_object(
                context, objects.FixedIP(context), db_floatingip['fixed_ip'])
        floatingip._context = context
        floatingip.obj_reset_changes()
        return floatingip

    def obj_load_attr(self, attrname):
        """Lazy-load 'fixed_ip', the only lazy-loadable attribute."""
        if attrname not in FLOATING_IP_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s is not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        if self.fixed_ip_id is not None:
            self.fixed_ip = objects.FixedIP.get_by_id(
                self._context, self.fixed_ip_id, expected_attrs=['network'])
        else:
            self.fixed_ip = None

    @obj_base.remotable_classmethod
    def get_by_id(cls, context, id):
        db_floatingip = db.floating_ip_get(context, id)
        # XXX joins fixed.instance
        return cls._from_db_object(context, cls(context), db_floatingip,
                                   expected_attrs=['fixed_ip'])

    @obj_base.remotable_classmethod
    def get_by_address(cls, context, address):
        db_floatingip = db.floating_ip_get_by_address(context, str(address))
        return cls._from_db_object(context, cls(context), db_floatingip)

    @obj_base.remotable_classmethod
    def get_pool_names(cls, context):
        """Return the list of floating IP pool names."""
        return [x['name'] for x in db.floating_ip_get_pools(context)]

    @obj_base.remotable_classmethod
    def allocate_address(cls, context, project_id, pool, auto_assigned=False):
        # Returns the allocated address, not a FloatingIP object.
        return db.floating_ip_allocate_address(context, project_id, pool,
                                               auto_assigned=auto_assigned)

    @obj_base.remotable_classmethod
    def associate(cls, context, floating_address, fixed_address, host):
        """Associate a floating address with a fixed address on a host.

        Returns None when the DB layer reports no fixed IP was associated.
        """
        db_fixed = db.floating_ip_fixed_ip_associate(context,
                                                     str(floating_address),
                                                     str(fixed_address),
                                                     host)
        if db_fixed is None:
            return None
        floating = FloatingIP(
            context=context, address=floating_address, host=host,
            fixed_ip_id=db_fixed['id'],
            fixed_ip=objects.FixedIP._from_db_object(
                context, objects.FixedIP(context), db_fixed,
                expected_attrs=['network']))
        return floating

    @obj_base.remotable_classmethod
    def deallocate(cls, context, address):
        return db.floating_ip_deallocate(context, str(address))

    @obj_base.remotable_classmethod
    def destroy(cls, context, address):
        db.floating_ip_destroy(context, str(address))

    @obj_base.remotable_classmethod
    def disassociate(cls, context, address):
        """Disassociate a floating address from its fixed IP."""
        db_fixed = db.floating_ip_disassociate(context, str(address))
        return cls(context=context, address=address,
                   fixed_ip_id=db_fixed['id'],
                   fixed_ip=objects.FixedIP._from_db_object(
                       context, objects.FixedIP(context), db_fixed,
                       expected_attrs=['network']))

    @obj_base.remotable_classmethod
    def _get_addresses_by_instance_uuid(cls, context, instance_uuid):
        return db.instance_floating_address_get_all(context, instance_uuid)

    @classmethod
    def get_addresses_by_instance(cls, context, instance):
        """Return the floating addresses for an instance (by its uuid)."""
        return cls._get_addresses_by_instance_uuid(context, instance['uuid'])

    @obj_base.remotable
    def save(self):
        """Persist changed fields; 'address' and 'fixed_ip_id' are immutable."""
        updates = self.obj_get_changes()
        if 'address' in updates:
            raise exception.ObjectActionError(action='save',
                                              reason='address is not mutable')
        if 'fixed_ip_id' in updates:
            reason = 'fixed_ip_id is not mutable'
            raise exception.ObjectActionError(action='save', reason=reason)

        # NOTE(danms): Make sure we don't pass the calculated fixed_ip
        # relationship to the DB update method
        updates.pop('fixed_ip', None)

        db_floatingip = db.floating_ip_update(self._context, str(self.address),
                                              updates)
        self._from_db_object(self._context, self, db_floatingip)
@obj_base.NovaObjectRegistry.register
class FloatingIPList(obj_base.ObjectListBase, obj_base.NovaObject):
    """A list of FloatingIP objects plus bulk create/destroy helpers."""
    # Version 1.3: FloatingIP 1.2
    # Version 1.4: FloatingIP 1.3
    # Version 1.5: FloatingIP 1.4
    # Version 1.6: FloatingIP 1.5
    # Version 1.7: FloatingIP 1.6
    # Version 1.8: FloatingIP 1.7
    # Version 1.9: FloatingIP 1.8
    fields = {
        'objects': fields.ListOfObjectsField('FloatingIP'),
    }
    # Maps this list's version to the FloatingIP version its items carry.
    obj_relationships = {
        'objects': [('1.0', '1.0'), ('1.1', '1.1'), ('1.2', '1.1'),
                    ('1.3', '1.2'), ('1.4', '1.3'), ('1.5', '1.4'),
                    ('1.6', '1.5'), ('1.7', '1.6'), ('1.8', '1.7'),
                    ('1.9', '1.8')],
    }
    VERSION = '1.9'

    @obj_base.remotable_classmethod
    def get_all(cls, context):
        db_floatingips = db.floating_ip_get_all(context)
        return obj_base.obj_make_list(context, cls(context),
                                      objects.FloatingIP, db_floatingips)

    @obj_base.remotable_classmethod
    def get_by_host(cls, context, host):
        db_floatingips = db.floating_ip_get_all_by_host(context, host)
        return obj_base.obj_make_list(context, cls(context),
                                      objects.FloatingIP, db_floatingips)

    @obj_base.remotable_classmethod
    def get_by_project(cls, context, project_id):
        db_floatingips = db.floating_ip_get_all_by_project(context, project_id)
        return obj_base.obj_make_list(context, cls(context),
                                      objects.FloatingIP, db_floatingips)

    @obj_base.remotable_classmethod
    def get_by_fixed_address(cls, context, fixed_address):
        db_floatingips = db.floating_ip_get_by_fixed_address(
            context, str(fixed_address))
        return obj_base.obj_make_list(context, cls(context),
                                      objects.FloatingIP, db_floatingips)

    @obj_base.remotable_classmethod
    def get_by_fixed_ip_id(cls, context, fixed_ip_id):
        db_floatingips = db.floating_ip_get_by_fixed_ip_id(context,
                                                           fixed_ip_id)
        # Pass the context and registry class like the other query methods
        # above, instead of the bare cls()/FloatingIP pair.
        return obj_base.obj_make_list(context, cls(context),
                                      objects.FloatingIP, db_floatingips)

    @staticmethod
    def make_ip_info(address, pool, interface):
        """Build the per-IP dict format expected by create()'s ip_info."""
        return {'address': str(address),
                'pool': pool,
                'interface': interface}

    @obj_base.remotable_classmethod
    def create(cls, context, ip_info, want_result=False):
        """Bulk-create floating IPs; returns a list only when want_result."""
        db_floatingips = db.floating_ip_bulk_create(context, ip_info,
                                                    want_result=want_result)
        if want_result:
            return obj_base.obj_make_list(context, cls(context),
                                          objects.FloatingIP, db_floatingips)

    @obj_base.remotable_classmethod
    def destroy(cls, context, ips):
        db.floating_ip_bulk_destroy(context, ips)
| |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RL environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import random
import gym
from gym.spaces import Box
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import video_utils
from tensor2tensor.layers import modalities
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
# One time step of a rollout: `observation` holds an (encoded) Observation,
# `reward` the clipped/rounded reward, `unclipped_reward` the raw reward,
# `done` the episode-end flag and `action` the action taken after seeing
# the observation (filled in by the following step()/reset() call).
Frame = collections.namedtuple(
    # Order of elements reflects time progression within a frame.
    "Frame", ("observation", "reward", "unclipped_reward", "done", "action")
)
class Observation(object):
  """Encoded observation with lazy decoding.

  Args:
    data: Encoded observation (e.g. PNG bytes).
    decode_fn: Function for decoding the observation back to raw form.
  """

  def __init__(self, data, decode_fn):
    self.data = data
    self._decode = decode_fn

  def __eq__(self, other):
    """Equality comparison based on encoded data."""
    if isinstance(other, Observation):
      return self.data == other.data
    # Defer to the other operand instead of hard-coding False; Python
    # falls back to False itself if neither side handles the comparison.
    return NotImplemented

  def __ne__(self, other):
    """For consistency with __eq__."""
    return not self == other

  def __hash__(self):
    """Hash on the encoded data, consistent with __eq__ (keeps the class
    hashable on Python 3, where defining __eq__ alone would disable it)."""
    return hash(self.data)

  def decode(self):
    """Decode the observation using the decode_fn given at construction."""
    return self._decode(self.data)
class _Noncopyable(object):
def __init__(self, obj):
self.obj = obj
def __deepcopy__(self, memo):
return self
def make_gym_env(name, timesteps_limit=-1):
  """Creates a Gym environment, optionally overriding its time step limit.

  Args:
    name: Gym environment id passed to gym.make().
    timesteps_limit: Maximum number of episode steps; -1 keeps the default.

  Returns:
    The (possibly re-wrapped) Gym environment.
  """
  env = gym.make(name)
  if timesteps_limit == -1:
    return env
  # Strip any existing TimeLimit wrapper before installing ours.
  if isinstance(env, gym.wrappers.TimeLimit):
    env = env.env
  return gym.wrappers.TimeLimit(env, max_episode_steps=timesteps_limit)
class EnvSimulationProblem(video_utils.VideoProblem):
  """Base Problem class for use with world models.

  Attributes:
    action_space: Gym action space. Should be overridden in derived classes.
    reward_range: Tuple (min, max) representing the range of rewards. Limits
      should be integer (discrete rewards).
  """

  action_space = None
  reward_range = (-1, 1)

  @property
  def num_actions(self):
    """Size of the (discrete) action space."""
    return self.action_space.n

  @property
  def num_rewards(self):
    """Number of distinct integer reward values within reward_range."""
    min_reward, max_reward = self.reward_range
    return max_reward - min_reward + 1

  def hparams(self, defaults, unused_model_hparams):
    """Fills in modality and vocab hparams for frames, rewards and actions."""
    p = defaults
    frame_modality = modalities.VideoModality
    symbol_modality = modalities.SymbolModalityWeightsAll
    p.modality = {
        "inputs": frame_modality,
        "input_reward": symbol_modality,
        "input_action": symbol_modality,
        "targets": frame_modality,
        "target_reward": symbol_modality,
        "target_action": symbol_modality,
    }
    pixel_vocab_size = 256
    p.vocab_size = {
        "inputs": pixel_vocab_size,
        "input_reward": self.num_rewards,
        "input_action": self.num_actions,
        "targets": pixel_vocab_size,
        "target_reward": self.num_rewards,
        "target_action": self.num_actions,
    }
    p.input_space_id = problem.SpaceID.IMAGE
    p.target_space_id = problem.SpaceID.IMAGE
class T2TEnv(EnvSimulationProblem):
  """Abstract class representing a batch of environments.

  Attributes:
    history: List of finished rollouts, where rollout is a list of Frames.
    batch_size: Number of environments played simultaneously.
    observation_space: Gym observation space. Should be overridden in derived
      classes.
    name: Problem name for generating filenames. Should be overridden in
      derived classes.

  Args:
    batch_size: Number of environments in a batch.
  """

  observation_space = None
  name = None

  def __init__(self, batch_size, *args, **kwargs):
    super(T2TEnv, self).__init__(*args, **kwargs)

    self.batch_size = batch_size
    # Maps epoch -> dataset split -> list of rollouts.
    self._rollouts_by_epoch_and_split = collections.OrderedDict()
    self.current_epoch = None
    # Private TF graph with PNG encode/decode ops so observations are stored
    # compactly; graph and session are wrapped in _Noncopyable so that
    # copying the env shares them instead of cloning them.
    with tf.Graph().as_default() as tf_graph:
      self._tf_graph = _Noncopyable(tf_graph)
      self._decoded_image_p = _Noncopyable(
          tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
      )
      self._encoded_image_t = _Noncopyable(
          tf.image.encode_png(self._decoded_image_p.obj)
      )
      self._encoded_image_p = _Noncopyable(tf.placeholder(tf.string))
      self._decoded_image_t = _Noncopyable(
          tf.image.decode_png(self._encoded_image_p.obj)
      )
      self._session = _Noncopyable(tf.Session())

  def __str__(self):
    """Returns a string representation of the environment for debug purposes."""
    raise NotImplementedError

  def start_new_epoch(self, epoch, load_data_dir=None):
    """Starts a new epoch, optionally loading its rollouts from disk."""
    if not isinstance(epoch, int):
      raise ValueError("Epoch should be integer, got {}".format(epoch))
    if epoch in self._rollouts_by_epoch_and_split:
      raise ValueError("Epoch {} already registered".format(epoch))
    self.current_epoch = epoch
    self._current_epoch_rollouts = []
    self._rollouts_by_epoch_and_split[epoch] = collections.defaultdict(list)
    # Per-environment in-progress frame and rollout.
    self._current_batch_frames = [None for _ in range(self.batch_size)]
    self._current_batch_rollouts = [[] for _ in range(self.batch_size)]
    if load_data_dir is not None:
      self._load_epoch_data(load_data_dir)

  def current_epoch_rollouts(self, split=None, minimal_rollout_frames=0):
    """Returns the current epoch's rollouts, optionally for a single split."""
    # TODO(kc): order of rollouts (by splits) is a bit uncontrolled
    # (rollouts_by_split.values() reads dict values), is it a problem?
    rollouts_by_split = self._rollouts_by_epoch_and_split[self.current_epoch]
    if not rollouts_by_split:
      if split is not None:
        raise ValueError(
            "generate_data() should first be called in the current epoch"
        )
      else:
        rollouts = self._current_epoch_rollouts
    else:
      if split is not None:
        rollouts = rollouts_by_split[split]
      else:
        rollouts = [
            rollout
            for rollouts in rollouts_by_split.values()
            for rollout in rollouts
        ]
    return [rollout for rollout in rollouts
            if len(rollout) >= minimal_rollout_frames]

  def _preprocess_observations(self, obs):
    """Transforms a batch of observations.

    Can be overridden in derived classes.

    Args:
      obs: A batch of observations.

    Returns:
      Transformed batch of observations.
    """
    return obs

  def _decode_png(self, encoded_observation):
    """Decodes a single observation from PNG."""
    return self._session.obj.run(
        self._decoded_image_t.obj,
        feed_dict={self._encoded_image_p.obj: encoded_observation}
    )

  def _encode_observations(self, observations):
    """Encodes observations as PNG."""
    return [
        Observation(
            self._session.obj.run(
                self._encoded_image_t.obj,
                feed_dict={self._decoded_image_p.obj: observation}
            ),
            self._decode_png
        )
        for observation in observations
    ]

  def _step(self, actions):
    """Makes a step in all environments without recording history.

    Should be overridden in derived classes.

    Should not do any preprocessing of the observations and rewards; this
    should be done in _preprocess_*.

    Args:
      actions: Batch of actions.

    Returns:
      (obs, rewards, dones) - batches of observations, rewards and done flags
      respectively.
    """
    raise NotImplementedError

  def step(self, actions):
    """Makes a step in all environments.

    Does any preprocessing and records frames.

    Args:
      actions: Batch of actions.

    Returns:
      (obs, rewards, dones) - batches of observations, rewards and done flags
      respectively.

    Raises:
      ValueError: when the data for current epoch has already been loaded.
    """
    if self._rollouts_by_epoch_and_split[self.current_epoch]:
      raise ValueError(
          "Data for current epoch has already been loaded from disk."
      )
    (obs, unclipped_rewards, dones) = self._step(actions)
    obs = self._preprocess_observations(obs)
    (min_reward, max_reward) = self.reward_range
    # Clip and round rewards so they fit the discrete reward vocabulary.
    rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))
    unclipped_rewards = unclipped_rewards.astype(np.float64)
    encoded_obs = self._encode_observations(obs)
    # Complete each environment's pending frame with the action just taken
    # and append it to that environment's in-progress rollout.
    for (rollout, frame, action) in zip(
        self._current_batch_rollouts, self._current_batch_frames, actions
    ):
      rollout.append(frame._replace(action=action))

    # orud = (observation, reward, unclipped_reward, done)
    self._current_batch_frames = [
        Frame(*orud, action=None)
        for orud in zip(encoded_obs, rewards, unclipped_rewards, dones)
    ]
    return (obs, rewards, dones)

  def _reset(self, indices):
    """Resets environments at given indices without recording history.

    Args:
      indices: Indices of environments to reset.

    Returns:
      Batch of initial observations of reset environments.
    """
    raise NotImplementedError

  def reset(self, indices=None):
    """Resets environments at given indices.

    Does any preprocessing and adds rollouts to history.

    Args:
      indices: Indices of environments to reset.

    Returns:
      Batch of initial observations of reset environments.

    Raises:
      ValueError: when there's no current epoch.
    """
    if self.current_epoch is None:
      raise ValueError(
          "No current epoch. start_new_epoch() should first be called."
      )

    if indices is None:
      indices = np.arange(self.batch_size)

    new_obs = self._reset(indices)
    new_obs = self._preprocess_observations(new_obs)
    encoded_obs = self._encode_observations(new_obs)
    for (index, ob) in zip(indices, encoded_obs):
      frame = self._current_batch_frames[index]
      if frame is not None:
        # Finish the pending frame with a dummy action and archive the
        # completed rollout before starting a fresh one.
        rollout = self._current_batch_rollouts[index]
        rollout.append(frame._replace(action=0))
        self._current_epoch_rollouts.append(rollout)
        self._current_batch_rollouts[index] = []
      self._current_batch_frames[index] = Frame(
          observation=ob, reward=0, unclipped_reward=0, done=False, action=None
      )
    return new_obs

  def close(self):
    """Cleanups any resources.

    Can be overridden in derived classes.
    """
    self._session.obj.close()

  @property
  def num_channels(self):
    """Number of color channels in each frame."""
    raise NotImplementedError

  def eval_metrics(self):
    """Metrics used for evaluating models trained on this problem."""
    eval_metrics = [
        metrics.Metrics.ACC, metrics.Metrics.ACC_PER_SEQ,
        metrics.Metrics.IMAGE_RMSE
    ]
    return eval_metrics

  @property
  def extra_reading_spec(self):
    """Additional data fields to store on disk and their decoders."""
    field_names = ("frame_number", "action", "reward", "done")
    data_fields = {
        name: tf.FixedLenFeature([1], tf.int64) for name in field_names
    }
    decoders = {
        name: tf.contrib.slim.tfexample_decoder.Tensor(tensor_key=name)
        for name in field_names
    }
    return (data_fields, decoders)

  @property
  def frame_height(self):
    """Height of each frame, taken from the observation space."""
    return self.observation_space.shape[0]

  @property
  def frame_width(self):
    """Width of each frame, taken from the observation space."""
    return self.observation_space.shape[1]

  @property
  def only_keep_videos_from_0th_frame(self):
    # NOTE(review): False presumably allows videos to start mid-rollout
    # (rollouts can be broken on shard boundaries) — confirm against
    # VideoProblem's use of this flag.
    return False

  def _generate_frames(self, rollouts):
    """Yields one TFRecord-ready feature dict per frame in the rollouts."""
    for rollout in rollouts:
      for (frame_number, frame) in enumerate(rollout):
        yield {
            "frame_number": [frame_number],
            "epoch": [self.current_epoch],
            "image/encoded": [frame.observation.data],
            "image/format": ["png"],
            "image/height": [self.frame_height],
            "image/width": [self.frame_width],
            "action": [int(frame.action)],
            # Rewards are stored shifted to be non-negative.
            "reward": [int(frame.reward - self.reward_range[0])],
            "unclipped_reward": [float(frame.unclipped_reward)],
            "done": [int(frame.done)]
        }

  @staticmethod
  def _calc_num_frames(rollouts):
    """Total number of frames across all rollouts."""
    return sum(len(rollout) for rollout in rollouts)

  def _split_current_epoch(self):
    """Splits frames in the current epoch according to self.dataset_splits.

    Rollouts can be broken on shard boundary. This is desirable when we have
    few long rollouts and we want to make sure we have data in the dev set.
    """
    num_frames = self._calc_num_frames(self._current_epoch_rollouts)
    num_shards = sum(split["shards"] for split in self.dataset_splits)
    shard_size = num_frames // num_shards

    splits = self.dataset_splits
    num_saved_frames = 0
    split_index = 0
    split_begin_index = 0
    rollouts_by_split = collections.defaultdict(list)

    def split_size(split_index):
      return splits[split_index]["shards"] * shard_size

    for rollout in self._current_epoch_rollouts:
      num_saved_frames_current_rollout = 0
      # Split the rollout into chunks corresponding to dataset splits. In most
      # cases there should be only one chunk. On dataset split boundary there
      # will be two. If a rollout is longer then the size of a dataset split,
      # there might be more.
      while num_saved_frames_current_rollout < len(rollout):
        max_chunk_length = (
            split_begin_index + split_size(split_index) - num_saved_frames
        )
        if split_index == len(splits) - 1:
          # Put the remainder in the last split to preserve the ordering.
          max_chunk_length = len(rollout)
        rollout_chunk = rollout[
            num_saved_frames_current_rollout:
            (num_saved_frames_current_rollout + max_chunk_length)
        ]
        rollouts_by_split[splits[split_index]["split"]].append(rollout_chunk)
        num_saved_frames_current_rollout += len(rollout_chunk)
        num_saved_frames += len(rollout_chunk)

        if num_saved_frames == split_begin_index + split_size(split_index):
          split_begin_index += split_size(split_index)
          split_index = min(split_index + 1, len(splits) - 1)

    self._rollouts_by_epoch_and_split[self.current_epoch] = rollouts_by_split
    self._current_epoch_rollouts = []

  def splits_and_paths(self, data_dir):
    """List of pairs (split, paths) for the current epoch."""
    filepath_fns = {
        problem.DatasetSplit.TRAIN: self.training_filepaths,
        problem.DatasetSplit.EVAL: self.dev_filepaths,
        problem.DatasetSplit.TEST: self.test_filepaths,
    }

    def append_epoch(paths):
      return [
          "{}.{}".format(path, self.current_epoch)
          for path in paths
      ]

    # We set shuffled=True as we don't want to shuffle on disk later.
    return [
        (split["split"], append_epoch(filepath_fns[split["split"]](
            data_dir, split["shards"], shuffled=True
        )))
        for split in self.dataset_splits
    ]

  def filepattern(self, data_dir, mode, shard=None, only_last=False):
    """Returns a filepattern, restricted to the current epoch if only_last."""
    filepattern = super(T2TEnv, self).filepattern(
        data_dir, mode, shard
    )
    if only_last:
      filepattern += ".{}".format(self.current_epoch)
    return filepattern

  def generate_data(self, data_dir, tmp_dir=None, task_id=-1):
    """Saves the current epoch rollouts to disk, split into train/dev sets."""
    if not self._rollouts_by_epoch_and_split[self.current_epoch]:
      # Data not loaded from disk.
      self._split_current_epoch()

    rollouts_by_split = self._rollouts_by_epoch_and_split[self.current_epoch]
    splits_and_paths = self.splits_and_paths(data_dir)

    for (split, paths) in splits_and_paths:
      rollouts = rollouts_by_split[split]
      num_frames = self._calc_num_frames(rollouts)
      shard_size = num_frames // len(paths)

      frame_gen = self._generate_frames(rollouts)
      for (path_index, path) in enumerate(paths):
        limit = shard_size
        # Put the remainder in the last shard to preserve the ordering.
        if path_index == len(paths) - 1:
          limit = None
        generator_utils.generate_files(
            itertools.islice(frame_gen, limit), [path],
            cycle_every_n=float("inf")
        )

  def _load_epoch_data(self, data_dir):
    """Loads all splits of the current epoch from disk, if present."""
    any_files_found = False
    all_files_found = True
    any_shard_empty = False

    for split, paths in self.splits_and_paths(data_dir):
      try:
        any_shard_empty |= self._load_epoch_split(split, paths)
        any_files_found = True
      except tf.errors.NotFoundError:
        all_files_found = False
    # Partially-present data means generation was interrupted: refuse it.
    if any_shard_empty or (not all_files_found and any_files_found):
      raise ValueError("Some data is missing, the experiment might've been "
                       "interupted during generating data.")

  def _load_epoch_split(self, split, paths):
    """Reads one split's shards into rollouts; True if any shard was empty."""
    epoch = self.current_epoch
    last_frame_number = -1
    any_shard_empty = False
    current_rollout = []

    for path in paths:
      this_shard_empty = True
      for example in tf.python_io.tf_record_iterator(path):
        this_shard_empty = False

        result = tf.train.Example.FromString(example)
        feature = result.features.feature

        def get_feature_value(key, list_name):
          return getattr(feature[key], list_name).value[0]  # pylint: disable=cell-var-from-loop

        fields = {
            key: get_feature_value(key, list_name)
            for (key, list_name) in [
                ("image/encoded", "bytes_list"), ("reward", "int64_list"),
                ("unclipped_reward", "float_list"), ("done", "int64_list"),
                ("action", "int64_list")
            ]
        }
        # Rewards were stored shifted to be non-negative; undo the shift.
        fields["reward"] += self.reward_range[0]
        fields["done"] = bool(fields["done"])
        fields["observation"] = Observation(
            fields["image/encoded"], self._decode_png
        )
        del fields["image/encoded"]

        frame = Frame(**fields)
        frame_number = get_feature_value("frame_number", "int64_list")
        # A non-consecutive frame number marks the start of a new rollout.
        if frame_number == last_frame_number + 1:
          current_rollout.append(frame)
        else:
          self._rollouts_by_epoch_and_split[epoch][split].append(
              current_rollout)
          current_rollout = [frame]
        last_frame_number = frame_number
      any_shard_empty |= this_shard_empty

    self._rollouts_by_epoch_and_split[epoch][split].append(
        current_rollout
    )
    return any_shard_empty
class T2TGymEnv(T2TEnv):
  """Class representing a batch of Gym environments.

  Do not register it, instead create subclass with hardcoded __init__
  arguments and register this subclass.
  """

  # Action index applied when doing random no-ops after reset.
  noop_action = 0

  def __init__(self, base_env_name=None, batch_size=1, grayscale=False,
               resize_height_factor=2, resize_width_factor=2,
               base_env_timesteps_limit=-1, max_num_noops=0, **kwargs):
    if base_env_name is None:
      base_env_name = self.base_env_name
    self._base_env_name = base_env_name
    super(T2TGymEnv, self).__init__(batch_size, **kwargs)
    self.grayscale = grayscale
    self.resize_height_factor = resize_height_factor
    self.resize_width_factor = resize_width_factor
    if not self.name:
      # Set problem name if not registered.
      self.name = "Gym%s" % base_env_name

    self._envs = [make_gym_env(base_env_name, base_env_timesteps_limit)
                  for _ in range(self.batch_size)]

    # max_num_noops works only with atari envs.
    if max_num_noops > 0:
      assert self._envs[0].unwrapped.get_action_meanings()[
          self.noop_action
      ] == "NOOP"
    self.max_num_noops = max_num_noops

    # All batched envs must agree on observation and action spaces, since
    # they share one preprocessing graph and one action space attribute.
    orig_observ_space = self._envs[0].observation_space
    if not all(env.observation_space == orig_observ_space
               for env in self._envs):
      raise ValueError("All environments must use the same observation space.")

    self.observation_space = self._derive_observation_space(orig_observ_space)

    self.action_space = self._envs[0].action_space
    if not all(env.action_space == self.action_space for env in self._envs):
      raise ValueError("All environments must use the same action space.")

    # Build the TF ops that downscale (and optionally grayscale) batches of
    # raw observations, inside the env's shared private graph.
    with self._tf_graph.obj.as_default():
      self._resize = dict()
      orig_height, orig_width = orig_observ_space.shape[:2]
      self._img_batch_t = _Noncopyable(tf.placeholder(
          dtype=tf.uint8, shape=(None, orig_height, orig_width, 3)))
      height, width = self.observation_space.shape[:2]
      resized = tf.image.resize_images(self._img_batch_t.obj,
                                       [height, width],
                                       tf.image.ResizeMethod.AREA)
      resized = tf.cast(resized, tf.as_dtype(self.observation_space.dtype))
      if self.grayscale:
        resized = tf.image.rgb_to_grayscale(resized)
      self._resized_img_batch_t = _Noncopyable(resized)

  @property
  def base_env_name(self):
    return self._base_env_name

  @property
  def num_channels(self):
    """Number of color channels in each (derived) frame."""
    return self.observation_space.shape[2]

  def _derive_observation_space(self, orig_observ_space):
    """Returns the Box space after resize/grayscale preprocessing."""
    height, width, channels = orig_observ_space.shape
    if self.grayscale:
      channels = 1
    resized_height = height // self.resize_height_factor
    resized_width = width // self.resize_width_factor
    shape = (resized_height, resized_width, channels)
    return Box(low=orig_observ_space.low.min(),
               high=orig_observ_space.high.max(), shape=shape,
               dtype=orig_observ_space.dtype)

  def __str__(self):
    return "T2TGymEnv(%s)" % ", ".join([str(env) for env in self._envs])

  def _preprocess_observations(self, obs):
    # Run the resize/grayscale graph built in __init__ on the whole batch.
    return self._session.obj.run(self._resized_img_batch_t.obj,
                                 feed_dict={self._img_batch_t.obj: obs})

  def _step(self, actions):
    (obs, rewards, dones, _) = zip(*[
        env.step(action) for (env, action) in zip(self._envs, actions)
    ])
    return tuple(map(np.stack, (obs, rewards, dones)))

  def _reset(self, indices):
    def reset_with_noops(env):
      """Resets environment and applies random number of NOOP actions on it."""
      obs = env.reset()
      try:
        num_noops = random.randint(1, self.max_num_noops)
      except ValueError:
        # randint raises ValueError when max_num_noops == 0: do no noops.
        num_noops = 0

      for _ in range(num_noops):
        (obs, _, done, _) = env.step(self.noop_action)
        if done:
          obs = env.reset()

      return obs

    return np.stack([reset_with_noops(self._envs[index]) for index in indices])

  def close(self):
    for env in self._envs:
      env.close()
class DummyWorldModelProblem(EnvSimulationProblem):
  """Stand-in Problem carrying env metadata for world model prediction."""

  def __init__(self, action_space, reward_range, frame_height, frame_width):
    super(DummyWorldModelProblem, self).__init__()
    # Mirror the attributes that EnvSimulationProblem defines as class-level
    # defaults with the concrete values supplied by the caller.
    self.action_space = action_space
    self.reward_range = reward_range
    self._frame_height = frame_height
    self._frame_width = frame_width

  @property
  def frame_height(self):
    """Height of each frame, as given at construction time."""
    return self._frame_height

  @property
  def frame_width(self):
    """Width of each frame, as given at construction time."""
    return self._frame_width
# Atari registration.
# Game list from our list of ROMs
# Removed because XDeterministic-v4 did not exist:
# * adventure
# * defender
# * kaboom
ATARI_GAMES = [
    "air_raid", "alien", "amidar", "assault", "asterix", "asteroids",
    "atlantis", "bank_heist", "battle_zone", "beam_rider", "berzerk", "bowling",
    "boxing", "breakout", "carnival", "centipede", "chopper_command",
    "crazy_climber", "demon_attack", "double_dunk", "elevator_action", "enduro",
    "fishing_derby", "freeway", "frostbite", "gopher", "gravitar", "hero",
    "ice_hockey", "jamesbond", "journey_escape", "kangaroo", "krull",
    "kung_fu_master", "montezuma_revenge", "ms_pacman", "name_this_game",
    "phoenix", "pitfall", "pong", "pooyan", "private_eye", "qbert", "riverraid",
    "road_runner", "robotank", "seaquest", "skiing", "solaris",
    "space_invaders", "star_gunner", "tennis", "time_pilot", "tutankham",
    "up_n_down", "venture", "video_pinball", "wizard_of_wor", "yars_revenge",
    "zaxxon"
]
# List from paper:
# https://arxiv.org/pdf/1805.11593.pdf
# plus frostbite.
ATARI_GAMES_WITH_HUMAN_SCORE = [
    "alien", "amidar", "assault", "asterix", "asteroids",
    "atlantis", "bank_heist", "battle_zone", "beam_rider", "bowling",
    "boxing", "breakout", "chopper_command",
    "crazy_climber", "demon_attack", "double_dunk", "enduro",
    "fishing_derby", "freeway", "frostbite", "gopher", "gravitar", "hero",
    "ice_hockey", "jamesbond", "kangaroo", "krull",
    "kung_fu_master", "montezuma_revenge", "ms_pacman", "name_this_game",
    "pitfall", "pong", "private_eye", "qbert", "riverraid",
    "road_runner", "seaquest", "solaris",
    "up_n_down", "video_pinball", "yars_revenge",
]
# Whitelisted subset of ATARI_GAMES.
ATARI_WHITELIST_GAMES = [
    "amidar",
    "bank_heist",
    "berzerk",
    "boxing",
    "crazy_climber",
    "freeway",
    "frostbite",
    "gopher",
    "kung_fu_master",
    "ms_pacman",
    "pong",
    "qbert",
    "seaquest",
]
# Games on which model-free does better than model-based at this point.
ATARI_CURIOUS_GAMES = [
    "bank_heist",
    "boxing",
    "enduro",
    "kangaroo",
    "road_runner",
    "up_n_down",
]
# Games on which model-based training should work.
ATARI_DEBUG_GAMES = [
    "crazy_climber",
    "freeway",
    "pong",
]
# Different ATARI game modes in OpenAI Gym. Full list here:
# https://github.com/openai/gym/blob/master/gym/envs/__init__.py
ATARI_GAME_MODES = [
    "Deterministic-v0",  # 0.25 repeat action probability, 4 frame skip.
    "Deterministic-v4",  # 0.00 repeat action probability, 4 frame skip.
    "NoFrameskip-v0",    # 0.25 repeat action probability, 1 frame skip.
    "NoFrameskip-v4",    # 0.00 repeat action probability, 1 frame skip.
    "-v0",  # 0.25 repeat action probability, (2 to 5) frame skip.
    "-v4"   # 0.00 repeat action probability, (2 to 5) frame skip.
]
def camel_case_name(snake_case_name):
  """Converts a snake_case name to CamelCase.

  Uses w[:1] instead of w[0] so that empty segments produced by doubled,
  leading, or trailing underscores are skipped instead of raising
  IndexError.

  Args:
    snake_case_name: str, e.g. "bank_heist".

  Returns:
    CamelCase version of the name, e.g. "BankHeist".
  """
  return "".join(w[:1].upper() + w[1:] for w in snake_case_name.split("_"))
def register_game(game_name, game_mode="Deterministic-v4"):
  """Create and register problems for the game.

  Args:
    game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist".
    game_mode: the frame skip and sticky keys config.

  Raises:
    ValueError: if game_name or game_mode are wrong.
  """
  if game_name not in ATARI_GAMES:
    raise ValueError("Game %s not in ATARI_GAMES" % game_name)
  if game_mode not in ATARI_GAME_MODES:
    raise ValueError("Unknown ATARI game mode: %s." % game_mode)
  camel_game_name = camel_case_name(game_name) + game_mode
  # Create and register the Problem. The subclass is built dynamically with
  # type() so each (game, mode) pair gets its own registrable T2TGymEnv class.
  cls = type("Gym%sRandom" % camel_game_name,
             (T2TGymEnv,), {"base_env_name": camel_game_name})
  registry.register_problem(cls)
# Register the atari games with all of the possible modes.
# Note: this runs at import time, registering one Problem per (game, mode).
for atari_game in ATARI_GAMES:
  for atari_game_mode in ATARI_GAME_MODES:
    register_game(atari_game, game_mode=atari_game_mode)
| |
#!/usr/bin/env python
#
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs unit and integrations tests on the library."""
import argparse
import json
import logging
import platform
import build
import gendeps
import shakaBuildHelpers
class _HandleMixedListsAction(argparse.Action):
'''Action to handle comma-separated and space-separated lists.
When input can be given as a comma-separated list or a space-
separated list, default actions and types don't work. For example
if you had |'a,b,c' 'd'| you can get |['a,b,c', 'd']| or
|[['a','b','c'], 'd']|.
This action will expand the comma-separated lists and merge then with
the space separated lists so you will get |['a', 'b', 'c', 'd']|.
'''
def __call__(self, parser, namespace, new_values, option_string=None):
merged = getattr(namespace, self.dest) or []
for value in new_values:
merged += value.split(',')
setattr(namespace, self.dest, merged)
class _HandleKeyValuePairs(argparse.Action):
'''Action to handle key-value pairs and convert to a dictionary.
Input is a key-value pair separated by '='.
These keys and values are stored in a dictionary which can accumulate
multiple values from the command-line.
'''
def __call__(self, parser, namespace, new_argument, option_string=None):
merged = getattr(namespace, self.dest) or {}
key, value = new_argument.split('=', 1)
merged[key] = value
setattr(namespace, self.dest, merged)
def _KeyValueValidator(argument):
'''To validate the option has a key value pair format.
When you forget to provide the option in key=value format,
it reminds you by throwing an error before executing any tests.
'''
keyValuePair = [str for str in argument.split('=') if str != ''];
if len(keyValuePair) == 2:
return argument
else:
raise argparse.ArgumentTypeError(
'Received %s but expecting format of key=value' % argument
)
def _IntGreaterThanZero(x):
i = int(x)
if i <= 0:
raise argparse.ArgumentTypeError('%s is not greater than zero' % x)
return i
def _GetDefaultBrowsers():
  """Use the platform name to get which browsers can be tested.

  Returns:
    List of browser names understood by Karma for the current platform.

  Raises:
    RuntimeError: if the platform is not recognized.
  """
  if shakaBuildHelpers.is_linux():
    # For MP4 support on Linux Firefox, install gstreamer1.0-libav.
    # Opera on Linux only supports MP4 for Ubuntu 15.04+, so it is not in the
    # default list of browsers for Linux at this time.
    return ['Chrome','Edge','Firefox']
  if shakaBuildHelpers.is_darwin():
    return ['Chrome','Edge','Firefox','Safari']
  if shakaBuildHelpers.is_windows() or shakaBuildHelpers.is_cygwin():
    return ['Chrome','Edge','Firefox','IE']
  # BUG FIX: the original raised `Error`, a name that is never defined or
  # imported in this module, so an unknown platform surfaced as a NameError.
  raise RuntimeError('Unrecognized system: %s' % platform.uname()[0])
class Launcher:
  """A stateful object for parsing arguments and running Karma commands.

  A launcher that holds the state of parsing arguments and builds and
  executes the resulting Karma command. The process is split into sections so
  that other scripts can inject their own logic between calls.

  For example:
    l = Launcher('Launch Karma tests')
    l.parser.add_argument('custom_flag')
    l.ParseArguments(args)
    l.ResolveBrowsers(['Chrome'])
    if l.parsed_args.custom_flag:
      do_custom_logic
    l.RunCommand(karma_conf_path)
  """

  def __init__(self, description):
    """Builds the argument parser and declares every command-line option."""
    # Settings forwarded to Karma as JSON via --settings (see RunCommand).
    self.karma_config = {}
    self.parsed_args = None
    self.parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    running_commands = self.parser.add_argument_group(
        'Running',
        'These commands affect how tests are ran.')
    logging_commands = self.parser.add_argument_group(
        'Logging',
        'These commands affect what gets logged and how the logs will appear.')
    networking_commands = self.parser.add_argument_group(
        'Networking',
        'These commands affect how Karma works over a network.')
    pre_launch_commands = self.parser.add_argument_group(
        'Pre-Launch',
        'These commands are handled before the tests start running.')
    running_commands.add_argument(
        '--browsers',
        help='Specify which browsers to run tests on as a space-separated or '
             'comma-separated list. Use "--browsers help" to see a list of '
             'available browsers on this platform.',
        action=_HandleMixedListsAction,
        nargs='+')
    running_commands.add_argument(
        '--exclude-browsers',
        help='Browsers to skip as a comma-separated or space-separated list.',
        action=_HandleMixedListsAction,
        nargs='+')
    running_commands.add_argument(
        '--no-browsers',
        help='Instead of Karma starting browsers, Karma will wait for a '
             'browser to connect to it.',
        action='store_true')
    running_commands.add_argument(
        '--single-run',
        help='Run the test when browsers capture and exit.',
        dest='single_run',
        action='store_true',
        default=True)
    running_commands.add_argument(
        '--no-single-run',
        help='Do not shut down Karma when tests are complete.',
        dest='single_run',
        action='store_false')
    running_commands.add_argument(
        '--random',
        help='Run the tests in a random order. This can be used with --seed '
             'to control the random order. If used without --seed, a seed '
             'will be generated.',
        action='store_true')
    running_commands.add_argument(
        '--seed',
        help='Set the seed that will be used by --random. If used without '
             '--random, this will have no effect.',
        type=int)
    running_commands.add_argument(
        '--filter',
        help='Specify a regular expression to limit which tests run.',
        type=str,
        dest='filter')
    running_commands.add_argument(
        '--use-xvfb',
        help='Run tests without opening browser windows. Requires Linux '
             'and xvfb.',
        action='store_true')
    running_commands.add_argument(
        '--quick',
        help='Skip integration tests.',
        action='store_true')
    running_commands.add_argument(
        '--external',
        help='Run tests that require external resources. This will require a '
             'fast connection to the open internet.',
        action='store_true')
    running_commands.add_argument(
        '--drm',
        help='Run tests that require DRM.',
        action='store_true')
    running_commands.add_argument(
        '--quarantined',
        help='Run tests that have been quarantined.',
        action='store_true')
    running_commands.add_argument(
        '--runs',
        help='Set the number of times each test should run. (default '
             '%(default)s) ',
        type=_IntGreaterThanZero,
        default=1,
        dest='runs')
    running_commands.add_argument(
        '--uncompiled',
        help='Use the uncompiled source code when running the tests. This can '
             'be used to make debugging easier.',
        action='store_true')
    running_commands.add_argument(
        '--auto-watch',
        help='Auto watch source files and run on change.',
        dest='auto_watch',
        action='store_true',
        default=False)
    running_commands.add_argument(
        '--no-auto-watch',
        help='Do not watch source files',
        dest='auto_watch',
        action='store_false')
    running_commands.add_argument(
        '--capture-timeout',
        help='Kill the browser if it does not capture in the given time [ms]. '
             '(default %(default)s)',
        type=int,
        default=60000)
    running_commands.add_argument(
        '--delay-tests',
        help='Insert an artificial delay between tests, in seconds. '
             'This can be helpful when tracking down asynchronous test '
             'pollution, in which an async process belonging to one test may '
             'trigger a failure after other tests have begun. '
             '(default %(const)s)',
        type=int,
        default=None,
        const=2,
        nargs='?')
    running_commands.add_argument(
        '--spec-hide-passed',
        help='If provided, configure the spec reporter to hide passing tests.',
        action='store_true',
        default=False)
    running_commands.add_argument(
        '--test-custom-asset',
        help='Run asset playback tests on a custom manifest URI.',
        type=str,
        default=None)
    running_commands.add_argument(
        '--test-custom-license-server',
        help='Configure license servers for the custom asset playback test. '
             'May be specified multiple times to configure multiple key '
             'systems.',
        type=_KeyValueValidator,
        metavar='KEY_SYSTEM_ID=LICENSE_SERVER_URI',
        action=_HandleKeyValuePairs)
    running_commands.add_argument(
        '--test-timeout',
        help='Sets the test timeout value [ms] (default %(default)s)',
        dest='test_timeout',
        default=120000,
        type=int)
    running_commands.add_argument(
        '--no-babel',
        help="Don't use Babel to convert ES6 to ES5.",
        dest='babel',
        action='store_false')
    logging_commands.add_argument(
        '--colors',
        help='Use colors when reporting and printing logs.',
        action='store_true',
        dest='colors',
        default=True)
    logging_commands.add_argument(
        '--no-colors',
        help='Do not use colors when reporting or printing logs',
        action='store_false',
        dest='colors')
    logging_commands.add_argument(
        '--log-level',
        help='Set the type of log messages that Karma will print.',
        choices=['disable', 'error', 'warn', 'info', 'debug'],
        default='error')
    logging_commands.add_argument(
        '--html-coverage-report',
        help='Generate HTML-formatted code coverage reports in the "coverage" '
             'folder.',
        action='store_true')
    logging_commands.add_argument(
        '--enable-logging',
        help='Print log messages from tests and limits the type of log '
             'messages printed. If --enable-logging is not given, no logs '
             'will be printed. (default %(const)s)',
        choices=['none', 'error', 'warning', 'info', 'debug', 'v1', 'v2'],
        default='none',
        const='info',
        dest='logging',
        nargs='?')
    logging_commands.add_argument(
        '--reporters',
        help='Specify which reporters to use as a space-separated or '
             'comma-separated list. Possible options are dots, progress, '
             'junit, growl, or coverage.',
        action=_HandleMixedListsAction,
        nargs='+')
    logging_commands.add_argument(
        '--report-slower-than',
        help='Report tests that are slower than the given time [ms].',
        type=int)
    networking_commands.add_argument(
        '--port',
        help='Port where the server is running.',
        type=int)
    networking_commands.add_argument(
        '--hostname',
        help='Specify the hostname to be used when capturing browsers. This '
             'defaults to localhost.',
        default='localhost')
    pre_launch_commands.add_argument(
        '--force',
        help='Force a rebuild of the project before running tests. This will '
             'have no effect if --no-build is set.',
        action='store_true')
    pre_launch_commands.add_argument(
        '--no-build',
        help='Skip building the project before running tests.',
        action='store_false',
        dest='build',
        default=True)
    pre_launch_commands.add_argument(
        '--print-command',
        help='Print the command passed to Karma before passing it to Karma.',
        action='store_true')

  def ParseArguments(self, args):
    """Parse the given arguments.

    Uses the parser definition to parse |args| and populates
    |self.karma_config|.
    """
    self.parsed_args = self.parser.parse_args(args)
    self.karma_config = {}
    # Flags whose values are copied verbatim into the Karma settings.
    pass_through = [
        'auto_watch',
        'babel',
        'capture_timeout',
        'colors',
        'drm',
        'external',
        'filter',
        'hostname',
        'html_coverage_report',
        'log_level',
        'logging',
        'port',
        'quarantined',
        'quick',
        'random',
        'report_slower_than',
        'seed',
        'single_run',
        'uncompiled',
        'delay_tests',
        'spec_hide_passed',
        'test_custom_asset',
        'test_custom_license_server',
        'test_timeout',
    ]
    # Check each value before setting it to avoid passing null values.
    for name in pass_through:
      value = getattr(self.parsed_args, name, None)
      if value is not None:
        self.karma_config[name] = value
    if self.parsed_args.reporters:
      self.karma_config['reporters'] = self.parsed_args.reporters

  def ResolveBrowsers(self, default_browsers):
    """Decide what browsers we should use.

    This is separate from ParseArguments so that other tools can insert
    additional logic to derive a browser list from the parsed arguments.
    """
    # NOTE(review): assert is stripped under `python -O`; callers must always
    # pass a non-empty list.
    assert(default_browsers and len(default_browsers))
    if self.parsed_args.no_browsers:
      logging.warning('In this mode browsers must manually connect to karma.')
    elif self.parsed_args.browsers:
      self.karma_config['browsers'] = self.parsed_args.browsers
    else:
      logging.warning('Using default browsers: %s', default_browsers)
      self.karma_config['browsers'] = default_browsers
    # Check if there are any browsers that we should remove
    if self.parsed_args.exclude_browsers and 'browsers' in self.karma_config:
      all_browsers = set(self.karma_config['browsers'])
      bad_browsers = set(self.parsed_args.exclude_browsers)
      if bad_browsers - all_browsers:
        raise RuntimeError('Attempting to exclude unselected browsers: %s' %
                           ','.join(bad_browsers - all_browsers))
      good_browsers = all_browsers - bad_browsers
      self.karma_config['browsers'] = list(good_browsers)

  def RunCommand(self, karma_conf):
    """Build a command and send it to Karma for execution.

    Uses |self.parsed_args| and |self.karma_config| to build and run a Karma
    command.

    Returns:
      0 when every run passed, 1 on any failure.
    """
    if self.parsed_args.use_xvfb and not shakaBuildHelpers.is_linux():
      logging.error('xvfb can only be used on Linux')
      return 1
    if not shakaBuildHelpers.update_node_modules():
      logging.error('Failed to update node modules')
      return 1
    karma = shakaBuildHelpers.get_node_binary('karma')
    cmd = ['xvfb-run', '--auto-servernum'] if self.parsed_args.use_xvfb else []
    cmd += karma + ['start']
    cmd += [karma_conf] if karma_conf else []
    # The accumulated settings are handed to Karma as a single JSON blob.
    cmd += ['--settings', json.dumps(self.karma_config)]
    # There is no need to print a status here as the gendep and build
    # calls will print their own status updates.
    if self.parsed_args.build:
      if gendeps.main([]) != 0:
        logging.error('Failed to generate project dependencies')
        return 1
      if build.main(['--force'] if self.parsed_args.force else []) != 0:
        logging.error('Failed to build project')
        return 1
    # Before Running the command, print the command.
    if self.parsed_args.print_command:
      logging.info('Karma Run Command')
      logging.info('%s', cmd)
    # Run the command.
    results = []
    for run in range(self.parsed_args.runs):
      logging.info('Running test (%d / %d, %d failed so far)...',
                   run + 1, self.parsed_args.runs, len(results) - results.count(0))
      results.append(shakaBuildHelpers.execute_get_code(cmd))
    # Print a summary of the results.
    if self.parsed_args.runs > 1:
      logging.info('All runs completed. %d / %d runs passed.',
                   results.count(0),
                   len(results))
      logging.info('Results (exit code): %r', results)
    else:
      logging.info('Run complete')
      logging.info('Result (exit code): %d', results[0])
    return 0 if all(result == 0 for result in results) else 1
def Run(args):
  """Parses |args|, resolves the default browser list, and launches Karma."""
  test_launcher = Launcher('Shaka Player Test Runner Script')
  test_launcher.ParseArguments(args)
  test_launcher.ResolveBrowsers(_GetDefaultBrowsers())
  return test_launcher.RunCommand(None)
def main(args):
  """Script entry point: delegates straight to Run()."""
  return Run(args)

if __name__ == '__main__':
  shakaBuildHelpers.run_main(main)
| |
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
import sys
import ethtool
from oslo_log import log as logging
from command_utils import execute
from eswitchd.common import constants
LOG = logging.getLogger(__name__)
class pciUtils(object):
    """Helpers for inspecting Mellanox PFs/VFs via sysfs and ethtool."""

    # sysfs path templates, keyed by interface name (and VF number below).
    ETH_PATH = "/sys/class/net/%(interface)s"
    ETH_DEV = ETH_PATH + "/device"
    ETH_PORT = ETH_PATH + "/dev_id"
    PF_MLX_DEV_PATH = "/sys/class/infiniband/*"
    VENDOR_PATH = ETH_DEV + '/vendor'
    DEVICE_TYPE_PATH = ETH_DEV + '/virtfn%(vf_num)s/device'
    # BUG FIX: raw string -- "\d" in a plain string is an invalid escape
    # sequence (DeprecationWarning, later SyntaxWarning).
    _VIRTFN_RE = re.compile(r"virtfn(?P<vf_num>\d+)")
    VFS_PATH = ETH_DEV + "/virtfn*"

    def get_vfs_info(self, pf):
        """Maps each VF PCI address of |pf| to its VF number and device type."""
        vfs_info = {}
        try:
            dev_path = self.ETH_DEV % {'interface': pf}
            dev_info = os.listdir(dev_path)
            for dev_filename in dev_info:
                result = self._VIRTFN_RE.match(dev_filename)
                if result and result.group('vf_num'):
                    dev_file = os.path.join(dev_path, dev_filename)
                    # readlink gives a relative path; strip the leading "./"
                    # and "../" characters to keep only the PCI address.
                    vf_pci = os.readlink(dev_file).strip("./")
                    vf_num = result.group('vf_num')
                    vf_device_type = self.get_vf_device_type(pf, vf_num)
                    vfs_info[vf_pci] = {'vf_num': vf_num,
                                        'vf_device_type': vf_device_type}
        except Exception:
            LOG.exception("PCI device %s not found", pf)
        return vfs_info

    def get_dev_attr(self, attr_path):
        """Returns the first line of a sysfs attribute file, or None.

        BUG FIX: the original opened the file without ever closing it,
        leaking a file descriptor per call.
        """
        try:
            with open(attr_path) as attr_file:
                return attr_file.readline().strip()
        except IOError:
            return

    def verify_vendor_pf(self, pf, vendor_id=constants.VENDOR):
        """True if interface |pf| reports the given PCI vendor id."""
        vendor_path = pciUtils.VENDOR_PATH % {'interface': pf}
        return self.get_dev_attr(vendor_path) == vendor_id

    def get_vf_device_type(self, pf, vf_num):
        """Returns the ConnectX generation constant for a VF, or None."""
        device_vf_type = None
        device_type_file = pciUtils.DEVICE_TYPE_PATH % {'interface': pf,
                                                        'vf_num': vf_num}
        try:
            with open(device_type_file, 'r') as fd:
                device_type = fd.read()
            device_type = device_type.strip(os.linesep)
            if device_type in constants.CX3_VF_DEVICE_TYPE_LIST:
                device_vf_type = constants.CX3_VF_DEVICE_TYPE
            elif device_type in constants.CX4_VF_DEVICE_TYPE_LIST:
                device_vf_type = constants.CX4_VF_DEVICE_TYPE
            elif device_type in constants.CX5_VF_DEVICE_TYPE_LIST:
                device_vf_type = constants.CX5_VF_DEVICE_TYPE
        except IOError:
            pass
        return device_vf_type

    def is_sriov_pf(self, pf):
        """True when |pf| exposes at least one virtfn entry (None otherwise)."""
        vfs_path = pciUtils.VFS_PATH % {'interface': pf}
        vfs = glob.glob(vfs_path)
        if vfs:
            return True
        else:
            return

    def get_interface_type(self, ifc):
        """Returns 'eth', 'ib', or None based on `ip link` output for |ifc|."""
        cmd = ['ip', '-o', 'link', 'show', 'dev', ifc]
        try:
            result = execute(cmd, root_helper=None)
        except Exception as e:
            LOG.warning("Failed to execute command %s due to %s", cmd, e)
            raise
        if result.find('link/ether') != -1:
            return 'eth'
        elif result.find('link/infiniband') != -1:
            return 'ib'
        else:
            return None

    def is_ifc_module(self, ifc, fabric_type):
        """True when |ifc|'s kernel module matches the fabric type."""
        modules = {'eth': 'mlx4_en', 'ib': 'ipoib'}
        if modules[fabric_type] in ethtool.get_module(ifc):
            return True

    def filter_ifcs_module(self, ifcs, fabric_type):
        """Keeps only the interfaces whose module matches |fabric_type|."""
        return [ifc for ifc in ifcs if self.is_ifc_module(ifc, fabric_type)]

    def get_auto_pf(self, fabric_type):
        """Auto-detects the single SR-IOV-capable Mellanox PF, or exits."""
        def log_error_and_exit(err_msg):
            LOG.error(err_msg)
            sys.exit(1)
        mlnx_pfs = [ifc for ifc in ethtool.get_devices()
                    if self.verify_vendor_pf(ifc)]
        if not mlnx_pfs:
            log_error_and_exit("Didn't find any Mellanox devices.")
        mlnx_pfs = [ifc for ifc in mlnx_pfs if self.is_sriov_pf(ifc)]
        if not mlnx_pfs:
            log_error_and_exit("Didn't find Mellanox NIC "
                               "with SR-IOV capabilities.")
        mlnx_pfs = self.filter_ifcs_module(mlnx_pfs, fabric_type)
        if not mlnx_pfs:
            # Fixed spelling of the error message ("capabilites").
            log_error_and_exit("Didn't find Mellanox NIC of type %s with "
                               "SR-IOV capabilities." % fabric_type)
        if len(mlnx_pfs) != 1:
            log_error_and_exit("Found multiple PFs %s. Configure Manually."
                               % mlnx_pfs)
        return mlnx_pfs[0]

    def get_eth_vf(self, dev):
        """Returns the PCI address of |dev| from its sysfs device link."""
        vf_path = pciUtils.ETH_DEV % {'interface': dev}
        try:
            device = os.readlink(vf_path)
            vf = device.split('/')[3]
            return vf
        except Exception:
            return None

    def get_pf_pci(self, pf, type=None):
        """Returns the PF PCI address; full form for type='normal',
        otherwise truncated (without the trailing function digits)."""
        vf = self.get_eth_vf(pf)
        if vf:
            if type == 'normal':
                return vf
            else:
                return vf[:-2]
        return None

    def get_pf_mlx_dev(self, pci_id):
        """Returns the mlx IB device name whose sysfs link matches |pci_id|."""
        paths = glob.glob(pciUtils.PF_MLX_DEV_PATH)
        for path in paths:
            # Renamed from `id`, which shadowed the builtin.
            linked_pci_id = os.readlink(path).split('/')[5]
            if pci_id == linked_pci_id:
                return path.split('/')[-1]

    def get_guid_index(self, pf_mlx_dev, dev, hca_port):
        """Reads the GUID index for a device on the given HCA port."""
        guid_index = None
        path = constants.GUID_INDEX_PATH % (pf_mlx_dev, dev, hca_port)
        with open(path) as fd:
            guid_index = fd.readline().strip()
        return guid_index

    def get_eth_port(self, dev):
        """Returns the 1-based port number of |dev|, or None on error."""
        port_path = pciUtils.ETH_PORT % {'interface': dev}
        try:
            with open(port_path) as f:
                dev_id = int(f.read(), 0)
                return dev_id + 1
        except IOError:
            return

    def get_vfs_macs_ib(self, fabric_details):
        """Dispatches MAC collection by the PF's ConnectX generation."""
        if fabric_details['pf_device_type'] == constants.CX3_VF_DEVICE_TYPE:
            return self.get_vfs_macs_ib_cx3(fabric_details)
        elif fabric_details['pf_device_type'] == constants.CX4_VF_DEVICE_TYPE:
            return self.get_vfs_macs_ib_cx4(fabric_details)

    def get_vfs_macs_ib_cx3(self, fabric_details):
        """Maps VF index -> MAC derived from the admin GUIDs (ConnectX-3)."""
        hca_port = fabric_details['hca_port']
        pf_mlx_dev = fabric_details['pf_mlx_dev']
        macs_map = {}
        guids_path = constants.ADMIN_GUID_PATH % (pf_mlx_dev, hca_port,
                                                  '[1-9]*')
        paths = glob.glob(guids_path)
        for path in paths:
            vf_index = path.split('/')[-1]
            with open(path) as f:
                guid = f.readline().strip()
            if guid == constants.INVALID_GUID_CX3:
                mac = constants.INVALID_MAC
            else:
                # MAC is the first and last 3 bytes of the GUID.
                head = guid[:6]
                tail = guid[-6:]
                mac = ":".join(re.findall('..?', head + tail))
            macs_map[str(int(vf_index))] = mac
        return macs_map

    def get_vfs_macs_ib_cx4(self, fabric_details):
        """Maps VF number -> MAC derived from the node GUID (ConnectX-4)."""
        vfs = fabric_details['vfs']
        macs_map = {}
        for vf in vfs.values():
            vf_num = vf['vf_num']
            pf_mlx_dev = fabric_details['pf_mlx_dev']
            guid_path = constants.CX4_GUID_NODE_PATH % {'module': pf_mlx_dev,
                                                        'vf_num': vf_num}
            with open(guid_path) as f:
                guid = f.readline().strip()
            head = guid[:8]
            tail = guid[-9:]
            mac = head + tail
            macs_map[vf_num] = mac
        return macs_map

    def get_device_address(self, hostdev):
        """Formats a libvirt <hostdev> element's PCI address as D:B:S.F."""
        domain = hostdev.attrib['domain'][2:]
        bus = hostdev.attrib['bus'][2:]
        slot = hostdev.attrib['slot'][2:]
        function = hostdev.attrib['function'][2:]
        dev = "%.4s:%.2s:%2s.%.1s" % (domain, bus, slot, function)
        return dev
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from savanna import context
import savanna.tests.unit.conductor.base as test_base
# Minimal cluster description used as a fixture by the tests below:
# two node groups plus a couple of per-service config sections.
SAMPLE_CLUSTER = {
    "plugin_name": "test_plugin",
    "hadoop_version": "test_version",
    "tenant_id": "tenant_1",
    "name": "test_cluster",
    "user_keypair_id": "my_keypair",
    "node_groups": [
        {
            "name": "ng_1",
            "flavor_id": "42",
            "node_processes": ["p1", "p2"],
            "count": 1
        },
        {
            "name": "ng_2",
            "flavor_id": "42",
            "node_processes": ["p3", "p4"],
            "count": 3
        }
    ],
    "cluster_configs": {
        "service_1": {
            "config_2": "value_2"
        },
        "service_2": {
            "config_1": "value_1"
        }
    },
}
class ClusterTest(test_base.ConductorManagerTestCase):
    """Conductor API tests for cluster, node-group, and instance CRUD."""

    def test_cluster_create_list_delete(self):
        """Cluster can be created, listed, and destroyed."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        self.assertIsInstance(cluster_db_obj, dict)
        lst = self.api.cluster_get_all(ctx)
        self.assertEqual(len(lst), 1)
        cl_id = lst[0]["id"]
        self.api.cluster_destroy(ctx, cl_id)
        lst = self.api.cluster_get_all(ctx)
        self.assertEqual(len(lst), 0)

    def test_duplicate_cluster_create(self):
        """Creating the same cluster twice is rejected."""
        ctx = context.ctx()
        self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        with self.assertRaises(RuntimeError):
            self.api.cluster_create(ctx, SAMPLE_CLUSTER)

    def test_cluster_fields(self):
        """Stored cluster fields round-trip the sample values."""
        ctx = context.ctx()
        cl_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        self.assertIsInstance(cl_db_obj, dict)
        for key, val in SAMPLE_CLUSTER.items():
            if key == 'node_groups':
                # node_groups are checked separately below
                continue
            self.assertEqual(val, cl_db_obj.get(key),
                             "Key not found %s" % key)
        for ng in cl_db_obj["node_groups"]:
            # Strip the server-generated fields so the remainder can be
            # compared directly against the sample spec.
            ng.pop("created_at")
            ng.pop("updated_at")
            ng.pop("id")
            self.assertEqual(ng.pop("cluster_id"), cl_db_obj["id"])
            ng.pop("image_id")
            self.assertEqual(ng.pop("instances"), [])
            ng.pop("node_configs")
            ng.pop("node_group_template_id")
            ng.pop("volume_mount_prefix")
            ng.pop("volumes_size")
            ng.pop("volumes_per_node")
        self.assertListEqual(SAMPLE_CLUSTER["node_groups"],
                             cl_db_obj["node_groups"])

    def test_cluster_update_status(self):
        """cluster_update changes status and the change is persisted."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        updated_cl = self.api.cluster_update(ctx, _id, {"status": "Active"})
        self.assertIsInstance(updated_cl, dict)
        self.assertEqual(updated_cl["status"], "Active")
        get_cl_obj = self.api.cluster_get(ctx, _id)
        self.assertEqual(updated_cl, get_cl_obj)

    def _ng_in_cluster(self, cluster_db_obj, ng_id):
        """Returns the node group with the given id, or None."""
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] == ng_id:
                return ng
        return None

    def test_add_node_group(self):
        """node_group_add attaches a new group to the cluster."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        node_group = {
            "name": "ng_3",
            "flavor_id": "42",
            "node_processes": ["p3", "p4"],
            "count": 5
        }
        cluster = self.api.node_group_add(ctx, _id, node_group)
        ng_id = cluster["node_groups"][-1]["id"]
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
        self.assertTrue(found_ng, "New Node Group not found")

    def test_update_node_group(self):
        """node_group_update changes are visible on re-read."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        self.assertEqual(len(cluster_db_obj["node_groups"]), 2)
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        self.api.node_group_update(ctx, ng_id, {"image_id": "test_image"})
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
        self.assertTrue(found_ng, "Updated Node Group not found")
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            self.assertEqual(ng["image_id"], "test_image")

    def test_delete_node_group(self):
        """node_group_remove detaches the group; removing twice fails."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        self.api.node_group_remove(ctx, ng_id)
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
        # Fixed message typo ("CLuster").
        self.assertFalse(found_ng, "Node Group is still in a Cluster")
        with self.assertRaises(RuntimeError):
            self.api.node_group_remove(ctx, ng_id)

    def _add_instance(self, ctx, ng_id):
        """Adds a single named instance to the given node group."""
        instance = {
            "instance_name": "additional_vm"
        }
        return self.api.instance_add(ctx, ng_id, instance)

    def test_add_instance(self):
        """instance_add bumps the group count and stores the instance."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        count = cluster_db_obj["node_groups"][-1]["count"]
        self._add_instance(ctx, ng_id)
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            self.assertEqual(count + 1, ng["count"])
            self.assertEqual("additional_vm",
                             ng["instances"][0]["instance_name"])

    def test_update_instance(self):
        """instance_update changes are visible on re-read."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        node_group = self._add_instance(ctx, ng_id)
        instance_id = node_group["instances"][-1]["id"]
        # BUG FIX: the original passed the imported `context` *module* here
        # instead of the request context `ctx`.
        self.api.instance_update(ctx, instance_id,
                                 {"management_ip": "1.1.1.1"})
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            self.assertEqual("1.1.1.1", ng["instances"][0]["management_ip"])

    def test_remove_instance(self):
        """instance_remove restores the count; removing twice fails."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        count = cluster_db_obj["node_groups"][-1]["count"]
        node_group = self._add_instance(ctx, ng_id)
        instance_id = node_group["instances"][-1]["id"]
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            self.assertEqual(count + 1, ng["count"])
        self.api.instance_remove(ctx, instance_id)
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            self.assertEqual(count, ng["count"])
        with self.assertRaises(RuntimeError):
            self.api.instance_remove(ctx, instance_id)
| |
import os
import sys
import fnmatch
import re
try:
import urllib.request as urllib2
except ImportError:
import urllib2
#Convenient methods
def webfileExists(url):
    """Return True if `url` can be opened and at least one byte read.

    Fixes over the original: the bare `except:` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to Exception, and the HTTP
    response is explicitly closed instead of being leaked.
    """
    try:
        response = urllib2.urlopen(urllib2.Request(url))
        try:
            response.read(1)
        finally:
            response.close()
        return True
    except Exception:
        return False
def webfileDownload(url):
    """Download up to the first 20000 bytes of `url` and return them as text.

    Returns "" when the URL cannot be opened. Bytes map one-to-one to
    characters (latin-1 decoding), which is exactly what the original
    chr()-per-byte loop produced.

    Fixes over the original: the URL was fetched twice (once through
    webfileExists and once here), the response was never closed, and the
    string was assembled byte-by-byte in a Python-level loop.
    """
    try:
        response = urllib2.urlopen(url)
        try:
            data = response.read(20000)
        finally:
            response.close()
    except Exception:
        return ""
    return data.decode("latin-1")
def all_indices(value, qlist):
    """Return every index at which `value` occurs in `qlist`, in order."""
    found = []
    search_from = 0
    while True:
        try:
            position = qlist.index(value, search_from)
        except ValueError:
            # No further occurrences.
            return found
        found.append(position)
        search_from = position + 1
#My own code
def testArgs(arguments, argCount, precise):
if len(arguments[1:]) < argCount:
print("[ERROR] Not enough arguments. Use -h for help")
return False
elif len(arguments[1:]) > argCount and precise == True:
print("[ERROR] To many arguments. Use -h for help")
return False
return True
def showHelp():
    """Print the command line usage text for the converter to stdout."""
    print("Markdown To Html")
    print("Convert markdown to html")
    print("")
    print("Usage: MarkdownToHtml.py {INPUTFILE} [OUTPUTFILE] [OPTIONS]")
    print("")
    print("INPUTFILE: This must be an .md or .markdown file")
    print("OUTPUTFILE: The name of the generated html file. If none specified this will be the same as the input file")
    print("OPTIONS:")
    print("  -t Set the title of the webpage")
    print("  -c Set the charset of the webpage ")
    print("  -i Include a javascript library. You can use the google libraries or from your own url.")
    print("     Google libraries: AngularJS Angular_Material Dojo Ext-Core Jquery MooTools Prototype scriptaculous SPF SWFobject threejs webfontloader")
    print("     Usage: {libary}{version} or {adress-to-library} e.g. -i jQuery1.11.3 or -i \"https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js\"")
    print("  -d Download the included libraries")
    print("  -nomin Do not use the minimized versions of the google libraries")
    print("  -a Include javascript and stylesheets from disk.")
    print("     You can specify a folder; this wil load all files in the folder")
    print("     Or you can specify a .ass or .assets file; The specified files will automaticly be loaded.")
    print("  -nocss This will exclude ALL css from the build")
    print("  -nojs This will exclude ALL js from the build, including the includes from google")
    #End of help
    return
def readFile(filename, extensions):
    """Read *filename* and return its content as a list of characters.

    :param filename: path of the file to read.
    :param extensions: allowed file extensions (empty list = allow any).
    :return: list of single characters, or [] on any validation/read error.
    """
    if len(extensions) != 0 and os.path.splitext(filename)[1] not in extensions:
        print("[ERROR] Wrong extension. Use -h for help")
        return []
    # Bug fix: the old check was ``not os.access(os.path.dirname(filename),
    # os.W_OK)`` -- it tested WRITE access on the parent directory and
    # inverted it, so any file living in a writable directory was rejected.
    # Test read access on the file itself instead, and use a context
    # manager so the handle is always closed.
    elif os.path.exists(filename) and os.access(filename, os.R_OK):
        with open(filename, 'r') as file:
            buffer = list(file.read())
        if len(buffer) == 0:
            print("[ERROR] File is empty: " + filename)
        return buffer
    else:
        print("[ERROR] Could not open file: " + filename)
        return []
def writeFile(filename, extension, data):
    """Write *data* to *filename* with its extension replaced by *extension*.

    :param filename: path whose extension will be swapped for *extension*.
    :param extension: new extension, including the leading dot (e.g. ".html").
    :param data: text to write.

    Bug fix: the original never closed the file handle, so the data was
    only flushed whenever the garbage collector got around to it.
    """
    with open(os.path.splitext(filename)[0] + extension, 'w') as file:
        file.write(data)
def convert(buffer, args):
    """Convert markdown text into a list of HTML output lines.

    :param buffer: the markdown source as a list of single characters
                   (as produced by readFile).
    :param args: remaining command line options (-t, -c, -i, -d, -nomin,
                 -a, -nocss, -nojs).
    :return: list of strings which, joined together, form the HTML page.

    NOTE(review): option values are read via args[args.index(opt) + 1]
    without bounds checking -- an option passed as the very last argument
    raises IndexError; confirm whether that is acceptable for this CLI.
    """
    result = []
    floating_data = []
    #Standard html rules
    result.append("<!DOCTYPE html>" + "\n")
    result.append("<html>" + "\n")
    # A <head> section is only emitted when at least one option was given.
    if len(args) > 0:
        result.append("\t" + "<head>" + "\n")
        if "-t" in args and not fnmatch.fnmatch(args[args.index("-t") + 1], "-?"):
            result.append("\t\t" + "<title>" + args[args.index("-t") + 1] + "</title>" + "\n")
        if "-c" in args and not fnmatch.fnmatch(args[args.index("-c") + 1], "-?"):
            result.append("\t\t" + "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=" + args[args.index("-c") + 1] + "\">" + "\n")
        # Library includes: either a raw URL or a google-hosted {name}{version}.
        if "-i" in args and not "-nojs" in args:
            for i in all_indices("-i", args):
                if fnmatch.fnmatch(args[i + 1], "http://*") or fnmatch.fnmatch(args[i + 1], "https://*"):
                    name = args[i + 1].split("/")[len(args[i + 1].split("/")) - 1]
                    #Test if it exists
                    print("[INFO] Searching for a downloaded version of " + name + "..." + int(80 - len("[INFO] Searching for a downloaded version of " + name + "...")) * " " + " ", end='')
                    if os.path.exists("./assets/js/" + name):
                        print("[ ok ]")
                        continue
                    else:
                        print("[fail]")
                    if "-d" in args:
                        #Download the file
                        print("[INFO] Downloading " + name + "..." + int(80 - len("[INFO] Downloading " + name + "...")) * " " + " ", end='')
                        if webfileExists(args[i + 1]):
                            print("[ ok ]")
                            writeFile("./assets/js/" + name, ".js", webfileDownload(args[i + 1]))
                            result.append("\t\t" + "<script type=\"text/javascript\" src=\"" + name + "\"></script>" + "\n")
                        else:
                            print("[fail]")
                            print("[ERROR] Download failed (" + args[i + 1] + ")")
                    else:
                        print("[INFO] Validating " + name + "..." + int(80 - len("[INFO] Validating " + name + "...")) * " " + " ", end='')
                        if webfileExists(args[i + 1]):
                            print("[ ok ]")
                            result.append("\t\t" + "<script type=\"text/javascript\" src=\"" + args[i + 1] + "\"></script>" + "\n")
                        else:
                            print("[fail]")
                            print("[ERROR] File not found (" + args[i + 1] + ")")
                elif not fnmatch.fnmatch(args[i + 1], "-?"):
                    # Split "jQuery1.11.3" into name ("jquery") and version.
                    name1 = name2 = re.split('(\d+)', args[i + 1])[0].lower()
                    version = "".join(re.split('(\d+)', args[i + 1])[1:])
                    if version == version.replace(".", "") and name1.endswith("r"):
                        name1 = name1[0:len(name1) - 1]
                        name2 = name2[0:len(name2) - 1]
                        version = "r" + version
                    #Exceptions (Turned out to be a lot more then expected)
                    # name1 = directory on the google CDN, name2 = file name.
                    if name1 == "angular":
                        name1 = "angularjs"
                    elif name2 == "angularjs":
                        name2 = "angular"
                    elif name1 == "angular-material":
                        name1 = "angular_material"
                    elif name2 == "angular_material":
                        name2 = "angular-material"
                    elif name2 == "dojo":
                        name2 = "dojo/dojo"
                    elif name2 == "mootools":
                        name2 = "mootools-yui-compressed"
                    elif name1 == "three":
                        name1 = "threejs"
                    elif name2 == "threejs":
                        name2 = "three"
                    elif name1 == "webfontloader":
                        name1 = "webfont"
                        name2 = "webfont"
                    #Test if they exist...
                    if "-nomin" in args:
                        print("[INFO] Searching for a downloaded version of " + name1 + "." + version + ".js..." + int(80 - len("[INFO] Searching for a downloaded version of " + name1 + "." + version + ".js...")) * " " + " ", end='')
                        if os.path.exists("./assets/js/" + name1 + "." + version + ".js"):
                            print("[ ok ]")
                            continue
                        else:
                            print("[fail]")
                    else:
                        print("[INFO] Searching for a downloaded version of " + name1 + "." + version + ".js..." + int(80 - len("[INFO] Searching for a downloaded version of " + name1 + "." + version + ".js...")) * " " + " ", end='')
                        if os.path.exists("./assets/js/" + name1 + "." + version + ".min.js"):
                            print("[ ok ]")
                            continue
                        else:
                            url = "https://ajax.googleapis.com/ajax/libs/" + name1 + "/" + version + "/" + name2 + ".min.js"
                            if os.path.exists("./assets/js/" + name1 + "." + version + ".js"):
                                if not webfileExists(url):
                                    print("[ ok ]")
                                    continue
                            else:
                                print("[fail]")
                    if "-d" in args:
                        #Link the not minimized version if requested
                        if "-nomin" in args:
                            url = "https://ajax.googleapis.com/ajax/libs/" + name1 + "/" + version + "/" + name2 + ".js"
                            print("[INFO] Downloading " + name1 + "." + version + ".js" + "..." + int(80 - len("[INFO] Downloading " + name1 + "." + version + ".js" + "...")) * " " + " ", end='')
                            if webfileExists(url):
                                print("[ ok ]")
                                writeFile("./assets/js/" + name1 + "." + version + ".js", ".js", webfileDownload(url))
                                result.append("\t\t" + "<script type=\"text/javascript\" src=\"" + name1 + ".js" + "\"></script>" + "\n")
                            else:
                                print("[fail]")
                                print("[ERROR] Library not found. Try manual url (" + url + ")")
                        #Try to link the minized version if exist
                        else:
                            url = "https://ajax.googleapis.com/ajax/libs/" + name1 + "/" + version + "/" + name2 + ".min.js"
                            if webfileExists(url):
                                #No cheats allowed remember? :p
                                print("[INFO] Downloading " + name1 + "." + version + ".min.js" + "..." + int(80 - len("[INFO] Downloading " + name1 + "." + version + ".min.js" + "...")) * " " + " ", end='')
                                print("[ ok ]")
                                writeFile("./assets/js/" + name1 + "." + version + ".min.js", ".js", webfileDownload(url))
                                result.append("\t\t" + "<script type=\"text/javascript\" src=\"" + name1 + ".min.js" + "\"></script>" + "\n")
                            #Minimized version not found. Try to link the full version
                            else:
                                url = "https://ajax.googleapis.com/ajax/libs/" + name1 + "/" + version + "/" + name2 + ".js"
                                print("[INFO] Downloading " + name1 + "." + version + ".js" + "..." + int(80 - len("[INFO] Downloading " + name1 + "." + version + ".js" + "...")) * " " + " ", end='')
                                if webfileExists(url):
                                    print("[ ok ]")
                                    writeFile("./assets/js/" + name1 + "." + version + ".js", ".js", webfileDownload(url))
                                    result.append("\t\t" + "<script type=\"text/javascript\" src=\"" + name1 + ".js" + "\"></script>" + "\n")
                                else:
                                    print("[fail]")
                                    print("[ERROR] Library not found. Try manual url (" + url + ")")
                    else:
                        #Link the not minimized version if requested
                        if "-nomin" in args:
                            url = "https://ajax.googleapis.com/ajax/libs/" + name1 + "/" + version + "/" + name2 + ".js"
                            print("[INFO] Validating " + name1 + "." + version + ".js" + "..." + int(80 - len("[INFO] Validating " + name1 + "." + version + ".js" + "...")) * " " + " ", end='')
                            if webfileExists(url):
                                print("[ ok ]")
                                result.append("\t\t" + "<script type=\"text/javascript\" src=\"" + url + "\"></script>" + "\n")
                            else:
                                print("[fail]")
                                print("[ERROR] Library not found. Try manual url (" + url + ")")
                        #Try to link the minized version if exist
                        else:
                            url = "https://ajax.googleapis.com/ajax/libs/" + name1 + "/" + version + "/" + name2 + ".min.js"
                            if webfileExists(url):
                                #Same cheats again... I'm sorry
                                print("[INFO] Validating " + name1 + "." + version + ".min.js" + "..." + int(80 - len("[INFO] Validating " + name1 + "." + version + ".min.js" + "...")) * " " + " ", end='')
                                print("[ ok ]")
                                result.append("\t\t" + "<script type=\"text/javascript\" src=\"" + url + "\"></script>" + "\n")
                            #Minimized version not found. Try to link the full version
                            else:
                                url = "https://ajax.googleapis.com/ajax/libs/" + name1 + "/" + version + "/" + name2 + ".js"
                                print("[INFO] Validating " + name1 + "." + version + ".js" + "..." + int(80 - len("[INFO] Validating " + name1 + "." + version + ".js" + "...")) * " " + " ", end='')
                                if webfileExists(url):
                                    print("[ ok ]")
                                    result.append("\t\t" + "<script type=\"text/javascript\" src=\"" + url + "\"></script>" + "\n")
                                else:
                                    print("[fail]")
                                    print("[ERROR] Library not found. Try manual url (" + url + ")")
        # Local assets: a folder, or an .ass/.assets manifest file.
        if "-a" in args and not fnmatch.fnmatch(args[args.index("-a") + 1], "-?"):
            if len(os.path.splitext(args[args.index("-a") + 1])[1]) > 0:
                if os.path.splitext(args[args.index("-a") + 1])[1] in [".ass", ".assets"]:
                    #NOTE(review): the manifest is only printed, never linked
                    #into the page -- looks unfinished; confirm intent.
                    assets_buffer = "".join(readFile(args[args.index("-a") + 1], [".ass", ".assets"])).split("\n")
                    print(assets_buffer)
                else:
                    print("[ERROR] Invalid assets file. Use -h for help")
            elif not ("-nocss" in args and "-nojs" in args):
                if "-nojs" not in args:
                    for dirpath, dirnames, filenames in os.walk(args[args.index("-a") + 1]):
                        for filename in [f for f in filenames if f.endswith(".js")]:
                            result.append("\t\t" + "<script type=\"text/javascript\" src=\"" + dirpath.replace("\\", "/") + "/" + filename + "\"></script>" + "\n")
                if "-nocss" not in args:
                    for dirpath, dirnames, filenames in os.walk(args[args.index("-a") + 1]):
                        for filename in [f for f in filenames if f.endswith(".css")]:
                            result.append("\t\t" + "<link rel=\"stylesheet\" type=\"text/css\" href=\"" + dirpath.replace("\\", "/") + "/" + filename + "\" />" + "\n")
            else:
                print("[ERROR] Completely useless argument... (-a -nojs -nocss)")
        result.append("\t" + "</head>" + "\n")
    result.append("\t" + "<body>" + "\n")
    print("[INFO] Constructing body..." + int(80 - len("[INFO] Constructing body...")) * " " + " ", end='')
    # First pass: line-oriented block elements (headers, lists, pre, quotes).
    inPre = False
    inTag = False
    for each in "".join(buffer).split("\n"):
        #Test if in pre
        if inPre == True:
            if "</pre>" in each:
                result.append("\t\t" + "</pre>" + "\n")
                inPre = False
                continue
            else:
                result.append("\t\t\t" + each + "\n")
                continue
        elif "<pre" in each:
            result.append("\t\t" + each.strip() + "\n")
            inPre = True
            continue
        #if each is in tag
        if fnmatch.fnmatch(each.strip(), "<*>"):
            result.append("\t\t\t" + each + "\n")
            inTag = True
        if inTag == True:
            if fnmatch.fnmatch(each.strip(), "</*>"):
                inTag = False
                continue
            else:
                continue
        #Add the link with title to the floating data
        if fnmatch.fnmatch(each.strip(), '[[]*[]]:*\"*\"'):
            floating_data.append([ each[each.find("[") + 1:each.find("]")], each[each.find(":") + 1:each.find("\"")].strip(), each[each.find("\"") + 1:len(each.rstrip()) - 1] ])
            continue
        #Add the link without title to the floating data
        elif fnmatch.fnmatch(each.strip(), '[[]*[]]:*'):
            floating_data.append([ each[each.find("[") + 1:each.find("]")], each[each.find(":") + 1:len(each.rstrip())].strip(), "" ])
            continue
        #Identation system
        #NOTE(review): splitted_buffer.index(each) finds the FIRST occurrence
        #of the line text, so a duplicated source line resolves to the wrong
        #predecessor -- verify with markdown containing repeated lines.
        splitted_buffer = "".join(buffer).split("\n")
        if splitted_buffer[splitted_buffer.index(each) - 1].find("* ", 0, len(splitted_buffer[splitted_buffer.index(each) - 1])) + 2 == len(each) - len(each.lstrip()) or splitted_buffer[splitted_buffer.index(each) - 1].find("+ ", 0, len(splitted_buffer[splitted_buffer.index(each) - 1])) + 2 == len(each) - len(each.lstrip()) or splitted_buffer[splitted_buffer.index(each) - 1].find("- ", 0, len(splitted_buffer[splitted_buffer.index(each) - 1])) + 2 == len(each) - len(each.lstrip()) or (len(splitted_buffer[splitted_buffer.index(each) - 1]) - len(splitted_buffer[splitted_buffer.index(each) - 1].lstrip()) == len(each) - len(each.lstrip()) and len(each) - len(each.lstrip()) > 0):
            if "</ul>" in result[len(result) - 1]:
                #Change into multiline list
                if "<li>" in result[len(result) - 2] and "</li>" in result[len(result) - 2]:
                    result.append("\t\t\t\t" + each.lstrip() + "\n")
                    result.append("\t\t\t" + "</li>" + "\n")
                    result.append("\t\t" + "</ul>" + "\n")
                    result[len(result) - 4] = "\t\t\t\t" + result[len(result) - 5][7:len(result[len(result) - 5]) - 6] + "\n"
                    result[len(result) - 5] = "\t\t\t" + "<li>" + "\n"
                #Extend multiline list
                elif not "<li>" in result[len(result) - 2] and "</li>" in result[len(result) - 2]:
                    result[len(result) - 2] = "\t\t\t\t" + each.lstrip() + "\n"
                    result[len(result) - 1] = "\t\t\t" + "</li>" + "\n"
                    result.append("\t\t" + "</ul>" + "\n")
            continue
        splitted_buffer = "".join(buffer).split("\n")
        if len(splitted_buffer[splitted_buffer.index(each) - 1].strip().replace(".","")) > 0 and splitted_buffer[splitted_buffer.index(each) - 1].strip().replace(".","")[0].isdigit() or (len(splitted_buffer[splitted_buffer.index(each) - 1]) - len(splitted_buffer[splitted_buffer.index(each) - 1].lstrip()) == len(each) - len(each.lstrip()) and len(each) - len(each.lstrip()) > 0):
            if "</ol>" in result[len(result) - 1] or "</ol>" in result[len(result) - 2]:
                #Change into multiline list
                if "<li>" in result[len(result) - 2] and "</li>" in result[len(result) - 2]:
                    result.append("\t\t\t\t" + each.lstrip() + "\n")
                    result.append("\t\t\t" + "</li>" + "\n")
                    result.append("\t\t" + "</ol>" + "\n")
                    result[len(result) - 4] = "\t\t\t\t" + result[len(result) - 5][7:len(result[len(result) - 5]) - 6] + "\n"
                    result[len(result) - 5] = "\t\t\t" + "<li>" + "\n"
                #Extend multiline list
                elif not "<li>" in result[len(result) - 2] and "</li>" in result[len(result) - 2]:
                    result[len(result) - 2] = "\t\t\t\t" + each.lstrip() + "\n"
                    result[len(result) - 1] = "\t\t\t" + "</li>" + "\n"
                    result.append("\t\t" + "</ol>" + "\n")
                continue
        #The indented code blocks
        if len(each) - len(each.lstrip()) >= 4:
            #NOTE(review): this replace is a no-op as written; it was
            #presumably meant to escape "<"/">" as HTML entities -- confirm
            #against the project history.
            each = each.replace("<", "<").replace(">", ">")
            if len(each.replace(" ", "")) > 0:
                #Change into multiline paragraph
                if "<pre>" in result[len(result) - 1] and "</pre>" in result[len(result) - 1]:
                    result.append("\t\t\t" + result[len(result) - 1][7:len(result[len(result) - 1]) - 7] + "\n")
                    result.append("\t\t\t" + each[4:len(each)] + "\n")
                    result.append("\t\t" + "</pre>" + "\n")
                    #change the old line
                    result[len(result) - 4] = "\t\t" + "<pre>" + "\n";
                #Extend multiline paragraph
                elif not "<pre>" in result[len(result) - 1] and "</pre>" in result[len(result) - 1]:
                    result[len(result) - 1] = "\t\t\t" + each[4:len(each)] + "\n"
                    result.append("\t\t" + "</pre>" + "\n")
                #Old fasioned singleline paragraph
                else:
                    result.append("\t\t" + "<pre>" + each[4:len(each)] + "</pre>" + "\n")
            continue
        #Standard h6
        if "######" in each and each.strip().replace("#", "") == "":
            token_pos = each.find("######", 0, len(each)) + 6
            result.append("\t\t" + "<h6>" + each[token_pos:len(each)] + "</h6>" + "\n")
        #Standard h5
        elif "#####" in each and each.strip().replace("#", "") == "":
            token_pos = each.find("#####", 0, len(each)) + 5
            result.append("\t\t" + "<h5>" + each[token_pos:len(each)] + "</h5>" + "\n")
        #Standard h4
        elif "####" in each and each.strip().replace("#", "") == "":
            token_pos = each.find("####", 0, len(each)) + 4
            result.append("\t\t" + "<h4>" + each[token_pos:len(each)] + "</h4>" + "\n")
        #Standard h3
        elif "###" in each and each.strip().replace("#", "") == "":
            token_pos = each.find("###", 0, len(each)) + 3
            result.append("\t\t" + "<h3>" + each[token_pos:len(each)] + "</h3>" + "\n")
        #Standard h2
        elif "##" in each and each.strip().replace("#", "") == "":
            token_pos = each.find("##", 0, len(each)) + 2
            result.append("\t\t" + "<h2>" + each[token_pos:len(each)] + "</h2>" + "\n")
        #Standard h1
        elif "#" in each and each.strip().replace("#", "") == "":
            token_pos = each.find("#", 0, len(each)) + 1
            result.append("\t\t" + "<h1>" + each[token_pos:len(each)] + "</h1>" + "\n")
        #Alternative h1
        elif each.strip().replace("=", "") == "" and "=" in each:
            result[len(result) - 1] = "\t\t" + "<h1>" + result[len(result) - 1][5:len(result[len(result) - 1]) - 5].strip() + "</h1>" + "\n"
        #Alternative h2
        elif each.strip().replace("-", "") == "" and "-" in each:
            result[len(result) - 1] = "\t\t" + "<h2>" + result[len(result) - 1][5:len(result[len(result) - 1]) - 5].strip() + "</h2>" + "\n"
        #Unorganized list
        elif each.strip()[0:2] == "* " or each.strip()[0:2] == "+ " or each.strip()[0:2] == "- ":
            if "* " in each:
                token_pos = each.find("* ", 0, len(each)) + 2
            elif "+ " in each:
                token_pos = each.find("+ ", 0, len(each)) + 2
            elif "- " in each:
                token_pos = each.find("- ", 0, len(each)) + 2
            if "</ul>" in result[len(result) - 1]:
                result[len(result) - 1] = "\t\t\t" + "<li>" + each[token_pos:len(each)].lstrip() + "</li>" + "\n"
                result.append("\t\t" + "</ul>" + "\n")
            else:
                result.append("\t\t" + "<ul>" + "\n")
                result.append("\t\t\t" + "<li>" + each[token_pos:len(each)].lstrip() + "</li>" + "\n")
                result.append("\t\t" + "</ul>" + "\n")
        #Organized list
        elif len(each.strip().replace(".","")) > 0 and each.strip().replace(".","")[0].isdigit():
            if fnmatch.fnmatch(each.strip()[0:2], "? "):
                token_pos = len(each) - len(each.lstrip()) + 2
            elif fnmatch.fnmatch(each.strip()[0:3], "?? ") or fnmatch.fnmatch(each.strip()[0:3], "?. "):
                token_pos = len(each) - len(each.lstrip()) + 3
            elif fnmatch.fnmatch(each.strip()[0:4], "??. "):
                token_pos = len(each) - len(each.lstrip()) + 4
            if "</ol>" in result[len(result) - 1]:
                result[len(result) - 1] = "\t\t\t" + "<li>" + each[token_pos:len(each)].lstrip() + "</li>" + "\n"
                result.append("\t\t" + "</ol>" + "\n")
            else:
                result.append("\t\t" + "<ol>" + "\n")
                result.append("\t\t\t" + "<li>" + each[token_pos:len(each)].lstrip() + "</li>" + "\n")
                result.append("\t\t" + "</ol>" + "\n")
        elif each != "" and each.strip()[0] == ">":
            result.append("\t\t" + "<blockquote>" + each.lstrip()[1:len(each.lstrip())] + "</blockquote>" + "\n")
        #And eventually place the rest of the text in paragraphs
        elif len(each.replace(" ", "")) > 0:
            #Change into multiline paragraph
            if "<p>" in result[len(result) - 1] and "</p>" in result[len(result) - 1]:
                result.append("\t\t\t" + result[len(result) - 1][5:len(result[len(result) - 1]) - 5] + "\n")
                result.append("\t\t\t" + each + "\n")
                result.append("\t\t" + "</p>" + "\n")
                #change the old line
                result[len(result) - 4] = "\t\t" + "<p>" + "\n";
            #Extend multiline paragraph
            elif not "<p>" in result[len(result) - 1] and "</p>" in result[len(result) - 1]:
                result[len(result) - 1] = "\t\t\t" + each + "\n"
                result.append("\t\t" + "</p>" + "\n")
            #Old fasioned singleline paragraph
            else:
                result.append("\t\t" + "<p>" + each + "</p>" + "\n")
        #Add the blank lines
        elif result[len(result) - 1] != "\n":
            result.append("\n");
    #end of loop
    print("[ ok ]")
    #Standard html rules
    result.append("\t" + "</body>" + "\n")
    result.append("</html>" + "\n")
    print("[INFO] Adding text effects..." + int(80 - len("[INFO] Adding text effects...")) * " " + " ", end='')
    #Text effects and links and images
    # Second pass: repeat over the generated lines until a full sweep makes
    # no further inline replacement (bold/italic/code, images, links).
    all_found = False
    inPre = False
    while all_found == False:
        all_found = True
        for x in range(0, len(result)):
            each = result[x]
            #if each is in tag
            if "<pre" in each:
                inPre = True
            if inPre == True:
                if "</pre>" in each:
                    inPre = False
                    continue
                else:
                    continue
            #if double space replace with linebreak
            if "  " in each[len(each) - 3:len(each)]:
                result[x] = each[0:len(each) - 3] + "<br/>" + "\n"
            #The standard text effects
            if each.find("**", each.find("**") + 1) != -1:
                result[x] = each[0:each.find("**")] + "<b>" + each[each.find("**") + 2:each.find("**", each.find("**") + 1)] + "</b>" + each[each.find("**", each.find("**") + 1) + 2:len(each)]
                all_found = False
            elif each.find("*", each.find("*") + 1) != -1:
                result[x] = each[0:each.find("*")] + "<i>" + each[each.find("*") + 1:each.find("*", each.find("*") + 1)] + "</i>" + each[each.find("*", each.find("*") + 1) + 1:len(each)]
                all_found = False
            elif each.find("__", each.find("__") + 1) != -1:
                result[x] = each[0:each.find("__")] + "<u>" + each[each.find("__") + 2:each.find("__", each.find("__") + 1)] + "</u>" + each[each.find("__", each.find("__") + 1) + 2:len(each)]
                all_found = False
            elif each.find("~~", each.find("~~") + 1) != -1:
                result[x] = each[0:each.find("~~")] + "<s>" + each[each.find("~~") + 2:each.find("~~", each.find("~~") + 1)] + "</s>" + each[each.find("~~", each.find("~~") + 1) + 2:len(each)]
                all_found = False
            elif each.find("`", each.find("`") + 1) != -1:
                result[x] = each[0:each.find("`")] + "<code>" + each[each.find("`") + 1:each.find("`", each.find("`") + 1)] + "</code>" + each[each.find("`", each.find("`") + 1) + 1:len(each)]
                all_found = False
            #Image with title and size
            elif fnmatch.fnmatch(each, "*![[]*[]](*\"*\" =*x*)*"):
                result[x] = each[0:each.find("!")] + "<img src=\"" + each[each.find("(") + 1:each.find("\"")].strip() + "\" alt=\"" + each[each.find("[") + 1:each.find("]")] + "\" title=\"" + each[each.find("\"") + 1:each.find("\"", each.find("\"") + 1)] + "\" style=\"width: " + each[each.find(" =") + 2:each.find("x", each.find(" ="))].strip() + "px; height: " + each[each.find("x", each.find(" =")) + 1:each.find(")")].strip() + "px;\" />" + each[each.find(")") + 1:len(each)]
                all_found = False
            #Image with title and without size
            elif fnmatch.fnmatch(each, "*![[]*[]](*\"*\")*"):
                result[x] = each[0:each.find("!")] + "<img src=\"" + each[each.find("(") + 1:each.find("\"")].strip() + "\" alt=\"" + each[each.find("[") + 1:each.find("]")] + "\" title=\"" + each[each.find("\"") + 1:each.find("\"", each.find("\"") + 1)] + "\" />" + each[each.find(")") + 1:len(each)]
                all_found = False
            #Image without title and with size
            elif fnmatch.fnmatch(each, "*![[]*[]](* =*x*)*"):
                result[x] = each[0:each.find("!")] + "<img src=\"" + each[each.find("(") + 1:each.find(")")].strip() + "\" alt=\"" + each[each.find("[") + 1:each.find("]")] + "\" style=\"width: " + each[each.find(" =") + 2:each.find("x", each.find(" ="))].strip() + "px; height: " + each[each.find("x", each.find(" =")) + 1:each.find(")")].strip() + "px;\" />" + each[each.find(")") + 1:len(each)]
                all_found = False
            #Image without title and size
            elif fnmatch.fnmatch(each, "*![[]*[]](*)*"):
                result[x] = each[0:each.find("!")] + "<img src=\"" + each[each.find("(") + 1:each.find(")")].strip() + "\" alt=\"" + each[each.find("[") + 1:each.find("]")] + "\" />" + each[each.find(")") + 1:len(each)]
                all_found = False
            #Link with title
            elif fnmatch.fnmatch(each, "*[[]*[]](*\"*\")*") or fnmatch.fnmatch(each, "*[[]*[]] (*\"*\")*"):
                result[x] = each[0:each.find("[")] + "<a href=\"" + each[each.find("(") + 1:each.find("\"")].strip() + "\" title=\"" + each[each.find("\"") + 1:each.find("\"", each.find("\"") + 1)] + "\">" + each[each.find("[") + 1:each.find("]")] + "</a>" + each[each.find(")") + 1:len(each)]
                all_found = False
            #Link without title
            elif fnmatch.fnmatch(each, "*[[]*[]](*)*") or fnmatch.fnmatch(each, "*[[]*[]] (*)*"):
                result[x] = each[0:each.find("[")] + "<a href=\"" + each[each.find("(") + 1:each.find(")")].strip() + "\">" + each[each.find("[") + 1:each.find("]")] + "</a>" + each[each.find(")") + 1:len(each)]
                all_found = False
            #Link with floating data
            elif fnmatch.fnmatch(each, "*[[]*[]][[]*[]]*") or fnmatch.fnmatch(each, "*[[]*[]] [[]*[]]*"):
                link_data = []
                regex = each[each.find("[", each.find("[") + 1) + 1:each.find("]", each.find("]") + 1)]
                for i in floating_data:
                    if i[0] == regex:
                        link_data = i
                        break
                if len(link_data) > 0:
                    if len(link_data[2]) > 0:
                        result[x] = each[0:each.find("[")] + "<a href=\"" + link_data[1] + "\" title=\"" + link_data[2] + "\">" + each[each.find("[") + 1:each.find("]")] + "</a>" + each[each.find("]", each.find("]") + 1) + 1:len(each)]
                    else:
                        result[x] = each[0:each.find("[")] + "<a href=\"" + link_data[1] + "\">" + each[each.find("[") + 1:each.find("]")] + "</a>" + each[each.find("]", each.find("]") + 1) + 1:len(each)]
                    all_found = False
                else:
                    print("")
                    print("[ERROR] Dead link found...")
                    print("[INFO] Going on..." + int(80 - len("[INFO] Going on...")) * " " + " ", end='')
            #Link with floating data and only text
            elif fnmatch.fnmatch(each, "*[[]*[]]*"):
                link_data = []
                regex = each[each.find("[") + 1:each.find("]")]
                for i in floating_data:
                    if i[0] == regex:
                        link_data = i
                        break
                if len(link_data) > 0:
                    if len(link_data[2]) > 0:
                        result[x] = each[0:each.find("[")] + "<a href=\"" + link_data[1] + "\" title=\"" + link_data[2] + "\">" + link_data[0] + "</a>" + each[each.find("]") + 1:len(each)]
                    else:
                        result[x] = each[0:each.find("[")] + "<a href=\"" + link_data[1] + "\">" + link_data[0] + "</a>" + each[each.find("]") + 1:len(each)]
                    all_found = False
            #end of for loop
        #end of while loop
    print("[ ok ]")
    #finally return the converted markdown
    return result
def main():
    """Command line entry point: show help or run the conversion."""
    if not testArgs(sys.argv, 1, False):
        return
    if sys.argv[1] == "-h":
        showHelp()
        return
    buffer = readFile(sys.argv[1], [".md", ".markdown"])
    if len(buffer) > 0:
        writeFile(sys.argv[1], ".html", "".join(convert(buffer, sys.argv[2:])))
main()
| |
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Translation function factory
"""
import gettext
import os
import six
from oslo_i18n import _lazy
from oslo_i18n import _locale
from oslo_i18n import _message
# Public API of this module.
__all__ = [
    'TranslatorFactory',
]
# magic gettext marker string used to separate a msgctxt context from the
# message body inside a single catalog key
CONTEXT_SEPARATOR = _message.CONTEXT_SEPARATOR
class TranslatorFactory(object):
    """Create translator functions (plain, contextual, plural and
    per-log-level) bound to one message catalog domain."""
    def __init__(self, domain, localedir=None):
        """Establish a set of translation functions for the domain.
        :param domain: Name of translation domain,
                       specifying a message catalog.
        :type domain: str
        :param localedir: Directory with translation catalogs.
        :type localedir: str
        """
        self.domain = domain
        # When no localedir is given, fall back to the per-domain
        # environment variable (e.g. <DOMAIN>_LOCALEDIR).
        if localedir is None:
            variable_name = _locale.get_locale_dir_variable_name(domain)
            localedir = os.environ.get(variable_name)
        self.localedir = localedir
    def _make_translation_func(self, domain=None):
        """Return a translation function ready for use with messages.
        The returned function takes a single value, the unicode string
        to be translated.  The return type varies depending on whether
        lazy translation is being done.  When lazy translation is
        enabled, :class:`Message` objects are returned instead of
        regular :class:`unicode` strings.
        The domain argument can be specified to override the default
        from the factory, but the localedir from the factory is always
        used because we assume the log-level translation catalogs are
        installed in the same directory as the main application
        catalog.
        """
        if domain is None:
            domain = self.domain
        # fallback=True: missing catalogs degrade to identity translation
        # instead of raising.
        t = gettext.translation(domain,
                                localedir=self.localedir,
                                fallback=True)
        # Use the appropriate method of the translation object based
        # on the python version.
        m = t.gettext if six.PY3 else t.ugettext
        def f(msg):
            """oslo_i18n.gettextutils translation function."""
            # Lazy mode defers the actual lookup until the Message is
            # rendered, so only the domain is captured here.
            if _lazy.USE_LAZY:
                return _message.Message(msg, domain=domain)
            return m(msg)
        return f
    def _make_contextual_translation_func(self, domain=None):
        """Return a translation function ready for use with context messages.
        The returned function takes two values, the context of
        the unicode string, the unicode string to be translated.
        The returned type is the same as
        :method:`TranslatorFactory._make_translation_func`.
        The domain argument is the same as
        :method:`TranslatorFactory._make_translation_func`.
        """
        if domain is None:
            domain = self.domain
        t = gettext.translation(domain,
                                localedir=self.localedir,
                                fallback=True)
        # Use the appropriate method of the translation object based
        # on the python version.
        m = t.gettext if six.PY3 else t.ugettext
        def f(ctx, msg):
            """oslo.i18n.gettextutils translation with context function."""
            if _lazy.USE_LAZY:
                msgid = (ctx, msg)
                return _message.Message(msgid, domain=domain,
                                        has_contextual_form=True)
            # Eager path: look up "<ctx><SEP><msg>" as a single catalog key.
            msgctx = "%s%s%s" % (ctx, CONTEXT_SEPARATOR, msg)
            s = m(msgctx)
            if CONTEXT_SEPARATOR in s:
                # Translation not found
                return msg
            return s
        return f
    def _make_plural_translation_func(self, domain=None):
        """Return a plural translation function ready for use with messages.
        The returned function takes three values, the single form of
        the unicode string, the plural form of the unicode string,
        the count of items to be translated.
        The returned type is the same as
        :method:`TranslatorFactory._make_translation_func`.
        The domain argument is the same as
        :method:`TranslatorFactory._make_translation_func`.
        """
        if domain is None:
            domain = self.domain
        t = gettext.translation(domain,
                                localedir=self.localedir,
                                fallback=True)
        # Use the appropriate method of the translation object based
        # on the python version.
        m = t.ngettext if six.PY3 else t.ungettext
        def f(msgsingle, msgplural, msgcount):
            """oslo.i18n.gettextutils plural translation function."""
            if _lazy.USE_LAZY:
                msgid = (msgsingle, msgplural, msgcount)
                return _message.Message(msgid, domain=domain,
                                        has_plural_form=True)
            return m(msgsingle, msgplural, msgcount)
        return f
    @property
    def primary(self):
        "The default translation function."
        return self._make_translation_func()
    @property
    def contextual_form(self):
        """The contextual translation function.
        The returned function takes two values, the context of
        the unicode string, the unicode string to be translated.
        """
        return self._make_contextual_translation_func()
    @property
    def plural_form(self):
        """The plural translation function.
        The returned function takes three values, the single form of
        the unicode string, the plural form of the unicode string,
        the count of items to be translated.
        """
        return self._make_plural_translation_func()
    def _make_log_translation_func(self, level):
        # Log translations live in a sibling catalog named
        # "<domain>-log-<level>" in the same localedir.
        return self._make_translation_func(self.domain + '-log-' + level)
    @property
    def log_info(self):
        "Translate info-level log messages."
        return self._make_log_translation_func('info')
    @property
    def log_warning(self):
        "Translate warning-level log messages."
        return self._make_log_translation_func('warning')
    @property
    def log_error(self):
        "Translate error-level log messages."
        return self._make_log_translation_func('error')
    @property
    def log_critical(self):
        "Translate critical-level log messages."
        return self._make_log_translation_func('critical')
| |
# Author: Travis Oliphant
# 2003
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, size, polyval, polyint, log10
def sawtooth(t, width=1):
    """Return a periodic sawtooth waveform with period 2*pi.

    The wave rises from -1 to 1 on [0, width*2*pi] and drops from 1 to -1
    on [width*2*pi, 2*pi].

    :param t: time values (array_like, any shape).
    :param width: rising-edge fraction of the period; must be in [0, 1].
        Positions where width falls outside [0, 1] yield nan.
    :return: ndarray shaped like the broadcast of t and width.
    """
    t, w = asarray(t), asarray(width)
    # Broadcast t and width against each other.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # Bug fix: membership must be tested against the STRING 'fFdD'; the
    # old list form ``in ['fFdD']`` compared against a single 4-character
    # element and could never match, silently upcasting every output to
    # float64.
    if t.dtype.char in 'fFdD':
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # width must be between 0 and 1 inclusive
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # take t modulo 2*pi
    tmod = mod(t, 2 * pi)

    # On [0, width*2*pi) the waveform is tmod / (pi*w) - 1.
    # (``~mask`` replaces the old ``1 - mask`` arithmetic, which modern
    # numpy rejects for boolean arrays.)
    mask2 = (~mask1) & (tmod < w * 2 * pi)
    tsub = extract(mask2, tmod)
    wsub = extract(mask2, w)
    place(y, mask2, tsub / (pi * wsub) - 1)

    # On [width*2*pi, 2*pi) the waveform is (pi*(w+1)-tmod) / (pi*(1-w)).
    mask3 = (~mask1) & (~mask2)
    tsub = extract(mask3, tmod)
    wsub = extract(mask3, w)
    place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
    return y
def square(t, duty=0.5):
    """Return a periodic square-wave waveform with period 2*pi.

    The waveform is +1 from 0 to 2*pi*duty and -1 from 2*pi*duty to
    2*pi.

    Parameters
    ----------
    t : array_like
        Times at which to evaluate the waveform.
    duty : array_like, optional
        Fraction of the period spent at +1; must be in [0, 1].
        Positions where duty is outside that range yield nan.

    Returns
    -------
    y : ndarray
        Square-wave values with the broadcast shape of `t` and `duty`.
    """
    t, w = asarray(t), asarray(duty)
    # Broadcast t and duty against each other.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # Preserve a floating/complex input dtype; everything else becomes
    # float64.  BUG FIX: the original tested `in ['fFdD']` -- membership
    # in a one-element list -- which was always False and silently
    # promoted float32/complex inputs to float64.
    if t.dtype.char in 'fFdD':
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # duty must be between 0 and 1 inclusive; out-of-range -> nan
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # take t modulo 2*pi
    tmod = mod(t, 2 * pi)

    # On the interval 0 to duty*2*pi the function is 1.
    # (The original also extracted tmod/w here, but never used the
    # results -- dead code removed.)
    mask2 = (1 - mask1) & (tmod < w * 2 * pi)
    place(y, mask2, 1)

    # On the interval duty*2*pi to 2*pi the function is -1.
    mask3 = (1 - mask1) & (1 - mask2)
    place(y, mask3, -1)
    return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=0, retenv=0):
    """Return a Gaussian modulated sinusoid: exp(-a t^2) exp(1j*2*pi*fc*t).

    If `retquad` is non-zero, return the real and imaginary parts
    (in-phase and quadrature).  If `retenv` is non-zero, also return the
    envelope (unmodulated signal).  Otherwise return only the real part
    of the modulated sinusoid.

    Inputs:

       t   -- Input array, or the string 'cutoff' to get the cutoff time.
       fc  -- Center frequency (Hz); must be >= 0.
       bw  -- Fractional bandwidth in frequency domain of pulse (Hz); > 0.
       bwr -- Reference level at which fractional bandwidth is calculated
              (dB); must be < 0.
       tpr -- If t is 'cutoff', the function returns the cutoff time for
              when the pulse amplitude falls below tpr (in dB); must be < 0.
       retquad -- Return the quadrature (imaginary) as well as the real part.
       retenv  -- Return the envelope of the signal.

    Raises ValueError for out-of-range fc, bw, bwr or tpr.
    """
    if fc < 0:
        raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
    if bw <= 0:
        raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
    if bwr >= 0:
        raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
                         "be < 0 dB" % bwr)
    # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
    #
    # BUG FIX: divisors made floats.  Under Python 2 `bwr/20` was integer
    # floor division (-6/20 -> -1, not -0.3), which silently used the
    # wrong reference level.  (The py2-only `raise E, msg` statements were
    # also converted to the py2/py3-compatible call form.)
    ref = pow(10, bwr / 20.0)
    # fdel = fc*bw/2:  g(fdel) = ref --- solve this for a
    #
    # pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
    a = -(pi * fc * bw) ** 2 / (4 * log(ref))

    # isinstance guard keeps ndarray inputs from being compared
    # elementwise against the string sentinel.
    if isinstance(t, str) and t == 'cutoff':  # compute cut-off point
        # Solve exp(-a tc**2) = tref for tc:
        #   tc = sqrt(-log(tref) / a)  where  tref = 10^(tpr/20)
        if tpr >= 0:
            raise ValueError("Reference level for time cutoff must be < 0 dB")
        tref = pow(10, tpr / 20.0)
        return sqrt(-log(tref) / a)

    yenv = exp(-a * t * t)
    yI = yenv * cos(2 * pi * fc * t)
    yQ = yenv * sin(2 * pi * fc * t)
    if not retquad and not retenv:
        return yI
    if not retquad and retenv:
        return yI, yenv
    if retquad and not retenv:
        return yI, yQ
    return yI, yQ, yenv
def chirp(t, f0=0, t1=1, f1=100, method='linear', phi=0, qshape=None):
    """Frequency-swept cosine generator.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    f0 : float or ndarray, optional
        Frequency (in Hz) of the waveform at time 0.  If `f0` is an
        ndarray, it specifies the frequency change as a polynomial in
        `t` (see Notes below).
    t1 : float, optional
        Time at which `f1` is specified.
    f1 : float, optional
        Frequency (in Hz) of the waveform at time `t1`.
    method : {'linear', 'quadratic', 'logarithmic'}, optional
        Kind of frequency sweep.
    phi : float
        Phase offset, in degrees.
    qshape : {'convex', 'concave'}
        If method is 'quadratic', `qshape` specifies its shape.

    Notes
    -----
    If `f0` is an array, it forms the coefficients of a polynomial in
    `t` (see `numpy.polyval`).  The polynomial determines the waveform
    frequency change in time.  In this case, the values of `f1`, `t1`,
    `method`, and `qshape` are ignored.
    """
    # Convert to radians.
    phi *= pi / 180
    if size(f0) > 1:
        # We were given a polynomial of frequency coefficients; integrate
        # it to obtain the phase polynomial.
        return cos(2*pi*polyval(polyint(f0),t)+phi)
    if method in ['linear','lin','li']:
        # Instantaneous frequency f(t) = f0 + beta*t; phase is its integral.
        beta = (f1-f0)/t1
        phase_angle = 2*pi * (f0*t + 0.5*beta*t*t)
    elif method in ['quadratic','quad','q']:
        # The qshape branches pick which of f0/f1 plays the low/high role
        # by swapping them; order of these assignments matters.
        if qshape == 'concave':
            mxf = max(f0,f1)
            mnf = min(f0,f1)
            f1,f0 = mxf, mnf
        elif qshape == 'convex':
            mxf = max(f0,f1)
            mnf = min(f0,f1)
            f1,f0 = mnf, mxf
        else:
            raise ValueError("qshape must be either 'concave' or 'convex' but "
                             "a value of %r was given." % qshape)
        beta = (f1-f0)/t1/t1
        phase_angle = 2*pi * (f0*t + beta*t*t*t/3)
    elif method in ['logarithmic','log','lo']:
        if f1 <= f0:
            raise ValueError(
                "For a logarithmic sweep, f1=%f must be larger than f0=%f."
                % (f1, f0))
        # NOTE(review): this uses log10(f1-f0), i.e. the instantaneous
        # frequency is f0 + 10**(beta*t), which equals f0+1 (not f0) at
        # t=0 -- looks like an off-by-one in the sweep definition; confirm
        # against the intended spec before changing, since callers may
        # depend on the current output.
        beta = log10(f1-f0)/t1
        phase_angle = 2*pi * (f0*t + (pow(10,beta*t)-1)/(beta*log(10)))
    else:
        raise ValueError("method must be 'linear', 'quadratic', or "
                         "'logarithmic' but a value of %r was given." % method)
    return cos(phase_angle + phi)
| |
import datetime
import base64
from bitmovin import Bitmovin, Encoding, HTTPSInput, S3Output, H264CodecConfiguration, AACCodecConfiguration, \
H264Profile, StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, ProgressiveTSMuxing, \
MuxingStream, CloudRegion, HlsManifest, VariantStream, RawID3Tag, FrameIdID3Tag, PlainTextID3Tag, \
ID3TagPositionMode
from bitmovin.errors import BitmovinError
# Bitmovin account API key (placeholder -- replace before running).
API_KEY = '<YOUR_API_KEY>'

# Source file is fetched from https://<INSERT_YOUR_HTTP_HOST>/<INSERT_YOUR_HTTP_PATH>
HTTPS_INPUT_HOST = '<INSERT_YOUR_HTTP_HOST>'
HTTPS_INPUT_PATH = '<INSERT_YOUR_HTTP_PATH>'

# Credentials and bucket for the S3 output where results are written.
S3_OUTPUT_ACCESSKEY = '<YOUR_S3_OUTPUT_ACCESSKEY>'
S3_OUTPUT_SECRETKEY = '<YOUR_S3_OUTPUT_SECRETKEY>'
S3_OUTPUT_BUCKETNAME = '<YOUR_S3_OUTPUT_BUCKETNAME>'

# Timestamp component that makes each run's output path unique, e.g.
# '2018-01-02__12-34-56'.  Note the trailing replace doubles the
# underscore introduced by the space replacement.
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
OUTPUT_BASE_PATH = 'your/output/base/path/{}/'.format(date_component)
def main():
    """Run the example encoding workflow.

    Encodes an HTTPS input into two progressive-TS renditions (480p and
    360p H.264 + shared AAC audio) on S3, attaches the same set of
    Raw/FrameId/PlainText ID3 tags to both muxings, then generates an
    HLS master manifest referencing both renditions.  Blocks until the
    encoding and the manifest generation finish (or fail).
    """
    bitmovin = Bitmovin(api_key=API_KEY)

    # --- Input / output resources -------------------------------------
    https_input = HTTPSInput(name='create_simple_encoding HTTPS input', host=HTTPS_INPUT_HOST)
    https_input = bitmovin.inputs.HTTPS.create(https_input).resource

    s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
                         secret_key=S3_OUTPUT_SECRETKEY,
                         bucket_name=S3_OUTPUT_BUCKETNAME,
                         name='Sample S3 Output')
    s3_output = bitmovin.outputs.S3.create(s3_output).resource

    encoding = Encoding(name='example hls progressive ts encoding with various id3 tags',
                        cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
    encoding = bitmovin.encodings.Encoding.create(encoding).resource

    # --- Codec configurations: two H.264 renditions + one AAC audio ---
    video_codec_configuration_480p = H264CodecConfiguration(name='example_video_codec_configuration_480p',
                                                            bitrate=1200000,
                                                            height=480,
                                                            profile=H264Profile.MAIN,
                                                            rate=None)
    video_codec_configuration_480p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_480p).resource

    video_codec_configuration_360p = H264CodecConfiguration(name='example_video_codec_configuration_360p',
                                                            bitrate=800000,
                                                            height=360,
                                                            profile=H264Profile.MAIN,
                                                            rate=None)
    video_codec_configuration_360p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_360p).resource

    audio_codec_configuration = AACCodecConfiguration(name='example_audio_codec_configuration_english',
                                                      bitrate=128000,
                                                      rate=44100)
    audio_codec_configuration = bitmovin.codecConfigurations.AAC.create(audio_codec_configuration).resource

    # --- Streams ------------------------------------------------------
    video_input_stream = StreamInput(input_id=https_input.id,
                                     input_path=HTTPS_INPUT_PATH,
                                     selection_mode=SelectionMode.AUTO)
    audio_input_stream = StreamInput(input_id=https_input.id,
                                     input_path=HTTPS_INPUT_PATH,
                                     selection_mode=SelectionMode.AUTO)

    video_stream_480p = Stream(codec_configuration_id=video_codec_configuration_480p.id,
                               input_streams=[video_input_stream], name='Sample Stream 480p')
    video_stream_480p = bitmovin.encodings.Stream.create(object_=video_stream_480p,
                                                         encoding_id=encoding.id).resource

    video_stream_360p = Stream(codec_configuration_id=video_codec_configuration_360p.id,
                               input_streams=[video_input_stream], name='Sample Stream 360p')
    video_stream_360p = bitmovin.encodings.Stream.create(object_=video_stream_360p,
                                                         encoding_id=encoding.id).resource

    audio_stream = Stream(codec_configuration_id=audio_codec_configuration.id,
                          input_streams=[audio_input_stream], name='Sample Stream AUDIO')
    audio_stream = bitmovin.encodings.Stream.create(object_=audio_stream,
                                                    encoding_id=encoding.id).resource

    # --- Muxings ------------------------------------------------------
    acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)

    video_muxing_stream_480p = MuxingStream(video_stream_480p.id)
    video_muxing_stream_360p = MuxingStream(video_stream_360p.id)
    audio_muxing_stream = MuxingStream(audio_stream.id)

    muxing_480p_path = '480p'
    muxing_480p_output = EncodingOutput(output_id=s3_output.id,
                                        output_path=OUTPUT_BASE_PATH + muxing_480p_path,
                                        acl=[acl_entry])
    muxing_480p = ProgressiveTSMuxing(segment_length=4,
                                      filename='progressive.ts',
                                      streams=[video_muxing_stream_480p, audio_muxing_stream],
                                      outputs=[muxing_480p_output],
                                      name='Sample Muxing 480p')
    muxing_480p = bitmovin.encodings.Muxing.ProgressiveTS.create(object_=muxing_480p,
                                                                 encoding_id=encoding.id).resource

    muxing_360p_path = '360p'
    muxing_360p_output = EncodingOutput(output_id=s3_output.id,
                                        output_path=OUTPUT_BASE_PATH + muxing_360p_path,
                                        acl=[acl_entry])
    muxing_360p = ProgressiveTSMuxing(segment_length=4,
                                      filename='progressive.ts',
                                      streams=[video_muxing_stream_360p, audio_muxing_stream],
                                      outputs=[muxing_360p_output],
                                      name='Sample Muxing 360p')
    muxing_360p = bitmovin.encodings.Muxing.ProgressiveTS.create(object_=muxing_360p,
                                                                 encoding_id=encoding.id).resource

    # --- ID3 tags -----------------------------------------------------
    # Each tag is attached to BOTH muxings so the 360p and 480p
    # renditions carry identical metadata.  (This replaces the original's
    # 12 near-identical create calls with three tag/muxing loops; the
    # call order -- per tag, 360p then 480p -- is preserved.)
    raw_id3_tags = [
        RawID3Tag(position_mode=ID3TagPositionMode.TIME,
                  time=1.0,
                  bytes_=base64.b64encode(b'My awesome Raw ID3 Tag #1').decode('utf-8'),
                  name='Raw ID3 #1', description='Just some descriptive information'),
        RawID3Tag(position_mode=ID3TagPositionMode.TIME,
                  time=2.0,
                  bytes_=base64.b64encode(b'My awesome Raw ID3 Tag #2').decode('utf-8'),
                  name='Raw ID3 #2', description='Just some descriptive information'),
    ]
    frame_id_id3_tags = [
        FrameIdID3Tag(position_mode=ID3TagPositionMode.TIME,
                      time=5.12,
                      frame_id='ABCD',
                      bytes_=base64.b64encode(b'My awesome FrameId ID3 Tag #1').decode('utf-8'),
                      name='FrameId ID3 #1', description='Just some descriptive information'),
        FrameIdID3Tag(position_mode=ID3TagPositionMode.TIME,
                      time=6.3422172,
                      frame_id='EFGH',
                      bytes_=base64.b64encode(b'My awesome FrameId ID3 Tag #2').decode('utf-8'),
                      name='FrameId ID3 #2', description='Just some descriptive information'),
    ]
    plain_text_id3_tags = [
        PlainTextID3Tag(position_mode=ID3TagPositionMode.TIME,
                        time=8.34,
                        frame_id='IJKL',
                        text='My awesome PlainText ID3 Tag #1',
                        name='PlainText ID3 #1', description='Just some descriptive information'),
        PlainTextID3Tag(position_mode=ID3TagPositionMode.TIME,
                        time=9.013,
                        frame_id='MNOP',
                        text='My awesome PlainText ID3 Tag #2',
                        name='PlainText ID3 #2', description='Just some descriptive information'),
    ]

    muxings = (muxing_360p, muxing_480p)
    for raw_tag in raw_id3_tags:
        for muxing in muxings:
            bitmovin.encodings.Muxing.ProgressiveTS.ID3Tags.Raw.create(object_=raw_tag,
                                                                       encoding_id=encoding.id,
                                                                       muxing_id=muxing.id)
    for frame_id_tag in frame_id_id3_tags:
        for muxing in muxings:
            bitmovin.encodings.Muxing.ProgressiveTS.ID3Tags.FrameId.create(object_=frame_id_tag,
                                                                           encoding_id=encoding.id,
                                                                           muxing_id=muxing.id)
    for plain_text_tag in plain_text_id3_tags:
        for muxing in muxings:
            bitmovin.encodings.Muxing.ProgressiveTS.ID3Tags.PlainText.create(object_=plain_text_tag,
                                                                             encoding_id=encoding.id,
                                                                             muxing_id=muxing.id)

    # --- Run the encoding ---------------------------------------------
    bitmovin.encodings.Encoding.start(encoding_id=encoding.id)

    try:
        bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
    except BitmovinError as bitmovin_error:
        print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))

    # --- HLS master manifest ------------------------------------------
    manifest_output = EncodingOutput(output_id=s3_output.id,
                                     output_path=OUTPUT_BASE_PATH,
                                     acl=[acl_entry])
    hls_manifest = HlsManifest(manifest_name='master.m3u8', outputs=[manifest_output],
                               name='Sample HLS Manifest - Master - ProgressiveTS+ID3')
    hls_manifest = bitmovin.manifests.HLS.create(object_=hls_manifest).resource

    variant_stream_480p = VariantStream(closed_captions='NONE',
                                        segment_path='{}/'.format(muxing_480p_path),
                                        uri='480p.m3u8',
                                        encoding_id=encoding.id,
                                        stream_id=video_stream_480p.id,
                                        muxing_id=muxing_480p.id)
    variant_stream_480p = bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
                                                                      object_=variant_stream_480p)

    variant_stream_360p = VariantStream(closed_captions='NONE',
                                        segment_path='{}/'.format(muxing_360p_path),
                                        uri='360p.m3u8',
                                        encoding_id=encoding.id,
                                        stream_id=video_stream_360p.id,
                                        muxing_id=muxing_360p.id)
    variant_stream_360p = bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
                                                                      object_=variant_stream_360p)

    bitmovin.manifests.HLS.start(manifest_id=hls_manifest.id)

    try:
        bitmovin.manifests.HLS.wait_until_finished(manifest_id=hls_manifest.id)
    except BitmovinError as bitmovin_error:
        print("Exception occurred while waiting for HLS manifest creation to finish: {}".format(bitmovin_error))
if __name__ == '__main__':
    # Run the example workflow only when executed as a script.
    main()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import sys
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class BatchCounterCallback(callbacks.Callback):
  """Callback that counts how often batch begin/end hooks fire."""

  def __init__(self):
    # Number of on_batch_begin / on_batch_end invocations observed.
    self.batch_begin_count = 0
    self.batch_end_count = 0

  def on_batch_begin(self, *unused_args, **unused_kwargs):
    self.batch_begin_count = self.batch_begin_count + 1

  def on_batch_end(self, *unused_args, **unused_kwargs):
    self.batch_end_count = self.batch_end_count + 1
class TestTrainingWithDataset(keras_parameterized.TestCase):
  """Tests for fit/evaluate/predict when the input is a tf.data Dataset."""

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_calling_model_on_same_dataset(self):
    """Reusing one dataset instance across fit() calls should work."""
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae']
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    # Call fit with validation data
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_training_and_eval_methods_on_dataset(self):
    """fit/evaluate/predict on datasets, plus the invalid-argument errors."""
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat()  # Infinite dataset.
    dataset = dataset.batch(10)

    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset, steps=2, verbose=1)
    model.predict(dataset, steps=2)

    # Test with validation data
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)

    # Test with validation split
    with self.assertRaisesRegexp(
        ValueError, '`validation_split` argument is not supported when '):
      model.fit(dataset,
                epochs=1, steps_per_epoch=2, verbose=0,
                validation_split=0.5, validation_steps=2)

    # Test with sample weight.
    sample_weight = np.random.random((10,))
    with self.assertRaisesRegexp(
        ValueError, r'`sample_weight` argument is not supported .+dataset'):
      model.fit(
          dataset,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          sample_weight=sample_weight)

    # Test invalid usage: batch_size must not accompany a dataset input.
    with self.assertRaisesRegexp(
        ValueError, 'The `batch_size` argument must not be specified'):
      model.fit(dataset, batch_size=10, epochs=1, steps_per_epoch=2,
                verbose=0)
    with self.assertRaisesRegexp(
        ValueError, 'The `batch_size` argument must not be specified'):
      model.predict(dataset, batch_size=10, steps=2, verbose=0)
    with self.assertRaisesRegexp(
        ValueError, 'The `batch_size` argument must not be specified'):
      model.evaluate(dataset, batch_size=10, steps=2, verbose=0)

    # Passing a second dataset as the target is also invalid.
    with self.assertRaisesRegexp(
        ValueError, '(you should not specify a target)|'
        '(`y` argument is not supported when using dataset as input.)'):
      model.fit(dataset, dataset,
                epochs=1, steps_per_epoch=2, verbose=0)

    # With an infinite dataset, `steps_per_epoch`/`steps` argument is required.
    with self.assertRaisesRegexp(
        ValueError, 'the `steps_per_epoch` argument'):
      model.fit(dataset, epochs=1, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'the `steps` argument'):
      model.evaluate(dataset, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'the `steps` argument'):
      model.predict(dataset, verbose=0)

  @keras_parameterized.run_with_all_model_types(exclude_models='sequential')
  @keras_parameterized.run_all_keras_modes
  def test_training_and_eval_methods_on_multi_input_output_dataset(self):
    """Datasets yielding tuples or dicts of multiple inputs/outputs."""
    input_a = keras.layers.Input(shape=(3,), name='input_1')
    input_b = keras.layers.Input(shape=(3,), name='input_2')
    dense = keras.layers.Dense(4, name='dense')
    dropout = keras.layers.Dropout(0.5, name='dropout')
    branch_a = [input_a, dense]
    branch_b = [input_b, dense, dropout]
    model = testing_utils.get_multi_io_model(branch_a, branch_b)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    input_a_np = np.random.random((10, 3)).astype(dtype=np.float32)
    input_b_np = np.random.random((10, 3)).astype(dtype=np.float32)
    output_d_np = np.random.random((10, 4)).astype(dtype=np.float32)
    output_e_np = np.random.random((10, 4)).astype(dtype=np.float32)

    # Test with tuples
    dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
        (input_a_np, input_b_np), (output_d_np, output_e_np)))
    dataset_tuple = dataset_tuple.repeat(100)
    dataset_tuple = dataset_tuple.batch(10)

    model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset_tuple, steps=2, verbose=1)

    predict_dataset_tuple = dataset_ops.Dataset.from_tensor_slices(
        (input_a_np, input_b_np))
    # TODO(b/123360757): Remove below assertion once predict() supports
    # multi-input datasets.
    with self.assertRaisesRegexp(ValueError,
                                 'Error when checking model input'):
      model.predict(predict_dataset_tuple, steps=1)

    # Test with dict.  Subclassed models name outputs output_1/output_2;
    # the other model types use the layer names.
    input_dict = {'input_1': input_a_np, 'input_2': input_b_np}
    if testing_utils.get_model_type() == 'subclass':
      output_dict = {'output_1': output_d_np, 'output_2': output_e_np}
    else:
      output_dict = {'dense': output_d_np, 'dropout': output_e_np}
    dataset_dict = dataset_ops.Dataset.from_tensor_slices((
        input_dict, output_dict))
    dataset_dict = dataset_dict.repeat(100)
    dataset_dict = dataset_dict.batch(10)

    model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset_dict, steps=2, verbose=1)

    predict_dataset_dict = dataset_ops.Dataset.from_tensor_slices(
        input_dict)
    predict_dataset_dict = predict_dataset_dict.repeat(100)
    predict_dataset_dict = predict_dataset_dict.batch(10)
    model.predict(predict_dataset_dict, steps=1)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_dataset_with_sample_weights(self):
    """fit/evaluate/predict accept an (x, y, sample_weights) dataset."""
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    sample_weights = np.ones((10), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
                                                      sample_weights))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset, steps=2, verbose=1)
    model.predict(dataset, steps=2)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_dataset_with_sample_weights_correctness(self):
    """Sample weights from the dataset are applied to the loss."""
    x = keras.layers.Input(shape=(1,), name='input')
    y = keras.layers.Dense(
        1, kernel_initializer='ones', bias_initializer='zeros', name='dense')(x)
    model = keras.Model(x, y)
    optimizer = 'rmsprop'
    loss = 'mse'
    model.compile(optimizer, loss)
    inputs = np.array([[0], [1], [2], [3]], np.float32)
    targets = np.array([[2], [4], [6], [8]], np.float32)
    sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
    ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
                                                 sample_weights)).batch(2)
    result = model.evaluate(ds, verbose=1)
    # The per sample loss is multiplied by the corresponding sample weight. The
    # average of these weighted losses is the return value of the `evaluate`
    # call. For example, in the test above the average weighted loss is
    # calculated in the following manner:
    # ((2-0)^2) * 0.25 + ((4-1)^2) * 0.5 + ((6-2)^2 * 0.75) + ((8-3)^2 * 1)
    # equals 42.5 / 4 = 10.625
    self.assertEqual(result, 10.625)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_dataset_with_sparse_labels(self):
    """Integer (sparse) labels work with sparse_categorical_crossentropy."""
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    model.compile(
        optimizer,
        loss='sparse_categorical_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((10, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=10, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)

  @keras_parameterized.run_all_keras_modes
  def test_dataset_fit_correctness(self):
    """Loss history matches hand-computed values, with and without steps."""

    class SumLayer(keras.layers.Layer):
      # Trivial layer that sums each row; `w` exists only so the model
      # has a trainable weight (it is multiplied by zero in call()).

      def build(self, _):
        self.w = self.add_weight('w', ())

      def call(self, inputs):
        return keras.backend.sum(inputs, axis=1, keepdims=True) + self.w * 0

    model = keras.Sequential([SumLayer(input_shape=(2,))])
    model.compile(
        'rmsprop',
        loss='mae',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((40, 2), dtype=np.float32)
    inputs[10:20, :] = 2
    inputs[20:30, :] = 1
    inputs[30:, :] = 4
    targets = np.zeros((40, 1), dtype=np.float32)

    # Test correctness with `steps_per_epoch`.
    train_dataset = dataset_ops.Dataset.from_tensor_slices(
        (inputs, targets)).batch(10)
    val_dataset = dataset_ops.Dataset.from_tensor_slices(
        (inputs, targets)).batch(10)
    history = model.fit(train_dataset,
                        epochs=2, steps_per_epoch=2, verbose=1,
                        validation_data=val_dataset, validation_steps=2)
    self.assertAllClose(history.history['loss'],
                        [inputs[:20].sum() / 20, inputs[20:].sum() / 20])
    # The validation dataset will be reset at the end of each validation run.
    self.assertAllClose(history.history['val_loss'],
                        [inputs[:20].sum() / 20, inputs[:20].sum() / 20])

    # Test correctness with dataset reset.
    train_dataset = dataset_ops.Dataset.from_tensor_slices(
        (inputs, targets)).batch(10)
    val_dataset = dataset_ops.Dataset.from_tensor_slices(
        (inputs, targets)).batch(10)
    history = model.fit(train_dataset,
                        epochs=2, verbose=1, validation_data=val_dataset)
    self.assertAllClose(
        history.history['loss'],
        [inputs.sum() / 40, inputs.sum() / 40])
    self.assertAllClose(
        history.history['val_loss'],
        [inputs.sum() / 40, inputs.sum() / 40])

  @tf_test_util.run_deprecated_v1
  def test_dataset_input_shape_validation(self):
    """Shape mismatches in a dataset raise clear errors."""
    with self.cached_session():
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
      model.compile(optimizer='rmsprop', loss='mse')

      # User forgets to batch the dataset
      inputs = np.zeros((10, 3))
      targets = np.zeros((10, 4))
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)

      with self.assertRaisesRegexp(
          ValueError,
          r'expected (.*?) to have shape \(3,\) but got array with shape \(1,\)'
      ):
        model.train_on_batch(dataset)

      # Wrong input shape
      inputs = np.zeros((10, 5))
      targets = np.zeros((10, 4))
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      with self.assertRaisesRegexp(ValueError,
                                   r'expected (.*?) to have shape \(3,\)'):
        model.train_on_batch(dataset)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_finite_dataset_known_cardinality_no_steps_arg(self):
    """A finite dataset with known cardinality needs no steps argument."""
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((100, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=100, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.batch(10)

    batch_counter = BatchCounterCallback()
    history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])

    self.assertLen(history.history['loss'], 2)
    self.assertEqual(batch_counter.batch_end_count, 20)
    model.evaluate(dataset)
    out = model.predict(dataset)
    self.assertEqual(out.shape[0], 100)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_finite_dataset_unknown_cardinality_no_steps_arg(self):
    """A finite dataset of unknown cardinality (filter()) also needs no steps."""
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((100, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=100, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    # filter() hides the size, making the cardinality unknown.
    dataset = dataset.filter(lambda x, y: True).batch(10)
    self.assertEqual(keras.backend.get_value(cardinality.cardinality(dataset)),
                     cardinality.UNKNOWN)

    batch_counter = BatchCounterCallback()
    history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])

    self.assertLen(history.history['loss'], 2)
    self.assertEqual(batch_counter.batch_end_count, 20)
    model.evaluate(dataset)
    out = model.predict(dataset)
    self.assertEqual(out.shape[0], 100)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_finite_dataset_unknown_cardinality_no_step_with_train_and_val(self):
    """Unknown-cardinality dataset with validation; progress output is right."""

    class CaptureStdout(object):
      # Context manager capturing everything written to stdout into
      # `self.output` for later inspection.

      def __enter__(self):
        self._stdout = sys.stdout
        string_io = six.StringIO()
        sys.stdout = string_io
        self._stringio = string_io
        return self

      def __exit__(self, *args):
        self.output = self._stringio.getvalue()
        sys.stdout = self._stdout

    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((100, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=100, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.filter(lambda x, y: True).batch(10)
    self.assertEqual(
        keras.backend.get_value(cardinality.cardinality(dataset)),
        cardinality.UNKNOWN)

    batch_counter = BatchCounterCallback()
    with CaptureStdout() as capture:
      history = model.fit(
          dataset,
          epochs=2,
          callbacks=[batch_counter],
          validation_data=dataset.take(3))

    lines = capture.output.splitlines()

    # Once the cardinality has been learned, the progress bar shows 10/10.
    self.assertIn('10/10', lines[-1])

    self.assertLen(history.history['loss'], 2)

    # The first epoch will invoke batch begin 11 times, since it doesn't know
    # the cardinality. The second epoch should just invoke 10 times.
    if (testing_utils.should_run_eagerly()
        or testing_utils.should_run_tf_function()):
      expected_batch_begin_count = 21
    else:
      expected_batch_begin_count = 20
    self.assertEqual(batch_counter.batch_begin_count,
                     expected_batch_begin_count)
    self.assertEqual(batch_counter.batch_end_count, 20)
    model.evaluate(dataset)
    out = model.predict(dataset)
    self.assertEqual(out.shape[0], 100)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_finite_dataset_unknown_cardinality_out_of_data(self):
    """Running past the end of the data warns and interrupts training."""
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((100, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=100, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.filter(lambda x, y: True).batch(10)
    self.assertEqual(
        keras.backend.get_value(cardinality.cardinality(dataset)),
        cardinality.UNKNOWN)

    batch_counter = BatchCounterCallback()
    # NOTE: `logging` here is tf_logging -- the later
    # `from tensorflow.python.platform import tf_logging as logging`
    # shadows the stdlib `import logging` at the top of the file.
    with test.mock.patch.object(logging, 'warning') as mock_log:
      # steps_per_epoch (200) is greater than the dataset size (100). As this is
      # unexpected, training will stop and not make it to the second epoch.
      history = model.fit(
          dataset,
          epochs=2,
          verbose=1,
          callbacks=[batch_counter],
          steps_per_epoch=200)
      self.assertIn(
          'ran out of data; interrupting training.', str(mock_log.call_args))
      self.assertIn(
          'can generate at least '
          '`steps_per_epoch * epochs` batches (in this case, 400 batches). '
          'You may need to use the repeat() function when '
          'building your dataset.', str(mock_log.call_args))

    self.assertLen(history.history['loss'], 1)
    self.assertEqual(batch_counter.batch_end_count, 10)
    model.evaluate(dataset)
    out = model.predict(dataset)
    self.assertEqual(out.shape[0], 100)

  @keras_parameterized.run_all_keras_modes
  def test_with_external_loss(self):
    """A model whose only loss was added via add_loss trains on features."""
    inp = keras.Input(shape=(4,), name='inp1')
    out = keras.layers.Dense(2)(inp)
    model = keras.Model(inp, out)
    model.add_loss(math_ops.reduce_mean(out))
    model.compile('rmsprop')
    x = np.ones((10, 4))

    # dataset contains only features, no labels.
    dataset = dataset_ops.Dataset.from_tensor_slices(x).repeat(10).batch(10)
    model.fit(dataset)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_train_eval_with_steps(self):
    """fit() with steps_per_epoch plus an unsized validation dataset."""
    # See b/142880049 for more details.
    inp = keras.Input(shape=(4,), name='inp1')
    out = keras.layers.Dense(2)(inp)
    model = keras.Model(inp, out)
    model.compile(
        'rmsprop', loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((100, 4), dtype=np.float32)
    targets = np.random.randint(0, 2, size=100, dtype=np.int32)
    training_ds = dataset_ops.Dataset.from_tensor_slices(
        (inputs, targets)).repeat().batch(10)

    # Create eval dataset with generator, so that dataset won't contain the
    # overall size metadata. Without eval_steps, we expect to run through all
    # the data in this dataset every epoch.
    def gen():
      for _ in range(100):
        yield (np.zeros(4, dtype=np.float32),
               np.random.randint(0, 2, size=1, dtype=np.int32))

    eval_ds = dataset_ops.Dataset.from_generator(
        generator=gen,
        output_types=('float64', 'int32'),
        output_shapes=([4], [1])).batch(100)

    batch_counter = BatchCounterCallback()

    model.fit(
        training_ds,
        steps_per_epoch=10,
        epochs=10,
        validation_data=eval_ds,
        callbacks=[batch_counter]
    )

    # Expect 10 batch from training per epoch.
    self.assertEqual(batch_counter.batch_end_count, 100)
class TestMetricsWithDatasets(keras_parameterized.TestCase):
  """Checks metric values computed while evaluating tf.data datasets."""

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_metrics_correctness_with_dataset(self):
    """Accuracy metrics match expectations for random and all-zero labels."""
    model_layers = [
        keras.layers.Dense(8, activation='relu', input_dim=4,
                           kernel_initializer='ones'),
        keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
    ]
    model = testing_utils.get_model_from_layers(model_layers, (4,))
    model.compile(
        loss='binary_crossentropy',
        metrics=['accuracy', metrics_module.BinaryAccuracy()],
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    np.random.seed(123)
    features = np.random.randint(10, size=(100, 4)).astype(np.float32)
    labels = np.random.randint(2, size=(100, 1)).astype(np.float32)
    random_ds = dataset_ops.Dataset.from_tensor_slices(
        (features, labels)).batch(10)
    # Random binary labels: both accuracy metrics land near chance (0.5).
    outs = model.evaluate(random_ds, steps=10)
    self.assertEqual(np.around(outs[1], decimals=1), 0.5)
    self.assertEqual(np.around(outs[2], decimals=1), 0.5)

    # All-zero labels against an all-ones network: accuracy is exactly zero.
    zero_labels = np.zeros((100, 1), dtype=np.float32)
    zero_ds = dataset_ops.Dataset.from_tensor_slices((features, zero_labels))
    zero_ds = zero_ds.repeat(100).batch(10)
    outs = model.evaluate(zero_ds, steps=10)
    self.assertEqual(outs[1], 0.)
    self.assertEqual(outs[2], 0.)
# Run the test suite when executed directly.
if __name__ == '__main__':
  test.main()
| |
#!/usr/bin/env python
################################################################################
# DATE: 2017/03/09
#
# SCRIPT: bland_altmant_plot.py
#
# VERSION: 1.3
#
# AUTHORS: Miguel A Ibarra <miguelib@ufl.edu>
# Matt Thoburn <mthoburn@ufl.edu>
# Oleksandr Moskalenko <om@rc.ufl.edu>
#
# DESCRIPTION: This script takes a a wide format file and makes a Bland-Altman plot
#
# The output is a set of graphs and spreadsheets of flags
#
################################################################################
# Import future libraries
# Import built-in libraries
import os
import logging
import argparse
from itertools import combinations
from argparse import RawDescriptionHelpFormatter
# Import add-on libraries
import matplotlib
import pandas as pd
matplotlib.use('Agg')
import statsmodels.api as sm
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from statsmodels.sandbox.regression.predstd import wls_prediction_std
# Import local data libraries
from secimtools.dataManager import logger as sl
from secimtools.dataManager.flags import Flags
from secimtools.dataManager.interface import wideToDesign
# Import local plotting libraries
from secimtools.visualManager import module_bar as bar
from secimtools.visualManager import module_lines as lines
from secimtools.visualManager import module_scatter as scatter
from secimtools.visualManager.manager_color import colorHandler
from secimtools.visualManager.manager_figure import figureHandler
def getOptions():
    """ Parse and validate command-line arguments.

    :Returns:
        :rtype: argparse.Namespace
        :returns: Parsed arguments. All path arguments are converted to
            absolute paths.
    """
    description = """ The Bland-Altman plot (BA-plot) is commonly used to look
    at concordance of samples. It is useful for looking at variability between
    replicates. This script generates BA-plots for all pairwise combinations of
    samples, or if group information is provided it will only report pairwise
    combinations within the group.

    A linear regression is also performed on the BA-plots to identify samples
    whose residuals are beyond a cutoff. For each feature (row) in the dataset,
    a sample is flagged as an outlier if the Pearson normalized residuals are
    greater than a cutoff (--filter_cutoff). Or if the leverage statistics
    (DFFITS and Cook's D) flag the feature as a leverage point.

    The script outputs a separate set of the flags for samples and features.

    Two sets of plots are output: (1) Bland-Altman plots for pairwise
    comparisons are saved to a pdf specified by (--ba). (2) Bar graphs of
    summarized flags are saved by (--flag_summary).
    """
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=RawDescriptionHelpFormatter)
    # Standard Input
    Standard = parser.add_argument_group(title='Standard input',
                                description='Standard input for SECIM tools.')
    Standard.add_argument('-i',"--input", dest="input", action='store',
                        required=True, help="Input dataset in wide format.")
    Standard.add_argument('-d',"--design", dest="design", action='store',
                        required=True, help="Design file.")
    Standard.add_argument('-id',"--ID", dest="uniqID", action='store',
                        required=True, help="Name of the column with unique identifiers.")
    Standard.add_argument('-g',"--group", dest="group", action='store',
                        required=False, help="Group/treatment identifier in design file"\
                        " [Optional].")
    # Tool output
    output = parser.add_argument_group(title='Required input',
                            description='Additional required input for this tool.')
    output.add_argument('-f',"--figure", dest="baName", action='store',
                        required=True, help="Name of the output PDF for Bland-Altman plots.")
    output.add_argument('-fd',"--flag_dist", dest="distName", action='store',
                        required=True, help="Name of the output TSV for distribution flags.")
    output.add_argument('-fs',"--flag_sample", dest="flagSample", action='store',
                        required=True, help="Name of the output TSV for sample flags.")
    output.add_argument('-ff',"--flag_feature", dest="flagFeature", action='store',
                        required=True, help="Name of the output TSV for feature flags.")
    ## AMM added following 2 arguments
    output.add_argument('-pf',"--prop_feature", dest="propFeature", action='store',
                        required=True, help="Name of the output TSV for proportion of features.")
    output.add_argument('-ps',"--prop_sample", dest="propSample", action='store',
                        required=True, help="Name of the output TSV for proportion of samples.")
    # Tool Input
    tool = parser.add_argument_group(title='Optional Settings')
    tool.add_argument('-po',"--process_only", dest="processOnly",
                        action='store', nargs='+', default=False, required=False,
                        help="Only process the given groups (list groups separated by"\
                        " spaces) [Optional].")
    tool.add_argument('-rc',"--resid_cutoff", dest="residCutoff",
                        action='store', default=3, type=int, required=False,
                        help="Cutoff value for flagging outliers [default=3].")
    tool.add_argument('-sfc',"--sample_flag_cutoff", dest="sampleCutoff",
                        action='store', default=.20, type=float, required=False,
                        help="Proportion cutoff value when flagging samples [default=0.20].")
    tool.add_argument('-ffc',"--feature_flag_cutoff", dest="featureCutoff",
                        action='store', default=.05, type=float, required=False,
                        help="Proportion cutoff value when flagging features [default=0.05].")
    group4 = parser.add_argument_group(title='Development Settings')
    group4.add_argument("--debug", dest="debug", action='store_true',
                        required=False, help="Add debugging log output.")
    args = parser.parse_args()

    # Proportion cutoffs must be valid proportions (between 0 and 1).
    if (args.sampleCutoff > 1) or (args.sampleCutoff < 0):
        parser.error('sample_flag_cutoff must be a number between 0 and 1')
    if (args.featureCutoff > 1) or (args.featureCutoff < 0):
        parser.error('feature_flag_cutoff must be a number between 0 and 1')

    # Standardize paths
    args.input = os.path.abspath(args.input)
    args.design = os.path.abspath(args.design)
    args.baName = os.path.abspath(args.baName)
    args.distName = os.path.abspath(args.distName)
    args.flagSample = os.path.abspath(args.flagSample)
    args.flagFeature = os.path.abspath(args.flagFeature)
    args.propFeature = os.path.abspath(args.propFeature)
    # BUG FIX: the original assigned to a misspelled attribute
    # (args.propSamplee), leaving args.propSample as a relative path.
    args.propSample = os.path.abspath(args.propSample)

    return args
def summarizeFlags(dat, flags, combos):
    """ Given a set of flags calculate the proportion of times a feature is
    flagged.

    :Arguments:
        :type dat: interface.wideToDesign
        :param dat: A wideToDesign object that will be used to get sample and
            feature information (only ``dat.wide.columns`` and
            ``dat.sampleIDs`` are read).

        :type flags: pandas.DataFrame
        :param flags: A Dataframe of flags for all pairwise comparisons.

        :type combos: list
        :param combos: combinations of flags

    :Returns:
        :rtype: tuple of pandas.Series
        :return: Eight Series, in (sample, feature) pairs for the combined
            outlier flag and then the pearson, cooks and dffits flags:
            propSample, propFeature, propSample_p, propFeature_p,
            propSample_c, propFeature_c, propSample_d, propFeature_d.
    """
    def _proportions(nameTemplate):
        """Compute (sample, feature) flag proportions for one flag family."""
        # Sum of flags and number of possible flags, per feature x sample.
        flagSum = pd.DataFrame(0, index=flags.index, columns=dat.wide.columns)
        flagTotal = pd.DataFrame(0, index=flags.index, columns=dat.wide.columns)
        for sampleID in dat.sampleIDs:
            # Columns of 'flags' for comparisons involving this sample.
            flagList = [nameTemplate.format(c[0], c[1])
                        for c in combos if sampleID in c]
            flagSum.loc[:, sampleID] = flags[flagList].sum(axis=1).values
            flagTotal.loc[:, sampleID] = flags[flagList].count(axis=1).values
        # Proportions from the marginal sums: per sample (columns) and per
        # feature (rows).
        propSample = flagSum.sum(axis=0) / flagTotal.sum(axis=0)
        propFeature = flagSum.sum(axis=1) / flagTotal.sum(axis=1)
        return propSample, propFeature

    # One pass per flag family; previously this was four copy-pasted blocks.
    propSample, propFeature = _proportions("flag_{0}_{1}")
    propSample_p, propFeature_p = _proportions("flag_pearson_{0}_{1}")
    propSample_c, propFeature_c = _proportions("flag_cooks_{0}_{1}")
    propSample_d, propFeature_d = _proportions("flag_dffits_{0}_{1}")

    return (propSample, propFeature, propSample_p, propFeature_p,
            propSample_c, propFeature_c, propSample_d, propFeature_d)
def plotFlagDist(propSample, propFeature, pdf):
    """
    Plot the distribution of proportion of samples and features that
    were outliers.

    Only the 30 largest proportions of each Series are plotted.

    :Arguments:
        :type propSample: pandas.Series
        :param propSample: Proportion of comparisons in which each sample was
            flagged as an outlier.

        :type propFeature: pandas.Series
        :param propFeature: Proportion of comparisons in which each feature
            was flagged as an outlier.

        :type pdf: string
        :param pdf: Filename of pdf to save plots.

    :Returns:
        :rtype: matplotlib.backends.backend_pdf.PdfPages
        :returns: Saves two bar plots to pdf.
    """
    # NOTE(review): in-place sorts mutate the caller's Series; callers must
    # not rely on the original ordering afterwards.
    # sort samples
    propSample.sort_values(inplace=True,ascending=False)

    # sort compounds
    propFeature.sort_values(inplace=True,ascending=False)

    # Make Plots
    ## Open pdf for plotting
    ppFlag = PdfPages(pdf)

    # Open figure handler instance
    fh = figureHandler(proj='2d')

    # Top 30 samples by outlier proportion.
    keys = list(propSample.head(30).keys())

    # Plotting quickBar
    bar.quickBar(ax=fh.ax[0],y=list(propSample.head(30)),x=keys)

    # Formating axis
    fh.formatAxis(xlim=(0,len(keys) + 1), ylim="ignore", xTitle="Sample ID",
        yTitle="Proportion of features that were outliers.")

    # Save Figure in PDF
    ppFlag.savefig(fh.fig, bbox_inches='tight')

    ## Plot features
    # Open figure handler instance
    fh = figureHandler(proj='2d')

    # Top 30 features by outlier proportion.
    keys = list(propFeature.head(30).keys())

    # Plot bar plot
    bar.quickBar(ax=fh.ax[0],y=list(propFeature.head(30)),x=keys)

    # Format Axis
    fh.formatAxis(xlim=(0,len(keys) + 1), ylim="ignore", xTitle="Feature ID",
        yTitle="Proportion of samples that a feature was an outlier.")

    # Save figure to PDF
    ppFlag.savefig(fh.fig, bbox_inches="tight")

    ## Close pdf
    ppFlag.close()
def buildTitle(dat, xName, yName):
    """ Build plot title.

    :Arguments:
        :type dat: interface.wideToDesign
        :param dat: A wide to design object

        :type xName: string
        :param xName: String containing the sampleID for x

        :type yName: string
        :param yName: String containing the sampleID for y

    :Returns:
        :rtype: string
        :returns: A string containing the plot title.
    """
    # If groups are the same, add group information to the plot.
    if dat.group and dat.design.loc[xName, dat.group] == dat.design.loc[yName, dat.group]:
        group = dat.design.loc[xName, dat.group]
        title = '{0}\n{1} vs {2}'.format(group, xName, yName)
    else:
        title = '{0} vs {1}'.format(xName, yName)

    # If there are missing values add the count to the title. The 'missing'
    # attribute is optional on dat, so look it up defensively instead of the
    # previous bare 'except:', which silently hid unrelated errors.
    missing = getattr(dat, 'missing', None)
    if missing == 1:
        title = title + '\n1 missing value'
    elif missing is not None and missing > 0:
        title = title + '\n{} missing values'.format(missing)

    return title
def runRegression(x, y):
    """ Run a linear regression of y on x and collect diagnostics.

    :Arguments:
        :type x: pandas.Series
        :param x: Series of first sample, treated as independent variable.

        :type y: pandas.Series
        :param y: Series of second sample, treated as dependent variables.

    :Returns:
        :rtype: tuple
        :returns: A tuple of Series and data frames:
            * lower (pd.Series): lower prediction-interval bound.
            * upper (pd.Series): upper prediction-interval bound.
            * fitted (pd.Series): fitted values.
            * resid (pd.DataFrame): raw and Pearson-normalized residuals.
            * influence (pd.DataFrame): Cook's D, its p-value and a boolean
              DFFITS leverage flag per observation.
    """
    # Coerce to float; statsmodels drops rows with missing values.
    yVals = y.apply(float)
    xVals = x.apply(float)
    results = sm.OLS(yVals, xVals, missing='drop').fit()
    fitted = results.fittedvalues

    # Influence diagnostics: Cook's distance (with p-values) and DFFITS,
    # where an observation is a leverage point if |dffits| exceeds the
    # recommended threshold returned by statsmodels.
    diagnostics = results.get_influence()
    cooksD, cooksPval = diagnostics.cooks_distance
    dffitsVals, dffitsThresh = diagnostics.dffits
    influence = pd.DataFrame(
        {'cooksD': cooksD,
         'cooks_pval': cooksPval,
         'dffits': abs(dffitsVals) > dffitsThresh},
        index=fitted.index)

    # Raw and Pearson-normalized residuals side by side.
    pearsonResid = pd.Series(results.resid_pearson, index=results.resid.index)
    resid = pd.concat([results.resid, pearsonResid], axis=1)
    resid.columns = pd.Index(['resid', 'resid_pearson'])

    # 95% prediction interval bounds.
    prstd, lower, upper = wls_prediction_std(results)

    return lower, upper, fitted, resid, influence
def makeBA(x, y, ax, fh):
    """ Function to make BA Plot comparing x vs y.

    :Arguments:
        :type x: pandas.Series
        :param x: Series of first sample, treated as independent variable.

        :type y: pandas.Series
        :param y: Series of second sample, treated as dependent variables.

        :type ax: matplotlib.axis
        :param ax: Axis which to plot.

        :type fh: figureHandler
        :param fh: figure to draw BA plots onto.

    :Returns:
        :rtype: tuple of pandas.Series
        :returns: Four Boolean Series: the combined outlier mask followed by
            the individual Pearson-residual, Cook's D and DFFITS masks. True
            marks a flagged value.
    """
    # Make BA plot: difference vs mean of the two samples.
    x = x.apply(float)
    y = y.apply(float)
    diff = x - y
    mean = (x + y) / 2

    # Drop missing for current comparison
    diff.dropna(inplace=True)
    mean.dropna(inplace=True)

    # Get Upper and Lower CI from regression of the difference on the mean.
    lower, upper, fitted, resid, infl = runRegression(mean, diff)
    # NOTE(review): 'cutoff' is a module-level name set in the __main__ block
    # from --resid_cutoff; makeBA depends on it being defined before calls.
    mask1 = abs(resid['resid_pearson']) > cutoff
    mask2 = infl['cooks_pval'] <= 0.5
    mask3 = infl['dffits']
    # A point is an outlier if any of the three criteria flag it.
    mask = mask1 | mask2 | mask3
    # Create BA plot: unflagged points in blue, flagged points in red.
    scatter.scatter2D(ax=ax, x=mean[~mask], y=diff[~mask],colorList='b')
    scatter.scatter2D(ax=ax, x=mean[mask], y=diff[mask], colorList='r')

    # Plot regression line with its confidence band and a zero reference.
    ax.plot(mean, lower, 'r:')
    ax.plot(mean, fitted, 'r')
    ax.axhline(0, color='k')
    ax.plot(mean, upper, 'r:')

    # Adjust axes
    fh.formatAxis(axnum=1,xlim='ignore',ylim='ignore',axTitle='Bland-Altman Plot',
        xTitle='Mean\n{0} & {1}'.format(x.name, y.name),
        yTitle='Difference\n{0} - {1}'.format(x.name, y.name),grid=False)

    return mask, mask1, mask2, mask3
def makeScatter(x, y, ax, fh):
    """ Plot a scatter plot of x vs y with a fitted regression line.

    :Arguments:
        :type x: pandas.Series
        :param x: Series of first sample, treated as independent variable.

        :type y: pandas.Series
        :param y: Series of second sample, treated as dependent variables.

        :type ax: matplotlib.axis
        :param ax: Axis which to plot.

        :type fh: figureHandler
        :param fh: figure to draw the plot onto.

    :Returns:
        :rtype: matplotlib.axis
        :returns: A matplotlib axis with a scatter plot.
    """
    # Regression of y on x gives the fitted line and its 95% CI bounds.
    lower, upper, fitted, resid, infl = runRegression(x, y)

    # Draw the raw points.
    scatter.scatter2D(x=x, y=y, ax=ax, colorList=list("b"))

    # With missing data, x and the regression output differ in length, so
    # align x to the fitted index before drawing the overlay lines.
    xAligned = x.loc[fitted.index]
    for curve in (lower, fitted, upper):
        lines.drawCutoff(x=xAligned, y=curve, ax=ax)

    # Label the axes and title.
    fh.formatAxis(axnum=0, xTitle=x.name, yTitle=y.name,
                  axTitle='Scatter plot', grid=False)
def iterateCombo(dat, combo, pdf):
    """ Generate the scatter/BA plots and outlier flags for one sample pair.

    :Arguments:
        :type dat: interface.wideToDesign
        :param dat: A wideToDesign object containing wide and design information.

        :param tuple combo: A tuple of pairwise combination for current sample.

        :type pdf: matplotlib.backends.backend_pdf.PdfPages
        :param pdf: Handler for multi-page PDF that will contain all plots.

    :Updates:
        :type pdf: matplotlib.backends.backend_pdf.PdfPages
        :param pdf: Handler for multi-page PDF that will contain all plots.

    :Returns:
        :rtype: pandas.DataFrame
        :returns: Data frame of outlier flags (combined, pearson, cooks and
            dffits columns) for this pairwise comparison.
    """
    # Current combination
    c1 = combo[0]
    c2 = combo[1]

    # Set up figure with 2 subplots
    fh = figureHandler(proj='2d',numAx=2,numRow=2,numCol=2,arrangement=[(0,0,1,2),(0,1,1,2)])

    # Scatter Plot of c1 vs c2
    makeScatter(dat.wide.loc[:, c1], dat.wide.loc[:, c2], fh.ax[0],fh)

    # BA plot of c1 vs c2; returns the combined outlier mask plus the three
    # individual criterion masks (Pearson residual, Cook's D, DFFITS).
    outlier, pearson, cooks, dffits = makeBA(dat.wide.loc[:, c1], dat.wide.loc[:, c2], fh.ax[1],fh)

    # Build plot title
    title = buildTitle(dat, c1, c2)

    # Add plot title to the figure
    fh.formatAxis(figTitle=title)

    # Establish a tight layout for the figure
    plt.tight_layout(pad=2,w_pad=.05)

    # Shrink figure to make room for the title and axis labels
    fh.shrink(top=.85,bottom=.25,left=.15,right=.9)

    # Output figure to pdf
    fh.addToPdf(dpi=90,pdfPages=pdf)

    # Create flags, one column per flag type for this pair
    flag = Flags(index=dat.wide.index)
    flag.addColumn(column='flag_{0}_{1}'.format(c1, c2), mask=outlier)
    flag.addColumn(column='flag_pearson_{0}_{1}'.format(c1, c2), mask=pearson)
    flag.addColumn(column='flag_cooks_{0}_{1}'.format(c1, c2), mask=cooks)
    flag.addColumn(column='flag_dffits_{0}_{1}'.format(c1, c2), mask=dffits)

    return flag.df_flags
def main(args):
    """ Run the Bland-Altman analysis end to end.

    Loads the wide/design data, generates pairwise scatter and BA plots,
    merges the per-comparison outlier flags, summarizes them, and writes the
    sample- and feature-level flag and proportion files.

    :Arguments:
        :type args: argparse.Namespace
        :param args: Parsed command-line arguments from getOptions().
    """
    # Import data
    dat = wideToDesign(args.input, args.design, args.uniqID, args.group,
                        logger=logger)

    # Get a list of samples to process, if processOnly is specified only
    # analyze specified group.
    if args.processOnly:
        dat.design = dat.design[dat.design[args.group].isin(args.processOnly)]
        toProcess = dat.design.index
        dat.sampleIDs = toProcess.tolist()

    # Create dataframe with sampleIDs that are to be analyzed.
    dat.keep_sample(dat.sampleIDs)

    # Get list of pairwise combinations. If group is specified, only do
    # within group combinations.
    combos = list()
    if args.group:
        # If group is given, only do within group pairwise combinations
        logger.info('Only doing within group, pairwise comparisons.')
        for groupName, dfGroup in dat.design.groupby(dat.group):
            combos.extend(list(combinations(dfGroup.index, 2)))
    else:
        logger.info('Doing all pairwise comparisons. This could take a while!')
        # Get all pairwise combinations for all samples
        combos.extend(list(combinations(dat.sampleIDs, 2)))

    # Open a multiple page PDF for plots
    ppBA = PdfPages(args.baName)

    # Loop over combinations and generate plots and return a list of flags.
    logger.info('Generating flags and plots.')
    flags = [iterateCombo(dat, combo, ppBA) for combo in combos]

    # Close PDF with plots
    ppBA.close()

    # Merge the per-comparison flag frames into a single frame
    logger.info('Merging outlier flags.')
    merged = Flags.merge(flags)

    # Summarize flags into per-sample / per-feature outlier proportions
    logger.info('Summarizing outlier flags.')
    propSample, propFeature, propSample_p, propFeature_p, propSample_c, propFeature_c, propSample_d, propFeature_d = summarizeFlags(dat, merged, combos)
    plotFlagDist(propSample, propFeature, args.distName)

    ## AMM - output sample and feature proportions
    propSample.to_csv(args.propSample, sep='\t')
    propFeature.to_csv(args.propFeature, sep='\t')

    # Create sample level flags: a sample is flagged when its outlier
    # proportion reaches the --sample_flag_cutoff.
    flag_sample = Flags(index=dat.sampleIDs)
    flag_sample.addColumn(column='flag_sample_BA_outlier',
                        mask=(propSample >= args.sampleCutoff))
    flag_sample.addColumn(column='flag_sample_BA_pearson',
                        mask=(propSample_p >= args.sampleCutoff))
    flag_sample.addColumn(column='flag_sample_BA_cooks',
                        mask=(propSample_c >= args.sampleCutoff))
    flag_sample.addColumn(column='flag_sample_BA_dffits',
                        mask=(propSample_d >= args.sampleCutoff))
    flag_sample.df_flags.index.name = "sampleID"
    flag_sample.df_flags.to_csv(args.flagSample, sep='\t')

    # Create metabolite level flags: a feature is flagged when its outlier
    # proportion reaches the --feature_flag_cutoff.
    flag_metabolite = Flags(dat.wide.index)
    flag_metabolite.addColumn(column='flag_feature_BA_outlier',
                        mask=(propFeature >= args.featureCutoff))
    flag_metabolite.addColumn(column='flag_feature_BA_pearson',
                        mask=(propFeature_p >= args.featureCutoff))
    flag_metabolite.addColumn(column='flag_feature_BA_cooks',
                        mask=(propFeature_c >= args.featureCutoff))
    flag_metabolite.addColumn(column='flag_feature_BA_dffits',
                        mask=(propFeature_d >= args.featureCutoff))
    flag_metabolite.df_flags.to_csv(args.flagFeature, sep='\t')

    # Finish Script
    logger.info("Script Complete!")
if __name__ == '__main__':
    # Parse command-line arguments.
    args = getOptions()

    # NOTE(review): 'global' at module level is a no-op; 'cutoff' is simply a
    # module-level name that makeBA() reads.
    global cutoff
    cutoff = args.residCutoff

    # Configure the root logger at the requested verbosity.
    logger = logging.getLogger()
    if args.debug:
        sl.setLogger(logger, logLevel='debug')
    else:
        sl.setLogger(logger)

    logger.info('Importing Data')
    main(args)
| |
# Copyright 2016-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Base matplotlib plotter module"""
from abc import abstractmethod, ABCMeta
from collections import defaultdict as ddict
import matplotlib.pyplot as plt
from trappy.plotter import AttrConf
from trappy.plotter.Constraint import ConstraintManager
from trappy.plotter.PlotLayout import PlotLayout
from trappy.plotter.AbstractDataPlotter import AbstractDataPlotter
from trappy.plotter.ColorMap import ColorMap
class StaticPlot(AbstractDataPlotter):
"""
This class uses :mod:`trappy.plotter.Constraint.Constraint` to
represent different permutations of input parameters. These
constraints are generated by creating an instance of
:mod:`trappy.plotter.Constraint.ConstraintManager`.
:param traces: The input data
:type traces: a list of :mod:`trappy.trace.FTrace`,
:mod:`trappy.trace.SysTrace`, :mod:`trappy.trace.BareTrace`
or :mod:`pandas.DataFrame` or a single instance of them.
:param column: specifies the name of the column to
be plotted.
:type column: (str, list(str))
:param templates: TRAPpy events
.. note::
This is not required if a :mod:`pandas.DataFrame` is
used
:type templates: :mod:`trappy.base.Base`
:param filters: Filter the column to be plotted as per the
specified criteria. For Example:
::
filters =
{
"pid": [ 3338 ],
"cpu": [0, 2, 4],
}
:type filters: dict
:param per_line: Used to control the number of graphs
in each graph subplot row
:type per_line: int
:param concat: Draw all the pivots on a single graph
:type concat: bool
:param permute: Draw one plot for each of the traces specified
:type permute: bool
:param drawstyle: This argument is forwarded to the matplotlib
corresponding :func:`matplotlib.pyplot.plot` call
drawing style.
.. note::
step plots are not currently supported for filled
graphs
:param xlim: A tuple representing the upper and lower xlimits
:type xlim: tuple
:param ylim: A tuple representing the upper and lower ylimits
:type ylim: tuple
:param title: A title describing all the generated plots
:type title: str
:param style: Created pre-styled graphs loaded from
:mod:`trappy.plotter.AttrConf.MPL_STYLE`
:type style: bool
:param signals: A string of the type event_name:column
to indicate the value that needs to be plotted
.. note::
- Only one of `signals` or both `templates` and
`columns` should be specified
- Signals format won't work for :mod:`pandas.DataFrame`
input
:type signals: str
:param legend_ncol: A positive integer that represents the
number of columns in the legend
:type legend_ncol: int
"""
__metaclass__ = ABCMeta
def __init__(self, traces, templates, **kwargs):
self._fig = None
self._layout = None
super(StaticPlot, self).__init__(traces=traces,
templates=templates)
self.set_defaults()
for key in kwargs:
if key in AttrConf.ARGS_TO_FORWARD:
self._attr["args_to_forward"][key] = kwargs[key]
else:
self._attr[key] = kwargs[key]
if "signals" in self._attr:
self._describe_signals()
self._check_data()
if "column" not in self._attr:
raise RuntimeError("Value Column not specified")
zip_constraints = not self._attr["permute"]
self.c_mgr = ConstraintManager(traces, self._attr["column"],
self.templates, self._attr["pivot"],
self._attr["filters"],
zip_constraints=zip_constraints)
def savefig(self, *args, **kwargs):
"""Save the plot as a PNG fill. This calls into
:mod:`matplotlib.figure.savefig`
"""
if self._fig is None:
self.view()
self._fig.savefig(*args, **kwargs)
@abstractmethod
def set_defaults(self):
"""Sets the default attrs"""
self._attr["width"] = AttrConf.WIDTH
self._attr["length"] = AttrConf.LENGTH
self._attr["per_line"] = AttrConf.PER_LINE
self._attr["concat"] = AttrConf.CONCAT
self._attr["filters"] = {}
self._attr["style"] = True
self._attr["permute"] = False
self._attr["pivot"] = AttrConf.PIVOT
self._attr["xlim"] = AttrConf.XLIM
self._attr["ylim"] = AttrConf.YLIM
self._attr["title"] = AttrConf.TITLE
self._attr["args_to_forward"] = {}
self._attr["map_label"] = {}
self._attr["_legend_handles"] = []
self._attr["_legend_labels"] = []
self._attr["legend_ncol"] = AttrConf.LEGEND_NCOL
def view(self, test=False):
"""Displays the graph"""
if test:
self._attr["style"] = True
AttrConf.MPL_STYLE["interactive"] = False
permute = self._attr["permute"] and not self._attr["concat"]
if self._attr["style"]:
with plt.rc_context(AttrConf.MPL_STYLE):
self._resolve(permute, self._attr["concat"])
else:
self._resolve(permute, self._attr["concat"])
def make_title(self, constraint, pivot, permute, concat):
"""Generates a title string for an axis"""
if concat:
return str(constraint)
if permute:
return constraint.get_data_name()
elif pivot != AttrConf.PIVOT_VAL:
return "{0}: {1}".format(self._attr["pivot"], self._attr["map_label"].get(pivot, pivot))
else:
return ""
def add_to_legend(self, series_index, handle, constraint, pivot, concat, permute):
"""
Add series handles and names to the legend
A handle is returned from a plot on an axis
e.g. Line2D from axis.plot()
"""
self._attr["_legend_handles"][series_index] = handle
legend_labels = self._attr["_legend_labels"]
if concat and pivot == AttrConf.PIVOT_VAL:
legend_labels[series_index] = self._attr["column"]
elif concat:
legend_labels[series_index] = "{0}: {1}".format(
self._attr["pivot"],
self._attr["map_label"].get(pivot, pivot)
)
elif permute:
legend_labels[series_index] = constraint._template.name + ":" + constraint.column
else:
legend_labels[series_index] = str(constraint)
def _resolve(self, permute, concat):
"""Determine what data to plot on which axis"""
pivot_vals, len_pivots = self.c_mgr.generate_pivots(permute)
pivot_vals = list(pivot_vals)
num_of_axes = len(self.c_mgr) if concat else len_pivots
# Create a 2D Layout
self._layout = PlotLayout(
self._attr["per_line"],
num_of_axes,
width=self._attr["width"],
length=self._attr["length"],
title=self._attr['title'])
self._fig = self._layout.get_fig()
# Determine what constraint to plot and the corresponding pivot value
if permute:
legend_len = self.c_mgr._max_len
pivots = [y for _, y in pivot_vals]
c_dict = {c : str(c) for c in self.c_mgr}
c_list = sorted(c_dict.items(), key=lambda x: (x[1].split(":")[-1], x[1].split(":")[0]))
constraints = [c[0] for c in c_list]
cp_pairs = [(c, p) for c in constraints for p in sorted(set(pivots))]
else:
legend_len = len_pivots if concat else len(self.c_mgr)
pivots = pivot_vals
cp_pairs = [(c, p) for c in self.c_mgr for p in pivots if p in c.result]
# Initialise legend data and colormap
self._attr["_legend_handles"] = [None] * legend_len
self._attr["_legend_labels"] = [None] * legend_len
if "colors" in self._attr:
self._cmap = ColorMap.rgb_cmap(self._attr["colors"])
else:
self._cmap = ColorMap(legend_len)
# Group constraints/series with the axis they are to be plotted on
figure_data = ddict(list)
for i, (constraint, pivot) in enumerate(cp_pairs):
axis = self._layout.get_axis(constraint.trace_index if concat else i)
figure_data[axis].append((constraint, pivot))
# Plot each axis
for axis, series_list in figure_data.iteritems():
self.plot_axis(
axis,
series_list,
permute,
self._attr["concat"],
self._attr["args_to_forward"]
)
if self._attr["xlim"]:
axis.set_xlim(self._attr["xlim"])
if self._attr["ylim"]:
axis.set_ylim(self._attr["ylim"])
# Show legend
legend = self._fig.legend(self._attr["_legend_handles"],
self._attr["_legend_labels"],
loc='lower center',
ncol=self._attr["legend_ncol"],
borderaxespad=0.)
legend.get_frame().set_facecolor('#F4F4F4')
self._layout.finish(num_of_axes)
    def plot_axis(self, axis, series_list, permute, concat, args_to_forward):
        """Plot data (series_list) on a given axis.

        Abstract hook called once per axis by :meth:`_resolve`; subclasses
        must override it. Always raises NotImplementedError here.
        """
        raise NotImplementedError("Method Not Implemented")
| |
# (c) Copyright 2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import policy
from flask import abort
from flask import Blueprint
from flask import jsonify
from flask import request
import json
from keystoneauth1 import loading
from keystoneauth1 import session
from novaclient import client as novaClient
import os
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
bp = Blueprint('compute', __name__)
CONF = cfg.CONF
def get_compute_client(req):
    """Build a Nova client authenticated with the caller's Keystone token.

    The token comes from the request's X-Auth-Token header; project and
    endpoint settings come from the keystone_authtoken config section.
    Aborts the request with HTTP 500 if client construction fails.
    """
    try:
        plugin = loading.get_plugin_loader('v3token')
        auth = plugin.load_from_options(
            auth_url=CONF.keystone_authtoken.auth_url,
            token=req.headers.get('X-Auth-Token'),
            project_name=CONF.keystone_authtoken.project_name,
            project_domain_name=CONF.keystone_authtoken.project_domain_name)
        keystone_session = session.Session(
            auth=auth, verify=not CONF.keystone_authtoken.insecure)
        # Microversion 2.25 is required for live_migrate with
        # block_migration='auto'.
        return novaClient.Client(
            '2.25', endpoint_type="internalURL", session=keystone_session)
    except Exception as exc:
        LOG.error(exc)
        abort(500, 'Failed to get compute novaclient')
def complete_with_errors_response(msg, contents):
    """Build an HTTP 500 JSON response for a partially-failed operation."""
    payload = {'error_msg': msg, 'contents': contents}
    err_response = jsonify(payload)
    err_response.status_code = 500
    return err_response
@bp.route("/api/v2/compute/services/<hostname>", methods=['GET'])
@policy.enforce('lifecycle:get_compute')
def compute_services_status(hostname):
    """Get the compute services status for a compute host
    .. :quickref: Compute; Get the compute services status
    **Example Request**:
    .. sourcecode:: http
       GET /api/v2/compute/services/<hostname> HTTP/1.1
       Content-Type: application/json
    **Example Response**:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       {
           "nova-compute": true
       }
    Each service binary maps to a boolean: whether its status is 'enabled'.
    """
    # Serve canned data when running in mock/test mode.
    if cfg.CONF.testing.use_mock:
        mock_json = "tools/compute-mock-data.json"
        json_file = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), mock_json)
        with open(json_file) as f:
            return jsonify(json.load(f)['compute_services_status'])
    compute_client = get_compute_client(request)
    compute_services = compute_client.services.list(host=hostname)
    # 410 (Gone) when the host has no compute services at all.
    if not compute_services:
        msg = 'No compute service for %s' % hostname
        LOG.error(msg)
        abort(410, msg)
    # Map each service binary to whether it is currently enabled.
    services = {
        getattr(service, 'binary'):
            getattr(service, 'status', None) == 'enabled'
        for service in compute_services
        if getattr(service, 'binary', None)
    }
    return jsonify(services)
@bp.route("/api/v2/compute/services/<hostname>/disable", methods=['PUT'])
@policy.enforce('lifecycle:update_compute')
def compute_disable_services(hostname):
    """Disable the compute services for a compute host
    .. :quickref: Compute; Disable the compute services
    **Example Request**:
    .. sourcecode:: http
       PUT /api/v2/compute/services/<hostname>/disable HTTP/1.1
       Content-Type: application/json
    **Example Response**:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       [{
           "binary": "nova-compute",
           "id": 1
       }]
    """
    # mock for running nova disable service for a compute host
    if cfg.CONF.testing.use_mock:
        mock_json = "tools/compute-mock-data.json"
        json_file = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), mock_json)
        with open(json_file) as f:
            return jsonify(json.load(f)['disable_compute_services'])
    compute_client = get_compute_client(request)
    compute_services = compute_client.services.list(host=hostname)
    # 410 (Gone) when the host has no compute services at all.
    if len(compute_services) == 0:
        msg = 'No compute service for %s' % hostname
        LOG.error(msg)
        abort(410, msg)
    failed = []    # services whose disable call raised
    disabled = []  # services now (or already) disabled
    for service in compute_services:
        binary = getattr(service, 'binary', '')
        service_id = getattr(service, 'id')
        status = getattr(service, 'status', '')
        if status == 'enabled':
            try:
                compute_client.services.disable(hostname, binary)
                disabled.append({'id': service_id, 'binary': binary})
            except Exception as ex:
                failed.append(
                    {'id': service_id, 'binary': binary, 'error': str(ex)})
                # BUG FIX: the original '+'-concatenated two strings but
                # applied '%' only to the second one ('binary = % s'),
                # which has one (typo'd) placeholder for three arguments
                # and raised TypeError instead of logging.
                LOG.error(
                    'Failed to disable compute service for %s id = %s '
                    'binary = %s' % (hostname, service_id, binary))
                LOG.error(ex)
        else:
            # already disabled, will not call
            disabled.append({'id': service_id, 'binary': binary})
    # Partial failure -> 500 with both lists; full success -> 200 list.
    if len(failed) > 0:
        return complete_with_errors_response(
            'Completed disabling compute services with errors',
            {'failed': failed, 'disabled': disabled})
    return jsonify(disabled)
@bp.route("/api/v2/compute/services/<hostname>/enable", methods=['PUT'])
@policy.enforce('lifecycle:update_compute')
def compute_enable_services(hostname):
    """Enable the compute services for a compute host
    .. :quickref: Compute; Enable the compute services
    **Example Request**:
    .. sourcecode:: http
       PUT /api/v2/compute/services/<hostname>/enable HTTP/1.1
       Content-Type: application/json
    **Example Response**:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       [{
           "binary": "nova-compute",
           "id": 1
       }]
    """
    # mock for running nova enable service for a compute host
    if cfg.CONF.testing.use_mock:
        mock_json = "tools/compute-mock-data.json"
        json_file = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), mock_json)
        with open(json_file) as f:
            return jsonify(json.load(f)['enable_compute_services'])
    compute_client = get_compute_client(request)
    compute_services = compute_client.services.list(host=hostname)
    # 410 (Gone) when the host has no compute services at all.
    if len(compute_services) == 0:
        msg = 'No compute service for %s' % hostname
        LOG.error(msg)
        abort(410, msg)
    failed = []   # services whose enable call raised
    enabled = []  # services now (or already) enabled
    for service in compute_services:
        binary = getattr(service, 'binary', '')
        service_id = getattr(service, 'id')
        status = getattr(service, 'status', '')
        if status == 'disabled':
            try:
                compute_client.services.enable(hostname, binary)
                enabled.append({'id': service_id, 'binary': binary})
            except Exception as ex:
                failed.append(
                    {'id': service_id, 'binary': binary, 'error': str(ex)})
                # BUG FIX: the original '+'-concatenated two strings but
                # applied '%' only to the second one ('binary = % s'),
                # which has one (typo'd) placeholder for three arguments
                # and raised TypeError instead of logging.
                LOG.error(
                    'Failed to enable compute service for %s id = %s '
                    'binary = %s' % (hostname, service_id, binary))
                LOG.error(ex)
        else:
            # already enabled, will not call
            enabled.append({'id': service_id, 'binary': binary})
    # Partial failure -> 500 with both lists; full success -> 200 list.
    if len(failed) > 0:
        return complete_with_errors_response(
            'Completed enabling compute services with errors',
            {'failed': failed, 'enabled': enabled})
    return jsonify(enabled)
@bp.route("/api/v2/compute/services/<hostname>", methods=['DELETE'])
@policy.enforce('lifecycle:update_compute')
def compute_delete_services(hostname):
    """Delete the compute services for a compute host
    .. :quickref: Compute; Delete the compute services
    **Example Request**:
    .. sourcecode:: http
       DELETE /api/v2/compute/services/<hostname> HTTP/1.1
       Content-Type: application/json
    **Example Response**:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       [{
           "binary": "nova-compute",
           "id": 1
       }]
    """
    # mock for running nova delete service for a compute host
    if cfg.CONF.testing.use_mock:
        mock_json = "tools/compute-mock-data.json"
        json_file = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), mock_json)
        with open(json_file) as f:
            return jsonify(json.load(f)['delete_compute_services'])
    compute_client = get_compute_client(request)
    compute_services = compute_client.services.list(host=hostname)
    # 410 (Gone) when the host has no compute services at all.
    if len(compute_services) == 0:
        msg = 'No compute service for %s' % hostname
        LOG.error(msg)
        abort(410, msg)
    failed = []   # services whose delete call raised
    deleted = []  # services successfully deleted
    for service in compute_services:
        binary = getattr(service, 'binary', '')
        id = getattr(service, 'id')
        try:
            compute_client.services.delete(id)
            deleted.append({'id': id, 'binary': binary})
        except Exception as ex:
            failed.append({'id': id, 'binary': binary, 'error': str(ex)})
            LOG.error(
                'Failed to delete compute service for %s id = %s binary = %s'
                % (hostname, id, binary))
            LOG.error(ex)
    # Partial failure -> 500 with both lists; full success -> 200 list.
    if len(failed) > 0:
        return complete_with_errors_response(
            'Completed deleting compute services with errors',
            {'failed': failed, 'deleted': deleted})
    return jsonify(deleted)
@bp.route("/api/v2/compute/aggregates/<hostname>", methods=['DELETE'])
@policy.enforce('lifecycle:update_compute')
def compute_delete_aggregates(hostname):
    """Delete the aggregates for a compute host
    .. :quickref: Compute; Delete aggregates for a compute host
    **Example Request**:
    .. sourcecode:: http
       DELETE /api/v2/compute/aggregates/<hostname> HTTP/1.1
       Content-Type: application/json
    **Example Response**:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       [{
           "availability_zone": "test-az",
           "id": 3,
           "name": "agg_group3"
       }, {
           "availability_zone": null,
           "id": 1,
           "name": "agg_group1"
       }, {
           "availability_zone": null,
           "id": 2,
           "name": "agg_group2"
       }]
    """
    # mock for running nova delete aggregates for a compute host
    # mock contains partial failure
    if cfg.CONF.testing.use_mock:
        mock_json = "tools/compute-mock-data.json"
        json_file = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), mock_json)
        with open(json_file) as f:
            return complete_with_errors_response(
                'Completed deleting aggregates with errors',
                json.load(f)['delete_aggregates'])
    compute_client = get_compute_client(request)
    # list all the aggregates
    compute_aggregates = compute_client.aggregates.list()
    if len(compute_aggregates) == 0:
        msg = 'No aggregates found for %s ' % hostname
        LOG.info(msg)
        abort(410, msg)
    # get details so we can decide which one we need to
    # remove compute host
    aggregates = []
    for aggr in compute_aggregates:
        # NOTE(review): 'details' is fetched only for its 'hosts' list;
        # id/name/availability_zone are read from the list entry itself.
        details = compute_client.aggregates.get(aggr)
        id = getattr(aggr, 'id')
        name = getattr(aggr, 'name')
        az = getattr(aggr, 'availability_zone')
        if hostname in getattr(details, 'hosts', []):
            aggregates.append({
                'id': id, 'name': name, 'availability_zone': az})
    # 410 (Gone) when the host belongs to no aggregate.
    if len(aggregates) == 0:
        msg = 'No aggregates found for %s ' % hostname
        LOG.info(msg)
        abort(410, msg)
    failed = []   # aggregates whose remove_host call raised
    deleted = []  # aggregates this host was removed from
    for aggr in aggregates:
        id = aggr['id']
        name = aggr['name']
        az = aggr['availability_zone']
        try:
            # Removes this host from the aggregate; the aggregate
            # itself is not deleted.
            compute_client.aggregates.remove_host(id, hostname)
            deleted.append({'id': id, 'name': name, 'availability_zone': az})
        except Exception as ex:
            failed.append({
                'id': id, 'name': name,
                'availability_zone': az, 'error': str(ex)})
            LOG.error(
                'Failed to delete aggregate for %s id = %s name = %s '
                'availability_zone = %s' % (hostname, id, name, az))
            LOG.error(ex)
    # Partial failure -> 500 with both lists; full success -> 200 list.
    if len(failed) > 0:
        return complete_with_errors_response(
            'Completed deleting aggregates with errors',
            {'failed': failed, 'deleted': deleted})
    return jsonify(deleted)
@bp.route(
    "/api/v2/compute/instances/<src_hostname>/<target_hostname>/migrate",
    methods=['PUT'])
@policy.enforce('lifecycle:update_compute')
def compute_migrate_instances(src_hostname, target_hostname):
    """Migrate instances of a compute host to another compute host
    .. :quickref: Compute; Live migrate instances of a compute host
    **Example Request**:
    .. sourcecode:: http
       PUT
       /api/v2/compute/instances/<src_hostname>/<target_hostname>/migrate
       HTTP/1.1
       Content-Type: application/json
    **Example Response**:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       [{
           "id": "8279e65d-6e87-4a50-b789-96edd753fbb2",
           "name": "test3"
       }, {
           "id": "1d51f18f-27fd-4c34-a0aa-c07a5e9462e7",
           "name": "test2"
       }]
    """
    # mock for running nova instance live migrating for a compute host
    if cfg.CONF.testing.use_mock:
        mock_json = "tools/compute-mock-data.json"
        json_file = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), mock_json)
        with open(json_file) as f:
            return jsonify(json.load(f)['migrate_instances'])
    compute_client = get_compute_client(request)
    search_opts = {
        'all_tenants': 1,  # all tenants
        'host': src_hostname
    }
    instances = compute_client.servers.list(search_opts=search_opts)
    # 410 (Gone) when there is nothing to migrate.
    if len(instances) == 0:
        msg = 'No instances found for %s' % src_hostname
        LOG.info(msg)
        abort(410, msg)
    migrating = []  # list of migrating instance ids and names
    failed = []  # list of failed instance ids, names and errors
    for inst in instances:
        id = getattr(inst, 'id')
        name = getattr(inst, 'name')
        try:
            # Asynchronous: this only *starts* the live migration.
            compute_client.servers.live_migrate(
                id, target_hostname, block_migration='auto')
            migrating.append({'id': id, 'name': name})
        except Exception as ex:
            failed.append({'id': id, 'name': name, 'error': str(ex)})
            LOG.error(
                'Failed to start migrating instance of %s id = %s name = %s' %
                (src_hostname, id, name))
            LOG.error(ex)
    # Partial failure -> 500 with both lists; full success -> 200 list.
    if len(failed) > 0:
        return complete_with_errors_response(
            'Completed migrating instances with errors',
            {'failed': failed, 'migrating': migrating})
    return jsonify(migrating)
@bp.route("/api/v2/compute/instances/<hostname>", methods=['GET'])
@policy.enforce('lifecycle:get_compute')
def compute_get_instances(hostname):
    """Return instances of a compute host
    .. :quickref: Compute; Get instances of a compute host
    **Example Request**:
    .. sourcecode:: http
       GET /api/v2/compute/instances/<hostname> HTTP/1.1
       Content-Type: application/json
    **Example Response**:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       [{
           "id": "ab3622db-648b-435d-b9af-f279e57bd8c9",
           "name": "test4",
           "status": "ACTIVE"
       }]
    """
    # mock for running nova instance list indicating whether all instances for
    # a compute host are migrated
    if cfg.CONF.testing.use_mock:
        mock_json = "tools/compute-mock-data.json"
        json_file = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), mock_json)
        with open(json_file) as f:
            return jsonify(json.load(f)['get_instances'])
    compute_client = get_compute_client(request)
    try:
        # all_tenants=1: include instances from every tenant on this host.
        instances = compute_client.servers.list(
            search_opts={'all_tenants': 1, 'host': hostname})
        summaries = []
        for inst in instances:
            summaries.append({
                'id': getattr(inst, 'id'),
                'name': getattr(inst, 'name'),
                'status': getattr(inst, 'status')})
        return jsonify(summaries)
    except Exception as e:
        msg = \
            'Failed to get instances for compute host %s' % hostname
        LOG.error(msg)
        LOG.error(e)
        abort(500, msg)
| |
'Define categories of Action and their consequences in the World.'
__author__ = 'Nick Montfort'
__copyright__ = 'Copyright 2011 Nick Montfort'
__license__ = 'ISC'
__version__ = '0.5.0.0'
__status__ = 'Development'
import copy
import re
def generator(num):
    'Provides unique, increasing integers.'
    value = num
    while True:
        yield value
        value += 1
ACTION_ID = generator(1)
class Action(object):
    'Abstract base class for things done by an agent in the world.'

    def __init__(self, verb, agent, category, **keywords):
        if self.__class__ == Action:
            raise StandardError('Attempt to instantiate abstract base ' +
                                'class action_model.Action')
        self.id = ACTION_ID.next()
        self.verb = verb
        self.agent = agent
        self.cause = self.agent
        self.salience = 0.5
        # Optional keywords may override the defaults above.
        for i in ['salience', 'template', 'force']:
            if i in keywords:
                setattr(self, i, keywords[i])
        # Exactly one of these four booleans is True: the one matching
        # this Action's category.
        for i in ['behave', 'configure', 'modify', 'sense']:
            setattr(self, i, (category == i))
        self._category = category
        self.preconditions = []
        self.start = None
        self.final = False
        self.failed = []
        self.refusal = None
        self.enlightened = []

    def __str__(self):
        'Describes the action in a one-line string.'
        string = ':' + str(self.id) + ': '
        if self.refusal is not None:
            string += 'Refused '
        elif len(self.failed) > 0:
            string += 'Failed '
        string += self.verb.upper() + ' (' + self._category + ') '
        for i in ['agent', 'direct', 'indirect', 'direction', 'utterance',
                  'preposition', 'modality', 'force', 'manner', 'feature',
                  'old_value', 'new_value', 'old_link', 'old_parent',
                  'new_link', 'new_parent', 'target', 'cause', 'start']:
            if hasattr(self, i):
                string += i + '=' + str(getattr(self, i)) + ' '
        return string[:-1]

    @property
    def category(self):
        'Returns the category (behave, configure, etc.) as a lowercase string.'
        return self._category

    @property
    def end(self):
        "Return the action's end time. All actions have duration 1 now."
        return self.start + 1

    def check_refusal(self, world):
        'If the agent refuses to do the action, update the reason.'
        if (not self.agent == '@cosmos' and
            hasattr(world.item[self.agent], 'refuses')):
            agent = world.item[self.agent]
            for (wont_do, state, reason) in agent.refuses:
                if self.match_string(wont_do):
                    if type(state) == list:
                        # A list names the rooms where the refusal applies.
                        if world.room_of(self.agent) in state:
                            self.refusal = reason
                            break
                    elif state(world):
                        # Otherwise 'state' is a predicate over the world.
                        self.refusal = reason
                        break
        if self.refusal is None:
            if self.verb == 'leave':
                room = world.room_of(self.agent)
                if world.can_see(self.agent, str(room)):
                    if room.exit(self.direction) is None:
                        if self.direction not in room.exits:
                            self.refusal = ('[' + self.agent +
                                            '/s] [see/v] no way to do that')
                        else:
                            self.refusal = room.exits[self.direction]
                else:
                    if self.direction in ['up', 'down']:
                        self.refusal = ('[' + self.agent +
                                        '/s] [find/not/v] any way to ' +
                                        'go [direction]')
            if self.refusal is not None:
                # Raw string for the regex (avoids invalid-escape warnings).
                self.refusal = re.sub(r'\[\*', '[' + self.agent,
                                      self.refusal)

    def match_string(self, event_test):
        'Does the string indicate this action?'
        to_match = event_test.split()
        for i in to_match:
            if re.search(i, str(self)) is None:
                return False
        return True

    def undo(self, world):
        'Make the world as if this action had never happened.'
        self.change(world, False)

    def do(self, world):
        'Perform the action, updating the world.'
        to_be_done = []
        aware = set()
        self.start = world.ticks
        for actor in world.concept:
            # Did the actor see the agent or direct object (if any) beforehand?
            # If the actor performed the action, the actor is aware of it.
            if (actor == self.agent or world.can_see(actor, self.agent) or
                (hasattr(self, 'direct') and
                 world.can_see(actor, self.direct))):
                aware.add(actor)
        self.check_refusal(world)
        if self.refusal is None:
            self.check_preconditions(world)
            can_respond = world.respondents(self)
            if len(self.failed) == 0:
                for tag in can_respond:
                    if world.item[tag].prevent(world, self):
                        self.failed.append(['prevented_by', tag])
            if len(self.failed) == 0:
                for tag in can_respond:
                    to_be_done += world.item[tag].react(world, self)
                self.change(world)
                if hasattr(self, 'entails'):
                    to_be_done += self.entails(world)
            else:
                for tag in can_respond:
                    to_be_done += world.item[tag].react_to_failed(world, self)
        for actor in world.concept:
            # Did the actor see the agent at the end of the action, for
            # instance, if the agent entered a room?
            if world.can_see(actor, self.agent):
                aware.add(actor)
        for actor in aware:
            world.concept[actor].act[self.id] = copy.deepcopy(self)
        world.act[self.id] = self
        return to_be_done

    def moved_somewhere_different(self, actor):
        'Tells whether this action caused the actor to move elsewhere.'
        return (self.configure and self.direct == actor and
                not self.old_parent == self.new_parent)

    def change(self, world, making_change=True):
        'Alter the world. Only Modify and Configure actions do it.'
        pass

    def check_allowed(self, condition, world):
        'Does the "allowed" rule of the parent let the Item become a child?'
        head, tag, link, parent = condition
        reason = None
        # First, the Item cannot be a room; rooms can only be
        # children of @cosmos.
        if world.item[tag].room:
            reason = 'rooms_cannot_move'
        # Next, the Item can't be made the child of itself or
        # of any descendant of itself.
        elif tag in [parent] + world.ancestors(parent):
            reason = 'not_own_descendant'
        # Next, if the Item is an amount of Substance (liquid,
        # powder, etc.), there are different cases.
        elif world.item[tag].substance:
            substance = tag.partition('_')[0]
            # 'in' works if the amount is being placed in a
            # vessel, or if a source is being replenished, or if
            # the amount is being moved to the substance item.
            if link == 'in':
                if not ((world.item[parent].substance and
                         parent == substance) or
                        (hasattr(world.item[parent], 'source') and
                         world.item[parent].source == substance) or
                        (hasattr(world.item[parent], 'vessel') and
                         len(world.item[parent].children) == 0)):
                    reason = 'substance_contained'
            # 'of' does not work; Substances cannot be held by
            # themselves, without vessels.
            elif link == 'of':
                reason = 'substance_contained'
            # There is no case for 'on' -- it falls through to success.
            # 'on' generally works -- an amount can be poured
            # onto anything. A Configure action will be entailed
            # immediately and the amount will be moved to the root
            # Substance Item.
        # Check the Item's own allowed rule:
        elif not world.item[parent].allowed(tag, link, world):
            reason = head + "_" + link
        # Finally, if there have been no other failures, continue
        # to test to see if the parent, with this new child,
        # is still allowed in the grandparent, and so on up the
        # tree. This is done for now rather expensively. A copy
        # of the world is made and, in it, the item is added as a
        # child of the new parent. Then, the testing proceeds.
        elif reason is None and not world.item[parent].parent == '@cosmos':
            met = True
            test = copy.deepcopy(world)
            test.item[parent].add_child(link, tag)
            met &= test.item[parent].allowed(tag, link, test)
            while met and not parent == '@cosmos':
                tag = parent
                parent = test.item[tag].parent
                link = test.item[tag].link
                met &= test.item[parent].allowed(tag, link, test)
            if not met:
                reason = head + '_' + link
        return reason

    def check_preconditions(self, world):
        'Determine if any of the preconditions fail, and why.'
        for condition in self.pre(world):
            failure = []
            head = condition[0]
            if head == 'allowed':
                # BUG FIX: the original appended [reason, agent, tag], but
                # neither name was bound in this scope, raising NameError
                # whenever an 'allowed' precondition failed. Unpack the
                # condition and use this action's agent instead.
                _, tag, _link, _parent = condition
                reason = self.check_allowed(condition, world)
                if reason is not None:
                    failure.append([reason, self.agent, tag])
            elif head[:10] == 'can_access':
                _, agent, tag_list = condition
                met = False
                accessible_tags = world.accessible(agent)
                for tag in tag_list:
                    if tag in accessible_tags:
                        met = True
                if not met:
                    failure.append(condition)
            elif head == 'can_see':
                _, agent, tag = condition
                reason = world.prevents_sight(agent, tag)
                if reason is not None:
                    failure.append([reason, agent, tag])
            elif head == 'configure_to_different':
                _, child, link, parent = condition
                if (world.item[child].link == link and
                    world.item[child].parent == parent):
                    failure.append(condition)
            elif head == 'exit_exists':
                _, tag, direction = condition
                if direction not in world.room_of(tag).exits:
                    failure.append(condition)
            elif head == 'has_feature':
                _, tag, feature = condition
                if not hasattr(world.item[tag], feature):
                    failure.append(condition)
            elif head == 'has_value':
                _, tag, feature, value = condition
                if (hasattr(world.item[tag], feature) and
                    not getattr(world.item[tag], feature) == value):
                    failure.append(condition)
            elif head == 'modify_to_different':
                _, tag, feature, value = condition
                if (hasattr(world.item[tag], feature) and
                    getattr(world.item[tag], feature) == value):
                    failure.append(condition)
            elif head == 'never':
                failure.append([head + '_' + condition[1]])
            elif head == 'parent_is':
                _, child, link, parent = condition
                if (not world.item[child].link == link and
                    not world.item[child].parent == parent):
                    failure.append(condition)
            self.preconditions.append(((len(failure) == 0), condition))
            self.failed += failure

    def show(self):
        'Return verb, agent, cause, preconditions, type, any postcondition.'
        string = '\n'
        for (met, condition) in self.preconditions:
            if not type(condition) == str:
                condition = ' '.join(str(pre_part) for pre_part in condition)
            string += ['#####> ', '/ / / '][met] + condition + '\n'
        string += str(self) + '\n'
        if hasattr(self, 'post'):
            success = (len(self.failed) == 0) and self.refusal is None
            string += [' ##### ', r'\ \ \ '][success]
            string += ' '.join(str(post_part) for post_part in self.post())
            string += '\n'
        return string
class Behave(Action):
    'An action that itself changes nothing, e.g., jumping up and down.'
    def __init__(self, verb, agent, **keywords):
        # Behave actions may have 'direct', 'indirect', 'target',
        # 'direction' and/or 'utterance' keywords; each becomes an
        # attribute and is removed before Action.__init__ sees it.
        for i in ['direct', 'indirect', 'target', 'direction', 'utterance']:
            if i in keywords:
                setattr(self, i, keywords[i])
                del keywords[i]
        self.force = 0.2
        Action.__init__(self, verb, agent, 'behave', **keywords)
    def pre(self, _):
        """Preconditions for Behave:
        The agent must be able to access all objects. If trying to consume
        food or drink, it must be consumable. If trying to leave, an exit
        must exist."""
        pre_list = []
        if hasattr(self, 'direct'):
            pre_list.append(('can_access_direct', self.agent, [self.direct]))
        if hasattr(self, 'indirect'):
            pre_list.append(('can_access_indirect', self.agent,
                             [self.indirect]))
        if hasattr(self, 'target'):
            pre_list.append(('can_see', self.agent, self.target))
        if self.verb in ['drink', 'eat']:
            pre_list.append(('has_feature', self.direct, 'consumable'))
        if self.verb == 'leave':
            pre_list.append(('exit_exists', self.agent, self.direction))
        return pre_list
    def entails(self, world):
        """Entailed Actions for Behave:
        Configure the actor to a new Room after leaving, remove food after
        eating."""
        actions = []
        if len(self.failed) > 0:
            return actions
        # When an actor leaves in direction that is an exit, the Behave
        # action entails a new action: A Configure action that moves the actor
        # to the new room or through the door.
        room = world.room_of(self.agent)
        if self.verb == 'leave' and room.exit(self.direction) is not None:
            goal = room.exits[self.direction]
            link = None
            # Doors are entered 'through'; rooms are entered 'in'.
            if goal is not None and world.item[goal].door:
                link = 'through'
            else:
                link = 'in'
            new = Configure('enter', self.agent,
                            template='[agent/s] [arrive/v]',
                            direct=self.agent, new=(link, goal), salience=0.1)
            actions.append(new)
        if self.verb in ['drink', 'eat']:
            # Without a 'direct' object, consume the first child of the
            # 'indirect' vessel.
            if hasattr(self, 'direct'):
                to_be_consumed = self.direct
            else:
                _, to_be_consumed = world.item[self.indirect].children[0]
            # Substance amounts return to their root Substance Item;
            # ordinary food is removed from play (made 'of' @cosmos).
            if world.item[to_be_consumed].substance:
                new_parent = ('in', to_be_consumed.partition('_')[0])
            else:
                new_parent = ('of', '@cosmos')
            actions.append(Configure('polish_off', '@cosmos',
                                     direct=to_be_consumed,
                                     new=new_parent, salience=0))
        return actions
class Configure(Action):
    'An action that repositions an item in the item tree.'
    def __init__(self, verb, agent, **keywords):
        # Configure Actions must have 'direct' and 'new'.
        self.direct = keywords['direct']
        del keywords['direct']
        # 'new' is a (link, parent) pair, e.g. ('in', '@kitchen').
        self.new_link = keywords['new'][0]
        self.new_parent = keywords['new'][1]
        del keywords['new']
        # 'old' is optional; if missing, any initial link and parent are fine.
        if 'old' in keywords:
            self.old_link = keywords['old'][0]
            self.old_parent = keywords['old'][1]
            del keywords['old']
        self.force = 0.2
        Action.__init__(self, verb, agent, 'configure', **keywords)
    def set_old_if_unset(self, world):
        'Set old_link and old_parent if they have been left off.'
        if not hasattr(self, 'old_link') and not hasattr(self, 'old_parent'):
            self.old_link = world.item[self.direct].link
            self.old_parent = world.item[self.direct].parent
    def change(self, world, making_change=True):
        'Put the item in the new (or old) arrangement in the tree.'
        self.set_old_if_unset(world)
        # If the Action failed, it itself had no consequence in the world.
        # In this case, there is nothing to do or reverse. However, it's
        # necessary to set the old_link and old_parent in the previous step
        # so that they will be there when the failed Action is later checked.
        if len(self.failed) > 0:
            return
        seen_by = {}
        if making_change:
            for actor in world.concept:
                # Record pre-move visibility so post-move checks can tell
                # whether the item left each actor's sight.
                seen_by[actor] = world.can_see(actor, self.direct)
                if (actor in [self.agent, self.direct] or
                    world.can_see(actor, self.direct)):
                    # Before the Action, the Actor can see the Item.
                    # Update the Item's departure from the "from" Item.
                    new_from = copy.deepcopy(world.item[self.old_parent])
                    new_from.remove_child(self.old_link, self.direct,
                                          making_change)
                    if not world.can_see(actor, self.old_parent):
                        new_from.blank()
                    world.transfer(new_from, actor, self.end)
        # Now make the event's changes in the world.
        world.item[self.old_parent].remove_child(self.old_link, self.direct,
                                                 making_change)
        world.item[self.new_parent].add_child(self.new_link, self.direct,
                                              making_change)
        item = world.item[self.direct]
        if making_change:
            item.parent = self.new_parent
            item.link = self.new_link
            # Propagate the move into each actor's concept of the world.
            for actor in world.concept:
                room_tag = str(world.room_of(actor))
                # If the item disappeared from sight, transfer it out...
                if seen_by[actor] and not world.can_see(actor, self.direct):
                    world.transfer_out(item, actor, self.end)
                if (actor == self.agent or actor == self.direct or
                    world.can_see(actor, self.direct)):
                    # After the Action, the Actor can see the Item.
                    # Update the Item itself ...
                    world.transfer(item, actor, self.end)
                new_to = copy.deepcopy(world.item[self.new_parent])
                if (actor == self.new_parent or
                    world.can_see(actor, self.new_parent)):
                    # If the "to" Item is visible, update it fully.
                    world.transfer(new_to, actor, self.end)
                    # If the "to" Item is a Room, update other visible Rooms.
                    if new_to.room:
                        for view_tag in new_to.view:
                            if world.can_see(actor, view_tag):
                                world.transfer(world.item[view_tag], actor,
                                               self.end)
                else:
                    if (actor == self.direct and
                        not world.can_see(actor, room_tag)):
                        # Moved into a dark room; blank out the "to" item.
                        new_to.blank()
                    new_to.add_child(self.new_link, self.direct,
                                     making_change)
                    world.transfer(new_to, actor, self.end)
                if (room_tag in world.concept[actor].item and
                    world.concept[actor].item[room_tag].blanked and
                    world.can_see(actor, room_tag)):
                    # The actor's room just became visible (e.g. a light
                    # source arrived): refresh it and queue a look.
                    world.transfer(world.item[room_tag], actor, self.end)
                    look_at = Sense('examine', actor,
                                    modality='sight', direct=room_tag)
                    look_at.cause = ':' + str(self.id) + ':'
                    self.enlightened.append(look_at)
        else:
            # Undo: restore the original link and parent.
            item.parent = self.old_parent
            item.link = self.old_link
    def pre(self, world):
        """Preconditions for Configure:
        Only @cosmos may Configure Items that are part_of others, Doors, or
        SharedThings. Configure requires a new link and parent. To be configured
        from "in" a container, the container (if it opens) must be open. To go
        "in" or "through" something, that Item must (if it opens) be open. Be
        able to access the Item and (in most cases) the new parent. The Item
        must be allowed in the new parent."""
        pre_list = []
        if not self.agent == '@cosmos':
            if world.item[self.direct].link == 'part_of':
                pre_list.append(('never', 'configure_parts'))
            if world.item[self.direct].door:
                pre_list.append(('never', 'configure_doors'))
            if hasattr(world.item[self.direct], 'sharedthing'):
                pre_list.append(('never', 'configure_sharedthings'))
        pre_list.append(('configure_to_different', self.direct,
                         self.new_link, self.new_parent))
        if (hasattr(self, 'old_link') and self.old_link == 'in' and
            hasattr(world.item[self.old_parent], 'open')):
            pre_list.append(('has_value', self.old_parent, 'open', True))
        if (self.new_link in ['in', 'through'] and
            hasattr(world.item[self.new_parent], 'open')):
            pre_list.append(('has_value', self.new_parent, 'open', True))
        if hasattr(self, 'old_link') and hasattr(self, 'old_parent'):
            pre_list.append(('parent_is', self.direct, self.old_link,
                             self.old_parent))
        pre_list.append(('can_access_direct', self.agent, [self.direct]))
        if (not self.new_parent == '@cosmos' and
            not world.item[self.new_parent].room):
            pre_list.append(('can_access_indirect', self.agent,
                             [self.new_parent]))
        pre_list.append(('allowed', self.direct, self.new_link,
                         self.new_parent))
        return pre_list
    def post(self):
        'Postcondition: Item is in a new arrangement.'
        return ('parent_is', self.direct, self.new_link, self.new_parent)
    def entails(self, world):
        """Entailed Actions for Configure:
        Passing through Doors into new Rooms, looking at new Rooms,
        replenishing a source with a Substance and evaporating/dissipating a
        Substance. Also, looking at newly-lit Items."""
        actions = []
        if len (self.failed) > 0:
            return actions
        if self.new_link == 'through':
            if world.item[self.new_parent].door:
                # A door connects exactly two rooms; the goal is the one
                # the item did not come from.
                rooms = world.item[self.new_parent].connects[:]
                rooms.remove(self.old_parent)
                goal = rooms[0]
                actions.append(Configure('pass_through', self.agent,
                                         template=('[agent/s] [emerge/v] from [' +
                                                   self.new_parent + '/o]'),
                                         new=('in', goal), direct=self.direct))
            else:
                room = self.new_parent
                actions.append(Configure('fall', self.agent,
                                         template='[direct/s] [drop/v] to the ground',
                                         new=('in', room), direct=self.direct))
        elif (world.item[self.direct].actor and
              not self.old_parent == self.new_parent and
              not self.new_parent == '@cosmos'):
            # An actor who arrived somewhere new automatically looks around.
            room = self.new_parent
            look_at = Sense('examine', self.direct,
                            modality='sight', direct=room)
            look_at.cause = ':' + str(self.id) + ':'
            actions.append(look_at)
        elif world.item[self.direct].substance:
            substance = self.direct.partition('_')[0]
            if (self.new_link == 'in' and
                hasattr(world.item[self.old_parent], 'source') and
                world.item[self.old_parent].source == substance):
                # An amount was taken from a source: refill the source.
                _, amount = world.item[substance].children[0]
                actions.append(Configure('replenish', '@cosmos',
                                         new=('in', self.old_parent),
                                         direct=amount, salience=0))
            elif (not hasattr(world.item[self.new_parent], 'vessel') and
                  not (hasattr(world.item[self.new_parent], 'source') and
                       world.item[self.new_parent].source == substance) and
                  not self.new_parent == substance):
                # The substance was poured onto something, and needs to vanish.
                actions.append(Configure('vanish', '@cosmos',
                                         new=('in', substance),
                                         template='the [' + self.direct +
                                                  '/s] [is/v] gone [now]',
                                         direct=self.direct,))
        actions += self.enlightened
        return actions
class Modify(Action):
    "An action that changes some Item's state, the value of a feature."
    def __init__(self, verb, agent, **keywords):
        # Modify actions must have 'direct', 'feature', and 'new'
        self.direct = keywords['direct']
        del keywords['direct']
        self.feature = keywords['feature']
        del keywords['feature']
        self.new_value = keywords['new']
        del keywords['new']
        # 'old' is optional; if missing, any initial value is fine
        if 'old' in keywords:
            self.old_value = keywords['old']
            del keywords['old']
        # 'indirect' is optional, used only when an agent is using a tool
        if 'indirect' in keywords:
            self.indirect = keywords['indirect']
            del keywords['indirect']
        # Fixed amount of physical force exerted by a Modify action.
        self.force = 0.2
        # Remaining keywords are passed through to the base Action.
        Action.__init__(self, verb, agent, 'modify', **keywords)
    def change(self, world, making_change=True):
        'Alter the state of the Item to the new (or old) one.'
        # If attributes are missing, indicating that any values work for this
        # modify event, they are set with using the values in the world at
        # this point. This allows the event to be undone later with the
        # correct old value put back into place.
        #
        item = world.item[self.direct]
        if not hasattr(self, 'old_value'):
            self.old_value = getattr(item, self.feature)
        # If the event failed, it itself had no consequence in the world.
        # Thus there is nothing to do or reverse.
        if len(self.failed) > 0:
            return
        # Make the change.  making_change selects new_value (True -> index 1)
        # or old_value (False -> index 0, i.e., undo).
        value = (self.old_value, self.new_value)[making_change]
        setattr(item, self.feature, value)
        # Update the item in actors who can perceive this event. Also, check
        # to see if the actor's room became visible and needs an update.
        if making_change:
            for actor in world.concept:
                if (actor in [self.agent, self.direct] or
                    world.can_see(actor, self.direct)):
                    world.transfer(item, actor, self.end)
                room_tag = str(world.room_of(actor))
                # A previously "blanked" (unseen) room that is now visible,
                # e.g. because a light was switched on, gets transferred to
                # the actor's concept and entails a fresh look at it.
                if (room_tag in world.concept[actor].item and
                    world.concept[actor].item[room_tag].blanked and
                    world.can_see(actor, room_tag)):
                    world.transfer(world.item[room_tag], actor, self.end)
                    look_at = Sense('examine', actor,
                                    modality='sight', direct=room_tag)
                    look_at.cause = ':' + str(self.id) + ':'
                    self.enlightened.append(look_at)
    def pre(self, world):
        """Preconditions for Modify:
        The Item must have the feature being modified. Modify requires a
        different value. The old value, if specified, must match. The item
        must be accessible by the agent. If opening an Item, it must (if
        lockable) be unlocked. If burning an Item, fire must be accessible.
        If unlocking an Item, the key must be accessible."""
        pre_list = [('has_feature', self.direct, self.feature)]
        pre_list.append(('modify_to_different', self.direct, self.feature,
                         self.new_value))
        if hasattr(self, 'old_value'):
            pre_list.append(('has_value', self.direct, self.feature,
                             self.old_value))
        pre_list.append(('can_access_direct', self.agent, [self.direct]))
        if self.feature == 'open' and self.new_value:
            # Opening something lockable requires that it be unlocked.
            if hasattr(world.item[self.direct], 'locked'):
                pre_list.append(('has_value', self.direct, 'locked', False))
        if self.feature == 'burnt':
            # Burning requires access to at least one flaming item.
            flames = [i for i in world.item if
                      hasattr(world.item[i], 'flame') and world.item[i].flame]
            pre_list.append(('can_access_flames', self.agent, flames))
        if self.feature == 'locked':
            if hasattr(world.item[self.direct], 'key'):
                pre_list.append(('can_access_key', self.agent,
                                 [world.item[self.direct].key]))
            else:
                # No key exists: only the cosmos can ever (un)lock it.
                if not self.agent == '@cosmos':
                    pre_list.append(('never', 'permanently_locked'))
        return pre_list
    def post(self):
        "Postcondition: Item's feature has a new value."
        return ('has_value', self.direct, self.feature, str(self.new_value))
    def entails(self, _):
        'Entailed Actions for Modify: Just looking at newly-lit Items.'
        actions = self.enlightened
        return actions
class Sense(Action):
    'A perception that can update a concept.'
    def __init__(self, verb, agent, **keywords):
        # 'direct' and 'modality' are mandatory; pop them off so they are
        # not forwarded to the base Action initializer.
        self.direct = keywords.pop('direct')
        self.modality = keywords.pop('modality')
        # Perceiving exerts no physical force on the world.
        self.force = 0.0
        Action.__init__(self, verb, agent, 'sense', **keywords)
    def pre(self, _):
        """Preconditions for Sense:
        The agent must be able to see the direct object if looking, access it
        if touching."""
        preconditions = []
        if self.modality == 'sight':
            preconditions.append(('can_see', self.agent, self.direct))
        elif self.modality == 'touch':
            preconditions.append(('can_access_direct', self.agent,
                                  [self.direct]))
        return preconditions
| |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import importutils
import six
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.agent.linux import iptables_manager
from neutron.common import constants as constants
from neutron.common import ipv6_utils
from neutron.common import log
from neutron.i18n import _LI
from neutron.openstack.common import log as logging
from neutron.services.metering.drivers import abstract_driver
LOG = logging.getLogger(__name__)
# Namespace and chain-name building blocks for router metering.
NS_PREFIX = 'qrouter-'
WRAP_NAME = 'neutron-meter'
EXTERNAL_DEV_PREFIX = 'qg-'
# All metered traffic is funneled through this FORWARD sub-chain.
TOP_CHAIN = WRAP_NAME + "-FORWARD"
# Infixes that distinguish per-label "rules" chains from "label" chains.
RULE = '-r-'
LABEL = '-l-'
# Register the configuration options this driver depends on.
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
cfg.CONF.register_opts(interface.OPTS)
class IptablesManagerTransaction(object):
    """Context manager that batches nested iptables updates.

    Transactions on the same IptablesManager are reference-counted; the
    manager's rules are applied exactly once, when the outermost
    transaction exits.
    """

    __transactions = {}

    def __init__(self, im):
        self.im = im
        # Bump the nesting depth for this manager (0 if unseen so far).
        self.__transactions[im] = self.__transactions.get(im, 0) + 1

    def __enter__(self):
        return self.im

    def __exit__(self, type, value, traceback):
        depth = self.__transactions.get(self.im)
        if depth == 1:
            # Outermost transaction: push the accumulated rules and drop
            # the bookkeeping entry for this manager.
            self.im.apply()
            del self.__transactions[self.im]
        else:
            self.__transactions[self.im] = depth - 1
class RouterWithMetering(object):
    """Bundles a router dict with the iptables state used to meter it."""

    def __init__(self, conf, router):
        self.conf = conf
        self.id = router['id']
        self.router = router
        # Operate inside the router's namespace when namespaces are in
        # use; otherwise work in the root namespace.
        if conf.use_namespaces:
            self.ns_name = NS_PREFIX + self.id
        else:
            self.ns_name = None
        self.iptables_manager = iptables_manager.IptablesManager(
            binary_name=WRAP_NAME,
            namespace=self.ns_name,
            use_ipv6=ipv6_utils.is_enabled())
        # label id -> label dict for labels currently installed.
        self.metering_labels = {}
class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver):
    """Meters router traffic with per-label iptables chains and counters."""
    def __init__(self, plugin, conf):
        self.plugin = plugin
        self.conf = conf or cfg.CONF
        # router id -> RouterWithMetering for every router seen so far.
        self.routers = {}
        if not self.conf.interface_driver:
            raise SystemExit(_('An interface driver must be specified'))
        LOG.info(_LI("Loading interface driver %s"),
                 self.conf.interface_driver)
        self.driver = importutils.import_object(self.conf.interface_driver,
                                                self.conf)
    def _update_router(self, router):
        """Create or refresh the cached RouterWithMetering for `router`."""
        # NOTE(review): the default RouterWithMetering is constructed
        # eagerly even when the router is already cached (dict.get
        # evaluates its default argument unconditionally).
        r = self.routers.get(router['id'],
                             RouterWithMetering(self.conf, router))
        r.router = router
        self.routers[r.id] = r
        return r
    @log.log
    def update_routers(self, context, routers):
        """Sync iptables metering state to the given router list."""
        # disassociate removed routers
        router_ids = set(router['id'] for router in routers)
        for router_id, rm in six.iteritems(self.routers):
            if router_id not in router_ids:
                self._process_disassociate_metering_label(rm.router)
        for router in routers:
            old_gw_port_id = None
            old_rm = self.routers.get(router['id'])
            if old_rm:
                old_gw_port_id = old_rm.router['gw_port_id']
            gw_port_id = router['gw_port_id']
            if gw_port_id != old_gw_port_id:
                # Gateway port changed: rebuild the chains from scratch,
                # batched in a single iptables transaction.
                if old_rm:
                    with IptablesManagerTransaction(old_rm.iptables_manager):
                        self._process_disassociate_metering_label(router)
                        if gw_port_id:
                            self._process_associate_metering_label(router)
            elif gw_port_id:
                self._process_associate_metering_label(router)
    @log.log
    def remove_router(self, context, router_id):
        """Forget a deleted router entirely."""
        if router_id in self.routers:
            del self.routers[router_id]
    def get_external_device_name(self, port_id):
        """Return the gateway device name for a port, truncated to the
        interface driver's maximum device-name length."""
        return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
    def _process_metering_label_rules(self, rm, rules, label_chain,
                                      rules_chain):
        """Translate metering rules into iptables rules in `rules_chain`."""
        im = rm.iptables_manager
        ext_dev = self.get_external_device_name(rm.router['gw_port_id'])
        if not ext_dev:
            return
        for rule in rules:
            remote_ip = rule['remote_ip_prefix']
            # Match on the gateway device plus source (egress) or
            # destination (ingress) prefix.
            if rule['direction'] == 'egress':
                dir_opt = '-o %s -s %s' % (ext_dev, remote_ip)
            else:
                dir_opt = '-i %s -d %s' % (ext_dev, remote_ip)
            if rule['excluded']:
                # Excluded traffic must bypass the label chain before any
                # other rule can count it, so RETURN goes at the top.
                ipt_rule = '%s -j RETURN' % dir_opt
                im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
                                           wrap=False, top=True)
            else:
                ipt_rule = '%s -j %s' % (dir_opt, label_chain)
                im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
                                           wrap=False, top=False)
    def _process_associate_metering_label(self, router):
        """Install label and rules chains for every label on the router."""
        self._update_router(router)
        rm = self.routers.get(router['id'])
        with IptablesManagerTransaction(rm.iptables_manager):
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                label_id = label['id']
                # One chain accumulates the counters (label chain); a
                # second one holds the classification rules that jump to it.
                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              LABEL + label_id,
                                                              wrap=False)
                rm.iptables_manager.ipv4['filter'].add_chain(label_chain,
                                                             wrap=False)
                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              RULE + label_id,
                                                              wrap=False)
                rm.iptables_manager.ipv4['filter'].add_chain(rules_chain,
                                                             wrap=False)
                # Hook the rules chain into the shared FORWARD sub-chain.
                rm.iptables_manager.ipv4['filter'].add_rule(TOP_CHAIN, '-j ' +
                                                            rules_chain,
                                                            wrap=False)
                # Empty match-all rule on the label chain; its counters are
                # what get_traffic_counters reads later.
                rm.iptables_manager.ipv4['filter'].add_rule(label_chain,
                                                            '',
                                                            wrap=False)
                rules = label.get('rules')
                if rules:
                    self._process_metering_label_rules(rm, rules,
                                                       label_chain,
                                                       rules_chain)
                rm.metering_labels[label_id] = label
    def _process_disassociate_metering_label(self, router):
        """Remove the chains of labels previously installed for `router`."""
        rm = self.routers.get(router['id'])
        if not rm:
            return
        with IptablesManagerTransaction(rm.iptables_manager):
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                label_id = label['id']
                # Skip labels this driver never installed.
                if label_id not in rm.metering_labels:
                    continue
                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              LABEL + label_id,
                                                              wrap=False)
                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              RULE + label_id,
                                                              wrap=False)
                rm.iptables_manager.ipv4['filter'].remove_chain(label_chain,
                                                                wrap=False)
                rm.iptables_manager.ipv4['filter'].remove_chain(rules_chain,
                                                                wrap=False)
                del rm.metering_labels[label_id]
    @log.log
    def add_metering_label(self, context, routers):
        """Install chains for newly created metering labels."""
        for router in routers:
            self._process_associate_metering_label(router)
    @log.log
    def update_metering_label_rules(self, context, routers):
        """Rebuild the rules of existing labels on the given routers."""
        for router in routers:
            self._update_metering_label_rules(router)
    def _update_metering_label_rules(self, router):
        """Flush and repopulate each label's rules chain; the label chain
        (and therefore its counters) is left untouched."""
        rm = self.routers.get(router['id'])
        if not rm:
            return
        with IptablesManagerTransaction(rm.iptables_manager):
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                label_id = label['id']
                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              LABEL + label_id,
                                                              wrap=False)
                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              RULE + label_id,
                                                              wrap=False)
                rm.iptables_manager.ipv4['filter'].empty_chain(rules_chain,
                                                               wrap=False)
                rules = label.get('rules')
                if rules:
                    self._process_metering_label_rules(rm, rules,
                                                       label_chain,
                                                       rules_chain)
    @log.log
    def remove_metering_label(self, context, routers):
        """Tear down chains for deleted metering labels."""
        for router in routers:
            self._process_disassociate_metering_label(router)
    @log.log
    def get_traffic_counters(self, context, routers):
        """Read (and zero) per-label traffic counters, summed per label."""
        accs = {}
        for router in routers:
            rm = self.routers.get(router['id'])
            if not rm:
                continue
            for label_id, label in rm.metering_labels.items():
                chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL +
                                                        label_id, wrap=False)
                # zero=True resets the kernel counters after reading, so
                # each call reports traffic since the previous call.
                chain_acc = rm.iptables_manager.get_traffic_counters(
                    chain, wrap=False, zero=True)
                if not chain_acc:
                    continue
                # A label may span several routers; accumulate into one
                # entry per label id.
                acc = accs.get(label_id, {'pkts': 0, 'bytes': 0})
                acc['pkts'] += chain_acc['pkts']
                acc['bytes'] += chain_acc['bytes']
                accs[label_id] = acc
        return accs
| |
"""
Discrete Fourier Transforms - basic.py
"""
# Created by Pearu Peterson, August,September 2002
from __future__ import division, print_function, absolute_import
__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
'fft2','ifft2']
from numpy import zeros, swapaxes
import numpy
from . import _fftpack
import atexit
atexit.register(_fftpack.destroy_zfft_cache)
atexit.register(_fftpack.destroy_zfftnd_cache)
atexit.register(_fftpack.destroy_drfft_cache)
atexit.register(_fftpack.destroy_cfft_cache)
atexit.register(_fftpack.destroy_cfftnd_cache)
atexit.register(_fftpack.destroy_rfft_cache)
del atexit
def istype(arr, typeclass):
    """Tell whether the array's scalar type derives from *typeclass*."""
    scalar_type = arr.dtype.type
    return issubclass(scalar_type, typeclass)
def _datacopied(arr, original):
"""
Strict check for `arr` not sharing any data with `original`,
under the assumption that arr = asarray(original)
"""
if arr is original:
return False
if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):
return False
return arr.base is None
# XXX: single precision FFTs partially disabled due to accuracy issues
# for large prime-sized inputs.
#
# See http://permalink.gmane.org/gmane.comp.python.scientific.devel/13834
# ("fftpack test failures for 0.8.0b1", Ralf Gommers, 17 Jun 2010,
# @ scipy-dev)
#
# These should be re-enabled once the problems are resolved
def _is_safe_size(n):
"""
Is the size of FFT such that FFTPACK can handle it in single precision
with sufficient accuracy?
Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has those
"""
n = int(n)
if n == 0:
return True
# Divide by 3 until you can't, then by 5 until you can't
for c in (3, 5):
while n % c == 0:
n //= c
# Return True if the remainder is a power of 2
return not n & (n-1)
def _fake_crfft(x, n, *a, **kw):
    # Use the single-precision routine only for sizes FFTPACK handles
    # accurately; otherwise compute in double precision and downcast.
    if not _is_safe_size(n):
        return _fftpack.zrfft(x, n, *a, **kw).astype(numpy.complex64)
    return _fftpack.crfft(x, n, *a, **kw)
def _fake_cfft(x, n, *a, **kw):
    # Single-precision complex FFT with a double-precision fallback
    # (downcast afterwards) for inaccurate sizes.
    if not _is_safe_size(n):
        return _fftpack.zfft(x, n, *a, **kw).astype(numpy.complex64)
    return _fftpack.cfft(x, n, *a, **kw)
def _fake_rfft(x, n, *a, **kw):
    # Single-precision real FFT with a double-precision fallback
    # (downcast afterwards) for inaccurate sizes.
    if not _is_safe_size(n):
        return _fftpack.drfft(x, n, *a, **kw).astype(numpy.float32)
    return _fftpack.rfft(x, n, *a, **kw)
def _fake_cfftnd(x, shape, *a, **kw):
    # Every axis length must be safe for the single-precision N-D routine;
    # otherwise transform in double precision and downcast.
    if all(_is_safe_size(dim) for dim in shape):
        return _fftpack.cfftnd(x, shape, *a, **kw)
    return _fftpack.zfftnd(x, shape, *a, **kw).astype(numpy.complex64)
# Dispatch tables mapping an input dtype to the FFTPACK routine that
# implements the transform for it.  The commented-out entries are the
# plain single-precision routines; they are replaced by the _fake_*
# fallbacks above until the accuracy issues noted there are resolved.
_DTYPE_TO_FFT = {
#        numpy.dtype(numpy.float32): _fftpack.crfft,
        numpy.dtype(numpy.float32): _fake_crfft,
        numpy.dtype(numpy.float64): _fftpack.zrfft,
#        numpy.dtype(numpy.complex64): _fftpack.cfft,
        numpy.dtype(numpy.complex64): _fake_cfft,
        numpy.dtype(numpy.complex128): _fftpack.zfft,
}
_DTYPE_TO_RFFT = {
#        numpy.dtype(numpy.float32): _fftpack.rfft,
        numpy.dtype(numpy.float32): _fake_rfft,
        numpy.dtype(numpy.float64): _fftpack.drfft,
}
_DTYPE_TO_FFTN = {
#        numpy.dtype(numpy.complex64): _fftpack.cfftnd,
        numpy.dtype(numpy.complex64): _fake_cfftnd,
        numpy.dtype(numpy.complex128): _fftpack.zfftnd,
#        numpy.dtype(numpy.float32): _fftpack.cfftnd,
        numpy.dtype(numpy.float32): _fake_cfftnd,
        numpy.dtype(numpy.float64): _fftpack.zfftnd,
}
def _asfarray(x):
"""Like numpy asfarray, except that it does not modify x dtype if x is
already an array with a float dtype, and do not cast complex types to
real."""
if hasattr(x, "dtype") and x.dtype.char in numpy.typecodes["AllFloat"]:
# 'dtype' attribute does not ensure that the
# object is an ndarray (e.g. Series class
# from the pandas library)
if x.dtype == numpy.half:
# no half-precision routines, so convert to single precision
return numpy.asarray(x, dtype=numpy.float32)
return numpy.asarray(x, dtype=x.dtype)
else:
# We cannot use asfarray directly because it converts sequences of
# complex to sequence of real
ret = numpy.asarray(x)
if ret.dtype == numpy.half:
return numpy.asarray(ret, dtype=numpy.float32)
elif ret.dtype.char not in numpy.typecodes["AllFloat"]:
return numpy.asfarray(x)
return ret
def _fix_shape(x, n, axis):
""" Internal auxiliary function for _raw_fft, _raw_fftnd."""
s = list(x.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
x = x[index]
return x, False
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s,x.dtype.char)
z[index] = x
return z, True
def _raw_fft(x, n, axis, direction, overwrite_x, work_function):
""" Internal auxiliary function for fft, ifft, rfft, irfft."""
if n is None:
n = x.shape[axis]
elif n != x.shape[axis]:
x, copy_made = _fix_shape(x,n,axis)
overwrite_x = overwrite_x or copy_made
if n < 1:
raise ValueError("Invalid number of FFT data points "
"(%d) specified." % n)
if axis == -1 or axis == len(x.shape)-1:
r = work_function(x,n,direction,overwrite_x=overwrite_x)
else:
x = swapaxes(x, axis, -1)
r = work_function(x,n,direction,overwrite_x=overwrite_x)
r = swapaxes(r, axis, -1)
return r
def fft(x, n=None, axis=-1, overwrite_x=False):
    """
    Return the discrete Fourier transform of a real or complex sequence.

    The returned complex array contains ``y(0), y(1),..., y(n-1)`` where
    ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``.

    Parameters
    ----------
    x : array_like
        Array to Fourier transform.
    n : int, optional
        Length of the transform; `x` is truncated (``n < x.shape[axis]``)
        or zero-padded (``n > x.shape[axis]``) along `axis`.  The default
        is ``x.shape[axis]``.
    axis : int, optional
        Axis along which the FFTs are computed; defaults to the last axis.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; default False.

    Returns
    -------
    z : complex ndarray
        The spectrum in standard packing:
        ``[y(0), y(1), ..., y(n/2), y(1-n/2), ..., y(-1)]`` for even `n`,
        ``[y(0), y(1), ..., y((n-1)/2), y(-(n-1)/2), ..., y(-1)]`` for odd
        `n`.  Use `fftshift` to center the zero-frequency term.

    Raises
    ------
    ValueError
        If ``n < 1`` or the input dtype is unsupported.

    See Also
    --------
    ifft : Inverse FFT
    rfft : FFT of a real sequence

    Notes
    -----
    Single and double precision are supported; half precision is promoted
    to single and non-float input to double.  Real-valued input is routed
    to a real-FFT routine, which is roughly twice as fast.  The transform
    is most efficient when `n` is a power of two.
    """
    tmp = _asfarray(x)
    try:
        work_function = _DTYPE_TO_FFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)

    complex_input = istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)
    if not complex_input:
        # Non-complex input is always allowed to be overwritten.
        overwrite_x = 1
    overwrite_x = overwrite_x or _datacopied(tmp, x)

    if n is None:
        n = tmp.shape[axis]
    elif n != tmp.shape[axis]:
        tmp, copy_made = _fix_shape(tmp, n, axis)
        overwrite_x = overwrite_x or copy_made
    if n < 1:
        raise ValueError("Invalid number of FFT data points "
                         "(%d) specified." % n)

    if axis == -1 or axis == len(tmp.shape) - 1:
        return work_function(tmp, n, 1, 0, overwrite_x)
    # FFTPACK transforms the last axis only: swap, transform, swap back.
    tmp = swapaxes(tmp, axis, -1)
    tmp = work_function(tmp, n, 1, 0, overwrite_x)
    return swapaxes(tmp, axis, -1)
def ifft(x, n=None, axis=-1, overwrite_x=False):
    """
    Return the inverse discrete Fourier transform of a real or complex
    sequence.

    The returned complex array contains ``y(0), y(1),..., y(n-1)`` where
    ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``.

    Parameters
    ----------
    x : array_like
        Transformed data to invert.
    n : int, optional
        Length of the inverse transform; `x` is truncated or zero-padded
        to this length along `axis`.  Defaults to ``x.shape[axis]``.
    axis : int, optional
        Axis along which the inverse FFTs are computed; defaults to the
        last axis.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; default False.

    Returns
    -------
    ifft : ndarray
        The inverse discrete Fourier transform.

    Raises
    ------
    ValueError
        If ``n < 1`` or the input dtype is unsupported.

    See Also
    --------
    fft : Forward FFT

    Notes
    -----
    Single and double precision are supported; half precision is promoted
    to single and non-float input to double.  Real-valued input is routed
    to a real-IFFT routine, which is roughly twice as fast.
    """
    tmp = _asfarray(x)
    try:
        work_function = _DTYPE_TO_FFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)

    complex_input = istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)
    if not complex_input:
        # Non-complex input is always allowed to be overwritten.
        overwrite_x = 1
    overwrite_x = overwrite_x or _datacopied(tmp, x)

    if n is None:
        n = tmp.shape[axis]
    elif n != tmp.shape[axis]:
        tmp, copy_made = _fix_shape(tmp, n, axis)
        overwrite_x = overwrite_x or copy_made
    if n < 1:
        raise ValueError("Invalid number of FFT data points "
                         "(%d) specified." % n)

    if axis == -1 or axis == len(tmp.shape) - 1:
        return work_function(tmp, n, -1, 1, overwrite_x)
    # FFTPACK transforms the last axis only: swap, transform, swap back.
    tmp = swapaxes(tmp, axis, -1)
    tmp = work_function(tmp, n, -1, 1, overwrite_x)
    return swapaxes(tmp, axis, -1)
def rfft(x, n=None, axis=-1, overwrite_x=False):
    """
    Discrete Fourier transform of a real sequence.

    Parameters
    ----------
    x : array_like, real-valued
        The data to transform.
    n : int, optional
        Length of the transform; `x` is truncated or zero-padded to this
        length along `axis`.  Defaults to ``x.shape[axis]``.
    axis : int, optional
        The axis along which the transform is applied; defaults to the
        last axis.
    overwrite_x : bool, optional
        If set to true, the contents of `x` can be overwritten; default
        False.

    Returns
    -------
    z : real ndarray
        The spectrum packed as
        ``[y(0), Re(y(1)), Im(y(1)), ..., Re(y(n/2))]`` for even `n`, and
        ``[y(0), Re(y(1)), Im(y(1)), ..., Re(y(n/2)), Im(y(n/2))]`` for
        odd `n`, where ``y(j) = sum[k=0..n-1] x[k] *
        exp(-sqrt(-1)*j*k*2*pi/n)``.

    Raises
    ------
    TypeError
        If the input is not real-valued.
    ValueError
        If the input dtype is unsupported.

    See Also
    --------
    fft, irfft, numpy.fft.rfft

    Notes
    -----
    Within numerical accuracy, ``y == rfft(irfft(y))``.  For a complex
    output datatype, use the related function `numpy.fft.rfft` instead.
    """
    tmp = _asfarray(x)
    if not numpy.isrealobj(tmp):
        raise TypeError("1st argument must be real sequence")
    try:
        work_function = _DTYPE_TO_RFFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    # If _asfarray had to copy/convert, the intermediate is private and
    # may be overwritten.
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    return _raw_fft(tmp, n, axis, 1, overwrite_x, work_function)
def irfft(x, n=None, axis=-1, overwrite_x=False):
    """
    Return the inverse discrete Fourier transform of real sequence x.

    The contents of `x` are interpreted as the output of the `rfft`
    function (the packed real-format spectrum).

    Parameters
    ----------
    x : array_like
        Transformed data to invert.
    n : int, optional
        Length of the inverse transform; `x` is truncated or zero-padded
        to this length along `axis`.  Defaults to ``x.shape[axis]``.
    axis : int, optional
        Axis along which the inverse FFTs are computed; defaults to the
        last axis.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; default False.

    Returns
    -------
    irfft : ndarray of floats
        The inverse discrete Fourier transform.

    Raises
    ------
    TypeError
        If the input is not real-valued.
    ValueError
        If the input dtype is unsupported.

    See Also
    --------
    rfft, ifft, numpy.fft.irfft

    Notes
    -----
    The result is the real sequence whose `rfft` equals `x`; see `rfft`
    for the exact packing.  To process conjugate-symmetric data stored
    with a complex datatype, use `numpy.fft.irfft` instead.
    """
    tmp = _asfarray(x)
    if not numpy.isrealobj(tmp):
        raise TypeError("1st argument must be real sequence")
    try:
        work_function = _DTYPE_TO_RFFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    # If _asfarray had to copy/convert, the intermediate is private and
    # may be overwritten.
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    return _raw_fft(tmp, n, axis, -1, overwrite_x, work_function)
def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function):
    """ Internal auxiliary function for fftnd, ifftnd.

    Adjusts each requested axis of `x` to the requested length, moves the
    transform axes to the end of the array when an explicit `axes` list is
    given, and then delegates to `work_function`.
    """
    # Default shape: the full array shape, or the sizes of the chosen axes.
    if s is None:
        if axes is None:
            s = x.shape
        else:
            s = numpy.take(x.shape, axes)
    s = tuple(s)
    if axes is None:
        noaxes = True
        axes = list(range(-x.ndim, 0))
    else:
        noaxes = False
    if len(axes) != len(s):
        raise ValueError("when given, axes and shape arguments "
                         "have to be of the same length")
    for dim in s:
        if dim < 1:
            raise ValueError("Invalid number of FFT data points "
                             "(%s) specified." % (s,))
    # No need to swap axes, array is in C order
    if noaxes:
        for i in axes:
            x, copy_made = _fix_shape(x, s[i], i)
            overwrite_x = overwrite_x or copy_made
        return work_function(x,s,direction,overwrite_x=overwrite_x)
    # We ordered axes, because the code below to push axes at the end of the
    # array assumes axes argument is in ascending order.
    a = numpy.array(axes, numpy.intc)
    abs_axes = numpy.where(a < 0, a + x.ndim, a)
    id_ = numpy.argsort(abs_axes)
    axes = [axes[i] for i in id_]
    s = [s[i] for i in id_]
    # Swap the request axes, last first (i.e. First swap the axis which ends up
    # at -1, then at -2, etc...), such as the request axes on which the
    # operation is carried become the last ones
    for i in range(1, len(axes)+1):
        x = numpy.swapaxes(x, axes[-i], -i)
    # We can now operate on the axes waxes, the p last axes (p = len(axes)), by
    # fixing the shape of the input array to 1 for any axis the fft is not
    # carried upon.
    waxes = list(range(x.ndim - len(axes), x.ndim))
    # NOTE(review): numpy.ones gives a float64 array here, so `shape` is
    # passed to work_function as floats — presumably accepted by the
    # FFTPACK wrappers; confirm before changing.
    shape = numpy.ones(x.ndim)
    shape[waxes] = s
    for i in range(len(waxes)):
        x, copy_made = _fix_shape(x, s[i], waxes[i])
        overwrite_x = overwrite_x or copy_made
    r = work_function(x, shape, direction, overwrite_x=overwrite_x)
    # reswap in the reverse order (first axis first, etc...) to get original
    # order
    for i in range(len(axes), 0, -1):
        r = numpy.swapaxes(r, -i, axes[-i])
    return r
def fftn(x, shape=None, axes=None, overwrite_x=False):
    """
    Return the multidimensional discrete Fourier transform.

    The returned array contains::

      y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
         x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i)

    where ``d = len(x.shape)`` and ``n = x.shape``.

    Parameters
    ----------
    x : array_like
        The (n-dimensional) array to transform.
    shape : tuple of ints, optional
        The shape of the result.  Defaults to ``x.shape`` when `axes` is
        also None, otherwise to ``scipy.take(x.shape, axes, axis=0)``.
        Each dimension is zero-padded or truncated to the requested size.
    axes : array_like of ints, optional
        The axes of `x` along which the transform is applied.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; default False.

    Returns
    -------
    y : complex-valued n-dimensional numpy array
        The (n-dimensional) DFT of the input array.

    See Also
    --------
    ifftn

    Notes
    -----
    Single and double precision are supported; half precision is promoted
    to single and non-float input to double.  For real-valued `x`,
    ``y[..., j_i, ...] == y[..., n_i-j_i, ...].conjugate()``.
    """
    forward = 1
    return _raw_fftn_dispatch(x, shape, axes, overwrite_x, forward)
def _raw_fftn_dispatch(x, shape, axes, overwrite_x, direction):
    """Common driver for fftn/ifftn: pick the dtype routine and run it."""
    tmp = _asfarray(x)
    try:
        work_function = _DTYPE_TO_FFTN[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    complex_input = istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)
    if not complex_input:
        # Non-complex input is always allowed to be overwritten.
        overwrite_x = 1
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    return _raw_fftnd(tmp, shape, axes, direction, overwrite_x, work_function)
def ifftn(x, shape=None, axes=None, overwrite_x=False):
    """
    Return the inverse multidimensional discrete Fourier transform of an
    arbitrary type sequence x.

    The returned array contains::

      y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
         x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i)

    where ``d = len(x.shape)``, ``n = x.shape``, and
    ``p = prod[i=1..d] n_i``.

    For a description of the parameters, see `fftn`.

    See Also
    --------
    fftn : for detailed information.
    """
    inverse = -1
    return _raw_fftn_dispatch(x, shape, axes, overwrite_x, inverse)
def fft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
    """
    2-D discrete Fourier transform.

    Computes the two-dimensional discrete Fourier transform of `x` over
    the axes given by `axes` (by default the last two).

    See Also
    --------
    fftn : for detailed information.
    """
    return fftn(x, shape=shape, axes=axes, overwrite_x=overwrite_x)
def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
    """
    2-D discrete inverse Fourier transform of a real or complex sequence.

    Computes the inverse two-dimensional discrete Fourier transform of
    `x` over the axes given by `axes` (by default the last two).

    See Also
    --------
    fft2, ifft
    """
    return ifftn(x, shape=shape, axes=axes, overwrite_x=overwrite_x)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an interface for working with multiple event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import six
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import directory_watcher
from tensorflow.python.summary.impl import io_wrapper
class EventMultiplexer(object):
  """An `EventMultiplexer` manages access to multiple `EventAccumulator`s.

  Each `EventAccumulator` is associated with a `run`, which is a self-contained
  TensorFlow execution. The `EventMultiplexer` provides methods for extracting
  information about events from multiple `run`s.

  Example usage for loading specific runs from files:

  ```python
  x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})
  x.Reload()
  ```

  Example usage for loading a directory where each subdirectory is a run:

  ```python
  (eg:) /parent/directory/path/
        /parent/directory/path/run1/
        /parent/directory/path/run1/events.out.tfevents.1001
        /parent/directory/path/run1/events.out.tfevents.1002
        /parent/directory/path/run2/
        /parent/directory/path/run2/events.out.tfevents.9232
        /parent/directory/path/run3/
        /parent/directory/path/run3/events.out.tfevents.9232
  x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')
  (which is equivalent to:)
  x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2':...}
  ```

  If you would like to watch `/parent/directory/path`, wait for it to be created
  (if necessary) and then periodically pick up new runs, use
  `AutoloadingMultiplexer`

  @@__init__
  @@AddRun
  @@AddRunsFromDirectory
  @@Reload
  @@Runs
  @@RunPaths
  @@Scalars
  @@Graph
  @@MetaGraph
  @@Histograms
  @@CompressedHistograms
  @@Images
  @@Audio
  """

  def __init__(self,
               run_path_map=None,
               size_guidance=event_accumulator.DEFAULT_SIZE_GUIDANCE,
               purge_orphaned_data=True):
    """Constructor for the `EventMultiplexer`.

    Args:
      run_path_map: Dict `{run: path}` which specifies the
        name of a run, and the path to find the associated events. If it is
        None, then the EventMultiplexer initializes without any runs.
      size_guidance: A dictionary mapping from `tagType` to the number of items
        to store for each tag of that type. See
        `event_accumulator.EventAccumulator` for details.
      purge_orphaned_data: Whether to discard any events that were "orphaned" by
        a TensorFlow restart.
    """
    logging.info('Event Multiplexer initializing.')
    self._accumulators_mutex = threading.Lock()
    # Both dicts below are guarded by `_accumulators_mutex`.
    self._accumulators = {}
    self._paths = {}
    self._reload_called = False
    self._size_guidance = size_guidance
    self.purge_orphaned_data = purge_orphaned_data
    if run_path_map is not None:
      # Fixed typo in log message: 'Multplexer' -> 'Multiplexer'.
      logging.info('Event Multiplexer doing initialization load for %s',
                   run_path_map)
      for (run, path) in six.iteritems(run_path_map):
        self.AddRun(path, run)
    logging.info('Event Multiplexer done initializing')

  def AddRun(self, path, name=None):
    """Add a run to the multiplexer.

    If the name is not specified, it is the same as the path.

    If a run by that name exists, and we are already watching the right path,
    do nothing. If we are watching a different path, replace the event
    accumulator.

    If `Reload` has been called, it will `Reload` the newly created
    accumulators.

    Args:
      path: Path to the event files (or event directory) for given run.
      name: Name of the run to add. If not provided, is set to path.

    Returns:
      The `EventMultiplexer`.
    """
    # Fix: the original used `name is ''`, an identity comparison against a
    # string literal, which is implementation-dependent (and a SyntaxWarning
    # on modern Pythons). `not name` covers both None and the empty string.
    if not name:
      name = path
    accumulator = None
    with self._accumulators_mutex:
      if name not in self._accumulators or self._paths[name] != path:
        if name in self._paths and self._paths[name] != path:
          # TODO(danmane) - Make it impossible to overwrite an old path with
          # a new path (just give the new path a distinct name)
          logging.warning('Conflict for name %s: old path %s, new path %s',
                          name, self._paths[name], path)
        logging.info('Constructing EventAccumulator for %s', path)
        accumulator = event_accumulator.EventAccumulator(
            path,
            size_guidance=self._size_guidance,
            purge_orphaned_data=self.purge_orphaned_data)
        self._accumulators[name] = accumulator
        self._paths[name] = path
    # Reload outside the lock: accumulator.Reload() may perform slow I/O.
    if accumulator and self._reload_called:
      accumulator.Reload()
    return self

  def AddRunsFromDirectory(self, path, name=None):
    """Load runs from a directory; recursively walks subdirectories.

    If path doesn't exist, no-op. This ensures that it is safe to call
    `AddRunsFromDirectory` multiple times, even before the directory is made.

    If path is a directory, load event files in the directory (if any exist) and
    recursively call AddRunsFromDirectory on any subdirectories. This mean you
    can call AddRunsFromDirectory at the root of a tree of event logs and
    TensorBoard will load them all.

    If the `EventMultiplexer` is already loaded this will cause
    the newly created accumulators to `Reload()`.

    Args:
      path: A string path to a directory to load runs from.
      name: Optionally, what name to apply to the runs. If name is provided
        and the directory contains run subdirectories, the name of each subrun
        is the concatenation of the parent name and the subdirectory name. If
        name is provided and the directory contains event files, then a run
        is added called "name" and with the events from the path.

    Raises:
      ValueError: If the path exists and isn't a directory.

    Returns:
      The `EventMultiplexer`.
    """
    logging.info('Starting AddRunsFromDirectory: %s', path)
    for subdir in GetLogdirSubdirectories(path):
      logging.info('Adding events from directory %s', subdir)
      rpath = os.path.relpath(subdir, path)
      subname = os.path.join(name, rpath) if name else rpath
      self.AddRun(subdir, name=subname)
    logging.info('Done with AddRunsFromDirectory: %s', path)
    return self

  def Reload(self):
    """Call `Reload` on every `EventAccumulator`."""
    logging.info('Beginning EventMultiplexer.Reload()')
    self._reload_called = True
    # Build a list so we're safe even if the list of accumulators is modified
    # even while we're reloading.
    with self._accumulators_mutex:
      items = list(self._accumulators.items())
    names_to_delete = set()
    for name, accumulator in items:
      try:
        accumulator.Reload()
      except (OSError, IOError) as e:
        # Best-effort: log and continue with the remaining accumulators.
        logging.error("Unable to reload accumulator '%s': %s", name, e)
      except directory_watcher.DirectoryDeletedError:
        names_to_delete.add(name)
    with self._accumulators_mutex:
      for name in names_to_delete:
        logging.warning("Deleting accumulator '%s'", name)
        del self._accumulators[name]
    logging.info('Finished with EventMultiplexer.Reload()')
    return self

  def FirstEventTimestamp(self, run):
    """Return the timestamp of the first event of the given run.

    This may perform I/O if no events have been loaded yet for the run.

    Args:
      run: A string name of the run for which the timestamp is retrieved.

    Returns:
      The wall_time of the first event of the run, which will typically be
      seconds since the epoch.

    Raises:
      KeyError: If the run is not found.
      ValueError: If the run has no events loaded and there are no events on
        disk to load.
    """
    accumulator = self._GetAccumulator(run)
    return accumulator.FirstEventTimestamp()

  def Scalars(self, run, tag):
    """Retrieve the scalar events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.ScalarEvents`.
    """
    accumulator = self._GetAccumulator(run)
    return accumulator.Scalars(tag)

  def HealthPills(self, run, node_name):
    """Retrieve the scalar events associated with a run and node name.

    Args:
      run: A string name of the run for which health pills are retrieved.
      node_name: A string name of the node for which health pills are retrieved.

    Raises:
      KeyError: If the run is not found, or the node name is not available for
        the given run.

    Returns:
      An array of `event_accumulator.HealthPillEvents`.
    """
    accumulator = self._GetAccumulator(run)
    return accumulator.HealthPills(node_name)

  def Graph(self, run):
    """Retrieve the graph associated with the provided run.

    Args:
      run: A string name of a run to load the graph for.

    Raises:
      KeyError: If the run is not found.
      ValueError: If the run does not have an associated graph.

    Returns:
      The `GraphDef` protobuf data structure.
    """
    accumulator = self._GetAccumulator(run)
    return accumulator.Graph()

  def MetaGraph(self, run):
    """Retrieve the metagraph associated with the provided run.

    Args:
      run: A string name of a run to load the graph for.

    Raises:
      KeyError: If the run is not found.
      ValueError: If the run does not have an associated graph.

    Returns:
      The `MetaGraphDef` protobuf data structure.
    """
    accumulator = self._GetAccumulator(run)
    return accumulator.MetaGraph()

  def RunMetadata(self, run, tag):
    """Get the session.run() metadata associated with a TensorFlow run and tag.

    Args:
      run: A string name of a TensorFlow run.
      tag: A string name of the tag associated with a particular session.run().

    Raises:
      KeyError: If the run is not found, or the tag is not available for the
        given run.

    Returns:
      The metadata in the form of `RunMetadata` protobuf data structure.
    """
    accumulator = self._GetAccumulator(run)
    return accumulator.RunMetadata(tag)

  def Histograms(self, run, tag):
    """Retrieve the histogram events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.HistogramEvents`.
    """
    accumulator = self._GetAccumulator(run)
    return accumulator.Histograms(tag)

  def CompressedHistograms(self, run, tag):
    """Retrieve the compressed histogram events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.CompressedHistogramEvents`.
    """
    accumulator = self._GetAccumulator(run)
    return accumulator.CompressedHistograms(tag)

  def Images(self, run, tag):
    """Retrieve the image events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.ImageEvents`.
    """
    accumulator = self._GetAccumulator(run)
    return accumulator.Images(tag)

  def Audio(self, run, tag):
    """Retrieve the audio events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.AudioEvents`.
    """
    accumulator = self._GetAccumulator(run)
    return accumulator.Audio(tag)

  def Runs(self):
    """Return all the run names in the `EventMultiplexer`.

    Returns:
    ```
      {runName: { images: [tag1, tag2, tag3],
                  scalarValues: [tagA, tagB, tagC],
                  histograms: [tagX, tagY, tagZ],
                  compressedHistograms: [tagX, tagY, tagZ],
                  graph: true, meta_graph: true}}
    ```
    """
    with self._accumulators_mutex:
      # To avoid nested locks, we construct a copy of the run-accumulator map
      items = list(six.iteritems(self._accumulators))
    return {run_name: accumulator.Tags() for run_name, accumulator in items}

  def RunPaths(self):
    """Returns a dict mapping run names to event file paths."""
    # NOTE(review): this exposes the internal dict, not a copy; callers
    # should treat the result as read-only.
    return self._paths

  def _GetAccumulator(self, run):
    """Return the accumulator for `run`; raises `KeyError` if not present."""
    with self._accumulators_mutex:
      return self._accumulators[run]
def GetLogdirSubdirectories(path):
  """Yield the subdirectories of `path` that contain TF event files."""
  if gfile.Exists(path) and not gfile.IsDirectory(path):
    raise ValueError('GetLogdirSubdirectories: path exists and is not a '
                     'directory, %s' % path)
  # ListRecursively just yields nothing if the path doesn't exist.
  return (
      subdir
      for (subdir, files) in io_wrapper.ListRecursively(path)
      if any(event_accumulator.IsTensorFlowEventsFile(f) for f in files)
  )
| |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Support for plugins."""
import os
import os.path
import sys
from coverage.exceptions import CoverageException
from coverage.misc import isolate_module
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
os = isolate_module(os)
class Plugins:
    """The collection of coverage.py plugins currently loaded."""

    def __init__(self):
        self.order = []
        self.names = {}
        self.file_tracers = []
        self.configurers = []
        self.context_switchers = []
        self.current_module = None
        self.debug = None

    @classmethod
    def load_plugins(cls, modules, config, debug=None):
        """Import each module in `modules` and let it register its plugins.

        Every module must define a ``coverage_init`` function, which is
        invoked with the new `Plugins` collection and that module's options.

        Returns the populated `Plugins` object.
        """
        plugins = cls()
        plugins.debug = debug
        for mod_name in modules:
            plugins.current_module = mod_name
            __import__(mod_name)
            module = sys.modules[mod_name]
            coverage_init = getattr(module, "coverage_init", None)
            if not coverage_init:
                raise CoverageException(
                    f"Plugin module {mod_name!r} didn't define a coverage_init function"
                )
            coverage_init(plugins, config.get_plugin_options(mod_name))
        plugins.current_module = None
        return plugins

    def add_file_tracer(self, plugin):
        """Register `plugin` as a file tracer.

        `plugin` is a third-party plugin instance implementing
        :meth:`CoveragePlugin.file_tracer`.
        """
        self._add_plugin(plugin, self.file_tracers)

    def add_configurer(self, plugin):
        """Register `plugin` as a configurer.

        `plugin` is a third-party plugin instance implementing
        :meth:`CoveragePlugin.configure`.
        """
        self._add_plugin(plugin, self.configurers)

    def add_dynamic_context(self, plugin):
        """Register `plugin` as a dynamic context switcher.

        `plugin` is a third-party plugin instance implementing
        :meth:`CoveragePlugin.dynamic_context`.
        """
        self._add_plugin(plugin, self.context_switchers)

    def add_noop(self, plugin):
        """Register `plugin` with no specialized role.

        Only useful for testing the plugin machinery itself.
        """
        self._add_plugin(plugin, None)

    def _add_plugin(self, plugin, specialized):
        """Record `plugin` in the collection.

        `specialized` is the role-specific list to append it to, or None.
        When debugging is enabled the plugin is wrapped so its activity
        gets logged.
        """
        plugin_name = f"{self.current_module}.{plugin.__class__.__name__}"
        if self.debug and self.debug.should('plugin'):
            self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}")
            labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug)
            plugin = DebugPluginWrapper(plugin, labelled)

        # pylint: disable=attribute-defined-outside-init
        plugin._coverage_plugin_name = plugin_name
        plugin._coverage_enabled = True
        self.order.append(plugin)
        self.names[plugin_name] = plugin
        if specialized is not None:
            specialized.append(plugin)

    def __nonzero__(self):
        return bool(self.order)

    __bool__ = __nonzero__

    def __iter__(self):
        return iter(self.order)

    def get(self, plugin_name):
        """Return a plugin by name."""
        return self.names[plugin_name]
class LabelledDebug:
    """A debug writer that prepends a stack of labels to each message."""

    def __init__(self, label, debug, prev_labels=()):
        # Accumulate the inherited labels plus our own, outermost first.
        self.labels = [*prev_labels, label]
        self.debug = debug

    def add_label(self, label):
        """Return a new `LabelledDebug` with `label` appended to ours."""
        return LabelledDebug(label, self.debug, self.labels)

    def message_prefix(self):
        """Combine the labels into the indented prefix for messages."""
        parts = []
        for depth, label in enumerate(self.labels + ['']):
            parts.append(" " * depth + label)
        return ":\n".join(parts)

    def write(self, message):
        """Write `message` to the underlying debug, labels prepended."""
        self.debug.write(f"{self.message_prefix()}{message}")
class DebugPluginWrapper(CoveragePlugin):
    """Wrap a plugin so every call to it is reported through debug."""

    def __init__(self, plugin, debug):
        super().__init__()
        self.plugin = plugin
        self.debug = debug

    def file_tracer(self, filename):
        """Delegate `file_tracer`, logging the call and wrapping any result."""
        tracer = self.plugin.file_tracer(filename)
        self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}")
        if not tracer:
            return tracer
        file_debug = self.debug.add_label(f"file {filename!r}")
        return DebugFileTracerWrapper(tracer, file_debug)

    def file_reporter(self, filename):
        """Delegate `file_reporter`, logging the call and wrapping any result."""
        reporter = self.plugin.file_reporter(filename)
        self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}")
        if not reporter:
            return reporter
        file_debug = self.debug.add_label(f"file {filename!r}")
        return DebugFileReporterWrapper(filename, reporter, file_debug)

    def dynamic_context(self, frame):
        """Delegate `dynamic_context`, logging the call and result."""
        ctx = self.plugin.dynamic_context(frame)
        self.debug.write(f"dynamic_context({frame!r}) --> {ctx!r}")
        return ctx

    def find_executable_files(self, src_dir):
        """Delegate `find_executable_files`, logging the call and result."""
        found = self.plugin.find_executable_files(src_dir)
        self.debug.write(f"find_executable_files({src_dir!r}) --> {found!r}")
        return found

    def configure(self, config):
        """Delegate `configure`, logging the call."""
        self.debug.write(f"configure({config!r})")
        self.plugin.configure(config)

    def sys_info(self):
        """Delegate `sys_info` directly (not logged)."""
        return self.plugin.sys_info()
class DebugFileTracerWrapper(FileTracer):
    """A `FileTracer` that logs every delegated call for debugging."""

    def __init__(self, tracer, debug):
        self.tracer = tracer
        self.debug = debug

    def _show_frame(self, frame):
        """A short ``file@line`` string identifying a frame, for messages."""
        return f"{os.path.basename(frame.f_code.co_filename)}@{frame.f_lineno:d}"

    def source_filename(self):
        """Delegate `source_filename`, logging the result."""
        filename = self.tracer.source_filename()
        self.debug.write(f"source_filename() --> {filename!r}")
        return filename

    def has_dynamic_source_filename(self):
        """Delegate `has_dynamic_source_filename`, logging the result."""
        is_dynamic = self.tracer.has_dynamic_source_filename()
        self.debug.write(f"has_dynamic_source_filename() --> {is_dynamic!r}")
        return is_dynamic

    def dynamic_source_filename(self, filename, frame):
        """Delegate `dynamic_source_filename`, logging the call and result."""
        dyn = self.tracer.dynamic_source_filename(filename, frame)
        self.debug.write(
            f"dynamic_source_filename({filename!r}, {self._show_frame(frame)}) --> {dyn!r}"
        )
        return dyn

    def line_number_range(self, frame):
        """Delegate `line_number_range`, logging the result."""
        rng = self.tracer.line_number_range(frame)
        self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {rng!r}")
        return rng
class DebugFileReporterWrapper(FileReporter):
    """A `FileReporter` that logs every delegated call for debugging."""

    def __init__(self, filename, reporter, debug):
        super().__init__(filename)
        self.reporter = reporter
        self.debug = debug

    def relative_filename(self):
        """Delegate `relative_filename`, logging the result."""
        result = self.reporter.relative_filename()
        self.debug.write(f"relative_filename() --> {result!r}")
        return result

    def lines(self):
        """Delegate `lines`, logging the result."""
        result = self.reporter.lines()
        self.debug.write(f"lines() --> {result!r}")
        return result

    def excluded_lines(self):
        """Delegate `excluded_lines`, logging the result."""
        result = self.reporter.excluded_lines()
        self.debug.write(f"excluded_lines() --> {result!r}")
        return result

    def translate_lines(self, lines):
        """Delegate `translate_lines`, logging the call and result."""
        result = self.reporter.translate_lines(lines)
        self.debug.write(f"translate_lines({lines!r}) --> {result!r}")
        return result

    def translate_arcs(self, arcs):
        """Delegate `translate_arcs`, logging the call and result."""
        result = self.reporter.translate_arcs(arcs)
        self.debug.write(f"translate_arcs({arcs!r}) --> {result!r}")
        return result

    def no_branch_lines(self):
        """Delegate `no_branch_lines`, logging the result."""
        result = self.reporter.no_branch_lines()
        self.debug.write(f"no_branch_lines() --> {result!r}")
        return result

    def exit_counts(self):
        """Delegate `exit_counts`, logging the result."""
        result = self.reporter.exit_counts()
        self.debug.write(f"exit_counts() --> {result!r}")
        return result

    def arcs(self):
        """Delegate `arcs`, logging the result."""
        result = self.reporter.arcs()
        self.debug.write(f"arcs() --> {result!r}")
        return result

    def source(self):
        """Delegate `source`, logging only the size of the result."""
        result = self.reporter.source()
        self.debug.write(f"source() --> {len(result)} chars")
        return result

    def source_token_lines(self):
        """Delegate `source_token_lines`, materialized, logging the count."""
        result = list(self.reporter.source_token_lines())
        self.debug.write(f"source_token_lines() --> {len(result)} tokens")
        return result
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Broadcast'
db.create_table(
'sentry_broadcast', (
(
'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
primary_key=True
)
), ('message', self.gf('django.db.models.fields.CharField')(max_length=256)), (
'link', self.gf('django.db.models.fields.URLField')(
max_length=200, null=True, blank=True
)
), (
'badge', self.gf('django.db.models.fields.CharField')(
max_length=32, null=True, blank=True
)
), (
'is_active',
self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)
), (
'date_added',
self.gf('django.db.models.fields.DateTimeField')()
),
)
)
db.send_create_signal('sentry', ['Broadcast'])
    def backwards(self, orm):
        # Reverse of ``forwards``: drop the table created for 'Broadcast'.
        # Deleting model 'Broadcast'
        db.delete_table('sentry_broadcast')
models = {
'sentry.accessgroup': {
'Meta': {
'unique_together': "(('team', 'name'),)",
'object_name': 'AccessGroup'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.User']",
'symmetrical': 'False'
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'symmetrical': 'False'
}
),
'team':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
})
},
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.alert': {
'Meta': {
'object_name': 'Alert'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'related_groups': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'related_alerts'",
'symmetrical': 'False',
'through': "orm['sentry.AlertRelatedGroup']",
'to': "orm['sentry.Group']"
}
),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.alertrelatedgroup': {
'Meta': {
'unique_together': "(('group', 'alert'),)",
'object_name': 'AlertRelatedGroup'
},
'alert':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Alert']"
}),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'badge': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments':
('django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'null': 'True'
}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent': ('django.db.models.fields.IntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments':
('django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'null': 'True'
}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'times_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'rule':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {
'unique_together': "(('team', 'email'),)",
'object_name': 'PendingTeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'pending_member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'),)",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'owner': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_owned_project_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']",
'null': 'True'
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
),
'user_added': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'keys_added_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'team_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.TeamMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.teammember': {
'Meta': {
'unique_together': "(('team', 'user'),)",
'object_name': 'TeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_teammember_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| |
from django.db import models
from django.core.validators import MinValueValidator,MaxValueValidator,MaxLengthValidator,RegexValidator,ValidationError
from django.db.models import Sum,Avg
import json
from django.contrib.auth.models import User
from django.db.models.signals import post_save
def dqpscore(score):
    """Format a fractional (0..1) DQP score as a percentage string, e.g. 0.125 -> '12.5'."""
    percentage = score * 100
    return str(round(percentage, 1))
class CommonOutcome(models.Model):
    """Abstract base for an outcome scored against the five DQP categories.

    Each category score (applied/specialized/intellectual/broad/civic) is a
    decimal fraction in [0, 1].  clean() enforces that either none or all
    five are filled in, and that a complete set sums to exactly 1.0.
    """
    class Meta:
        abstract = True
    applied = models.DecimalField(
        max_digits=4, decimal_places=3,
        validators=[MaxValueValidator(1),MinValueValidator(0)],
        blank=True, null=True,
        help_text="Enter a decimal such as 0.15"
    )
    specialized = models.DecimalField(
        max_digits=4, decimal_places=3,
        validators=[MaxValueValidator(1),MinValueValidator(0)],
        blank=True, null=True,
        help_text="Enter a decimal such as 0.15"
    )
    intellectual = models.DecimalField(
        max_digits=4, decimal_places=3,
        validators=[MaxValueValidator(1),MinValueValidator(0)],
        blank=True, null=True,
        help_text="Enter a decimal such as 0.15"
    )
    broad = models.DecimalField(
        max_digits=4, decimal_places=3,
        validators=[MaxValueValidator(1),MinValueValidator(0)],
        blank=True, null=True,
        help_text="Enter a decimal such as 0.15"
    )
    civic = models.DecimalField(
        max_digits=4, decimal_places=3,
        validators=[MaxValueValidator(1),MinValueValidator(0)],
        blank=True, null=True,
        help_text="Enter a decimal such as 0.15"
    )
    # Relative weight of this outcome when aggregated by Program/Course.
    weight = models.DecimalField(
        max_digits=4, decimal_places=3,
        validators=[MaxValueValidator(1),MinValueValidator(0)],
        default=1.0,
        help_text="Enter a decimal such as 0.15"
    )
    outcome = models.TextField()
    comments = models.TextField(blank=True, null=True)
    def _scores(self):
        # The five DQP category scores, in canonical display order.
        return (self.applied, self.specialized, self.intellectual, self.broad, self.civic)
    def __unicode__(self):
        return self.outcome
    def get_dqp_scores_graphs(self):
        """Return a JSON payload of formatted scores + field names for graphing."""
        return json.dumps({
            'scores': [dqpscore(s) for s in self._scores()],
            'fields': [
                'applied', 'specialized', 'intellectual', 'broad', 'civic'
            ]
        })
    def sparkdata(self):
        """Return the formatted scores as a comma-separated string for sparklines."""
        return ','.join(map(dqpscore, self._scores()))
    def clean(self):
        """Validate all-or-nothing fill and that a full score set sums to 1.0."""
        scores = self._scores()
        filled = [s for s in scores if s is not None]
        # Bug fix: the original used any()/all(), which treat a legitimate
        # 0 score as "not filled in".  Partially-empty rows whose only
        # values were zeros slipped through, and fully-filled rows
        # containing a zero were never sum-checked.  Compare to None.
        if filled and len(filled) != len(scores):
            raise ValidationError("DQP Scores must either be completely filled out or completely empty")
        if len(filled) == len(scores):
            sumdqp = sum(scores)
            if sumdqp != 1:
                raise ValidationError("The sum of your DQP Scores MUST be 1.0, currently it is %s" % sumdqp)
class Institution(models.Model):
    """An institution (e.g. a college) whose programs/courses are DQP-scored."""
    name = models.CharField(
        max_length=255,
        unique=True,
    )
    # URL-safe short identifier, e.g. "lcc" -> http://dqp.lanecc.edu/lcc/
    shortname = models.CharField(
        max_length=10,
        unique=True,
        # Bug fix: RegexValidator uses re.search, so the original unanchored
        # pattern r'[a-z_]+' accepted any value merely *containing* a
        # lowercase letter or underscore.  Anchor it so the whole value must
        # match.  Also fixed the "underscoes" typo in the message.
        validators=[MaxLengthValidator(10),RegexValidator(regex=r'^[a-z_]+$',message="You can only use lowercase letters and underscores")],
        help_text="This will be used to view your institution's graphs, for example http://dqp.lanecc.edu/lcc/ would be the DQP landing page for LCC"
    )
    def __unicode__(self):
        return self.name
class Program(models.Model):
    """A program of study at an Institution, aggregating DQP scores.

    Scores can be aggregated two ways: from the program's own outcomes
    (get_dqp_scores_graphs) or from the weighted courses linked through
    CourseToProgram (get_course_dqp_scores*).
    """
    # NOTE: the Meta.unique_together below already enforces uniqueness of
    # the (institution, name) pair, which this old TODO asked for.
    #TODO validate that a program/institution combination are unique
    institution = models.ForeignKey(Institution)
    name = models.CharField(max_length=255)
    comments = models.TextField(blank=True, null=True)
    class Meta:
        unique_together = ('institution', 'name')
    def get_dqp_scores_graphs(self):
        """Weighted sums of this program's own outcomes as a graphing JSON payload.

        Returns False when the program has no outcomes yet.
        """
        outcomes = ProgramOutcome.objects.filter(program=self)
        if not outcomes:
            return False
        sums = {'applied':0, 'specialized':0, 'intellectual':0, 'broad':0, 'civic':0}
        for outcome in outcomes:
            sums['applied'] += outcome.applied * outcome.weight
            sums['specialized'] += outcome.specialized * outcome.weight
            sums['intellectual'] += outcome.intellectual * outcome.weight
            sums['broad'] += outcome.broad * outcome.weight
            sums['civic'] += outcome.civic * outcome.weight
        # Format each accumulated score as a percentage string (Python 2 iteritems).
        for k,v in sums.iteritems():
            sums[k] = dqpscore(v)
        return json.dumps({
            'scores':[
                sums['applied'], sums['specialized'], sums['intellectual'], sums['broad'], sums['civic']
            ],
            'fields':[
                'Applied', 'Specialized', 'Intellectual', 'Broad', 'Civic'
            ]
        })
    def get_course_dqp_scores(self):
        """Aggregate raw DQP scores over the program's courses, weighted per CourseToProgram.

        Returns False when no courses are linked.
        NOTE(review): if a linked course has no outcomes, course.get_dqp_scores()
        returns False and the additions below would fail — presumably prevented
        by CourseToProgram.clean(); verify.
        """
        courses = Course.objects.all().select_related().filter(program=self)
        if not courses:
            return False
        sums = {'applied':0, 'specialized':0, 'intellectual':0, 'broad':0, 'civic':0}
        for course in courses:
            # Attach the link weight to the course instance for use below.
            course.weight = float(CourseToProgram.objects.get(program_id=self.id, course_id=course.id).weight)
            dqp_scores = course.get_dqp_scores()
            sums['applied']+= course.weight*dqp_scores['applied']
            sums['specialized']+= course.weight*dqp_scores['specialized']
            sums['intellectual']+= course.weight*dqp_scores['intellectual']
            sums['broad']+= course.weight*dqp_scores['broad']
            sums['civic']+= course.weight*dqp_scores['civic']
        return sums
    def get_course_dqp_scores_graphs(self):
        """Course-derived aggregate scores formatted as a graphing JSON payload (or False)."""
        sums = self.get_course_dqp_scores()
        if not sums:
            return False
        for k, v in sums.iteritems():
            sums[k] = dqpscore(v)
        return json.dumps({
            'scores':[
                sums['applied'], sums['specialized'], sums['intellectual'], sums['broad'], sums['civic']
            ],
            'fields':[
                'Applied', 'Specialized', 'Intellectual', 'Broad', 'Civic'
            ]
        })
    def sparkdata(self):
        """Comma-separated course-derived scores for sparkline widgets ("" if none)."""
        data = self.get_course_dqp_scores()
        if not data:
            return ""
        return ','.join(map(dqpscore,(data['applied'], data['specialized'], data['intellectual'], data['broad'], data['civic'])))
    def __unicode__(self):
        return self.name
class ProgramOutcome(CommonOutcome):
    """A DQP-scored outcome attached directly to a Program."""
    program = models.ForeignKey(Program)
    class Meta:
        # An outcome text may appear only once per program.
        unique_together = ('program', 'outcome')
class Course(models.Model):
    """A course offered by an Institution, linkable to Programs with weights."""
    # Many-to-many via the CourseToProgram through model (carries the weight).
    program = models.ManyToManyField(Program, through='CourseToProgram')
    course_number = models.CharField(max_length=255,help_text="For example: psych 101")
    comments = models.TextField(blank=True, null=True)
    institution = models.ForeignKey(Institution)
    class Meta:
        unique_together = ('institution', 'course_number')
    def get_dqp_scores(self):
        """Weighted sums of this course's outcomes, rounded to 3 places.

        Returns a dict of raw numeric scores, or False when the course has
        no outcomes.
        """
        outcomes = CourseOutcome.objects.filter(course=self)
        if not outcomes:
            return False
        sums = {'applied':0, 'specialized':0, 'intellectual':0, 'broad':0, 'civic':0}
        for outcome in outcomes:
            sums['applied'] += outcome.applied * outcome.weight
            sums['specialized'] += outcome.specialized * outcome.weight
            sums['intellectual'] += outcome.intellectual * outcome.weight
            sums['broad'] += outcome.broad * outcome.weight
            sums['civic'] += outcome.civic * outcome.weight
        # Keep raw numbers here (callers format them); Python 2 iteritems.
        for k,v in sums.iteritems():
            sums[k] = round(v,3)
        return sums
    def get_dqp_scores_graphs(self):
        """The aggregated scores formatted as a graphing JSON payload (or False)."""
        sums = self.get_dqp_scores()
        if not sums:
            return False
        for k,v in sums.iteritems():
            sums[k] = dqpscore(v)
        return json.dumps({
            'scores':[
                sums['applied'], sums['specialized'], sums['intellectual'], sums['broad'], sums['civic']
            ],
            'fields':[
                'Applied', 'Specialized', 'Intellectual', 'Broad', 'Civic'
            ]
        })
    def sparkdata(self):
        """Comma-separated formatted scores for sparkline widgets ("" if none)."""
        data = self.get_dqp_scores()
        if not data:
            return ""
        return ','.join(map(dqpscore,(data['applied'], data['specialized'], data['intellectual'], data['broad'], data['civic'])))
    def __unicode__(self):
        return self.course_number
class CourseToProgram(models.Model):
    """Through model linking a Course into a Program with a relative weight."""
    course = models.ForeignKey(Course)
    program = models.ForeignKey(Program)
    weight = models.DecimalField(
        max_digits=4, decimal_places=3,
        validators=[MaxValueValidator(1),MinValueValidator(0)],
        default=1.0
    )
    class Meta:
        unique_together = ('course', 'program')
    def __unicode__(self):
        # Use the FK relations directly instead of re-querying each row by id.
        return self.program.__unicode__() + "-" + self.course.__unicode__()
    def clean(self):
        """Reject the link unless the course already has DQP scores set.

        Bug fix: Course.get_dqp_scores() *returns False* (it does not raise)
        when the course has no outcomes, so the original bare try/except
        never fired for the very case it was written for and scoreless
        courses were silently accepted.  Check the return value instead,
        still treating an unset/missing course (which raises) as "no scores".
        """
        try:
            has_scores = self.course.get_dqp_scores()
        except Exception:
            has_scores = False
        if not has_scores:
            raise ValidationError("You cannot add a course to a program until that course has had DQP scores set (Edit this course first)")
class CourseOutcome(CommonOutcome):
    """A DQP-scored outcome attached to a Course."""
    course = models.ForeignKey(Course)
    class Meta:
        # An outcome text may appear only once per course.
        unique_together = ('course', 'outcome')
class UserProfile(models.Model):
    """Per-user profile associating a Django auth User with an Institution."""
    # This field is required: one profile per auth user.
    user = models.OneToOneField(User)
    # Institution is optional so accounts can exist before being assigned one.
    institution = models.ForeignKey(Institution,blank=True, null=True)
| |
# encoding: utf-8
"""
Test data builders for DrawingML XML elements
"""
from ...unitdata import BaseBuilder
class CT_BlipBuilder(BaseBuilder):
    """Builds an ``<a:blip>`` element for test fixtures."""
    __tag__ = 'a:blip'
    __nspfxs__ = ('a',)
    __attrs__ = ('r:embed', 'r:link', 'cstate')
class CT_BlipFillPropertiesBuilder(BaseBuilder):
    """Builds a ``<pic:blipFill>`` element for test fixtures."""
    __tag__ = 'pic:blipFill'
    __nspfxs__ = ('pic',)
    __attrs__ = ()
class CT_DrawingBuilder(BaseBuilder):
    """Builds a ``<w:drawing>`` element for test fixtures."""
    __tag__ = 'w:drawing'
    __nspfxs__ = ('w',)
    __attrs__ = ()
class CT_GraphicalObjectBuilder(BaseBuilder):
    """Builds an ``<a:graphic>`` element for test fixtures."""
    __tag__ = 'a:graphic'
    __nspfxs__ = ('a',)
    __attrs__ = ()
class CT_GraphicalObjectDataBuilder(BaseBuilder):
    """Builds an ``<a:graphicData>`` element for test fixtures."""
    __tag__ = 'a:graphicData'
    __nspfxs__ = ('a',)
    __attrs__ = ('uri',)
class CT_GraphicalObjectFrameLockingBuilder(BaseBuilder):
    """Builds an ``<a:graphicFrameLocks>`` element for test fixtures."""
    __tag__ = 'a:graphicFrameLocks'
    __nspfxs__ = ('a',)
    __attrs__ = ('noChangeAspect',)
class CT_InlineBuilder(BaseBuilder):
    """Builds a ``<wp:inline>`` element for test fixtures."""
    __tag__ = 'wp:inline'
    __nspfxs__ = ('wp',)
    __attrs__ = ('distT', 'distB', 'distL', 'distR')
class CT_NonVisualDrawingPropsBuilder(BaseBuilder):
    """Builds a non-visual drawing properties element.

    The tag is supplied at construction time because the same element shape
    is used under more than one tag name (e.g. 'pic:cNvPr' and 'wp:docPr').
    """
    __nspfxs__ = ('wp',)
    __attrs__ = ('id', 'name', 'descr', 'hidden', 'title')
    def __init__(self, tag):
        # Instance attribute shadows the usual class-level __tag__.
        self.__tag__ = tag
        super(CT_NonVisualDrawingPropsBuilder, self).__init__()
class CT_NonVisualGraphicFramePropertiesBuilder(BaseBuilder):
    """Builds a ``<wp:cNvGraphicFramePr>`` element for test fixtures."""
    __tag__ = 'wp:cNvGraphicFramePr'
    __nspfxs__ = ('wp',)
    __attrs__ = ()
class CT_NonVisualPicturePropertiesBuilder(BaseBuilder):
    """Builds a ``<pic:cNvPicPr>`` element for test fixtures."""
    __tag__ = 'pic:cNvPicPr'
    __nspfxs__ = ('pic',)
    # Bug fix: the original ('preferRelativeResize') lacked a trailing
    # comma, making __attrs__ a plain string rather than a 1-tuple like
    # every sibling builder; iterating it would yield characters.
    __attrs__ = ('preferRelativeResize',)
class CT_PictureBuilder(BaseBuilder):
    """Builds a ``<pic:pic>`` element for test fixtures."""
    __tag__ = 'pic:pic'
    __nspfxs__ = ('pic',)
    __attrs__ = ()
class CT_PictureNonVisualBuilder(BaseBuilder):
    """Builds a ``<pic:nvPicPr>`` element for test fixtures."""
    __tag__ = 'pic:nvPicPr'
    __nspfxs__ = ('pic',)
    __attrs__ = ()
class CT_Point2DBuilder(BaseBuilder):
    """Builds an ``<a:off>`` (2-D point) element for test fixtures."""
    __tag__ = 'a:off'
    __nspfxs__ = ('a',)
    __attrs__ = ('x', 'y')
class CT_PositiveSize2DBuilder(BaseBuilder):
    """Builds a positive-size-2D element (extent).

    Tag supplied at construction because the type appears under more than
    one tag name (e.g. 'a:ext' and 'wp:extent').
    """
    __nspfxs__ = ()
    __attrs__ = ('cx', 'cy')
    def __init__(self, tag):
        # Instance attribute shadows the usual class-level __tag__.
        self.__tag__ = tag
        super(CT_PositiveSize2DBuilder, self).__init__()
class CT_PresetGeometry2DBuilder(BaseBuilder):
    """Builds an ``<a:prstGeom>`` element for test fixtures."""
    __tag__ = 'a:prstGeom'
    __nspfxs__ = ('a',)
    __attrs__ = ('prst',)
class CT_RelativeRectBuilder(BaseBuilder):
    """Builds an ``<a:fillRect>`` element for test fixtures."""
    __tag__ = 'a:fillRect'
    __nspfxs__ = ('a',)
    __attrs__ = ('l', 't', 'r', 'b')
class CT_ShapePropertiesBuilder(BaseBuilder):
    """Builds a ``<pic:spPr>`` element for test fixtures."""
    __tag__ = 'pic:spPr'
    __nspfxs__ = ('pic', 'a')
    __attrs__ = ('bwMode',)
class CT_StretchInfoPropertiesBuilder(BaseBuilder):
    """Builds an ``<a:stretch>`` element for test fixtures."""
    __tag__ = 'a:stretch'
    __nspfxs__ = ('a',)
    __attrs__ = ()
class CT_Transform2DBuilder(BaseBuilder):
    """Builds an ``<a:xfrm>`` element for test fixtures."""
    __tag__ = 'a:xfrm'
    __nspfxs__ = ('a',)
    __attrs__ = ('rot', 'flipH', 'flipV')
# ---------------------------------------------------------------------------
# Convenience factory functions, one per builder class, so test code can
# obtain builder instances without referencing the classes directly.  The
# two parameterized builders are instantiated with their concrete tag name.
def a_blip():
    return CT_BlipBuilder()
def a_blipFill():
    return CT_BlipFillPropertiesBuilder()
def a_cNvGraphicFramePr():
    return CT_NonVisualGraphicFramePropertiesBuilder()
def a_cNvPicPr():
    return CT_NonVisualPicturePropertiesBuilder()
def a_cNvPr():
    return CT_NonVisualDrawingPropsBuilder('pic:cNvPr')
def a_docPr():
    return CT_NonVisualDrawingPropsBuilder('wp:docPr')
def a_drawing():
    return CT_DrawingBuilder()
def a_fillRect():
    return CT_RelativeRectBuilder()
def a_graphic():
    return CT_GraphicalObjectBuilder()
def a_graphicData():
    return CT_GraphicalObjectDataBuilder()
def a_graphicFrameLocks():
    return CT_GraphicalObjectFrameLockingBuilder()
def a_pic():
    return CT_PictureBuilder()
def a_prstGeom():
    return CT_PresetGeometry2DBuilder()
def a_stretch():
    return CT_StretchInfoPropertiesBuilder()
def an_ext():
    return CT_PositiveSize2DBuilder('a:ext')
def an_extent():
    return CT_PositiveSize2DBuilder('wp:extent')
def an_inline():
    return CT_InlineBuilder()
def an_nvPicPr():
    return CT_PictureNonVisualBuilder()
def an_off():
    return CT_Point2DBuilder()
def an_spPr():
    return CT_ShapePropertiesBuilder()
def an_xfrm():
    return CT_Transform2DBuilder()
| |
import time
import zlib
import json
import urlparse
import hashlib
import datetime
import traceback
from functools import wraps
from collections import defaultdict
from sqlalchemy import func, or_, and_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload_all, joinedload
from appcomposer import db, rlock, redis_store
from appcomposer.application import app
from appcomposer.languages import obtain_languages, obtain_groups
from appcomposer.translator.suggestions import translate_texts
from appcomposer.models import TranslatedApp, TranslationUrl, TranslationBundle, ActiveTranslationMessage, TranslationMessageHistory, TranslationKeySuggestion, TranslationValueSuggestion, GoLabOAuthUser, TranslationSyncLog, TranslationSubscription, TranslationNotificationRecipient, RepositoryApp
# Module-wide debug switch (usage not visible in this chunk).
DEBUG = False
# Language and group catalogs, loaded once at import time.
LANGUAGES = obtain_languages()
GROUPS = obtain_groups()
def get_golab_default_user():
    """Return the default AppComposer GoLabOAuthUser, creating it on first use.

    Creation is race-safe: an IntegrityError on commit means another worker
    inserted the row first, in which case the existing row is fetched.
    """
    default_email = app.config.get('TRANSLATOR_DEFAULT_EMAIL', 'weblab+appcomposer@deusto.es')
    default_user = db.session.query(GoLabOAuthUser).filter_by(email = default_email).first()
    if default_user is None:
        default_user = GoLabOAuthUser(email = default_email, display_name = "AppComposer")
        db.session.add(default_user)
        try:
            db.session.commit()
        except IntegrityError:
            # Lost a creation race: read the row the other worker committed.
            # NOTE(review): the re-read happens *before* rollback() — verify
            # the session can query mid-failed-transaction on this backend.
            default_user = db.session.query(GoLabOAuthUser).filter_by(email = default_email).first()
            db.session.rollback()
        except:
            db.session.rollback()
            raise
        else:
            # Commit succeeded: reset the scoped session and re-read so the
            # returned instance is attached to a fresh session.
            db.session.remove()
            default_user = db.session.query(GoLabOAuthUser).filter_by(email = default_email).first()
    return default_user
def calculate_content_hash(app_url):
    """Given an App URL generate the hash of the values of the translations. This way, can quickly know if an app was changed or not in a single query, and not do the whole
    expensive DB processing for those which have not changed.

    Returns a unicode CRC32 string over a canonically sorted JSON dump of all
    bundles/messages, or None when the app or its translation URL is unknown.
    """
    translated_app = db.session.query(TranslatedApp).filter_by(url=app_url).first()
    if translated_app is None:
        return
    translation_url = translated_app.translation_url
    if translation_url is None:
        return
    app_translations = []
    #
    # Important: all these contents must be sorted
    #
    # [
    #     {
    #         'lang': 'es_ES',
    #         'messages: ' [
    #              {
    #                  'key': 'key1',
    #                  'value': 'message1',
    #              },
    #              {
    #                  'key': 'key2',
    #                  'value': 'message2',
    #              }
    #         ],
    #     }
    # ]
    organized_data = {
        # es_ES: {
        #     key: value,
        # }
    }
    # Group every active message by its "<language>_<target>" bundle key.
    for atm in db.session.query(ActiveTranslationMessage).filter(ActiveTranslationMessage.bundle_id == TranslationBundle.id, TranslationBundle.translation_url == translation_url).options(joinedload('bundle')).all():
        bundle_key = '{}_{}'.format(atm.bundle.language, atm.bundle.target)
        if bundle_key not in organized_data:
            organized_data[bundle_key] = {}
        organized_data[bundle_key][atm.key] = atm.value
    # Sort data
    # Bundles and messages are emitted in sorted order so the serialized
    # form (and therefore the CRC) is deterministic across runs.
    for bundle_key in sorted(organized_data.keys()):
        bundle_data = {
            'lang': bundle_key,
            'messages': []
        }
        for message_key in sorted(organized_data[bundle_key].keys()):
            bundle_data['messages'].append({
                'key': message_key,
                'value': organized_data[bundle_key][message_key],
            })
        app_translations.append(bundle_data)
    app_translations_str= json.dumps(app_translations)
    # Python 2 unicode(); CRC32 is cheap and sufficient for change detection.
    return unicode(zlib.crc32(app_translations_str))
def _get_or_create_app(app_url, translation_url, metadata):
    """Ensure TranslationUrl + TranslatedApp rows exist for app_url and sync
    the notification subscriptions listed in metadata['mails'].

    Returns the (possibly newly created) TranslationUrl row.  Changes are
    added to the session; committing is left to the caller.
    """
    # Create the translation url if not present
    automatic = metadata.get('automatic', True)
    attribs = metadata.get('attribs', '')
    db_translation_url = db.session.query(TranslationUrl).filter_by(url = translation_url).first()
    if not db_translation_url:
        db_translation_url = TranslationUrl(url = translation_url, automatic = automatic, attribs = attribs)
        db.session.add(db_translation_url)
    else:
        # Keep existing row in sync with the metadata flags.
        if db_translation_url.automatic != automatic:
            db_translation_url.automatic = automatic
        if db_translation_url.attribs != attribs:
            db_translation_url.attribs = attribs
    SUBSCRIPTION_MECHANISM = 'translation-url'
    # Current subscribers for this translation URL under this mechanism.
    subscribed_emails = set([ email for email, in db.session.query(TranslationNotificationRecipient.email).filter(TranslationSubscription.translation_url == db_translation_url, TranslationSubscription.mechanism == SUBSCRIPTION_MECHANISM, TranslationSubscription.recipient_id == TranslationNotificationRecipient.id).all() ])
    subscription_requests = set(metadata.get('mails', []))
    # Diff requested vs. stored subscriptions: add the new, drop the stale.
    pending_subscriptions = subscription_requests - subscribed_emails
    subscriptions_to_delete = subscribed_emails - subscription_requests
    if subscriptions_to_delete:
        for db_subscription in db.session.query(TranslationSubscription).filter(TranslationSubscription.mechanism == SUBSCRIPTION_MECHANISM, TranslationSubscription.translation_url == db_translation_url, TranslationSubscription.recipient_id == TranslationNotificationRecipient.id, TranslationNotificationRecipient.email.in_(list(subscriptions_to_delete))).all():
            db.session.delete(db_subscription)
    if pending_subscriptions:
        for pending_subscription in pending_subscriptions:
            # Recipients are shared across subscriptions; create on demand.
            recipient = db.session.query(TranslationNotificationRecipient).filter_by(email = pending_subscription).first()
            if not recipient:
                recipient = TranslationNotificationRecipient(pending_subscription)
                db.session.add(recipient)
            db.session.add(TranslationSubscription(translation_url = db_translation_url, recipient = recipient, mechanism = SUBSCRIPTION_MECHANISM))
    # Create the app if not present
    db_app_url = db.session.query(TranslatedApp).filter_by(url = app_url).first()
    if db_app_url:
        if db_app_url.translation_url is None:
            db_app_url.translation_url = db_translation_url
        elif db_app_url.translation_url != db_translation_url:
            # If present with a different translation url, copy the old one if possible
            _deep_copy_translations(db_app_url.translation_url, db_translation_url)
            db_app_url.translation_url = db_translation_url
    else:
        db_app_url = TranslatedApp(url = app_url, translation_url = db_translation_url)
        db.session.add(db_app_url)
    return db_translation_url
def _get_or_create_bundle(app_url, translation_url, metadata, language, target, from_developer):
    """Return the TranslationBundle for (translation_url, language, target),
    creating and session-adding it if it does not exist yet."""
    db_translation_url = _get_or_create_app(app_url, translation_url, metadata)
    # Look the bundle up first; only instantiate on a miss.
    bundle = db.session.query(TranslationBundle).filter_by(
        translation_url=db_translation_url, language=language, target=target
    ).first()
    if bundle is None:
        bundle = TranslationBundle(language, target, db_translation_url, from_developer)
        db.session.add(bundle)
    return bundle
def get_bundles_by_key_namespaces(pairs):
    """ given a list of pairs (key, namespace), return the list of bundles which contain translations like those

    ``pairs`` is a list of dicts with 'key' and 'namespace' entries; the
    result is a list of {'language', 'target'} dicts, deduplicated.
    """
    keys = [ pair['key'] for pair in pairs ]
    # Falsy namespaces are dropped; the IN() filter only takes real values.
    namespaces = [ pair['namespace'] for pair in pairs if pair['namespace'] ]
    pairs_found = {}
    if keys and namespaces:
        # One query for all candidate (key, namespace) combinations, then
        # filter down to the exact requested pairs in Python below.
        for key, namespace, bundle_id in db.session.query(ActiveTranslationMessage.key, ActiveTranslationMessage.namespace, ActiveTranslationMessage.bundle_id).filter(ActiveTranslationMessage.key.in_(keys), ActiveTranslationMessage.namespace.in_(namespaces), ActiveTranslationMessage.taken_from_default == False).all():
            if (key, namespace) not in pairs_found:
                pairs_found[key, namespace] = set()
            pairs_found[key, namespace].add(bundle_id)
    bundle_ids = set()
    for pair in pairs:
        key = pair['key']
        namespace = pair['namespace']
        new_bundle_ids = pairs_found.get((key, namespace))
        if new_bundle_ids:
            bundle_ids.update(new_bundle_ids)
    bundles = []
    # existing_bundles tracks "target@lang" strings already emitted, so the
    # returned list contains each (language, target) combination only once.
    existing_bundles = []
    if bundle_ids:
        for lang, target in db.session.query(TranslationBundle.language, TranslationBundle.target).filter(TranslationBundle.id.in_(bundle_ids)).all():
            key = "%s@%s" % (target, lang)
            if key not in existing_bundles:
                existing_bundles.append(key)
                bundles.append({
                    'language' : lang,
                    'target' : target,
                })
    return bundles
def locking(func):
    """Decorator serializing complex write operations behind a redis lock.

    The wrapped function runs while holding ``locks:<func name>`` with a
    15-second TTL.  Acquisition is attempted up to 11 times before giving
    up; the SQLAlchemy scoped session is reset first so the work starts
    from a clean session.

    Raises:
        Exception("Unable to lock")   if the lock can never be acquired.
        Exception("Unable to unlock") if releasing the lock fails.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        lock_key = 'locks:{}'.format(func.__name__)
        db.session.remove()
        retries_left = 10
        import sys  # sys is not imported at module level
        while True:
            # Maximum: hold the lock for 15 seconds (TTL in milliseconds).
            lock_obj = rlock.lock(lock_key, 15 * 1000)
            if lock_obj and lock_obj.resource:
                sys.stdout.flush()
                try:
                    return func(*args, **kwargs)
                finally:
                    sys.stdout.flush()
                    try:
                        rlock.unlock(lock_obj)
                    except Exception:
                        # Bug fix: this failure happens while *unlocking*;
                        # the original raised the misleading message
                        # "Unable to lock" here.
                        raise Exception("Unable to unlock")
            retries_left = retries_left - 1
            if retries_left < 0:
                raise Exception("Unable to lock")
    return wrapper
@locking
def add_full_translation_to_app(user_email, app_url, translation_url, app_metadata, language, target, translated_messages, original_messages, from_developer):
    """Store a full translation (history + active messages) for one app bundle.

    Runs under the distributed `locking` decorator. Creates the bundle if
    needed, merges `translated_messages` into it, fills untranslated keys from
    `original_messages` (marked taken_from_default), propagates namespaced
    translations to/from sibling bundles with the same language/target, records
    key- and value-based suggestions, removes keys no longer present in
    `original_messages`, de-duplicates (key, namespace) collisions, and commits
    once at the end (IntegrityError is treated as a concurrent duplicate and
    rolled back silently).
    """
    user = db.session.query(GoLabOAuthUser).filter_by(email=user_email).first()
    db_translation_bundle = _get_or_create_bundle(app_url, translation_url, app_metadata, language, target, from_developer)
    #
    # <NO SHIELD NEW BEHAVIOR>
    #
    # We have recently removed the shields that protected messages to be overriden by users' messages.
    # In the past, when a user attempted to update a message which was provided by the developer, we
    # automatically discarded it. Now we enable the user to delete it. Furthermore, if the developer
    # changes a piece of text, and developers update it in their servers with a different message, we
    # now give a higher priority to that message rather to that from the developer.
    #
    if from_developer:
        # If it comes from the developer, now the expected thing is to check if it is different to the current message and, if it is different, check if it
        # was different to the last message coming from the developer in the history with developer = True. If it is different (i.e., there has been really a chanage)
        # then proceed with the change. Otherwise, discard that message.
        #
        # In other words, we have to do the translated_messages.pop() thing with those messages where there is a history and developer = True with the last message being equal
        if translated_messages is not None:
            # Work on a copy: keys may be discarded below without affecting the caller's dict.
            translated_messages = translated_messages.copy()
            active_msgs = db.session.query(ActiveTranslationMessage).filter_by(bundle = db_translation_bundle).all()
            active_msgs_by_key = {
                # key: value
            }
            for active_msg in active_msgs:
                active_msgs_by_key[active_msg.key] = active_msg.value
            # select atm.`key`, atm.value from TranslationMessageHistory atm
            # inner join (select max(datetime) as max_date, `key` from TranslationMessageHistory where from_developer = true and bundle_id = 953 group by bundle_id, `key`) atm2
            # on atm.datetime = atm2.max_date and atm.`key` = atm2.`key` where from_developer = true and bundle_id = 953;
            #
            tmh_subquery = db.session.query(
                func.max(TranslationMessageHistory.datetime).label('tmh_date'),
                TranslationMessageHistory.key.label('tmh_key')
            ).filter_by(
                from_developer=True,
                bundle=db_translation_bundle
            ).group_by(
                TranslationMessageHistory.bundle_id, TranslationMessageHistory.key
            ).subquery()
            latest_message_history = db.session.query(
                TranslationMessageHistory.key,
                TranslationMessageHistory.value
            ).join(
                tmh_subquery,
                and_(
                    tmh_subquery.c.tmh_date == TranslationMessageHistory.datetime,
                    tmh_subquery.c.tmh_key == TranslationMessageHistory.key
                )
            ).filter(
                TranslationMessageHistory.from_developer == True,
                TranslationMessageHistory.bundle == db_translation_bundle
            )
            historic_msgs_by_key = dict(latest_message_history.all())
            # key: latest value from developer
            # }
            for key, value in historic_msgs_by_key.iteritems():
                # If the message is the same as it was in the latest message stored from developer,
                # and it comes from developer, do not take it into account (since it could be overriding
                # the user's message)
                if key in translated_messages and value == translated_messages[key] and translated_messages[key] != active_msgs_by_key.get(key):
                    translated_messages.pop(key, None)
    #
    # </NO SHIELD NEW BEHAVIOR>
    #
    if from_developer and not db_translation_bundle.from_developer:
        # If this is an existing translation and it comes from a developer, establish that it is from developer
        db_translation_bundle.from_developer = from_developer
    #
    # # CODE COMMENTED as part of the no shield removal:
    # if not from_developer and db_translation_bundle.from_developer:
    #     # If this is an existing translation from a developer and it comes from a user (and not a developer)
    #     # then it should not be accepted.
    #     if translated_messages is not None:
    #         translated_messages = translated_messages.copy()
    #         for msg in db_translation_bundle.active_messages:
    #             if msg.from_developer:
    #                 translated_messages.pop(msg.key, None)
    #     # Continue with the remaining translated_messages
    if translated_messages is not None and len(translated_messages) == 0:
        translated_messages = None
    existing_namespaces = set()
    existing_namespace_keys = set()
    existing_active_translations_with_namespace_with_default_value = []
    # First, update translations
    # Sync each active message's metadata (position, category, namespace, ...) with
    # what original_messages currently says, and collect default-valued namespaced
    # messages that might already be translated elsewhere.
    for existing_active_translation in db.session.query(ActiveTranslationMessage).filter_by(bundle = db_translation_bundle).all():
        key = existing_active_translation.key
        position = original_messages.get(key, {}).get('position')
        if position is not None and existing_active_translation.position != position:
            existing_active_translation.position = position
        category = original_messages.get(key, {}).get('category')
        if existing_active_translation.category != category:
            existing_active_translation.category = category
        namespace = original_messages.get(key, {}).get('namespace')
        if existing_active_translation.namespace != namespace:
            existing_active_translation.namespace = namespace
        tool_id = original_messages.get(key, {}).get('tool_id')
        if existing_active_translation.tool_id != tool_id:
            existing_active_translation.tool_id = tool_id
        fmt = original_messages.get(key, {}).get('format')
        if existing_active_translation.fmt != fmt:
            existing_active_translation.fmt = fmt
        same_tool = original_messages.get(key, {}).get('same_tool')
        if existing_active_translation.same_tool != same_tool:
            existing_active_translation.same_tool = same_tool
        if namespace is not None and existing_active_translation.taken_from_default:
            existing_namespaces.add(namespace)
            existing_namespace_keys.add(key)
            existing_active_translations_with_namespace_with_default_value.append(existing_active_translation)
    # Then, check namespaces
    if existing_namespaces:
        #
        # If there are namespaces in the current bundle with words taken from default, maybe those words are already translated somewhere else.
        # So I take the existing translations for that (namespace, key, bundle), and if they exist, I use them and delete the current message
        #
        existing_namespace_translations = {}
        _user_ids = set()
        if existing_namespace_keys:
            for key, namespace, value, current_from_developer, existing_user_id in db.session.query(
                ActiveTranslationMessage.key,
                ActiveTranslationMessage.namespace,
                ActiveTranslationMessage.value,
                ActiveTranslationMessage.from_developer,
                TranslationMessageHistory.user_id
            ).filter(
                ActiveTranslationMessage.history_id == TranslationMessageHistory.id,
                ActiveTranslationMessage.key.in_(list(existing_namespace_keys)),
                ActiveTranslationMessage.namespace.in_(list(existing_namespaces)),
                ActiveTranslationMessage.bundle_id == TranslationBundle.id,
                TranslationBundle.language == db_translation_bundle.language,
                TranslationBundle.target == db_translation_bundle.target,
                ActiveTranslationMessage.bundle_id != db_translation_bundle.id,
                ActiveTranslationMessage.taken_from_default == False,
                # And make sure it's in the repository (ignore others)
                TranslationBundle.translation_url_id == TranslatedApp.translation_url_id,
                RepositoryApp.url == TranslatedApp.url
            ).all():
                existing_namespace_translations[key, namespace] = (value, current_from_developer, existing_user_id)
                _user_ids.add(existing_user_id)
        existing_users = {}
        if _user_ids:
            # NOTE(review): this loop rebinds 'user' (the uploading user fetched at the
            # top of the function); 'user' is read again further below — verify this
            # clobbering is harmless here.
            for user in db.session.query(GoLabOAuthUser).filter(GoLabOAuthUser.id.in_(list(_user_ids))).all():
                existing_users[user.id] = user
        for wrong_message in existing_active_translations_with_namespace_with_default_value:
            now = datetime.datetime.utcnow()
            pack = existing_namespace_translations.get((wrong_message.key, wrong_message.namespace))
            if pack:
                value, current_from_developer, existing_user_id = pack
                existing_user = existing_users[existing_user_id]
                key = wrong_message.key
                wrong_history = wrong_message.history
                wrong_history_parent_id = wrong_history.id
                wrong_message_position = wrong_message.position
                wrong_message_category = wrong_message.category
                wrong_message_tool_id = wrong_message.tool_id
                wrong_message_same_tool = wrong_message.same_tool
                wrong_message_fmt = wrong_message.fmt
                # 1st) Delete the current translation message
                db.session.delete(wrong_message)
                # 2nd) Create a new historic translation message
                new_db_history = TranslationMessageHistory(db_translation_bundle, key, value, existing_user, now, wrong_history_parent_id,
                                    taken_from_default = False, same_tool = wrong_message_same_tool, tool_id = wrong_message_tool_id, fmt = wrong_message_fmt,
                                    position = wrong_message_position, category = wrong_message_category, from_developer = current_from_developer, namespace = wrong_message.namespace)
                db.session.add(new_db_history)
                # 3rd) Create a new active translation message
                db.session.query(ActiveTranslationMessage).filter_by(bundle=db_translation_bundle, key=key).delete()
                # NOTE(review): 'namespace' below is a leftover loop variable from the
                # metadata-sync loop above (it holds the namespace of the *last* iterated
                # message), whereas the history row just created uses wrong_message.namespace.
                # It likely should be wrong_message.namespace here too — verify.
                new_db_active_translation_message = ActiveTranslationMessage(db_translation_bundle, key, value, new_db_history, now, False, wrong_message_position, wrong_message_category, current_from_developer, namespace, wrong_message_tool_id, wrong_message_same_tool, wrong_message_fmt)
                db.session.add(new_db_active_translation_message)
    if translated_messages is not None:
        # Delete active translations that are going to be replaced
        # Store which were the parents of those translations and
        # what existing translations don't need to be replaced
        unchanged = []
        parent_translation_ids = {}
        for existing_active_translation in db.session.query(ActiveTranslationMessage).filter_by(bundle = db_translation_bundle).all():
            key = existing_active_translation.key
            if key in translated_messages:
                if (translated_messages[key] and existing_active_translation.value != translated_messages[key]) or (not from_developer and existing_active_translation.taken_from_default):
                    parent_translation_ids[key] = existing_active_translation.history.id
                    db.session.delete(existing_active_translation)
                else:
                    unchanged.append(key)
        # For each translation message
        now = datetime.datetime.utcnow()
        for key, value in translated_messages.iteritems():
            if value is None:
                value = ""
            if key not in unchanged and key in original_messages:
                position = original_messages[key]['position']
                category = original_messages[key]['category']
                namespace = original_messages[key]['namespace']
                tool_id = original_messages[key]['tool_id']
                same_tool = original_messages[key]['same_tool']
                fmt = original_messages[key]['format']
                # object() sentinel: if 'text' is missing this comparison is always False.
                same_text = original_messages.get(key, {}).get('text', object()) == value
                if value.strip() == "":
                    taken_from_default = False # Force that it's empty, it's taken from default. After all, empty messages will not appear in the UI
                elif from_developer and same_text:
                    taken_from_default = True
                else:
                    taken_from_default = False
                # Create a new history message
                parent_translation_id = parent_translation_ids.get(key, None)
                db_history = TranslationMessageHistory(db_translation_bundle, key, value, user, now, parent_translation_id, taken_from_default = taken_from_default,
                                    same_tool = same_tool, tool_id = tool_id, fmt = fmt, position = position, category = category, from_developer = from_developer, namespace = namespace)
                db.session.add(db_history)
                # Establish that thew new active message points to this history message
                # We are going to add a new message: delete the one existing if any
                db.session.query(ActiveTranslationMessage).filter_by(bundle=db_translation_bundle, key=key).delete()
                db_active_translation_message = ActiveTranslationMessage(db_translation_bundle, key, value, db_history, now, taken_from_default, position, category, from_developer, namespace, tool_id, same_tool, fmt)
                db.session.add(db_active_translation_message)
                if same_text:
                    # If the message in the original language is the same as in the target language or the value is empty and it shouldn't, then
                    # it can be two things:
                    #
                    # 1) that it has been filled with the original language. In this case it should not be later displayed as a suggestion
                    # 2) that the message is the same in the original language and in the target language
                    #
                    # Given that the original language will be a suggestion anyway, it's better to avoid storing this message as suggestion
                    continue
                if namespace:
                    #
                    # If namespace, maybe this key is present in other translations. Therefore, I search for other translations
                    # out there in other bundles but with same language and target and the same namespace, where they are not from developer
                    # and I copy my translation to them.
                    #
                    for wrong_message in db.session.query(ActiveTranslationMessage).filter(
                        ActiveTranslationMessage.key == key,
                        ActiveTranslationMessage.namespace == namespace,
                        ActiveTranslationMessage.value != value,
                        ActiveTranslationMessage.bundle_id == TranslationBundle.id,
                        TranslationBundle.language == db_translation_bundle.language,
                        TranslationBundle.target == db_translation_bundle.target,
                        TranslationBundle.id != db_translation_bundle.id,
                        # And make sure it's in the repository (ignore others)
                        TranslationBundle.translation_url_id == TranslatedApp.translation_url_id,
                        RepositoryApp.url == TranslatedApp.url
                    ).options(joinedload_all('bundle')).all():
                        # wrong_message is a message for same language, target, key and namespace with a different value.
                        # We must update it with the current credentials
                        wrong_history = wrong_message.history
                        wrong_history_parent_id = wrong_history.id
                        wrong_message_position = wrong_message.position
                        wrong_message_category = wrong_message.category
                        wrong_message_bundle = wrong_message.bundle
                        wrong_message_tool_id = wrong_message.tool_id
                        wrong_message_same_tool = wrong_message.same_tool
                        wrong_message_fmt = wrong_message.fmt
                        # 1st) Delete the current translation message
                        db.session.delete(wrong_message)
                        # 2nd) Create a new historic translation message
                        new_db_history = TranslationMessageHistory(wrong_message_bundle, key, value, user, now, wrong_history_parent_id, taken_from_default = False,
                                            same_tool = wrong_message_same_tool, tool_id = wrong_message_tool_id, fmt = wrong_message_fmt,
                                            position = wrong_message_position, category = wrong_message_category, from_developer = from_developer,
                                            namespace = namespace)
                        db.session.add(new_db_history)
                        # 3rd) Create a new active translation message
                        db.session.query(ActiveTranslationMessage).filter_by(bundle=wrong_message_bundle, key=key).delete()
                        new_db_active_translation_message = ActiveTranslationMessage(wrong_message_bundle, key, value, new_db_history, now, False, wrong_message_position, wrong_message_category, from_developer, namespace, wrong_message_tool_id, wrong_message_same_tool, wrong_message_fmt)
                        db.session.add(new_db_active_translation_message)
                # Create a suggestion based on the key
                db_existing_key_suggestion = db.session.query(TranslationKeySuggestion).filter_by(key = key, value = value, language = language, target = target).first()
                if db_existing_key_suggestion:
                    db_existing_key_suggestion.number += 1
                    db.session.add(db_existing_key_suggestion)
                else:
                    db_key_suggestion = TranslationKeySuggestion(key = key, language = language, target = target, value = value, number = 1)
                    db.session.add(db_key_suggestion)
                # Create a suggestion based on the value
                if original_messages is not None and key in original_messages:
                    human_key = original_messages[key]['text'][:255]
                    db_existing_human_key_suggestion = db.session.query(TranslationValueSuggestion).filter_by(human_key = human_key, value = value, language = language, target = target).first()
                    if db_existing_human_key_suggestion:
                        db_existing_human_key_suggestion.number += 1
                        db.session.add(db_existing_human_key_suggestion)
                    else:
                        db_human_key_suggestion = TranslationValueSuggestion(human_key = human_key, language = language, target = target, value = value, number = 1)
                        db.session.add(db_human_key_suggestion)
    now = datetime.datetime.utcnow()
    # Keys still untranslated in this bundle are created from the default (original) text.
    existing_keys = [ key for key, in db.session.query(ActiveTranslationMessage.key).filter_by(bundle = db_translation_bundle).all() ]
    namespaces = [ v['namespace'] for k, v in original_messages.iteritems() if k not in existing_keys and v['namespace'] ]
    if namespaces:
        existing_namespaces = {}
        _user_ids = set()
        if original_messages and namespaces:
            for key, namespace, value, current_from_developer, existing_user_id in db.session.query(
                ActiveTranslationMessage.key,
                ActiveTranslationMessage.namespace,
                ActiveTranslationMessage.value,
                ActiveTranslationMessage.from_developer,
                TranslationMessageHistory.user_id
            ).filter(
                ActiveTranslationMessage.history_id == TranslationMessageHistory.id,
                ActiveTranslationMessage.key.in_(original_messages.keys()),
                ActiveTranslationMessage.namespace.in_(list(namespaces)),
                ActiveTranslationMessage.bundle_id == TranslationBundle.id,
                TranslationBundle.language == db_translation_bundle.language,
                TranslationBundle.target == db_translation_bundle.target,
                ActiveTranslationMessage.taken_from_default == False,
                # Only if it is in the repo
                TranslationBundle.translation_url_id == TranslatedApp.translation_url_id,
                RepositoryApp.url == TranslatedApp.url,
            ).all():
                existing_namespaces[key, namespace] = (value, current_from_developer, existing_user_id)
                _user_ids.add(existing_user_id)
        existing_users = {}
        if _user_ids:
            # NOTE(review): this loop also rebinds 'user'; the fallback
            # 'existing_user = user' below would then reference the wrong person — verify.
            for user in db.session.query(GoLabOAuthUser).filter(GoLabOAuthUser.id.in_(list(_user_ids))).all():
                existing_users[user.id] = user
    else:
        existing_namespaces = {}
        existing_users = {}
    for key, original_message_pack in original_messages.iteritems():
        if key not in existing_keys:
            value = original_message_pack['text'] or ''
            position = original_message_pack['position']
            category = original_message_pack['category']
            namespace = original_message_pack['namespace']
            tool_id = original_message_pack['tool_id']
            same_tool = original_message_pack['same_tool']
            fmt = original_message_pack['format']
            taken_from_default = True
            if value.strip() == '':
                taken_from_default = False
            # If there is a namespace, try to get the value from other namespaces, and override the current value
            current_from_developer = False
            existing_user = user
            if namespace:
                pack = existing_namespaces.get((key, namespace), None)
                if pack is not None:
                    value, current_from_developer, existing_user_id = pack
                    existing_user = existing_users[existing_user_id]
                    taken_from_default = False
            # Create a new translation establishing that it was generated with the default value (and therefore it should be changed)
            db_history = TranslationMessageHistory(db_translation_bundle, key, value, existing_user, now, None, taken_from_default = taken_from_default,
                                same_tool = same_tool, tool_id = tool_id, fmt = fmt, position = position, category = category,
                                from_developer = current_from_developer, namespace = namespace)
            db.session.add(db_history)
            # Establish that thew new active message points to this history message
            db.session.query(ActiveTranslationMessage).filter_by(bundle=db_translation_bundle, key=key).delete()
            db_active_translation_message = ActiveTranslationMessage(db_translation_bundle, key, value, db_history, now, taken_from_default = taken_from_default, position = position, category = category, from_developer = current_from_developer, namespace = namespace, tool_id = tool_id, same_tool = same_tool, fmt = fmt)
            db.session.add(db_active_translation_message)
    # Drop active messages for keys that no longer exist in the app.
    for existing_key in existing_keys:
        if existing_key not in original_messages:
            old_translations = db.session.query(ActiveTranslationMessage).filter_by(bundle = db_translation_bundle, key = existing_key).all()
            for old_translation in old_translations:
                db.session.delete(old_translation)
    # De-duplicate: if several active messages share (key, namespace), keep the best
    # candidate (prefer non-default, then from-developer) and delete the rest.
    for key, namespace in db.session.query(ActiveTranslationMessage.key, ActiveTranslationMessage.namespace).filter_by(bundle = db_translation_bundle).group_by(ActiveTranslationMessage.key, ActiveTranslationMessage.namespace).having(func.count(ActiveTranslationMessage.key) > 1).all():
        best_chance = None
        all_chances = []
        for am in db.session.query(ActiveTranslationMessage).filter_by(key = key, namespace = namespace, bundle = db_translation_bundle).all():
            all_chances.append(am)
            if best_chance is None:
                best_chance = am
            elif not am.taken_from_default and best_chance.taken_from_default:
                best_chance = am
            elif am.from_developer and not best_chance.from_developer:
                best_chance = am
        for chance in all_chances:
            if chance != best_chance:
                db.session.delete(chance)
    # Commit!
    try:
        db.session.commit()
    except IntegrityError:
        # Somebody else did this
        db.session.rollback()
    except:
        db.session.rollback()
        raise
def register_app_url(app_url, translation_url, metadata):
    """Register (or look up) an app with its translation URL, then schedule a sync.

    An IntegrityError on commit means another worker registered it concurrently,
    so it is rolled back silently and no synchronization is scheduled.
    """
    _get_or_create_app(app_url, translation_url, metadata)
    try:
        db.session.commit()
    except IntegrityError:
        # Somebody else did this process concurrently; nothing left to do.
        db.session.rollback()
        return
    except:
        traceback.print_exc()
        db.session.rollback()
        raise
    # Commit succeeded: delay the synchronization process
    from appcomposer.translator.tasks import task_synchronize_single_app
    task_synchronize_single_app.delay(source="register app", single_app_url = app_url)
def retrieve_stored(translation_url, language, target):
    """Return (messages, from_developer, automatic) for a stored translation.

    messages maps each key to its value plus provenance flags; it is empty when
    the translation URL or the (language, target) bundle is unknown.
    """
    db_url = db.session.query(TranslationUrl).filter_by(url = translation_url).first()
    if db_url is None:
        # Messages, from_developer, automatic
        return {}, False, True

    bundle = db.session.query(TranslationBundle).filter_by(translation_url = db_url, language = language, target = target).first()
    if bundle is None:
        # No message, not from developer, automatic = whatever says before
        return {}, False, db_url.automatic

    messages = {
        message.key: {
            'value' : message.value,
            'from_default' : message.taken_from_default,
            'from_developer' : message.from_developer,
            'same_tool': message.same_tool,
            'tool_id': message.tool_id,
        }
        for message in bundle.active_messages
    }
    return messages, bundle.from_developer, db_url.automatic
# When True, keys that already have a stored translation are excluded from the
# suggestion lookup in retrieve_suggestions().
SKIP_SUGGESTIONS_IF_STORED = False
def retrieve_suggestions(original_messages, language, target, stored_translations):
    """Build translation suggestions for every message key.

    Combines three sources:
      1. TranslationKeySuggestion — previous translations stored under the same key.
      2. TranslationValueSuggestion — previous translations of the same source text.
      3. translate_texts() — automatic (machine) translations.

    Returns { key: [ {'target': suggested_text, 'weight': 0..1}, ... ] }, each
    list sorted by descending weight (weights normalized to the strongest
    suggestion for that key).
    """
    original_keys = [ key for key in original_messages ]
    if SKIP_SUGGESTIONS_IF_STORED:
        original_keys = [ key for key in original_keys if key not in stored_translations ]
    original_values = [ original_messages[key]['text'] for key in original_keys ]

    # Reverse map: source text -> all keys sharing that text.
    # NOTE(review): built from every message, even keys filtered out above — confirm intended.
    original_keys_by_value = {}
    for key, original_message_pack in original_messages.items():
        original_keys_by_value.setdefault(original_message_pack['text'], []).append(key)

    all_suggestions = {}
    current_suggestions = []

    # First, key suggestions
    key_suggestions_by_key = defaultdict(list)
    if original_keys:
        for key_suggestion in db.session.query(TranslationKeySuggestion).filter_by(language = language, target = target).filter(TranslationKeySuggestion.key.in_(original_keys)).all():
            key_suggestions_by_key[key_suggestion.key].append({
                'target' : key_suggestion.value,
                'number' : key_suggestion.number,
            })
    current_suggestions.append(key_suggestions_by_key)

    # Second, value suggestions
    value_suggestions_by_key = defaultdict(list)
    # human_key is truncated to 255 characters when stored, so truncate for the lookup too.
    orig_values = [ orig_value[:255] for orig_value in original_values ]
    if orig_values:
        for value_suggestion in db.session.query(TranslationValueSuggestion).filter_by(language = language, target = target).filter(TranslationValueSuggestion.human_key.in_(orig_values)).all():
            for key in original_keys_by_value.get(value_suggestion.human_key, []):
                value_suggestions_by_key[key].append({
                    'target' : value_suggestion.value,
                    'number' : value_suggestion.number,
                })
    # Machine translations, weighted by the translator's own score.
    for human_key, suggested_values in translate_texts(original_values, language).items():
        for key in original_keys_by_value.get(human_key, []):
            for suggested_value, weight in suggested_values.items():
                value_suggestions_by_key[key].append({
                    'target' : suggested_value,
                    'number' : weight,
                })
    current_suggestions.append(value_suggestions_by_key)

    for key in original_keys:
        current_key_suggestions = defaultdict(int)
        # { 'target' : number }
        for suggestions in current_suggestions:
            for suggestion in suggestions.get(key, []):
                current_key_suggestions[suggestion['target']] += suggestion['number']
        all_suggestions[key] = []
        if current_key_suggestions:
            # Normalize so the strongest suggestion gets weight 1.0
            total_value = max(current_key_suggestions.values())
            # Renamed loop variable: the original shadowed the 'target' parameter.
            for suggested_target, number in current_key_suggestions.items():
                all_suggestions[key].append({
                    'target' : suggested_target,
                    'weight' : 1.0 * number / total_value,
                })
            # key= sort instead of the original cmp-based sort (cmp was removed in Python 3)
            all_suggestions[key].sort(key=lambda suggestion: suggestion['weight'], reverse=True)
    return all_suggestions
def _get_all_results_from_translation_url(translation_url, keys):
    """Per-(language, target) stats for a translation URL, restricted to *keys*.

    Returns rows of (distinct translated key count, newest datetime, oldest
    datetime, language, target), counting only non-default, same-tool messages.
    Returns [] when *keys* is empty.
    """
    if not keys:
        return []
    stats_query = db.session.query(
        func.count(func.distinct(ActiveTranslationMessage.key)),
        func.max(ActiveTranslationMessage.datetime),
        func.min(ActiveTranslationMessage.datetime),
        TranslationBundle.language,
        TranslationBundle.target,
    ).filter(
        ActiveTranslationMessage.taken_from_default == False,
        ActiveTranslationMessage.same_tool == True,
        ActiveTranslationMessage.key.in_(keys),
        ActiveTranslationMessage.bundle_id == TranslationBundle.id,
        TranslationBundle.translation_url_id == TranslationUrl.id,
        TranslationUrl.url == translation_url,
    ).group_by(TranslationBundle.language, TranslationBundle.target)
    return stats_query.all()
def retrieve_translations_stats(translation_url, original_messages):
    """Compute per-language/per-target translation statistics for an app.

    Messages owned by the app's own tool (same_tool) are counted directly;
    messages owned by other tools become "dependencies" whose translation
    progress is looked up in those tools' own bundles.

    Returns (translations, generic_dependencies): translations follows the
    inline template below; generic_dependencies is a flat per-tool summary list.
    """
    filtered_messages = {
        # key: {
        #    typical properties (same_tool, tool_id, namespace...)
        # }
    }
    other_tools = {
        # tool_id : [ key1, key2, key3...],
    }
    # Split original messages: this tool's own keys vs. keys owned by other tools.
    for key, properties in original_messages.items():
        if properties['same_tool']:
            filtered_messages[key] = properties
        else:
            if properties['tool_id']:
                if properties['tool_id'] not in other_tools:
                    other_tools[properties['tool_id']] = []
                other_tools[properties['tool_id']].append(key)
    items = len(filtered_messages)
    results = _get_all_results_from_translation_url(translation_url, list(filtered_messages))
    if items == 0:
        return {}, []
    dependencies_data = {
        # (language, target) : [
        #    {
        #        "title": "My title",
        #        "link": "http://golabz.eu/...",
        #        "percent": 50,
        #        "translated": 10,
        #        "items": 20,
        #    }
        # ]
    }
    generic_dependencies = []
    translation_url_parsed = urlparse.urlparse(translation_url)
    translation_url_base = '{0}://{1}/'.format(translation_url_parsed.scheme, translation_url_parsed.netloc)
    # Dependencies are only matched within the same domain as the app's own
    # translation URL; the Twente deployment additionally shares tools with the
    # composer deployments.
    if translation_url_base == 'http://go-lab.gw.utwente.nl/':
        tool_domain_condition = or_(
            TranslationUrl.url.like('{0}%'.format(translation_url_base)), # Check that it's from the same domain, and not other 'common' in other domain
            TranslationUrl.url.like('http://localhost:5000/%'),
            TranslationUrl.url.like('http://composer.golabz.eu/%'),
        )
    else:
        tool_domain_condition = TranslationUrl.url.like('{0}%'.format(translation_url_base)) # Check that it's from the same domain, and not other 'common' in other domain
    for tool_used, tool_keys in other_tools.items():
        tool_translation_urls = db.session.query(TranslationUrl.url).filter(
            tool_domain_condition,
            TranslationBundle.translation_url_id == TranslationUrl.id,
            ActiveTranslationMessage.bundle_id == TranslationBundle.id,
            ActiveTranslationMessage.tool_id == tool_used,
            ActiveTranslationMessage.same_tool == True,
        ).group_by(TranslationUrl.url).all()
        tool_translation_urls = [ url for url, in tool_translation_urls ]
        if tool_translation_urls:
            # NOTE(review): when several translation URLs match, only the first is used — confirm intended.
            tool_translation_url = tool_translation_urls[0]
            tool_app_url_pack = db.session.query(TranslatedApp.url).filter(
                TranslatedApp.translation_url_id == TranslationUrl.id,
                TranslationUrl.url == tool_translation_url
            ).first()
            if tool_app_url_pack is not None:
                tool_app_url, = tool_app_url_pack
                repo_contents = db.session.query(RepositoryApp.name, RepositoryApp.app_link).filter(
                    RepositoryApp.url == TranslatedApp.url,
                    TranslatedApp.url == tool_app_url
                ).first()
                if repo_contents is not None:
                    tool_name, tool_link = repo_contents
                else:
                    # Not in the repository: fall back to the raw app URL for both fields.
                    tool_name = tool_app_url
                    tool_link = tool_app_url
                generic_dependencies.append({
                    'translated': 0,
                    'items': len(tool_keys),
                    'percent': 0.0,
                    'link': tool_link,
                    'title': tool_name,
                    'app_url': tool_app_url,
                })
                tool_results = _get_all_results_from_translation_url(tool_translation_url, tool_keys)
                for count, modification_date, creation_date, lang, target in tool_results:
                    if (lang, target) not in dependencies_data:
                        dependencies_data[lang, target] = []
                    # NOTE(review): 'percent' here is on a 0-100 scale, but the guard branch
                    # yields 1.0 and other entries use 0.0 — the scales look inconsistent; verify.
                    dependencies_data[lang, target].append({
                        'translated': count,
                        'items': len(tool_keys),
                        'percent': (100.0 * count / len(tool_keys)) if len(tool_keys) > 0 else 1.0,
                        'link': tool_link,
                        'title': tool_name,
                        'app_url': tool_app_url,
                    })
                # After this, make sure we populate the rest of the languages too
                for count, modification_date, creation_date, lang, target in results:
                    # We don't care about count, modification_date or creation_date
                    if (lang, target) not in dependencies_data:
                        dependencies_data[lang, target] = []
                    all_tools_info = dependencies_data[lang, target]
                    found = False
                    for tools_info in all_tools_info:
                        if tools_info['app_url'] == tool_app_url:
                            found = True
                            break
                    if not found:
                        dependencies_data[lang, target].append({
                            'translated': 0,
                            'items': len(tool_keys),
                            'percent': 0.0,
                            'link': tool_link,
                            'title': tool_name,
                            'app_url': tool_app_url,
                        })
    translations = {
        # es_ES : {
        #     "name" : foo,
        #     "targets" : {
        #         "ALL" : {
        #            "modified_date" : "2014-02-14",
        #            "creation_date" : "2014-02-14",
        #            "name" : "Adolescens...,
        #            "translated" : 21,
        #            "items" : 31,
        #            "dependencies" : [
        #                {
        #                    "title": "My dependency",
        #                    "link": "http://composer.golabz.eu/...",
        #                    "percent": 50,
        #                    "translated": 10,
        #                    "items": 20,
        #                }
        #            ]
        #         }
        #     }
        # }
    }
    for count, modification_date, creation_date, lang, target in results:
        if lang not in translations:
            translations[lang] = {
                'name' : LANGUAGES.get(lang),
                'targets' : {}
            }
        mdate = modification_date.strftime("%Y-%m-%d") if modification_date is not None else None
        cdate = creation_date.strftime("%Y-%m-%d") if creation_date is not None else None
        dependencies = dependencies_data.get((lang, target), [])
        translations[lang]['targets'][target] = {
            'modification_date' : mdate,
            'creation_date' : cdate,
            'name' : GROUPS.get(target),
            'translated' : count,
            'items' : items,
            'dependencies': dependencies,
        }
    # Verify that all the info from the dependencies is displayed
    # (a language/target may exist only in a dependency, not in the app itself)
    for (lang, target), dependencies in dependencies_data.iteritems():
        if lang not in translations:
            translations[lang] = {
                'name' : LANGUAGES.get(lang),
                'targets' : {}
            }
        if target not in translations[lang]['targets']:
            translations[lang]['targets'][target] = {
                'modification_date' : None,
                'creation_date' : None,
                'name' : GROUPS.get(target),
                'translated' : 0,
                'items' : items,
                'dependencies': dependencies,
            }
    return translations, generic_dependencies
def retrieve_translations_percent(translation_url, original_messages):
    """Return { '<lang>_<target>': fraction_translated } for every bundle of the app.

    The fraction is translated/items in [0.0, 1.0]; bundles with zero items
    report 1.0.
    """
    stats, _generic_dependencies = retrieve_translations_stats(translation_url, original_messages)
    percent = {
        # es_ES_ALL : 0.8
    }
    for lang, lang_package in stats.iteritems():
        for target, target_stats in lang_package.get('targets', {}).iteritems():
            done = target_stats['translated']
            total = target_stats['items']
            percent['%s_%s' % (lang, target)] = (1.0 * done / total) if total > 0 else 1.0
    return percent
def _deep_copy_bundle(src_bundle, dst_bundle):
    """Replicate every message of src_bundle (full history plus the active set)
    into dst_bundle. Assumes dst_bundle holds no translations yet, so the whole
    history can be copied without merging."""
    id_mapping = {}       # source history id -> newly created history id
    new_histories = {}    # source history id -> new TranslationMessageHistory instance

    for src_msg in src_bundle.all_messages:
        copied = TranslationMessageHistory(dst_bundle, src_msg.key, src_msg.value, src_msg.user, src_msg.datetime, id_mapping.get(src_msg.parent_translation_id), src_msg.taken_from_default,
                            same_tool = src_msg.same_tool, tool_id = src_msg.tool_id, fmt = src_msg.fmt,
                            position = src_msg.position, category = src_msg.category, from_developer = src_msg.from_developer,
                            namespace = src_msg.namespace)
        db.session.add(copied)
        # Commit (and refresh) per message so the new primary key exists before
        # any child message references it as parent.
        try:
            db.session.commit()
        except:
            db.session.rollback()
            raise
        db.session.refresh(copied)
        id_mapping[src_msg.id] = copied.id
        new_histories[src_msg.id] = copied

    now = datetime.datetime.utcnow()
    for src_msg in src_bundle.active_messages:
        copied_history = new_histories.get(src_msg.history_id)
        db.session.add(ActiveTranslationMessage(dst_bundle, src_msg.key, src_msg.value, copied_history, now, src_msg.taken_from_default, src_msg.position, src_msg.category, src_msg.from_developer, src_msg.namespace, src_msg.tool_id, src_msg.same_tool, src_msg.fmt))
    try:
        db.session.commit()
    except:
        db.session.rollback()
        raise
def _merge_bundle(src_bundle, dst_bundle):
    """Copy all the messages. The destination bundle already existed, so we can only copy those
    messages not present."""
    now = datetime.datetime.utcnow()
    for msg in src_bundle.active_messages:
        existing_translation = db.session.query(ActiveTranslationMessage).filter_by(bundle = dst_bundle, key = msg.key).first()
        if existing_translation is None:
            # Key not present in the destination: copy it (history + active).
            t_history = TranslationMessageHistory(dst_bundle, msg.key, msg.value, msg.history.user, now, None, msg.taken_from_default,
                                                  same_tool = msg.same_tool, tool_id = msg.tool_id, fmt = msg.fmt,
                                                  position = msg.position, category = msg.category, from_developer = msg.from_developer,
                                                  namespace = msg.namespace)
            db.session.add(t_history)
            active_t = ActiveTranslationMessage(dst_bundle, msg.key, msg.value, t_history, now, msg.taken_from_default, msg.position, msg.category, msg.from_developer, msg.namespace, msg.tool_id, msg.same_tool, msg.fmt)
            db.session.add(active_t)
            try:
                db.session.commit()
            except:
                db.session.rollback()
                raise
        elif existing_translation.taken_from_default and not msg.taken_from_default:
            # The destination only has a value inherited from the default
            # translation while the source carries a real one: replace it.
            db.session.delete(existing_translation)
            # Merge it
            t_history = TranslationMessageHistory(dst_bundle, msg.key, msg.value, msg.history.user, now, existing_translation.history.id, msg.taken_from_default,
                                                  same_tool = msg.same_tool, tool_id = msg.tool_id, fmt = msg.fmt,
                                                  position = msg.position, category = msg.category, from_developer = msg.from_developer,
                                                  namespace = msg.namespace)
            db.session.add(t_history)
            # Delete before adding
            active_t = ActiveTranslationMessage(dst_bundle, msg.key, msg.value, t_history, now, msg.taken_from_default, msg.position, msg.category, msg.from_developer, msg.namespace, msg.tool_id, msg.same_tool, msg.fmt)
            db.session.add(active_t)
            try:
                db.session.commit()
            except:
                db.session.rollback()
                # NOTE(review): this handler also calls db.session.remove(),
                # unlike every other rollback in this module -- confirm whether
                # discarding the scoped session here is intentional.
                db.session.remove()
                raise
def _deep_copy_translations(old_translation_url, new_translation_url):
    """Given an old translation of a URL, take the old bundles and copy them to the new one."""
    # Index the already existing bundles of the destination by (language, target).
    existing = {}
    for bundle in new_translation_url.bundles:
        existing[bundle.language, bundle.target] = bundle
    for old_bundle in old_translation_url.bundles:
        target_bundle = existing.get((old_bundle.language, old_bundle.target))
        if target_bundle:
            # Destination bundle exists: merge only the missing messages.
            _merge_bundle(old_bundle, target_bundle)
        else:
            # No such bundle yet: create it and copy everything over.
            target_bundle = TranslationBundle(old_bundle.language, old_bundle.target,
                                              new_translation_url, old_bundle.from_developer)
            db.session.add(target_bundle)
            _deep_copy_bundle(old_bundle, target_bundle)
def start_synchronization(source, cached, single_app_url = None):
now = datetime.datetime.utcnow()
sync_log = TranslationSyncLog(now, None, source, cached, single_app_url)
db.session.add(sync_log)
try:
db.session.commit()
except:
db.session.rollback()
raise
db.session.refresh(sync_log)
print "Starting synchronization %s" % sync_log.id
return sync_log.id
def end_synchronization(sync_id, number):
now = datetime.datetime.utcnow()
sync_log = db.session.query(TranslationSyncLog).filter_by(id = sync_id).first()
if sync_log is not None:
sync_log.end_datetime = now
sync_log.number_apps = number
print "Synchronization %s finished" % sync_log.id
try:
db.session.commit()
except:
db.session.rollback()
raise
def get_latest_synchronizations():
    """Return metadata dicts for the 10 most recent synchronizations, newest first."""
    query = (db.session.query(TranslationSyncLog)
             .order_by(TranslationSyncLog.start_datetime.desc())
             .limit(10))
    results = []
    for sync in query.all():
        results.append({
            'id' : sync.id,
            'start' : sync.start_datetime,
            'end' : sync.end_datetime,
            'source' : sync.source,
            'cached' : sync.cached,
            'single_url' : sync.single_url,
            'number' : sync.number_apps,
        })
    return results
def cached_get_bundle(language, target, app_url):
    """Report whether a translation bundle exists, caching positives in redis for 24h."""
    key = 'cache:bundle:{}:{}:{}'.format(language, target, app_url)
    if redis_store.exists(key):
        return True
    # Cache miss: fall back to the database. This does not happen often.
    translated_app = db.session.query(TranslatedApp).filter_by(url = app_url).first()
    translation_url = translated_app.translation_url if translated_app is not None else None
    if translation_url is None:
        return False
    bundle = db.session.query(TranslationBundle).filter_by(
        translation_url = translation_url, language = language, target = target).first()
    if bundle is None:
        return False
    # Positive result: remember it for 24 hours.
    redis_store.setex(key, 24 * 3600, 'true')
    return True
def update_user_status(language, target, app_url, user):
if not cached_get_bundle(language, target, app_url):
return
if user is None:
print "ERROR: user can't be NULL"
return
key_user = 'active_translators:{}:{}:{}:{}'.format(app_url, language, target, user.email)
key_translations = 'active_translations:{}:{}:{}'.format(app_url, language, target)
pipeline = redis_store.pipeline()
pipeline.hset(key_user, 'last', time.time())
pipeline.hset(key_user, 'email', user.email)
pipeline.hset(key_user, 'name', user.display_name)
pipeline.sadd(key_translations, user.email)
# Delete automatically these keys after 10 minutes
pipeline.expire(key_user, 600)
pipeline.expire(key_translations, 600)
pipeline.execute()
return
def get_user_status(language, target, app_url, user):
    """Return modification timestamps and active collaborators for a bundle.

    The result dict contains 'modificationDate' (last change by `user`),
    'modificationDateByOther' (last change by anyone else), 'time_now' and
    'collaborators'. On lookup failures the same shape is returned with an
    additional 'error_msg' key.
    """
    FORMAT = "%Y-%m-%dT%H:%M:%SZ"
    now = datetime.datetime.utcnow()
    now_str = now.strftime(FORMAT)
    # Template returned (with 'error_msg' added) when any lookup fails.
    ERROR = {
        'modificationDate': now_str,
        'modificationDateByOther': now_str,
        'time_now': now_str,
        'collaborators': []
    }
    translated_app = db.session.query(TranslatedApp).filter_by(url = app_url).first()
    if translated_app is None:
        ERROR['error_msg'] = "Translation App URL not found"
        return ERROR
    translation_url = translated_app.translation_url
    if translation_url is None:
        ERROR['error_msg'] = "Translation Translation URL not found"
        return ERROR
    bundle = db.session.query(TranslationBundle).filter_by(translation_url = translation_url, language = language, target = target).first()
    if bundle is None:
        ERROR['error_msg'] = "Bundle not found"
        return ERROR
    # Latest modification timestamp per user for this bundle.
    last_change_by_user = db.session.query(func.max(ActiveTranslationMessage.datetime), TranslationMessageHistory.user_id).filter(ActiveTranslationMessage.history_id == TranslationMessageHistory.id, ActiveTranslationMessage.bundle == bundle).group_by(TranslationMessageHistory.user_id).all()
    modification_date = None
    modification_date_by_other = None
    # Split the per-user maxima into "mine" vs. the newest "by someone else".
    for last_change, user_id in last_change_by_user:
        if user_id == user.id:
            modification_date = last_change
        else:
            if modification_date_by_other is None or modification_date_by_other < last_change:
                modification_date_by_other = last_change
    # If the requesting user never modified the bundle, fall back to the
    # other users' latest change.
    if modification_date is None and modification_date_by_other is not None:
        modification_date = modification_date_by_other
    # Find collaborators (if any)
    key_translations = 'active_translations:{}:{}:{}'.format(app_url, language, target)
    emails = redis_store.smembers(key_translations)
    # Batch all the per-translator hash reads in a single pipeline round-trip.
    pipeline = redis_store.pipeline()
    for email in emails:
        key_user = 'active_translators:{}:{}:{}:{}'.format(app_url, language, target, email)
        pipeline.hgetall(key_user)
    now = time.time()
    collaborators = []
    for collaborator in pipeline.execute():
        if not collaborator:
            # It might have disappeared: empty dict or None
            continue
        if collaborator['email'] == user.email:
            # Ignore myself
            continue
        last_check = float(collaborator['last'])
        if now - last_check > 60:
            # If there was no update in the last 60 seconds ignore
            continue
        # md5 of the email, presumably for a gravatar-style avatar URL --
        # TODO confirm with the consumer of this field.
        collaborators.append({
            'name': collaborator['name'],
            'md5': hashlib.md5(collaborator['email']).hexdigest(),
        })
    return {
        'modificationDate': modification_date.strftime(FORMAT) if modification_date is not None else None,
        'modificationDateByOther': modification_date_by_other.strftime(FORMAT) if modification_date_by_other is not None else None,
        'time_now': now_str,
        'collaborators': collaborators
    }
| |
# -*- coding: utf-8 -*-
import mock
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.models import AddonApprovalsCounter
from olympia.amo.tests import (
addon_factory, file_factory, TestCase, user_factory, version_factory)
from olympia.files.models import FileValidation
from olympia.files.utils import atomic_lock
from olympia.reviewers.management.commands import auto_approve
from olympia.reviewers.models import (
AutoApprovalNotEnoughFilesError, AutoApprovalNoValidationResultError,
AutoApprovalSummary, get_reviewing_cache, ReviewerScore)
class TestAutoApproveCommand(TestCase):
    """Tests for the `auto_approve` management command."""

    def setUp(self):
        # Task user the command impersonates when approving.
        self.user = user_factory(
            id=settings.TASK_USER_ID, username='taskuser',
            email='taskuser@mozilla.com')
        self.addon = addon_factory(average_daily_users=666)
        # A webextension version awaiting review: the baseline candidate.
        self.version = version_factory(
            addon=self.addon, file_kw={
                'status': amo.STATUS_AWAITING_REVIEW,
                'is_webextension': True})
        self.file = self.version.all_files[0]
        self.file_validation = FileValidation.objects.create(
            file=self.version.all_files[0], validation=u'{}')
        AddonApprovalsCounter.objects.create(addon=self.addon, counter=1)
        # Always mock log_final_summary() method so we can look at the stats
        # easily.
        patcher = mock.patch.object(auto_approve.Command, 'log_final_summary')
        self.log_final_summary_mock = patcher.start()
        self.addCleanup(patcher.stop)

    def _check_stats(self, expected_stats):
        """Assert the stats dict passed to log_final_summary()."""
        # We abuse the fact that log_final_summary receives stats as positional
        # argument to check what happened.
        assert self.log_final_summary_mock.call_count == 1
        stats = self.log_final_summary_mock.call_args[0][0]
        assert stats == expected_stats

    def test_fetch_candidates(self):
        """fetch_candidates() returns eligible versions ordered by nomination date."""
        # Add nominated add-on: it should be considered.
        self.version.update(nomination=self.days_ago(1))
        new_addon = addon_factory(status=amo.STATUS_NOMINATED, file_kw={
            'status': amo.STATUS_AWAITING_REVIEW,
            'is_webextension': True})
        new_addon_version = new_addon.versions.all()[0]
        new_addon_version.update(nomination=self.days_ago(2))
        # Add langpack: it should also be considered.
        langpack = addon_factory(
            type=amo.ADDON_LPAPP, status=amo.STATUS_NOMINATED, file_kw={
                'status': amo.STATUS_AWAITING_REVIEW,
                'is_webextension': True})
        langpack_version = langpack.versions.all()[0]
        langpack_version.update(nomination=self.days_ago(3))
        # Add a bunch of add-ons in various states that should not be returned.
        # Public add-on with no updates.
        addon_factory(file_kw={'is_webextension': True})
        # Non-extension with updates.
        search_addon = addon_factory(type=amo.ADDON_SEARCH)
        version_factory(addon=search_addon, file_kw={
            'status': amo.STATUS_AWAITING_REVIEW,
            'is_webextension': True})
        # Disabled add-on with updates.
        disabled_addon = addon_factory(disabled_by_user=True)
        version_factory(addon=disabled_addon, file_kw={
            'status': amo.STATUS_AWAITING_REVIEW,
            'is_webextension': True})
        # Add-on with deleted version.
        addon_with_deleted_version = addon_factory()
        deleted_version = version_factory(
            addon=addon_with_deleted_version, file_kw={
                'status': amo.STATUS_AWAITING_REVIEW,
                'is_webextension': True})
        deleted_version.delete()
        # Add-on with a non-webextension update.
        non_webext_addon = addon_factory()
        version_factory(addon=non_webext_addon, file_kw={
            'status': amo.STATUS_AWAITING_REVIEW})
        # Add-on with 3 versions:
        # - one webext, listed, public.
        # - one non-listed webext version
        # - one listed non-webext awaiting review.
        complex_addon = addon_factory(file_kw={'is_webextension': True})
        version_factory(
            addon=complex_addon, channel=amo.RELEASE_CHANNEL_UNLISTED,
            file_kw={'is_webextension': True})
        version_factory(addon=complex_addon, file_kw={
            'status': amo.STATUS_AWAITING_REVIEW})
        # Finally, add a second file to self.version to test the distinct().
        file_factory(
            version=self.version, status=amo.STATUS_AWAITING_REVIEW,
            is_webextension=True)
        # Gather the candidates.
        command = auto_approve.Command()
        command.post_review = True
        qs = command.fetch_candidates()
        # 3 versions should be found. Because of the nomination date,
        # langpack_version should be first (its nomination date is the oldest),
        # followed by new_addon_version and then self.version.
        assert len(qs) == 3
        assert qs[0] == langpack_version
        assert qs[1] == new_addon_version
        assert qs[2] == self.version

    @mock.patch(
        'olympia.reviewers.management.commands.auto_approve.statsd.incr')
    @mock.patch(
        'olympia.reviewers.management.commands.auto_approve.ReviewHelper')
    def test_approve(self, review_helper_mock, statsd_incr_mock):
        """approve() delegates to ReviewHelper and records a statsd metric."""
        command = auto_approve.Command()
        command.approve(self.version)
        assert review_helper_mock.call_count == 1
        assert review_helper_mock.call_args == (
            (), {'addon': self.addon, 'version': self.version}
        )
        assert review_helper_mock().handler.process_public.call_count == 1
        assert statsd_incr_mock.call_count == 1
        assert statsd_incr_mock.call_args == (
            ('reviewers.auto_approve.approve',), {}
        )

    @mock.patch('olympia.reviewers.utils.sign_file')
    def test_full(self, sign_file_mock):
        # Simple integration test with as few mocks as possible.
        assert not AutoApprovalSummary.objects.exists()
        assert not self.file.reviewed
        ActivityLog.objects.all().delete()
        self.author = user_factory()
        self.addon.addonuser_set.create(user=self.author)
        # Delete the add-on current version and approval info, leaving it
        # nominated. Because we're in post-review we should pick it up and
        # approve it anyway.
        AddonApprovalsCounter.objects.filter(addon=self.addon).get().delete()
        self.addon.current_version.delete()
        self.addon.update_status()
        # The dry run should not change anything; the real run approves.
        call_command('auto_approve', '--dry-run')
        call_command('auto_approve')
        self.addon.reload()
        self.file.reload()
        assert AutoApprovalSummary.objects.count() == 1
        assert AutoApprovalSummary.objects.get(version=self.version)
        assert get_reviewing_cache(self.addon.pk) is None
        assert self.addon.status == amo.STATUS_PUBLIC
        assert self.file.status == amo.STATUS_PUBLIC
        assert self.file.reviewed
        assert ActivityLog.objects.count()
        activity_log = ActivityLog.objects.latest('pk')
        assert activity_log.action == amo.LOG.APPROVE_VERSION.id
        assert sign_file_mock.call_count == 1
        assert sign_file_mock.call_args[0][0] == self.file
        # The author should have been notified by email.
        assert len(mail.outbox) == 1
        msg = mail.outbox[0]
        assert msg.to == [self.author.email]
        assert msg.from_email == settings.REVIEWERS_EMAIL
        assert msg.subject == 'Mozilla Add-ons: %s %s Approved' % (
            unicode(self.addon.name), self.version.version)

    @mock.patch.object(auto_approve, 'set_reviewing_cache')
    @mock.patch.object(auto_approve, 'clear_reviewing_cache')
    @mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
    def test_locking(
            self, create_summary_for_version_mock, clear_reviewing_cache_mock,
            set_reviewing_cache_mock):
        """The command locks the add-on for review and unlocks it afterwards."""
        create_summary_for_version_mock.return_value = (
            AutoApprovalSummary(), {})
        call_command('auto_approve')
        assert create_summary_for_version_mock.call_count == 1
        assert set_reviewing_cache_mock.call_count == 1
        assert set_reviewing_cache_mock.call_args == (
            (self.addon.pk, settings.TASK_USER_ID), {})
        assert clear_reviewing_cache_mock.call_count == 1
        assert clear_reviewing_cache_mock.call_args == ((self.addon.pk,), {})

    @mock.patch.object(auto_approve, 'set_reviewing_cache')
    @mock.patch.object(auto_approve, 'clear_reviewing_cache')
    @mock.patch.object(AutoApprovalSummary, 'check_is_locked')
    @mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
    def test_no_locking_if_already_locked(
            self, create_summary_for_version_mock, check_is_locked_mock,
            clear_reviewing_cache_mock, set_reviewing_cache_mock):
        """No lock manipulation happens when someone else holds the lock."""
        check_is_locked_mock.return_value = True
        create_summary_for_version_mock.return_value = (
            AutoApprovalSummary(), {})
        call_command('auto_approve')
        assert create_summary_for_version_mock.call_count == 1
        assert set_reviewing_cache_mock.call_count == 0
        assert clear_reviewing_cache_mock.call_count == 0

    @mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
    def test_not_enough_files_error(self, create_summary_for_version_mock):
        """AutoApprovalNotEnoughFilesError is counted as an error stat."""
        create_summary_for_version_mock.side_effect = (
            AutoApprovalNotEnoughFilesError)
        call_command('auto_approve')
        assert get_reviewing_cache(self.addon.pk) is None
        assert create_summary_for_version_mock.call_count == 1
        self._check_stats({'total': 1, 'error': 1})

    @mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
    def test_no_validation_result(self, create_summary_for_version_mock):
        """AutoApprovalNoValidationResultError is counted as an error stat."""
        create_summary_for_version_mock.side_effect = (
            AutoApprovalNoValidationResultError)
        call_command('auto_approve')
        assert get_reviewing_cache(self.addon.pk) is None
        assert create_summary_for_version_mock.call_count == 1
        self._check_stats({'total': 1, 'error': 1})

    @mock.patch.object(auto_approve.Command, 'approve')
    @mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
    def test_successful_verdict_dry_run(
            self, create_summary_for_version_mock, approve_mock):
        """A positive verdict in dry-run mode is counted but not approved."""
        create_summary_for_version_mock.return_value = (
            AutoApprovalSummary(verdict=amo.WOULD_HAVE_BEEN_AUTO_APPROVED), {})
        call_command('auto_approve', '--dry-run')
        assert approve_mock.call_count == 0
        assert create_summary_for_version_mock.call_args == (
            (self.version, ), {'dry_run': True})
        assert get_reviewing_cache(self.addon.pk) is None
        self._check_stats({'total': 1, 'auto_approved': 1})

    @mock.patch.object(auto_approve.Command, 'approve')
    @mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
    def test_successful_verdict(
            self, create_summary_for_version_mock, approve_mock):
        """A positive verdict triggers an actual approval."""
        create_summary_for_version_mock.return_value = (
            AutoApprovalSummary(verdict=amo.AUTO_APPROVED), {})
        call_command('auto_approve')
        assert create_summary_for_version_mock.call_count == 1
        assert create_summary_for_version_mock.call_args == (
            (self.version, ), {'dry_run': False})
        assert get_reviewing_cache(self.addon.pk) is None
        assert approve_mock.call_count == 1
        assert approve_mock.call_args == (
            (self.version, ), {})
        self._check_stats({'total': 1, 'auto_approved': 1})

    @mock.patch.object(auto_approve.Command, 'approve')
    @mock.patch.object(AutoApprovalSummary, 'create_summary_for_version')
    def test_failed_verdict(
            self, create_summary_for_version_mock, approve_mock):
        """A negative verdict records the verdict info in the stats, no approval."""
        fake_verdict_info = {
            'is_locked': True
        }
        create_summary_for_version_mock.return_value = (
            AutoApprovalSummary(verdict=amo.NOT_AUTO_APPROVED),
            fake_verdict_info)
        call_command('auto_approve')
        assert approve_mock.call_count == 0
        assert create_summary_for_version_mock.call_args == (
            (self.version, ), {'dry_run': False})
        assert get_reviewing_cache(self.addon.pk) is None
        self._check_stats({
            'total': 1,
            'is_locked': 1,
        })

    def test_prevent_multiple_runs_in_parallel(self):
        # Create a lock manually, the command should exit immediately without
        # doing anything.
        with atomic_lock(settings.TMP_PATH, auto_approve.LOCK_NAME):
            call_command('auto_approve')
        assert self.log_final_summary_mock.call_count == 0
        assert self.file.reload().status == amo.STATUS_AWAITING_REVIEW
class TestAwardPostReviewPoints(TestCase):
    """Tests for the `award_post_review_points` management command."""

    def setUp(self):
        self.user1 = user_factory()
        self.user2 = user_factory()
        self.user3 = user_factory()
        self.addon1 = addon_factory()
        self.addon2 = addon_factory()
        # First user approved content of addon1.
        ActivityLog.create(
            amo.LOG.APPROVE_CONTENT, self.addon1,
            self.addon1.current_version, user=self.user1)
        # Second user confirmed auto-approved of addon2.
        ActivityLog.create(
            amo.LOG.CONFIRM_AUTO_APPROVED, self.addon2,
            self.addon2.current_version, user=self.user2)
        # Third user approved content of addon2.
        ActivityLog.create(
            amo.LOG.APPROVE_CONTENT, self.addon2,
            self.addon2.current_version, user=self.user3,)

    def test_missing_auto_approval_summary(self):
        """Approvals lacking an AutoApprovalSummary get no score."""
        assert ReviewerScore.objects.count() == 0
        call_command('award_post_review_points')
        # CONFIRM_AUTO_APPROVED was skipped since we can't determine its
        # weight (has no AutoApprovalSummary).
        assert ReviewerScore.objects.count() == 2
        first_score = ReviewerScore.objects.filter(user=self.user1).get()
        assert first_score.addon == self.addon1
        assert first_score.note == (
            'Retroactively awarded for past post/content review approval.')
        assert first_score.note_key == amo.REVIEWED_CONTENT_REVIEW
        second_score = ReviewerScore.objects.filter(user=self.user3).get()
        assert second_score.addon == self.addon2
        assert second_score.note == (
            'Retroactively awarded for past post/content review approval.')
        assert second_score.note_key == amo.REVIEWED_CONTENT_REVIEW

    def test_full(self):
        """All three activity log entries get a retroactive score."""
        AutoApprovalSummary.objects.create(
            version=self.addon2.current_version, verdict=amo.AUTO_APPROVED,
            weight=151, confirmed=True)
        assert ReviewerScore.objects.count() == 0
        call_command('award_post_review_points')
        assert ReviewerScore.objects.count() == 3
        first_score = ReviewerScore.objects.filter(user=self.user1).get()
        assert first_score.addon == self.addon1
        assert first_score.note == (
            'Retroactively awarded for past post/content review approval.')
        assert first_score.note_key == amo.REVIEWED_CONTENT_REVIEW
        second_score = ReviewerScore.objects.filter(user=self.user2).get()
        assert second_score.addon == self.addon2
        assert second_score.note == (
            'Retroactively awarded for past post/content review approval.')
        # weight=151 maps to the highest risk bucket.
        assert second_score.note_key == amo.REVIEWED_EXTENSION_HIGHEST_RISK
        third_score = ReviewerScore.objects.filter(user=self.user3).get()
        assert third_score.addon == self.addon2
        assert third_score.note == (
            'Retroactively awarded for past post/content review approval.')
        assert third_score.note_key == amo.REVIEWED_CONTENT_REVIEW

    def test_run_twice(self):
        # Running twice should only generate the scores once.
        AutoApprovalSummary.objects.create(
            version=self.addon2.current_version, verdict=amo.AUTO_APPROVED,
            weight=151, confirmed=True)
        call_command('award_post_review_points')
        call_command('award_post_review_points')
        assert ReviewerScore.objects.count() == 3
| |
"""A class representing a microbial or tissue community."""
import re
import six
import six.moves.cPickle as pickle
import cobra
import pandas as pd
from sympy.core.singleton import S
from tqdm import tqdm
from micom.util import load_model, join_models, add_var_from_expression
from micom.logger import logger
from micom.media import default_excludes
from micom.problems import optcom, solve
# Columns that must be present in the taxonomy DataFrame passed to Community.
_taxonomy_cols = ["id", "file"]
class Community(cobra.Model):
"""A community of models.
This class represents a community of individual models. It was designed for
microbial communities but may also be used for multi-tissue or tissue-cell
mixture models as long as all individuals exist within a single enclosing
compartment.
"""
def __init__(self, taxonomy, id=None, name=None, rel_threshold=1e-6,
solver=None, progress=True):
"""Create a new community object.
`micom` builds a community from a taxonomy which may simply be a list
of model files in its simplest form. Usually, the taxonomy will contain
additional information such as annotations for the individuals (for
instance phylum, organims or species) and abundances.
Notes
-----
`micom` will automatically add exchange fluxes and and a community
objective maximizing the overall growth rate of the community.
Parameters
----------
taxonomy : pandas.DataFrame
The taxonomy used for building the model. Must have at least the
two columns "id" and "file" which specify an ID and the filepath
for each model. Valid file extensions are ".pickle", ".xml",
".xml.gz" and ".json". If the taxonomy includes a column named
"abundance" it will be used to quantify each individual in the
community. If absent `micom` will assume all individuals are
present in the same amount.
id : str, optional
The ID for the community. Should only contain letters and numbers,
otherwise it will be formatted as such.
name : str, optional
The name for the community.
rel_threshold : float < 1, optional
The relative abundance threshold that will be used. Describes the
smallest relative amount of an individual that will be considered
non-zero. All individuals with a smaller relative amount will be
omitted.
solver : str, optional
Which solver to use. Will default to cplex if available which is
better suited for large problems.
progress : bool, optional
Show a progress bar.
Attributes
----------
objectives : dict
A dict of {id: sympy_expression} denoting the individual growth
objectives for each model in the community.
"""
super(Community, self).__init__(id, name)
logger.info("building new micom model {}.".format(id))
if not solver:
self.solver = ("cplex" if "cplex" in cobra.util.solver.solvers
else "glpk")
else:
self.solver = solver
if not (isinstance(taxonomy, pd.DataFrame) and
all(col in taxonomy.columns for col in _taxonomy_cols)):
raise ValueError("`taxonomy` must be a pandas DataFrame with at"
"least columns id and file :(")
self._rtol = rel_threshold
self._modification = None
taxonomy = taxonomy.copy()
if "abundance" not in taxonomy.columns:
taxonomy["abundance"] = 1
taxonomy.abundance /= taxonomy.abundance.sum()
logger.info("{} individuals with abundances below threshold".format(
(taxonomy.abundance <= self._rtol).sum()))
taxonomy = taxonomy[taxonomy.abundance > self._rtol]
if taxonomy.id.str.contains(r"[^A-Za-z0-9_]", regex=True).any():
logger.warning("taxonomy IDs contain prohibited characters and"
" will be reformatted")
taxonomy.id = taxonomy.id.replace(
[r"[^A-Za-z0-9_\s]", r"\s+"], ["", "_"], regex=True)
self.__taxonomy = taxonomy
self.__taxonomy.index = self.__taxonomy.id
obj = S.Zero
self.objectives = {}
index = self.__taxonomy.index
index = tqdm(index, unit="models") if progress else index
for idx in index:
row = self.__taxonomy.loc[idx]
if isinstance(row.file, list):
model = join_models(row.file)
if len(row.file) > 1:
logger.info("joined {} models".format(len(row.file)))
else:
model = load_model(row.file)
suffix = "__" + idx.replace(" ", "_").strip()
logger.info("converting IDs for {}".format(idx))
for r in model.reactions:
r.global_id = r.id
r.id += suffix
r.community_id = idx
for m in model.metabolites:
m.global_id = m.id
m.id += suffix
m.compartment += suffix
m.community_id = idx
logger.info("adding reactions for {} to community".format(idx))
self.add_reactions(model.reactions)
o = self.solver.interface.Objective.clone(model.objective,
model=self.solver)
obj += o.expression * row.abundance
self.objectives[idx] = o.expression
species_obj = self.problem.Constraint(
o.expression, name="objective_" + idx, lb=0.0)
self.add_cons_vars([species_obj])
self.__add_exchanges(model.reactions, row)
self.solver.update() # to avoid dangling refs due to lazy add
com_obj = add_var_from_expression(self, "community_objective",
obj, lb=0)
self.objective = self.problem.Objective(com_obj, direction="max")
    def __add_exchanges(self, reactions, info, exclude=default_excludes,
                        external_compartment="e"):
        """Add exchange reactions for a new model."""
        for r in reactions:
            # Some sanity checks for whether the reaction is an exchange
            ex = external_compartment + "__" + r.community_id
            if (not r.boundary or any(bad in r.id for bad in exclude) or
                    ex not in r.compartments):
                continue
            if not r.id.lower().startswith("ex"):
                logger.warning(
                    "Reaction %s seems to be an exchange " % r.id +
                    "reaction but its ID does not start with 'EX_'...")
            # Export if the single external metabolite is a reactant;
            # otherwise flip the bounds to express it in export direction.
            export = len(r.reactants) == 1
            lb, ub = r.bounds if export else (-r.upper_bound, -r.lower_bound)
            met = (r.reactants + r.products)[0]
            # Drop the compartment suffix to get the shared medium ID.
            medium_id = re.sub("_{}$".format(met.compartment), "", met.id)
            if medium_id in exclude:
                continue
            medium_id += "_m"
            # Avoid a name collision with the organism-level metabolite.
            if medium_id == met.id:
                medium_id += "_medium"
            if medium_id not in self.metabolites:
                # If metabolite does not exist in medium add it to the model
                # and also add an exchange reaction for the medium
                logger.info("adding metabolite %s to external medium" %
                            medium_id)
                medium_met = met.copy()
                medium_met.id = medium_id
                medium_met.compartment = "m"
                medium_met.global_id = medium_id
                medium_met.community_id = "medium"
                ex_medium = cobra.Reaction(
                    id="EX_" + medium_met.id,
                    name=medium_met.id + " medium exchange",
                    lower_bound=lb,
                    upper_bound=ub)
                ex_medium.add_metabolites({medium_met: -1})
                ex_medium.global_id = ex_medium.id
                ex_medium.community_id = "medium"
                self.add_reactions([ex_medium])
            else:
                logger.info("updating import rate for external metabolite %s" %
                            medium_id)
                # Widen the medium exchange bounds to cover this organism too.
                medium_met = self.metabolites.get_by_id(medium_id)
                ex_medium = self.reactions.get_by_id("EX_" + medium_met.id)
                ex_medium.lower_bound = min(lb, ex_medium.lower_bound)
                ex_medium.upper_bound = max(ub, ex_medium.upper_bound)
            # Couple the organism reaction to the medium metabolite, scaled
            # by the organism's abundance.
            coef = info.abundance
            r.add_metabolites({medium_met: coef if export else -coef})
def __update_exchanges(self):
"""Update exchanges."""
logger.info("updating exchange reactions for %s" % self.id)
for met in self.metabolites.query(lambda x: x.compartment == "m"):
for r in met.reactions:
if r.boundary:
continue
coef = self.__taxonomy.loc[r.community_id, "abundance"]
if met in r.products:
r.add_metabolites({met: coef}, combine=False)
else:
r.add_metabolites({met: -coef}, combine=False)
def __update_community_objective(self):
"""Update the community objective."""
logger.info("updating the community objective for %s" % self.id)
v = self.variables.community_objective
const = self.constraints.community_objective_equality
self.remove_cons_vars([const])
com_obj = S.Zero
for sp, expr in self.objectives.items():
ab = self.__taxonomy.loc[sp, "abundance"]
com_obj += ab * expr
const = self.problem.Constraint(v - com_obj, lb=0, ub=0,
name="community_objective_equality")
self.add_cons_vars([const])
    def optimize_single(self, id):
        """Optimize growth rate for one individual.

        `optimize_single` will calculate the maximal growth rate for one
        individual member of the community.

        Notes
        -----
        This might well mean that growth rates for all other individuals are
        low since the individual may use up all available resources.

        Parameters
        ----------
        id : str or int
            The ID of the individual to be optimized, or its positive
            positional index in the taxonomy.

        Returns
        -------
        float
            The maximal growth rate for the given species.
        """
        if isinstance(id, six.string_types):
            if id not in self.__taxonomy.index:
                raise ValueError(id + " not in taxonomy!")
            info = self.__taxonomy.loc[id]
        elif isinstance(id, int) and id >= 0 and id < len(self.__taxonomy):
            info = self.__taxonomy.iloc[id]
        else:
            raise ValueError("`id` must be an id or positive index!")
        # `info.name` is the pandas index label of the row (the taxonomy id),
        # not a "name" column.
        logger.info("optimizing for {}".format(info.name))
        obj = self.objectives[info.name]
        # Temporarily swap the objective inside a context so the community
        # objective is restored afterwards.
        with self as m:
            m.objective = obj
            m.solver.optimize()
            return m.objective.value
def optimize_all(self, fluxes=False, progress=False):
"""Return solutions for individually optimizing each model.
Notes
-----
This might well mean that growth rates for all other individuals are
low since the individual may use up all available resources. As a
consequence the reported growth rates may usually never be obtained
all at once.
Parameters
----------
fluxes : boolean, optional
Whether to return all fluxes. Defaults to just returning the
maximal growth rate.
progress : boolean, optional
Whether to show a progress bar.
Returns
-------
pandas.Series
The maximal growth rate for each species.
"""
index = self.__taxonomy.index
if progress:
index = tqdm(self.__taxonomy.index, unit="optimizations")
individual = (self.optimize_single(id) for id in index)
return pd.Series(individual, self.__taxonomy.index)
    def optimize(self, slim=True):
        """Optimize the model using flux balance analysis.

        Parameters
        ----------
        slim : boolean
            Whether to return a slim solution which does not contain fluxes,
            just growth rates.

        Returns
        -------
        micom.CommunitySolution
            The solution after optimization or None if there is no optimum.
        """
        # NOTE(review): the model is optimized here and `solve` is called
        # right after inside a context -- confirm this first call is not
        # redundant with what `solve` does internally.
        self.solver.optimize()
        with self:
            solution = solve(self, fluxes=not slim)
        return solution
@property
def abundances(self):
"""pandas.Series: The normalized abundances.
Setting this attribute will also trigger the appropriate updates in
the exchange fluxes and the community objective.
"""
return self.__taxonomy.abundance
    @abundances.setter
    def abundances(self, value):
        # Assign first so pandas raises on any shape/length mismatch.
        try:
            self.__taxonomy.abundance = value
        except Exception:
            raise ValueError("value must be an iterable with an entry for "
                             "each species/tissue")
        logger.info("setting new abundances for %s" % self.id)
        ab = self.__taxonomy.abundance
        # Normalize so the abundances sum to one.
        self.__taxonomy.abundance /= ab.sum()
        # NOTE(review): whether `ab` reflects the normalized values here
        # depends on pandas view/aliasing semantics of the in-place divide
        # above -- consider re-reading the column after normalizing and
        # confirm which values the threshold was meant to apply to.
        small = ab < self._rtol
        logger.info("adjusting abundances for %s to %g" %
                    (str(self.__taxonomy.index[small]), self._rtol))
        # Clamp near-zero abundances to the threshold; note the column is not
        # re-normalized afterwards.
        self.__taxonomy.loc[small, "abundance"] = self._rtol
        self.__update_exchanges()
        self.__update_community_objective()
@property
def taxonomy(self):
"""pandas.DataFrame: The taxonomy used within the model.
This attribute only returns a copy.
"""
return self.__taxonomy.copy()
    @property
    def modification(self):
        """str: Denotes modifications to the model currently applied.

        Will be None if the community is unmodified.
        """
        return self._modification
    @modification.setter
    @cobra.util.context.resettable
    def modification(self, mod):
        # `resettable` makes the assignment revert automatically when the
        # enclosing `with model:` context exits.
        self._modification = mod
@property
def exchanges(self):
"""list: Returns all exchange reactions in the model.
Uses several heuristics based on the reaction name and compartments
to exclude reactions that are *not* exchange reactions.
"""
return self.reactions.query(
lambda x: x.boundary and not
any(ex in x.id for ex in default_excludes) and
"m" in x.compartments)
    def optcom(self, strategy="lagrangian", min_growth=0.1, tradeoff=0.5,
               fluxes=False, pfba=True):
        """Run OptCom for the community.

        OptCom methods are a group of optimization procedures to find community
        solutions that provide a tradeoff between the cooperative community
        growth and the egoistic growth of each individual [#c1]_. `micom`
        provides several strategies that can be used to find optimal solutions:

        - "linear": Applies a lower bound for the individual growth rates and
          finds the optimal community growth rate. This is the fastest methods
          but also ignores that individuals might strive to optimize their
          individual growth instead of community growth.
        - "lagrangian": Optimizes a joint objective containing the community
          objective (maximized) as well as a cooperativity cost which
          represents the distance to the individuals "egoistic" maximum growth
          rate (minimized). Requires the `tradeoff` parameter. This method is
          still relatively fast and does require only few additional variables.
        - "linear lagrangian": The same as "lagrangian" only with a linear
          representation of the cooperativity cost (absolute value).
        - "moma": Minimization of metabolic adjustment. Simultaneously
          optimizes the community objective (maximize) and the cooperativity
          cost (minimize). This method finds an exact maximum but doubles the
          number of required variables, thus being slow.
        - "lmoma": The same as "moma" only with a linear
          representation of the cooperativity cost (absolute value).
        - "original": Solves the multi-objective problem described in [#c1]_.
          Here, the community growth rate is maximized simultanously with all
          individual growth rates. Note that there are usually many
          Pareto-optimal solutions to this problem and the method will only
          give one solution. This is also the slowest method.

        Parameters
        ----------
        strategy : str
            The strategy used to solve the OptCom formulation. Defaults to
            "lagrangian" which gives a decent tradeoff between speed and
            correctness.
        min_growth : float or array-like
            The minimal growth rate required for each individual. May be a
            single value or an array-like object with the same length as there
            are individuals.
        tradeoff : float in [0, 1]
            Only used for lagrangian strategies. Must be between 0 and 1 and
            describes the strength of the cooperativity cost / egoism. 1 means
            optimization will only minimize the cooperativity cost and zero
            means optimization will only maximize the community objective.
        fluxes : boolean
            Whether to return the fluxes as well.
        pfba : boolean
            Whether to obtain fluxes by parsimonious FBA rather than
            "classical" FBA.

        Returns
        -------
        micom.CommunitySolution
            The solution of the optimization. If fluxes==False will only
            contain the objective value, community growth rate and individual
            growth rates.

        References
        ----------
        .. [#c1] OptCom: a multi-level optimization framework for the metabolic
           modeling and analysis of microbial communities.
           Zomorrodi AR, Maranas CD. PLoS Comput Biol. 2012 Feb;8(2):e1002363.
           doi: 10.1371/journal.pcbi.1002363, PMID: 22319433
        """
        # Pure delegation to the module-level `optcom` implementation.
        return optcom(self, strategy, min_growth, tradeoff, fluxes, pfba)
def to_pickle(self, filename):
"""Save a community in serialized form.
Parameters
----------
filename : str
Where to save the pickled community.
Returns
-------
Nothing
"""
with open(filename, mode="wb") as out:
pickle.dump(self, out)
def load_pickle(filename):
    """Load a community model from a pickled version.

    Parameters
    ----------
    filename : str
        The file the community is stored in.

    Returns
    -------
    micom.Community
        The loaded community model.
    """
    with open(filename, mode="rb") as handle:
        return pickle.load(handle)
| |
# This file is part of the Soletta (TM) Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gdb
import re
## IMPORTANT NOTE:
#
# This file is a Python GDB script that is highly dependent on
# symbol names, even the internal functions and parameters.
#
# Whenever depending on a symbol, mark them in the source file
# so people know they have to adapt this file on changes.
## LOADING:
#
# This file should be auto-loaded by gdb if it is installed in GDB's
# auto-load directory and matches the installed libsoletta.so,
# including the final so-version.
#
# If soletta is installed to custom directory, then make sure GDB knows
# about this location and that the directory is marked as safe-path:
#
# (gdb) add-auto-load-scripts-directory ${soletta_prefix}/share/gdb/auto-load
# (gdb) add-auto-load-safe-path ${soletta_prefix}/share/gdb/auto-load
#
# It may be included directly if not auto-loaded:
#
# (gdb) source ${soletta_prefix}/share/gdb/auto-load/libsoletta.so-gdb.py
#
## Usage:
# Commands start with the 'sol_' prefix, so you can use 'apropos ^sol_' to
# filter the commands in our namespace, or rely on tab-completion.
# Use GDB's "help <command>" to get more information.
# Maps an option's "data_type" string (as found in the node type
# description) to the member of the 'defvalue' union that stores its
# default value. Types absent from this map have no printable default.
defvalue_member_map = {
    "string": "s",
    "byte": "byte",
    "boolean": "b",
    "int": "i",
    "float": "f",
    "rgb": "rgb",
    "direction_vector": "direction_vector",
    }
def get_type_description(type):
    """Return the dereferenced 'description' member of *type*, or None.

    *type* is expected to be a gdb.Value for a 'struct sol_flow_node_type';
    None is returned when the member is absent or a NULL pointer.
    """
    try:
        tdesc = type["description"]
    except KeyError:
        return None
    if not tdesc:
        return None
    return tdesc.dereference()
def get_node_type_description(node):
    """Return the description struct of the node's type, or None."""
    type = node["type"]
    return get_type_description(type)
def _get_node_port_index_by_name(node, member, port_name):
    """Return the index of the port called *port_name*, or -1.

    *member* selects which NULL-terminated description array to scan
    ("ports_in" or "ports_out").
    """
    tdesc = get_node_type_description(node)
    if not tdesc:
        return -1
    array = tdesc[member]
    if not array:
        return -1
    index = 0
    while True:
        entry = array[index]
        if not entry:
            return -1
        name = entry["name"]
        if name and name.string() == port_name:
            return index
        index += 1
def get_node_port_out_index_by_name(node, port_name):
    """Return the index of the named output port, or -1."""
    return _get_node_port_index_by_name(node, "ports_out", port_name)
def get_node_port_in_index_by_name(node, port_name):
    """Return the index of the named input port, or -1."""
    return _get_node_port_index_by_name(node, "ports_in", port_name)
def _get_node_port_name_by_index(node, member, port_index):
    """Return the name of the port at *port_index*, or None.

    *member* selects which NULL-terminated description array to scan
    ("ports_in" or "ports_out").
    """
    tdesc = get_node_type_description(node)
    if not tdesc:
        return None
    array = tdesc[member]
    if not array:
        return None
    index = 0
    # Stop as soon as we pass the requested index or hit the terminator.
    while array[index] and index <= port_index:
        if index == port_index:
            name = array[index]["name"]
            return name.string() if name else None
        index += 1
    return None
def get_node_port_out_name_by_index(node, port_index):
    """Return the name of the output port at *port_index*, or None."""
    return _get_node_port_name_by_index(node, "ports_out", port_index)
def get_node_port_in_name_by_index(node, port_index):
    """Return the name of the input port at *port_index*, or None."""
    return _get_node_port_name_by_index(node, "ports_in", port_index)
class FlowTypePrinter(object):
    "Print a 'struct sol_flow_node_type'"

    def __init__(self, val):
        # val: gdb.Value of the struct being pretty-printed.
        self.val = val
        # Cached 'const struct sol_flow_port_type_in *' type; used to detect
        # input ports, the only ones that carry a process() callback.
        self.port_in_type = gdb.lookup_type("struct sol_flow_port_type_in").const().pointer()

    def display_hint(self):
        # Consumed by gdb's pretty-printer machinery.
        return 'sol_flow_node_type'

    def _port_description_to_string(self, index, port, port_type):
        # Render one port description entry plus its optional callbacks.
        s = ("\n %d %s (%s)\n" \
             " description: %s\n") % (
            index,
            port["name"].string(),
            port["data_type"].string(),
            port["description"].string())
        if port_type["connect"]:
            s += " connect(): %s\n" % (port_type["connect"],)
        if port_type["disconnect"]:
            s += " disconnect(): %s\n" % (port_type["disconnect"],)
        # process() only exists on input port types.
        if port_type.type == self.port_in_type and port_type["process"]:
            s += " process(): %s\n" % (port_type["process"],)
        return s

    def _option_description_to_string(self, option):
        # Render one option member, including its default value when the
        # data type is known in defvalue_member_map.
        data_type = option["data_type"].string()
        defvalue_member = defvalue_member_map.get(data_type)
        if not defvalue_member:
            defvalue = ""
        else:
            defvalue = option["defvalue"][defvalue_member]
            if data_type == "string":
                if defvalue:
                    defvalue = defvalue.string()
                else:
                    defvalue = "NULL"
            defvalue = " (default=%s)" % (defvalue,)
        return "\n %s(%s) \"%s\"%s," % (
            option["name"].string(),
            data_type,
            option["description"].string(),
            defvalue)

    def _ports_description_to_string(self, array, get_port_type):
        # Walk the NULL-terminated port description array; get_port_type(i)
        # resolves the runtime port type for callback display.
        if not array:
            return ""
        i = 0
        r = []
        while array[i]:
            port_type = get_port_type(i)
            r.append(self._port_description_to_string(i, array[i], port_type))
            i += 1
        if i > 0:
            r.append("\n ")
        return "".join(r)

    def _options_description_to_string(self, opts):
        # Walk the options member array; terminated by an entry with a
        # NULL 'name'.
        if not opts:
            return ""
        opts = opts.dereference()
        array = opts["members"]
        if not array:
            return ""
        i = 0
        r = []
        while array[i]["name"]:
            r.append(self._option_description_to_string(array[i]))
            i += 1
        if i > 0:
            r.append("\n ")
        return "".join(r)

    def to_string(self):
        type = self.val
        tdesc = get_type_description(type)
        if tdesc:
            # Resolve ports through the public accessors so any indirection
            # implemented by the node type is honored.
            get_port_in = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
            get_port_out = gdb.parse_and_eval("sol_flow_node_type_get_port_out")
            p_type = type.address
            ports_in = self._ports_description_to_string(tdesc["ports_in"], lambda idx: get_port_in(p_type, idx))
            ports_out = self._ports_description_to_string(tdesc["ports_out"], lambda idx: get_port_out(p_type, idx))
            options = self._options_description_to_string(tdesc["options"])
            return "%s=%s" \
                "\n name=\"%s\"," \
                "\n category=\"%s\"," \
                "\n description=\"%s\"," \
                "\n ports_in={%s}," \
                "\n ports_out={%s}," \
                "\n options={%s})" % (
                tdesc["symbol"].string(),
                type.address,
                tdesc["name"].string(),
                tdesc["category"].string(),
                tdesc["description"].string(),
                ports_in,
                ports_out,
                options)
        # No description compiled in: fall back to the bare address.
        return "(struct sol_flow_node_type)%s (no node type description)" % (type.address,)
class FlowPrinter(object):
    "Print a 'struct sol_flow_node'"

    def __init__(self, val):
        # val: gdb.Value of the struct being pretty-printed.
        self.val = val

    def display_hint(self):
        # Consumed by gdb's pretty-printer machinery.
        return 'sol_flow_node'

    def to_string(self):
        id = self.val["id"]
        type = self.val["type"]
        # A node without a type has not finished construction yet.
        if not type:
            return "sol_flow_node(%s) is under construction." % (
                self.val.address,)
        tname = "%#x (no node type description)" % (type.address,)
        tdesc = get_type_description(type)
        if tdesc:
            tname = "%s(%s=%s)" % (
                tdesc["name"].string(),
                tdesc["symbol"].string(),
                type.address)
        return "sol_flow_node(%s, id=\"%s\", type=%s)" % (
            self.val.address, id.string(), tname)
def sol_flow_pretty_printers(val):
    """gdb lookup function: map a struct tag to its pretty-printer.

    Returns a printer instance for the sol_flow structs we know about,
    or None so gdb falls through to other printers.
    """
    printers = {
        "sol_flow_node": FlowPrinter,
        "sol_flow_node_type": FlowTypePrinter,
    }
    factory = printers.get(val.type.tag)
    if factory is None:
        return None
    return factory(val)
def register_pretty_printers(objfile):
    """Register the sol_flow pretty-printer lookup with gdb.

    NOTE(review): *objfile* is accepted but unused -- the printer is
    appended to the global gdb.pretty_printers list, not to the objfile's.
    """
    gdb.pretty_printers.append(sol_flow_pretty_printers)
def get_type_options_string(type, options):
    """Render the option values used to open a node as a readable string.

    *type* is the node type (for the option descriptions and the concrete
    options struct symbol); *options* is the pointer passed at open time.
    Returns "" when there are no options at all.
    """
    if not options:
        return ""
    tdesc = get_type_description(type)
    if not tdesc or not tdesc["options"] or not tdesc["options"]["members"]:
        # We have an options pointer but no metadata to decode it.
        return "OPTIONS: %s (no node type description)\n" % (options,)
    string = ""
    opts_desc = tdesc["options"]
    array = opts_desc["members"]
    i = 0
    string += "OPTIONS: (struct %s*)%s\n" % (tdesc["options_symbol"].string(), options)
    # Cast the generic pointer to the concrete per-type options struct so
    # members can be read by name.
    opt_type = gdb.lookup_type("struct %s" % (tdesc["options_symbol"].string(),))
    options = options.cast(opt_type.pointer())
    # The members array is terminated by an entry with a NULL name.
    while array[i]["name"]:
        m = array[i]
        name = m["name"].string()
        data_type = m["data_type"].string()
        description = m["description"].string()
        value = options[name]
        if data_type == "string":
            if value:
                value = value.string()
            else:
                value = "NULL"
        # Same default-value decoding as FlowTypePrinter._option_description_to_string().
        defvalue_member = defvalue_member_map.get(data_type)
        if not defvalue_member:
            defvalue = ""
        else:
            defvalue = m["defvalue"][defvalue_member]
            if data_type == "string":
                if defvalue:
                    defvalue = defvalue.string()
                else:
                    defvalue = "NULL"
            defvalue = " (default=%s)" % (defvalue,)
        string += " %s (%s) = %s // %s%s\n" % (name, data_type, value, description, defvalue)
        i += 1
    string += "\n"
    return string
class InspectAndBreakIfMatches(gdb.Breakpoint):
    """Breakpoint that inspects a dispatcher's arguments and, when every
    user-supplied filter matches, schedules a one-shot breakpoint at the
    concrete callback about to be invoked.

    Subclasses implement get_values() returning a dict with at least the
    values to match against the filters, a "method" entry (the callback to
    break at) and optionally a "banner" (string or callable) describing
    the stop.
    """

    class InternalBreak(gdb.Breakpoint):
        """Temporary, internal breakpoint at the resolved callback address."""

        def __init__(self, method, banner=None, matches=None, values=None):
            # Break at the callback's exact code address: cast the function
            # pointer to long so the location spec becomes "*<address>".
            addr = "*%s" % (method.cast(gdb.lookup_type("long")),)
            self.method = method
            self.banner = banner
            self.matches = matches or {}
            self.values = values or {}
            gdb.Breakpoint.__init__(self, addr, gdb.BP_BREAKPOINT, internal=True, temporary=True)

        def stop(self):
            # Show the banner (if any) and always stop.
            if self.banner:
                if callable(self.banner):
                    self.banner(self.matches, self.values)
                else:
                    gdb.write(self.banner)
            return True

    def __init__(self, spec, matches):
        gdb.Breakpoint.__init__(self, spec, gdb.BP_BREAKPOINT, internal=False)
        # Compile each filter string into a predicate (exact or regexp).
        self.matches = {}
        for k, v in matches.items():
            self.matches[k] = get_str_or_regexp_match(v)

    def print_matches(self, values=None):
        """Write the configured filters (and, if given, current values)."""
        gdb.write("%s matches:\n" % (self.__class__.__name__,), gdb.STDERR)
        if not values:
            values = {}
        for k, func in self.matches.items():
            v = values.get(k)
            if v is None:
                gdb.write(" %s = %s (no value provided)\n" % (k, func.__doc__), gdb.STDERR)
            else:
                try:
                    res = func(v)
                except Exception as e:
                    res = "Exception executing match: %s" % (e,)
                gdb.write(" %s = %s (value: '%s', match: %s)\n" %
                          (k, func.__doc__, v, res), gdb.STDERR)
        gdb.write("\n", gdb.STDERR)

    def get_values(self):
        """Subclass hook: return the dict of values described above."""
        # BUG FIX: was `raise NotImplemented()` -- NotImplemented is a
        # non-callable sentinel, so that line raised TypeError instead of
        # signalling "abstract method" as intended.
        raise NotImplementedError()

    def stop(self):
        try:
            values = self.get_values()
        except Exception as e:
            gdb.write("Exception at %s.get_values(): %s\n" % (
                self.__class__.__name__, e), gdb.STDERR)
            return False
        if not values:
            gdb.write("%s.get_values() did not return values.\n" % (
                self.__class__.__name__,), gdb.STDERR)
            return False

        def print_values():
            gdb.write("Values:\n", gdb.STDERR)
            for k, v in values.items():
                gdb.write(" %s: %s\n" % (k, v), gdb.STDERR)
            gdb.write("\n", gdb.STDERR)

        # Every configured filter must match its value, otherwise we keep
        # the program running (return False).
        for k, match_func in self.matches.items():
            try:
                v = values[k]
            except KeyError:
                gdb.write("%s.get_values() did not provide key '%s'.\n" % (
                    self.__class__.__name__, k), gdb.STDERR)
                self.print_matches(values)
                print_values()
                return False
            try:
                if not match_func(v):
                    return False
            except Exception as e:
                gdb.write("Exception at %s.stop() while matching %s %s (%s): %s\n" % (
                    self.__class__.__name__, k, v, match_func.__doc__, e,), gdb.STDERR)
                self.print_matches(values)
                return False
        method = values.get("method")
        banner = values.get("banner")
        if not method:
            # No callback to break at: stop right here, at the dispatcher.
            node = values.get("node")
            if node:
                gdb.write("NODE: %s\n" % (node,), gdb.STDERR)
            gdb.write("%s did not return the internal method to break at.\n" % (
                self.__class__.__name__,), gdb.STDERR)
            self.print_matches(values)
            gdb.write("Breaking at the caller function %s\n" % (self.location,),
                      gdb.STDERR)
            return True

        # Creating a breakpoint from within stop() is not allowed; defer it
        # to the event loop and let the program continue to the callback.
        def add_breakpoint():
            try:
                self.InternalBreak(method, banner, self.matches, values)
            except Exception as e:
                gdb.write("Could not add internal breakpoint: %s\n" % (e,), gdb.STDERR)
                self.print_matches(values)
        gdb.post_event(add_breakpoint)
        return False
def get_str_or_regexp_match(string):
    """Build a predicate that matches *string*.

    A falsy *string* selects the match-everything pattern ``/.*/``.  A
    value enclosed in slashes (``/regex/``) is compiled and matched with
    ``re.match``; anything else is compared for plain string equality.
    The returned callable's ``__doc__`` records the original filter text
    so diagnostics can display it.
    """
    if not string:
        string = "/.*/"
    if len(string) > 2 and string.startswith("/") and string.endswith("/"):
        pattern = re.compile(string[1:-1])

        def match(x):
            return bool(pattern.match(x))
    else:
        def match(x):
            return string == x
    match.__doc__ = string
    return match
class FlowBreakOpen(InspectAndBreakIfMatches):
    """Break at a node type's open() when the node id matches."""

    def __init__(self, matches):
        # Depends on the sol_flow_node_init() symbol and its parameters
        # 'name', 'type', 'node' and 'options'.
        InspectAndBreakIfMatches.__init__(self, "sol_flow_node_init", matches)

    def get_values(self):
        node_id = gdb.parse_and_eval("name")
        if node_id:
            node_id = node_id.string()
        type = gdb.parse_and_eval("type")
        method = type["open"]
        node = gdb.parse_and_eval("*node")
        options = gdb.parse_and_eval("options")

        def banner(matches, values):
            gdb.write("""\
Break before opening node:
FUNCTION: %s
NODE....: %s (filter: %s)
%s""" % (method, node,
         matches["node_id"].__doc__,
         get_type_options_string(node["type"], options)))
        return {
            "node": node,
            "node_id": node_id,
            "method": method,
            "banner": banner,
            }
class FlowBreakClose(InspectAndBreakIfMatches):
    """Break at a node type's close() when the node id matches."""

    def __init__(self, matches):
        # Depends on the sol_flow_node_fini() symbol and its 'node' parameter.
        InspectAndBreakIfMatches.__init__(self, "sol_flow_node_fini", matches)

    def get_values(self):
        node = gdb.parse_and_eval("*node")
        node_id = node["id"]
        if node_id:
            node_id = node_id.string()
        type = node["type"]
        method = type["close"]

        def banner(matches, values):
            gdb.write("""\
Break before closing node:
FUNCTION: %s
NODE....: %s (filter: %s)
""" % (method, node,
       matches["node_id"].__doc__))
        return {
            "node": node,
            "node_id": node_id,
            "method": method,
            "banner": banner,
            }
class FlowBreakSend(InspectAndBreakIfMatches):
    """Break at the parent container's send() when node id, output port
    name and packet type all match."""

    def __init__(self, matches):
        # Depends on the inspector_will_send_packet() symbol and its
        # parameters 'src_node', 'src_port' and 'packet'.
        InspectAndBreakIfMatches.__init__(self, "inspector_will_send_packet", matches)

    def get_values(self):
        node = gdb.parse_and_eval("*src_node")
        port = gdb.parse_and_eval("src_port")
        packet = gdb.parse_and_eval("*packet")
        node_id = node["id"]
        if node_id:
            node_id = node_id.string()
        port_name = get_node_port_out_name_by_index(node, port)
        packet_type = packet["type"]["name"].string()
        # The send() implementation lives in the parent container's type.
        type = gdb.parse_and_eval("(struct sol_flow_node_container_type *)src_node->parent->type")
        method = type["send"]

        def banner(matches, values):
            gdb.write("""\
Break before sending packet:
FUNCTION: %s
NODE....: %s (filter: %s)
PORT....: %s (index: %s, filter: %s)
PACKET..: %s (filter: %s)
""" % (
                method,
                node,
                matches["node_id"].__doc__,
                port_name,
                port,
                matches["port_name"].__doc__,
                packet,
                matches["packet_type"].__doc__))
        return {
            "node": node,
            "node_id": node_id,
            "port_name": port_name,
            "packet_type": packet_type,
            "method": method,
            "banner": banner,
            }
class FlowBreakProcess(InspectAndBreakIfMatches):
    """Break at an input port's process() when node id, input port name
    and packet type all match."""

    def __init__(self, matches):
        # Depends on the inspector_will_deliver_packet() symbol and its
        # parameters 'dst_node', 'dst_port' and 'packet'.
        InspectAndBreakIfMatches.__init__(self, "inspector_will_deliver_packet", matches)

    def get_values(self):
        node = gdb.parse_and_eval("*dst_node")
        port = gdb.parse_and_eval("dst_port")
        packet = gdb.parse_and_eval("*packet")
        node_id = node["id"]
        if node_id:
            node_id = node_id.string()
        port_name = get_node_port_in_name_by_index(node, port)
        packet_type = packet["type"]["name"].string()
        # Resolve the input port's runtime type to find its process().
        get_port_in = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
        type = node["type"]
        port_type = get_port_in(type, port)
        if not port_type:
            # No port type: base class will break at the caller instead.
            method = None
        else:
            method = port_type["process"]

        def banner(matches, values):
            gdb.write("""\
Break before processing packet:
FUNCTION: %s
NODE....: %s (filter: %s)
PORT....: %s (index: %s, filter: %s)
PACKET..: %s (filter: %s)
""" % (
                method,
                node,
                matches["node_id"].__doc__,
                port_name,
                port,
                matches["port_name"].__doc__,
                packet,
                matches["packet_type"].__doc__))
        return {
            "node": node,
            "node_id": node_id,
            "port_name": port_name,
            "packet_type": packet_type,
            "method": method,
            "banner": banner,
            }
class FlowCommand(gdb.Command):
    "Commands to operate with 'sol_flow'"

    def __init__(self):
        # Prefix command: concrete functionality lives in the sub-commands.
        gdb.Command.__init__(self, "sol_flow", gdb.COMMAND_USER, gdb.COMPLETE_COMMAND, True)

    def invoke(self, arg, from_tty):
        # Invoked bare, without a sub-command: tell the user what exists.
        raise gdb.GdbError("missing sub-command: break or print")
class FlowBreakCommand(gdb.Command):
    "Add an execution break when sol_flow events happen."

    def __init__(self):
        # Prefix command under 'sol_flow'; see the open/close/send/process
        # sub-commands for the actual breakpoints.
        gdb.Command.__init__(self, "sol_flow break", gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_SYMBOL, True)

    def invoke(self, arg, from_tty):
        raise gdb.GdbError("missing sub-command: open, close, send or process")
class FlowBreakFilterBaseCommand(gdb.Command):
    """Base command for 'sol_flow break' subcommands.

    The subcommand will be registered and will take matches as list of
    optional arguments. If not available then None is assumed. These
    parameters will be sent to breakpoint in order.
    """

    def __init__(self, subcommand, matches, breakpoint):
        gdb.Command.__init__(self, "sol_flow break " + subcommand, gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_SYMBOL, True)
        self.matches = matches
        self.breakpoint = breakpoint

    def invoke(self, arg, from_tty):
        # Pair each expected match name with its positional argument,
        # defaulting to None when the argument was omitted.
        argv = gdb.string_to_argv(arg)
        params = {}
        for position, name in enumerate(self.matches):
            params[name] = argv[position] if position < len(argv) else None
        self.breakpoint(params)
        self.dont_repeat()
class FlowBreakOpenCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node is created (type->open).

    Arguments: node_id

    node_id may be an exact string or a regular expression if enclosed
    in "//". Examples:

    sol_flow break open timer
        will break on nodes with id "timer" (exact match)

    sol_flow break open /^timer.*$/
        will break on nodes with id that matches regular expression
        "^timer.*$" (starts with "timer")
    """

    def __init__(self):
        matches = ["node_id"]
        FlowBreakFilterBaseCommand.__init__(self, "open", matches, FlowBreakOpen)
class FlowBreakCloseCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node is destroyed (type->close).

    Arguments: node_id

    node_id may be an exact string or a regular expression if enclosed
    in "//". Examples:

    sol_flow break close timer
        will break on nodes with id "timer" (exact match)

    sol_flow break close /^timer.*$/
        will break on nodes with id that matches regular expression
        "^timer.*$" (starts with "timer")
    """

    def __init__(self):
        matches = ["node_id"]
        FlowBreakFilterBaseCommand.__init__(self, "close", matches, FlowBreakClose)
class FlowBreakSendCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node sends a packet on its output port.

    Arguments: node_id port_name packet_type

    Each argument is optional and may be a string or a regular
    expression if enclosed in "//". If omitted the regular expression
    /.*/ is assumed, matching all patterns.
    """

    def __init__(self):
        matches = ["node_id", "port_name", "packet_type"]
        FlowBreakFilterBaseCommand.__init__(self, "send", matches, FlowBreakSend)
class FlowBreakProcessCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node will receive a packet on its input port (port's process()).

    Arguments: node_id port_name packet_type

    Each argument is optional and may be a string or a regular
    expression if enclosed in "//". If omitted the regular expression
    /.*/ is assumed, matching all patterns.
    """

    def __init__(self):
        matches = ["node_id", "port_name", "packet_type"]
        FlowBreakFilterBaseCommand.__init__(self, "process", matches, FlowBreakProcess)
class FlowPrintCommand(gdb.Command):
    "Print sol_flow types"

    def __init__(self):
        # Prefix command under 'sol_flow'; see type/port/options sub-commands.
        gdb.Command.__init__(self, "sol_flow print", gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_COMMAND, True)

    def invoke(self, arg, from_tty):
        raise gdb.GdbError("missing sub-command: type, port or options")
def get_node_type_from_exp(arg):
    """Evaluate *arg* and return the 'struct sol_flow_node_type' gdb.Value.

    Accepts an expression for either a 'struct sol_flow_node' (the type is
    read from the node) or a 'struct sol_flow_node_type' directly, both as
    values and as (const) pointers. Raises gdb.GdbError on anything else.
    """
    node = gdb.parse_and_eval(arg)
    if not node:
        raise gdb.GdbError("invalid node: %s" % (arg,))
    gt = node.type.unqualified()
    sol_flow_node_type = gdb.lookup_type("struct sol_flow_node")
    sol_flow_node_type_type = gdb.lookup_type("struct sol_flow_node_type")
    if gt == sol_flow_node_type or gt == sol_flow_node_type.pointer() or \
       gt == sol_flow_node_type.const().pointer():
        return node["type"]
    elif gt == sol_flow_node_type_type or gt == sol_flow_node_type_type.pointer() or \
         gt == sol_flow_node_type_type.const().pointer():
        return node
    else:
        raise gdb.GdbError("invalid node: %s" % (arg,))
class FlowPrintTypeCommand(gdb.Command):
    """Prints the type information for the given 'struct sol_flow_node'.

    Arguments: node
    """

    def __init__(self):
        gdb.Command.__init__(self, "sol_flow print type", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)

    def invoke(self, arg, from_tty):
        arg = gdb.string_to_argv(arg)
        if len(arg) < 1:
            raise gdb.GdbError("missing pointer to struct sol_flow_node")
        type = get_node_type_from_exp(arg[0])
        # Dereference so the FlowTypePrinter pretty-printer kicks in.
        gdb.write("%s\n" % (type.dereference(),))
class FlowPrintPortCommand(gdb.Command):
    """Prints the port information for the given node.

    Arguments: node [direction] [filter_type] [filter_specifier]

    node is the pointer to node where to find the port.

    direction may be 'in', 'out' or 'both'. If omitted, both will be
    assumed. May be omitted and 'both' is used.

    filter_type may be 'all', 'number' or 'name'. If omitted, all
    will be assumed.

    If filter_type is 'number', then filter_specifier must be an integer.
    If filter_type is 'name', then filter_specifier must be a string
    or a regular expression enclosed in "//".
    If filter_type is omitted, then it's guessed from filter_specifier.
    """

    def __init__(self):
        gdb.Command.__init__(self, "sol_flow print port", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)

    def _print_ports(self, type, tdesc, member, filter):
        # Print every port in the NULL-terminated array *member* that
        # passes *filter* ({"type": "all"|"number"|"name", ...}).
        array = tdesc[member]
        if not array:
            return
        did = 0
        i = 0
        # Resolve the matching accessor for the runtime port type.
        if member == "ports_in":
            get_port_type = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
        else:
            get_port_type = gdb.parse_and_eval("sol_flow_node_type_get_port_out")
        while array[i]:
            port = array[i]
            if filter["type"] == "all" or \
               (filter["type"] == "number" and filter["number"] == i) or \
               (filter["type"] == "name" and filter["name"](port["name"].string())):
                # Print the section header only once, and only if there is
                # at least one match.
                if did == 0:
                    gdb.write("%s:\n" % member)
                did += 1
                gdb.write(" %d: %s (%s)\n description: %s\n" % (
                    i,
                    port["name"].string(),
                    port["data_type"].string(),
                    port["description"].string(),
                    ))
                port_type = get_port_type(type, i)
                if port_type["connect"]:
                    gdb.write(" connect(): %s\n" % (port_type["connect"],))
                if port_type["disconnect"]:
                    gdb.write(" disconnect(): %s\n" % (port_type["disconnect"],))
                # Only input ports carry a process() callback.
                if member == "ports_in" and port_type["process"]:
                    gdb.write(" process(): %s\n" % (port_type["process"],))
                gdb.write("\n")
            i += 1

    def invoke(self, arg, from_tty):
        arg = gdb.string_to_argv(arg)
        if len(arg) < 1:
            raise gdb.GdbError("missing pointer to struct sol_flow_node")
        direction = "both"
        filter = {"type": "all"}
        if len(arg) > 1:
            # Second argument: either a direction, or (when it is not a
            # valid direction) a filter specifier to be guessed.
            direction = arg[1]
            if direction not in ("both", "in", "out"):
                direction = "both"
                try:
                    filter["number"] = int(arg[1])
                    filter["type"] = "number"
                except ValueError:
                    filter["name"] = get_str_or_regexp_match(arg[1])
                    filter["type"] = "name"
        if len(arg) > 2:
            # Third argument: explicit filter type, or a specifier to guess.
            filter["type"] = arg[2]
            if filter["type"] not in ("all", "number", "name"):
                try:
                    filter["number"] = int(arg[2])
                    filter["type"] = "number"
                except ValueError:
                    filter["name"] = get_str_or_regexp_match(arg[2])
                    filter["type"] = "name"
            elif filter["type"] == 'number':
                if len(arg) < 4:
                    raise gdb.GdbError("missing port number to filter")
                filter["number"] = int(arg[3])
            elif filter["type"] == 'name':
                if len(arg) < 4:
                    raise gdb.GdbError("missing port name to filter")
                filter["name"] = get_str_or_regexp_match(arg[3])
        type = get_node_type_from_exp(arg[0])
        tdesc = get_type_description(type)
        if not tdesc:
            gdb.write("no node type description\n")
            return
        if direction == "both" or direction == "in":
            self._print_ports(type, tdesc, "ports_in", filter)
        if direction == "both" or direction == "out":
            self._print_ports(type, tdesc, "ports_out", filter)
class FlowPrintOptionsCommand(gdb.Command):
    """Prints the options used to open the given node.

    Arguments: node options

    node is the pointer to node where to find the port.
    options is the pointer to options to open to given node.
    """

    def __init__(self):
        gdb.Command.__init__(self, "sol_flow print options", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)

    def invoke(self, arg, from_tty):
        arg = gdb.string_to_argv(arg)
        if len(arg) != 2:
            raise gdb.GdbError("Usage: sol_flow print options <node> <options>")
        type = get_node_type_from_exp(arg[0])
        options = gdb.parse_and_eval(arg[1])
        gdb.write(get_type_options_string(type, options))
# Instantiate each command once so gdb registers them under the 'sol_flow'
# namespace, then hook the pretty-printers for the objfile being loaded.
FlowCommand()
FlowBreakCommand()
FlowBreakOpenCommand()
FlowBreakCloseCommand()
FlowBreakSendCommand()
FlowBreakProcessCommand()
FlowPrintCommand()
FlowPrintTypeCommand()
FlowPrintPortCommand()
FlowPrintOptionsCommand()
register_pretty_printers(gdb.current_objfile())
| |
from JumpScale import j
import os
import errno
import stat
def _is_block(file):
try:
st = os.stat(file)
except OSError, err:
if err.errno == errno.ENOENT:
return False
raise
return stat.S_ISBLK(st.st_mode)
def get_open_blks(pid):
    """Return the set of fd numbers of *pid* that point at block devices.

    Scans /proc/<pid>/fd (Linux only). If an fd vanishes while scanning,
    the process directory is stat()ed at the end so a disappeared process
    raises OSError instead of silently returning a partial result.
    """
    retlist = set()
    files = os.listdir("/proc/%s/fd" % pid)
    hit_enoent = False
    for fd in files:
        file = "/proc/%s/fd/%s" % (pid, fd)
        if os.path.islink(file):
            try:
                file = os.readlink(file)
            except OSError as err:  # 'as' form works on py2.6+ and py3
                if err.errno == errno.ENOENT:
                    # fd was closed between listdir() and readlink()
                    hit_enoent = True
                    continue
                raise
            else:
                if file.startswith('/') and _is_block(file):
                    retlist.add(int(fd))
    if hit_enoent:
        # raise NSP if the process disappeared on us
        os.stat('/proc/%s' % pid)
    return retlist
class Disk():
    """
    identifies a disk in the grid
    """

    def __init__(self):
        # identity and location
        self.id = 0
        self.path = ""
        # capacity bookkeeping (free is filled in by Diskmanager)
        self.size = ""
        self.free = ""
        self.ssd = False
        self.fs = ""
        # mount state
        self.mounted = False
        self.mountpoint = ""
        # descriptive metadata
        self.model = ""
        self.description = ""
        self.type = []

    def __str__(self):
        fields = (self.path, self.mountpoint, self.size, self.free,
                  self.ssd, self.fs, self.model, self.id)
        return "%s %s %s free:%s ssd:%s fs:%s model:%s id:%s" % fields

    __repr__ = __str__
class Diskmanager():
    def __init__(self):
        # Lazily-created handle to the (patched) pyparted module; populated
        # on first access of the `parted` property.
        self._parted=None
@property
def parted(self):
if self._parted==None:
try:
import parted
except:
j.system.platform.ubuntu.install("python-parted")
import parted
#patch self.parted
_orig_getAllDevices = parted.getAllDevices
def _patchedGetAllDevices():
pid = os.getpid()
fds = get_open_blks(pid)
try:
return _orig_getAllDevices()
finally:
afds = get_open_blks(pid)
for fd in afds.difference(fds):
os.close(fd)
parted.getAllDevices = _patchedGetAllDevices
self._parted=parted
return self._parted
    def partitionAdd(self,disk, free, align=None, length=None, fs_type=None, type=None):
        """Create a partition inside the free region *free* of *disk*.

        length is in sectors; when omitted the whole region is used.
        align defaults to the disk's optimal alignment; fs_type, when
        given, pre-creates a parted FileSystem object for the geometry.
        Returns the new parted.Partition (already added to *disk*).
        """
        if type==None:
            type=self.parted.PARTITION_NORMAL
        start = free.start
        if length:
            end = start + length - 1
        else:
            # use the whole free region
            end = free.end
            length = free.end - start + 1
        if not align:
            align = disk.partitionAlignment.intersect(disk.device.optimumAlignment)
        # Snap start/end to the alignment grain; the end offset is shifted
        # by -1 so the *inclusive* end sector lands on a grain boundary.
        if not align.isAligned(free, start):
            start = align.alignNearest(free, start)
        end_align = self.parted.Alignment(offset=align.offset - 1, grainSize=align.grainSize)
        if not end_align.isAligned(free, end):
            end = end_align.alignNearest(free, end)
        geometry = self.parted.Geometry(disk.device, start=start, end=end)
        if fs_type:
            fs = self.parted.FileSystem(type=fs_type, geometry=geometry)
        else:
            fs = None
        partition = self.parted.Partition(disk, type=type, geometry=geometry, fs=fs)
        # Constrain to the exact computed geometry so parted cannot move it.
        constraint = self.parted.Constraint(exactGeom=partition.geometry)
        disk.addPartition(partition, constraint)
        return partition
def diskGetFreeRegions(self,disk, align):
"""Get a filtered list of free regions, excluding the gaps due to partition alignment"""
regions = disk.getFreeSpaceRegions()
new_regions = []
for region in regions:
if region.length > align.grainSize:
new_regions.append(region)
return new_regions
    def _kib_to_sectors(self,device, kib):
        # Convert a size in KiB to a sector count for *device*.
        return self.parted.sizeToSectors(kib, 'KiB', device.sectorSize)
    def mirrorsFind(self):
        """Return the raw contents of /proc/mdstat (Linux software-RAID state)."""
        cmd="cat /proc/mdstat"
        rcode,out=j.system.process.execute(cmd)
        # NOTE(review): rcode is ignored; on failure `out` may be empty.
        return out
def partitionsFind(self,mounted=None,ttype=None,ssd=None,prefix="sd",minsize=5,maxsize=5000,devbusy=None,\
initialize=False,forceinitialize=False):
"""
looks for disks which are know to be data disks & are formatted ext4
return [[$partpath,$size,$free,$ssd]]
@param ssd if None then ssd and other
"""
import JumpScale.grid.osis
import psutil
result=[]
mounteddevices = psutil.disk_partitions()
def getpsutilpart(partname):
for part in mounteddevices:
if part.device==partname:
return part
return None
for dev in self.parted.getAllDevices():
path=dev.path
#ssize = dev.sectorSize;
# size = (geom[0] * geom[1] * geom[2] * ssize) / 1000 / 1000 / 1000;
# size2=dev.getSize()
if devbusy==None or dev.busy==devbusy:
if path.startswith("/dev/%s"%prefix):
try:
disk = self.parted.Disk(dev)
partitions = disk.partitions
except self.parted.DiskLabelException:
partitions = list()
for partition in partitions:
disko=Disk()
disko.model = dev.model
disko.path=partition.path if disk.type != 'loop' else disk.device.path
disko.size=round(partition.getSize(unit="mb"),2)
disko.free = 0
print "partition:%s %s"%(disko.path,disko.size)
try:
fs = self.parted.probeFileSystem(partition.geometry)
except:
fs = "unknown"
disko.fs=fs
partfound=getpsutilpart(disko.path)
mountpoint=None
if partfound==None and mounted<>True:
mountpoint="/mnt/tmp"
cmd="mount %s /mnt/tmp"%partition.path
rcode,output=j.system.process.execute(cmd,ignoreErrorOutput=False,dieOnNonZeroExitCode=False,)
if rcode<>0:
#mount did not work
mountpoint==None
disko.mountpoint=None
disko.mounted=False
elif partfound:
mountpoint=partfound.mountpoint
disko.mountpoint=mountpoint
disko.mounted=True
pathssdcheck="/sys/block/%s/queue/rotational"%dev.path.replace("/dev/","").strip()
if j.system.fs.exists(pathssdcheck):
ssd0=int(j.system.fs.fileGetContents(pathssdcheck))==0
else:
ssd0 = False
disko.ssd=ssd0
result.append(disko)
if mountpoint<>None:
print "mountpoint:%s"%mountpoint
size, used, free, percent=psutil.disk_usage(mountpoint)
disko.free=disko.size*float(1-percent/100)
size=disko.size / 1024
disko.free=int(disko.free)
if (ttype==None or fs==ttype) and size>minsize and (maxsize is None or size<maxsize):
if ssd==None or disko.ssd==ssd:
# print disko
hrdpath="%s/disk.hrd"%mountpoint
if j.system.fs.exists(hrdpath):
hrd=j.core.hrd.getHRD(hrdpath)
partnr=hrd.getInt("diskinfo.partnr")
if partnr==0 or forceinitialize:
j.system.fs.remove(hrdpath)
if not j.system.fs.exists(hrdpath) and initialize:
C="""
diskinfo.partnr=
diskinfo.gid=
diskinfo.nid=
diskinfo.type=
diskinfo.epoch=
diskinfo.description=
"""
j.system.fs.writeFile(filename=hrdpath,contents=C)
hrd=j.core.hrd.getHRD(hrdpath)
hrd.set("diskinfo.description",j.console.askString("please give description for disk"))
hrd.set("diskinfo.type",",".join(j.console.askChoiceMultiple(["BOOT","CACHE","TMP","DATA","OTHER"])))
hrd.set("diskinfo.gid",j.application.whoAmI.gid)
hrd.set("diskinfo.nid",j.application.whoAmI.nid)
hrd.set("diskinfo.epoch",j.base.time.getTimeEpoch())
client = j.core.osis.getClientByInstance('main')
client_disk=j.core.osis.getClientForCategory(client,"system","disk")
disk=client_disk.new()
for key,val in disko.__dict__.iteritems():
disk.__dict__[key]=val
disk.description=hrd.get("diskinfo.description")
disk.type=hrd.get("diskinfo.type").split(",")
disk.type.sort()
disk.nid=j.application.whoAmI.nid
disk.gid=j.application.whoAmI.gid
guid,new,changed=client_disk.set(disk)
disk=client_disk.get(guid)
diskid=disk.id
hrd.set("diskinfo.partnr",diskid)
if j.system.fs.exists(hrdpath):
# hrd=j.core.hrd.getHRD(hrdpath)
disko.id=hrd.get("diskinfo.partnr")
disko.type=hrd.get("diskinfo.type").split(",")
disko.type.sort()
disko.description=hrd.get("diskinfo.description")
print "found disk:\n%s"%(disko)
cmd="umount /mnt/tmp"
j.system.process.execute(cmd,dieOnNonZeroExitCode=False)
if os.path.ismount("/mnt/tmp")==True:
raise RuntimeError("/mnt/tmp should not be mounted")
return result
def partitionsFind_Ext4Data(self):
"""
looks for disks which are know to be data disks & are formatted ext4
return [[$partpath,$gid,$partid,$size,$free]]
"""
result=[item for item in self.partitionsFind(busy=False,ttype="ext4",ssd=False,prefix="sd",minsize=300,maxsize=5000)]
return result
    def partitionsMount_Ext4Data(self):
        # Mount every ext4 data partition under /mnt/datadisks/<partnr>.
        # NOTE(review): partitionsFind_Ext4Data() returns Disk objects
        # (see partitionsFind), not 6-tuples -- confirm this unpacking
        # against the actual return type before relying on it.
        for path,gid,partnr,size,free,ssd in self.partitionsFind_Ext4Data():
            mntdir="/mnt/datadisks/%s"%partnr
            j.system.fs.createDir(mntdir)
            cmd="mount %s %s"%(path,mntdir)
            j.system.process.execute(cmd)
def partitionsUnmount_Ext4Data(self):
partitions=self.partitionsGet_Ext4Data()
for partid,size,free in partitions:
mntdir="/mnt/datadisks/%s"%partnr
cmd="umount %s"%(mntdir)
j.system.process.execute(cmd)
    def partitionsGetMounted_Ext4Data(self):
        """
        find disks which are mounted
        @return [[$partid,$size,$free]]
        """
        # Not implemented yet: currently always returns None.
        ##TODO
        pass
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
from testinfra.modules.base import Module
from testinfra.utils import cached_property
def parse_socketspec(socketspec):
    """Split a spec like ``tcp://127.0.0.1:22`` into (protocol, host, port).

    ``host`` and/or ``port`` are ``None`` when absent from the spec.
    Raises RuntimeError for unknown protocols, hosts that are not valid
    IPv4/IPv6 addresses, or non-integer ports.
    """
    protocol, address = socketspec.split("://", 1)
    if protocol not in ("udp", "tcp", "unix"):
        raise RuntimeError(
            "Cannot validate protocol '%s'. Should be tcp, udp or unix" % (
                protocol,))
    host = port = None
    if protocol == "unix":
        # unix:///foo/bar.sock -> the whole address is a filesystem path
        host = address
    elif ":" in address:
        # tcp://127.0.0.1:22 or tcp://:::22 -> split on the last colon
        host, port = address.rsplit(":", 1)
    else:
        # tcp://22 -> port only, any host
        port = address
    if protocol != "unix" and host is not None:
        family = None
        for candidate in (socket.AF_INET, socket.AF_INET6):
            try:
                socket.inet_pton(candidate, host)
            except socket.error:
                continue
            family = candidate
            break
        if family is None:
            raise RuntimeError("Cannot validate ip address '%s'" % (host,))
    if port is not None:
        try:
            port = int(port)
        except ValueError:
            raise RuntimeError("Cannot validate port '%s'" % (port,))
    return protocol, host, port
class Socket(Module):
    """Test listening tcp/udp and unix sockets

    ``socketspec`` must be specified as ``<protocol>://<host>:<port>``

    This module requires the ``netstat`` command on the target host.

    Example:

      - Unix sockets: ``unix:///var/run/docker.sock``
      - All ipv4 and ipv6 tcp sockets on port 22: ``tcp://22``
      - All ipv4 sockets on port 22: ``tcp://0.0.0.0:22``
      - All ipv6 sockets on port 22: ``tcp://:::22``
      - udp socket on 127.0.0.1 port 69: ``udp://127.0.0.1:69``
    """
    # Path of the backing netstat/ss binary; filled in by get_module_class().
    _command = None

    def __init__(self, socketspec):
        if socketspec is not None:
            self.protocol, self.host, self.port = parse_socketspec(socketspec)
        else:
            # cls(None) is used internally by get_listening_sockets().
            self.protocol = self.host = self.port = None
        super().__init__()

    @property
    def is_listening(self):
        """Test if socket is listening

        >>> host.socket("unix:///var/run/docker.sock").is_listening
        False
        >>> # This HTTP server listen on all ipv4 addresses but not on ipv6
        >>> host.socket("tcp://0.0.0.0:80").is_listening
        True
        >>> host.socket("tcp://:::80").is_listening
        False
        >>> host.socket("tcp://80").is_listening
        False

        .. note:: If you don't specify a host for udp and tcp sockets,
                  then the socket is listening if and only if the
                  socket listen on **both** all ipv4 and ipv6 addresses
                  (ie 0.0.0.0 and ::)
        """
        sockets = list(self._iter_sockets(True))
        if self.protocol == "unix":
            return ("unix", self.host) in sockets
        allipv4 = (self.protocol, "0.0.0.0", self.port) in sockets
        allipv6 = (self.protocol, "::", self.port) in sockets
        # BUGFIX: the host-less case must require BOTH wildcard families
        # (the previous any([allipv6, all([allipv4, allipv6])]) collapsed to
        # just allipv6), and allipv4/allipv6 are booleans -- testing
        # ``allipv6 in sockets`` against a list of tuples was always False.
        return (
            all([allipv4, allipv6])
            or (
                self.host is not None
                and (
                    (":" in self.host and allipv6)
                    or (":" not in self.host and allipv4)
                    or (self.protocol, self.host, self.port) in sockets)
            )
        )

    @property
    def clients(self):
        """Return a list of clients connected to a listening socket

        For tcp and udp sockets a list of pair (address, port) is returned.
        For unix sockets a list of None is returned (thus you can make a
        len() for counting clients).

        >>> host.socket("tcp://22").clients
        [('2001:db8:0:1', 44298), ('192.168.31.254', 34866)]
        >>> host.socket("unix:///var/run/docker.sock")
        [None, None, None]
        """
        sockets = []
        for sock in self._iter_sockets(False):
            if sock[0] != self.protocol:
                continue
            if self.protocol == "unix":
                if sock[1] == self.host:
                    sockets.append(None)
                continue
            if sock[2] != self.port:
                continue
            # Match the requested host: wildcard hosts match any peer of the
            # corresponding family; a concrete host must match exactly.
            if (
                self.host is None
                or (self.host == "0.0.0.0" and ":" not in sock[3])
                or (self.host == "::" and ":" in sock[3])
                or self.host == sock[3]
            ):
                sockets.append((sock[3], sock[4]))
        return sockets

    @classmethod
    def get_listening_sockets(cls):
        """Return a list of all listening sockets

        >>> host.socket.get_listening_sockets()
        ['tcp://0.0.0.0:22', 'tcp://:::22', 'unix:///run/systemd/private', ...]
        """
        sockets = []
        for sock in cls(None)._iter_sockets(True):
            if sock[0] == "unix":
                sockets.append("unix://" + sock[1])
            else:
                sockets.append("%s://%s:%s" % (
                    sock[0], sock[1], sock[2],
                ))
        return sockets

    def _iter_sockets(self, listening):
        # Implemented by the platform-specific subclasses below.
        raise NotImplementedError

    def __repr__(self):
        return "<socket %s://%s%s>" % (
            self.protocol,
            self.host + ":" if self.host else "",
            self.port,
        )

    @classmethod
    def get_module_class(cls, host):
        # Pick the concrete implementation for the target host, preferring
        # ``ss`` over ``netstat`` on Linux.
        if host.system_info.type == "linux":
            for cmd, impl in (
                ('ss', LinuxSocketSS),
                ('netstat', LinuxSocketNetstat),
            ):
                try:
                    command = host.find_command(cmd)
                except ValueError:
                    pass
                else:
                    return type(impl.__name__, (impl,), {'_command': command})
            raise RuntimeError(
                'could not use the Socket module, either "ss" or "netstat"'
                ' utility is required in $PATH')
        if host.system_info.type.endswith("bsd"):
            return BSDSocket
        raise NotImplementedError
class LinuxSocketSS(Socket):
    # Socket implementation backed by the Linux ``ss`` utility.
    def _iter_sockets(self, listening):
        # Yields (proto, host, port) for listening sockets,
        # (proto, host, port, remote_host, remote_port) for established ones,
        # and (unix, path[, peer]) for unix stream sockets.
        cmd = '%s --numeric'
        if listening:
            cmd += ' --listening'
        else:
            cmd += ' --all'
        if self.protocol == 'tcp':
            cmd += ' --tcp'
        elif self.protocol == 'udp':
            cmd += ' --udp'
        elif self.protocol == 'unix':
            cmd += ' --unix'
        # [1:] skips the header line of ss output.
        for line in self.run(cmd, self._command).stdout_bytes.splitlines()[1:]:
            # Ignore unix datagram sockets.
            if line.split(None, 1)[0] == b'u_dgr':
                continue
            splitted = line.decode().split()
            # If listing only TCP or UDP sockets, output has 5 columns:
            # (State, Recv-Q, Send-Q, Local Address:Port, Peer Address:Port)
            if self.protocol in ('tcp', 'udp'):
                protocol = self.protocol
                status, local, remote = (
                    splitted[0], splitted[3], splitted[4])
            # If listing all or just unix sockets, output has 6 columns:
            # Netid, State, Recv-Q, Send-Q, LocalAddress:Port, PeerAddress:Port
            else:
                protocol, status, local, remote = (
                    splitted[0], splitted[1], splitted[4], splitted[5])
            # ss reports unix socket as u_str.
            if protocol == 'u_str':
                protocol = 'unix'
                host, port = local, None
            elif protocol in ('tcp', 'udp'):
                host, port = local.rsplit(':', 1)
                port = int(port)
                # new versions of ss output ipv6 adresses enclosed in []
                if host and host[0] == '[' and host[-1] == ']':
                    host = host[1:-1]
            else:
                # Any other netid (raw, netlink, ...) is irrelevant here.
                continue
            # UDP listening sockets may be in 'UNCONN' status.
            if listening and status in ('LISTEN', 'UNCONN'):
                if host == '*' and protocol in ('tcp', 'udp'):
                    # '*' means listening on every address of both families.
                    yield protocol, '::', port
                    yield protocol, '0.0.0.0', port
                elif protocol in ('tcp', 'udp'):
                    yield protocol, host, port
                else:
                    yield protocol, host
            elif not listening and status == 'ESTAB':
                if protocol in ('tcp', 'udp'):
                    remote_host, remote_port = remote.rsplit(':', 1)
                    remote_port = int(remote_port)
                    yield protocol, host, port, remote_host, remote_port
                else:
                    yield protocol, remote
class LinuxSocketNetstat(Socket):
    """Socket implementation backed by the Linux ``netstat`` command."""

    def _iter_sockets(self, listening):
        # Numeric output; optionally restrict to listening sockets and to
        # a single protocol family.
        command = "%s -n"
        if listening:
            command += " -l"
        proto_option = {"tcp": " -t", "udp": " -u", "unix": " --unix"}.get(
            self.protocol)
        if proto_option is not None:
            command += proto_option
        for raw in self.check_output(command, self._command).splitlines():
            fields = raw.replace("\t", " ").split()
            proto = fields[0]
            if proto in ("udp", "tcp", "tcp6", "udp6"):
                # Normalize the ipv6 variants onto the plain protocol name.
                proto = {"udp6": "udp", "tcp6": "tcp"}.get(proto, proto)
                local_host, local_port = fields[3].rsplit(":", 1)
                local_port = int(local_port)
                if listening:
                    yield proto, local_host, local_port
                else:
                    peer_host, peer_port = fields[4].rsplit(":", 1)
                    yield proto, local_host, local_port, peer_host, int(peer_port)
            elif proto == "unix":
                # Unix sockets: the path is the last column.
                yield proto, fields[-1]
class BSDSocket(Socket):
    # Socket implementation for FreeBSD/OpenBSD/NetBSD, backed by netstat.
    @cached_property
    def _command(self):
        # Resolved lazily; BSD netstat is always looked up on the target.
        return self.find_command('netstat')
    def _iter_sockets(self, listening):
        cmd = "%s -n"
        if listening:
            cmd += " -a"
        if self.protocol == "unix":
            cmd += " -f unix"
        for line in self.check_output(cmd, self._command).splitlines():
            line = line.replace("\t", " ")
            splitted = line.split()
            # FreeBSD: tcp4/tcp6
            # OpeNBSD/NetBSD: tcp/tcp6
            if splitted[0] in ("tcp", "udp", "udp4", "tcp4", "tcp6", "udp6"):
                address = splitted[3]
                if address == '*.*':
                    # On OpenBSD 6.3 (issue #338)
                    # udp          0      0  *.*                    *.*
                    # udp6         0      0  *.*                    *.*
                    continue
                # BSD netstat separates host and port with a dot.
                host, port = address.rsplit(".", 1)
                port = int(port)
                if host == "*":
                    # Wildcard host: map onto the family's any-address.
                    if splitted[0] in ("udp6", "tcp6"):
                        host = "::"
                    else:
                        host = "0.0.0.0"
                if splitted[0] in ("udp", "udp6", "udp4"):
                    protocol = "udp"
                elif splitted[0] in ("tcp", "tcp6", "tcp4"):
                    protocol = "tcp"
                remote = splitted[4]
                if remote == "*.*" and listening:
                    yield protocol, host, port
                elif not listening:
                    remote_host, remote_port = remote.rsplit(".", 1)
                    remote_port = int(remote_port)
                    yield protocol, host, port, remote_host, remote_port
            elif len(splitted) == 9 and splitted[1] in ("stream", "dgram"):
                # Unix sockets: a non-zero inode column marks a listener.
                if (
                    (splitted[4] != "0" and listening)
                    or (splitted[4] == "0" and not listening)
                ):
                    yield 'unix', splitted[-1]
| |
###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
__all__ = ['E']
import operator
import sys
import threading
import numpy
# Declare a double type that does not exist in Python space
double = numpy.double
# The default kind for undeclared variables
default_kind = 'double'
# Python 2 keeps the builtin int/long; Python 3 maps onto fixed-width numpy ints.
if sys.version_info[0] < 3:
    int_ = int
    long_ = long
else:
    int_ = numpy.int32
    long_ = numpy.int64
type_to_kind = {bool: 'bool', int_: 'int', long_: 'long', float: 'float',
                double: 'double', complex: 'complex', bytes: 'bytes'}
kind_to_type = {'bool': bool, 'int': int_, 'long': long_, 'float': float,
                'double': double, 'complex': complex, 'bytes': bytes}
kind_rank = ['bool', 'int', 'long', 'float', 'double', 'complex', 'none']
# BUGFIX: was bare ``long``, which is a NameError on Python 3; the ``long_``
# alias is identical on Python 2 and valid on both.
scalar_constant_types = [bool, int_, long_, float, double, complex, bytes]
# Final corrections for Python 3 (mainly for PyTables needs)
if sys.version_info[0] > 2:
    type_to_kind[str] = 'str'
    kind_to_type['str'] = str
    scalar_constant_types.append(str)
scalar_constant_types = tuple(scalar_constant_types)
from numexpr import interpreter
class Expression(object):
    """Attribute-access factory for variable nodes.

    ``E.foo`` produces ``VariableNode('foo', default_kind)``; names
    starting with an underscore are resolved from the instance __dict__
    instead (raising KeyError when absent).
    """
    def __init__(self):
        object.__init__(self)

    def __getattr__(self, name):
        if not name.startswith('_'):
            return VariableNode(name, default_kind)
        return self.__dict__[name]


E = Expression()
class Context(threading.local):
    """Thread-local bag of evaluation settings (e.g. 'optimization')."""
    # Class-level default; each instance flips it to True on first __init__.
    initialized = False
    def __init__(self, dict_):
        # threading.local re-runs __init__ per thread; guard against a
        # second explicit call on the same instance in the same thread.
        if self.initialized:
            raise SystemError('__init__ called too many times')
        self.initialized = True
        self.__dict__.update(dict_)
    def get(self, value, default):
        # Return the setting named *value*, or *default* when unset.
        return self.__dict__.get(value, default)
    def get_current_context(self):
        # Expose the raw settings mapping for the current thread.
        return self.__dict__
    def set_new_context(self, dict_):
        # Merge *dict_* into the current thread's settings.
        self.__dict__.update(dict_)
# This will be called each time the local object is used in a separate thread
_context = Context({})
def get_optimization():
    # Current optimization level for this thread ('none' when unset).
    return _context.get('optimization', 'none')
# helper functions for creating __magic__ methods
def ophelper(f):
    """Decorator for operator implementations.

    Wraps *f* so that constant scalar arguments are coerced into
    ConstantNode instances and anything that is not an ExpressionNode is
    rejected with TypeError. Name, doc and __dict__ are copied from *f*.
    """
    def func(*args):
        coerced = list(args)
        for idx, arg in enumerate(coerced):
            if isConstant(arg):
                coerced[idx] = arg = ConstantNode(arg)
            if not isinstance(arg, ExpressionNode):
                raise TypeError("unsupported object type: %s" % type(arg))
        return f(*coerced)
    func.__name__ = f.__name__
    func.__doc__ = f.__doc__
    func.__dict__.update(f.__dict__)
    return func
def allConstantNodes(args):
    "returns True if args are all ConstantNodes."
    return all(isinstance(arg, ConstantNode) for arg in args)
def isConstant(ex):
    "Returns True if ex is a constant scalar of an allowed type."
    # scalar_constant_types is the module-level tuple built above.
    return isinstance(ex, scalar_constant_types)
def commonKind(nodes):
    """Return the common result kind for *nodes* per kind_rank ordering.

    Mixing string and non-string operands raises TypeError; all-string
    operands yield 'bytes'. Otherwise the highest-ranked kind wins
    ('none' for an empty sequence).
    """
    node_kinds = [node.astKind for node in nodes]
    str_count = node_kinds.count('bytes') + node_kinds.count('str')
    if 0 < str_count < len(node_kinds):  # some args are strings, but not all
        raise TypeError("strings can only be operated with strings")
    if str_count > 0:  # if there are some, all of them must be
        return 'bytes'
    rank = -1
    for k in node_kinds:
        idx = kind_rank.index(k)
        if idx > rank:
            rank = idx
    return kind_rank[rank]
max_int32 = 2147483647
min_int32 = -max_int32 - 1
def bestConstantType(x):
    """Return the type that best represents the scalar constant *x*.

    Returns one of bytes/bool/long_/double/int_/float/complex, or None
    when *x* cannot be losslessly converted to float or complex.
    """
    # ``numpy.string_`` is a subclass of ``bytes``
    if isinstance(x, (bytes, str)):
        return bytes
    # Numeric conversion to boolean values is not tried because
    # ``bool(1) == True`` (same for 0 and False), so 0 and 1 would be
    # interpreted as booleans when ``False`` and ``True`` are already
    # supported.
    if isinstance(x, (bool, numpy.bool_)):
        return bool
    # ``long`` objects are kept as is to allow the user to force
    # promotion of results by using long constants, e.g. by operating
    # a 32-bit array with a long (64-bit) constant.
    if isinstance(x, (long_, numpy.int64)):
        return long_
    # ``double`` objects are kept as is to allow the user to force
    # promotion of results by using double constants, e.g. by operating
    # a float (32-bit) array with a double (64-bit) constant.
    if isinstance(x, double):
        return double
    if isinstance(x, (int, numpy.integer)):
        # Constants needing more than 32 bits are always
        # considered ``long``, *regardless of the platform*, so we
        # can clearly tell 32- and 64-bit constants apart.
        if not (min_int32 <= x <= max_int32):
            return long_
        return int_
    # The duality of float and double in Python avoids that we have to list
    # ``double`` too.
    for converter in float, complex:
        try:
            y = converter(x)
        # BUGFIX: was ``except StandardError, err:`` -- Python-2-only syntax
        # that is a SyntaxError on Python 3 (which this module otherwise
        # supports), and ``err`` was unused.  ``Exception`` is the portable
        # near-equivalent of StandardError.
        except Exception:
            continue
        if y == x:
            return converter
def getKind(x):
    # Map a scalar constant onto its numexpr kind string via bestConstantType.
    converter = bestConstantType(x)
    return type_to_kind[converter]
def binop(opname, reversed=False, kind=None):
    """Build a binary operator method for ExpressionNode.

    Constant-only operands are folded eagerly via the matching
    ``operator`` function; otherwise an OpNode is emitted. *reversed*
    swaps the operands (for the __r*__ variants); *kind* forces the
    result kind when given.
    """
    # Getting the named method from self (after reversal) does not
    # always work (e.g. int constants do not have a __lt__ method).
    opfunc = getattr(operator, "__%s__" % opname)

    @ophelper
    def operation(self, other):
        lhs, rhs = (other, self) if reversed else (self, other)
        if allConstantNodes([lhs, rhs]):
            return ConstantNode(opfunc(lhs.value, rhs.value))
        return OpNode(opname, (lhs, rhs), kind=kind)
    return operation
def func(func, minkind=None, maxkind=None):
    # Wrap callable *func* as a FuncNode factory over expression arguments.
    # Constant-only calls are folded immediately; otherwise the result kind
    # is derived from the arguments, promoted to 'double' for int/long, or
    # clamped between *minkind*/*maxkind* (applied in that order).
    @ophelper
    def function(*args):
        if allConstantNodes(args):
            return ConstantNode(func(*[x.value for x in args]))
        kind = commonKind(args)
        if kind in ('int', 'long'):
            # Exception for following NumPy casting rules
            #FIXME: this is not always desirable. The following
            # functions which return ints (for int inputs) on numpy
            # but not on numexpr: copy, abs, fmod, ones_like
            kind = 'double'
        else:
            # Apply regular casting rules
            if minkind and kind_rank.index(minkind) > kind_rank.index(kind):
                kind = minkind
            if maxkind and kind_rank.index(maxkind) < kind_rank.index(kind):
                kind = maxkind
        return FuncNode(func.__name__, args, kind)
    return function
@ophelper
def where_func(a, b, c):
    # Ternary select: where(cond, then, else) -> FuncNode, folding
    # constant-only arguments through numpy.where.
    if isinstance(a, ConstantNode):
        #FIXME: This prevents where(True, a, b)
        raise ValueError("too many dimensions")
    if allConstantNodes([a,b,c]):
        return ConstantNode(numpy.where(a, b, c))
    return FuncNode('where', [a,b,c])
def encode_axis(axis):
    """Validate a reduction axis and wrap it as a RawNode.

    None selects all axes; negative axes and axes above 254 are rejected.
    """
    if isinstance(axis, ConstantNode):
        axis = axis.value
    if axis is None:
        return RawNode(interpreter.allaxes)
    if axis < 0:
        raise ValueError("negative axis are not supported")
    if axis > 254:
        raise ValueError("cannot encode axis")
    return RawNode(axis)
def sum_func(a, axis=None):
    # Reduction node for sum(). A ConstantNode argument is returned as-is
    # (summing a scalar is the scalar); raw Python/numpy scalars are wrapped
    # first so FuncNode gets an expression node.
    # NOTE(review): the scalar-wrap happens AFTER the ConstantNode
    # short-circuit here but BEFORE it in prod_func -- so a raw scalar still
    # produces a FuncNode here; confirm this asymmetry is intended.
    axis = encode_axis(axis)
    if isinstance(a, ConstantNode):
        return a
    if isinstance(a, (bool, int_, long_, float, double, complex)):
        a = ConstantNode(a)
    return FuncNode('sum', [a, axis], kind=a.astKind)
def prod_func(a, axis=None):
    # Reduction node for prod(). Raw scalars are wrapped into ConstantNode
    # first, and any ConstantNode is returned unchanged (the product of a
    # scalar is the scalar).
    axis = encode_axis(axis)
    if isinstance(a, (bool, int_, long_, float, double, complex)):
        a = ConstantNode(a)
    if isinstance(a, ConstantNode):
        return a
    return FuncNode('prod', [a, axis], kind=a.astKind)
@ophelper
def div_op(a, b):
    """Classic division node.

    Under 'moderate'/'aggressive' optimization, dividing by a
    float/double/complex constant of the same kind is rewritten as a
    multiplication by its reciprocal.
    """
    reciprocal_ok = (
        get_optimization() in ('moderate', 'aggressive')
        and isinstance(b, ConstantNode)
        and a.astKind == b.astKind
        and a.astKind in ('float', 'double', 'complex'))
    if reciprocal_ok:
        return OpNode('mul', [a, ConstantNode(1. / b.value)])
    return OpNode('div', [a, b])
@ophelper
def truediv_op(a, b):
    """True division node: like div_op, but bool/int/long operands
    promote the result kind to 'double'."""
    if get_optimization() in ('moderate', 'aggressive'):
        if (isinstance(b, ConstantNode)
                and a.astKind == b.astKind
                and a.astKind in ('float', 'double', 'complex')):
            # Multiply by the reciprocal instead of dividing.
            return OpNode('mul', [a, ConstantNode(1. / b.value)])
    result_kind = commonKind([a, b])
    if result_kind in ('bool', 'int', 'long'):
        result_kind = 'double'
    return OpNode('div', [a, b], kind=result_kind)
@ophelper
def rtruediv_op(a, b):
    # Reflected true division: swap operands and delegate.
    return truediv_op(b, a)
@ophelper
def pow_op(a, b):
    # Power node with strength reduction for constant exponents.
    # Under 'aggressive' optimization, integral and half-integral exponents
    # in [-RANGE, RANGE] are expanded into a chain of multiplications
    # (binary exponentiation) plus an optional sqrt; under 'moderate' only
    # the trivial exponents -1, 0, 0.5, 1, 2 are special-cased.
    if allConstantNodes([a, b]):
        return ConstantNode(a**b)
    if isinstance(b, ConstantNode):
        x = b.value
        if get_optimization() == 'aggressive':
            RANGE = 50 # Approximate break even point with pow(x,y)
            # Optimize all integral and half integral powers in [-RANGE, RANGE]
            # Note: for complex numbers RANGE could be larger.
            if (int(2*x) == 2*x) and (-RANGE <= abs(x) <= RANGE):
                n = int_(abs(x))
                ishalfpower = int_(abs(2*x)) % 2
                def multiply(x, y):
                    # None acts as the multiplicative identity accumulator.
                    if x is None: return y
                    return OpNode('mul', [x, y])
                r = None
                p = a
                mask = 1
                # Square-and-multiply over the bits of n.
                while True:
                    if (n & mask):
                        r = multiply(r, p)
                    mask <<= 1
                    if mask > n:
                        break
                    p = OpNode('mul', [p,p])
                if ishalfpower:
                    kind = commonKind([a])
                    if kind in ('int', 'long'):
                        kind = 'double'
                    r = multiply(r, OpNode('sqrt', [a], kind))
                if r is None:
                    # Exponent was 0: result is ones_like(a).
                    r = OpNode('ones_like', [a])
                if x < 0:
                    # Negative exponent: take the reciprocal of the chain.
                    r = OpNode('div', [ConstantNode(1), r])
                return r
        if get_optimization() in ('moderate', 'aggressive'):
            if x == -1:
                return OpNode('div', [ConstantNode(1),a])
            if x == 0:
                return OpNode('ones_like', [a])
            if x == 0.5:
                kind = a.astKind
                if kind in ('int', 'long'): kind = 'double'
                return FuncNode('sqrt', [a], kind=kind)
            if x == 1:
                return a
            if x == 2:
                return OpNode('mul', [a,a])
    return OpNode('pow', [a,b])
# The functions and the minimum and maximum types accepted
# Each entry wraps a numpy callable (or a hand-written node factory) via
# func(); the optional second/third arguments are the minimum and maximum
# result kinds (see func() above).
functions = {
    'copy' : func(numpy.copy),
    'ones_like' : func(numpy.ones_like),
    'sqrt' : func(numpy.sqrt, 'float'),
    'sin' : func(numpy.sin, 'float'),
    'cos' : func(numpy.cos, 'float'),
    'tan' : func(numpy.tan, 'float'),
    'arcsin' : func(numpy.arcsin, 'float'),
    'arccos' : func(numpy.arccos, 'float'),
    'arctan' : func(numpy.arctan, 'float'),
    'sinh' : func(numpy.sinh, 'float'),
    'cosh' : func(numpy.cosh, 'float'),
    'tanh' : func(numpy.tanh, 'float'),
    'arcsinh' : func(numpy.arcsinh, 'float'),
    'arccosh' : func(numpy.arccosh, 'float'),
    'arctanh' : func(numpy.arctanh, 'float'),
    'fmod' : func(numpy.fmod, 'float'),
    'arctan2' : func(numpy.arctan2, 'float'),
    'log' : func(numpy.log, 'float'),
    'log1p' : func(numpy.log1p, 'float'),
    'log10' : func(numpy.log10, 'float'),
    'exp' : func(numpy.exp, 'float'),
    'expm1' : func(numpy.expm1, 'float'),
    'abs': func(numpy.absolute, 'float'),
    'where' : where_func,
    'real' : func(numpy.real, 'double', 'double'),
    'imag' : func(numpy.imag, 'double', 'double'),
    'complex' : func(complex, 'complex'),
    'sum' : sum_func,
    'prod' : prod_func,
    }
class ExpressionNode(object):
    """An object that represents a generic number object.
    This implements the number special methods so that we can keep
    track of how this object has been used.
    """
    astType = 'generic'
    def __init__(self, value=None, kind=None, children=None):
        object.__init__(self)
        self.value = value
        if kind is None:
            kind = 'none'
        self.astKind = kind
        if children is None:
            self.children = ()
        else:
            self.children = tuple(children)
    def get_real(self):
        # Real part: folded for constants, otherwise a 'real' OpNode.
        if self.astType == 'constant':
            return ConstantNode(complex(self.value).real)
        return OpNode('real', (self,), 'double')
    real = property(get_real)
    def get_imag(self):
        # Imaginary part: folded for constants, otherwise an 'imag' OpNode.
        if self.astType == 'constant':
            return ConstantNode(complex(self.value).imag)
        return OpNode('imag', (self,), 'double')
    imag = property(get_imag)
    def __str__(self):
        return '%s(%s, %s, %s)' % (self.__class__.__name__, self.value,
                                   self.astKind, self.children)
    def __repr__(self):
        return self.__str__()
    def __neg__(self):
        return OpNode('neg', (self,))
    def __invert__(self):
        return OpNode('invert', (self,))
    def __pos__(self):
        return self
    # The next check is commented out. See #24 for more info.
    # NOTE(review): __nonzero__ is the Python 2 hook; on Python 3 the
    # equivalent is __bool__, so this guard has no effect there -- confirm
    # whether a `__bool__ = __nonzero__` alias is intended.
    def __nonzero__(self):
        raise TypeError("You can't use Python's standard boolean operators in "
                        "NumExpr expressions. You should use their bitwise "
                        "counterparts instead: '&' instead of 'and', "
                        "'|' instead of 'or', and '~' instead of 'not'.")
    # Arithmetic operators are generated by binop()/module-level helpers.
    __add__ = __radd__ = binop('add')
    __sub__ = binop('sub')
    __rsub__ = binop('sub', reversed=True)
    __mul__ = __rmul__ = binop('mul')
    if sys.version_info[0] < 3:
        __div__ = div_op
        __rdiv__ = binop('div', reversed=True)
    __truediv__ = truediv_op
    __rtruediv__ = rtruediv_op
    __pow__ = pow_op
    __rpow__ = binop('pow', reversed=True)
    __mod__ = binop('mod')
    __rmod__ = binop('mod', reversed=True)
    # boolean operations
    __and__ = binop('and', kind='bool')
    __or__ = binop('or', kind='bool')
    __gt__ = binop('gt', kind='bool')
    __ge__ = binop('ge', kind='bool')
    __eq__ = binop('eq', kind='bool')
    __ne__ = binop('ne', kind='bool')
    # a < b is expressed as b > a (reversed 'gt'/'ge'), since int constants
    # may lack a usable __lt__.
    __lt__ = binop('gt', reversed=True, kind='bool')
    __le__ = binop('ge', reversed=True, kind='bool')
class LeafNode(ExpressionNode):
    # Marker attribute: leaf nodes carry no operator children.
    leafNode = True
class VariableNode(LeafNode):
    # Leaf node naming an input variable; *value* is the variable name.
    astType = 'variable'
    def __init__(self, value=None, kind=None, children=None):
        # *children* is accepted for signature symmetry but ignored.
        LeafNode.__init__(self, value=value, kind=kind)
class RawNode(object):
    """Used to pass raw integers to interpreter.

    For instance, for selecting what function to use in func1.
    Purposely don't inherit from ExpressionNode, since we don't want
    this to be used for anything but being walked.
    """
    astType = 'raw'
    astKind = 'none'

    def __init__(self, value):
        self.value = value
        self.children = ()

    def __str__(self):
        return 'RawNode(%s)' % (self.value,)

    __repr__ = __str__
class ConstantNode(LeafNode):
    """Leaf node holding a scalar constant; its kind comes from getKind()."""
    astType = 'constant'

    def __init__(self, value=None, children=None):
        constant_kind = getKind(value)
        # Python float constants are double precision by default
        if constant_kind == 'float':
            constant_kind = 'double'
        LeafNode.__init__(self, value=value, kind=constant_kind)

    def __neg__(self):
        # Negation/inversion of a constant folds to a new constant.
        return ConstantNode(-self.value)

    def __invert__(self):
        return ConstantNode(~self.value)
class OpNode(ExpressionNode):
    # Interior node for an operator; *opcode* names the operation and
    # *args* are its operand nodes. Kind defaults to commonKind(args).
    astType = 'op'
    def __init__(self, opcode=None, args=None, kind=None):
        if (kind is None) and (args is not None):
            kind = commonKind(args)
        ExpressionNode.__init__(self, value=opcode, kind=kind, children=args)
class FuncNode(OpNode):
    # OpNode variant for named functions (see the ``functions`` table).
    def __init__(self, opcode=None, args=None, kind=None):
        if (kind is None) and (args is not None):
            kind = commonKind(args)
        OpNode.__init__(self, opcode, args, kind)
| |
#!/usr/bin/env python
"""
--------------------------------------------------------------------------
Routine to perform aperture photometry on CHIMERA science frames.
Usage: python fastphot.py [options] image coords
Authors:
Navtej Saini, Lee Rosenthal
Organization:
Caltech, Pasadena, CA, USA
Version:
7 January 2016 0.1 Initial implementation
9 February 2016 0.2 User input for photometric zero point
28 July 2017 0.3 Allow processing of multiple stars.
--------------------------------------------------------------------------
"""
import os, sys
import numpy as np, warnings
from StringIO import StringIO
from optparse import OptionParser
import pdb
try:
import matplotlib.pylab as plt
except ImportError:
plot_flag = False
else:
try:
import seaborn
except ImportError:
pass
plot_flag = True
import chimera
def plotter(phot_data, nframes, exptime, outfile):
    """
    Plot light curve.
    Parameters
    ----------
    phot_data : numpy array
        Photometry array (structured, with 'DATETIME' and 'FLUX_ADU'
        fields; assumed 1-D here -- note process() builds a 2-D
        [nstars, nframes] array, TODO confirm before re-enabling the
        commented-out call site)
    nframes : int
        Number of image cube frames
    exptime : float
        Kinetic or accumulation time
    outfile : string
        Name of the out png image
    Returns
    -------
    None
    """
    params = {'backend': 'ps',
              'font.size': 10,
              'axes.labelweight': 'medium',
              'figure.dpi' : 300,
              'savefig.dpi': 300,
              'savefig.jpeg_quality': 100
              }
    plt.rcParams.update(params)
    # One sample per frame, spaced by the exposure time.
    ts = np.linspace(0, nframes*exptime, nframes)
    plt.figure(figsize=(6,4))
    plt.title("Normalized Light Curve : %s" %phot_data[0]['DATETIME'].split('T')[0])
    plt.xlabel("Time (secs)")
    plt.ylabel("Normalized Flux")
    # Flux normalized by its mean over the whole cube.
    plt.plot(ts, phot_data['FLUX_ADU']/np.mean(phot_data['FLUX_ADU']), "r-")
    plt.savefig(outfile, dpi = 300, bbox_inches = "tight")
    return
def time_average(image, avg):
    """
    Bin an image cube along the time axis.

    Parameters
    ----------
    image : numpy array
        Cube of shape (nframes, ny, nx).
    avg : int
        Number of consecutive frames to average together; trailing frames
        beyond the last full group are dropped.

    Returns
    -------
    numpy array
        Cube of shape (nframes // avg, ny, nx) holding per-group means.
    """
    nsnaps = image.shape[0]
    avg = int(avg)
    # Explicit floor division: plain '/' only floors on Python 2, and
    # np.zeros requires integral dimensions on Python 3.
    ngroups = nsnaps // avg
    new_image = np.zeros((ngroups, image.shape[1], image.shape[2]))
    for i in range(ngroups):
        new_image[i,:,:] = np.sum(image[i*avg:(i+1)*avg,:,:], axis=0)/avg
    return new_image
def process(infile, coords, method, inner_radius, outer_radius, cen_method, window_size, output, zmag, avg):
    """
    Entry point function to process science image.
    Parameters
    ----------
    infile : string
        Science image, comma-separated list of images, or "@file" naming
        a text file with one image path per line
    coords : string
        Input text file with coordinates of stars
    method : string
        Method used to determine the overlap between aperture and pixels
        (forwarded to Aperphot.method)
    inner_radius : float
        Inner sky annulus radius in pixels
    outer_radius : float
        Outer sky annulus radius in pixels
    cen_method : string
        Centroid method
    window_size : int
        Centroid finding window size in pixels
    output : string
        Output file name prefix ("" derives names from the input file)
    zmag : float or ""
        Photometric zero point ("" keeps Aperphot's default)
    avg : int or ""
        Number of consecutive frames to time-average before photometry
        ("" disables averaging)
    Returns
    -------
    None
    """
    print "FASTPHOT: CHIMERA Fast Aperture Photometry Routine"
    inner_radius = float(inner_radius)
    outer_radius = float(outer_radius)
    # Check if input is a string of FITS images or a text file with file names
    if infile[0] == "@":
        infile = infile[1:]
        if not os.path.exists(infile):
            print "REGISTER: Not able to locate file %s" %infile
        image_cubes = []
        with open(infile, "r") as fd:
            for line in fd.readlines():
                if len(line) > 1:
                    image_cubes.append(line.replace("\n", ""))
    else:
        image_cubes = infile.split(",")
    # Number of images
    ncubes = len(image_cubes)
    pos = np.loadtxt(coords, ndmin = 2)
    nstars = len(pos)
    total_phot_data = []
    for i in range(ncubes):
        sci_file = image_cubes[i]
        print "  Processing science image %s" %sci_file
        # Read FITS image and star coordinate
        image = chimera.fitsread(sci_file)
        # NOTE(review): after the first cube, avg has been turned into an
        # int, so subsequent cubes always run time_average (a no-op copy
        # when avg == 1) -- confirm this is the intended behavior.
        if avg != "":
            image = time_average(image, avg)
            avg = int(avg)
        else:
            avg = 1
        # Instantiate an Aperphot object
        ap = chimera.Aperphot(sci_file, coords)
        # Set fwhmpsf, sigma, annulus, dannulus and zmag
        ap.method = method
        ap.inner_radius = inner_radius
        ap.outer_radius = outer_radius
        if zmag != "":
            ap.zmag = float(zmag)
        # Determine nominal aperture radius for photometry
        if i == 0:
            nom_aper = ap.cog(window_size, cen_method)
            #nom_aper = 3
            print "  Nominal aperture radius : %4.1f pixels" %nom_aper
        # Perform aperture photometry on all the frames
        dtype = [("DATETIME", "S25"),("XCEN", "f4"),("YCEN", "f4"),("MSKY", "f8"),("NSKY", "f8"),("AREA", "f8"),("FLUX_ADU", "f8"),("FLUX_ELEC", "f8"),("FERR", "f8"),("MAG", "f8")]
        # NOTE(review): ap.nframes/avg relies on Python 2 integer division.
        phot_data = np.zeros([nstars, ap.nframes/avg], dtype = dtype)
        for j in range(ap.nframes/avg):
            print "    Processing frame number : %d" %(j+1)
            # Re-center on each frame and feed the updated positions forward.
            objpos = chimera.recenter(image[j,:,:], pos, window_size, cen_method)
            aperphot_data = ap.phot(image[j,:,:], objpos, nom_aper)
            pos = np.copy(objpos)
            phot_data[:,j]['DATETIME'] = ap.addtime(j * avg * ap.kintime).isoformat()
            phot_data[:,j]['XCEN'] = aperphot_data["xcenter_raw"]
            phot_data[:,j]['YCEN'] = aperphot_data["ycenter_raw"]
            phot_data[:,j]['MSKY'] = aperphot_data["msky"]
            phot_data[:,j]['NSKY'] = aperphot_data["nsky"]
            phot_data[:,j]['AREA'] = aperphot_data["area"]
            phot_data[:,j]['FLUX_ADU'] = aperphot_data["flux"]
            phot_data[:,j]['FLUX_ELEC'] = phot_data[:,j]['FLUX_ADU'] * ap.epadu
            phot_data[:,j]['MAG'] = ap.zmag - 2.5 * np.log10(phot_data[:,j]['FLUX_ELEC']/(ap.exptime))
            # Calculate error in flux - using the formula
            # err = sqrt(flux * gain + npix * (1 + (npix/nsky)) * (flux_sky * gain + R**2))
            phot_data[:,j]['FERR'] = np.sqrt(phot_data[:,j]['FLUX_ELEC'] + phot_data[:,j]['AREA'] * (1 + phot_data[:,j]['AREA']/phot_data[:,j]['NSKY']) * (phot_data[:,j]['MSKY'] * ap.epadu + ap.readnoise**2))
        total_phot_data.append(phot_data)
        # Save photometry data in numpy binary format
        print "  Saving photometry data as numpy binary"
        if output != "":
            npy_outfile = output + ".npy"
        else:
            npy_outfile = sci_file.replace(".fits", ".phot.npy")
        if os.path.exists(npy_outfile):
            os.remove(npy_outfile)
        #np.save(npy_outfile, phot_data)
        '''
        # Plot first pass light curve
        if plot_flag:
            print "  Plotting normalized light curve"
            if output != "":
                plt_outfile = output + ".png"
            else:
                plt_outfile = sci_file.replace(".fits", ".lc.png")
            plotter(phot_data, ap.nframes, ap.kintime, plt_outfile)
        '''
    # Convert the total_phot_data to array and reshape it
    print '  Saving consolidated photometry data...'
    total_phot_data_arr = np.concatenate(total_phot_data, axis=1)
    # Save the array as npy file
    if output != "":
        np.save(output+"phot_total.npy", total_phot_data_arr)
    else: np.save("phot_total.npy", total_phot_data_arr)
    return
if __name__ == "__main__":
    # Command-line entry point: parse options, optionally silence stdout,
    # and run the aperture-photometry pipeline via process().
    usage = "Usage: python %prog [options] sci_image coords"
    description = "Description. Utility to perform fast aperture photometry in CHIMERA science images."
    parser = OptionParser(usage=usage, version="%prog 0.2",
                          description=description)
    # The two verbosity switches share one dest; optparse keeps the default
    # of the later registration, so verbose defaults to True.
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="print result messages to stdout")
    parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
                      default=True,
                      help="don't print result messages to stdout")
    # (short, long, dest, metavar, help, default) for plain value options.
    value_options = [
        ("-m", "--method", "method", "METHOD",
         "Method to use for determining overlap between aperture and pixels (default is exact)",
         "exact"),
        ("-i", "--inner_radius", "inner_radius", "INNER_RADIUS",
         "Inner radius of sky annlus in pixels (default is 14)", 14),
        ("-d", "--outer_radius", "outer_radius", "OUTER_RADIUS",
         "Radius of sky annulus in pixels (default is 16)", 16),
        ("-c", "--cen_method", "cen_method", "CEN_METHOD",
         "Centroid method (default is 2dg)", "2dg"),
        ("-w", "--window_size", "window_size", "WINDOW_SIZE",
         "Window size for centroid (default is 35)", 35),
        ("-o", "--output", "output", "OUTPUT", "Output file name", ""),
        ("-z", "--zmag", "zmag", "ZMAG", "Photometric zero point", ""),
        ("-a", "--avg", "avg", "avg", "Number of frames to average", ""),
    ]
    for short_opt, long_opt, dest, metavar, help_text, default in value_options:
        parser.add_option(short_opt, long_opt, dest=dest, action="store",
                          metavar=metavar, help=help_text, default=default)
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error("FASTPHOT: Incorrect number of arguments")
    # Quiet mode: capture (and discard) stdout for the duration of the run.
    quiet = not options.verbose
    if quiet:
        old_stdout = sys.stdout
        sys.stdout = StringIO()
    # Switch off warnings
    warnings.filterwarnings('ignore')
    process(args[0], args[1], options.method, options.inner_radius,
            options.outer_radius, options.cen_method, options.window_size,
            options.output, options.zmag, options.avg)
    # Restore stdout after processing.
    if quiet:
        sys.stdout = old_stdout
| |
# -*- encoding=utf-8 -*-
# Copyright 2016 David Cary; licensed under the Apache License, Version 2.0
"""Validate and reformat RCV data"""
from __future__ import print_function
from sb1288 import errors
from sb1288 import constants as K
from sb1288.ballot import Ballot
import sys
# Convenience functions to use the Validator class
def nbr_seats_to_fill(nbr_seats_to_fill):
    """
    Validate the number of seats to fill, using the Validator class

    This is a convenience function for using the Validator class.
    """
    return Validator().nbr_seats_to_fill(nbr_seats_to_fill)


def candidates(candidates):
    """
    Validate a sequence of candidates, using the Validator class

    This is a convenience function for using the Validator class.
    """
    return Validator().candidates(candidates)


def ballots(ballots, candidates, max_ranking_levels):
    """
    Validate a sequence of ballots, using the Validator class

    This is a convenience function for using the Validator class.
    """
    return Validator().ballots(ballots, candidates, max_ranking_levels)


def max_ranking_levels(max_ranking_levels):
    """
    Validate the max number of ranking levels, using the Validator class

    This is a convenience function for using the Validator class.
    """
    return Validator().max_ranking_levels(max_ranking_levels)


def tie_breaker(tie_breaker, candidates):
    """
    Validate a tie breaker specification, using the Validator class

    This is a convenience function for using the Validator class.
    """
    return Validator().tie_breaker(tie_breaker, candidates)


def options(options):
    """
    Validate RCV tabulation options, using the Validator class

    This is a convenience function for using the Validator class.
    """
    return Validator().options(options)
def str_tuple(value):
    """
    Produce a tuple of strings

    Arguments
    ---------
    value
      A string, a list of strings, or a tuple of strings.  Under Python 2
      a unicode value (or unicode item) is first encoded to a byte string.

    Returns
    -------
    A tuple of zero or more strings:

      * a tuple of strings is returned unchanged (as a new tuple when the
        input is a list);
      * a string is split on its own first character, so ',a,b' yields
        ('a', 'b') and '' yields an empty tuple.  Note that a string that
        is only the separator character (e.g. ',') yields ('',), a tuple
        holding one empty string.

    Raises
    ------
    TypeError
      If value (or an item within it) is not as described above.
    """
    if sys.version_info[0] == 2 and type(value) == unicode:
        value = value.encode('utf-8')
    if type(value) == str:
        # Self-delimited string: the first character is the separator.
        return tuple(value[1:].split(value[0])) if value else tuple()
    if type(value) not in (list, tuple):
        raise TypeError('Can not make a str_tuple from a {}.'.
                format(str(type(value))))
    collected = []
    for element in value:
        if sys.version_info[0] == 2 and type(element) == unicode:
            element = element.encode('utf-8')
        if type(element) != str:
            value_type = str(type(value)).split("'")[1]
            raise TypeError('Item in {} is not a str:'.format(value_type)
                + '\n {:25}= {}'.format('item type', str(type(element)))
                + '\n {:25}= {}'.format('item value', repr(element)))
        collected.append(element)
    return tuple(collected)
class Validator(object):
    """
    A collection of validation and reformatting methods for RCV data
    """

    def nbr_seats_to_fill(self, nbr_seats_to_fill):
        """Validate the number of seats to fill

        Arguments
        ---------
        nbr_seats_to_fill
          Must be a positive int

        Returns
        -------
        nbr_seats_to_fill if it meets requirements.

        Raises
        ------
        RcvValueError
          If nbr_seats_to_fill does not meet requirements.
        """
        # Exact type() comparison: int subclasses (including bool) are
        # rejected, not only non-numeric values.
        if type(nbr_seats_to_fill) != int:
            raise errors.RcvValueError('nbr_seats_to_fill not an int:', (
                ('type(nbr_seats_to_fill)', type(nbr_seats_to_fill)),
                ))
        if nbr_seats_to_fill <= 0:
            raise errors.RcvValueError('nbr_seats_to_fill not >= 1:', (
                ('nbr_seats_to_fill', nbr_seats_to_fill),
                ))
        return nbr_seats_to_fill

    def candidates(self, candidates):
        """
        Validate a specification of candidates names

        Arguments
        ---------
        candidates
          An ordered collection of strings, each a unique candidate name,
          which meets the requirements for rcv.tabulate().

        Returns
        -------
        A tuple of the candidate names, in the same order, if they meet
        requirements.

        Raises
        ------
        RcvValueError
          If the candidate names do not meet requirements.
        """
        # Normalize to a tuple of str; str_tuple's TypeError is wrapped in
        # the module's RcvValueError.
        try:
            candidates = str_tuple(candidates)
        except TypeError as exc:
            raise errors.RcvValueError('Invalid candidates type:', (), exc)
        for ix, name in enumerate(candidates):
            # Reserved ranking codes and names starting with ':' are not
            # allowed as candidate names.
            # NOTE(review): an empty candidate name would raise IndexError
            # on name[0] instead of RcvValueError — confirm whether empty
            # names can reach this point.
            if name in K.RANKING_CODES_NOT_A_CANDIDATE or name[0] == ':':
                raise errors.RcvValueError('Invalid candidate name:', (
                    ('candidate name', name),
                    ('candidate name index', ix),
                    ))
        # Duplicate detection: a set drops repeats, changing the length.
        if len(set(candidates)) != len(candidates):
            raise errors.RcvValueError('Candidate names are not unique.')
        return candidates

    def tie_breaker(self, tie_breaker, candidates):
        """
        Validate and convert a specification of a tie_breaker

        Arguments
        ---------
        tie_breaker
          An ordered collection of strings which meets the rcv.tabulate
          requirements for a tie_breaker.
        candidates
          A tuple of the names of all candidates.

        Returns
        -------
        A tie_breaker as a dictionary keyed by candidate names with the
        ordering indexes as values, if the tie_breaker argument meets
        requirements.

        Raises
        ------
        RcvValueError
          If the tie_breaker argument does not meet requirements.
        """
        try:
            tie_breaker = str_tuple(tie_breaker)
        except TypeError as exc:
            raise errors.RcvValueError('Invalid tie_breaker type:', (), exc)
        # Every tie-break entry must name a known candidate.
        for ix, name in enumerate(tie_breaker):
            if name not in candidates:
                raise errors.RcvValueError('Invalid candidate name in tie_breaker:', (
                    ('candidate name', name),
                    ('tie_breaker index', ix),
                    ))
        # Map each candidate to its position in the tie-break ordering.
        result = {candidate: index
                  for index, candidate in enumerate(tie_breaker)}
        # A repeated name collapses in the dict, shrinking its length.
        if len(result) != len(tie_breaker):
            raise errors.RcvValueError(
                'Candidate names in tie_breaker are not unique.')
        return result

    def ballots(self, ballots, candidates, max_ranking_levels):
        """
        Validate a specification of ballots

        Arguments
        ---------
        ballots
          A valid specification of ballots that meet the requirements of
          the rcv.tabulate function.
        candidates
          A tuple of all the names of all candidates.
        max_ranking_levels
          The maximum length of a ballot's rankings, possibly None.

        Returns
        -------
        A tuple of the ballots, each converted to a Ballot object, in the same
        order as ballots, if ballots meets requirements.

        Raises
        ------
        RcvValueError
          If the ballots do not meet requirements.
        """
        result = []
        if type(ballots) not in (list, tuple):
            raise errors.RcvValueError('ballots is not a list or tuple:', (
                ('type(ballots)', type(ballots)),
                ))
        for ix, ballot in enumerate(ballots):
            # Each ballot is a (multiple, rankings) pair.
            if type(ballot) not in (list, tuple):
                raise errors.RcvValueError('A ballot is not a list or tuple:', (
                    ('type(ballot)', type(ballot)),
                    ('ballot index', ix),
                    ))
            if len(ballot) != 2:
                raise errors.RcvValueError('A ballot is not a pair of values:', (
                    ('len(ballot)', len(ballot)),
                    ('ballot index', ix),
                    ))
            # The multiple is a positive int count of identical ballots.
            multiple = ballot[0]
            if type(multiple) != int:
                raise errors.RcvValueError('A ballot multiple is not an int:', (
                    ('type(multiple)', type(multiple)),
                    ('ballot index', ix),
                    ))
            if multiple < 1:
                raise errors.RcvValueError('A ballot multiple is zero or less:', (
                    ('multiple', multiple),
                    ('ballot index', ix),
                    ))
            try:
                rankings = str_tuple(ballot[1])
            except TypeError as exc:
                raise errors.RcvValueError('Invalid ballot rankings type:', (
                    ('ballot index', ix),
                    ), exc)
            # A max_ranking_levels of None means the length is unlimited.
            if (max_ranking_levels is not None and
                    len(rankings) > max_ranking_levels):
                raise errors.RcvValueError('Ballot rankings is too long:', (
                    ('len(rankings)', len(rankings)),
                    ('max_ranking_levels', max_ranking_levels),
                    ('ballot index', ix),
                    ))
            # Every ranking must be a candidate or a reserved ranking code.
            for rix, ranking_code in enumerate(rankings):
                if (ranking_code not in candidates and
                        ranking_code not in K.RANKING_CODES_NOT_A_CANDIDATE):
                    raise errors.RcvValueError('Invalid ballot ranking code:', (
                        ('ranking code', ranking_code),
                        ('ballot index', ix),
                        ('ranking code index', rix),
                        ))
            internal_ballot = Ballot(multiple, rankings)
            result.append(internal_ballot)
        result = tuple(result)
        return result

    def max_ranking_levels(self, max_ranking_levels):
        """Validate the maximum number of candidates that can be ranked

        Arguments
        ---------
        max_ranking_levels
          Must be None or an int that is at least three.

        Returns
        -------
        max_ranking_levels if it meets requirements.

        Raises
        ------
        RcvValueError
          If max_ranking_levels does not meet requirements.
        """
        # None is accepted unchanged; ballots() then applies no length limit.
        if max_ranking_levels is None:
            return max_ranking_levels
        if type(max_ranking_levels) != int:
            raise errors.RcvValueError('max_ranking_levels not an int:', (
                ('type(max_ranking_levels)', type(max_ranking_levels)),
                ))
        if max_ranking_levels < K.MIN_RANKINGS_SUPPORTED:
            raise errors.RcvValueError('max_ranking_levels is less than {}:'.
                format(K.MIN_RANKINGS_SUPPORTED), (
                ('max_ranking_levels', max_ranking_levels),
                ))
        return max_ranking_levels

    def options(self, options):
        """Validate a dictionary of rcv.tabulate options

        Arguments
        ---------
        options
          A dictionary of options that are valid for the the rcv.tabulate
          function.  An option is valid even if it might not be used.

        Returns
        -------
        An options dictionary, if the options argument meets requirements.

        Raises
        ------
        RcvValueError
          If options does not meet requirements.
        """
        result = {}
        if type(options) != dict:
            raise errors.RcvValueError('options is not a dict:', (
                ('type(options)', type(options)),
                ))
        for name, value in options.items():
            if type(name) != str:
                raise errors.RcvValueError('An option name is not a str:', (
                    ('option name', name),
                    ))
            if name == K.OPTION_STOP_AT_MAJORITY:
                # Identity tests require the actual True/False singletons,
                # so truthy stand-ins such as 0/1 are rejected.
                if value is not True and value is not False:
                    raise errors.RcvValueError('The option {} must be True or False.'.
                        format(repr(K.OPTION_STOP_AT_MAJORITY)))
                result[K.OPTION_STOP_AT_MAJORITY] = value
            elif name == K.OPTION_ALTERNATIVE_DEFEATS:
                # Either a single (case-insensitive) keyword...
                if (type(value) == str and value.upper() in
                        K.OPTION_ALTERNATIVE_DEFEATS_VALUE_SET):
                    value = value.upper()
                else:
                    # ...or a per-round sequence of keywords.
                    try:
                        value = str_tuple(value)
                    except TypeError as exc:
                        raise errors.RcvValueError(
                            'Invalid option value type:', (
                                ('option name', K.OPTION_ALTERNATIVE_DEFEATS),
                                ), exc)
                    for ix, per_round_value in enumerate(value):
                        if (per_round_value.upper() not in
                                K.OPTION_ALTERNATIVE_DEFEATS_VALUE_SET):
                            raise errors.RcvValueError('Invalid per-round option value:', (
                                ('per-round value', per_round_value),
                                ('index', ix),
                                ('for round', ix + 1),
                                ('option name', K.OPTION_ALTERNATIVE_DEFEATS),
                                ))
                    # Normalize all per-round values to upper case.
                    value = tuple([per_round_value.upper() for per_round_value in value])
                result[K.OPTION_ALTERNATIVE_DEFEATS] = value
            else:
                raise errors.RcvValueError('Invalid option name:', (
                    ('option name', name),
                    ))
        return result
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
from datetime import datetime
from libcloud.utils.iso8601 import UTC
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.ec2 import EC2NodeDriver
from libcloud.compute.drivers.ec2 import EC2USWestNodeDriver
from libcloud.compute.drivers.ec2 import EC2USWestOregonNodeDriver
from libcloud.compute.drivers.ec2 import EC2EUNodeDriver
from libcloud.compute.drivers.ec2 import EC2APSENodeDriver
from libcloud.compute.drivers.ec2 import EC2APNENodeDriver
from libcloud.compute.drivers.ec2 import EC2APSESydneyNodeDriver
from libcloud.compute.drivers.ec2 import EC2SAEastNodeDriver
from libcloud.compute.drivers.ec2 import EC2PlacementGroup
from libcloud.compute.drivers.ec2 import NimbusNodeDriver, EucNodeDriver
from libcloud.compute.drivers.ec2 import OutscaleSASNodeDriver
from libcloud.compute.drivers.ec2 import IdempotentParamError
from libcloud.compute.drivers.ec2 import REGION_DETAILS
from libcloud.compute.drivers.ec2 import ExEC2AvailabilityZone
from libcloud.compute.drivers.ec2 import EC2NetworkSubnet
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
from libcloud.compute.base import StorageVolume, VolumeSnapshot
from libcloud.compute.types import KeyPairDoesNotExistError, StorageVolumeState, \
VolumeSnapshotState
from libcloud.test import MockHttpTestCase, LibcloudTestCase
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test import unittest
from libcloud.test.secrets import EC2_PARAMS
# All-zero SSH key fingerprint (twenty "00" octets) used by the fixtures.
null_fingerprint = ':'.join(['00'] * 20)
class BaseEC2Tests(LibcloudTestCase):
    """Driver-instantiation checks that need no mocked HTTP responses."""

    def test_instantiate_driver_valid_regions(self):
        # 'nimbus' is special-cased and excluded from the public regions.
        valid_regions = [r for r in REGION_DETAILS.keys() if r != 'nimbus']
        endpoints = []
        for region in valid_regions:
            driver = EC2NodeDriver(*EC2_PARAMS, **{'region': region})
            endpoints.append(driver.connection.host)
        # Each region must resolve to a distinct API endpoint.
        self.assertEqual(len(endpoints),
                         len(set(endpoints)),
                         "Multiple Region Drivers were given the same API endpoint")

    def test_instantiate_driver_invalid_regions(self):
        for region in ['invalid', 'nimbus']:
            try:
                EC2NodeDriver(*EC2_PARAMS, **{'region': region})
            except ValueError:
                continue
            self.fail('Invalid region, but exception was not thrown')
class EC2Tests(LibcloudTestCase, TestCaseMixin):
    """Tests for the generic EC2 driver, run against EC2MockHttp fixtures."""
    # Fixture image and region used throughout the tests below.
    image_name = 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml'
    region = 'us-east-1'

    def setUp(self):
        """Route all driver HTTP traffic through the EC2MockHttp class."""
        EC2MockHttp.test = self
        EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        # Mock responses are dispatched on the 'Action' query parameter.
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        self.driver = EC2NodeDriver(*EC2_PARAMS,
                                    **{'region': self.region})
def test_create_node(self):
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='foo', image=image, size=size)
self.assertEqual(node.id, 'i-2ba64342')
self.assertEqual(node.name, 'foo')
self.assertEqual(node.extra['tags']['Name'], 'foo')
self.assertEqual(len(node.extra['tags']), 1)
def test_create_node_with_ex_mincount(self):
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='foo', image=image, size=size,
ex_mincount=1, ex_maxcount=10)
self.assertEqual(node.id, 'i-2ba64342')
self.assertEqual(node.name, 'foo')
self.assertEqual(node.extra['tags']['Name'], 'foo')
self.assertEqual(len(node.extra['tags']), 1)
def test_create_node_with_ex_assign_public_ip(self):
# assertions are done in _create_ex_assign_public_ip_RunInstances
EC2MockHttp.type = 'create_ex_assign_public_ip'
image = NodeImage(id='ami-11111111',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
subnet = EC2NetworkSubnet('subnet-11111111', "test_subnet", "pending")
self.driver.create_node(
name='foo',
image=image,
size=size,
ex_subnet=subnet,
ex_security_group_ids=[
'sg-11111111'
],
ex_assign_public_ip=True,
)
def test_create_node_with_ex_terminate_on_shutdown(self):
EC2MockHttp.type = 'create_ex_terminate_on_shutdown'
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
# The important part about the test is asserted inside
# EC2MockHttp._create_ex_terminate_on_shutdown
self.driver.create_node(name='foo', image=image, size=size, ex_terminate_on_shutdown=True)
def test_create_node_with_metadata(self):
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='foo',
image=image,
size=size,
ex_metadata={'Bar': 'baz', 'Num': '42'})
self.assertEqual(node.name, 'foo')
self.assertEqual(node.extra['tags']['Name'], 'foo')
self.assertEqual(node.extra['tags']['Bar'], 'baz')
self.assertEqual(node.extra['tags']['Num'], '42')
self.assertEqual(len(node.extra['tags']), 3)
    def test_create_node_idempotent(self):
        """Create a node with a client token, then verify that reusing the
        token with changed parameters raises IdempotentParamError."""
        EC2MockHttp.type = 'idempotent'
        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        token = 'testclienttoken'
        node = self.driver.create_node(name='foo', image=image, size=size,
                                       ex_clienttoken=token)
        self.assertEqual(node.id, 'i-2ba64342')
        self.assertEqual(node.extra['client_token'], token)
        # from: http://docs.amazonwebservices.com/AWSEC2/latest/DeveloperGuide/index.html?Run_Instance_Idempotency.html
        #    If you repeat the request with the same client token, but change
        #    another request parameter, Amazon EC2 returns an
        #    IdempotentParameterMismatch error.
        # In our case, changing the parameter doesn't actually matter since we
        # are forcing the error response fixture.
        EC2MockHttp.type = 'idempotent_mismatch'
        idem_error = None
        # different count
        try:
            self.driver.create_node(name='foo', image=image, size=size,
                                    ex_mincount='2', ex_maxcount='2',
                                    ex_clienttoken=token)
        except IdempotentParamError:
            # Capture via sys.exc_info() rather than an 'except ... as' name.
            e = sys.exc_info()[1]
            idem_error = e
        self.assertTrue(idem_error is not None)
def test_create_node_no_availability_zone(self):
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='foo', image=image, size=size)
location = NodeLocation(0, 'Amazon US N. Virginia', 'US', self.driver)
self.assertEqual(node.id, 'i-2ba64342')
node = self.driver.create_node(name='foo', image=image, size=size,
location=location)
self.assertEqual(node.id, 'i-2ba64342')
self.assertEqual(node.name, 'foo')
    def test_list_nodes(self):
        """list_nodes() must surface instance metadata from the fixture, and
        the ex_node_ids filter must return the requested instances."""
        node = self.driver.list_nodes()[0]
        public_ips = sorted(node.public_ips)
        self.assertEqual(node.id, 'i-4382922a')
        # This fixture instance's name equals its id — presumably the
        # driver's fallback when no Name tag is present (TODO confirm).
        self.assertEqual(node.name, node.id)
        self.assertEqual(len(node.public_ips), 2)
        self.assertEqual(node.extra['launch_time'],
                         '2013-12-02T11:58:11.000Z')
        self.assertTrue('instance_type' in node.extra)
        self.assertEqual(node.extra['availability'], 'us-east-1d')
        self.assertEqual(node.extra['key_name'], 'fauxkey')
        self.assertEqual(node.extra['monitoring'], 'disabled')
        self.assertEqual(node.extra['image_id'], 'ami-3215fe5a')
        self.assertEqual(len(node.extra['groups']), 2)
        self.assertEqual(len(node.extra['block_device_mapping']), 1)
        self.assertEqual(node.extra['block_device_mapping'][0]['device_name'], '/dev/sda1')
        self.assertEqual(node.extra['block_device_mapping'][0]['ebs']['volume_id'], 'vol-5e312311')
        self.assertTrue(node.extra['block_device_mapping'][0]['ebs']['delete'])
        self.assertEqual(public_ips[0], '1.2.3.4')
        # Filtering by explicit instance ids.
        nodes = self.driver.list_nodes(ex_node_ids=['i-4382922a',
                                                    'i-8474834a'])
        ret_node1 = nodes[0]
        ret_node2 = nodes[1]
        self.assertEqual(ret_node1.id, 'i-4382922a')
        self.assertEqual(ret_node2.id, 'i-8474834a')
        self.assertEqual(ret_node2.name, 'Test Server 2')
        self.assertEqual(ret_node2.extra['subnet_id'], 'subnet-5fd9d412')
        self.assertEqual(ret_node2.extra['vpc_id'], 'vpc-61dcd30e')
        self.assertEqual(ret_node2.extra['tags']['Group'], 'VPC Test')
        self.assertEqual(ret_node1.extra['launch_time'],
                         '2013-12-02T11:58:11.000Z')
        self.assertTrue('instance_type' in ret_node1.extra)
        self.assertEqual(ret_node2.extra['launch_time'],
                         '2013-12-02T15:58:29.000Z')
        self.assertTrue('instance_type' in ret_node2.extra)
def test_ex_list_reserved_nodes(self):
node = self.driver.ex_list_reserved_nodes()[0]
self.assertEqual(node.id, '93bbbca2-c500-49d0-9ede-9d8737400498')
self.assertEqual(node.state, 'active')
self.assertEqual(node.extra['instance_type'], 't1.micro')
self.assertEqual(node.extra['availability'], 'us-east-1b')
self.assertEqual(node.extra['start'], '2013-06-18T12:07:53.161Z')
self.assertEqual(node.extra['duration'], 31536000)
self.assertEqual(node.extra['usage_price'], 0.012)
self.assertEqual(node.extra['fixed_price'], 23.0)
self.assertEqual(node.extra['instance_count'], 1)
self.assertEqual(node.extra['description'], 'Linux/UNIX')
self.assertEqual(node.extra['instance_tenancy'], 'default')
self.assertEqual(node.extra['currency_code'], 'USD')
self.assertEqual(node.extra['offering_type'], 'Light Utilization')
def test_list_location(self):
locations = self.driver.list_locations()
self.assertTrue(len(locations) > 0)
self.assertEqual(locations[0].name, 'eu-west-1a')
self.assertTrue(locations[0].availability_zone is not None)
self.assertTrue(isinstance(locations[0].availability_zone,
ExEC2AvailabilityZone))
def test_list_security_groups(self):
groups = self.driver.ex_list_security_groups()
self.assertEqual(groups, ['WebServers', 'RangedPortsBySource'])
def test_ex_delete_security_group_by_id(self):
group_id = 'sg-443d0a12'
retValue = self.driver.ex_delete_security_group_by_id(group_id)
self.assertTrue(retValue)
def test_delete_security_group_by_name(self):
group_name = 'WebServers'
retValue = self.driver.ex_delete_security_group_by_name(group_name)
self.assertTrue(retValue)
def test_ex_delete_security_group(self):
name = 'WebServers'
retValue = self.driver.ex_delete_security_group(name)
self.assertTrue(retValue)
def test_authorize_security_group(self):
resp = self.driver.ex_authorize_security_group('TestGroup', '22', '22',
'0.0.0.0/0')
self.assertTrue(resp)
def test_authorize_security_group_ingress(self):
ranges = ['1.1.1.1/32', '2.2.2.2/32']
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, cidr_ips=ranges)
self.assertTrue(resp)
groups = [{'group_id': 'sg-949265ff'}]
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 23, group_pairs=groups)
self.assertTrue(resp)
def test_authorize_security_group_egress(self):
ranges = ['1.1.1.1/32', '2.2.2.2/32']
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, cidr_ips=ranges)
self.assertTrue(resp)
groups = [{'group_id': 'sg-949265ff'}]
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, group_pairs=groups)
self.assertTrue(resp)
def test_revoke_security_group_ingress(self):
ranges = ['1.1.1.1/32', '2.2.2.2/32']
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, cidr_ips=ranges)
self.assertTrue(resp)
groups = [{'group_id': 'sg-949265ff'}]
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, group_pairs=groups)
self.assertTrue(resp)
def test_revoke_security_group_egress(self):
ranges = ['1.1.1.1/32', '2.2.2.2/32']
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, cidr_ips=ranges)
self.assertTrue(resp)
groups = [{'group_id': 'sg-949265ff'}]
resp = self.driver.ex_authorize_security_group_ingress('sg-42916629', 22, 22, group_pairs=groups)
self.assertTrue(resp)
def test_reboot_node(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_ex_start_node(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
ret = self.driver.ex_start_node(node)
self.assertTrue(ret)
def test_ex_stop_node(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
ret = self.driver.ex_stop_node(node)
self.assertTrue(ret)
def test_ex_create_node_with_ex_blockdevicemappings(self):
EC2MockHttp.type = 'create_ex_blockdevicemappings'
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
mappings = [
{'DeviceName': '/dev/sda1', 'Ebs.VolumeSize': 10},
{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
{'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}
]
node = self.driver.create_node(name='foo', image=image, size=size,
ex_blockdevicemappings=mappings)
self.assertEqual(node.id, 'i-2ba64342')
def test_ex_create_node_with_ex_blockdevicemappings_attribute_error(self):
EC2MockHttp.type = 'create_ex_blockdevicemappings'
image = NodeImage(id='ami-be3adfd7',
name=self.image_name,
driver=self.driver)
size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
driver=self.driver)
mappings = 'this should be a list'
self.assertRaises(AttributeError, self.driver.create_node, name='foo',
image=image, size=size,
ex_blockdevicemappings=mappings)
mappings = ['this should be a dict']
self.assertRaises(AttributeError, self.driver.create_node, name='foo',
image=image, size=size,
ex_blockdevicemappings=mappings)
def test_destroy_node(self):
node = Node('i-4382922a', None, None, None, None, self.driver)
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_list_sizes(self):
region_old = self.driver.region_name
names = [
('ec2_us_east', 'us-east-1'),
('ec2_us_west', 'us-west-1'),
('ec2_eu_west', 'eu-west-1'),
('ec2_ap_southeast', 'ap-southeast-1'),
('ec2_ap_northeast', 'ap-northeast-1'),
('ec2_ap_southeast_2', 'ap-southeast-2')
]
for api_name, region_name in names:
self.driver.api_name = api_name
self.driver.region_name = region_name
sizes = self.driver.list_sizes()
ids = [s.id for s in sizes]
self.assertTrue('t1.micro' in ids)
self.assertTrue('m1.small' in ids)
self.assertTrue('m1.large' in ids)
self.assertTrue('m1.xlarge' in ids)
self.assertTrue('c1.medium' in ids)
self.assertTrue('c1.xlarge' in ids)
self.assertTrue('m2.xlarge' in ids)
self.assertTrue('m2.2xlarge' in ids)
self.assertTrue('m2.4xlarge' in ids)
if region_name == 'us-east-1':
self.assertEqual(len(sizes), 53)
self.assertTrue('cg1.4xlarge' in ids)
self.assertTrue('cc2.8xlarge' in ids)
self.assertTrue('cr1.8xlarge' in ids)
elif region_name == 'us-west-1':
self.assertEqual(len(sizes), 45)
if region_name == 'us-west-2':
self.assertEqual(len(sizes), 41)
elif region_name == 'ap-southeast-1':
self.assertEqual(len(sizes), 43)
elif region_name == 'ap-southeast-2':
self.assertEqual(len(sizes), 47)
elif region_name == 'eu-west-1':
self.assertEqual(len(sizes), 51)
self.driver.region_name = region_old
    def test_ex_create_node_with_ex_iam_profile(self):
        """ex_iam_profile may be given by name or by ARN; both resolve to the
        same profile id, and omitting it leaves the field empty."""
        iamProfile = {
            'id': 'AIDGPMS9RO4H3FEXAMPLE',
            'name': 'Foo',
            'arn': 'arn:aws:iam:...'
        }
        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        # First creation without a profile, then two with name/ARN variants.
        EC2MockHttp.type = None
        node1 = self.driver.create_node(name='foo', image=image, size=size)
        EC2MockHttp.type = 'ex_iam_profile'
        node2 = self.driver.create_node(name='bar', image=image, size=size,
                                        ex_iam_profile=iamProfile['name'])
        node3 = self.driver.create_node(name='bar', image=image, size=size,
                                        ex_iam_profile=iamProfile['arn'])
        self.assertFalse(node1.extra['iam_profile'])
        self.assertEqual(node2.extra['iam_profile'], iamProfile['id'])
        self.assertEqual(node3.extra['iam_profile'], iamProfile['id'])
    def test_list_images(self):
        """Both fixture images must be parsed, including their EBS and
        ephemeral block-device mappings."""
        images = self.driver.list_images()
        self.assertEqual(len(images), 2)
        location = '123456788908/Test Image'
        self.assertEqual(images[0].id, 'ami-57ba933a')
        self.assertEqual(images[0].name, 'Test Image')
        self.assertEqual(images[0].extra['image_location'], location)
        self.assertEqual(images[0].extra['architecture'], 'x86_64')
        self.assertEqual(len(images[0].extra['block_device_mapping']), 2)
        # Second mapping entry of the first image is the ephemeral device.
        ephemeral = images[0].extra['block_device_mapping'][1]['virtual_name']
        self.assertEqual(ephemeral, 'ephemeral0')
        location = '123456788908/Test Image 2'
        self.assertEqual(images[1].id, 'ami-85b2a8ae')
        self.assertEqual(images[1].name, 'Test Image 2')
        self.assertEqual(images[1].extra['image_location'], location)
        self.assertEqual(images[1].extra['architecture'], 'x86_64')
        size = images[1].extra['block_device_mapping'][0]['ebs']['volume_size']
        self.assertEqual(size, 20)
def test_list_images_with_image_ids(self):
EC2MockHttp.type = 'ex_imageids'
images = self.driver.list_images(ex_image_ids=['ami-57ba933a'])
self.assertEqual(len(images), 1)
self.assertEqual(images[0].name, 'Test Image')
def test_list_images_with_executable_by(self):
images = self.driver.list_images(ex_executableby='self')
self.assertEqual(len(images), 2)
def test_get_image(self):
image = self.driver.get_image('ami-57ba933a')
self.assertEqual(image.id, 'ami-57ba933a')
self.assertEqual(image.name, 'Test Image')
self.assertEqual(image.extra['architecture'], 'x86_64')
self.assertEqual(len(image.extra['block_device_mapping']), 2)
def test_copy_image(self):
image = self.driver.list_images()[0]
resp = self.driver.copy_image(image, 'us-east-1',
name='Faux Image',
description='Test Image Copy')
self.assertEqual(resp.id, 'ami-4db38224')
def test_create_image(self):
node = self.driver.list_nodes()[0]
mapping = [{'VirtualName': None,
'Ebs': {'VolumeSize': 10,
'VolumeType': 'standard',
'DeleteOnTermination': 'true'},
'DeviceName': '/dev/sda1'}]
resp = self.driver.create_image(node,
'New Image',
description='New EBS Image',
block_device_mapping=mapping)
self.assertEqual(resp.id, 'ami-e9b38280')
def test_create_image_no_mapping(self):
    """create_image() also works when no block device mapping is supplied."""
    node = self.driver.list_nodes()[0]
    resp = self.driver.create_image(node,
                                    'New Image',
                                    description='New EBS Image')
    self.assertEqual(resp.id, 'ami-e9b38280')
def test_delete_image(self):
    """delete_image() deregisters an AMI and reports success.

    Bug fix: the method was named ``delete_image`` without the ``test_``
    prefix, so the unittest runner never collected or executed it.
    """
    images = self.driver.list_images()
    image = images[0]
    resp = self.driver.delete_image(image)
    self.assertTrue(resp)
def ex_register_image(self):
    # NOTE(review): missing the ``test_`` prefix, so this test is never
    # collected by the unittest runner. Before renaming it, confirm the
    # mock HTTP class exposes a handler matching the 'RegisterImage'
    # Action (it currently defines ``_RegisterImages``), otherwise the
    # enabled test would fail on dispatch.
    mapping = [{'DeviceName': '/dev/sda1',
                'Ebs': {'SnapshotId': 'snap-5ade3e4e'}}]
    image = self.driver.ex_register_image(name='Test Image',
                                          root_device_name='/dev/sda1',
                                          description='My Image',
                                          architecture='x86_64',
                                          block_device_mapping=mapping)
    self.assertEqual(image.id, 'ami-57c2fb3e')
def test_ex_list_availability_zones(self):
    """ex_list_availability_zones() parses zone name, state and region."""
    availability_zones = self.driver.ex_list_availability_zones()
    availability_zone = availability_zones[0]
    self.assertTrue(len(availability_zones) > 0)
    self.assertEqual(availability_zone.name, 'eu-west-1a')
    self.assertEqual(availability_zone.zone_state, 'available')
    self.assertEqual(availability_zone.region_name, 'eu-west-1')
def test_list_keypairs(self):
    """list_key_pairs() and the deprecated ex_list_keypairs() agree."""
    keypairs = self.driver.list_key_pairs()
    self.assertEqual(len(keypairs), 1)
    self.assertEqual(keypairs[0].name, 'gsg-keypair')
    self.assertEqual(keypairs[0].fingerprint, null_fingerprint)
    # Test old deprecated method
    keypairs = self.driver.ex_list_keypairs()
    self.assertEqual(len(keypairs), 1)
    self.assertEqual(keypairs[0]['keyName'], 'gsg-keypair')
    self.assertEqual(keypairs[0]['keyFingerprint'], null_fingerprint)
def test_get_key_pair(self):
    """get_key_pair() looks up a single key pair by name."""
    EC2MockHttp.type = 'get_one'
    pair = self.driver.get_key_pair(name='gsg-keypair')
    self.assertEqual(pair.name, 'gsg-keypair')
def test_get_key_pair_does_not_exist(self):
    """get_key_pair() raises KeyPairDoesNotExistError for unknown names."""
    EC2MockHttp.type = 'doesnt_exist'
    self.assertRaises(KeyPairDoesNotExistError, self.driver.get_key_pair,
                      name='test-key-pair')
def test_create_key_pair(self):
    """create_key_pair() returns the new pair with fingerprint and key."""
    key_pair = self.driver.create_key_pair(name='test-keypair')
    fingerprint = ('1f:51:ae:28:bf:89:e9:d8:1f:25:5d'
                   ':37:2d:7d:b8:ca:9f:f5:f1:6f')
    # NOTE(review): the asserted name 'my-key-pair' differs from the
    # requested 'test-keypair'; presumably it mirrors the canned
    # create_key_pair.xml fixture — verify against the fixture.
    self.assertEqual(key_pair.name, 'my-key-pair')
    self.assertEqual(key_pair.fingerprint, fingerprint)
    self.assertTrue(key_pair.private_key is not None)
    # Test old and deprecated method
    key_pair = self.driver.ex_create_keypair(name='test-keypair')
    self.assertEqual(key_pair['keyFingerprint'], fingerprint)
    self.assertTrue(key_pair['keyMaterial'] is not None)
def test_ex_describe_all_keypairs(self):
    """ex_describe_all_keypairs() returns just the key pair names."""
    names = self.driver.ex_describe_all_keypairs()
    self.assertEqual(names, ['gsg-keypair'])
def test_list_key_pairs(self):
    """list_key_pairs() and deprecated ex_describe_keypairs() agree."""
    keypair1 = self.driver.list_key_pairs()[0]
    self.assertEqual(keypair1.name, 'gsg-keypair')
    self.assertEqual(keypair1.fingerprint, null_fingerprint)
    # Test backward compatibility
    keypair2 = self.driver.ex_describe_keypairs('gsg-keypair')
    self.assertEqual(keypair2['keyName'], 'gsg-keypair')
    self.assertEqual(keypair2['keyFingerprint'], null_fingerprint)
def test_delete_key_pair(self):
    """delete_key_pair() and deprecated ex_delete_keypair() both succeed."""
    keypair = self.driver.list_key_pairs()[0]
    success = self.driver.delete_key_pair(keypair)
    self.assertTrue(success)
    # Test old and deprecated method
    resp = self.driver.ex_delete_keypair('gsg-keypair')
    self.assertTrue(resp)
def test_ex_describe_tags(self):
    """ex_describe_tags() returns the tag keys attached to a node."""
    node = Node('i-4382922a', None, None, None, None, self.driver)
    tags = self.driver.ex_describe_tags(resource=node)
    self.assertEqual(len(tags), 3)
    self.assertTrue('tag' in tags)
    self.assertTrue('owner' in tags)
    self.assertTrue('stack' in tags)
def test_import_key_pair_from_string(self):
    """import_key_pair_from_string() uploads public key material."""
    path = os.path.join(os.path.dirname(__file__), 'fixtures', 'misc',
                        'dummy_rsa.pub')
    with open(path, 'r') as fp:
        key_material = fp.read()
    key = self.driver.import_key_pair_from_string(name='keypair',
                                                  key_material=key_material)
    self.assertEqual(key.name, 'keypair')
    self.assertEqual(key.fingerprint, null_fingerprint)
    # Test old and deprecated method
    key = self.driver.ex_import_keypair_from_string('keypair',
                                                    key_material)
    self.assertEqual(key['keyName'], 'keypair')
    self.assertEqual(key['keyFingerprint'], null_fingerprint)
def test_import_key_pair_from_file(self):
    """import_key_pair_from_file() uploads a public key read from disk."""
    path = os.path.join(os.path.dirname(__file__), 'fixtures', 'misc',
                        'dummy_rsa.pub')
    key = self.driver.import_key_pair_from_file('keypair', path)
    self.assertEqual(key.name, 'keypair')
    self.assertEqual(key.fingerprint, null_fingerprint)
    # Test old and deprecated method
    key = self.driver.ex_import_keypair('keypair', path)
    self.assertEqual(key['keyName'], 'keypair')
    self.assertEqual(key['keyFingerprint'], null_fingerprint)
def test_ex_create_tags(self):
    """ex_create_tags() issues a CreateTags call without raising."""
    target = Node('i-4382922a', None, None, None, None, self.driver)
    self.driver.ex_create_tags(target, {'sample': 'tag'})
def test_ex_delete_tags(self):
    """ex_delete_tags() issues a DeleteTags call without raising."""
    target = Node('i-4382922a', None, None, None, None, self.driver)
    self.driver.ex_delete_tags(target, {'sample': 'tag'})
def test_ex_describe_addresses_for_node(self):
    """ex_describe_addresses_for_node() maps nodes to 0, 1 or N elastic IPs."""
    node1 = Node('i-4382922a', None, None, None, None, self.driver)
    ip_addresses1 = self.driver.ex_describe_addresses_for_node(node1)
    node2 = Node('i-4382922b', None, None, None, None, self.driver)
    ip_addresses2 = sorted(
        self.driver.ex_describe_addresses_for_node(node2))
    # node3 has no addresses in the fixture.
    node3 = Node('i-4382922g', None, None, None, None, self.driver)
    ip_addresses3 = sorted(
        self.driver.ex_describe_addresses_for_node(node3))
    self.assertEqual(len(ip_addresses1), 1)
    self.assertEqual(ip_addresses1[0], '1.2.3.4')
    self.assertEqual(len(ip_addresses2), 2)
    self.assertEqual(ip_addresses2[0], '1.2.3.5')
    self.assertEqual(ip_addresses2[1], '1.2.3.6')
    self.assertEqual(len(ip_addresses3), 0)
def test_ex_describe_addresses(self):
    """ex_describe_addresses() returns a node-id -> IP-list mapping."""
    node1 = Node('i-4382922a', None, None, None, None, self.driver)
    node2 = Node('i-4382922g', None, None, None, None, self.driver)
    nodes_elastic_ips1 = self.driver.ex_describe_addresses([node1])
    nodes_elastic_ips2 = self.driver.ex_describe_addresses([node2])
    self.assertEqual(len(nodes_elastic_ips1), 1)
    self.assertTrue(node1.id in nodes_elastic_ips1)
    self.assertEqual(nodes_elastic_ips1[node1.id], ['1.2.3.4'])
    # Nodes without addresses still get an (empty) entry.
    self.assertEqual(len(nodes_elastic_ips2), 1)
    self.assertTrue(node2.id in nodes_elastic_ips2)
    self.assertEqual(nodes_elastic_ips2[node2.id], [])
def test_ex_describe_all_addresses(self):
    """ex_describe_all_addresses() lists all or only-associated elastic IPs."""
    EC2MockHttp.type = 'all_addresses'
    elastic_ips1 = self.driver.ex_describe_all_addresses()
    elastic_ips2 = self.driver.ex_describe_all_addresses(
        only_associated=True)
    self.assertEqual('1.2.3.7', elastic_ips1[3].ip)
    self.assertEqual('vpc', elastic_ips1[3].domain)
    self.assertEqual('eipalloc-992a5cf8', elastic_ips1[3].extra['allocation_id'])
    self.assertEqual(len(elastic_ips2), 2)
    self.assertEqual('1.2.3.5', elastic_ips2[1].ip)
    self.assertEqual('vpc', elastic_ips2[1].domain)
def test_ex_allocate_address(self):
    """ex_allocate_address() supports both 'standard' and 'vpc' domains."""
    elastic_ip = self.driver.ex_allocate_address()
    self.assertEqual('192.0.2.1', elastic_ip.ip)
    self.assertEqual('standard', elastic_ip.domain)
    EC2MockHttp.type = 'vpc'
    elastic_ip = self.driver.ex_allocate_address(domain='vpc')
    self.assertEqual('192.0.2.2', elastic_ip.ip)
    self.assertEqual('vpc', elastic_ip.domain)
    self.assertEqual('eipalloc-666d7f04', elastic_ip.extra['allocation_id'])
def test_ex_release_address(self):
    """ex_release_address() works per-domain and rejects unknown domains."""
    EC2MockHttp.type = 'all_addresses'
    elastic_ips = self.driver.ex_describe_all_addresses()
    EC2MockHttp.type = ''
    ret = self.driver.ex_release_address(elastic_ips[2])
    self.assertTrue(ret)
    ret = self.driver.ex_release_address(elastic_ips[0], domain='vpc')
    self.assertTrue(ret)
    # Anything other than 'standard'/'vpc' must raise.
    self.assertRaises(AttributeError,
                      self.driver.ex_release_address,
                      elastic_ips[0],
                      domain='bogus')
def test_ex_associate_address_with_node(self):
    """ex_associate_address_with_node() and its alias handle both domains."""
    node = Node('i-4382922a', None, None, None, None, self.driver)
    EC2MockHttp.type = 'all_addresses'
    elastic_ips = self.driver.ex_describe_all_addresses()
    EC2MockHttp.type = ''
    # Standard domain: association returns no id.
    ret1 = self.driver.ex_associate_address_with_node(
        node, elastic_ips[2])
    ret2 = self.driver.ex_associate_addresses(
        node, elastic_ips[2])
    self.assertEqual(None, ret1)
    self.assertEqual(None, ret2)
    # VPC domain: association returns the association id.
    EC2MockHttp.type = 'vpc'
    ret3 = self.driver.ex_associate_address_with_node(
        node, elastic_ips[3], domain='vpc')
    ret4 = self.driver.ex_associate_addresses(
        node, elastic_ips[3], domain='vpc')
    self.assertEqual('eipassoc-167a8073', ret3)
    self.assertEqual('eipassoc-167a8073', ret4)
    self.assertRaises(AttributeError,
                      self.driver.ex_associate_address_with_node,
                      node,
                      elastic_ips[1],
                      domain='bogus')
def test_ex_disassociate_address(self):
    """ex_disassociate_address() works per-domain and rejects bad domains."""
    EC2MockHttp.type = 'all_addresses'
    elastic_ips = self.driver.ex_describe_all_addresses()
    EC2MockHttp.type = ''
    ret = self.driver.ex_disassociate_address(elastic_ips[2])
    self.assertTrue(ret)
    # Test a VPC disassociation
    ret = self.driver.ex_disassociate_address(elastic_ips[1],
                                              domain='vpc')
    self.assertTrue(ret)
    self.assertRaises(AttributeError,
                      self.driver.ex_disassociate_address,
                      elastic_ips[1],
                      domain='bogus')
def test_ex_change_node_size_same_size(self):
    """ex_change_node_size() must raise ValueError for an unchanged size."""
    size = NodeSize('m1.small', 'Small Instance',
                    None, None, None, None, driver=self.driver)
    node = Node('i-4382922a', None, None, None, None, self.driver,
                extra={'instancetype': 'm1.small'})
    try:
        self.driver.ex_change_node_size(node=node, new_size=size)
    except ValueError:
        pass
    else:
        self.fail('Same size was passed, but an exception was not thrown')
def test_ex_change_node_size(self):
    """ex_change_node_size() succeeds when the new size differs."""
    size = NodeSize('m1.large', 'Small Instance',
                    None, None, None, None, driver=self.driver)
    node = Node('i-4382922a', None, None, None, None, self.driver,
                extra={'instancetype': 'm1.small'})
    result = self.driver.ex_change_node_size(node=node, new_size=size)
    self.assertTrue(result)
def test_list_volumes(self):
    """list_volumes() maps EC2 volume states onto StorageVolumeState."""
    volumes = self.driver.list_volumes()
    self.assertEqual(len(volumes), 3)
    self.assertEqual('vol-10ae5e2b', volumes[0].id)
    self.assertEqual(1, volumes[0].size)
    self.assertEqual('available', volumes[0].extra['state'])
    self.assertEqual(StorageVolumeState.AVAILABLE, volumes[0].state)
    self.assertEqual('vol-v24bfh75', volumes[1].id)
    self.assertEqual(11, volumes[1].size)
    self.assertIsNone(volumes[1].extra['snapshot_id'])
    self.assertEqual('in-use', volumes[1].extra['state'])
    self.assertEqual(StorageVolumeState.INUSE, volumes[1].state)
    self.assertEqual('vol-b6c851ec', volumes[2].id)
    self.assertEqual(8, volumes[2].size)
    # Unrecognized raw states must map to UNKNOWN, not raise.
    self.assertEqual('some-unknown-status', volumes[2].extra['state'])
    self.assertEqual('i-d334b4b3', volumes[2].extra['instance_id'])
    self.assertEqual('/dev/sda1', volumes[2].extra['device'])
    self.assertEqual('snap-30d37269', volumes[2].extra['snapshot_id'])
    self.assertEqual(StorageVolumeState.UNKNOWN, volumes[2].state)
def test_create_volume(self):
    """create_volume() returns the new volume with a parsed create_time."""
    location = self.driver.list_locations()[0]
    vol = self.driver.create_volume(10, 'vol', location)
    self.assertEqual(10, vol.size)
    self.assertEqual('vol', vol.name)
    self.assertEqual('creating', vol.extra['state'])
    self.assertTrue(isinstance(vol.extra['create_time'], datetime))
def test_destroy_volume(self):
    """destroy_volume() issues DeleteVolume and reports success."""
    volume = StorageVolume(id='vol-4282672b', name='test',
                           state=StorageVolumeState.AVAILABLE,
                           size=10, driver=self.driver)
    self.assertTrue(self.driver.destroy_volume(volume))
def test_attach(self):
    """attach_volume() attaches a volume to a node at a device path."""
    vol = StorageVolume(id='vol-4282672b', name='test',
                        size=10, state=StorageVolumeState.AVAILABLE,
                        driver=self.driver)
    node = Node('i-4382922a', None, None, None, None, self.driver)
    retValue = self.driver.attach_volume(node, vol, '/dev/sdh')
    self.assertTrue(retValue)
def test_detach(self):
    """detach_volume() detaches an in-use volume."""
    vol = StorageVolume(id='vol-4282672b', name='test',
                        state=StorageVolumeState.INUSE,
                        size=10, driver=self.driver)
    retValue = self.driver.detach_volume(vol)
    self.assertTrue(retValue)
def test_create_volume_snapshot(self):
    """create_volume_snapshot() parses id, state and UTC creation time."""
    vol = StorageVolume(id='vol-4282672b', name='test',
                        state=StorageVolumeState.AVAILABLE,
                        size=10, driver=self.driver)
    snap = self.driver.create_volume_snapshot(
        vol, 'Test snapshot')
    self.assertEqual('snap-a7cb2hd9', snap.id)
    self.assertEqual(vol.size, snap.size)
    self.assertEqual('Test snapshot', snap.extra['name'])
    self.assertEqual(vol.id, snap.extra['volume_id'])
    self.assertEqual('pending', snap.extra['state'])
    self.assertEqual(VolumeSnapshotState.CREATING, snap.state)
    # 2013-08-15T16:22:30.000Z
    self.assertEqual(datetime(2013, 8, 15, 16, 22, 30, tzinfo=UTC), snap.created)
def test_list_snapshots(self):
    """list_snapshots() parses ids, states, sizes and tag metadata."""
    snaps = self.driver.list_snapshots()
    self.assertEqual(len(snaps), 3)
    self.assertEqual('snap-428abd35', snaps[0].id)
    self.assertEqual(VolumeSnapshotState.CREATING, snaps[0].state)
    self.assertEqual('vol-e020df80', snaps[0].extra['volume_id'])
    self.assertEqual(30, snaps[0].size)
    self.assertEqual('Daily Backup', snaps[0].extra['description'])
    self.assertEqual('snap-18349159', snaps[1].id)
    self.assertEqual(VolumeSnapshotState.AVAILABLE, snaps[1].state)
    self.assertEqual('vol-b5a2c1v9', snaps[1].extra['volume_id'])
    self.assertEqual(15, snaps[1].size)
    self.assertEqual('Weekly backup', snaps[1].extra['description'])
    self.assertEqual('DB Backup 1', snaps[1].extra['name'])
def test_list_volume_snapshots(self):
    """list_volume_snapshots() returns only snapshots of the given volume."""
    volume = self.driver.list_volumes()[0]
    # Fix: use a unittest assertion instead of a bare ``assert`` — the
    # bare statement is inconsistent with the rest of the file and is
    # silently stripped when Python runs with ``-O``.
    self.assertEqual(volume.id, 'vol-10ae5e2b')
    snapshots = self.driver.list_volume_snapshots(volume)
    self.assertEqual(len(snapshots), 1)
    self.assertEqual(snapshots[0].id, 'snap-18349160')
def test_destroy_snapshot(self):
    """VolumeSnapshot.destroy() proxies to the driver and succeeds."""
    snapshot = VolumeSnapshot(id='snap-428abd35', size=10,
                              driver=self.driver)
    self.assertTrue(snapshot.destroy())
def test_ex_modify_image_attribute(self):
    """ex_modify_image_attribute() accepts raw attribute parameters."""
    target = self.driver.list_images()[0]
    attributes = {'LaunchPermission.Add.1.Group': 'all'}
    self.assertTrue(
        self.driver.ex_modify_image_attribute(target, attributes))
def test_create_node_ex_security_groups(self):
    """create_node() accepts old and new security-group kwargs, not both."""
    EC2MockHttp.type = 'ex_security_groups'
    image = NodeImage(id='ami-be3adfd7',
                      name=self.image_name,
                      driver=self.driver)
    size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                    driver=self.driver)
    security_groups = ['group1', 'group2']
    # Old, deprecated argument name
    self.driver.create_node(name='foo', image=image, size=size,
                            ex_securitygroup=security_groups)
    # New argument name
    self.driver.create_node(name='foo', image=image, size=size,
                            ex_security_groups=security_groups)
    # Test old and new arguments are mutually exclusive
    self.assertRaises(ValueError, self.driver.create_node,
                      name='foo', image=image, size=size,
                      ex_securitygroup=security_groups,
                      ex_security_groups=security_groups)
def test_create_node_ex_security_group_ids(self):
    """create_node(ex_security_group_ids=...) requires a subnet."""
    EC2MockHttp.type = 'ex_security_group_ids'
    image = NodeImage(id='ami-be3adfd7',
                      name=self.image_name,
                      driver=self.driver)
    size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                    driver=self.driver)
    subnet = EC2NetworkSubnet(12345, "test_subnet", "pending")
    security_groups = ['sg-1aa11a1a', 'sg-2bb22b2b']
    self.driver.create_node(name='foo', image=image, size=size,
                            ex_security_group_ids=security_groups,
                            ex_subnet=subnet)
    # Group ids without a subnet must be rejected.
    self.assertRaises(ValueError, self.driver.create_node,
                      name='foo', image=image, size=size,
                      ex_security_group_ids=security_groups)
def test_ex_get_metadata_for_node(self):
    """ex_get_metadata_for_node() returns tags including the Name tag."""
    image = NodeImage(id='ami-be3adfd7',
                      name=self.image_name,
                      driver=self.driver)
    size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                    driver=self.driver)
    node = self.driver.create_node(name='foo',
                                   image=image,
                                   size=size,
                                   ex_metadata={'Bar': 'baz', 'Num': '42'})
    metadata = self.driver.ex_get_metadata_for_node(node)
    # 'Name' plus the two ex_metadata entries.
    self.assertEqual(metadata['Name'], 'foo')
    self.assertEqual(metadata['Bar'], 'baz')
    self.assertEqual(metadata['Num'], '42')
    self.assertEqual(len(metadata), 3)
def test_ex_get_limits(self):
    """ex_get_limits() parses account resource limits."""
    limits = self.driver.ex_get_limits()
    expected = {
        'max-elastic-ips': 5,
        'max-instances': 20,
        'vpc-max-elastic-ips': 5,
    }
    self.assertEqual(limits['resource'], expected)
def test_ex_create_security_group(self):
    """ex_create_security_group() returns the new group's id."""
    created = self.driver.ex_create_security_group(
        "WebServers", "Rules to protect web nodes", "vpc-143cab4")
    self.assertEqual(created["group_id"], "sg-52e2f530")
def test_ex_create_placement_groups(self):
    """ex_create_placement_group() succeeds for a new group name."""
    self.assertTrue(self.driver.ex_create_placement_group("NewPG"))
def test_ex_delete_placement_groups(self):
    """ex_delete_placement_group() removes an existing group by name."""
    group = self.driver.ex_list_placement_groups()[0]
    self.assertTrue(self.driver.ex_delete_placement_group(group.name))
def test_ex_list_placement_groups(self):
    """ex_list_placement_groups() parses both fixture groups."""
    groups = self.driver.ex_list_placement_groups()
    self.assertEqual(len(groups), 2)
    self.assertIsInstance(groups[0], EC2PlacementGroup)
def test_ex_list_networks(self):
    """ex_list_networks() parses VPC id, name, CIDR and extra fields."""
    vpcs = self.driver.ex_list_networks()
    self.assertEqual(len(vpcs), 2)
    # Without a Name tag the VPC id doubles as the name.
    self.assertEqual('vpc-532335e1', vpcs[0].id)
    self.assertEqual('vpc-532335e1', vpcs[0].name)
    self.assertEqual('192.168.51.0/24', vpcs[0].cidr_block)
    self.assertEqual('available', vpcs[0].extra['state'])
    self.assertEqual('dopt-7eded312', vpcs[0].extra['dhcp_options_id'])
    self.assertEqual('vpc-62ded30e', vpcs[1].id)
    self.assertEqual('Test VPC', vpcs[1].name)
    self.assertEqual('192.168.52.0/24', vpcs[1].cidr_block)
    self.assertEqual('available', vpcs[1].extra['state'])
    self.assertEqual('dopt-7eded312', vpcs[1].extra['dhcp_options_id'])
def test_ex_list_networks_network_ids(self):
    """ex_list_networks(network_ids=...) forwards the id filter."""
    EC2MockHttp.type = 'network_ids'
    network_ids = ['vpc-532335e1']
    # We assert in the mock http method
    self.driver.ex_list_networks(network_ids=network_ids)
def test_ex_list_networks_filters(self):
    """ex_list_networks(filters=...) serializes filters into the query."""
    EC2MockHttp.type = 'filters'
    filters = {'dhcp-options-id': 'dopt-7eded312',  # matches two networks
               'cidr': '192.168.51.0/24'}  # matches one network
    # We assert in the mock http method
    self.driver.ex_list_networks(filters=filters)
def test_ex_create_network(self):
    """ex_create_network() creates a VPC and parses the pending result."""
    vpc = self.driver.ex_create_network('192.168.55.0/24',
                                        name='Test VPC',
                                        instance_tenancy='default')
    self.assertEqual('vpc-ad3527cf', vpc.id)
    self.assertEqual('192.168.55.0/24', vpc.cidr_block)
    self.assertEqual('pending', vpc.extra['state'])
def test_ex_delete_network(self):
    """ex_delete_network() removes an existing VPC."""
    target = self.driver.ex_list_networks()[0]
    self.assertTrue(self.driver.ex_delete_network(target))
def test_ex_list_subnets(self):
    """ex_list_subnets() parses subnet state and available IP counts."""
    subnets = self.driver.ex_list_subnets()
    self.assertEqual(len(subnets), 2)
    self.assertEqual('subnet-ce0e7ce5', subnets[0].id)
    self.assertEqual('available', subnets[0].state)
    self.assertEqual(123, subnets[0].extra['available_ips'])
    self.assertEqual('subnet-ce0e7ce6', subnets[1].id)
    self.assertEqual('available', subnets[1].state)
    self.assertEqual(59, subnets[1].extra['available_ips'])
def test_ex_create_subnet(self):
    """ex_create_subnet() creates a subnet in a VPC/zone and parses it."""
    subnet = self.driver.ex_create_subnet('vpc-532135d1',
                                          '192.168.51.128/26',
                                          'us-east-1b',
                                          name='Test Subnet')
    self.assertEqual('subnet-ce0e7ce6', subnet.id)
    self.assertEqual('pending', subnet.state)
    self.assertEqual('vpc-532135d1', subnet.extra['vpc_id'])
def test_ex_delete_subnet(self):
    """ex_delete_subnet() removes an existing subnet."""
    target = self.driver.ex_list_subnets()[0]
    self.assertTrue(self.driver.ex_delete_subnet(subnet=target))
def test_ex_get_console_output(self):
    """ex_get_console_output() decodes the console output payload."""
    target = self.driver.list_nodes()[0]
    output = self.driver.ex_get_console_output(target)
    self.assertEqual('Test String', output['output'])
def test_ex_list_network_interfaces(self):
    """ex_list_network_interfaces() parses interface ids, states, MACs."""
    interfaces = self.driver.ex_list_network_interfaces()
    self.assertEqual(len(interfaces), 2)
    self.assertEqual('eni-18e6c05e', interfaces[0].id)
    self.assertEqual('in-use', interfaces[0].state)
    self.assertEqual('0e:6e:df:72:78:af',
                     interfaces[0].extra['mac_address'])
    self.assertEqual('eni-83e3c5c5', interfaces[1].id)
    self.assertEqual('in-use', interfaces[1].state)
    self.assertEqual('0e:93:0b:e9:e9:c4',
                     interfaces[1].extra['mac_address'])
def test_ex_create_network_interface(self):
    """ex_create_network_interface() creates an ENI in a subnet."""
    subnet = self.driver.ex_list_subnets()[0]
    interface = self.driver.ex_create_network_interface(
        subnet,
        name='Test Interface',
        description='My Test')
    self.assertEqual('eni-2b36086d', interface.id)
    self.assertEqual('pending', interface.state)
    self.assertEqual('0e:bd:49:3e:11:74', interface.extra['mac_address'])
def test_ex_delete_network_interface(self):
    """ex_delete_network_interface() removes an existing ENI."""
    target = self.driver.ex_list_network_interfaces()[0]
    self.assertTrue(self.driver.ex_delete_network_interface(target))
def test_ex_attach_network_interface_to_node(self):
    """ex_attach_network_interface_to_node() attaches at a device index."""
    node = self.driver.list_nodes()[0]
    interface = self.driver.ex_list_network_interfaces()[0]
    resp = self.driver.ex_attach_network_interface_to_node(interface,
                                                           node, 1)
    self.assertTrue(resp)
def test_ex_detach_network_interface(self):
    """ex_detach_network_interface() succeeds for an attachment id."""
    attachment_id = 'eni-attach-2b588b47'
    self.assertTrue(
        self.driver.ex_detach_network_interface(attachment_id))
def test_ex_list_internet_gateways(self):
    """ex_list_internet_gateways() parses gateway ids, state and VPC."""
    gateways = self.driver.ex_list_internet_gateways()
    self.assertEqual(len(gateways), 2)
    self.assertEqual('igw-84dd3ae1', gateways[0].id)
    self.assertEqual('igw-7fdae215', gateways[1].id)
    self.assertEqual('available', gateways[1].state)
    self.assertEqual('vpc-62cad41e', gateways[1].vpc_id)
def test_ex_create_internet_gateway(self):
    """ex_create_internet_gateway() returns the newly created gateway."""
    created = self.driver.ex_create_internet_gateway()
    self.assertEqual('igw-13ac2b36', created.id)
def test_ex_delete_internet_gateway(self):
    """ex_delete_internet_gateway() removes an existing gateway."""
    target = self.driver.ex_list_internet_gateways()[0]
    self.assertTrue(self.driver.ex_delete_internet_gateway(target))
def test_ex_attach_internet_gateway(self):
    """ex_attach_internet_gateway() attaches a gateway to a VPC."""
    gateway = self.driver.ex_list_internet_gateways()[0]
    network = self.driver.ex_list_networks()[0]
    resp = self.driver.ex_attach_internet_gateway(gateway, network)
    self.assertTrue(resp)
def test_ex_detach_internet_gateway(self):
    """ex_detach_internet_gateway() detaches a gateway from a VPC."""
    gateway = self.driver.ex_list_internet_gateways()[0]
    network = self.driver.ex_list_networks()[0]
    resp = self.driver.ex_detach_internet_gateway(gateway, network)
    self.assertTrue(resp)
class EC2USWest1Tests(EC2Tests):
    # Re-run the whole EC2Tests suite against the us-west-1 region.
    region = 'us-west-1'
class EC2USWest2Tests(EC2Tests):
    # Re-run the whole EC2Tests suite against the us-west-2 region.
    region = 'us-west-2'
class EC2EUWestTests(EC2Tests):
    # Re-run the whole EC2Tests suite against the eu-west-1 region.
    region = 'eu-west-1'
class EC2APSE1Tests(EC2Tests):
    # Re-run the whole EC2Tests suite against the ap-southeast-1 region.
    region = 'ap-southeast-1'
class EC2APNETests(EC2Tests):
    # Re-run the whole EC2Tests suite against the ap-northeast-1 region.
    region = 'ap-northeast-1'
class EC2APSE2Tests(EC2Tests):
    # Re-run the whole EC2Tests suite against the ap-southeast-2 region.
    region = 'ap-southeast-2'
class EC2SAEastTests(EC2Tests):
    # Re-run the whole EC2Tests suite against the sa-east-1 region.
    region = 'sa-east-1'
# Tests for the old, deprecated way of instantiating a driver.
class EC2OldStyleModelTests(EC2Tests):
    """Re-run EC2Tests through the old, deprecated per-region driver
    classes instead of the single driver + ``region`` argument."""
    driver_klass = EC2USWestNodeDriver

    def setUp(self):
        # Wire the mock HTTP transport into the driver's connection class
        # and reset per-test mock state before instantiating the driver.
        EC2MockHttp.test = self
        EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        self.driver = self.driver_klass(*EC2_PARAMS)
class EC2USWest1OldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated us-west-1 driver class.
    driver_klass = EC2USWestNodeDriver
class EC2USWest2OldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated us-west-2 (Oregon) driver class.
    driver_klass = EC2USWestOregonNodeDriver
class EC2EUWestOldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated eu-west-1 driver class.
    driver_klass = EC2EUNodeDriver
class EC2APSE1OldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated ap-southeast-1 driver class.
    driver_klass = EC2APSENodeDriver
class EC2APNEOldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated ap-northeast-1 driver class.
    driver_klass = EC2APNENodeDriver
class EC2APSE2OldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated ap-southeast-2 (Sydney) driver class.
    driver_klass = EC2APSESydneyNodeDriver
class EC2SAEastOldStyleModelTests(EC2OldStyleModelTests):
    # Deprecated sa-east-1 driver class.
    driver_klass = EC2SAEastNodeDriver
class EC2MockHttp(MockHttpTestCase):
fixtures = ComputeFileFixtures('ec2')
def _DescribeInstances(self, method, url, body, headers):
    # Canned response for the DescribeInstances action.
    body = self.fixtures.load('describe_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeReservedInstances(self, method, url, body, headers):
    # Canned response for the DescribeReservedInstances action.
    body = self.fixtures.load('describe_reserved_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeAvailabilityZones(self, method, url, body, headers):
    # Canned response for the DescribeAvailabilityZones action.
    body = self.fixtures.load('describe_availability_zones.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _RebootInstances(self, method, url, body, headers):
    # Canned response for the RebootInstances action.
    body = self.fixtures.load('reboot_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _StartInstances(self, method, url, body, headers):
    # Canned response for the StartInstances action.
    body = self.fixtures.load('start_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _StopInstances(self, method, url, body, headers):
    # Canned response for the StopInstances action.
    body = self.fixtures.load('stop_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeSecurityGroups(self, method, url, body, headers):
    # Canned response for the DescribeSecurityGroups action.
    body = self.fixtures.load('describe_security_groups.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DeleteSecurityGroup(self, method, url, body, headers):
    # Canned response for the DeleteSecurityGroup action.
    body = self.fixtures.load('delete_security_group.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _AuthorizeSecurityGroupIngress(self, method, url, body, headers):
    # Canned response for the AuthorizeSecurityGroupIngress action.
    body = self.fixtures.load('authorize_security_group_ingress.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeImages(self, method, url, body, headers):
    # Canned response for the DescribeImages action.
    body = self.fixtures.load('describe_images.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _RegisterImage(self, method, url, body, headers):
    # Canned response for the RegisterImage action.
    # Fix: renamed from ``_RegisterImages`` — this mock dispatches on the
    # 'Action' query parameter, and the EC2 API action is 'RegisterImage'
    # (singular), so the handler could never be invoked under its old name.
    body = self.fixtures.load('register_image.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ex_imageids_DescribeImages(self, method, url, body, headers):
    # DescribeImages response for the 'ex_imageids' mock type.
    body = self.fixtures.load('describe_images_ex_imageids.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _RunInstances(self, method, url, body, headers):
    # Canned response for the RunInstances action.
    body = self.fixtures.load('run_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _create_ex_assign_public_ip_RunInstances(self, method, url, body, headers):
    # Verify the ex_assign_public_ip path serializes a network interface
    # with a public IP, subnet and security group before replying.
    self.assertUrlContainsQueryParams(url, {
        'NetworkInterface.1.AssociatePublicIpAddress': "true",
        'NetworkInterface.1.DeleteOnTermination': "true",
        'NetworkInterface.1.DeviceIndex': "0",
        'NetworkInterface.1.SubnetId': "subnet-11111111",
        'NetworkInterface.1.SecurityGroupId.1': "sg-11111111",
    })
    body = self.fixtures.load('run_instances_with_subnet_and_security_group.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _create_ex_terminate_on_shutdown_RunInstances(self, method, url, body, headers):
    # Verify the terminate-on-shutdown flag is serialized into the query.
    self.assertUrlContainsQueryParams(url, {
        'InstanceInitiatedShutdownBehavior': 'terminate'
    })
    body = self.fixtures.load('run_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ex_security_groups_RunInstances(self, method, url, body, headers):
    # Verify both security-group names are serialized as indexed params.
    self.assertUrlContainsQueryParams(url, {'SecurityGroup.1': 'group1'})
    self.assertUrlContainsQueryParams(url, {'SecurityGroup.2': 'group2'})
    body = self.fixtures.load('run_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ex_security_group_ids_RunInstances(self, method, url, body, headers):
    # Verify both security-group ids are serialized as indexed params.
    self.assertUrlContainsQueryParams(url, {'SecurityGroupId.1': 'sg-1aa11a1a'})
    self.assertUrlContainsQueryParams(url, {'SecurityGroupId.2': 'sg-2bb22b2b'})
    body = self.fixtures.load('run_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _create_ex_blockdevicemappings_RunInstances(self, method, url, body, headers):
    # Verify EBS and ephemeral block device mappings are serialized as
    # indexed BlockDeviceMapping.* query parameters.
    expected_params = {
        'BlockDeviceMapping.1.DeviceName': '/dev/sda1',
        'BlockDeviceMapping.1.Ebs.VolumeSize': '10',
        'BlockDeviceMapping.2.DeviceName': '/dev/sdb',
        'BlockDeviceMapping.2.VirtualName': 'ephemeral0',
        'BlockDeviceMapping.3.DeviceName': '/dev/sdc',
        'BlockDeviceMapping.3.VirtualName': 'ephemeral1'
    }
    self.assertUrlContainsQueryParams(url, expected_params)
    body = self.fixtures.load('run_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _idempotent_RunInstances(self, method, url, body, headers):
    # RunInstances response for an idempotent (client-token) request.
    body = self.fixtures.load('run_instances_idem.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _idempotent_mismatch_RunInstances(self, method, url, body, headers):
    # Simulate a client-token mismatch: EC2 answers 400 Bad Request.
    body = self.fixtures.load('run_instances_idem_mismatch.xml')
    return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST])
def _ex_iam_profile_RunInstances(self, method, url, body, headers):
    # RunInstances response for a request with an IAM instance profile.
    body = self.fixtures.load('run_instances_iam_profile.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _TerminateInstances(self, method, url, body, headers):
    # Canned response for the TerminateInstances action.
    body = self.fixtures.load('terminate_instances.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeKeyPairs(self, method, url, body, headers):
    # Canned response for the DescribeKeyPairs action.
    body = self.fixtures.load('describe_key_pairs.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _get_one_DescribeKeyPairs(self, method, url, body, headers):
    # Verify a by-name lookup passes the KeyName parameter.
    self.assertUrlContainsQueryParams(url, {'KeyName': 'gsg-keypair'})
    body = self.fixtures.load('describe_key_pairs.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _doesnt_exist_DescribeKeyPairs(self, method, url, body, headers):
    # Simulate a lookup of a missing key pair: EC2 answers 400.
    body = self.fixtures.load('describe_key_pairs_doesnt_exist.xml')
    return (httplib.BAD_REQUEST, body, {},
            httplib.responses[httplib.BAD_REQUEST])
def _CreateKeyPair(self, method, url, body, headers):
    # Canned response for the CreateKeyPair action.
    body = self.fixtures.load('create_key_pair.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ImportKeyPair(self, method, url, body, headers):
    # Canned response for the ImportKeyPair action.
    body = self.fixtures.load('import_key_pair.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeTags(self, method, url, body, headers):
    # Canned response for the DescribeTags action.
    body = self.fixtures.load('describe_tags.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _CreateTags(self, method, url, body, headers):
    # Canned response for the CreateTags action.
    body = self.fixtures.load('create_tags.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DeleteTags(self, method, url, body, headers):
    # Canned response for the DeleteTags action.
    body = self.fixtures.load('delete_tags.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeAddresses(self, method, url, body, headers):
    # Canned response for the DescribeAddresses action (multiple nodes).
    body = self.fixtures.load('describe_addresses_multi.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _AllocateAddress(self, method, url, body, headers):
    # Canned response for the AllocateAddress action (standard domain).
    body = self.fixtures.load('allocate_address.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _vpc_AllocateAddress(self, method, url, body, headers):
    # AllocateAddress response for the 'vpc' mock type (VPC domain).
    body = self.fixtures.load('allocate_vpc_address.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _AssociateAddress(self, method, url, body, headers):
    # Canned response for the AssociateAddress action (standard domain).
    body = self.fixtures.load('associate_address.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _vpc_AssociateAddress(self, method, url, body, headers):
    # AssociateAddress response for the 'vpc' mock type (VPC domain).
    body = self.fixtures.load('associate_vpc_address.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DisassociateAddress(self, method, url, body, headers):
    # Canned response for the DisassociateAddress action.
    body = self.fixtures.load('disassociate_address.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ReleaseAddress(self, method, url, body, headers):
    # Canned response for the ReleaseAddress action.
    body = self.fixtures.load('release_address.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _all_addresses_DescribeAddresses(self, method, url, body, headers):
    # DescribeAddresses response for the 'all_addresses' mock type.
    body = self.fixtures.load('describe_addresses_all.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _WITH_TAGS_DescribeAddresses(self, method, url, body, headers):
    # DescribeAddresses response for the 'WITH_TAGS' mock type.
    body = self.fixtures.load('describe_addresses_multi.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ModifyInstanceAttribute(self, method, url, body, headers):
    # Canned response for the ModifyInstanceAttribute action.
    body = self.fixtures.load('modify_instance_attribute.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _idempotent_CreateTags(self, method, url, body, headers):
    # CreateTags response for the 'idempotent' mock type.
    body = self.fixtures.load('create_tags.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _CreateVolume(self, method, url, body, headers):
    # Canned response for the CreateVolume action.
    body = self.fixtures.load('create_volume.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DeleteVolume(self, method, url, body, headers):
    # Canned response for the DeleteVolume action.
    body = self.fixtures.load('delete_volume.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _AttachVolume(self, method, url, body, headers):
    # Canned response for the AttachVolume action.
    body = self.fixtures.load('attach_volume.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DetachVolume(self, method, url, body, headers):
    # Canned response for the DetachVolume action.
    body = self.fixtures.load('detach_volume.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeVolumes(self, method, url, body, headers):
    # Canned response for the DescribeVolumes action.
    body = self.fixtures.load('describe_volumes.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _CreateSnapshot(self, method, url, body, headers):
    # Canned response for the CreateSnapshot action.
    body = self.fixtures.load('create_snapshot.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeSnapshots(self, method, url, body, headers):
    # Canned response for the DescribeSnapshots action.
    body = self.fixtures.load('describe_snapshots.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DeleteSnapshot(self, method, url, body, headers):
body = self.fixtures.load('delete_snapshot.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _CopyImage(self, method, url, body, headers):
body = self.fixtures.load('copy_image.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _CreateImage(self, method, url, body, headers):
body = self.fixtures.load('create_image.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DeregisterImage(self, method, url, body, headers):
body = self.fixtures.load('deregister_image.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DeleteKeyPair(self, method, url, body, headers):
self.assertUrlContainsQueryParams(url, {'KeyName': 'gsg-keypair'})
body = self.fixtures.load('delete_key_pair.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ModifyImageAttribute(self, method, url, body, headers):
body = self.fixtures.load('modify_image_attribute.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeAccountAttributes(self, method, url, body, headers):
body = self.fixtures.load('describe_account_attributes.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _CreateSecurityGroup(self, method, url, body, headers):
body = self.fixtures.load('create_security_group.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeVpcs(self, method, url, body, headers):
body = self.fixtures.load('describe_vpcs.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _network_ids_DescribeVpcs(self, method, url, body, headers):
expected_params = {
'VpcId.1': 'vpc-532335e1'
}
self.assertUrlContainsQueryParams(url, expected_params)
body = self.fixtures.load('describe_vpcs.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _filters_DescribeVpcs(self, method, url, body, headers):
expected_params_1 = {
'Filter.1.Name': 'dhcp-options-id',
'Filter.1.Value.1': 'dopt-7eded312',
'Filter.2.Name': 'cidr',
'Filter.2.Value.1': '192.168.51.0/24'
}
expected_params_2 = {
'Filter.1.Name': 'cidr',
'Filter.1.Value.1': '192.168.51.0/24',
'Filter.2.Name': 'dhcp-options-id',
'Filter.2.Value.1': 'dopt-7eded312'
}
try:
self.assertUrlContainsQueryParams(url, expected_params_1)
except AssertionError:
# dict ordering is not guaranteed
self.assertUrlContainsQueryParams(url, expected_params_2)
body = self.fixtures.load('describe_vpcs.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # Remaining canned-fixture handlers: VPC, subnet, console output,
    # network interface, internet gateway and placement group actions.
    def _CreateVpc(self, method, url, body, headers):
        body = self.fixtures.load('create_vpc.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DeleteVpc(self, method, url, body, headers):
        body = self.fixtures.load('delete_vpc.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DescribeSubnets(self, method, url, body, headers):
        body = self.fixtures.load('describe_subnets.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _CreateSubnet(self, method, url, body, headers):
        body = self.fixtures.load('create_subnet.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DeleteSubnet(self, method, url, body, headers):
        body = self.fixtures.load('delete_subnet.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _GetConsoleOutput(self, method, url, body, headers):
        body = self.fixtures.load('get_console_output.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DescribeNetworkInterfaces(self, method, url, body, headers):
        body = self.fixtures.load('describe_network_interfaces.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _CreateNetworkInterface(self, method, url, body, headers):
        body = self.fixtures.load('create_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DeleteNetworkInterface(self, method, url, body, headers):
        body = self.fixtures.load('delete_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _AttachNetworkInterface(self, method, url, body, headers):
        body = self.fixtures.load('attach_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DetachNetworkInterface(self, method, url, body, headers):
        body = self.fixtures.load('detach_network_interface.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DescribeInternetGateways(self, method, url, body, headers):
        body = self.fixtures.load('describe_internet_gateways.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _CreateInternetGateway(self, method, url, body, headers):
        body = self.fixtures.load('create_internet_gateway.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DeleteInternetGateway(self, method, url, body, headers):
        body = self.fixtures.load('delete_internet_gateway.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _AttachInternetGateway(self, method, url, body, headers):
        body = self.fixtures.load('attach_internet_gateway.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DetachInternetGateway(self, method, url, body, headers):
        body = self.fixtures.load('detach_internet_gateway.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _CreatePlacementGroup(self, method, url, body, headers):
        body = self.fixtures.load('create_placement_groups.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DeletePlacementGroup(self, method, url, body, headers):
        body = self.fixtures.load('delete_placement_groups.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _DescribePlacementGroups(self, method, url, body, headers):
        body = self.fixtures.load('describe_placement_groups.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
class EucMockHttp(EC2MockHttp):
    """Mock HTTP connection for the Eucalyptus driver.

    Eucalyptus serves its EC2-compatible API under the
    /services/Eucalyptus/ path, so the handler names carry that prefix;
    the shared actions delegate to the plain EC2 handlers.
    """
    fixtures = ComputeFileFixtures('ec2')
    def _services_Eucalyptus_DescribeInstances(self, method, url, body,
                                               headers):
        return self._DescribeInstances(method, url, body, headers)
    def _services_Eucalyptus_DescribeImages(self, method, url, body,
                                            headers):
        return self._DescribeImages(method, url, body, headers)
    def _services_Eucalyptus_DescribeAddresses(self, method, url, body,
                                               headers):
        return self._DescribeAddresses(method, url, body, headers)
    def _services_Eucalyptus_RebootInstances(self, method, url, body,
                                             headers):
        return self._RebootInstances(method, url, body, headers)
    def _services_Eucalyptus_TerminateInstances(self, method, url, body,
                                                headers):
        return self._TerminateInstances(method, url, body, headers)
    def _services_Eucalyptus_RunInstances(self, method, url, body,
                                          headers):
        return self._RunInstances(method, url, body, headers)
    def _services_Eucalyptus_CreateTags(self, method, url, body,
                                        headers):
        return self._CreateTags(method, url, body, headers)
    def _services_Eucalyptus_DescribeInstanceTypes(self, method, url, body,
                                                   headers):
        # Eucalyptus-specific action: no EC2 equivalent, serve the fixture.
        body = self.fixtures.load('describe_instance_types.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
class NimbusTests(EC2Tests):
    """Run the EC2 test suite against the Nimbus driver.

    The overrides below replace the elastic-IP related tests, since
    Nimbus has no elastic IP support.
    """
    def setUp(self):
        # Route all HTTP traffic to the shared EC2 mock connection.
        NimbusNodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        self.driver = NimbusNodeDriver(key=EC2_PARAMS[0], secret=EC2_PARAMS[1],
                                       host='some.nimbuscloud.com')
    def test_ex_describe_addresses_for_node(self):
        # overridden from EC2Tests -- Nimbus doesn't support elastic IPs.
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ip_addresses = self.driver.ex_describe_addresses_for_node(node)
        self.assertEqual(len(ip_addresses), 0)
    def test_ex_describe_addresses(self):
        # overridden from EC2Tests -- Nimbus doesn't support elastic IPs.
        node = Node('i-4382922a', None, None, None, None, self.driver)
        nodes_elastic_ips = self.driver.ex_describe_addresses([node])
        # One entry per requested node, each with an empty IP list.
        self.assertEqual(len(nodes_elastic_ips), 1)
        self.assertEqual(len(nodes_elastic_ips[node.id]), 0)
    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        ids = [s.id for s in sizes]
        self.assertTrue('m1.small' in ids)
        self.assertTrue('m1.large' in ids)
        self.assertTrue('m1.xlarge' in ids)
    def test_list_nodes(self):
        # overridden from EC2Tests -- Nimbus doesn't support elastic IPs.
        node = self.driver.list_nodes()[0]
        self.assertExecutedMethodCount(0)
        public_ips = node.public_ips
        self.assertEqual(node.id, 'i-4382922a')
        self.assertEqual(len(node.public_ips), 1)
        self.assertEqual(public_ips[0], '1.2.3.4')
        self.assertEqual(node.extra['tags'], {})
        node = self.driver.list_nodes()[1]
        self.assertExecutedMethodCount(0)
        public_ips = node.public_ips
        self.assertEqual(node.id, 'i-8474834a')
        self.assertEqual(len(node.public_ips), 1)
        self.assertEqual(public_ips[0], '1.2.3.5')
        self.assertEqual(node.extra['tags'],
                         {'Name': 'Test Server 2', 'Group': 'VPC Test'})
    def test_ex_create_tags(self):
        # Nimbus doesn't support creating tags so this one should be a
        # passthrough
        node = self.driver.list_nodes()[0]
        self.driver.ex_create_tags(resource=node, tags={'foo': 'bar'})
        self.assertExecutedMethodCount(0)
class EucTests(LibcloudTestCase, TestCaseMixin):
    """Tests for the Eucalyptus driver (EC2-compatible API, v3.4.1)."""
    def setUp(self):
        # Route all HTTP traffic to the Eucalyptus-flavoured mock connection.
        EucNodeDriver.connectionCls.conn_classes = (None, EucMockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        key, secret = EC2_PARAMS[0], EC2_PARAMS[1]
        self.driver = EucNodeDriver(key=key, secret=secret,
                                    host='some.eucalyptus.com',
                                    api_version='3.4.1')
    def test_list_locations_response(self):
        # Listing locations is unsupported by this driver and must raise.
        try:
            self.driver.list_locations()
        except Exception:
            pass
        else:
            self.fail('Exception was not thrown')
    def test_list_location(self):
        pass
    def test_list_sizes(self):
        size_ids = [size.id for size in self.driver.list_sizes()]
        self.assertEqual(len(size_ids), 18)
        self.assertIn('t1.micro', size_ids)
        self.assertIn('m1.medium', size_ids)
        self.assertIn('m3.xlarge', size_ids)
class OutscaleTests(EC2Tests):
    """Run the EC2 test suite against the Outscale SAS driver.

    The overrides below cover operations Outscale does not implement
    (image copy, limits, network-interface management) which are expected
    to raise NotImplementedError, plus VPC creation without the
    instance_tenancy parameter.
    """
    def setUp(self):
        # Route all HTTP traffic to the shared EC2 mock connection.
        OutscaleSASNodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        self.driver = OutscaleSASNodeDriver(key=EC2_PARAMS[0],
                                            secret=EC2_PARAMS[1],
                                            host='some.outscalecloud.com')
    def test_ex_create_network(self):
        # overridden from EC2Tests -- Outscale don't support instance_tenancy
        vpc = self.driver.ex_create_network('192.168.55.0/24',
                                            name='Test VPC')
        self.assertEqual('vpc-ad3527cf', vpc.id)
        self.assertEqual('192.168.55.0/24', vpc.cidr_block)
        self.assertEqual('pending', vpc.extra['state'])
    def test_ex_copy_image(self):
        # overridden from EC2Tests -- Outscale does not support copying images
        image = self.driver.list_images()[0]
        try:
            self.driver.ex_copy_image('us-east-1', image,
                                      name='Faux Image',
                                      description='Test Image Copy')
        except NotImplementedError:
            pass
        else:
            self.fail('Exception was not thrown')
    def test_ex_get_limits(self):
        # overridden from EC2Tests -- Outscale does not support getting limits
        try:
            self.driver.ex_get_limits()
        except NotImplementedError:
            pass
        else:
            self.fail('Exception was not thrown')
    def test_ex_create_network_interface(self):
        # overridden from EC2Tests -- Outscale don't allow creating interfaces
        subnet = self.driver.ex_list_subnets()[0]
        try:
            self.driver.ex_create_network_interface(
                subnet,
                name='Test Interface',
                description='My Test')
        except NotImplementedError:
            pass
        else:
            self.fail('Exception was not thrown')
    def test_ex_delete_network_interface(self):
        # overridden from EC2Tests -- Outscale don't allow deleting interfaces
        interface = self.driver.ex_list_network_interfaces()[0]
        try:
            self.driver.ex_delete_network_interface(interface)
        except NotImplementedError:
            pass
        else:
            self.fail('Exception was not thrown')
    def test_ex_attach_network_interface_to_node(self):
        # overridden from EC2Tests -- Outscale don't allow attaching interfaces
        node = self.driver.list_nodes()[0]
        interface = self.driver.ex_list_network_interfaces()[0]
        try:
            self.driver.ex_attach_network_interface_to_node(interface, node, 1)
        except NotImplementedError:
            pass
        else:
            self.fail('Exception was not thrown')
    def test_ex_detach_network_interface(self):
        # overridden from EC2Tests -- Outscale don't allow detaching interfaces
        try:
            self.driver.ex_detach_network_interface('eni-attach-2b588b47')
        except NotImplementedError:
            pass
        else:
            self.fail('Exception was not thrown')
    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        ids = [s.id for s in sizes]
        self.assertTrue('m1.small' in ids)
        self.assertTrue('m1.large' in ids)
        self.assertTrue('m1.xlarge' in ids)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    sys.exit(unittest.main())
| |
from django.db import models
from django.contrib import auth
from threading import Lock
import inspect
from vt_manager.utils.EthernetUtils import EthernetUtils
from vt_manager.utils.MutexStore import MutexStore
from vt_manager.models.MacSlot import MacSlot
'''
@author: msune
'''
class MacRange(models.Model):
from vt_manager.models.MacSlot import MacSlot
"""MacRange"""
class Meta:
"""Meta Class for your model."""
app_label = 'vt_manager'
'''
Private attributes
'''
#Range name
name = models.CharField(max_length = 255, default="", verbose_name = "Range name",unique=True)
isGlobal = models.BooleanField(verbose_name="Global range",default=1,help_text="Globa ranges will be used by servers which are not subscribed to any specific range")
#Range parameters
startMac = models.CharField(verbose_name="Start Mac Address", max_length = 17, default="", validators=[EthernetUtils.checkValidMac])
endMac = models.CharField(verbose_name="End Mac Address", max_length = 17, default="", validators=[EthernetUtils.checkValidMac])
#Pool of macs both assigned and excluded (particular case of assignment)
macs = models.ManyToManyField('MacSlot', blank = True, null = True, editable = False)
nextAvailableMac = models.CharField(verbose_name="Next Available Mac Address",max_length = 17, default="",editable=False)
#Statistics
numberOfSlots = models.BigIntegerField(blank = True, null=True, editable = False)
#Defines soft or hard state of the range
doSave = True
'''
Private methods
'''
@staticmethod
def constructor(name,startMac,endMac,isGlobal=True,save=True):
self = MacRange()
try:
#Default constructor
EthernetUtils.checkValidMac(startMac)
EthernetUtils.checkValidMac(endMac)
self.startMac = startMac.upper()
self.endMac = endMac.upper()
self.name = name
self.isGlobal= isGlobal
#Create an iterator
it= EthernetUtils.getMacIterator(self.startMac,self.endMac)
self.nextAvailableMac = it.getNextMac()
#Number of Slots
try:
self.numberOfSlots = EthernetUtils.getNumberOfSlotsInRange(startMac,endMac)
except Exception as e:
print "Exception doing slot calculation"+str(e)
self.numberOfSlots = -1
self.doSave = save
if save:
self.save()
except Exception as e:
print e
raise e
return self
def autoSave(self):
if self.doSave:
self.save()
def __setStartMac(self, value):
EthernetUtils.checkValidMac(value)
MAC4Utils.checkValidMac(value)
self.startMac = value.upper()
self.autoSave()
def __setEndMac(self, value):
EthernetUtils.checkValidMac(value)
self.endMac = value.upper()
self.autoSave()
def __isMacAvailable(self,mac):
return self.macs.filter(mac=mac).count() == 0
'''
Public methods
'''
def getLockIdentifier(self):
#Uniquely identifies object by a key
return inspect.currentframe().f_code.co_filename+str(self)+str(self.id)
def getName(self):
return self.name
def getStartMac(self):
return self.startMac
def getEndMac(self):
return self.endMac
def getIsGlobal(self):
return self.isGlobal
def getExcludedMacs(self):
return self.macs.filter(isExcluded=True).order_by('mac')
def getAllocatedMacs(self):
return self.macs.filter(isExcluded=False).order_by('mac')
def getNumberOfSlots(self):
return int(self.numberOfSlots)
def getPercentageRangeUsage(self):
if not self.numberOfSlots == -1:
return round((float(self.macs.all().count())/float(self.numberOfSlots))*100,2)
return -1
def destroy(self):
with MutexStore.getObjectLock(self.getLockIdentifier()):
if self.macs.filter(isExcluded=False).count() > 0:
raise Exception("Cannot delete MacRange. Range still contains allocated Macs")
for mac in self.macs.all():
#Delete excluded macs
mac.delete()
self.delete()
def allocateMac(self):
'''
Allocates an MAC address of the range
'''
with MutexStore.getObjectLock(self.getLockIdentifier()):
#Implements first fit algorithm
if self.nextAvailableMac == None:
raise Exception("Could not allocate any Mac")
newMac = MacSlot.macFactory(self,self.nextAvailableMac)
self.macs.add(newMac)
#Try to find new slot
try:
it= EthernetUtils.getMacIterator(self.nextAvailableMac,self.endMac)
while True:
mac = it.getNextMac()
if self.__isMacAvailable(mac):
break
self.nextAvailableMac = mac
except Exception as e:
self.nextAvailableMac = None
self.autoSave()
return newMac
def releaseMac(self,macObj):
'''
Releases an MAC address of the range (but it does not destroy the object!!)
'''
with MutexStore.getObjectLock(self.getLockIdentifier()):
macStr = macObj.getMac()
if not self.macs.filter(mac=macStr).count() > 0:
raise Exception("Cannot release Mac %s. Reason may be is unallocated or is an excluded Mac",macStr)
self.macs.remove(macObj)
#Determine new available Mac
if not self.nextAvailableMac == None:
if EthernetUtils.compareMacs(macStr,self.nextAvailableMac) > 0:
#Do nothing
pass
else:
self.nextAvailableMac = macStr
else:
#No more gaps
self.nextAvailableMac = macStr
self.autoSave()
def addExcludedMac(self,macStr,comment=""):
'''
Add an MAC to the exclusion list
'''
with MutexStore.getObjectLock(self.getLockIdentifier()):
#Check is not already allocated
if not self.__isMacAvailable(macStr):
raise Exception("Mac already allocated or marked as excluded")
#then forbidd
if not EthernetUtils.isMacInRange(macStr,self.startMac,self.endMac):
raise Exception("Mac is not in range")
newMac = MacSlot.excludedMacFactory(self,macStr,comment)
self.macs.add(newMac)
#if was nextSlot shift
if self.nextAvailableMac == macStr:
try:
it= EthernetUtils.getMacIterator(self.nextAvailableMac,self.endMac)
while True:
mac = it.getNextMac()
if self.__isMacAvailable(mac):
break
self.nextAvailableMac= mac
except Exception as e:
self.nextAvailableMac = None
self.autoSave()
def removeExcludedMac(self,macObj):
'''
Deletes an Mac from the exclusion list (but it does not destroy the object!!)
'''
with MutexStore.getObjectLock(self.getLockIdentifier()):
macStr = macObj.getMac()
if (not self.macs.get(mac=macStr).isExcludedMac()):
raise Exception("Cannot release Mac. Reason may be is unallocated or is not excluded Mac")
self.macs.remove(macObj)
#Determine new available Mac
if not self.nextAvailableMac == None:
if EthernetUtils.compareMacs(macStr,self.nextAvailableMac) > 0:
#Do nothing
pass
else:
self.nextAvailableMac = macStr
else:
#No more gaps
self.nextAvailableMac = macStr
self.autoSave()
'''
Static methods
'''
@staticmethod
def getAllocatedGlobalNumberOfSlots():
allocated = 0
for range in MacRange.objects.filter(isGlobal=True):
allocated += range.macs.all().count()
return allocated
@staticmethod
def getGlobalNumberOfSlots():
slots = 0
for range in MacRange.objects.filter(isGlobal=True):
slots += range.numberOfSlots
return int(slots)
def rebasePointer(self):
'''Used when pointer has lost track mostly due to bug #'''
with MutexStore.getObjectLock(self.getLockIdentifier()):
print "Rebasing pointer of range: "+str(self.id)
print "Current pointer point to: "+self.nextAvailableMac
try:
it= EthernetUtils.getMacIterator(self.startMac,self.endMac)
while True:
mac = it.getNextMac()
if self.__isMacAvailable(mac):
break
self.nextAvailableMac= mac
except Exception as e:
self.nextAvailableMac = None
print "Pointer will be rebased to: "+self.nextAvailableMac
self.save()
@staticmethod
def rebasePointers():
for range in MacRange.objects.all():
range.rebasePointer()
#slot = RangeSlot("127.0.0.1","127.0.0.255","255.255.255.0")
#slot.allocateMac()
#try:
# slot.releaseMac("d")
#except Exception:
# pass
#slot.allocateMac()
| |
"""This platform allows several lights to be grouped into one light."""
from collections import Counter
import itertools
import logging
from typing import Any, Callable, Iterator, List, Optional, Tuple
import voluptuous as vol
from homeassistant.components import light
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_SUPPORTED_FEATURES, CONF_ENTITIES, CONF_NAME,
STATE_ON, STATE_UNAVAILABLE)
from homeassistant.core import State, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_EFFECT_LIST,
ATTR_FLASH, ATTR_HS_COLOR, ATTR_MAX_MIREDS, ATTR_MIN_MIREDS,
ATTR_TRANSITION, ATTR_WHITE_VALUE, PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS,
SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH,
SUPPORT_TRANSITION, SUPPORT_WHITE_VALUE)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Light Group'
# Config schema: a required list of member light entities, optional name.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Required(CONF_ENTITIES): cv.entities_domain(light.DOMAIN)
})
# Union of every light feature the group is able to forward to its members.
SUPPORT_GROUP_LIGHT = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_EFFECT
                       | SUPPORT_FLASH | SUPPORT_COLOR | SUPPORT_TRANSITION
                       | SUPPORT_WHITE_VALUE)
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
                               async_add_entities,
                               discovery_info=None) -> None:
    """Initialize light.group platform."""
    name = config.get(CONF_NAME)
    entity_ids = config[CONF_ENTITIES]
    async_add_entities([LightGroup(name, entity_ids)])
class LightGroup(light.Light):
    """Representation of a light group.

    Aggregates the states of its member lights into a single entity and
    forwards turn_on/turn_off service calls to all of them.
    """

    def __init__(self, name: str, entity_ids: List[str]) -> None:
        """Initialize a light group."""
        self._name = name  # type: str
        self._entity_ids = entity_ids  # type: List[str]
        self._is_on = False  # type: bool
        self._available = False  # type: bool
        self._brightness = None  # type: Optional[int]
        self._hs_color = None  # type: Optional[Tuple[float, float]]
        self._color_temp = None  # type: Optional[int]
        self._min_mireds = 154  # type: Optional[int]
        self._max_mireds = 500  # type: Optional[int]
        self._white_value = None  # type: Optional[int]
        self._effect_list = None  # type: Optional[List[str]]
        self._effect = None  # type: Optional[str]
        self._supported_features = 0  # type: int
        # Unsubscribe callback for the member state listener; set in
        # async_added_to_hass, cleared on removal.
        self._async_unsub_state_changed = None

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""
        @callback
        def async_state_changed_listener(entity_id: str, old_state: State,
                                         new_state: State):
            """Handle child updates."""
            self.async_schedule_update_ha_state(True)

        self._async_unsub_state_changed = async_track_state_change(
            self.hass, self._entity_ids, async_state_changed_listener)
        # Compute the initial aggregate state from the current member states.
        await self.async_update()

    async def async_will_remove_from_hass(self):
        """Handle removal from HASS."""
        if self._async_unsub_state_changed is not None:
            self._async_unsub_state_changed()
            self._async_unsub_state_changed = None

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def is_on(self) -> bool:
        """Return the on/off state of the light group."""
        return self._is_on

    @property
    def available(self) -> bool:
        """Return whether the light group is available."""
        return self._available

    @property
    def brightness(self) -> Optional[int]:
        """Return the brightness of this light group between 0..255."""
        return self._brightness

    @property
    def hs_color(self) -> Optional[Tuple[float, float]]:
        """Return the HS color value [float, float]."""
        return self._hs_color

    @property
    def color_temp(self) -> Optional[int]:
        """Return the CT color value in mireds."""
        return self._color_temp

    @property
    def min_mireds(self) -> Optional[int]:
        """Return the coldest color_temp that this light group supports."""
        return self._min_mireds

    @property
    def max_mireds(self) -> Optional[int]:
        """Return the warmest color_temp that this light group supports."""
        return self._max_mireds

    @property
    def white_value(self) -> Optional[int]:
        """Return the white value of this light group between 0..255."""
        return self._white_value

    @property
    def effect_list(self) -> Optional[List[str]]:
        """Return the list of supported effects."""
        return self._effect_list

    @property
    def effect(self) -> Optional[str]:
        """Return the current effect."""
        return self._effect

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return self._supported_features

    @property
    def should_poll(self) -> bool:
        """No polling needed for a light group."""
        return False

    async def async_turn_on(self, **kwargs):
        """Forward the turn_on command to all lights in the light group."""
        data = {ATTR_ENTITY_ID: self._entity_ids}
        # Only forward the attributes the caller actually supplied.
        if ATTR_BRIGHTNESS in kwargs:
            data[ATTR_BRIGHTNESS] = kwargs[ATTR_BRIGHTNESS]
        if ATTR_HS_COLOR in kwargs:
            data[ATTR_HS_COLOR] = kwargs[ATTR_HS_COLOR]
        if ATTR_COLOR_TEMP in kwargs:
            data[ATTR_COLOR_TEMP] = kwargs[ATTR_COLOR_TEMP]
        if ATTR_WHITE_VALUE in kwargs:
            data[ATTR_WHITE_VALUE] = kwargs[ATTR_WHITE_VALUE]
        if ATTR_EFFECT in kwargs:
            data[ATTR_EFFECT] = kwargs[ATTR_EFFECT]
        if ATTR_TRANSITION in kwargs:
            data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
        if ATTR_FLASH in kwargs:
            data[ATTR_FLASH] = kwargs[ATTR_FLASH]
        await self.hass.services.async_call(
            light.DOMAIN, light.SERVICE_TURN_ON, data, blocking=True)

    async def async_turn_off(self, **kwargs):
        """Forward the turn_off command to all lights in the light group."""
        data = {ATTR_ENTITY_ID: self._entity_ids}
        if ATTR_TRANSITION in kwargs:
            data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
        await self.hass.services.async_call(
            light.DOMAIN, light.SERVICE_TURN_OFF, data, blocking=True)

    async def async_update(self):
        """Query all members and determine the light group state."""
        all_states = [self.hass.states.get(x) for x in self._entity_ids]
        states = list(filter(None, all_states))
        on_states = [state for state in states if state.state == STATE_ON]
        # The group is on when at least one member is on, and available
        # when at least one member is in a known (non-unavailable) state.
        self._is_on = len(on_states) > 0
        self._available = any(state.state != STATE_UNAVAILABLE
                              for state in states)
        # Numeric attributes are averaged over the members that report them.
        self._brightness = _reduce_attribute(on_states, ATTR_BRIGHTNESS)
        self._hs_color = _reduce_attribute(
            on_states, ATTR_HS_COLOR, reduce=_mean_tuple)
        self._white_value = _reduce_attribute(on_states, ATTR_WHITE_VALUE)
        self._color_temp = _reduce_attribute(on_states, ATTR_COLOR_TEMP)
        # The group's mired range is the widest range any member supports.
        self._min_mireds = _reduce_attribute(
            states, ATTR_MIN_MIREDS, default=154, reduce=min)
        self._max_mireds = _reduce_attribute(
            states, ATTR_MAX_MIREDS, default=500, reduce=max)
        self._effect_list = None
        all_effect_lists = list(
            _find_state_attributes(states, ATTR_EFFECT_LIST))
        if all_effect_lists:
            # Merge all effects from all effect_lists with a union merge.
            self._effect_list = list(set().union(*all_effect_lists))
        self._effect = None
        all_effects = list(_find_state_attributes(on_states, ATTR_EFFECT))
        if all_effects:
            # Report the most common effect.
            effects_count = Counter(itertools.chain(all_effects))
            self._effect = effects_count.most_common(1)[0][0]
        self._supported_features = 0
        for support in _find_state_attributes(states, ATTR_SUPPORTED_FEATURES):
            # Merge supported features by emulating support for every feature
            # we find.
            self._supported_features |= support
        # Bitwise-and the supported features with the GroupedLight's features
        # so that we don't break in the future when a new feature is added.
        self._supported_features &= SUPPORT_GROUP_LIGHT
def _find_state_attributes(states: List[State],
                           key: str) -> Iterator[Any]:
    """Yield every non-None value stored under *key* in the given states."""
    for candidate in states:
        attr = candidate.attributes.get(key)
        if attr is not None:
            yield attr
def _mean_int(*args):
"""Return the mean of the supplied values."""
return int(sum(args) / len(args))
def _mean_tuple(*args):
"""Return the mean values along the columns of the supplied values."""
return tuple(sum(l) / len(l) for l in zip(*args))
def _reduce_attribute(states: List[State],
                      key: str,
                      default: Optional[Any] = None,
                      reduce: Callable[..., Any] = _mean_int) -> Any:
    """Collect *key* from *states* and fold the values with *reduce*.

    Returns *default* when no state carries the attribute, and a single
    value unchanged (without calling *reduce*) when exactly one does.
    """
    values = list(_find_state_attributes(states, key))
    if not values:
        return default
    if len(values) == 1:
        return values[0]
    return reduce(*values)
| |
#!/usr/bin/env python
import setpath
import unittest
import os
from bike import testdata
from bike.query.findDefinition import findAllPossibleDefinitionsByCoords
from bike.query.getTypeOf import getTypeOf,resolveImportedModuleOrPackage
from bike.parsing.newstuff import getModuleOrPackageUsingFQN
from bike.parsing.fastparserast import getRoot
from bike.testutils import *
class TestFindDefinitionByCoords(BRMTestCase):
    # Each test builds a throwaway module from inline source, then asks
    # findAllPossibleDefinitionsByCoords for the definition(s) of the name
    # at a given (line, column) and checks the reported location and
    # confidence score (100 = certain, lower = candidate match).
    def test_findsClassRef(self):
        src=trimLines("""
        class TheClass:
            pass
        a = TheClass()
        """)
        createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),3,6)]
        assert defn[0].filename == os.path.abspath("mymodule.py")
        assert defn[0].lineno == 1
        assert defn[0].colno == 6
        assert defn[0].confidence == 100
    def tests_findsMethodRef(self):
        src=trimLines("""
        class TheClass:
            def theMethod(self):
                pass
        a = TheClass()
        a.theMethod()
        """)
        createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),5,3)]
        assert defn[0].filename == os.path.abspath("mymodule.py")
        assert defn[0].lineno == 2
        assert defn[0].colno == 8
        assert defn[0].confidence == 100
    def test_returnsOtherMethodsWithSameName(self):
        # The receiver type is unknown, so the match is only 50% confident.
        src=trimLines("""
        class TheClass:
            def theMethod(self):
                pass
        a = SomeOtherClass()
        a.theMethod()
        """)
        createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),5,3)]
        assert defn[0].filename == os.path.abspath("mymodule.py")
        assert defn[0].lineno == 2
        assert defn[0].colno == 8
        assert defn[0].confidence == 50
    def test_findsTemporaryDefinition(self):
        src=trimLines("""
        a = 3
        b = a + 1
        """)
        createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),2,4)]
        assert defn[0].filename == os.path.abspath("mymodule.py")
        assert defn[0].lineno == 1
        assert defn[0].colno == 0
        assert defn[0].confidence == 100
    def test_findsArgumentDefinition(self):
        # A function parameter counts as a definition.
        src=trimLines("""
        def someFunction(a):
            b = a + 1
        """)
        createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),2,8)]
        assert defn[0].filename == os.path.abspath("mymodule.py")
        assert defn[0].lineno == 1
        assert defn[0].colno == 17
        assert defn[0].confidence == 100
    def test_findsClassInstanceDefinition(self):
        src=trimLines("""
        class TheClass():
            pass
        a = TheClass()
        print a
        """)
        createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),4,6)]
        assert defn[0].filename == os.path.abspath("mymodule.py")
        assert defn[0].lineno == 3
        assert defn[0].colno == 0
        assert defn[0].confidence == 100
    def test_findsDefinitionInParentScope(self):
        # Lookups fall back to the enclosing (module) scope.
        src=trimLines("""
        a = 3
        def foo(self):
            b = a + 1
        """)
        createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),3,8)]
        assert defn[0].filename == os.path.abspath("mymodule.py")
        assert defn[0].lineno == 1
        assert defn[0].colno == 0
        assert defn[0].confidence == 100
    def test_findsDefinitionWithinFunction(self):
        src=trimLines("""
        def foo(yadda):
            a = someFunction()
            print a
        """)
        createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),3,10)]
        assert defn[0].filename == os.path.abspath("mymodule.py")
        assert defn[0].lineno == 2
        assert defn[0].colno == 4
        assert defn[0].confidence == 100
    def test_findsDefinitionFromSubsequentAssignment(self):
        # A later re-assignment still resolves to the first binding.
        src=trimLines("""
        def foo(yadda):
            a = 3
            print a
            a = 5
        """)
        createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),4,4)]
        assert defn[0].filename == os.path.abspath("mymodule.py")
        assert defn[0].lineno == 2
        assert defn[0].colno == 4
        assert defn[0].confidence == 100
    def test_findsDefinitionFromDefinition(self):
        # NOTE(review): identical source and coords to
        # test_findsDefinitionFromSubsequentAssignment above -- presumably the
        # coords were meant to point at a different occurrence; confirm intent.
        src=trimLines("""
        def foo(yadda):
            a = 3
            print a
            a = 5
        """)
        createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),4,4)]
        assert defn[0].filename == os.path.abspath("mymodule.py")
        assert defn[0].lineno == 2
        assert defn[0].colno == 4
        assert defn[0].confidence == 100
    def test_findsClassRefUsingFromImportStatement(self):
        # Definitions are followed across 'from pkg import name'.
        src=trimLines("""
        from a.b.bah import TheClass
        """)
        classsrc=trimLines("""
        class TheClass:
            pass
        """)
        root = createSourceNodeAt(src,"a.foo")
        root = createSourceNodeAt(classsrc, "a.b.bah")
        module = getModuleOrPackageUsingFQN("a.foo")
        filename = os.path.abspath(os.path.join("a","foo.py"))
        defn = [x for x in findAllPossibleDefinitionsByCoords(filename,1,21)]
        assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
        assert defn[0].lineno == 1
        assert defn[0].colno == 6
        assert defn[0].confidence == 100
    def test_findsVariableRefUsingFromImportStatement(self):
        importsrc=trimLines("""
        from a.b.bah import mytext
        print mytext
        """)
        src=trimLines("""
        mytext = 'hello'
        """)
        root = createSourceNodeAt(importsrc,"a.foo")
        root = createSourceNodeAt(src, "a.b.bah")
        filename = os.path.abspath(os.path.join("a","foo.py"))
        defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,6)]
        assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
        assert defn[0].lineno == 1
        assert defn[0].colno == 0
        assert defn[0].confidence == 100
    def test_findsVariableRefUsingImportStatement(self):
        # Definitions are followed across dotted 'import a.b.bah' references.
        importsrc=trimLines("""
        import a.b.bah
        print a.b.bah.mytext
        """)
        src=trimLines("""
        mytext = 'hello'
        """)
        root = createSourceNodeAt(importsrc,"a.foo")
        root = createSourceNodeAt(src, "a.b.bah")
        filename = os.path.abspath(os.path.join("a","foo.py"))
        defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,14)]
        assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
        assert defn[0].lineno == 1
        assert defn[0].colno == 0
        assert defn[0].confidence == 100
def test_findsVariableRefUsingFromImportStarStatement(self):
importsrc=trimLines("""
from a.b.bah import *
print mytext
""")
src=trimLines("""
mytext = 'hello'
""")
createSourceNodeAt(importsrc,"a.foo")
createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,6)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsVariableRefUsingFromPackageImportModuleStatement(self):
importsrc=trimLines("""
from a.b import bah
print bah.mytext
""")
src=trimLines("""
mytext = 'hello'
""")
root = createSourceNodeAt(importsrc,"a.b.foo")
root = createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","b","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,10)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsImportedVariableRefInAFunctionArg(self):
importsrc=trimLines("""
from a.b import bah
someFunction(bah.mytext)
""")
src=trimLines("""
mytext = 'hello'
""")
root = createSourceNodeAt(importsrc,"a.b.foo")
root = createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","b","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,17)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsVariableRefUsingFromImportStatementInFunction(self):
importsrc=trimLines("""
def foo:
from a.b.bah import mytext
print mytext
""")
src=trimLines("""
mytext = 'hello'
""")
root = createSourceNodeAt(importsrc,"a.foo")
root = createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,3,10)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsVariableRefByImportingModule(self):
importsrc=trimLines("""
import a.b.bah
print a.b.bah.mytext
""")
src=trimLines("""
mytext = 'hello'
""")
defn = self.helper(importsrc, src, 2, 14)
assert defn[0].filename == pkgstructureFile2
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsVariableRefByImportingModuleWithFrom(self):
importsrc=trimLines("""
from a.b import bah
someFunction(bah.mytext)
""")
src=trimLines("""
mytext = 'hello'
""")
defn = self.helper(importsrc, src, 2, 17)
assert defn[0].filename == pkgstructureFile2
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def helper(self, src, classsrc, line, col):
try:
createPackageStructure(src,classsrc)
filename = pkgstructureFile1
#Root(None,None,[pkgstructureRootDir])
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,line,col)]
finally:
removePackageStructure()
return defn
def test_doesntfindVariableRefOfUnimportedModule(self):
importsrc=trimLines("""
# a.b.bah not imported
print a.b.bah.mytext
""")
src=trimLines("""
mytext = 'hello'
""")
root = createSourceNodeAt(importsrc,"a.b.foo")
root = createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","b","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,14)]
self.assertEqual(defn,[])
def test_findsSelfAttributeDefinition(self):
src=trimLines("""
class MyClass:
def __init__(self):
self.a = 'hello'
def myMethod(self):
print self.a
""")
root = createSourceNodeAt(src,"mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,5,18)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 3
assert defn[0].colno == 12
assert defn[0].confidence == 100
def test_findsSelfAttributeDefinitionFromSamePlace(self):
src=trimLines("""
class MyClass:
def __init__(self):
self.a = 'hello'
def myMethod(self):
print self.a
""")
root = createSourceNodeAt(src,"mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,3,12)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 3
assert defn[0].colno == 12
assert defn[0].confidence == 100
def test_findsSelfAttributeDefinition(self):
src=trimLines("""
class MyClass:
def someOtherFn(self):
pass
def load(self, source):
# fastparser ast
self.fastparseroot = fastparser(source,self.modulename)
""")
root = createSourceNodeAt(src,"mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,6,14)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 6
assert defn[0].colno == 13
assert defn[0].confidence == 100
def test_findsDefnOfInnerClass(self):
src = trimLines("""
class TheClass:
class TheClass:
pass
a = TheClass.TheClass()
""")
root = createSourceNodeAt(src,"mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,4,14)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 2
assert defn[0].colno == 10
assert defn[0].confidence == 100
def test_findsDefnOfOuterClass(self):
src = trimLines("""
class TheClass:
class TheClass:
pass
a = TheClass.TheClass()
""")
root = createSourceNodeAt(src,"mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,4,4)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 1
assert defn[0].colno == 6
assert defn[0].confidence == 100
def test_findsClassDeclaredIn__init__Module(self):
importsrc=trimLines("""
class TheClass:
pass
""")
src=trimLines("""
from a import TheClass
c = TheClass()
""")
root = createSourceNodeAt(importsrc,"a.__init__")
root = createSourceNodeAt(src, "mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,6)]
assert defn[0].filename == os.path.abspath(os.path.join("a",
"__init__.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 6
assert defn[0].confidence == 100
class TestFindDefinitionUsingFiles(BRMTestCase):
    """Find-definition tests that go through real files on disk (temporary
    test files and generated package structures) instead of purely
    in-memory source nodes."""
    def test_findsASimpleDefinitionUsingFiles(self):
        """A class instantiation in a tmp file resolves to the class statement."""
        src=trimLines("""
        class TheClass:
            pass
        a = TheClass()
        """)
        writeTmpTestFile(src)
        defn = [x for x in findAllPossibleDefinitionsByCoords(tmpfile,3,6)]
        assert defn[0].filename == tmpfile
        assert defn[0].lineno == 1
        assert defn[0].colno == 6
        assert defn[0].confidence == 100
    def test_findsDefinitionInAnotherModuleUsingFiles(self):
        """An absolute 'from a.b.bah import TheClass' resolves into bah.py."""
        src=trimLines("""
        from a.b.bah import TheClass
        """)
        classsrc=trimLines("""
        class TheClass:
            pass
        """)
        defn = self.helper(src, classsrc, 1, 21)
        assert defn[0].filename == pkgstructureFile2
        assert defn[0].lineno == 1
        assert defn[0].colno == 6
        assert defn[0].confidence == 100
    def test_findsDefinitionInAnotherRelativeModuleUsingFiles(self):
        """A package-relative 'from b.bah import TheClass' also resolves to bah.py."""
        src=trimLines("""
        from b.bah import TheClass
        """)
        classsrc=trimLines("""
        class TheClass:
            pass
        """)
        defn = self.helper(src, classsrc,1,21)
        assert defn[0].filename == pkgstructureFile2
        assert defn[0].lineno == 1
        assert defn[0].colno == 6
        assert defn[0].confidence == 100
    def test_findsMethodDefinitionInAnotherModuleUsingFiles(self):
        """A method call on an imported class's instance resolves to the method def."""
        src=trimLines("""
        from b.bah import TheClass
        a = TheClass()
        a.theMethod()
        """)
        classsrc=trimLines("""
        class TheClass:
          def theMethod(self):
            pass
        """)
        defn = self.helper(src, classsrc, 3, 2)
        assert defn[0].filename == pkgstructureFile2
        assert defn[0].lineno == 2
        assert defn[0].colno == 8
        assert defn[0].confidence == 100
    def test_findsDefinitonOfMethodWhenUseIsOnAMultiLine(self):
        """The reference sits on the continuation line of a parenthesised multiline."""
        classsrc=trimLines("""
        class TheClass:
          def theMethod(self):
            pass
        """)
        src=trimLines("""
        from b.bah import TheClass
        a = TheClass()
        i,j = (32,
               a.theMethod()) # <--- find me!
        something=somethingelse
        """)
        defn = self.helper(src, classsrc, 4, 9)
        assert defn[0].filename == pkgstructureFile2
        assert defn[0].lineno == 2
        assert defn[0].colno == 8
        assert defn[0].confidence == 100
    def test_findsDefinitionWhenUseIsOnAMultilineAndNextLineBalancesBrace(self):
        """A brace balanced on the following line must not confuse the search."""
        classsrc=trimLines("""
        class TheClass:
          def theMethod(self):
            pass
        """)
        src=trimLines("""
        from b.bah import TheClass
        c = TheClass()
        f1, f2 = (c.func1,
                  c.theMethod)
        f1, f2 = (c.func1,
                  c.theMethod)
        """)
        defn = self.helper(src, classsrc, 4, 10)
        self.assertEqual(pkgstructureFile2,defn[0].filename)
        self.assertEqual(2,defn[0].lineno)
        self.assertEqual(8,defn[0].colno)
        self.assertEqual(100,defn[0].confidence)
    def test_worksIfFindingDefnOfRefInSlashMultiline(self):
        """The reference sits after a backslash line-continuation."""
        classsrc=trimLines("""
        class TheClass:
          def theMethod(self):
            pass
        """)
        src=trimLines("""
        from b.bah import TheClass
        c = TheClass()
        f1, f2 = c.func1 \\
                 ,c.theMethod
        """)
        defn = self.helper(src, classsrc, 4, 10)
        self.assertEqual(pkgstructureFile2,defn[0].filename)
        self.assertEqual(2,defn[0].lineno)
        self.assertEqual(8,defn[0].colno)
        self.assertEqual(100,defn[0].confidence)
    def test_findsDefnInSameNonPackageDirectory(self):
        """A sibling module in a plain (non-package) directory is searched too."""
        try:
            getRoot().pythonpath = [] # clear the python path
            classsrc = trimLines("""
            def testFunction():
              print 'hello'
            """)
            src = trimLines("""
            from baz import testFunction
            """)
            writeTmpTestFile(src)
            newtmpfile = os.path.join(tmproot,"baz.py")
            writeFile(newtmpfile, classsrc)
            refs = [x for x in findAllPossibleDefinitionsByCoords(tmpfile,1,16)]
            assert refs[0].filename == newtmpfile
            assert refs[0].lineno == 1
        finally:
            os.remove(newtmpfile)
            deleteTmpTestFile()
    def test_findsDefnInPackageSubDirectoryAndRootNotInPath(self):
        """A package-relative import resolves even with an empty python path."""
        src=trimLines("""
        from b.bah import TheClass
        """)
        classsrc=trimLines("""
        class TheClass:
          def theMethod(self):
            pass
        """)
        getRoot().pythonpath = [] # clear the python path
        defn = self.helper(src, classsrc, 1, 18)
        assert defn[0].filename == pkgstructureFile2
        assert defn[0].lineno == 1
        assert defn[0].colno == 6
        assert defn[0].confidence == 100
    def test_findsDefnInSamePackageHierarchyAndRootNotInPath(self):
        """An absolute import within the same package hierarchy, empty python path."""
        src=trimLines("""
        from a.b.bah import TheClass
        """)
        classsrc=trimLines("""
        class TheClass:
          def theMethod(self):
            pass
        """)
        getRoot().pythonpath = [] # clear the python path
        defn = self.helper(src, classsrc, 1, 20)
        assert defn[0].filename == pkgstructureFile2
        assert defn[0].lineno == 1
        assert defn[0].colno == 6
        assert defn[0].confidence == 100
    def helper(self, src, classsrc, line, col):
        """Create the on-disk package fixture, collect definitions at
        (line, col) in the first package file, and always clean up."""
        try:
            createPackageStructure(src,classsrc)
            filename = pkgstructureFile1
            #Root(None,None,[pkgstructureRootDir])
            defn = [x for x in findAllPossibleDefinitionsByCoords(filename,line,col)]
        finally:
            removePackageStructure()
        return defn
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._azure_stack_hci_client_enums import *
class Resource(msrest.serialization.Model):
    """Common Azure Resource Manager fields returned for every resource.

    All three fields are server-populated (read-only) and ignored when
    sending a request.

    :ivar id: Fully qualified resource ID for the resource.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource.
    :vartype type: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated; always start unset on the client.
        self.id = None
        self.name = None
        self.type = None
class ProxyResource(Resource):
    """ARM proxy resource: it carries no tags and no location.

    All fields are server-populated (read-only) and ignored on requests.

    :ivar id: Fully qualified resource ID for the resource.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource.
    :vartype type: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, **kwargs):
        # No fields of its own; everything lives on Resource.
        super().__init__(**kwargs)
class ArcSetting(ProxyResource):
    """ArcSetting details.

    Server-populated (read-only) fields: id, name, type, provisioning_state,
    arc_instance_resource_group, aggregate_state, per_node_details.

    :ivar provisioning_state: Provisioning state of the ArcSetting proxy resource.
    :vartype provisioning_state: str or ~azure_stack_hci_client.models.ProvisioningState
    :ivar arc_instance_resource_group: Resource group hosting the Arc agents,
     ie. Hybrid Compute Machine resources.
    :vartype arc_instance_resource_group: str
    :ivar aggregate_state: Aggregate state of the Arc agent across the nodes
     in this HCI cluster.
    :vartype aggregate_state: str or ~azure_stack_hci_client.models.ArcSettingAggregateState
    :ivar per_node_details: State of the Arc agent in each of the nodes.
    :vartype per_node_details: list[~azure_stack_hci_client.models.PerNodeState]
    :param created_by: The identity that created the resource.
    :type created_by: str
    :param created_by_type: The type of identity that created the resource.
    :type created_by_type: str or ~azure_stack_hci_client.models.CreatedByType
    :param created_at: The timestamp of resource creation (UTC).
    :type created_at: ~datetime.datetime
    :param last_modified_by: The identity that last modified the resource.
    :type last_modified_by: str
    :param last_modified_by_type: The type of identity that last modified the resource.
    :type last_modified_by_type: str or ~azure_stack_hci_client.models.CreatedByType
    :param last_modified_at: The timestamp of the last modification (UTC).
    :type last_modified_at: ~datetime.datetime
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "arc_instance_resource_group": {"readonly": True},
        "aggregate_state": {"readonly": True},
        "per_node_details": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "arc_instance_resource_group": {"key": "properties.arcInstanceResourceGroup", "type": "str"},
        "aggregate_state": {"key": "properties.aggregateState", "type": "str"},
        "per_node_details": {"key": "properties.perNodeDetails", "type": "[PerNodeState]"},
        "created_by": {"key": "systemData.createdBy", "type": "str"},
        "created_by_type": {"key": "systemData.createdByType", "type": "str"},
        "created_at": {"key": "systemData.createdAt", "type": "iso-8601"},
        "last_modified_by": {"key": "systemData.lastModifiedBy", "type": "str"},
        "last_modified_by_type": {"key": "systemData.lastModifiedByType", "type": "str"},
        "last_modified_at": {"key": "systemData.lastModifiedAt", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        created_by: Optional[str] = None,
        created_by_type: Optional[Union[str, "CreatedByType"]] = None,
        created_at: Optional[datetime.datetime] = None,
        last_modified_by: Optional[str] = None,
        last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
        last_modified_at: Optional[datetime.datetime] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Server-populated properties start unset.
        self.provisioning_state = None
        self.arc_instance_resource_group = None
        self.aggregate_state = None
        self.per_node_details = None
        # Client-settable system metadata.
        self.created_by = created_by
        self.created_by_type = created_by_type
        self.created_at = created_at
        self.last_modified_by = last_modified_by
        self.last_modified_by_type = last_modified_by_type
        self.last_modified_at = last_modified_at
class ArcSettingList(msrest.serialization.Model):
    """Paged list of ArcSetting proxy resources for the HCI cluster.

    Both fields are server-populated (read-only).

    :ivar value: List of ArcSetting proxy resources.
    :vartype value: list[~azure_stack_hci_client.models.ArcSetting]
    :ivar next_link: Link to the next set of results.
    :vartype next_link: str
    """

    _validation = {
        "value": {"readonly": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[ArcSetting]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = None
        self.next_link = None
class TrackedResource(Resource):
    """ARM tracked top-level resource: carries 'tags' and a required 'location'.

    id/name/type are server-populated (read-only); location must be supplied.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "location": {"key": "location", "type": "str"},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.tags = tags
        self.location = location
class Cluster(TrackedResource):
    """An Azure Stack HCI cluster resource.

    Server-populated (read-only) fields: id, name, type, provisioning_state,
    status, cloud_id, reported_properties, trial_days_remaining,
    billing_model, registration_timestamp, last_sync_timestamp,
    last_billing_timestamp. 'location' is required.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    :ivar provisioning_state: Provisioning state.
    :vartype provisioning_state: str or ~azure_stack_hci_client.models.ProvisioningState
    :ivar status: Status of the cluster agent.
    :vartype status: str or ~azure_stack_hci_client.models.Status
    :ivar cloud_id: Unique, immutable resource id.
    :vartype cloud_id: str
    :param cloud_management_endpoint: Endpoint configured for management from
     the Azure portal.
    :type cloud_management_endpoint: str
    :param aad_client_id: App id of cluster AAD identity.
    :type aad_client_id: str
    :param aad_tenant_id: Tenant id of cluster AAD identity.
    :type aad_tenant_id: str
    :ivar reported_properties: Properties reported by the cluster agent.
    :vartype reported_properties: ~azure_stack_hci_client.models.ClusterReportedProperties
    :ivar trial_days_remaining: Number of days remaining in the trial period.
    :vartype trial_days_remaining: float
    :ivar billing_model: Type of billing applied to the resource.
    :vartype billing_model: str
    :ivar registration_timestamp: First cluster sync timestamp.
    :vartype registration_timestamp: ~datetime.datetime
    :ivar last_sync_timestamp: Most recent cluster sync timestamp.
    :vartype last_sync_timestamp: ~datetime.datetime
    :ivar last_billing_timestamp: Most recent billing meter timestamp.
    :vartype last_billing_timestamp: ~datetime.datetime
    :param created_by: The identity that created the resource.
    :type created_by: str
    :param created_by_type: The type of identity that created the resource.
    :type created_by_type: str or ~azure_stack_hci_client.models.CreatedByType
    :param created_at: The timestamp of resource creation (UTC).
    :type created_at: ~datetime.datetime
    :param last_modified_by: The identity that last modified the resource.
    :type last_modified_by: str
    :param last_modified_by_type: The type of identity that last modified the resource.
    :type last_modified_by_type: str or ~azure_stack_hci_client.models.CreatedByType
    :param last_modified_at: The timestamp of the last modification (UTC).
    :type last_modified_at: ~datetime.datetime
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "provisioning_state": {"readonly": True},
        "status": {"readonly": True},
        "cloud_id": {"readonly": True},
        "reported_properties": {"readonly": True},
        "trial_days_remaining": {"readonly": True},
        "billing_model": {"readonly": True},
        "registration_timestamp": {"readonly": True},
        "last_sync_timestamp": {"readonly": True},
        "last_billing_timestamp": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "location": {"key": "location", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "status": {"key": "properties.status", "type": "str"},
        "cloud_id": {"key": "properties.cloudId", "type": "str"},
        "cloud_management_endpoint": {"key": "properties.cloudManagementEndpoint", "type": "str"},
        "aad_client_id": {"key": "properties.aadClientId", "type": "str"},
        "aad_tenant_id": {"key": "properties.aadTenantId", "type": "str"},
        "reported_properties": {"key": "properties.reportedProperties", "type": "ClusterReportedProperties"},
        "trial_days_remaining": {"key": "properties.trialDaysRemaining", "type": "float"},
        "billing_model": {"key": "properties.billingModel", "type": "str"},
        "registration_timestamp": {"key": "properties.registrationTimestamp", "type": "iso-8601"},
        "last_sync_timestamp": {"key": "properties.lastSyncTimestamp", "type": "iso-8601"},
        "last_billing_timestamp": {"key": "properties.lastBillingTimestamp", "type": "iso-8601"},
        "created_by": {"key": "systemData.createdBy", "type": "str"},
        "created_by_type": {"key": "systemData.createdByType", "type": "str"},
        "created_at": {"key": "systemData.createdAt", "type": "iso-8601"},
        "last_modified_by": {"key": "systemData.lastModifiedBy", "type": "str"},
        "last_modified_by_type": {"key": "systemData.lastModifiedByType", "type": "str"},
        "last_modified_at": {"key": "systemData.lastModifiedAt", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        cloud_management_endpoint: Optional[str] = None,
        aad_client_id: Optional[str] = None,
        aad_tenant_id: Optional[str] = None,
        created_by: Optional[str] = None,
        created_by_type: Optional[Union[str, "CreatedByType"]] = None,
        created_at: Optional[datetime.datetime] = None,
        last_modified_by: Optional[str] = None,
        last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
        last_modified_at: Optional[datetime.datetime] = None,
        **kwargs
    ):
        super().__init__(tags=tags, location=location, **kwargs)
        # Server-populated properties start unset.
        self.provisioning_state = None
        self.status = None
        self.cloud_id = None
        self.reported_properties = None
        self.trial_days_remaining = None
        self.billing_model = None
        self.registration_timestamp = None
        self.last_sync_timestamp = None
        self.last_billing_timestamp = None
        # Client-settable configuration.
        self.cloud_management_endpoint = cloud_management_endpoint
        self.aad_client_id = aad_client_id
        self.aad_tenant_id = aad_tenant_id
        # Client-settable system metadata.
        self.created_by = created_by
        self.created_by_type = created_by_type
        self.created_at = created_at
        self.last_modified_by = last_modified_by
        self.last_modified_by_type = last_modified_by_type
        self.last_modified_at = last_modified_at
class ClusterList(msrest.serialization.Model):
    """Paged list of clusters.

    :param value: List of clusters.
    :type value: list[~azure_stack_hci_client.models.Cluster]
    :ivar next_link: Link to the next set of results (server-populated).
    :vartype next_link: str
    """

    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[Cluster]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: Optional[List["Cluster"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = None
class ClusterNode(msrest.serialization.Model):
    """Details of a single cluster node; every field is server-populated.

    :ivar name: Name of the cluster node.
    :vartype name: str
    :ivar id: Id of the node in the cluster.
    :vartype id: float
    :ivar manufacturer: Manufacturer of the cluster node hardware.
    :vartype manufacturer: str
    :ivar model: Model name of the cluster node hardware.
    :vartype model: str
    :ivar os_name: Operating system running on the cluster node.
    :vartype os_name: str
    :ivar os_version: Version of the operating system running on the node.
    :vartype os_version: str
    :ivar serial_number: Immutable id of the cluster node.
    :vartype serial_number: str
    :ivar core_count: Number of physical cores on the cluster node.
    :vartype core_count: float
    :ivar memory_in_gi_b: Total available memory on the cluster node (in GiB).
    :vartype memory_in_gi_b: float
    """

    _validation = {
        "name": {"readonly": True},
        "id": {"readonly": True},
        "manufacturer": {"readonly": True},
        "model": {"readonly": True},
        "os_name": {"readonly": True},
        "os_version": {"readonly": True},
        "serial_number": {"readonly": True},
        "core_count": {"readonly": True},
        "memory_in_gi_b": {"readonly": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "id": {"key": "id", "type": "float"},
        "manufacturer": {"key": "manufacturer", "type": "str"},
        "model": {"key": "model", "type": "str"},
        "os_name": {"key": "osName", "type": "str"},
        "os_version": {"key": "osVersion", "type": "str"},
        "serial_number": {"key": "serialNumber", "type": "str"},
        "core_count": {"key": "coreCount", "type": "float"},
        "memory_in_gi_b": {"key": "memoryInGiB", "type": "float"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All attributes are reported by the server; start unset.
        self.name = None
        self.id = None
        self.manufacturer = None
        self.model = None
        self.os_name = None
        self.os_version = None
        self.serial_number = None
        self.core_count = None
        self.memory_in_gi_b = None
class ClusterPatch(msrest.serialization.Model):
    """Patch payload for updating a cluster.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param cloud_management_endpoint: Endpoint configured for management from
     the Azure portal.
    :type cloud_management_endpoint: str
    """

    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "cloud_management_endpoint": {"key": "properties.cloudManagementEndpoint", "type": "str"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        cloud_management_endpoint: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.tags = tags
        self.cloud_management_endpoint = cloud_management_endpoint
class ClusterReportedProperties(msrest.serialization.Model):
    """Properties reported by the cluster agent; every field is server-populated.

    :ivar cluster_name: Name of the on-prem cluster connected to this resource.
    :vartype cluster_name: str
    :ivar cluster_id: Unique id generated by the on-prem cluster.
    :vartype cluster_id: str
    :ivar cluster_version: Version of the cluster software.
    :vartype cluster_version: str
    :ivar nodes: List of nodes reported by the cluster.
    :vartype nodes: list[~azure_stack_hci_client.models.ClusterNode]
    :ivar last_updated: Last time the cluster reported the data.
    :vartype last_updated: ~datetime.datetime
    """

    _validation = {
        "cluster_name": {"readonly": True},
        "cluster_id": {"readonly": True},
        "cluster_version": {"readonly": True},
        "nodes": {"readonly": True},
        "last_updated": {"readonly": True},
    }

    _attribute_map = {
        "cluster_name": {"key": "clusterName", "type": "str"},
        "cluster_id": {"key": "clusterId", "type": "str"},
        "cluster_version": {"key": "clusterVersion", "type": "str"},
        "nodes": {"key": "nodes", "type": "[ClusterNode]"},
        "last_updated": {"key": "lastUpdated", "type": "iso-8601"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All attributes are reported by the server; start unset.
        self.cluster_name = None
        self.cluster_id = None
        self.cluster_version = None
        self.nodes = None
        self.last_updated = None
class ErrorAdditionalInfo(msrest.serialization.Model):
    """Additional info attached to a resource management error.

    All attributes are populated by the server and ignored when sending a
    request.

    :ivar type: The additional info type.
    :vartype type: str
    :ivar info: The additional info.
    :vartype info: any
    """

    _validation = {
        "type": {"readonly": True},
        "info": {"readonly": True},
    }

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "info": {"key": "info", "type": "object"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.type = None
        self.info = None
class ErrorDetail(msrest.serialization.Model):
    """A single error detail, possibly carrying nested details.

    All attributes are populated by the server and ignored when sending a
    request.

    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar target: The error target.
    :vartype target: str
    :ivar details: The error details.
    :vartype details: list[~azure_stack_hci_client.models.ErrorDetail]
    :ivar additional_info: The error additional info.
    :vartype additional_info: list[~azure_stack_hci_client.models.ErrorAdditionalInfo]
    """

    # Every field is server-populated, hence read-only.
    _validation = {
        name: {"readonly": True}
        for name in ("code", "message", "target", "details", "additional_info")
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "target": {"key": "target", "type": "str"},
        "details": {"key": "details", "type": "[ErrorDetail]"},
        "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.code = None
        self.message = None
        self.target = None
        self.details = None
        self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
    """Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).

    :param error: The error object.
    :type error: ~azure_stack_hci_client.models.ErrorDetail
    """

    _attribute_map = {
        "error": {"key": "error", "type": "ErrorDetail"},
    }

    def __init__(self, *, error: Optional["ErrorDetail"] = None, **kwargs):
        super().__init__(**kwargs)
        self.error = error
class Extension(ProxyResource):
    """A particular Arc extension installed on an HCI cluster.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar provisioning_state: Provisioning state of the Extension proxy resource. Possible values
     include: "Succeeded", "Failed", "Canceled", "Accepted", "Provisioning".
    :vartype provisioning_state: str or ~azure_stack_hci_client.models.ProvisioningState
    :ivar aggregate_state: Aggregate state of Arc Extensions across the nodes in this HCI cluster.
     Possible values include: "NotSpecified", "Error", "Succeeded", "Canceled", "Failed",
     "Connected", "Disconnected", "Deleted", "Creating", "Updating", "Deleting", "Moving",
     "PartiallySucceeded", "PartiallyConnected", "InProgress".
    :vartype aggregate_state: str or ~azure_stack_hci_client.models.ExtensionAggregateState
    :ivar per_node_extension_details: State of Arc Extension in each of the nodes.
    :vartype per_node_extension_details: list[~azure_stack_hci_client.models.PerNodeExtensionState]
    :param force_update_tag: How the extension handler should be forced to update even if the
     extension configuration has not changed.
    :type force_update_tag: str
    :param publisher: The name of the extension handler publisher.
    :type publisher: str
    :param type_properties_extension_parameters_type: Specifies the type of the extension; an
     example is "CustomScriptExtension".
    :type type_properties_extension_parameters_type: str
    :param type_handler_version: Specifies the version of the script handler.
    :type type_handler_version: str
    :param auto_upgrade_minor_version: Indicates whether the extension should use a newer minor
     version if one is available at deployment time. Once deployed, however, the extension will
     not upgrade minor versions unless redeployed, even with this property set to true.
    :type auto_upgrade_minor_version: bool
    :param settings: Json formatted public settings for the extension.
    :type settings: any
    :param protected_settings: Protected settings (may contain secrets).
    :type protected_settings: any
    :param created_by: The identity that created the resource.
    :type created_by: str
    :param created_by_type: The type of identity that created the resource. Possible values
     include: "User", "Application", "ManagedIdentity", "Key".
    :type created_by_type: str or ~azure_stack_hci_client.models.CreatedByType
    :param created_at: The timestamp of resource creation (UTC).
    :type created_at: ~datetime.datetime
    :param last_modified_by: The identity that last modified the resource.
    :type last_modified_by: str
    :param last_modified_by_type: The type of identity that last modified the resource. Possible
     values include: "User", "Application", "ManagedIdentity", "Key".
    :type last_modified_by_type: str or ~azure_stack_hci_client.models.CreatedByType
    :param last_modified_at: The timestamp of resource last modification (UTC).
    :type last_modified_at: ~datetime.datetime
    """

    # Status fields are read-only; they are reported by the service.
    _validation = {
        name: {"readonly": True}
        for name in ("id", "name", "type", "provisioning_state", "aggregate_state",
                     "per_node_extension_details")
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "aggregate_state": {"key": "properties.aggregateState", "type": "str"},
        "per_node_extension_details": {"key": "properties.perNodeExtensionDetails", "type": "[PerNodeExtensionState]"},
        "force_update_tag": {"key": "properties.extensionParameters.forceUpdateTag", "type": "str"},
        "publisher": {"key": "properties.extensionParameters.publisher", "type": "str"},
        "type_properties_extension_parameters_type": {"key": "properties.extensionParameters.type", "type": "str"},
        "type_handler_version": {"key": "properties.extensionParameters.typeHandlerVersion", "type": "str"},
        "auto_upgrade_minor_version": {"key": "properties.extensionParameters.autoUpgradeMinorVersion", "type": "bool"},
        "settings": {"key": "properties.extensionParameters.settings", "type": "object"},
        "protected_settings": {"key": "properties.extensionParameters.protectedSettings", "type": "object"},
        "created_by": {"key": "systemData.createdBy", "type": "str"},
        "created_by_type": {"key": "systemData.createdByType", "type": "str"},
        "created_at": {"key": "systemData.createdAt", "type": "iso-8601"},
        "last_modified_by": {"key": "systemData.lastModifiedBy", "type": "str"},
        "last_modified_by_type": {"key": "systemData.lastModifiedByType", "type": "str"},
        "last_modified_at": {"key": "systemData.lastModifiedAt", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_extension_parameters_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        created_by: Optional[str] = None,
        created_by_type: Optional[Union[str, "CreatedByType"]] = None,
        created_at: Optional[datetime.datetime] = None,
        last_modified_by: Optional[str] = None,
        last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
        last_modified_at: Optional[datetime.datetime] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Server-populated status fields always start out empty on the client.
        self.provisioning_state = None
        self.aggregate_state = None
        self.per_node_extension_details = None
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_extension_parameters_type = type_properties_extension_parameters_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.settings = settings
        self.protected_settings = protected_settings
        self.created_by = created_by
        self.created_by_type = created_by_type
        self.created_at = created_at
        self.last_modified_by = last_modified_by
        self.last_modified_by_type = last_modified_by_type
        self.last_modified_at = last_modified_at
class ExtensionList(msrest.serialization.Model):
    """Paged collection of Extensions in an HCI cluster.

    All attributes are populated by the server and ignored when sending a
    request.

    :ivar value: List of Extensions in HCI cluster.
    :vartype value: list[~azure_stack_hci_client.models.Extension]
    :ivar next_link: Link to the next set of results.
    :vartype next_link: str
    """

    _validation = {
        "value": {"readonly": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[Extension]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = None
        self.next_link = None
class Operation(msrest.serialization.Model):
    """Details of a REST API operation, returned from the Resource Provider Operations API.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: The name of the operation, as per Resource-Based Access Control (RBAC). Examples:
     "Microsoft.Compute/virtualMachines/write", "Microsoft.Compute/virtualMachines/capture/action".
    :vartype name: str
    :ivar is_data_action: Whether the operation applies to data-plane. This is "true" for
     data-plane operations and "false" for ARM/control-plane operations.
    :vartype is_data_action: bool
    :param display: Localized display information for this particular operation.
    :type display: ~azure_stack_hci_client.models.OperationDisplay
    :ivar origin: The intended executor of the operation; as in Resource Based Access Control
     (RBAC) and audit logs UX. Default value is "user,system". Possible values include: "user",
     "system", "user,system".
    :vartype origin: str or ~azure_stack_hci_client.models.Origin
    :ivar action_type: Enum. Indicates the action type. "Internal" refers to actions that are for
     internal only APIs. Possible values include: "Internal".
    :vartype action_type: str or ~azure_stack_hci_client.models.ActionType
    """

    _validation = {
        "name": {"readonly": True},
        "is_data_action": {"readonly": True},
        "origin": {"readonly": True},
        "action_type": {"readonly": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "is_data_action": {"key": "isDataAction", "type": "bool"},
        "display": {"key": "display", "type": "OperationDisplay"},
        "origin": {"key": "origin", "type": "str"},
        "action_type": {"key": "actionType", "type": "str"},
    }

    def __init__(self, *, display: Optional["OperationDisplay"] = None, **kwargs):
        super().__init__(**kwargs)
        # Only `display` may be supplied by callers; the rest is server-set.
        self.name = None
        self.is_data_action = None
        self.display = display
        self.origin = None
        self.action_type = None
class OperationDisplay(msrest.serialization.Model):
    """Localized display information for this particular operation.

    All attributes are populated by the server and ignored when sending a
    request.

    :ivar provider: The localized friendly form of the resource provider name, e.g. "Microsoft
     Monitoring Insights" or "Microsoft Compute".
    :vartype provider: str
    :ivar resource: The localized friendly name of the resource type related to this operation.
     E.g. "Virtual Machines" or "Job Schedule Collections".
    :vartype resource: str
    :ivar operation: The concise, localized friendly name for the operation; suitable for
     dropdowns. E.g. "Create or Update Virtual Machine", "Restart Virtual Machine".
    :vartype operation: str
    :ivar description: The short, localized friendly description of the operation; suitable for
     tool tips and detailed views.
    :vartype description: str
    """

    # Every field is server-populated, hence read-only.
    _validation = {
        name: {"readonly": True}
        for name in ("provider", "resource", "operation", "description")
    }

    _attribute_map = {
        "provider": {"key": "provider", "type": "str"},
        "resource": {"key": "resource", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
        "description": {"key": "description", "type": "str"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.provider = None
        self.resource = None
        self.operation = None
        self.description = None
class OperationListResult(msrest.serialization.Model):
    """A list of REST API operations supported by an Azure Resource Provider. It contains an URL link to get the next set of results.

    All attributes are populated by the server and ignored when sending a
    request.

    :ivar value: List of operations supported by the resource provider.
    :vartype value: list[~azure_stack_hci_client.models.Operation]
    :ivar next_link: URL to get the next set of operation list results (if there are any).
    :vartype next_link: str
    """

    _validation = {
        "value": {"readonly": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[Operation]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = None
        self.next_link = None
class PerNodeExtensionState(msrest.serialization.Model):
    """Status of an Arc Extension for a particular node in an HCI Cluster.

    All attributes are populated by the server and ignored when sending a
    request.

    :ivar name: Name of the node in HCI Cluster.
    :vartype name: str
    :ivar extension: Fully qualified resource ID for the particular Arc Extension on this node.
    :vartype extension: str
    :ivar state: State of Arc Extension in this node. Possible values include: "NotSpecified",
     "Error", "Succeeded", "Canceled", "Failed", "Connected", "Disconnected", "Deleted",
     "Creating", "Updating", "Deleting", "Moving".
    :vartype state: str or ~azure_stack_hci_client.models.NodeExtensionState
    """

    _validation = {
        "name": {"readonly": True},
        "extension": {"readonly": True},
        "state": {"readonly": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "extension": {"key": "extension", "type": "str"},
        "state": {"key": "state", "type": "str"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.name = None
        self.extension = None
        self.state = None
class PerNodeState(msrest.serialization.Model):
    """Status of the Arc agent for a particular node in an HCI Cluster.

    All attributes are populated by the server and ignored when sending a
    request.

    :ivar name: Name of the Node in HCI Cluster.
    :vartype name: str
    :ivar arc_instance: Fully qualified resource ID for the Arc agent of this node.
    :vartype arc_instance: str
    :ivar state: State of Arc agent in this node. Possible values include: "NotSpecified",
     "Error", "Succeeded", "Canceled", "Failed", "Connected", "Disconnected", "Deleted",
     "Creating", "Updating", "Deleting", "Moving".
    :vartype state: str or ~azure_stack_hci_client.models.NodeArcState
    """

    _validation = {
        "name": {"readonly": True},
        "arc_instance": {"readonly": True},
        "state": {"readonly": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "arc_instance": {"key": "arcInstance", "type": "str"},
        "state": {"key": "state", "type": "str"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.name = None
        self.arc_instance = None
        self.state = None
| |
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.paginator import Paginator
from django.db import transaction
from django.db.models import Count, Q
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
from rest_framework.decorators import api_view
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_403_FORBIDDEN
from imagetagger.annotations.models import ExportFormat
from imagetagger.annotations.forms import ExportFormatEditForm
from imagetagger.images.forms import ImageSetCreationForm
from imagetagger.images.models import ImageSet
from imagetagger.users.forms import TeamCreationForm
from .models import Team, User
@login_required
def create_team(request):
    """Render the team creation form and create the team on a valid POST.

    The submitting user becomes the new team's first member with admin
    rights; on success the browser is redirected to the new team's page.
    """
    if request.method == 'POST':
        form = TeamCreationForm(request.POST)
        if form.is_valid():
            # Team creation and the initial admin membership must succeed
            # or fail together.
            with transaction.atomic():
                form.instance.save()
                form.instance.memberships.create(user=request.user, is_admin=True)
            return redirect(reverse('users:team', args=(form.instance.id,)))
    else:
        form = TeamCreationForm()
    return render(request, 'users/create_team.html', {
        'form': form,
    })
@login_required
@require_POST
def revoke_team_admin(request, team_id, user_id):
    """Revoke admin privileges of ``user_id`` inside team ``team_id``.

    Requires the ``user_management`` team permission; a user may never
    revoke their own admin rights. Always redirects back to the team page.
    """
    user = get_object_or_404(User, id=user_id)
    team = get_object_or_404(Team, id=team_id)
    if user == request.user:
        # Bug fix: the message contains no '{}' placeholder, so the
        # stray `.format(team.name)` call was a no-op leftover — removed.
        messages.warning(
            request, _('You can not revoke your own admin privileges.'))
        return redirect(reverse('users:team', args=(team.id,)))
    if team.has_perm('user_management', request.user):
        team.memberships.filter(user=user).update(is_admin=False)
    else:
        messages.warning(
            request,
            _('You do not have permission to revoke this users admin '
              'privileges in the team {}.').format(team.name))
    return redirect(reverse('users:team', args=(team.id,)))
@login_required
@require_POST
def grant_team_admin(request, team_id, user_id):
    """Grant admin privileges to ``user_id`` inside team ``team_id``.

    The caller must be a team member; granting requires the
    ``user_management`` permission, except when the team currently has no
    admins at all (so an orphaned team can recover one).
    """
    user = get_object_or_404(User, id=user_id)
    team = get_object_or_404(Team, id=team_id)
    if not team.members.filter(pk=request.user.pk).exists():
        messages.warning(request, _('You are no member of the team {}.').format(
            team.name))
        return redirect(reverse('users:explore_team'))
    # Allow granting of admin privileges any team member if there is no admin
    allowed = team.has_perm('user_management', request.user) or not team.admins
    if allowed:
        team.memberships.filter(user=user).update(is_admin=True)
    else:
        messages.warning(
            request,
            _('You do not have permission to grant this user admin '
              'privileges in the team {}.').format(team.name))
    return redirect(reverse('users:team', args=(team.id,)))
@login_required
def explore_team(request):
    """List all teams, optionally filtered by a case-insensitive name query.

    Results are paginated 25 per page; ``get_query`` is the query-string
    suffix reused by pagination links in the template.
    """
    from urllib.parse import quote_plus

    teams = Team.objects.all()
    query = request.GET.get('query')
    get_query = ''
    if query:
        teams = teams.filter(name__icontains=query)
        # Bug fix: the raw user input was concatenated unencoded, so a
        # query containing '&', '#' or spaces produced broken pagination
        # links. URL-encode it before embedding it in the query string.
        get_query = '&query=' + quote_plus(str(query))
    paginator = Paginator(teams, 25)
    page = request.GET.get('page')
    page_teams = paginator.get_page(page)
    return render(request, 'base/explore.html', {
        'mode': 'team',
        'teams': page_teams,  # to separate what kind of stuff is displayed in the view
        'paginator': page_teams,  # for page stuff
        'get_query': get_query,
        'query': query,
    })
@login_required
def explore_user(request):
    """List all users ordered by points, optionally filtered by username.

    Results are paginated 25 per page; ``get_query`` is the query-string
    suffix reused by pagination links in the template.
    """
    from urllib.parse import quote_plus

    users = User.objects.all().order_by('-points')
    query = request.GET.get('query')
    get_query = ''
    if query:
        users = users.filter(username__icontains=query)
        # Bug fix: the raw user input was concatenated unencoded, so a
        # query containing '&', '#' or spaces produced broken pagination
        # links. URL-encode it before embedding it in the query string.
        get_query = '&query=' + quote_plus(str(query))
    paginator = Paginator(users, 25)
    page = request.GET.get('page')
    page_users = paginator.get_page(page)
    return render(request, 'base/explore.html', {
        'mode': 'user',
        'users': page_users,  # to separate what kind of stuff is displayed in the view
        'paginator': page_users,  # for page stuff
        'get_query': get_query,
        'query': query,
    })
@login_required
def leave_team(request, team_id, user_id=None):
    """Let a user leave a team, or let a team admin kick another member.

    GET renders a confirmation page; POST removes the membership. When the
    last membership is removed, the team's image sets are made public and
    locked, and the team itself is deleted.
    """
    team = get_object_or_404(Team, id=team_id)
    user = request.user
    warning = _('You are not in the team.')
    # Bug fix: ``user_id`` defaults to None and int(None) raises TypeError,
    # which the original except clause (ValueError only) did not catch, so
    # the plain "leave team" route crashed with a 500. Only convert when a
    # value was actually supplied, and catch both exception types.
    if user_id is not None:
        try:
            user_id = int(user_id)
        except (TypeError, ValueError):
            return redirect(reverse('users:team', args=(team.id,)))
    if user_id and user_id != request.user.pk:
        # Kicking somebody else requires the user_management permission.
        user = get_object_or_404(User, id=user_id)
        warning = _('The user is not in the team.')
        if not team.has_perm('user_management', request.user):
            messages.warning(
                request,
                _('You do not have the permission to kick other users from this team.'))
            return redirect(reverse('users:team', args=(team.id,)))
    if not team.members.filter(pk=user.pk).exists():
        messages.warning(request, warning)
        return redirect(reverse('users:team', args=(team.id,)))
    if request.method == 'POST':
        team.memberships.filter(user=user).delete()
        if team.memberships.count() == 0:
            # The last member left: publish and lock the team's image sets
            # before deleting the team so the data stays reachable.
            for imageset in ImageSet.objects.filter(team=team):
                imageset.public = True
                imageset.image_lock = True
                imageset.save()
            team.delete()
        if user == request.user:
            return redirect(reverse('users:explore_team'))
        return redirect(reverse('users:team', args=(team.id,)))
    return render(request, 'users/leave_team.html', {
        'user': user,
        'team': team,
        'last': team.memberships.count() == 1,
    })
@require_POST
@login_required
def add_team_member(request: HttpRequest, team_id: int) -> HttpResponse:
    """Add the user named in ``POST['username']`` to the given team.

    Requires the ``user_management`` team permission. All outcomes —
    missing permission, unknown username, already a member, success —
    flash a message and redirect back to the team page.
    """
    team = get_object_or_404(Team, id=team_id)
    username = request.POST.get('username')
    team_url = reverse('users:team', args=(team_id,))
    if not team.has_perm('user_management', request.user):
        messages.warning(
            request, _(
                'You do not have the permission to add users to the team {}.')
            .format(team.name))
        return redirect(team_url)
    user = User.objects.filter(username=username).first()
    if user is None:
        messages.warning(request, _('The user {} does not exist.')
                         .format(username))
        return redirect(team_url)
    if team.members.filter(pk=user.pk).exists():
        messages.info(request, _(
            'The user {} is already a member of the team {}.').format(
            username, team.name))
        return redirect(team_url)
    team.memberships.create(user=user)
    messages.success(request,
                     _('The user {} has been added to the team successfully.')
                     .format(username))
    return redirect(team_url)
@login_required
def view_team(request, team_id):
    """Render the team detail page: members, leaderboards, image sets and
    export formats.

    Non-members only see public image sets and public export formats.
    """
    team = get_object_or_404(Team, id=team_id)
    members = team.members.all().order_by('-points')
    # Leaderboard of the last 30 days: verification count per member.
    # NOTE(review): ``'30 days'::interval`` and NOW() are PostgreSQL
    # syntax — this raw query is not portable to other databases.
    # The inner join via the WHERE clause drops members with zero recent
    # verifications from this list entirely.
    members_30 = User.objects.raw('''
        SELECT
            u.id, u.password, u.last_login, u.is_superuser, u.username, u.first_name,
            u.last_name, u.email, u.is_staff, u.is_active, u.date_joined, u.points,
            COUNT(v.id) points_30
        FROM
            users_user u,
            users_teammembership utm,
            annotations_annotation AS a,
            annotations_verification AS v
        WHERE
            utm.team_id = %s AND
            u.id = utm.user_id AND
            a.id = v.annotation_id AND
            a.user_id = u.id AND
            v.time >= NOW() - '30 days'::interval
        GROUP BY
            u.id, u.password, u.last_login, u.is_superuser, u.username, u.first_name,
            u.last_name, u.email, u.is_staff, u.is_active, u.date_joined, u.points
        ORDER BY
            points_30
        DESC
        ''', (team.pk,))
    is_member = request.user in members
    admins = team.admins
    imagesets = ImageSet.objects.filter(team=team).annotate(
        image_count_agg=Count('images')).prefetch_related('set_tags').\
        order_by('-public', 'name')
    export_formats = ExportFormat.objects.filter(
        team=team).prefetch_related('annotations_types').order_by('name')
    if not is_member:
        # Outsiders only get the public subset of formats and image sets.
        export_formats = export_formats.filter(public=True)
        imagesets = imagesets.filter(public=True)
    # Generator: one edit form per export format, consumed by the template.
    export_format_forms = (ExportFormatEditForm(instance=format_instance) for format_instance in export_formats)
    test_imagesets = imagesets.filter(set_tags__name='test').order_by('-public', 'name')
    return render(request, 'users/view_team.html', {
        'team': team,
        'members': members,
        'members_30': members_30,
        'admins': admins,
        'imagesets': imagesets,
        # Sorted in Python: ``time`` / ``image_count`` are presumably model
        # attributes or properties of ImageSet — TODO confirm (note the
        # annotation above is named image_count_agg, not image_count).
        'date_imagesets': sorted(imagesets, key=lambda i: i.time, reverse=True),
        'size_imagesets': sorted(imagesets, key=lambda i: i.image_count, reverse=True),
        'test_imagesets': test_imagesets,
        'imageset_creation_form': ImageSetCreationForm(),
        'team_perms': team.get_perms(request.user),
        'export_formats_forms': export_format_forms,
    })
@login_required
def user(request, user_id):
    """Render the profile page of the user identified by ``user_id``."""
    profile_user = get_object_or_404(User, id=user_id)
    teams = Team.objects.filter(members=profile_user)
    context = {
        'user': profile_user,
        'teams': teams,
        'own_profile': request.user == profile_user,
    }
    return render(request, 'users/view_user.html', context)
@login_required
def delete_account(request, user_id):
    """Let a user delete their own account.

    GET shows a confirmation page listing the user's teams; POST performs
    the deletion. Attempts on somebody else's account are rejected.
    """
    account = get_object_or_404(User, id=user_id)
    teams = Team.objects.filter(members=account)
    if account != request.user:
        messages.error(request, 'You have no permission to delete this user.')
        return redirect(reverse('users:user', args=(account.id,)))
    if request.method == 'POST':
        account.delete()
        messages.warning(request, 'Your account was successfully deleted')
        return redirect('/')
    elif request.method == 'GET':
        return render(request, 'users/delete_account.html', {
            'user': account,
            'teams': teams,
        })
@login_required
@api_view(['GET'])
def user_autocomplete(request) -> Response:
    """Return username suggestions for the ``query`` GET parameter.

    Prefix matches are listed first, followed by usernames that merely
    contain the query. Raises ParseError (HTTP 400) when ``query`` is
    missing or unusable.
    """
    try:
        username_query = str(request.GET['query']).lower()
    except (KeyError, TypeError, ValueError):
        raise ParseError
    # Two queries keep ordering deterministic: prefix matches first, then
    # substring-only matches (excluded from the first set via ~Q).
    user_suggestions = list(User.objects.filter(username__startswith=username_query))
    user_suggestions.extend(list(User.objects.filter(
        ~Q(username__startswith=username_query) & Q(username__contains=username_query))))
    user_suggestions = [user_suggestion.username for user_suggestion in user_suggestions]
    # Bug fix: removed leftover debug print() of the suggestion list.
    return Response({
        'query': username_query,
        'suggestions': user_suggestions,
    }, status=HTTP_200_OK)
| |
import base64
from datetime import datetime, timedelta
import pickle
import shutil
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.models import Session
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.utils import unittest
from django.utils.hashcompat import md5_constructor
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
self.accessed = False
self.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertTrue(self.session.has_key('some key'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(self.session.values(), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(self.session.values(), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.iterkeys()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.itervalues()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.iteritems()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x',1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(self.session.items(), [('x',1)])
self.session.clear()
self.assertEqual(self.session.items(), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = self.session.items()
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(self.session.items(), prev_data)
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
session.save()
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
# Using seconds
self.session.set_expiry(10)
delta = self.session.get_expiry_date() - datetime.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_timedelta(self):
# Using timedelta
self.session.set_expiry(timedelta(seconds=10))
delta = self.session.get_expiry_date() - datetime.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_datetime(self):
# Using fixed datetime
self.session.set_expiry(datetime.now() + timedelta(seconds=10))
delta = self.session.get_expiry_date() - datetime.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
try:
try:
original_expire_at_browser_close = settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = False
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = True
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
except:
raise
finally:
settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = original_expire_at_browser_close
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
    def test_decode_django12(self):
        # Ensure we can decode values encoded using Django 1.2
        # Hard code the Django 1.2 method here:
        def encode(session_dict):
            # Legacy format: base64(pickle + hex_md5(pickle + SECRET_KEY))
            pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
            pickled_md5 = md5_constructor(pickled + settings.SECRET_KEY).hexdigest()
            return base64.encodestring(pickled + pickled_md5)
        data = {'a test key': 'a test value'}
        encoded = encode(data)
        self.assertEqual(self.session.decode(encoded), data)
class DatabaseSessionTests(SessionTestsMixin, TestCase):
    backend = DatabaseSession

    def test_session_get_decoded(self):
        """
        Session.get_decoded returns data that was stored through the
        normal session interface.
        """
        self.session['x'] = 1
        self.session.save()
        stored = Session.objects.get(session_key=self.session.session_key)
        self.assertEqual(stored.get_decoded(), {'x': 1})

    def test_sessionmanager_save(self):
        """
        SessionManager.save overwrites the persisted session data.
        """
        # Create a session row first.
        self.session['y'] = 1
        self.session.save()
        stored = Session.objects.get(session_key=self.session.session_key)
        # Rewrite it directly through the manager.
        Session.objects.save(stored.session_key, {'y':2}, stored.expire_date)
        # Drop the local cache so the value is re-read from the DB.
        del self.session._session_cache
        self.assertEqual(self.session['y'], 2)
class CacheDBSessionTests(SessionTestsMixin, TestCase):
    # Write-through cache+DB backend; all behavior is exercised by the
    # shared SessionTestsMixin tests.
    backend = CacheDBSession
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
    backend = FileSession

    def setUp(self):
        super(FileSessionTests, self).setUp()
        # Run the file-backend tests inside a throwaway directory so they
        # can neither touch nor be polluted by the real session store.
        self.original_session_file_path = settings.SESSION_FILE_PATH
        self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()

    def tearDown(self):
        settings.SESSION_FILE_PATH = self.original_session_file_path
        shutil.rmtree(self.temp_session_store)
        super(FileSessionTests, self).tearDown()

    def test_configuration_check(self):
        # The backend must refuse to start with a nonexistent storage dir.
        settings.SESSION_FILE_PATH = "/if/this/directory/exists/you/have/a/weird/computer"
        self.assertRaises(ImproperlyConfigured, self.backend)

    def test_invalid_key_backslash(self):
        # Directory-traversal attempts via backslashes must be rejected.
        self.assertRaises(SuspiciousOperation,
                          self.backend("a\\b\\c").load)

    def test_invalid_key_forwardslash(self):
        # Directory-traversal attempts via forward slashes must be rejected.
        self.assertRaises(SuspiciousOperation,
                          self.backend("a/b/c").load)
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
    # Pure cache backend; all behavior is exercised by the shared
    # SessionTestsMixin tests (no DB flushing needed, hence TestCase base).
    backend = CacheSession
class SessionMiddlewareTests(unittest.TestCase):
    """Check the cookie flags SessionMiddleware sets on its response.

    The two tests previously duplicated the whole request/response
    plumbing verbatim (and carried a "a request the modifies" typo);
    the shared flow now lives in _session_cookie().
    """

    def setUp(self):
        # Save the cookie-flag settings so each test can mutate them freely.
        self.old_SESSION_COOKIE_SECURE = settings.SESSION_COOKIE_SECURE
        self.old_SESSION_COOKIE_HTTPONLY = settings.SESSION_COOKIE_HTTPONLY

    def tearDown(self):
        settings.SESSION_COOKIE_SECURE = self.old_SESSION_COOKIE_SECURE
        settings.SESSION_COOKIE_HTTPONLY = self.old_SESSION_COOKIE_HTTPONLY

    def _session_cookie(self):
        """Run a session-modifying request through the middleware and
        return the session cookie morsel from the response."""
        request = RequestFactory().get('/')
        response = HttpResponse('Session test')
        middleware = SessionMiddleware()
        # Simulate a request that modifies the session.
        middleware.process_request(request)
        request.session['hello'] = 'world'
        # Handle the response through the middleware.
        response = middleware.process_response(request, response)
        return response.cookies[settings.SESSION_COOKIE_NAME]

    def test_secure_session_cookie(self):
        settings.SESSION_COOKIE_SECURE = True
        self.assertTrue(self._session_cookie()['secure'])

    def test_httponly_session_cookie(self):
        settings.SESSION_COOKIE_HTTPONLY = True
        self.assertTrue(self._session_cookie()['httponly'])
| |
# Copyright (c) <2015> <Sergi Delgado Segura>
# Distributed under the BSD software license, see the accompanying file LICENSE
from ConfigParser import ConfigParser
from urllib2 import urlopen, URLError
from hashlib import sha256
from os import rename
from time import time, sleep
from json import dumps, loads
from base64 import b64encode, b64decode
from random import randint, getrandbits
from requests import post
from os import mkdir, path
from shutil import rmtree
from M2Crypto import EC, BIO, EVP, ASN1, RSA, X509
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc2314 import Signature
from pyasn1.codec.der import encoder, decoder
from Crypto.PublicKey import RSA as tbRSA
from Crypto.Util.number import long_to_bytes
from stem.control import Controller
from utils.bitcoin.tools import get_pub_key_hex, public_key_to_btc_address, btc_address_from_cert, get_balance, private_key_to_wif, get_priv_key_hex
from utils.bitcoin.transactions import reputation_transfer, blockr_unspent, get_tx_signature
from utils.certificate.tools import store_certificate
from utils.tor.tools import tor_query, init_tor
__author__ = 'sdelgado'
############################
#    GLOBAL VARIABLES      #
############################
# Paths to local files (relative to a CS instance's data_path)
P_KEY = 'paysense_public.key'
S_KEY = 'private/paysense.key'
CERT = 'paysense.crt'
WIF = 'wif_qr.png'
tmp = "_tmp/"
# Configuration file data loading
config = ConfigParser()
config.read("paysense.conf")
# Server endpoints — presumably DCS = data collection server and
# ACA = anonymous certification authority; confirm against paysense.conf.
DCS = config.get("Servers", "DCS", )
ACA = config.get("Servers", "ACA", )
# Blinding-factor bit size and number of cut-and-choose candidate certs.
RAND_SIZE = int(config.get("Misc", "RandomSize"))
CERT_COUNT = int(config.get("Misc", "CertCount"))
class CS(object):
    """PaySense Certification Subject (CS).

    Owns the CS's secp256k1 key material and bitcoin-address pseudonym,
    and implements the protocols the CS runs against the ACA and DCS
    servers: cut-and-choose blind certificate registration, signed data
    reporting, and the reputation-exchange / coinjoin mixing flows.
    Python 2 code (print statements, ``long``, ``urllib2``).
    """
    def __init__(self, data_path, new=False):
        # data_path: directory holding this CS's key/cert files.
        # new=True means the identity does not exist yet, so no address
        # can be derived from a stored certificate.
        self.data_path = data_path
        if new:
            self.btc_address = None
        else:
            self.btc_address = btc_address_from_cert(self.data_path + CERT)
    # Generates the CS EC keys.
    def generate_keys(self):
        """Create a secp256k1 key pair, store it (plus a WIF QR image)
        under the temporary directory, and return (EVP.PKey, btc_address).
        """
        # Generate the elliptic curve and the keys
        ec = EC.gen_params(EC.NID_secp256k1)
        ec.gen_key()
        # Generate a Pkey object to store the EC keys
        mem = BIO.MemoryBuffer()
        ec.save_pub_key_bio(mem)
        ec.save_key_bio(mem, None)
        pk = EVP.load_key_bio(mem)
        # Generate the bitcoin address from the public key
        public_key_hex = get_pub_key_hex(ec.pub())
        bitcoin_address = public_key_to_btc_address(public_key_hex, 'test')
        # Save both keys
        if not path.exists(self.data_path + tmp):
            mkdir(self.data_path + tmp)
        ec.save_key(self.data_path + tmp + bitcoin_address + '_key.pem', None)
        ec.save_pub_key(self.data_path + tmp + bitcoin_address + '_public_key.pem')
        # Create the WIF file
        wif = private_key_to_wif(get_priv_key_hex(self.data_path + tmp + bitcoin_address + '_key.pem'), 'image', 'test')
        wif.save(self.data_path + tmp + bitcoin_address + "_WIF.png")
        return pk, bitcoin_address
    def generate_certificate(self, aca_cert, btc_address=None, pkey=None,):
        """Build a certificate whose CN is the CS's bitcoin address.

        Returns (asn1_cert, cert_hash): the pyasn1 Certificate object and
        the SHA-256 digest of its TBS portion, ready for blind signing by
        the ACA. When neither btc_address nor pkey is given, a fresh key
        pair is generated and adopted as this CS's identity.
        """
        if pkey is None and btc_address is None:
            pkey, btc_address = self.generate_keys()
            self.btc_address = btc_address
        issuer = aca_cert.get_issuer()
        # Creating a certificate
        cert = X509.X509()
        # Set issuer
        cert.set_issuer(issuer)
        # Generate CS information
        cert_name = X509.X509_Name()
        cert_name.C = 'CT'
        cert_name.ST = 'Barcelona'
        cert_name.L = 'Bellaterra'
        cert_name.O = 'UAB'
        cert_name.OU = 'DEIC'
        cert_name.CN = btc_address
        cert.set_subject_name(cert_name)
        # Set public_key
        cert.set_pubkey(pkey)
        # Time for certificate to stay valid
        cur_time = ASN1.ASN1_UTCTIME()
        cur_time.set_time(int(time()))
        # Expire certs in 1 year.
        expire_time = ASN1.ASN1_UTCTIME()
        expire_time.set_time(int(time()) + 60 * 60 * 24 * 365)
        # Set the validity
        cert.set_not_before(cur_time)
        cert.set_not_after(expire_time)
        # Sign the certificate using the same key type the CA is going to use later
        # The resulting signature will not be used, it is only for setting the corresponding field into the certificate
        # NOTE(review): 2046-bit RSA looks like a typo for 2048 — confirm.
        rsa_keys = RSA.gen_key(2046, 65537, callback=lambda x, y, z: None)
        rsa_pkey = EVP.PKey()
        rsa_pkey.assign_rsa(rsa_keys)
        cert.sign(rsa_pkey, md='sha256')
        # Load the Certificate as a ASN.1 object and extract the TBS Certificate (special thanks to Alex <ralienpp@gmail.com>)
        asn1_cert = decoder.decode(cert.as_der(), asn1Spec=Certificate())[0]
        tbs = asn1_cert.getComponentByName("tbsCertificate")
        # Compute the sha256 of the TBS Certificate
        tbs_der = encoder.encode(tbs)
        digest = sha256()
        digest.update(tbs_der)
        cert_hash = digest.digest()
        return asn1_cert, cert_hash
    def generate_new_identity(self, new_btc_addr, new_pk, filename='paysense'):
        """Archive the current identity under old/<btc_address>/ and
        install new_btc_addr/new_pk as the active one, requesting a
        signed certificate for it from the ACA."""
        new_dir = "old/" + self.btc_address + "/"
        # Create an 'old' directory if it doesn't exist
        if not path.exists(self.data_path + 'old'):
            mkdir(self.data_path + 'old')
        # Create a directory named by the bitcoin address inside the 'old' directory
        if not path.exists(self.data_path + new_dir):
            mkdir(self.data_path + new_dir)
        # Move all the old data to its new directory
        rename(self.data_path + "private", self.data_path + new_dir + "private")
        rename(self.data_path + filename + ".crt", self.data_path + new_dir + filename + ".crt")
        rename(self.data_path + filename + "_public.key", self.data_path + new_dir + filename + "_public.key")
        if path.exists(self.data_path + self.btc_address):
            rename(self.data_path + self.btc_address, self.data_path + new_dir + self.btc_address)
        aca_cert_text = b64decode(urlopen(ACA + '/get_ca_cert').read())
        aca_cert = X509.load_cert_string(aca_cert_text)
        asn1_cert, cert_hash = self.generate_certificate(aca_cert, new_btc_addr, new_pk)
        # Create a 'private' directory
        if not path.exists(self.data_path + 'private'):
            mkdir(self.data_path + 'private')
        # Create the new identity files
        rename(self.data_path + tmp + new_btc_addr + "_key.pem", self.data_path + "private/" + filename + ".key")
        rename(self.data_path + tmp + new_btc_addr + "_public_key.pem", self.data_path + filename + "_public.key")
        rename(self.data_path + tmp + new_btc_addr + "_WIF.png", self.data_path + "private/wif_qr.png")
        # Touch an empty marker file named after the new address.
        f = open(self.data_path + new_btc_addr, 'w')
        f.close()
        rmtree(self.data_path + tmp)
        certificate = b64encode(encoder.encode(asn1_cert))
        # Send the final certificate to the ACA
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        data = {'certificate': certificate, 'bitcoin_address': new_btc_addr}
        response = post(ACA + "/sign_certificate", data=dumps(data), headers=headers)
        # Store the certificate
        cert_der = b64decode(response.content)
        store_certificate(cert_der, self.data_path + filename)
    # CS registration
    def registration(self, filename='paysense'):
        """Register with the ACA via a cut-and-choose blind signature.

        CERT_COUNT candidate certificates are generated and their blinded
        hashes sent (step 1); the ACA picks index p, all other candidates
        are revealed (step 2), and the blindly-signed hash for p is
        unblinded, verified, and attached to certificate p, which becomes
        this CS's identity. Returns the final HTTP response, an error
        string, or the URLError on network failure.
        """
        certs, certs_der, cert_hashes, blinded_hashes, btc_addresses,rands = [], [], [], [], [], []
        # Create the directories if they don't exist already
        if not path.exists(self.data_path):
            mkdir(self.data_path)
        if not path.exists(self.data_path + "private"):
            mkdir(self.data_path + "private")
        try:
            # Get ACA information
            aca_cert_text = b64decode(urlopen(ACA + '/get_ca_cert').read())
            aca_cert = X509.load_cert_string(aca_cert_text)
            pk = tbRSA.importKey(aca_cert.get_pubkey().as_der())
            # Generate the basic certificates
            for i in range(CERT_COUNT):
                pkey, btc_address = self.generate_keys()
                cert, cert_hash = self.generate_certificate(aca_cert, btc_address, pkey)
                btc_addresses.append(btc_address)
                certs.append(cert)
                cert_hashes.append(cert_hash)
                # Blind the cert hash
                rands.append(getrandbits(RAND_SIZE))
                blinded_hashes.append(pk.blind(cert_hashes[i], rands[i]))
            # Contact the ACA and send her the certificate hash to be signed
            headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
            data = {'cert_hashes': b64encode(str(blinded_hashes)), 'step': 1}
            response = post(ACA + "/sign_in", data=dumps(data), headers=headers)
            # If response is OK
            # NOTE(review): `is 200` relies on CPython small-int caching;
            # should be `== 200` (same below).
            if response.status_code is 200:
                p = int(b64decode(response.content))
                # Prepare the data to be sent to the ACA
                for i in range(len(certs)):
                    if i != p:
                        certs_der.append(encoder.encode(certs[i]))
                    else:
                        # The data in the chosen position is deleted and not sent to the ACA
                        certs_der.append(None)
                        r = rands[i]
                        rands[i] = 0
                # Send the data to the ACA
                data = {'certs': b64encode(str(certs_der)), 'rands': str(rands), 'step': 2}
                response = post(ACA + "/sign_in", data=dumps(data), headers=headers)
                # If response is OK
                if response.status_code is 200:
                    signed_b_hash = b64decode(response.content)
                    signature = pk.unblind(long(signed_b_hash), r)
                    # Check that the signature is valid
                    if pk.verify(cert_hashes[p], [signature, 0]):
                        # Attach the signature to the certificate
                        bin_signature = Signature("'%s'H" % ''.join("%02X" % ord(c) for c in long_to_bytes(signature)))
                        certs[p].setComponentByName("signatureValue", bin_signature)
                        # Set the bitcoin address to the chosen one
                        self.btc_address = btc_addresses[p]
                        # Rename and move the keys associated with the chosen bitcoin address
                        if not path.exists(self.data_path + "/private"):
                            mkdir(self.data_path + "private")
                        rename(self.data_path + tmp + self.btc_address + "_key.pem", self.data_path + "/private/paysense.key")
                        rename(self.data_path + tmp + self.btc_address + "_public_key.pem", self.data_path + "paysense_public.key")
                        rename(self.data_path + tmp + self.btc_address + "_WIF.png", self.data_path + "private/wif_qr.png")
                        # Delete the temp folder and all the other keys
                        rmtree(self.data_path + tmp)
                        # Store the certificate
                        der_cert = encoder.encode(certs[p])
                        store_certificate(der_cert, self.data_path + filename)
                        # Send the final certificate to the ACA
                        data = {'certificate': b64encode(der_cert), 'bitcoin_address': self.btc_address}
                        response = post(ACA + "/store_certificate", data=dumps(data), headers=headers)
                    else:
                        response = "Invalid certificate signature"
            return response
        except URLError as e:
            return e
    # Reports the data gathered by the CS
    def report_data(self, message, certificate=False):
        """Sign `message` with the CS private key and POST it to the DCS.

        When certificate is True the PEM certificate is attached so the
        DCS can verify the signature itself. Returns the DCS response as
        (status_code, reason, body).
        """
        # Load CS private key
        ec = EC.load_key(self.data_path + S_KEY)
        signature = ec.sign_dsa_asn1(message)
        signature = b64encode(signature)
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        data = {'message': message, 'signature': signature, 'bitcoin_address': self.btc_address}
        if certificate is True:
            f = open(self.data_path + CERT, 'r')
            cs_pem_data = b64encode(f.read())
            f.close()
            data['cs_pem_data'] = cs_pem_data
        r = post(DCS, data=dumps(data), headers=headers)
        return r.status_code, r.reason, r.content
    # This test emulates the CS reputation exchange when he doesn't trust any other CS nor the ACA
    def self_reputation_exchange(self, new_btc_address, outside_btc_address=None, fee=1000):
        """Move this CS's whole balance to new_btc_address, optionally
        withdrawing a random 2-5% slice to outside_btc_address first,
        then notify the ACA of the exchange."""
        address_balance = get_balance(self.btc_address)
        if outside_btc_address is not None:
            # ToDo: Perform a proper way to withdraw reputation
            reputation_withdraw = (float(randint(2, 5)) / 100) * address_balance
            tx_hash, _ = reputation_transfer(self.data_path + S_KEY, self.btc_address, new_btc_address, address_balance, outside_btc_address, int(reputation_withdraw) - fee, fee)
        else:
            tx_hash, _ = reputation_transfer(self.data_path + S_KEY, self.btc_address, new_btc_address, address_balance - fee, fee=fee)
        response = urlopen(ACA + '/reputation_exchange?new_btc_address=' + new_btc_address)
        return response
    def coinjoin_reputation_exchange(self, amount, fee=1000):
        """Run the coinjoin-style mixing protocol against the ACA's tor
        hidden service: submit the mixing output, input, and signature
        over separate tor circuits, poll for confirmation, then rotate to
        the freshly generated identity. Returns the server's data payload
        on success or a descriptive error string otherwise."""
        # Get onion server address and the mixing amount from the ACA
        data = loads(urlopen(ACA + '/get_tor_address').read())
        tor_server = data.get("address")
        mixing_amount = data.get("amount")
        if mixing_amount == amount:
            utxo = self.get_mixing_utxo(amount, fee)
            if utxo is not None:
                # Create the address that will be used as a new pseudonym
                new_btc_addr_pk, new_btc_addr = self.generate_keys()
                # Build the output of the mixing transaction
                mixing_output = [{'value': amount, 'address': new_btc_addr}]
                # Build the input of the mixing transaction
                mixing_input = [{'output': utxo, 'value': amount + fee}]
                print "Connecting to " + tor_server
                # ToDo: Uncomment, actually running tor from terminal since testing server and client from the same machine
                # print(term.format("Starting Tor:\n", term.Attr.BOLD))
                # tor_process, controller = init_tor()
                # ToDo: Delete the following two lines when the above one is uncommented
                controller = Controller.from_port()
                controller.authenticate()
                headers = ['Content-type: application/json', 'Accept: text/plain']
                # Send reputation exchange output
                data = dumps({'outputs': mixing_output})
                code, response = tor_query(tor_server + "/outputs", 'POST', data, headers)
                # NOTE(review): `is 200` should be `== 200` (here and below).
                if code is 200:
                    print "Output correctly sent. Resetting tor connection"
                    controller.new_circuit()
                    timer = float(loads(response).get("data"))
                    print "Waiting " + str(timer) + " for sending the input"
                    sleep(timer)
                    # Send reputation exchange input
                    data = dumps({'inputs': mixing_input})
                    code, response = tor_query(tor_server + "/inputs", 'POST', data, headers)
                    if code is 200:
                        print "Input correctly sent. Resetting tor connection"
                        controller.new_circuit()
                        timer = float(loads(response).get("data"))
                        print "Waiting " + str(timer) + " for getting the tx to be signed"
                        sleep(timer)
                        # Get tx hash to sign it
                        code, response = tor_query(tor_server + '/signatures')
                        if code is 200:
                            private_key_hex = get_priv_key_hex(self.data_path + S_KEY)
                            public_key = EC.load_pub_key(self.data_path + P_KEY)
                            public_key_hex = get_pub_key_hex(public_key.pub())
                            signature, index = get_tx_signature(response, private_key_hex, self.btc_address)
                            data = {'signature': signature, 'index': index, 'public_key': public_key_hex}
                            data = dumps({'data': data})
                            code, response = tor_query(tor_server + "/signatures", 'POST', data, headers)
                            if code is 200:
                                timer = float(loads(response).get("data"))
                                print "Waiting " + str(timer) + " for the transaction to be completed"
                                sleep(timer)
                                # Poll until the server confirms the mixing tx.
                                confirmed = False
                                while not confirmed:
                                    code, response = tor_query(tor_server + '/confirmation')
                                    data = loads(response)
                                    confirmed = bool(data.get("confirmation"))
                                    timer = float(data.get("time"))
                                    print "Waiting " + str(timer) + " for the transaction correctness confirmation"
                                    sleep(timer)
                                print "Transaction confirmed"
                                self.generate_new_identity(new_btc_addr, new_btc_addr_pk)
                                data = loads(response).get("data")
                                result = data
                            else:
                                try:
                                    data = loads(response).get("data")
                                    result = data
                                except ValueError:
                                    result = "Error sending signatures. " + str(response)
                        else:
                            try:
                                data = loads(response).get("data")
                                result = data
                            except ValueError:
                                result = "Error getting signatures. " + str(response)
                    else:
                        try:
                            data = loads(response).get("data")
                            result = data
                        except ValueError:
                            result = "Error sending inputs. " + str(response)
                else:
                    try:
                        data = loads(response).get("data")
                        result = data
                    except ValueError:
                        result = "Error sending outputs. " + str(response)
            else:
                result = "You have not enough reputation to perform a reputation exchange. Minimum amount: " + str(amount) + " + " + str(fee) + " (transaction fee)."
        else:
            result = "The mixing server does not provide a mixing process for the chosen reputation amount"
        return result
    def get_mixing_utxo(self, amount, fee):
        """Find or create a utxo worth exactly amount + fee to feed the
        mixing transaction. Returns an 'txid:n' output string, or None if
        the balance is insufficient."""
        # Get the address current balance
        address_balance = get_balance(self.btc_address)
        # Get the address utxo set
        utxo_set = blockr_unspent(self.btc_address, 'testnet')
        transaction_hash = None
        if address_balance == amount + fee and len(utxo_set) is 1:
            # Case 0. If the address balance is exactly amount + fee the only way to perform the transaction is if there is only one utxo. Otherwise, some balance would be expended to
            # create a utxo with amount + fee and the total balance will be reduced, concluding in a balance < amount + fee.
            transaction_hash = utxo_set[0].get("output")
        elif address_balance > amount + fee:
            # If the balance is greater that amount + fee, a utxo with the exact amount ( amount + fee) will be looked for in the utxo pool.
            utxo_n = None
            for utxo in utxo_set:
                if utxo.get("value") == amount + fee:
                    utxo_n = utxo
                    break
            # Case 2. If it could be found, that will be the utxo to be used.
            if utxo_n is not None:
                transaction_hash = utxo_n.get("output")
            # Case 1 and 3. Otherwise, a transaction to create a utxo of amount + fee should be performed, only if the balance is greater that amount + 2 fee.
            elif address_balance >= amount + 2 * fee:
                transaction_hash, used_tx = reputation_transfer(self.data_path + S_KEY, self.btc_address, self.btc_address, amount + fee, fee=fee)
                if transaction_hash is not None:
                    transaction_hash += ":0"
        return transaction_hash
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import faulthandler
from pylons import tmpl_context as c
from ming.orm import ThreadLocalORMSession
from allura import model as M
from allura.lib.utils import chunked_find, chunked_list
from allura.scripts import ScriptTask
log = logging.getLogger(__name__)
class RefreshRepo(ScriptTask):
    """ScriptTask that rescans on-disk repositories and rebuilds their
    commit metadata in MongoDB, optionally wiping the existing docs
    first (--clean). Fixes vs. previous revision: 'filesytem' typo in
    the user-facing help text, and the bare ``except:`` (which also
    swallowed SystemExit/KeyboardInterrupt) narrowed to ``Exception``.
    """

    @classmethod
    def execute(cls, options):
        # Build the project query from the CLI filters.
        q_project = {}
        if options.nbhd:
            nbhd = M.Neighborhood.query.get(url_prefix=options.nbhd)
            if not nbhd:
                return "Invalid neighborhood url prefix."
            q_project['neighborhood_id'] = nbhd._id
        if options.project:
            q_project['shortname'] = options.project
        elif options.project_regex:
            q_project['shortname'] = {'$regex': options.project_regex}
        log.info('Refreshing repositories')
        for chunk in chunked_find(M.Project, q_project):
            for p in chunk:
                log.info("Refreshing repos for project '%s'." % p.shortname)
                if options.dry_run:
                    continue
                c.project = p
                if options.mount_point:
                    mount_points = [options.mount_point]
                else:
                    mount_points = [ac.options.mount_point for ac in
                                    M.AppConfig.query.find(dict(project_id=p._id))]
                for app in (p.app_instance(mp) for mp in mount_points):
                    c.app = app
                    if not hasattr(app, 'repo'):
                        continue
                    if c.app.repo.tool.lower() not in options.repo_types:
                        log.info("Skipping %r: wrong type (%s)", c.app.repo,
                                 c.app.repo.tool.lower())
                        continue
                    if options.clean:
                        ci_ids = list(c.app.repo.all_commit_ids())
                        log.info("Deleting mongo data for %i commits...",
                                 len(ci_ids))
                        # like the tree_ids themselves below, we need to process these in
                        # chunks to avoid hitting the BSON max size limit
                        tree_ids = []
                        for ci_ids_chunk in chunked_list(ci_ids, 3000):
                            tree_ids.extend([
                                tree_id for doc in
                                M.repository.TreesDoc.m.find(
                                    {"_id": {"$in": ci_ids_chunk}},
                                    {"tree_ids": 1})
                                for tree_id in doc.get("tree_ids", [])])
                            i = M.repository.CommitDoc.m.find(
                                {"_id": {"$in": ci_ids_chunk}}).count()
                            if i:
                                log.info("Deleting %i CommitDoc docs...", i)
                                M.repository.CommitDoc.m.remove(
                                    {"_id": {"$in": ci_ids_chunk}})
                        # delete these in chunks, otherwise the query doc can
                        # exceed the max BSON size limit (16MB at the moment)
                        for tree_ids_chunk in chunked_list(tree_ids, 300000):
                            i = M.repository.TreeDoc.m.find(
                                {"_id": {"$in": tree_ids_chunk}}).count()
                            if i:
                                log.info("Deleting %i TreeDoc docs...", i)
                                M.repository.TreeDoc.m.remove(
                                    {"_id": {"$in": tree_ids_chunk}})
                        del tree_ids
                        # delete these after TreeDoc and LastCommitDoc so that if
                        # we crash, we don't lose the ability to delete those
                        for ci_ids_chunk in chunked_list(ci_ids, 3000):
                            # delete TreesDocs
                            i = M.repository.TreesDoc.m.find(
                                {"_id": {"$in": ci_ids_chunk}}).count()
                            if i:
                                log.info("Deleting %i TreesDoc docs...", i)
                                M.repository.TreesDoc.m.remove(
                                    {"_id": {"$in": ci_ids_chunk}})
                            # delete LastCommitDocs
                            i = M.repository.LastCommitDoc.m.find(
                                dict(commit_ids={'$in': ci_ids_chunk})).count()
                            if i:
                                log.info(
                                    "Deleting %i remaining LastCommitDoc docs, by repo id...", i)
                                M.repository.LastCommitDoc.m.remove(
                                    dict(commit_ids={'$in': ci_ids_chunk}))
                            i = M.repository.CommitRunDoc.m.find(
                                {"commit_ids": {"$in": ci_ids_chunk}}).count()
                            if i:
                                log.info("Deleting %i CommitRunDoc docs...", i)
                                M.repository.CommitRunDoc.m.remove(
                                    {"commit_ids": {"$in": ci_ids_chunk}})
                        del ci_ids
                    try:
                        if options.all:
                            log.info('Refreshing ALL commits in %r',
                                     c.app.repo)
                        else:
                            log.info('Refreshing NEW commits in %r',
                                     c.app.repo)
                        if options.profile:
                            import cProfile
                            cProfile.runctx(
                                'c.app.repo.refresh(options.all, notify=options.notify)',
                                globals(), locals(), 'refresh.profile')
                        else:
                            c.app.repo.refresh(
                                options.all, notify=options.notify)
                    except Exception:
                        # Keep going: one broken repo must not abort the batch.
                        log.exception('Error refreshing %r', c.app.repo)
        ThreadLocalORMSession.flush_all()

    @classmethod
    def parser(cls):
        """Build the argparse parser for this task's CLI options."""
        def repo_type_list(s):
            # argparse `type=` hook: comma-separated string -> validated list.
            repo_types = []
            for repo_type in s.split(','):
                repo_type = repo_type.strip()
                if repo_type not in ['svn', 'git', 'hg']:
                    raise argparse.ArgumentTypeError(
                        '{} is not a valid repo type.'.format(repo_type))
                repo_types.append(repo_type)
            return repo_types
        parser = argparse.ArgumentParser(description='Scan repos on filesystem and '
                                         'update repo metadata in MongoDB. Run for all repos (no args), '
                                         'or restrict by neighborhood, project, or code tool mount point.')
        parser.add_argument('--nbhd', action='store', default='', dest='nbhd',
                            help='Restrict update to a particular neighborhood, e.g. /p/.')
        parser.add_argument(
            '--project', action='store', default='', dest='project',
            help='Restrict update to a particular project. To specify a '
            'subproject, use a slash: project/subproject.')
        parser.add_argument('--project-regex', action='store', default='',
                            dest='project_regex',
                            help='Restrict update to projects for which the shortname matches '
                            'the provided regex.')
        parser.add_argument(
            '--repo-types', action='store', type=repo_type_list,
            default=['svn', 'git', 'hg'], dest='repo_types',
            help='Only refresh repos of the given type(s). Defaults to: '
            'svn,git,hg. Example: --repo-types=git,hg')
        parser.add_argument('--mount-point', default='', dest='mount_point',
                            help='Restrict update to repos at the given tool mount point. ')
        parser.add_argument('--clean', action='store_true', dest='clean',
                            default=False, help='Remove repo-related mongo docs (for '
                            'project(s) being refreshed only) before doing the refresh.')
        parser.add_argument(
            '--all', action='store_true', dest='all', default=False,
            help='Refresh all commits (not just the ones that are new).')
        parser.add_argument('--notify', action='store_true', dest='notify',
                            default=False, help='Send email notifications of new commits.')
        parser.add_argument('--dry-run', action='store_true', dest='dry_run',
                            default=False, help='Log names of projects that would have their '
                            'repos refreshed, but do not perform the actual refresh.')
        parser.add_argument('--profile', action='store_true', dest='profile',
                            default=False, help='Enable the profiler (slow). Will log '
                            'profiling output to ./refresh.profile')
        return parser
def get_parser():
    """Module-level accessor exposing RefreshRepo's argparse parser."""
    return RefreshRepo.parser()
if __name__ == '__main__':
    # Dump Python tracebacks on fatal signals — long refreshes can hang.
    faulthandler.enable()
    RefreshRepo.main()
| |
from rdkit import DataStructs
from rdkit import RDConfig
import unittest,os
def feq(a, b, tol=1e-4):
    """Approximate float equality: True when |a - b| < tol."""
    return abs(a - b) < tol
class TestCase(unittest.TestCase):
    def setUp(self) :
        # Fixture data lives in the RDKit source tree under
        # Code/DataStructs/testData.
        self.dirname = os.path.join(RDConfig.RDBaseDir,'Code','DataStructs','testData')
        self.filename = os.path.join(self.dirname,'zim.head100.fpb')
        # Init() prepares the FPBReader for lookups; it is always called
        # right after construction in these tests.
        self.fpbr = DataStructs.FPBReader(self.filename)
        self.fpbr.Init()
def test1Basics(self) :
self.assertEqual(len(self.fpbr),100)
self.assertEqual(self.fpbr.GetNumBits(),2048)
self.assertEqual(self.fpbr.GetId(0),"ZINC00902219")
self.assertEqual(self.fpbr.GetId(3),"ZINC04803506")
fp = self.fpbr.GetFP(0)
self.assertEqual(fp.GetNumBits(),2048)
self.assertEqual(fp.GetNumOnBits(),17)
obs = (1, 80, 183, 222, 227, 231, 482, 650, 807,
811, 831, 888, 1335, 1411, 1664, 1820, 1917)
obl = tuple(fp.GetOnBits())
self.assertEqual(obs,obl)
# test operator[]
fp,nm = self.fpbr[0]
self.assertEqual(nm,"ZINC00902219")
self.assertEqual(fp.GetNumOnBits(),17)
def test2Tanimoto(self) :
bv = self.fpbr.GetBytes(0)
self.assertAlmostEqual(self.fpbr.GetTanimoto(0,bv),1.0,4)
self.assertAlmostEqual(self.fpbr.GetTanimoto(1,bv),0.3704,4)
tpl = self.fpbr.GetTanimotoNeighbors(bv)
self.assertEqual(len(tpl),1)
self.assertEqual(tpl[0][1],0)
self.assertAlmostEqual(tpl[0][0],1.,4)
tpl = self.fpbr.GetTanimotoNeighbors(bv,threshold=0.3)
self.assertEqual(len(tpl),5)
self.assertEqual(tpl[0][1],0)
self.assertAlmostEqual(tpl[0][0],1.,4)
self.assertEqual(tpl[1][1],1)
self.assertAlmostEqual(tpl[1][0],0.3704,4)
def test3Tversky(self) :
bv = self.fpbr.GetBytes(0)
self.assertAlmostEqual(self.fpbr.GetTversky(0,bv,1,1),1.0,4)
self.assertAlmostEqual(self.fpbr.GetTversky(1,bv,1,1),0.3704,4)
tpl = self.fpbr.GetTverskyNeighbors(bv,1,1)
self.assertEqual(len(tpl),1)
self.assertEqual(tpl[0][1],0)
self.assertAlmostEqual(tpl[0][0],1.,4)
tpl = self.fpbr.GetTverskyNeighbors(bv,1,1,threshold=0.3)
self.assertEqual(len(tpl),5)
self.assertEqual(tpl[0][1],0)
self.assertAlmostEqual(tpl[0][0],1.,4)
self.assertEqual(tpl[1][1],1)
self.assertAlmostEqual(tpl[1][0],0.3704,4)
def test4Contains(self):
bv = self.fpbr.GetBytes(0)
nbrs = self.fpbr.GetContainingNeighbors(bv)
self.assertEqual(len(nbrs),1)
self.assertEqual(nbrs[0],0)
bv = self.fpbr.GetBytes(1)
nbrs = self.fpbr.GetContainingNeighbors(bv)
self.assertEqual(len(nbrs),4)
self.assertEqual(nbrs,(1,2,3,4))
    def test5Contains(self):
        " an example based on substructure screening "
        filename = os.path.join(RDConfig.RDBaseDir,'Code','DataStructs','testData','zinc_all_clean.100.patt1k.fpb')
        fpbr = DataStructs.FPBReader(filename)
        fpbr.Init()
        # Raw pattern fingerprint of the query (do not reformat: the byte
        # string must stay byte-exact to match the reader's bit width).
        bytes = b'\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000\x00@\x00 \x00\x00 \x00\x00\x02@\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x07\x00\x04\x00"\x14\x02\x00\x00"\x00\x00\x00\x00\x08\x00\x80\x00\x00@\x00@\x00\x80\x00\x00\x00\x00B\x00\x00\x80\x00\x80\x08\x00\x04\x00@\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x80\x04\x00\x00\x0c\x00\x00\x00@\x88\x10\x10\x00\x00\x88\x00@'
        nbrs = fpbr.GetContainingNeighbors(bytes)
        self.assertEqual(len(nbrs),9)
        # Compare by sorted ZINC id so the check is order-insensitive.
        ids = sorted(fpbr.GetId(x) for x in nbrs)
        self.assertEqual(ids,['ZINC00000562',
                              'ZINC00000843',
                              'ZINC00000969',
                              'ZINC00001484',
                              'ZINC00001585',
                              'ZINC00002094',
                              'ZINC00004739',
                              'ZINC00005235',
                              'ZINC00006300'])
def test6MultiFPBReaderTani(self):
    """Tanimoto searches across multiple FPB readers, serial and threaded."""
    data_dir = os.path.join(RDConfig.RDBaseDir, 'Code', 'DataStructs', 'testData')
    multi = DataStructs.MultiFPBReader()
    for i, fname in enumerate(("zinc_random200.1.patt.fpb",
                               "zinc_random200.2.patt.fpb",
                               "zinc_random200.3.patt.fpb",
                               "zinc_random200.4.patt.fpb"), 1):
        child = DataStructs.FPBReader(os.path.join(data_dir, fname))
        # AddReader returns the running count of child readers
        self.assertEqual(multi.AddReader(child), i)
    multi.Init()
    self.assertEqual(multi.GetNumBits(), 1024)
    self.assertEqual(len(multi), 4)
    fps = ("0000000000404000100000001000040000300040222000002004000240000020000000"
           "8200010200000090000024040860070044003214820000220401054008018000226000"
           "4800800140000042000080008008020482400000200410800000300430200800400000"
           "0000080a0000800400010c800200648818100010880040")
    query = DataStructs.BitVectToBinaryText(DataStructs.CreateFromFPSText(fps))
    # (similarity, fp index, reader index), best-first
    expected = [(0.66412, 0, 3),
                (0.65289, 1, 2),
                (0.64341, 2, 1),
                (0.61940, 1, 0),
                (0.61905, 0, 0),
                (0.61344, 0, 1)]
    # serial search, then multi-threaded (a no-op if the RDKit isn't
    # compiled with thread support)
    for kwargs in ({}, {'numThreads': 4}):
        nbrs = multi.GetTanimotoNeighbors(query, threshold=0.6, **kwargs)
        self.assertEqual(len(nbrs), len(expected))
        for hit, (sim, fp_idx, reader_idx) in zip(nbrs, expected):
            self.assertAlmostEqual(hit[0], sim, 4)
            self.assertEqual(hit[1], fp_idx)
            self.assertEqual(hit[2], reader_idx)
def test7MultiFPBReaderContains(self):
    """Containing-neighbor searches across multiple readers."""
    data_dir = os.path.join(RDConfig.RDBaseDir, 'Code', 'DataStructs', 'testData')
    multi = DataStructs.MultiFPBReader()
    for i, fname in enumerate(("zinc_random200.1.patt.fpb",
                               "zinc_random200.2.patt.fpb",
                               "zinc_random200.3.patt.fpb",
                               "zinc_random200.4.patt.fpb"), 1):
        child = DataStructs.FPBReader(os.path.join(data_dir, fname))
        self.assertEqual(multi.AddReader(child), i)
    multi.Init()
    self.assertEqual(multi.GetNumBits(), 1024)
    self.assertEqual(len(multi), 4)
    fps = ("40081010824820021000500010110410003000402b20285000a4040240010030050000"
           "080001420040009000003d04086007080c03b31d920004220400074008098010206080"
           "00488001080000c64002a00080000200024c2000602410049200340820200002400010"
           "02200106090401056801080182006088101000088a0048")
    query = DataStructs.BitVectToBinaryText(DataStructs.CreateFromFPSText(fps))
    # (fp index, reader index) pairs, in result order
    expected = [(160, 0), (163, 0), (170, 0), (180, 2), (182, 3),
                (185, 0), (189, 0), (192, 3), (193, 0)]
    # serial search, then multi-threaded
    for kwargs in ({}, {'numThreads': 4}):
        nbrs = multi.GetContainingNeighbors(query, **kwargs)
        self.assertEqual(len(nbrs), len(expected))
        for hit, (fp_idx, reader_idx) in zip(nbrs, expected):
            self.assertEqual(hit[0], fp_idx)
            self.assertEqual(hit[1], reader_idx)
def test8MultiFPBReaderContainsInitOnSearch(self):
    """initOnSearch=True: readers are initialized lazily by the search itself."""
    data_dir = os.path.join(RDConfig.RDBaseDir, 'Code', 'DataStructs', 'testData')
    multi = DataStructs.MultiFPBReader(initOnSearch=True)
    for i, fname in enumerate(("zinc_random200.1.patt.fpb",
                               "zinc_random200.2.patt.fpb",
                               "zinc_random200.3.patt.fpb",
                               "zinc_random200.4.patt.fpb"), 1):
        child = DataStructs.FPBReader(os.path.join(data_dir, fname))
        self.assertEqual(multi.AddReader(child), i)
    # note: no explicit Init() call here, that's the point of the test
    fps = ("40081010824820021000500010110410003000402b20285000a4040240010030050000"
           "080001420040009000003d04086007080c03b31d920004220400074008098010206080"
           "00488001080000c64002a00080000200024c2000602410049200340820200002400010"
           "02200106090401056801080182006088101000088a0048")
    query = DataStructs.BitVectToBinaryText(DataStructs.CreateFromFPSText(fps))
    expected = [(160, 0), (163, 0), (170, 0), (180, 2), (182, 3),
                (185, 0), (189, 0), (192, 3), (193, 0)]
    nbrs = multi.GetContainingNeighbors(query, numThreads=4)
    self.assertEqual(len(nbrs), len(expected))
    for hit, (fp_idx, reader_idx) in zip(nbrs, expected):
        self.assertEqual(hit[0], fp_idx)
        self.assertEqual(hit[1], reader_idx)
def test9MultiFPBReaderEdges(self):
    """Edge case: searching a MultiFPBReader with no child readers."""
    multi = DataStructs.MultiFPBReader()
    multi.Init()
    fps = ("0000000000404000100000001000040000300040222000002004000240000020000000"
           "8200010200000090000024040860070044003214820000220401054008018000226000"
           "4800800140000042000080008008020482400000200410800000300430200800400000"
           "0000080a0000800400010c800200648818100010880040")
    query = DataStructs.BitVectToBinaryText(DataStructs.CreateFromFPSText(fps))
    # with no readers attached, any search must come back empty
    self.assertEqual(len(multi.GetTanimotoNeighbors(query, threshold=0.6)), 0)
# Run the FPB reader test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| |
'''
AUTHOR:
Created by Crashed on Feb 07 2009.
USAGE:
DMIREAD(path)
returns a list of Icons retrieved from the DMI
DMIWRITE(icons[], path)
given a list of Icons, this method will save the data to a dmi you specify (path)
Icon
How to get images from the icon:
Icon.icons is a map of all the images the icon holds.
Icon.icons[DMI_WEST][3] - fourth frame in the west direction.
Images without multiple frames and directions are still grabbed the same:
Icon.icons[DMI_SOUTH][0]
How to get delay for each frame:
Delays are stored in a list, Icon.delays.
If there are 5 frames, there are 5 delays.
[1, 1, 1, 1, 1] by default.
TODO:
Assuming reading DMIs is working flawlessly, saving DMIs would be the next task. [x]
Reading and writing appears to work great, so what's left to do is to find bugs. [ ]
'''
import Image
import re
import os
import sys
sys.path.insert(0, os.path.split(__file__)[0])
import PngImagePlugin
import traceback
import copy
# Directory containing optipng.exe (used by DMIWRITE's optional crush step);
# assumed to be the current working directory at import time.
optipng = os.getcwd()
# BYOND/DMI direction indices; these index the first axis of Icon.icons.
DMI_SOUTH = 0
DMI_NORTH = 1
DMI_EAST = 2
DMI_WEST = 3
DMI_SOUTHEAST = 4
DMI_SOUTHWEST = 5
DMI_NORTHEAST = 6
DMI_NORTHWEST = 7
class Icon(object):
    """One icon state from a DMI file.

    Holds every direction/frame image for the state plus its metadata
    (delays, loop count, rewind/movement flags, hotspot data).
    """
    def __init__(self):
        self.origin = None    # filename of the DMI this icon was read from
        self.width = None     # frame width in pixels
        self.height = None    # frame height in pixels
        self.state = ''       # icon state name
        self.dirs = 1         # number of dirs (directions)
        self.frames = 1       # number of frames
        self.icons = []       # icons[dir][frame] -> image for that dir/frame
        self.delays = []      # delay numbers, one for each frame
        self.loops = 0        # times to loop
        self.rewind = 0       # rewind flag
        self.movement = 0     # movement flag
        self.hotspot = []     # hotspots flag
    def __repr__(self):
        return 'Icon(origin=%r,width=%r,height=%r,state=%r,dirs=%r,frames=%r,icons=%r,delays=%r,loops=%r,rewind=%r,movement=%r,hotspot=%r)' % \
            (self.origin, self.width, self.height, self.state, self.dirs, self.frames, self.icons, self.delays, self.loops, self.rewind, self.movement, self.hotspot)
    def __str__(self):
        return 'DMI Icon (file: %s, size: %sx%s, state: "%s", dirs: %i, frames: %i, icons: %i, delays: %s, loops: %i, rewind: %i, movement: %i, hotspots: %i)' % \
            (self.origin, self.width, self.height, self.state, self.dirs, self.frames, len(flatten_array(self.icons)), self.delays, self.loops, self.rewind, self.movement, len(flatten_array(self.hotspot)))
    def copy(self, deep=True):
        """Return a copy of this icon.

        With deep=True (default) the image lists, delay list and hotspot
        list are duplicated so the copy can be mutated independently; with
        deep=False those objects are shared with the original, so a change
        to one changes both.
        """
        icon = Icon()
        # bugfix: copy() previously dropped origin/width/height/hotspot,
        # producing copies that forgot where they came from
        icon.origin = self.origin
        icon.width = self.width
        icon.height = self.height
        icon.state = self.state
        icon.dirs = self.dirs
        icon.frames = self.frames
        if not deep:
            # original and copied icon share the delay list, hotspot list
            # and icon image objects
            icon.icons = self.icons
            icon.delays = self.delays
            icon.hotspot = self.hotspot
        else:
            icon.icons = [[frame.copy() for frame in dir_frames]
                          for dir_frames in self.icons]
            icon.delays = copy.copy(self.delays)
            icon.hotspot = copy.copy(self.hotspot)
        icon.loops = self.loops
        icon.rewind = self.rewind
        icon.movement = self.movement
        return icon
def DMIINFOREAD(path):
# Return DMI metadata without cropping images
try:
try:
states, (width, height) = DMIINFO(Image.open(path).info['Description'])
return states, (width, height)
except IOError:
return None
except KeyError:
dmif = open(path, 'rb')
if dmif.read(4).endswith('DMI'):
print >> sys.stderr, '[DMIREAD] Failed to load icon `%s`\n\Unable to load DMI v3 icons.\n' % path
return None
except IOError:
print >> sys.stderr, '[DMIREAD] File `%s` non existant.' % path
print >> sys.stderr, traceback.format_exc()
except Exception:
print >> sys.stderr, '[DMIREAD]', traceback.format_exc()
return None
def DMIINFO(info):
    """Parse the 'Description' metadata block of a DMI file.

    Returns (icon_states, (width, height)) where icon_states is a list of
    (state, dirs, frames, delays, rewind, loops, movement, hotspot) tuples.
    On malformed input a negative int error code is returned instead:
    -1 input too short, -2 missing '# BEGIN DMI' header, -3 no key/value
    pairs found, -4 unsupported version.
    """
    icon_states = []  # [(name, dirs, frames, ...), ]
    width = -1
    height = -1
    if len(info) < 11: return -1
    if info[0:11] != '# BEGIN DMI': return -2
    groups = re.findall('(.+) = (.+)', info)
    if not groups or not len(groups): return -3
    def split_delays(delay):
        # delays are saved as "1,2,1,1" - split into a list of ints;
        # a single plain integer is returned as an int, not a list
        if ',' not in delay and str(int(delay)) == delay:
            return int(delay)
        return [int(x) for x in delay.split(',')]
    # NOTE: the `x += 1` below only advances the *local* x; the for loop
    # still revisits the consumed lines, which is harmless because those
    # indices never match 'version' or 'state'.
    for x in range(len(groups)):
        index, value = groups[x]
        if index == 'version':
            if value != '4.0':
                return -4
            if groups[x + 1][0] == '\twidth':
                x += 1
                width = int(groups[x][1])
            if groups[x + 1][0] == '\theight':
                x += 1
                height = int(groups[x][1])
        if index == 'state':
            try:
                state = unicode(value, 'utf-8')
            except Exception:
                # bugfix: was a bare no-op `value`, leaving `state` unbound
                # if decoding failed; fall back to the raw value (this path
                # is also taken on Python 3, where `unicode` is undefined)
                state = value
            # FIXME: make order of occurrence unimportant, do a loop
            if len(groups) > x + 1 and groups[x + 1][0] == '\tdirs': # this should always exist
                x += 1
                dirs = int(groups[x][1])
            if len(groups) > x + 1 and groups[x + 1][0] == '\tframes': # this should always exist
                x += 1
                frames = int(groups[x][1])
            delays = [1]
            if len(groups) > x + 1 and groups[x + 1][0] == '\tdelay':
                x += 1
                delays = split_delays(groups[x][1])
            loops = 0
            if len(groups) > x + 1 and groups[x + 1][0] == '\tloop':
                x += 1
                loops = int(groups[x][1])
            rewind = 0
            if len(groups) > x + 1 and groups[x + 1][0] == '\trewind':
                x += 1
                rewind = int(groups[x][1])
            movement = 0
            if len(groups) > x + 1 and groups[x + 1][0] == '\tmovement':
                x += 1
                movement = int(groups[x][1])
            # seen this, but what it's used for is unknown:
            # hotspot = 1,14,1
            hotspot = []
            if len(groups) > x + 1 and groups[x + 1][0] == '\thotspot':
                x += 1
                hotspot = split_delays(groups[x][1])
            icon_states.append( (state, dirs, frames, delays, rewind, loops, movement, hotspot) )
    return icon_states, (width, height)
def DMIREAD(path):
    # Takes a path to a dmi file and splits it up into multiple Icon objects.
    # Returns a list of Icon, or [] on any failure.
    def DMIICON(dmi, width=-1, height=-1):
        # Splits up the DMI/PNG into multiple bitmaps, row-major,
        # one `width`x`height` tile per frame.
        dmis = []
        if width < 0 or height < 0:
            width, height = dmi.size
        # actual image size
        fullwidth, fullheight = dmi.size
        for y in xrange(0, fullheight, height):
            for x in xrange(0, fullwidth, width):
                # Image.crop((left, top, right, bottom))
                dmis.append(dmi.crop((x, y, x + width, y + height)))
        return dmis
    try:
        dmifile = os.path.split(path)[-1]
        print 'DMIREAD:', dmifile
        try:
            dmi = Image.open(path).convert('RGBA')
            #print dmi.info['Description']
            states, (width, height) = DMIINFO(dmi.info['Description'])
            #print 'icon, states: %s (width: %s; height: %s)' % (states, width, height)
            if width == -1:
                # metadata had no width: derive it from the total frame count
                frames = 0
                for state in states:
                    frames += state[1] * state[2]
                width = dmi.size[0] / frames
            if height == -1:
                height = dmi.size[1]
            dmis = DMIICON(dmi, width, height)
            # DMIINFO signals parse errors with int codes
            if type(states) == int:
                return []
        except IOError:
            return []
        except KeyError:
            # no Description chunk: either an unsupported v3 DMI, or a
            # plain PNG which is wrapped as a single stateless icon
            dmif = open(path, 'rb')
            if dmif.read(4).endswith('DMI'):
                print >> sys.stderr, '[DMIREAD] Failed to load icon `%s`\n\Unable to load DMI v3 icons.\n' % path
            else:
                dmi = Image.open(path).convert('RGBA')
                icon = Icon()
                icon.icons = [[dmi]]
                icon.state = ''
                icon.dirs = 1
                return [icon]
            return []
        # walk the flat tile list, consuming dirs*frames tiles per state;
        # dmi_counter tracks the next unconsumed tile
        icons = []
        dmi_counter = 0
        if states != -1:
            for index in xrange(len(states)):
                image = dmis[dmi_counter]
                state = states[index]
                icon = Icon()
                icon.origin = dmifile
                # maybe also the path from project's root?
                icon.icons = [[image]]
                icon.width = width
                icon.height = height
                # strip the surrounding quotes from the state name
                icon.state = state[0][1:-1]
                icon.dirs = state[1]
                icon.frames = state[2]
                icon.delays = state[3]
                icon.rewind = state[4]
                icon.loops = state[5]
                icon.movement = state[6]
                icon.hotspot = state[7]
                print 'DEBUG: %s' % icon
                # remaining dirs of frame 0
                if icon.dirs > 1:
                    for x in xrange(1, icon.dirs):
                        dmi_counter += 1
                        image = dmis[dmi_counter]
                        icon.icons.append([image])
                # remaining frames, dir-major within each frame
                if icon.frames > 1:
                    for x in xrange(0, icon.frames - 1):
                        for y in xrange(0, icon.dirs):
                            dmi_counter += 1
                            image = dmis[dmi_counter]
                            icon.icons[y].append(image)
                icons.append(icon)
                dmi_counter += 1
        return icons
    except IOError:
        print >> sys.stderr, '[DMIREAD] File `%s` non existant.' % path
        print >> sys.stderr, traceback.format_exc()
    except Exception:
        print >> sys.stderr, '[DMIREAD]', traceback.format_exc()
    return []
def DMIWRITE(icons, path=None, crush=None):
    # Saves a list of icons to a DMI.
    # With a `path`, writes the PNG (metadata in its 'Description' chunk) and
    # returns None; without one, returns the (image, metadata) pair instead.
    # NOTE(review): MAX_WIDTH is declared but never enforced below.
    MAX_WIDTH = 40000 # very important, the maximum allowed width for the generated png
    # PIL doesn't like large widths
    def DMIICON(icons):
        # Takes a list of icons and flattens them to a long bitmap strip
        # (single row; frames outer, dirs inner - matching DMIREAD's order).
        if not len(icons): return -1
        frames = []
        for icon in icons:
            for y in xrange(icon.frames):
                for x in xrange(icon.dirs):
                    frames.append(icon.icons[x][y])
        width, height = icons[0].icons[0][0].size # size of frames
        '''
        icons_per_row = math.ceil(math.sqrt(len(frames)))
        png_width = icons_per_row * size
        bitmap = Image.new('RGBA', (png_width, png_width))
        for index, icon in enumerate(frames):
            pos_x = index
            pos_y = 0
            while pos_x > icons_per_row:
                pos_x -= icons_per_row + 1
                pos_y += 1
            bitmap.paste(icon, (pos_x * size, pos_y * size))
        '''
        bitmap = Image.new('RGBA', (len(frames) * width, height))
        for index, icon in enumerate(frames):
            bitmap.paste(icon, (index * width, 0))
        return bitmap
    def DMIINFO(icons):
        # Format the icons information in standard DMI metadata format.
        # NOTE(review): this shadows the module-level DMIINFO parser; it also
        # writes 'version=4.0' without spaces while the parser matches ' = '
        # and never writes width/height - confirm round-tripping works.
        if not len(icons): return -1
        metadata = '# BEGIN DMI'
        metadata += '\nversion=4.0'
        for icon in icons:
            metadata += '\nstate = "%s"' % icon.state
            metadata += '\n\tdirs = %i' % icon.dirs
            metadata += '\n\tframes = %i' % icon.frames
            # only write the next values if they're not the default values, keep the size of the file down
            if icon.delays != [1]:
                metadata += '\n\tdelay = %s' % (','.join([str(x) for x in icon.delays]))
            if icon.loops != 0:
                metadata += '\n\tloop = %i' % icon.loops
            if icon.rewind != 0:
                metadata += '\n\trewind = %i' % icon.rewind
            if icon.movement != 0:
                metadata += '\n\tmovement = %i' % icon.movement
        metadata += '\n# END DMI'
        return metadata
    def pngsave(im, file):
        # Save a PNG while preserving its text metadata chunks (PIL drops
        # them on a plain save).
        # Borrowed from: http://blog.modp.com/2007/08/python-pil-and-png-metadata-take-2.html - thanks! :)
        # these can be automatically added to Image.info dict
        # they are not user-added metadata
        reserved = ('interlace', 'gamma', 'dpi', 'transparency', 'aspect')
        # undocumented class
        meta = PngImagePlugin.PngInfo()
        # copy metadata into new object
        for k,v in im.info.iteritems():
            if k in reserved: continue
            meta.add_text(k, v, 1)
        # and save
        im.save(file, 'PNG', pnginfo=meta)
    image = DMIICON(icons)
    metadata = DMIINFO(icons)
    if type(image) == int and not len(icons):
        # empty input: emit a 1x1 transparent placeholder with no metadata
        image = Image.new('RGBA', (1, 1))
        metadata = ''
    if path:
        image.info['Description'] = metadata
        pngsave(image, path)
        def optimize_optipng(path):
            # run optipng.exe from its directory on the just-written file
            curdir = os.getcwd()
            os.chdir(optipng)
            os.system('optipng.exe "%s" -out "%s"' % (path, path)) #-o1 -quiet -force
            os.chdir(curdir)
        if crush:
            # fire-and-forget background compression
            import thread
            thread.start_new_thread(optimize_optipng, (path,))
    else:
        return image, metadata
def flatten_array(lst):
    # Flatten arbitrarily nested lists into one flat list.
    # Only `list` objects are descended into; everything else is kept as-is.
    def _walk(node, out):
        # depth-first, left-to-right accumulation
        for item in node:
            if type(item) == list:
                _walk(item, out)
            else:
                out.append(item)
    flat = []
    _walk(lst, flat)
    return flat
| |
__author__ = 'abdul'
import mongoctl.repository as repository
from mongoctl.mongo_uri_tools import is_mongo_uri, parse_mongo_uri
from mongoctl.utils import resolve_path
from mongoctl.mongoctl_logging import log_info , log_warning
from mongoctl.commands.command_utils import (
is_db_address, is_dbpath, extract_mongo_exe_options, get_mongo_executable,
options_to_command_args,
VERSION_PREF_EXACT_OR_MINOR
)
from mongoctl.errors import MongoctlException
from mongoctl.utils import call_command
from mongoctl.objects.server import Server
from mongoctl.mongo_version import version_obj, MongoctlNormalizedVersion
###############################################################################
# CONSTS
###############################################################################
SUPPORTED_MONGO_DUMP_OPTIONS = [
"directoryperdb",
"journal",
"collection",
"out",
"query",
"oplog",
"repair",
"forceTableScan",
"ipv6",
"verbose",
"authenticationDatabase"
]
###############################################################################
# dump command
###############################################################################
def dump_command(parsed_options):
    """Entry point for the `dump` command: validate the target and dispatch.

    The target must be exactly one of a db address (uri / server:// /
    cluster://) or a dbpath (file path); anything ambiguous or unknown
    raises MongoctlException.
    """
    target = parsed_options.target
    use_best_secondary = parsed_options.useBestSecondary
    #max_repl_lag = parsed_options.maxReplLag
    is_addr = is_db_address(target)
    is_path = is_dbpath(target)
    # reject ambiguous / unrecognized targets before doing any work
    if is_addr and is_path:
        raise MongoctlException(
            "Ambiguous target value '%s'. Your target matches both a dbpath"
            " and a db address. Use prefix 'file://', 'cluster://' or"
            " 'server://' to make it more specific" % target)
    if not is_addr and not is_path:
        raise MongoctlException("Invalid target value '%s'. Target has to be"
                                " a valid db address or dbpath." % target)
    dump_options = extract_mongo_dump_options(parsed_options)
    if is_path:
        mongo_dump_db_path(resolve_path(target), dump_options=dump_options)
    else:
        mongo_dump_db_address(target,
                              username=parsed_options.username,
                              password=parsed_options.password,
                              use_best_secondary=use_best_secondary,
                              max_repl_lag=None,
                              dump_options=dump_options)
###############################################################################
# mongo_dump
###############################################################################
def mongo_dump_db_address(db_address,
                          username=None,
                          password=None,
                          use_best_secondary=False,
                          max_repl_lag=None,
                          dump_options=None):
    """Dump a db address: either a mongo URI or a '<server-or-cluster-id>[/db]' string.

    Raises MongoctlException when the id resolves to neither a configured
    server nor a cluster.
    """
    if is_mongo_uri(db_address):
        mongo_dump_uri(uri=db_address, username=username, password=password,
                       use_best_secondary=use_best_secondary,
                       dump_options=dump_options)
        return
    # otherwise it's an id string, optionally suffixed with "/<database>"
    id_path = db_address.split("/")
    resource_id = id_path[0]
    database = id_path[1] if len(id_path) == 2 else None
    server = repository.lookup_server(resource_id)
    if server:
        mongo_dump_server(server, database=database, username=username,
                          password=password, dump_options=dump_options)
        return
    cluster = repository.lookup_cluster(resource_id)
    if cluster:
        mongo_dump_cluster(cluster, database=database, username=username,
                           password=password,
                           use_best_secondary=use_best_secondary,
                           max_repl_lag=max_repl_lag,
                           dump_options=dump_options)
        return
    # Unknown destination
    raise MongoctlException("Unknown db address '%s'" % db_address)
###############################################################################
def mongo_dump_db_path(dbpath, dump_options=None):
    """Dump directly from an offline dbpath directory (mongodump --dbpath)."""
    do_mongo_dump(dbpath=dbpath, dump_options=dump_options)
###############################################################################
def mongo_dump_uri(uri,
                   username=None,
                   password=None,
                   use_best_secondary=False,
                   dump_options=None):
    """Dump from a mongo URI, resolving it to a server or a cluster."""
    uri_wrapper = parse_mongo_uri(uri)
    database = uri_wrapper.database
    # explicitly supplied credentials win over ones embedded in the URI
    username = username or uri_wrapper.username
    password = password or uri_wrapper.password
    target = repository.build_server_or_cluster_from_uri(uri)
    if isinstance(target, Server):
        mongo_dump_server(target,
                          database=database,
                          username=username,
                          password=password,
                          dump_options=dump_options)
    else:
        mongo_dump_cluster(target,
                           database=database,
                           username=username,
                           password=password,
                           use_best_secondary=use_best_secondary,
                           dump_options=dump_options)
###############################################################################
def mongo_dump_server(server,
                      database=None,
                      username=None,
                      password=None,
                      dump_options=None):
    """Dump a single server, auto-completing the password when possible."""
    repository.validate_server(server)
    # look up a stored password for the user if none was supplied:
    # first against the target database, then against admin
    if username and not password:
        if database:
            password = server.lookup_password(database, username)
        if not password:
            password = server.lookup_password("admin", username)
    do_mongo_dump(host=server.get_connection_host_address(),
                  port=server.get_port(),
                  database=database,
                  username=username,
                  password=password,
                  server_version=server.get_mongo_version(),
                  dump_options=dump_options)
###############################################################################
def mongo_dump_cluster(cluster,
                       database=None,
                       username=None,
                       password=None,
                       use_best_secondary=False,
                       # consistency fix: default was False while every sibling
                       # function uses None for "no lag limit" (both falsy)
                       max_repl_lag=None,
                       dump_options=None):
    """Dump a cluster.

    By default the cluster's default (primary) server is dumped; with
    use_best_secondary=True the best secondary within max_repl_lag is
    used instead.
    """
    repository.validate_cluster(cluster)
    if use_best_secondary:
        mongo_dump_cluster_best_secondary(cluster=cluster,
                                          max_repl_lag=max_repl_lag,
                                          database=database,
                                          username=username,
                                          password=password,
                                          dump_options=dump_options)
    else:
        mongo_dump_cluster_primary(cluster=cluster,
                                   database=database,
                                   username=username,
                                   password=password,
                                   dump_options=dump_options)
###############################################################################
def mongo_dump_cluster_primary(cluster,
                               database=None,
                               username=None,
                               password=None,
                               dump_options=None):
    """Dump the cluster's default (primary) server."""
    log_info("Locating default server for cluster '%s'..." % cluster.id)
    default_server = cluster.get_default_server()
    # no default server means the cluster config is unusable for dumping
    if not default_server:
        raise MongoctlException("No default server found for cluster '%s'" %
                                cluster.id)
    log_info("Dumping default server '%s'..." % default_server.id)
    mongo_dump_server(default_server,
                      database=database,
                      username=username,
                      password=password,
                      dump_options=dump_options)
###############################################################################
def mongo_dump_cluster_best_secondary(cluster,
                                      max_repl_lag=None,
                                      database=None,
                                      username=None,
                                      password=None,
                                      dump_options=None):
    """Dump from the cluster member judged the best secondary candidate."""
    #max_repl_lag = max_repl_lag or 3600
    log_info("Finding best secondary server for cluster '%s' with replication"
             " lag less than max (%s seconds)..." %
             (cluster.id, max_repl_lag))
    best_secondary = cluster.get_dump_best_secondary(max_repl_lag=max_repl_lag)
    if not best_secondary:
        raise MongoctlException("No secondary server found for cluster '%s'" %
                                cluster.id)
    server = best_secondary.get_server()
    log_info("Found secondary server '%s'. Dumping..." % server.id)
    mongo_dump_server(server, database=database, username=username,
                      password=password, dump_options=dump_options)
###############################################################################
def do_mongo_dump(host=None,
                  port=None,
                  dbpath=None,
                  database=None,
                  username=None,
                  password=None,
                  server_version=None,
                  dump_options=None):
    """Build and execute the mongodump command line.

    The command is logged with the username/password masked, then run via
    call_command (exit codes bubble up to the caller).
    """
    # create dump command with host and port
    dump_cmd = [get_mongo_dump_executable(server_version)]
    if host:
        dump_cmd.extend(["--host", host])
    if port:
        dump_cmd.extend(["--port", str(port)])
    # dbpath (offline dump)
    if dbpath:
        dump_cmd.extend(["--dbpath", dbpath])
    # database
    if database:
        dump_cmd.extend(["-d", database])
    # username and password; a trailing "-p" without a value makes
    # mongodump prompt interactively
    if username:
        dump_cmd.extend(["-u", username, "-p"])
        # bugfix: the password is only meaningful together with -u; it was
        # previously appended as a bare positional arg when no username was
        # given, which also crashed the masking below on index("-p")
        if password:
            dump_cmd.append(password)
    # ignore authenticationDatabase option if server_version is less than 2.4.0
    if (dump_options and "authenticationDatabase" in dump_options and
            server_version and
            version_obj(server_version) < MongoctlNormalizedVersion("2.4.0")):
        # work on a copy so the caller's dict is not mutated
        dump_options = dict(dump_options)
        dump_options.pop("authenticationDatabase", None)
    # append shell options
    if dump_options:
        dump_cmd.extend(options_to_command_args(dump_options))
    # mask the credentials in the logged command
    cmd_display = dump_cmd[:]
    if username:
        cmd_display[cmd_display.index("-u") + 1] = "****"
        if password:
            cmd_display[cmd_display.index("-p") + 1] = "****"
    log_info("Executing command: \n%s" % " ".join(cmd_display))
    call_command(dump_cmd, bubble_exit_code=True)
###############################################################################
def extract_mongo_dump_options(parsed_args):
    """Pick the mongodump-supported options out of the parsed arguments."""
    return extract_mongo_exe_options(parsed_args, SUPPORTED_MONGO_DUMP_OPTIONS)
###############################################################################
def get_mongo_dump_executable(server_version):
    """Locate a mongodump binary for server_version and return its path."""
    dump_exe = get_mongo_executable(
        server_version,
        'mongodump',
        version_check_pref=VERSION_PREF_EXACT_OR_MINOR)
    # warn when only a minor-version match could be found
    if server_version and version_obj(server_version) != dump_exe.version:
        log_warning("Using mongodump '%s' that does not exactly match "
                    "server version '%s'" % (dump_exe.version, server_version))
    return dump_exe.path
| |
from PIL import Image
import commands
import os
import re
import tempfile
import numpy as np
from menpo.io import metadata
from menpo.shape import TexturedTriMesh, TriMesh
def process_with_meshlabserver(file_path, output_dir=None, script_path=None,
                               output_filetype=None, export_flags=None):
    """Run `meshlabserver` on a mesh file and return the output path.

    Kwargs:

    * script_path: if specified this script will be run on the input mesh.
    * output_dir: if None provided, set to the users tmp directory.
    * output_filetype: the output desired from meshlabserver. If not provided
                       the output type will be the same as the input.
    * export_flags: flags passed to the -om parameter. Allows for choosing
                    what aspects of the model will be exported (normals,
                    texture coords etc)
    """
    if output_dir is None:
        output_dir = tempfile.gettempdir()
    in_name = os.path.split(file_path)[-1]
    # swap the extension when an explicit output filetype was requested
    if output_filetype is not None:
        out_name = os.path.splitext(in_name)[0] + '.' + output_filetype
    else:
        out_name = in_name
    output_path = os.path.join(output_dir, out_name)
    cmd_parts = ['meshlabserver -i ' + file_path + ' -o ' + output_path]
    if script_path is not None:
        cmd_parts.append('-s ' + script_path)
    if export_flags is not None:
        cmd_parts.append('-om ' + export_flags)
    commands.getoutput(' '.join(cmd_parts))
    return output_path
class MeshImporter(object):
    """Base class for importing 3D meshes.

    The constructor reads the whole file (as both a line list and a single
    string) and then drives the import pipeline; subclasses implement
    parse_geometry() and import_texture().
    """
    def __init__(self, filepath):
        # normalized absolute path with ~ expanded
        self.filepath = os.path.abspath(os.path.expanduser(filepath))
        # path minus extension - used to locate sidecar files (landmarks)
        self.path_and_filename = os.path.splitext(self.filepath)[0]
        # deprecate this once the other parsers are regexp
        with open(self.filepath) as f:
            self.lines = f.readlines()
        # text is the entire file in one string (useful for regexp)
        with open(self.filepath) as f:
            self.text = f.read()
        self.parse_geometry()
        self.import_texture()
        self.import_landmarks()
    def parse_geometry(self):
        # subclass hook: must populate self.points / self.trilist (and friends)
        raise NotImplementedError
    def import_texture(self):
        # subclass hook: must populate self.texture (and self.tcoords)
        raise NotImplementedError
    def import_landmarks(self):
        # landmarks are optional sidecar metadata; absence is not an error
        try:
            self.landmarks = metadata.json_menpo_landmarks(
                self.path_and_filename)
        except metadata.MissingLandmarksError:
            self.landmarks = None
    def build(self, **kwargs):
        # assemble the final mesh object from the parsed fields;
        # textured when a texture was found, plain TriMesh otherwise
        if self.texture is not None:
            mesh = TexturedTriMesh(self.points, self.trilist,
                                   self.tcoords, self.texture)
        else:
            mesh = TriMesh(self.points, self.trilist)
        if self.landmarks is not None:
            mesh.landmarks.add_reference_landmarks(self.landmarks)
        mesh.legacy = {'path_and_filename': self.path_and_filename}
        return mesh
class OBJImporter(MeshImporter):
    """Wavefront OBJ mesh importer (regexp based)."""
    def __init__(self, filepath):
        MeshImporter.__init__(self, filepath)
    def parse_geometry(self):
        # vertex position: v 1.345 2134.234 1e015
        re_v = re.compile(u'v ([^\s]+) ([^\s]+) ([^\s]+)')
        # vertex normal: vn 1.345 2134.234 1e015
        re_vn = re.compile(u'vn ([^\s]+) ([^\s]+) ([^\s]+)')
        # texture coordinate: vt 0.0025 0.502
        re_tc = re.compile(u'vt ([^\s]+) ([^\s]+)')
        # now we just grab the three possible values that can be given
        # to each face grouping (f v/vt/vn per corner).
        re_ti = re.compile(
            u'f (\d+)/*\d*/*\d* (\d+)/*\d*/*\d* (\d+)/*\d*/*\d*')
        re_tcti = re.compile(
            u'f \d+/(\d+)/*\d* \d+/(\d+)/*\d* \d+/(\d+)/*\d*')
        re_vnti = re.compile(
            u'f \d+/\d*/(\d+) \d+/\d*/(\d+) \d+/\d*/(\d+)')
        self.points = np.array(re_v.findall(self.text), dtype=np.float)
        self.normals = np.array(re_vn.findall(self.text), dtype=np.float)
        self.tcoords = np.array(re_tc.findall(self.text), dtype=np.float)
        # OBJ indices are 1-based, hence the -1 on every index array
        self.trilist = np.array(re_ti.findall(self.text), dtype=np.uint32) - 1
        self.tcoords_trilist = np.array(
            re_tcti.findall(self.text), dtype=np.uint32) - 1
        self.normals_trilist = np.array(
            re_vnti.findall(self.text), dtype=np.uint32) - 1
    def import_texture(self):
        # TODO: make this more intelligent in locating the texture
        # (i.e. from the materials file, this can be second guess)
        pathToJpg = os.path.splitext(self.filepath)[0] + '.jpg'
        print pathToJpg
        try:
            Image.open(pathToJpg)
            self.texture = Image.open(pathToJpg)
        except IOError:
            print 'Warning, no texture found'
            # NOTE(review): self.tcoords is a numpy array here, so this
            # truthiness test raises ValueError when it holds more than one
            # coordinate - confirm intended behavior
            if self.tcoords:
                raise Exception(
                    'why do we have texture coords but no texture?')
            else:
                print '(there are no texture coordinates anyway so this is' \
                      ' expected)'
            self.texture = None
class WRLImporter(MeshImporter):
    """ WARNING - this class may need to be restructured to work correctly
    (see OBJImporter for an exemplary MeshImporter subclass)
    """
    def __init__(self, filepath):
        MeshImporter.__init__(self, filepath)
    def parse_geometry(self):
        # indices of lines that close a VRML section (contain ']')
        self._sectionEnds = [i for i, line in enumerate(self.lines)
                             if ']' in line]
        self.points = self._getFloatDataForString(' Coordinate')
        self.tcoords = self._getFloatDataForString('TextureCoordinate')
        tcoords_trilist = self._getFloatDataForString('texCoordIndex',
                                                      separator=', ', cast=int)
        # drop the trailing -1 face terminator VRML appends to each index row
        self.tcoords_trilist = [x[:-1] for x in tcoords_trilist]
        # NOTE(review): the texture-coordinate index list is reused as the
        # vertex trilist - verify this assumption holds for the target files
        self.trilist = self.tcoords_trilist
        self.normalsIndex = None
        self.normals = None
    def _getFloatDataForString(self, string, **kwargs):
        # Parse the numeric rows of the section that starts at `string`.
        sep = kwargs.get('separator', ' ')
        cast = kwargs.get('cast', float)
        start = self._findIndexOfFirstInstanceOfString(string)
        end = self._findNextSectionEnd(start)
        floatLines = self.lines[start + 1:end]
        # line[5:-3] strips a fixed indent and trailing ',\n' per row -
        # assumes a rigid file layout (TODO confirm)
        return [[cast(x) for x in line[5:-3].split(sep)] for line in
                floatLines]
    def _findIndexOfFirstInstanceOfString(self, string):
        # first line containing `string`; raises IndexError when absent
        return [i for i, line in enumerate(self.lines) if string in line][0]
    def _findNextSectionEnd(self, beginningIndex):
        # first section-closing line strictly after `beginningIndex`
        return [i for i in self._sectionEnds if i > beginningIndex][0]
    def import_texture(self):
        # the texture filename is quoted on the line after 'ImageTexture'
        imageIndex = self._findIndexOfFirstInstanceOfString('ImageTexture') + 1
        self.imageName = self.lines[imageIndex].split('"')[1]
        pathToTexture = os.path.dirname(self.filepath) + '/' + self.imageName
        self.texture = Image.open(pathToTexture)
class OFFImporter(MeshImporter):
    """Importer for the .off mesh format.

    WARNING - this class may need to be restructured to work correctly
    (see OBJImporter for an exemplary MeshImporter subclass)
    """
    def __init__(self, filepath):
        MeshImporter.__init__(self, filepath)
        # .off files carry geometry only - every other field stays None.
        self.tcoords = None
        self.normals = None
        self.normalsIndex = None
        self.tcoords_trilist = None
        self.texture = None
    def parse_geometry(self):
        """Populate n_points, points and trilist from self.lines."""
        stripped = [line.rstrip() for line in self.lines]
        # The second line starts with the vertex count.
        self.n_points = int(stripped[1].split(' ')[0])
        start = 2
        while stripped[start] == '':
            start += 1
        end = self.n_points + start
        self.points = [[float(token) for token in line.split(' ')]
                       for line in stripped[start:end]]
        # NOTE(review): this drops the first TWO tokens of each face line,
        # while standard OFF faces carry a single count token — confirm.
        self.trilist = [[int(token) for token in line.split(' ')[2:]]
                        for line in stripped[end:] if line != '']
    def import_texture(self):
        """The .off format has no texture support; nothing to do."""
        pass
| |
"""Light platform support for yeelight."""
from __future__ import annotations
import asyncio
import logging
import math
import voluptuous as vol
import yeelight
from yeelight import Bulb, Flow, RGBTransition, SleepTransition, flows
from yeelight.enums import BulbType, LightType, PowerMode, SceneClass
from yeelight.main import BulbException
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
COLOR_MODE_RGB,
COLOR_MODE_UNKNOWN,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID, ATTR_MODE, CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import async_call_later
import homeassistant.util.color as color_util
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
from . import (
ACTION_RECOVER,
ATTR_ACTION,
ATTR_COUNT,
ATTR_MODE_MUSIC,
ATTR_TRANSITIONS,
CONF_FLOW_PARAMS,
CONF_MODE_MUSIC,
CONF_NIGHTLIGHT_SWITCH,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DATA_CONFIG_ENTRIES,
DATA_CUSTOM_EFFECTS,
DATA_DEVICE,
DATA_UPDATED,
DOMAIN,
POWER_STATE_CHANGE_TIME,
YEELIGHT_FLOW_TRANSITION_SCHEMA,
YeelightEntity,
)
_LOGGER = logging.getLogger(__name__)
# Base feature bitmask common to every Yeelight model.
SUPPORT_YEELIGHT = SUPPORT_TRANSITION | SUPPORT_FLASH | SUPPORT_EFFECT
ATTR_MINUTES = "minutes"
# Names of the custom entity services registered in _async_setup_services().
SERVICE_SET_MODE = "set_mode"
SERVICE_SET_MUSIC_MODE = "set_music_mode"
SERVICE_START_FLOW = "start_flow"
SERVICE_SET_COLOR_SCENE = "set_color_scene"
SERVICE_SET_HSV_SCENE = "set_hsv_scene"
SERVICE_SET_COLOR_TEMP_SCENE = "set_color_temp_scene"
SERVICE_SET_COLOR_FLOW_SCENE = "set_color_flow_scene"
SERVICE_SET_AUTO_DELAY_OFF_SCENE = "set_auto_delay_off_scene"
# Human-readable effect names exposed in the light's effect list.
EFFECT_DISCO = "Disco"
EFFECT_TEMP = "Slow Temp"
EFFECT_STROBE = "Strobe epilepsy!"
EFFECT_STROBE_COLOR = "Strobe color"
EFFECT_ALARM = "Alarm"
EFFECT_POLICE = "Police"
EFFECT_POLICE2 = "Police2"
EFFECT_CHRISTMAS = "Christmas"
EFFECT_RGB = "RGB"
EFFECT_RANDOM_LOOP = "Random Loop"
EFFECT_FAST_RANDOM_LOOP = "Fast Random Loop"
EFFECT_LSD = "LSD"
EFFECT_SLOWDOWN = "Slowdown"
EFFECT_WHATSAPP = "WhatsApp"
EFFECT_FACEBOOK = "Facebook"
EFFECT_TWITTER = "Twitter"
EFFECT_STOP = "Stop"
EFFECT_HOME = "Home"
EFFECT_NIGHT_MODE = "Night Mode"
EFFECT_DATE_NIGHT = "Date Night"
EFFECT_MOVIE = "Movie"
EFFECT_SUNRISE = "Sunrise"
EFFECT_SUNSET = "Sunset"
EFFECT_ROMANCE = "Romance"
EFFECT_HAPPY_BIRTHDAY = "Happy Birthday"
EFFECT_CANDLE_FLICKER = "Candle Flicker"
# Effect lists grow by capability: temp-only ⊂ mono ⊂ color.
YEELIGHT_TEMP_ONLY_EFFECT_LIST = [EFFECT_TEMP, EFFECT_STOP]
YEELIGHT_MONO_EFFECT_LIST = [
    EFFECT_DISCO,
    EFFECT_STROBE,
    EFFECT_ALARM,
    EFFECT_POLICE2,
    EFFECT_WHATSAPP,
    EFFECT_FACEBOOK,
    EFFECT_TWITTER,
    EFFECT_HOME,
    EFFECT_CANDLE_FLICKER,
    *YEELIGHT_TEMP_ONLY_EFFECT_LIST,
]
YEELIGHT_COLOR_EFFECT_LIST = [
    EFFECT_STROBE_COLOR,
    EFFECT_POLICE,
    EFFECT_CHRISTMAS,
    EFFECT_RGB,
    EFFECT_RANDOM_LOOP,
    EFFECT_FAST_RANDOM_LOOP,
    EFFECT_LSD,
    EFFECT_SLOWDOWN,
    EFFECT_NIGHT_MODE,
    EFFECT_DATE_NIGHT,
    EFFECT_MOVIE,
    EFFECT_SUNRISE,
    EFFECT_SUNSET,
    EFFECT_ROMANCE,
    EFFECT_HAPPY_BIRTHDAY,
    *YEELIGHT_MONO_EFFECT_LIST,
]
# Effect name -> zero-argument yeelight flow factory. Effects that need
# arguments (e.g. Fast Random Loop) are handled in async_set_effect.
EFFECTS_MAP = {
    EFFECT_DISCO: flows.disco,
    EFFECT_TEMP: flows.temp,
    EFFECT_STROBE: flows.strobe,
    EFFECT_STROBE_COLOR: flows.strobe_color,
    EFFECT_ALARM: flows.alarm,
    EFFECT_POLICE: flows.police,
    EFFECT_POLICE2: flows.police2,
    EFFECT_CHRISTMAS: flows.christmas,
    EFFECT_RGB: flows.rgb,
    EFFECT_RANDOM_LOOP: flows.random_loop,
    EFFECT_LSD: flows.lsd,
    EFFECT_SLOWDOWN: flows.slowdown,
    EFFECT_HOME: flows.home,
    EFFECT_NIGHT_MODE: flows.night_mode,
    EFFECT_DATE_NIGHT: flows.date_night,
    EFFECT_MOVIE: flows.movie,
    EFFECT_SUNRISE: flows.sunrise,
    EFFECT_SUNSET: flows.sunset,
    EFFECT_ROMANCE: flows.romance,
    EFFECT_HAPPY_BIRTHDAY: flows.happy_birthday,
    EFFECT_CANDLE_FLICKER: flows.candle_flicker,
}
# Device-side brightness is a percentage (1..100), not 0..255.
VALID_BRIGHTNESS = vol.All(vol.Coerce(int), vol.Range(min=1, max=100))
# voluptuous schemas for the custom services declared above.
SERVICE_SCHEMA_SET_MODE = {
    vol.Required(ATTR_MODE): vol.In([mode.name.lower() for mode in PowerMode])
}
SERVICE_SCHEMA_SET_MUSIC_MODE = {vol.Required(ATTR_MODE_MUSIC): cv.boolean}
SERVICE_SCHEMA_START_FLOW = YEELIGHT_FLOW_TRANSITION_SCHEMA
SERVICE_SCHEMA_SET_COLOR_SCENE = {
    vol.Required(ATTR_RGB_COLOR): vol.All(
        vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)
    ),
    vol.Required(ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
}
SERVICE_SCHEMA_SET_HSV_SCENE = {
    vol.Required(ATTR_HS_COLOR): vol.All(
        vol.ExactSequence(
            (
                vol.All(vol.Coerce(float), vol.Range(min=0, max=359)),
                vol.All(vol.Coerce(float), vol.Range(min=0, max=100)),
            )
        ),
        vol.Coerce(tuple),
    ),
    vol.Required(ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
}
SERVICE_SCHEMA_SET_COLOR_TEMP_SCENE = {
    vol.Required(ATTR_KELVIN): vol.All(vol.Coerce(int), vol.Range(min=1700, max=6500)),
    vol.Required(ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
}
SERVICE_SCHEMA_SET_COLOR_FLOW_SCENE = YEELIGHT_FLOW_TRANSITION_SCHEMA
SERVICE_SCHEMA_SET_AUTO_DELAY_OFF_SCENE = {
    vol.Required(ATTR_MINUTES): vol.All(vol.Coerce(int), vol.Range(min=1, max=60)),
    vol.Required(ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
}
@callback
def _transitions_config_parser(transitions):
"""Parse transitions config into initialized objects."""
transition_objects = []
for transition_config in transitions:
transition, params = list(transition_config.items())[0]
transition_objects.append(getattr(yeelight, transition)(*params))
return transition_objects
@callback
def _parse_custom_effects(effects_config):
effects = {}
for config in effects_config:
params = config[CONF_FLOW_PARAMS]
action = Flow.actions[params[ATTR_ACTION]]
transitions = _transitions_config_parser(params[ATTR_TRANSITIONS])
effects[config[CONF_NAME]] = {
ATTR_COUNT: params[ATTR_COUNT],
ATTR_ACTION: action,
ATTR_TRANSITIONS: transitions,
}
return effects
def _async_cmd(func):
    """Define a wrapper to catch exceptions from the bulb."""
    async def _async_wrap(self, *args, **kwargs):
        # Up to two attempts: a TimeoutError on the first attempt is retried
        # once; any failure on the second attempt is surfaced to the caller
        # as HomeAssistantError.
        for attempts in range(2):
            try:
                _LOGGER.debug("Calling %s with %s %s", func, args, kwargs)
                return await func(self, *args, **kwargs)
            except asyncio.TimeoutError as ex:
                # The wifi likely dropped, so we want to retry once since
                # python-yeelight will auto reconnect
                exc_message = str(ex) or type(ex)
                if attempts == 0:
                    continue
                raise HomeAssistantError(
                    f"Timed out when calling {func.__name__} for bulb {self.device.name} at {self.device.host}: {exc_message}"
                ) from ex
            except OSError as ex:
                # A network error happened, the bulb is likely offline now
                self.device.async_mark_unavailable()
                self.async_state_changed()
                exc_message = str(ex) or type(ex)
                raise HomeAssistantError(
                    f"Error when calling {func.__name__} for bulb {self.device.name} at {self.device.host}: {exc_message}"
                ) from ex
            except BulbException as ex:
                # The bulb likely responded but had an error
                exc_message = str(ex) or type(ex)
                raise HomeAssistantError(
                    f"Error when calling {func.__name__} for bulb {self.device.name} at {self.device.host}: {exc_message}"
                ) from ex
    return _async_wrap
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
    """Set up Yeelight from a config entry.

    Chooses which light entity class(es) to create based on the bulb type
    and whether the nightlight-switch option is enabled for the device.
    """
    custom_effects = _parse_custom_effects(hass.data[DOMAIN][DATA_CUSTOM_EFFECTS])
    device = hass.data[DOMAIN][DATA_CONFIG_ENTRIES][config_entry.entry_id][DATA_DEVICE]
    _LOGGER.debug("Adding %s", device.name)
    nl_switch_light = device.config.get(CONF_NIGHTLIGHT_SWITCH)
    lights = []
    device_type = device.type
    def _lights_setup_helper(klass):
        # Instantiate the entity class and queue it for registration.
        lights.append(klass(device, config_entry, custom_effects=custom_effects))
    if device_type == BulbType.White:
        _lights_setup_helper(YeelightGenericLight)
    elif device_type == BulbType.Color:
        # With the nightlight switch enabled, the main light and the
        # nightlight are exposed as two separate entities.
        if nl_switch_light and device.is_nightlight_supported:
            _lights_setup_helper(YeelightColorLightWithNightlightSwitch)
            _lights_setup_helper(YeelightNightLightModeWithoutBrightnessControl)
        else:
            _lights_setup_helper(YeelightColorLightWithoutNightlightSwitch)
    elif device_type == BulbType.WhiteTemp:
        if nl_switch_light and device.is_nightlight_supported:
            _lights_setup_helper(YeelightWithNightLight)
            _lights_setup_helper(YeelightNightLightMode)
        else:
            _lights_setup_helper(YeelightWhiteTempWithoutNightlightSwitch)
    elif device_type == BulbType.WhiteTempMood:
        if nl_switch_light and device.is_nightlight_supported:
            _lights_setup_helper(YeelightNightLightModeWithAmbientSupport)
            _lights_setup_helper(YeelightWithAmbientAndNightlight)
        else:
            _lights_setup_helper(YeelightWithAmbientWithoutNightlight)
        # Mood bulbs always get a second entity for the ambient light.
        _lights_setup_helper(YeelightAmbientLight)
    else:
        _lights_setup_helper(YeelightGenericLight)
        _LOGGER.warning(
            "Cannot determine device type for %s, %s. Falling back to white only",
            device.host,
            device.name,
        )
    async_add_entities(lights, True)
    _async_setup_services(hass)
@callback
def _async_setup_services(hass: HomeAssistant):
    """Set up custom services."""
    async def _async_start_flow(entity, service_call):
        # Copy the call data so the transitions entry can be replaced with
        # instantiated yeelight transition objects.
        params = {**service_call.data}
        params.pop(ATTR_ENTITY_ID)
        params[ATTR_TRANSITIONS] = _transitions_config_parser(params[ATTR_TRANSITIONS])
        await entity.async_start_flow(**params)
    async def _async_set_color_scene(entity, service_call):
        await entity.async_set_scene(
            SceneClass.COLOR,
            *service_call.data[ATTR_RGB_COLOR],
            service_call.data[ATTR_BRIGHTNESS],
        )
    async def _async_set_hsv_scene(entity, service_call):
        await entity.async_set_scene(
            SceneClass.HSV,
            *service_call.data[ATTR_HS_COLOR],
            service_call.data[ATTR_BRIGHTNESS],
        )
    async def _async_set_color_temp_scene(entity, service_call):
        await entity.async_set_scene(
            SceneClass.CT,
            service_call.data[ATTR_KELVIN],
            service_call.data[ATTR_BRIGHTNESS],
        )
    async def _async_set_color_flow_scene(entity, service_call):
        flow = Flow(
            count=service_call.data[ATTR_COUNT],
            action=Flow.actions[service_call.data[ATTR_ACTION]],
            transitions=_transitions_config_parser(service_call.data[ATTR_TRANSITIONS]),
        )
        await entity.async_set_scene(SceneClass.CF, flow)
    async def _async_set_auto_delay_off_scene(entity, service_call):
        await entity.async_set_scene(
            SceneClass.AUTO_DELAY_OFF,
            service_call.data[ATTR_BRIGHTNESS],
            service_call.data[ATTR_MINUTES],
        )
    # Register each service; string targets dispatch to the entity method of
    # that name, callables dispatch to the local handlers above.
    platform = entity_platform.async_get_current_platform()
    platform.async_register_entity_service(
        SERVICE_SET_MODE, SERVICE_SCHEMA_SET_MODE, "async_set_mode"
    )
    platform.async_register_entity_service(
        SERVICE_START_FLOW, SERVICE_SCHEMA_START_FLOW, _async_start_flow
    )
    platform.async_register_entity_service(
        SERVICE_SET_COLOR_SCENE, SERVICE_SCHEMA_SET_COLOR_SCENE, _async_set_color_scene
    )
    platform.async_register_entity_service(
        SERVICE_SET_HSV_SCENE, SERVICE_SCHEMA_SET_HSV_SCENE, _async_set_hsv_scene
    )
    platform.async_register_entity_service(
        SERVICE_SET_COLOR_TEMP_SCENE,
        SERVICE_SCHEMA_SET_COLOR_TEMP_SCENE,
        _async_set_color_temp_scene,
    )
    platform.async_register_entity_service(
        SERVICE_SET_COLOR_FLOW_SCENE,
        SERVICE_SCHEMA_SET_COLOR_FLOW_SCENE,
        _async_set_color_flow_scene,
    )
    platform.async_register_entity_service(
        SERVICE_SET_AUTO_DELAY_OFF_SCENE,
        SERVICE_SCHEMA_SET_AUTO_DELAY_OFF_SCENE,
        _async_set_auto_delay_off_scene,
    )
    platform.async_register_entity_service(
        SERVICE_SET_MUSIC_MODE, SERVICE_SCHEMA_SET_MUSIC_MODE, "async_set_music_mode"
    )
class YeelightGenericLight(YeelightEntity, LightEntity):
    """Representation of a Yeelight generic light."""
    _attr_color_mode = COLOR_MODE_BRIGHTNESS
    _attr_supported_color_modes = {COLOR_MODE_BRIGHTNESS}
    def __init__(self, device, entry, custom_effects=None):
        """Initialize the Yeelight light."""
        super().__init__(device, entry)
        self.config = device.config
        self._color_temp = None
        self._effect = None
        # Mired bounds are derived from the model's kelvin range; kelvin and
        # mireds are inversely related, hence the max/min swap.
        model_specs = self._bulb.get_model_specs()
        self._min_mireds = kelvin_to_mired(model_specs["color_temp"]["max"])
        self._max_mireds = kelvin_to_mired(model_specs["color_temp"]["min"])
        self._light_type = LightType.Main
        if custom_effects:
            self._custom_effects = custom_effects
        else:
            self._custom_effects = {}
        # Cancel callback for a scheduled post-command state poll, if any.
        self._unexpected_state_check = None
    @callback
    def async_state_changed(self):
        """Call when the device changes state."""
        if not self._device.available:
            self._async_cancel_pending_state_check()
        self.async_write_ha_state()
    async def async_added_to_hass(self):
        """Handle entity which will be added."""
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                DATA_UPDATED.format(self._device.host),
                self.async_state_changed,
            )
        )
        await super().async_added_to_hass()
    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return SUPPORT_YEELIGHT
    @property
    def effect_list(self):
        """Return the list of supported effects."""
        return self._predefined_effects + self.custom_effects_names
    @property
    def color_temp(self) -> int:
        """Return the color temperature."""
        temp_in_k = self._get_property("ct")
        if temp_in_k:
            self._color_temp = kelvin_to_mired(int(temp_in_k))
        # Fall back to the last known value when the property is missing.
        return self._color_temp
    @property
    def name(self) -> str:
        """Return the name of the device if any."""
        return self.device.name
    @property
    def is_on(self) -> bool:
        """Return true if device is on."""
        return self._get_property(self._power_property) == "on"
    @property
    def brightness(self) -> int:
        """Return the brightness of this light between 1..255."""
        # Always use "bright" as property name in music mode
        # Since music mode states are only caches in upstream library
        # and the cache key is always "bright" for brightness
        brightness_property = (
            "bright" if self._bulb.music_mode else self._brightness_property
        )
        brightness = self._get_property(brightness_property)
        # Device reports percent (1..100); HA expects 0..255.
        return round(255 * (int(brightness) / 100))
    @property
    def min_mireds(self):
        """Return minimum supported color temperature."""
        return self._min_mireds
    @property
    def max_mireds(self):
        """Return maximum supported color temperature."""
        return self._max_mireds
    @property
    def custom_effects(self):
        """Return dict with custom effects."""
        return self._custom_effects
    @property
    def custom_effects_names(self):
        """Return list with custom effects names."""
        return list(self.custom_effects)
    @property
    def light_type(self):
        """Return light type."""
        return self._light_type
    @property
    def hs_color(self) -> tuple:
        """Return the color property."""
        hue = self._get_property("hue")
        sat = self._get_property("sat")
        if hue is None or sat is None:
            return None
        return (int(hue), int(sat))
    @property
    def rgb_color(self) -> tuple:
        """Return the color property."""
        rgb = self._get_property("rgb")
        if rgb is None:
            return None
        # The bulb packs RGB into one int: 0xRRGGBB.
        rgb = int(rgb)
        blue = rgb & 0xFF
        green = (rgb >> 8) & 0xFF
        red = (rgb >> 16) & 0xFF
        return (red, green, blue)
    @property
    def effect(self):
        """Return the current effect."""
        return self._effect if self.device.is_color_flow_enabled else None
    @property
    def _bulb(self) -> Bulb:
        return self.device.bulb
    @property
    def _properties(self) -> dict:
        return self._bulb.last_properties if self._bulb else {}
    def _get_property(self, prop, default=None):
        # Read a cached bulb property; subclasses remap names (e.g. bg_*).
        return self._properties.get(prop, default)
    @property
    def _brightness_property(self):
        return "bright"
    @property
    def _power_property(self):
        return "power"
    @property
    def _turn_on_power_mode(self):
        return PowerMode.LAST
    @property
    def _predefined_effects(self):
        return YEELIGHT_MONO_EFFECT_LIST
    @property
    def extra_state_attributes(self):
        """Return the device specific state attributes."""
        attributes = {
            "flowing": self.device.is_color_flow_enabled,
            "music_mode": self._bulb.music_mode,
        }
        if self.device.is_nightlight_supported:
            attributes["night_light"] = self.device.is_nightlight_enabled
        return attributes
    @property
    def device(self):
        """Return yeelight device."""
        return self._device
    async def async_update(self):
        """Update light properties."""
        await self.device.async_update()
    async def async_set_music_mode(self, music_mode) -> None:
        """Set the music mode on or off."""
        try:
            await self._async_set_music_mode(music_mode)
        except AssertionError as ex:
            _LOGGER.error("Unable to turn on music mode, consider disabling it: %s", ex)
    @_async_cmd
    async def _async_set_music_mode(self, music_mode) -> None:
        """Set the music mode on or off wrapped with _async_cmd."""
        bulb = self._bulb
        method = bulb.stop_music if not music_mode else bulb.start_music
        await self.hass.async_add_executor_job(method)
    @_async_cmd
    async def async_set_brightness(self, brightness, duration) -> None:
        """Set bulb brightness."""
        if not brightness:
            return
        if math.floor(self.brightness) == math.floor(brightness):
            _LOGGER.debug("brightness already set to: %s", brightness)
            # Already set, and since we get pushed updates
            # we avoid setting it again to ensure we do not
            # hit the rate limit
            return
        _LOGGER.debug("Setting brightness: %s", brightness)
        await self._bulb.async_set_brightness(
            brightness / 255 * 100, duration=duration, light_type=self.light_type
        )
    @_async_cmd
    async def async_set_hs(self, hs_color, duration) -> None:
        """Set bulb's color."""
        if not hs_color or COLOR_MODE_HS not in self.supported_color_modes:
            return
        if (
            not self.device.is_color_flow_enabled
            and self.color_mode == COLOR_MODE_HS
            and self.hs_color == hs_color
        ):
            _LOGGER.debug("HS already set to: %s", hs_color)
            # Already set, and since we get pushed updates
            # we avoid setting it again to ensure we do not
            # hit the rate limit
            return
        _LOGGER.debug("Setting HS: %s", hs_color)
        await self._bulb.async_set_hsv(
            hs_color[0], hs_color[1], duration=duration, light_type=self.light_type
        )
    @_async_cmd
    async def async_set_rgb(self, rgb, duration) -> None:
        """Set bulb's color."""
        if not rgb or COLOR_MODE_RGB not in self.supported_color_modes:
            return
        if (
            not self.device.is_color_flow_enabled
            and self.color_mode == COLOR_MODE_RGB
            and self.rgb_color == rgb
        ):
            _LOGGER.debug("RGB already set to: %s", rgb)
            # Already set, and since we get pushed updates
            # we avoid setting it again to ensure we do not
            # hit the rate limit
            return
        _LOGGER.debug("Setting RGB: %s", rgb)
        await self._bulb.async_set_rgb(
            *rgb, duration=duration, light_type=self.light_type
        )
    @_async_cmd
    async def async_set_colortemp(self, colortemp, duration) -> None:
        """Set bulb's color temperature."""
        if not colortemp or COLOR_MODE_COLOR_TEMP not in self.supported_color_modes:
            return
        temp_in_k = mired_to_kelvin(colortemp)
        if (
            not self.device.is_color_flow_enabled
            and self.color_mode == COLOR_MODE_COLOR_TEMP
            and self.color_temp == colortemp
        ):
            _LOGGER.debug("Color temp already set to: %s", temp_in_k)
            # Already set, and since we get pushed updates
            # we avoid setting it again to ensure we do not
            # hit the rate limit
            return
        await self._bulb.async_set_color_temp(
            temp_in_k, duration=duration, light_type=self.light_type
        )
    @_async_cmd
    async def async_set_default(self) -> None:
        """Set current options as default."""
        await self._bulb.async_set_default()
    @_async_cmd
    async def async_set_flash(self, flash) -> None:
        """Activate flash."""
        if not flash:
            return
        if int(self._bulb.last_properties["color_mode"]) != 1:
            _LOGGER.error("Flash supported currently only in RGB mode")
            return
        transition = int(self.config[CONF_TRANSITION])
        # BUGFIX: the two branches were independent "if" statements; any
        # other flash value left count/duration unbound -> NameError below.
        if flash == FLASH_LONG:
            count = 1
            duration = transition * 5
        elif flash == FLASH_SHORT:
            count = 1
            duration = transition * 2
        else:
            _LOGGER.error("Unknown flash value: %s", flash)
            return
        red, green, blue = color_util.color_hs_to_RGB(*self.hs_color)
        transitions = []
        transitions.append(RGBTransition(255, 0, 0, brightness=10, duration=duration))
        transitions.append(SleepTransition(duration=transition))
        transitions.append(
            RGBTransition(
                red, green, blue, brightness=self.brightness, duration=duration
            )
        )
        flow = Flow(count=count, transitions=transitions)
        await self._bulb.async_start_flow(flow, light_type=self.light_type)
    @_async_cmd
    async def async_set_effect(self, effect) -> None:
        """Activate effect."""
        if not effect:
            return
        if effect == EFFECT_STOP:
            await self._bulb.async_stop_flow(light_type=self.light_type)
            return
        # Custom effects first, then the zero-arg predefined flows, then the
        # handful of effects that need explicit arguments.
        if effect in self.custom_effects_names:
            flow = Flow(**self.custom_effects[effect])
        elif effect in EFFECTS_MAP:
            flow = EFFECTS_MAP[effect]()
        elif effect == EFFECT_FAST_RANDOM_LOOP:
            flow = flows.random_loop(duration=250)
        elif effect == EFFECT_WHATSAPP:
            flow = flows.pulse(37, 211, 102, count=2)
        elif effect == EFFECT_FACEBOOK:
            flow = flows.pulse(59, 89, 152, count=2)
        elif effect == EFFECT_TWITTER:
            flow = flows.pulse(0, 172, 237, count=2)
        else:
            return
        await self._bulb.async_start_flow(flow, light_type=self.light_type)
        self._effect = effect
    @_async_cmd
    async def _async_turn_on(self, duration) -> None:
        """Turn on the bulb with a transition duration wrapped with _async_cmd."""
        await self._bulb.async_turn_on(
            duration=duration,
            light_type=self.light_type,
            power_mode=self._turn_on_power_mode,
        )
    async def async_turn_on(self, **kwargs) -> None:
        """Turn the bulb on."""
        brightness = kwargs.get(ATTR_BRIGHTNESS)
        colortemp = kwargs.get(ATTR_COLOR_TEMP)
        hs_color = kwargs.get(ATTR_HS_COLOR)
        rgb = kwargs.get(ATTR_RGB_COLOR)
        flash = kwargs.get(ATTR_FLASH)
        effect = kwargs.get(ATTR_EFFECT)
        duration = int(self.config[CONF_TRANSITION])  # in ms
        if ATTR_TRANSITION in kwargs:  # passed kwarg overrides config
            duration = int(kwargs.get(ATTR_TRANSITION) * 1000)  # kwarg in s
        if not self.is_on:
            await self._async_turn_on(duration)
        if self.config[CONF_MODE_MUSIC] and not self._bulb.music_mode:
            await self.async_set_music_mode(True)
        await self.async_set_hs(hs_color, duration)
        await self.async_set_rgb(rgb, duration)
        await self.async_set_colortemp(colortemp, duration)
        await self.async_set_brightness(brightness, duration)
        await self.async_set_flash(flash)
        await self.async_set_effect(effect)
        # save the current state if we had a manual change.
        if self.config[CONF_SAVE_ON_CHANGE] and (brightness or colortemp or rgb):
            await self.async_set_default()
        self._async_schedule_state_check(True)
    @callback
    def _async_cancel_pending_state_check(self):
        """Cancel a pending state check."""
        if self._unexpected_state_check:
            self._unexpected_state_check()
            self._unexpected_state_check = None
    @callback
    def _async_schedule_state_check(self, expected_power_state):
        """Schedule a poll if the change failed to get pushed back to us.
        Some devices (mainly nightlights) will not send back the on state
        so we need to force a refresh.
        """
        self._async_cancel_pending_state_check()
        async def _async_update_if_state_unexpected(*_):
            self._unexpected_state_check = None
            if self.is_on != expected_power_state:
                await self.device.async_update(True)
        self._unexpected_state_check = async_call_later(
            self.hass, POWER_STATE_CHANGE_TIME, _async_update_if_state_unexpected
        )
    @_async_cmd
    async def _async_turn_off(self, duration) -> None:
        """Turn off with a given transition duration wrapped with _async_cmd."""
        await self._bulb.async_turn_off(duration=duration, light_type=self.light_type)
    async def async_turn_off(self, **kwargs) -> None:
        """Turn off."""
        if not self.is_on:
            return
        duration = int(self.config[CONF_TRANSITION])  # in ms
        if ATTR_TRANSITION in kwargs:  # passed kwarg overrides config
            duration = int(kwargs.get(ATTR_TRANSITION) * 1000)  # kwarg in s
        await self._async_turn_off(duration)
        self._async_schedule_state_check(False)
    @_async_cmd
    async def async_set_mode(self, mode: str):
        """Set a power mode."""
        await self._bulb.async_set_power_mode(PowerMode[mode.upper()])
    @_async_cmd
    async def async_start_flow(self, transitions, count=0, action=ACTION_RECOVER):
        """Start flow."""
        flow = Flow(count=count, action=Flow.actions[action], transitions=transitions)
        await self._bulb.async_start_flow(flow, light_type=self.light_type)
    @_async_cmd
    async def async_set_scene(self, scene_class, *args):
        """
        Set the light directly to the specified state.
        If the light is off, it will first be turned on.
        """
        await self._bulb.async_set_scene(scene_class, *args)
class YeelightColorLightSupport(YeelightGenericLight):
    """Mixin adding RGB / HS / color-temperature support to a Yeelight."""
    _attr_supported_color_modes = {COLOR_MODE_COLOR_TEMP, COLOR_MODE_HS, COLOR_MODE_RGB}
    # Raw "color_mode" property value -> HA color mode constant.
    _COLOR_MODE_BY_RAW = {
        1: COLOR_MODE_RGB,
        2: COLOR_MODE_COLOR_TEMP,
        3: COLOR_MODE_HS,
    }
    @property
    def color_mode(self):
        """Return the active color mode reported by the bulb."""
        raw_mode = int(self._get_property("color_mode"))
        try:
            return self._COLOR_MODE_BY_RAW[raw_mode]
        except KeyError:
            _LOGGER.debug("Light reported unknown color mode: %s", raw_mode)
            return COLOR_MODE_UNKNOWN
    @property
    def _predefined_effects(self):
        """Return the built-in effect names available on color bulbs."""
        return YEELIGHT_COLOR_EFFECT_LIST
class YeelightWhiteTempLightSupport:
    """Representation of a White temp Yeelight light."""
    # Tunable-white bulbs only expose color-temperature control.
    _attr_color_mode = COLOR_MODE_COLOR_TEMP
    _attr_supported_color_modes = {COLOR_MODE_COLOR_TEMP}
    @property
    def _predefined_effects(self):
        """Return the effects available on temperature-only bulbs."""
        return YEELIGHT_TEMP_ONLY_EFFECT_LIST
class YeelightNightLightSupport:
    """Representation of a Yeelight nightlight support."""
    @property
    def _turn_on_power_mode(self):
        """Always power on in normal (non-moonlight) mode."""
        return PowerMode.NORMAL
class YeelightWithoutNightlightSwitchMixIn:
    """A mix-in for yeelights without a nightlight switch."""
    @property
    def _brightness_property(self):
        # If the nightlight is not active, we do not
        # want to "current_brightness" since it will check
        # "bg_power" and main light could still be on
        if self.device.is_nightlight_enabled:
            return "nl_br"
        return super()._brightness_property
    @property
    def color_temp(self) -> int:
        """Return the color temperature."""
        if self.device.is_nightlight_enabled:
            # Enabling the nightlight locks the colortemp to max
            return self._max_mireds
        return super().color_temp
class YeelightColorLightWithoutNightlightSwitch(
    YeelightColorLightSupport,
    YeelightWithoutNightlightSwitchMixIn,
    YeelightGenericLight,
):
    """Representation of a Color Yeelight light."""
class YeelightColorLightWithNightlightSwitch(
    YeelightNightLightSupport, YeelightColorLightSupport, YeelightGenericLight
):
    """Representation of a Yeelight with rgb support and nightlight.
    It represents case when nightlight switch is set to light.
    """
    @property
    def is_on(self) -> bool:
        """Return true if device is on."""
        # Report "off" while the nightlight entity owns the device state.
        return super().is_on and not self.device.is_nightlight_enabled
class YeelightWhiteTempWithoutNightlightSwitch(
    YeelightWhiteTempLightSupport,
    YeelightWithoutNightlightSwitchMixIn,
    YeelightGenericLight,
):
    """White temp light, when nightlight switch is not set to light."""
class YeelightWithNightLight(
    YeelightNightLightSupport, YeelightWhiteTempLightSupport, YeelightGenericLight
):
    """Representation of a Yeelight with temp only support and nightlight.
    It represents case when nightlight switch is set to light.
    """
    @property
    def is_on(self) -> bool:
        """Return true if device is on."""
        # Report "off" while the nightlight entity owns the device state.
        return super().is_on and not self.device.is_nightlight_enabled
class YeelightNightLightMode(YeelightGenericLight):
    """Representation of a Yeelight when in nightlight mode."""
    _attr_color_mode = COLOR_MODE_BRIGHTNESS
    _attr_supported_color_modes = {COLOR_MODE_BRIGHTNESS}
    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        # Suffix keeps this entity distinct from the main-light entity
        # created for the same device.
        unique = super().unique_id
        return f"{unique}-nightlight"
    @property
    def name(self) -> str:
        """Return the name of the device if any."""
        return f"{self.device.name} Nightlight"
    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return "mdi:weather-night"
    @property
    def is_on(self) -> bool:
        """Return true if device is on."""
        # Only "on" while the device is actually in nightlight mode.
        return super().is_on and self.device.is_nightlight_enabled
    @property
    def _brightness_property(self):
        return "nl_br"
    @property
    def _turn_on_power_mode(self):
        return PowerMode.MOONLIGHT
    @property
    def supported_features(self):
        """Flag no supported features."""
        return 0
class YeelightNightLightModeWithAmbientSupport(YeelightNightLightMode):
    """Representation of a Yeelight, with ambient support, when in nightlight mode."""
    @property
    def _power_property(self):
        # Ambient-capable devices report the main light via "main_power".
        return "main_power"
class YeelightNightLightModeWithoutBrightnessControl(YeelightNightLightMode):
    """Representation of a Yeelight, when in nightlight mode.
    It represents case when nightlight mode brightness control is not supported.
    """
    # Only on/off is exposed; no brightness control in this mode.
    _attr_color_mode = COLOR_MODE_ONOFF
    _attr_supported_color_modes = {COLOR_MODE_ONOFF}
class YeelightWithAmbientWithoutNightlight(YeelightWhiteTempWithoutNightlightSwitch):
    """Representation of a Yeelight which has ambilight support.
    And nightlight switch type is none.
    """
    @property
    def _power_property(self):
        # Ambient-capable devices report the main light via "main_power".
        return "main_power"
class YeelightWithAmbientAndNightlight(YeelightWithNightLight):
    """Representation of a Yeelight which has ambilight support.
    And nightlight switch type is set to light.
    """
    @property
    def _power_property(self):
        # Ambient-capable devices report the main light via "main_power".
        return "main_power"
class YeelightAmbientLight(YeelightColorLightWithoutNightlightSwitch):
    """Representation of a Yeelight ambient light."""
    # Ambient properties normally use a "bg_" prefix; entries here map names
    # that do not follow that rule.
    PROPERTIES_MAPPING = {"color_mode": "bg_lmode"}
    def __init__(self, *args, **kwargs):
        """Initialize the Yeelight Ambient light."""
        super().__init__(*args, **kwargs)
        # Ambient lights use a fixed 1700-6500 K range regardless of model.
        self._min_mireds = kelvin_to_mired(6500)
        self._max_mireds = kelvin_to_mired(1700)
        self._light_type = LightType.Ambient
    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        unique = super().unique_id
        return f"{unique}-ambilight"
    @property
    def name(self) -> str:
        """Return the name of the device if any."""
        return f"{self.device.name} Ambilight"
    @property
    def _brightness_property(self):
        # Returned unprefixed; _get_property() below adds the "bg_" prefix.
        return "bright"
    def _get_property(self, prop, default=None):
        # Translate the main-light property name to its ambient counterpart
        # before delegating to the base implementation.
        if not (bg_prop := self.PROPERTIES_MAPPING.get(prop)):
            bg_prop = f"bg_{prop}"
        return super()._get_property(bg_prop, default)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationSecurityGroupsOperations:
"""ApplicationSecurityGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, serializers and client configuration."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
async def _delete_initial(
self,
resource_group_name: str,
application_security_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
application_security_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
async def get(
self,
resource_group_name: str,
application_security_group_name: str,
**kwargs: Any
) -> "_models.ApplicationSecurityGroup":
"""Gets information about the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
application_security_group_name: str,
parameters: "_models.ApplicationSecurityGroup",
**kwargs: Any
) -> "_models.ApplicationSecurityGroup":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationSecurityGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
application_security_group_name: str,
parameters: "_models.ApplicationSecurityGroup",
**kwargs: Any
) -> AsyncLROPoller["_models.ApplicationSecurityGroup"]:
"""Creates or updates an application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:param parameters: Parameters supplied to the create or update ApplicationSecurityGroup
operation.
:type parameters: ~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ApplicationSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.ApplicationSecurityGroupListResult"]:
"""Gets all application security groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationSecurityGroups'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ApplicationSecurityGroupListResult"]:
"""Gets all the application security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups'} # type: ignore
| |
from datetime import datetime, timedelta
import warnings
import weakref

import numpy as np

from pandas._libs import index as libindex
from pandas._libs.tslibs import (
    NaT, frequencies as libfrequencies, iNaT, resolution)
from pandas._libs.tslibs.period import (
    DIFFERENT_FREQ, IncompatibleFrequency, Period)
from pandas.util._decorators import Appender, Substitution, cache_readonly

from pandas.core.dtypes.common import (
    is_bool_dtype, is_datetime64_any_dtype, is_float, is_float_dtype,
    is_integer, is_integer_dtype, pandas_dtype)

from pandas.core import common as com
from pandas.core.accessor import delegate_names
from pandas.core.algorithms import unique1d
from pandas.core.arrays.period import (
    PeriodArray, period_array, validate_dtype_freq)
from pandas.core.base import _shared_docs
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import _index_shared_docs, ensure_index
from pandas.core.indexes.datetimelike import (
    DatetimeIndexOpsMixin, DatetimelikeDelegateMixin)
from pandas.core.indexes.datetimes import DatetimeIndex, Index, Int64Index
from pandas.core.missing import isna
from pandas.core.ops import get_op_result_name
from pandas.core.tools.datetimes import DateParseError, parse_time_string

from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
# Shared docstring substitutions for the Appender/Substitution decorators in
# this module; only the target class name differs from the base defaults.
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
    dict(target_klass='PeriodIndex or list of Periods'))
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop('data')
if values.dtype == 'int64':
freq = d.pop('freq', None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
class PeriodDelegateMixin(DatetimelikeDelegateMixin):
    """
    Delegate from PeriodIndex to PeriodArray.
    """
    # Target class whose attributes/methods are proxied through the index.
    _delegate_class = PeriodArray
    # Datetime-like properties surfaced on the index (year, month, ...).
    _delegated_properties = PeriodArray._datetimelike_ops
    # Methods forwarded to the array; _addsub_int_array is added explicitly
    # so integer-array +/- arithmetic can reach the array implementation.
    _delegated_methods = (
        set(PeriodArray._datetimelike_methods) | {'_addsub_int_array'}
    )
    # Properties returned raw (not re-boxed into an Index).
    _raw_properties = {'is_leap_year'}
@delegate_names(PeriodArray,
PeriodDelegateMixin._delegated_properties,
typ='property')
@delegate_names(PeriodArray,
PeriodDelegateMixin._delegated_methods,
typ="method",
overwrite=True)
class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc.
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1d integer np.ndarray or PeriodArray), optional
Optional period-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
.. deprecated:: 0.24.0
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
.. deprecated:: 0.24.0
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
.. deprecated:: 0.24.0
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
dayofyear
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Notes
-----
Creating a PeriodIndex based on `start`, `periods`, and `end` has
been deprecated in favor of :func:`period_range`.
Examples
--------
>>> idx = pd.PeriodIndex(year=year_arr, quarter=q_arr)
"""
_typ = 'periodindex'
_attributes = ['name', 'freq']
# define my properties & methods for delegation
_is_numeric_dtype = False
_infer_as_myclass = True
_data = None
_engine_type = libindex.PeriodEngine
# ------------------------------------------------------------------------
# Index Constructors
    def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
                periods=None, tz=None, dtype=None, copy=False, name=None,
                **fields):
        # Only whole datetime-field arrays may be passed by keyword; any other
        # keyword is a typo and should fail like a normal signature mismatch.
        valid_field_set = {'year', 'month', 'day', 'quarter',
                           'hour', 'minute', 'second'}
        if not set(fields).issubset(valid_field_set):
            raise TypeError('__new__() got an unexpected keyword argument {}'.
                            format(list(set(fields) - valid_field_set)[0]))
        if name is None and hasattr(data, 'name'):
            name = data.name
        if data is None and ordinal is None:
            # range-based.
            data, freq2 = PeriodArray._generate_range(start, end, periods,
                                                      freq, fields)
            # PeriodArray._generate range does validate that fields is
            # empty when really using the range-based constructor.
            if not fields:
                # Range endpoints without field arrays: deprecated path in
                # favor of pandas.period_range.
                msg = ("Creating a PeriodIndex by passing range "
                       "endpoints is deprecated. Use "
                       "`pandas.period_range` instead.")
                # period_range differs from PeriodIndex for cases like
                # start="2000", periods=4
                # PeriodIndex interprets that as A-DEC freq.
                # period_range interprets it as 'D' freq.
                cond = (
                    freq is None and (
                        (start and not isinstance(start, Period)) or
                        (end and not isinstance(end, Period))
                    )
                )
                if cond:
                    msg += (
                        " Note that the default `freq` may differ. Pass "
                        "'freq=\"{}\"' to ensure the same output."
                    ).format(freq2.freqstr)
                warnings.warn(msg, FutureWarning, stacklevel=2)
            freq = freq2
            data = PeriodArray(data, freq=freq)
        else:
            freq = validate_dtype_freq(dtype, freq)
            # PeriodIndex allow PeriodIndex(period_index, freq=different)
            # Let's not encourage that kind of behavior in PeriodArray.
            if freq and isinstance(data, cls) and data.freq != freq:
                # TODO: We can do some of these with no-copy / coercion?
                # e.g. D -> 2D seems to be OK
                data = data.asfreq(freq)
            if data is None and ordinal is not None:
                # we strangely ignore `ordinal` if data is passed.
                ordinal = np.asarray(ordinal, dtype=np.int64)
                data = PeriodArray(ordinal, freq)
            else:
                # don't pass copy here, since we copy later.
                data = period_array(data=data, freq=freq)
        if copy:
            data = data.copy()
        return cls._simple_new(data, name=name)
    @classmethod
    def _simple_new(cls, values, name=None, freq=None, **kwargs):
        """
        Create a new PeriodIndex.
        Parameters
        ----------
        values : PeriodArray, PeriodIndex, Index[int64], ndarray[int64]
            Values that can be converted to a PeriodArray without inference
            or coercion.
        """
        # TODO: raising on floats is tested, but maybe not useful.
        # Should the callers know not to pass floats?
        # At the very least, I think we can ensure that lists aren't passed.
        if isinstance(values, list):
            values = np.asarray(values)
        if is_float_dtype(values):
            raise TypeError("PeriodIndex._simple_new does not accept floats.")
        # An explicit freq means `values` are raw ordinals that still need to
        # be wrapped in a PeriodArray.
        if freq:
            freq = Period._maybe_convert_freq(freq)
            values = PeriodArray(values, freq=freq)
        if not isinstance(values, PeriodArray):
            raise TypeError("PeriodIndex._simple_new only accepts PeriodArray")
        # Bypass __new__'s validation/coercion: attach attributes directly.
        result = object.__new__(cls)
        result._data = values
        # For groupby perf. See note in indexes/base about _index_data
        result._index_data = values._data
        result.name = name
        result._reset_identity()
        return result
# ------------------------------------------------------------------------
# Data
    @property
    def values(self):
        # Materialize as an ndarray (routes through __array__ below).
        return np.asarray(self)
    @property
    def freq(self):
        # The frequency lives on the underlying PeriodArray.
        return self._data.freq
@freq.setter
def freq(self, value):
value = Period._maybe_convert_freq(value)
# TODO: When this deprecation is enforced, PeriodIndex.freq can
# be removed entirely, and we'll just inherit.
msg = ('Setting {cls}.freq has been deprecated and will be '
'removed in a future version; use {cls}.asfreq instead. '
'The {cls}.freq setter is not guaranteed to work.')
warnings.warn(msg.format(cls=type(self).__name__),
FutureWarning, stacklevel=2)
# PeriodArray._freq isn't actually mutable. We set the private _freq
# here, but people shouldn't be doing this anyway.
self._data._freq = value
    def _shallow_copy(self, values=None, **kwargs):
        # TODO: simplify, figure out type of values
        if values is None:
            values = self._data
        if isinstance(values, type(self)):
            values = values._values
        if not isinstance(values, PeriodArray):
            # Coerce plain int64 ndarrays (ordinals) directly; everything
            # else goes through the general period_array constructor.
            if (isinstance(values, np.ndarray) and
                    is_integer_dtype(values.dtype)):
                values = PeriodArray(values, freq=self.freq)
            else:
                # in particular, I would like to avoid period_array here.
                # Some people seem to be calling use with unexpected types
                # Index.difference -> ndarray[Period]
                # DatetimelikeIndexOpsMixin.repeat -> ndarray[ordinal]
                # I think that once all of Datetime* are EAs, we can simplify
                # this quite a bit.
                values = period_array(values, freq=self.freq)
        # We don't allow changing `freq` in _shallow_copy.
        validate_dtype_freq(self.dtype, kwargs.get('freq'))
        attributes = self._get_attributes_dict()
        attributes.update(kwargs)
        # Preserve dtype on empty results, where it cannot be inferred.
        if not len(values) and 'dtype' not in kwargs:
            attributes['dtype'] = self.dtype
        return self._simple_new(values, **attributes)
    def _shallow_copy_with_infer(self, values=None, **kwargs):
        """Shallow-copy without type inference: always return a PeriodIndex."""
        return self._shallow_copy(values=values, **kwargs)
@property
def _box_func(self):
"""Maybe box an ordinal or Period"""
# TODO(DatetimeArray): Avoid double-boxing
# PeriodArray takes care of boxing already, so we need to check
# whether we're given an ordinal or a Period. It seems like some
# places outside of indexes/period.py are calling this _box_func,
# but passing data that's already boxed.
def func(x):
if isinstance(x, Period) or x is NaT:
return x
else:
return Period._from_ordinal(ordinal=x, freq=self.freq)
return func
    def _maybe_convert_timedelta(self, other):
        """
        Convert timedelta-like input to an integer multiple of self.freq
        Parameters
        ----------
        other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
        Returns
        -------
        converted : int, np.ndarray[int64]
        Raises
        ------
        IncompatibleFrequency : if the input cannot be written as a multiple
        of self.freq. Note IncompatibleFrequency subclasses ValueError.
        """
        # Timedelta-like input only makes sense for tick-based (fixed
        # duration) frequencies; non-Tick offsets fall through to the
        # incompatible-frequency error at the bottom.
        if isinstance(
                other, (timedelta, np.timedelta64, Tick, np.ndarray)):
            offset = frequencies.to_offset(self.freq.rule_code)
            if isinstance(offset, Tick):
                # _check_timedeltalike_freq_compat will raise if incompatible
                delta = self._data._check_timedeltalike_freq_compat(other)
                return delta
        elif isinstance(other, DateOffset):
            # A DateOffset is compatible only if it shares our base alias.
            freqstr = other.rule_code
            base = libfrequencies.get_base_alias(freqstr)
            if base == self.freq.rule_code:
                return other.n
            msg = DIFFERENT_FREQ.format(cls=type(self).__name__,
                                        own_freq=self.freqstr,
                                        other_freq=other.freqstr)
            raise IncompatibleFrequency(msg)
        elif is_integer(other):
            # integer is passed to .shift via
            # _add_datetimelike_methods basically
            # but ufunc may pass integer to _add_delta
            return other
        # raise when input doesn't have freq
        msg = DIFFERENT_FREQ.format(cls=type(self).__name__,
                                    own_freq=self.freqstr,
                                    other_freq=None)
        raise IncompatibleFrequency(msg)
# ------------------------------------------------------------------------
# Rendering Methods
def _format_native_types(self, na_rep='NaT', quoting=None, **kwargs):
# just dispatch, return ndarray
return self._data._format_native_types(na_rep=na_rep,
quoting=quoting,
**kwargs)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.astype(object).values
    @property
    def _formatter_func(self):
        # Per-element formatter comes from the backing array; boxed=False
        # selects the unboxed (plain string) rendering.
        return self.array._formatter(boxed=False)
# ------------------------------------------------------------------------
# Indexing
@cache_readonly
def _engine(self):
return self._engine_type(lambda: self, len(self))
@Appender(_index_shared_docs['contains'])
def __contains__(self, key):
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
try:
self.get_loc(key)
return True
except Exception:
return False
contains = __contains__
@cache_readonly
def _int64index(self):
return Int64Index._simple_new(self.asi8, name=self.name)
# ------------------------------------------------------------------------
# Index Methods
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return PeriodIndex([item], **self._get_attributes_dict())
def __array__(self, dtype=None):
if is_integer_dtype(dtype):
return self.asi8
else:
return self.astype(object).values
    def __array_wrap__(self, result, context=None):
        """
        Gets called after a ufunc. Needs additional handling as
        PeriodIndex stores internal data as int dtype
        Replace this to __numpy_ufunc__ in future version
        """
        if isinstance(context, tuple) and len(context) > 0:
            func = context[0]
            if func is np.add:
                # fall through: addition results are re-boxed below
                pass
            elif func is np.subtract:
                # Period - Period yields offsets, not Periods: return a plain
                # Index; keep the name only when both operands agree on it.
                name = self.name
                left = context[1][0]
                right = context[1][1]
                if (isinstance(left, PeriodIndex) and
                        isinstance(right, PeriodIndex)):
                    name = left.name if left.name == right.name else None
                    return Index(result, name=name)
                elif isinstance(left, Period) or isinstance(right, Period):
                    return Index(result, name=name)
            elif isinstance(func, np.ufunc):
                if 'M->M' not in func.types:
                    msg = "ufunc '{0}' not supported for the PeriodIndex"
                    # This should be TypeError, but TypeError cannot be raised
                    # from here because numpy catches.
                    raise ValueError(msg.format(func.__name__))
        # Comparison ufuncs: hand the boolean array back unchanged.
        if is_bool_dtype(result):
            return result
        # the result is object dtype array of Period
        # cannot pass _simple_new as it is
        return type(self)(result, freq=self.freq, name=self.name)
    def asof_locs(self, where, mask):
        """
        Return the positions of the last valid values at or before each
        requested timestamp.

        Parameters
        ----------
        where : array of timestamps
        mask : array of booleans where data is not NA
        """
        where_idx = where
        if isinstance(where_idx, DatetimeIndex):
            # Align requested timestamps to this index's frequency.
            where_idx = PeriodIndex(where_idx.values, freq=self.freq)

        # For each request, position (within the valid subset) of the last
        # value at or before it.
        locs = self._ndarray_values[mask].searchsorted(
            where_idx._ndarray_values, side='right')

        locs = np.where(locs > 0, locs - 1, 0)
        result = np.arange(len(self))[mask].take(locs)

        # Requests earlier than the first valid value get -1.
        first = mask.argmax()
        result[(locs == 0) & (where_idx._ndarray_values <
                              self._ndarray_values[first])] = -1

        return result
    @Appender(_index_shared_docs['astype'])
    def astype(self, dtype, copy=True, how='start'):
        dtype = pandas_dtype(dtype)

        if is_datetime64_any_dtype(dtype):
            # 'how' is index-specific, isn't part of the EA interface.
            # Convert via timestamps, localizing to the dtype's tz if any.
            tz = getattr(dtype, 'tz', None)
            return self.to_timestamp(how=how).tz_localize(tz)

        # TODO: should probably raise on `how` here, so we don't ignore it.
        return super().astype(dtype, copy=copy)
    @Substitution(klass='PeriodIndex')
    @Appender(_shared_docs['searchsorted'])
    def searchsorted(self, value, side='left', sorter=None):
        if isinstance(value, Period):
            # A Period must share our frequency before we can compare
            # its ordinal against ours.
            if value.freq != self.freq:
                msg = DIFFERENT_FREQ.format(cls=type(self).__name__,
                                            own_freq=self.freqstr,
                                            other_freq=value.freqstr)
                raise IncompatibleFrequency(msg)
            value = value.ordinal
        elif isinstance(value, str):
            # Parse strings as Periods at our frequency.
            try:
                value = Period(value, freq=self.freq).ordinal
            except DateParseError:
                raise KeyError("Cannot interpret '{}' as period".format(value))

        # Delegate to ndarray.searchsorted on the raw ordinals.
        return self._ndarray_values.searchsorted(value, side=side,
                                                 sorter=sorter)
    @property
    def is_all_dates(self):
        # Every element of a PeriodIndex is datetime-like by construction.
        return True
    @property
    def is_full(self):
        """
        Returns True if this PeriodIndex is range-like in that all Periods
        between start and end are present, in order.
        """
        if len(self) == 0:
            return True
        if not self.is_monotonic:
            raise ValueError('Index is not monotonic')
        values = self.asi8
        # With no gaps, consecutive ordinals differ by at most 1
        # (duplicates give a difference of 0 and are allowed).
        return ((values[1:] - values[:-1]) < 2).all()
    @property
    def inferred_type(self):
        # b/c data is represented as ints make sure we can't have ambiguous
        # indexing
        return 'period'
    def get_value(self, series, key):
        """
        Fast lookup of value from 1-dimensional ndarray. Only use this if you
        know what you're doing
        """
        s = com.values_from_object(series)
        try:
            # Fast path: exact label lookup via the base implementation.
            return com.maybe_box(self,
                                 super().get_value(s, key),
                                 series, key)
        except (KeyError, IndexError):
            try:
                # String keys may be partial dates (e.g. '2005' on a monthly
                # index); parse and resolve against our frequency.
                asdt, parsed, reso = parse_time_string(key, self.freq)
                grp = resolution.Resolution.get_freq_group(reso)
                freqn = resolution.get_freq_group(self.freq)

                vals = self._ndarray_values

                # if our data is higher resolution than requested key, slice
                if grp < freqn:
                    iv = Period(asdt, freq=(grp, 1))
                    # Ordinal range [start, end] the coarse key spans.
                    ord1 = iv.asfreq(self.freq, how='S').ordinal
                    ord2 = iv.asfreq(self.freq, how='E').ordinal

                    if ord2 < vals[0] or ord1 > vals[-1]:
                        raise KeyError(key)

                    pos = np.searchsorted(self._ndarray_values, [ord1, ord2])
                    key = slice(pos[0], pos[1] + 1)
                    return series[key]
                elif grp == freqn:
                    # Same resolution: reduce to an ordinal-based lookup.
                    key = Period(asdt, freq=self.freq).ordinal
                    return com.maybe_box(self, self._engine.get_value(s, key),
                                         series, key)
                else:
                    raise KeyError(key)
            except TypeError:
                pass

            # Last resort: coerce the key itself to a Period; NaT-like keys
            # fall back to their integer value.
            period = Period(key, self.freq)
            key = period.value if isna(period) else period.ordinal
            return com.maybe_box(self, self._engine.get_value(s, key),
                                 series, key)
    @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
    def get_indexer(self, target, method=None, limit=None, tolerance=None):
        target = ensure_index(target)

        # Frequency mismatch cannot be reconciled; refuse early.
        if hasattr(target, 'freq') and target.freq != self.freq:
            msg = DIFFERENT_FREQ.format(cls=type(self).__name__,
                                        own_freq=self.freqstr,
                                        other_freq=target.freqstr)
            raise IncompatibleFrequency(msg)

        if isinstance(target, PeriodIndex):
            # Compare on raw ordinals via the Int64Index machinery below.
            target = target.asi8

        if tolerance is not None:
            tolerance = self._convert_tolerance(tolerance, target)
        return Index.get_indexer(self._int64index, target, method,
                                 limit, tolerance)
def _get_unique_index(self, dropna=False):
"""
wrap Index._get_unique_index to handle NaT
"""
res = super()._get_unique_index(dropna=dropna)
if dropna:
res = res.dropna()
return res
    @Appender(Index.unique.__doc__)
    def unique(self, level=None):
        # override the Index.unique method for performance GH#23083
        if level is not None:
            # this should never occur, but is retained to make the signature
            # match Index.unique
            self._validate_index_level(level)

        # Deduplicate on the raw ordinals, then re-wrap as a PeriodIndex.
        values = self._ndarray_values
        result = unique1d(values)
        return self._shallow_copy(result)
    def get_loc(self, key, method=None, tolerance=None):
        """
        Get integer location for requested label

        Returns
        -------
        loc : int
        """
        try:
            # Fast path: direct engine lookup.
            return self._engine.get_loc(key)
        except KeyError:
            if is_integer(key):
                # Integer keys are positional; a failed lookup is final.
                raise

            try:
                # Try to interpret strings/datetimes as a date at our freq.
                asdt, parsed, reso = parse_time_string(key, self.freq)
                key = asdt
            except TypeError:
                pass
            except DateParseError:
                # A string with invalid format
                raise KeyError("Cannot interpret '{}' as period".format(key))

            try:
                key = Period(key, freq=self.freq)
            except ValueError:
                # we cannot construct the Period
                # as we have an invalid type
                raise KeyError(key)

            try:
                # Delegate to the Int64Index over ordinals; re-raise with
                # the original (Period) key for a readable error message.
                ordinal = iNaT if key is NaT else key.ordinal
                if tolerance is not None:
                    tolerance = self._convert_tolerance(tolerance,
                                                       np.asarray(key))
                return self._int64index.get_loc(ordinal, method, tolerance)

            except KeyError:
                raise KeyError(key)
    def _maybe_cast_slice_bound(self, label, side, kind):
        """
        If label is a string or a datetime, cast it to Period.ordinal according
        to resolution.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}
        kind : {'ix', 'loc', 'getitem'}

        Returns
        -------
        bound : Period or object

        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """
        assert kind in ['ix', 'loc', 'getitem']

        if isinstance(label, datetime):
            return Period(label, freq=self.freq)
        elif isinstance(label, str):
            try:
                # Partial string: pick the start or end of the spanned range
                # depending on which slice bound we are resolving.
                _, parsed, reso = parse_time_string(label, self.freq)
                bounds = self._parsed_string_to_bounds(reso, parsed)
                return bounds[0 if side == 'left' else 1]
            except Exception:
                raise KeyError(label)
        elif is_integer(label) or is_float(label):
            self._invalid_indexer('slice', label)

        return label
def _parsed_string_to_bounds(self, reso, parsed):
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
elif reso == 'day':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
freq='D')
elif reso == 'hour':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, freq='H')
elif reso == 'minute':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, freq='T')
elif reso == 'second':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute,
second=parsed.second, freq='S')
else:
raise KeyError(reso)
return (t1.asfreq(self.freq, how='start'),
t1.asfreq(self.freq, how='end'))
    def _get_string_slice(self, key):
        # Partial-string slicing (e.g. idx['2005']); requires sorted data
        # so the span maps to a contiguous positional slice.
        if not self.is_monotonic:
            raise ValueError('Partial indexing only valid for '
                             'ordered time series')

        key, parsed, reso = parse_time_string(key, self.freq)
        grp = resolution.Resolution.get_freq_group(reso)
        freqn = resolution.get_freq_group(self.freq)
        # Sub-daily resolutions only make sense when the key is coarser
        # than the index frequency.
        if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:
            raise KeyError(key)

        t1, t2 = self._parsed_string_to_bounds(reso, parsed)
        return slice(self.searchsorted(t1.ordinal, side='left'),
                     self.searchsorted(t2.ordinal, side='right'))
    def _convert_tolerance(self, tolerance, target):
        # Normalize via the shared datetimelike machinery, then express the
        # tolerance as an integer multiple of this index's frequency.
        tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance,
                                                             target)
        if target.size != tolerance.size and tolerance.size > 1:
            raise ValueError('list-like tolerance size must match '
                             'target index size')
        return self._maybe_convert_timedelta(tolerance)
    def insert(self, loc, item):
        # Anything that is not a Period with our exact frequency forces a
        # fallback to an object-dtype index.
        if not isinstance(item, Period) or self.freq != item.freq:
            return self.astype(object).insert(loc, item)

        # Splice the new ordinal into the underlying int64 data.
        idx = np.concatenate((self[:loc].asi8, np.array([item.ordinal]),
                              self[loc:].asi8))
        return self._shallow_copy(idx)
    def join(self, other, how='left', level=None, return_indexers=False,
             sort=False):
        """
        See Index.join
        """
        self._assert_can_do_setop(other)

        if not isinstance(other, PeriodIndex):
            # Mixed types: join as plain object indexes.
            return self.astype(object).join(other, how=how, level=level,
                                            return_indexers=return_indexers,
                                            sort=sort)

        # Same-freq PeriodIndexes: join on the int64 ordinals, then restore
        # period metadata on the result.
        result = Int64Index.join(self, other, how=how, level=level,
                                 return_indexers=return_indexers,
                                 sort=sort)

        if return_indexers:
            result, lidx, ridx = result
            return self._apply_meta(result), lidx, ridx
        return self._apply_meta(result)
    @Appender(Index.intersection.__doc__)
    def intersection(self, other, sort=False):
        # Delegate to the base Index implementation.
        return Index.intersection(self, other, sort=sort)
    def _assert_can_do_setop(self, other):
        super()._assert_can_do_setop(other)

        # *Can't* use PeriodIndexes of different freqs
        # *Can* use PeriodIndex/DatetimeIndex
        if isinstance(other, PeriodIndex) and self.freq != other.freq:
            msg = DIFFERENT_FREQ.format(cls=type(self).__name__,
                                        own_freq=self.freqstr,
                                        other_freq=other.freqstr)
            raise IncompatibleFrequency(msg)
def _wrap_setop_result(self, other, result):
name = get_op_result_name(self, other)
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = PeriodIndex._simple_new(rawarr, freq=self.freq,
name=self.name)
return rawarr
    def __setstate__(self, state):
        """Necessary for making this object picklable"""
        if isinstance(state, dict):
            super().__setstate__(state)

        elif isinstance(state, tuple):

            # < 0.15 compat
            if len(state) == 2:
                nd_state, own_state = state
                # Rebuild the raw ndarray from the legacy ndarray state.
                data = np.empty(nd_state[1], dtype=nd_state[2])
                np.ndarray.__setstate__(data, nd_state)

                # backcompat
                freq = Period._maybe_convert_freq(own_state[1])

            else:  # pragma: no cover
                data = np.empty(state)
                np.ndarray.__setstate__(self, state)
                freq = None  # ?

            # Wrap the restored values in the modern PeriodArray backing.
            data = PeriodArray(data, freq=freq)
            self._data = data

        else:
            raise Exception("invalid pickle state")

    _unpickle_compat = __setstate__
    @property
    def flags(self):
        """ return the ndarray.flags for the underlying data """
        warnings.warn("{obj}.flags is deprecated and will be removed "
                      "in a future version".format(obj=type(self).__name__),
                      FutureWarning, stacklevel=2)
        return self._ndarray_values.flags
def item(self):
"""
return the first element of the underlying data as a python
scalar
"""
# TODO(DatetimeArray): remove
if len(self) == 1:
return self[0]
else:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
    @property
    def data(self):
        """ return the data pointer of the underlying data """
        warnings.warn("{obj}.data is deprecated and will be removed "
                      "in a future version".format(obj=type(self).__name__),
                      FutureWarning, stacklevel=2)
        return np.asarray(self._data).data
    @property
    def base(self):
        """ return the base object if the memory of the underlying data is
        shared
        """
        warnings.warn("{obj}.base is deprecated and will be removed "
                      "in a future version".format(obj=type(self).__name__),
                      FutureWarning, stacklevel=2)
        return np.asarray(self._data)
# Install shared datetimelike machinery on the class: comparison operators,
# datetimelike arithmetic, and explicitly-disabled numeric/logical methods.
PeriodIndex._add_comparison_ops()
PeriodIndex._add_numeric_methods_disabled()
PeriodIndex._add_logical_methods_disabled()
PeriodIndex._add_datetimelike_methods()
def period_range(start=None, end=None, periods=None, freq=None, name=None):
    """
    Return a fixed frequency PeriodIndex, with day (calendar) as the default
    frequency

    Parameters
    ----------
    start : string or period-like, default None
        Left bound for generating periods
    end : string or period-like, default None
        Right bound for generating periods
    periods : integer, default None
        Number of periods to generate
    freq : string or DateOffset, optional
        Frequency alias. By default the freq is taken from `start` or `end`
        if those are Period objects. Otherwise, the default is ``"D"`` for
        daily frequency.
    name : string, default None
        Name of the resulting PeriodIndex

    Returns
    -------
    prng : PeriodIndex

    Notes
    -----
    Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
    must be specified.

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

    Examples
    --------
    >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
    PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05',
                 '2017-06', '2017-07', '2017-08', '2017-09',
                 '2017-10', '2017-11', '2017-12', '2018-01'],
                dtype='period[M]', freq='M')

    If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
    endpoints for a ``PeriodIndex`` with frequency matching that of the
    ``period_range`` constructor.

    >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
    ...                 end=pd.Period('2017Q2', freq='Q'), freq='M')
    PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
                dtype='period[M]', freq='M')
    """
    if com.count_not_none(start, end, periods) != 2:
        raise ValueError('Of the three parameters: start, end, and periods, '
                         'exactly two must be specified')
    # Default to daily frequency only when neither endpoint carries a freq.
    if freq is None and (not isinstance(start, Period)
                         and not isinstance(end, Period)):
        freq = 'D'

    data, freq = PeriodArray._generate_range(start, end, periods, freq,
                                             fields={})
    data = PeriodArray(data, freq=freq)
    return PeriodIndex(data, name=name)
# ---------------------------------------------------------------------------
#!/usr/bin/env python2
import sys
import os
import numpy as np
from mpmath import mp
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import math
# The current working directory name is used as an identifier in the banner.
curid = os.path.basename(os.getcwd())

print("*"*80)
print(curid)
print("*"*80)

if len(sys.argv) < 2:
    print("Execute with [executable] [reference_file] [output] [dispersion files]")
    sys.exit(1)

# Passing the literal 'dont_plot_reference' as the first argument suppresses
# the analytical reference curves in the plot.
plot_reference = True
if 'dont_plot_reference' == sys.argv[1]:
    plot_reference = False

output_file = sys.argv[2]
comp_files = sys.argv[3:]

# Output format is inferred from the output file name.
ext = 'png'
if 'pdf' in output_file:
    ext = 'pdf'
class Params:
    """Simulation metadata parsed from a data file header by load_data()."""
    # Short aliases kept for backward compatibility with existing callers.
    time = -1
    g = -1
    f = -1
    h = -1
    r = -1
    # BUGFIX: load_data() assigns these long attribute names, but they had
    # no class-level defaults.  If a header line was missing, reading the
    # attribute raised AttributeError instead of triggering the intended
    # "== -1 -> warning" checks in load_data().
    gravitation = -1
    h0 = -1
    sphere_radius = -1
    sphere_rotating_coriolis_omega = -1
def load_data(filename):
    """
    Load one eigenvalue file: parse the '#'-prefixed header lines into a
    Params object, then load the numeric payload with np.loadtxt.

    Returns (data, params) where data is a 1-D array (complex if the file
    had two columns) and params carries the parsed metadata.
    Exits the process on missing metadata or malformed headers.
    """
    p = Params()

    fileh = open(filename)

    # Header lines look like '# <tag> <value>'; stop at the first
    # non-comment line (np.loadtxt below re-reads the file and skips the
    # '#' lines itself).
    while True:
        headerstr = fileh.readline()
        if headerstr[0] != '#':
            break

        if headerstr[2] == 't':
            p.time = float(headerstr[4:])
        elif headerstr[2] == 'g':
            p.gravitation = float(headerstr[4:])
        elif headerstr[2] == 'h':
            p.h0 = float(headerstr[4:])
        elif headerstr[2] == 'r':
            p.sphere_radius = float(headerstr[4:])
        elif headerstr[2] == 'f':
            p.sphere_rotating_coriolis_omega = float(headerstr[4:])
        else:
            print("ERROR: Unknown tag "+headerstr[2])
            sys.exit(1)

    fileh.close()

    # Each metadata field must have been seen; -1 is the "unset" sentinel.
    if p.time == -1:
        print("Warning: time meta information not found")
        print(filename)
        sys.exit(1)

    if p.gravitation == -1:
        print("Warning: g meta information not found")
        print(filename)
        sys.exit(1)

    if p.h0 == -1:
        print("Warning: h meta information not found")
        print(filename)
        sys.exit(1)

    if p.sphere_radius == -1:
        print("Warning: r meta information not found")
        print(filename)
        sys.exit(1)

    if p.sphere_rotating_coriolis_omega == -1:
        print("Warning: f meta information not found")
        print(filename)
        sys.exit(1)

    data_ref = np.loadtxt(filename, skiprows=0)

    rows, cols = data_ref.shape

    print("Loaded "+str(rows)+" rows")
    print("Loaded "+str(cols)+" cols")

    if cols > 2:
        print("Fatal error, cols > 2!")
        sys.exit(1)

    if cols == 2:
        # Two columns encode (real, imaginary) parts of complex eigenvalues.
        print("Assuming complex values => splitting them!")
        data_ref = data_ref[:, 0] + data_ref[:, 1]*1j

    return (data_ref, p)
def getTruncation(
        k  # number of reference Eigenvalues (3x number of DoFs)
):
    """
    Return the spectral truncation N such that the triangular number
    1 + 2 + ... + N equals k/3 (each spherical-harmonic degree n
    contributes n+1 modes, and there are 3 eigenvalues per DoF).
    Exits the process if k/3 is not a triangular number.
    """
    # BUGFIX: use floor division. Under Python 3, 'k = k/3' produced a
    # float, so the 't != k' consistency check compared int to float.
    k = k // 3

    N = 0
    t = 0
    while t < k:
        t += N + 1
        N = N + 1

    if t != k:
        print("ERROR: Unable to compute truncation")
        sys.exit(1)

    return N
# get truncation
(data_dummy, p) = load_data(comp_files[0])
data_dummy = data_dummy.imag
num_evalues = len(data_dummy)
N = getTruncation(num_evalues)

# compute reference data
data_ref = []
if 'fsphere1' in comp_files[0]:
    # compute analytical Eigenvalues for fsphere
    #
    # BUGFIX: load_data() stores the metadata under the long attribute
    # names.  The short aliases (p.f / p.r / p.h) only ever held the -1
    # class-level defaults, so the analytical frequencies were computed
    # from garbage values.
    f = p.sphere_rotating_coriolis_omega
    r = p.sphere_radius
    h = p.h0

    for n in range(0, N):
        # compute w in side brackets (equation 47) in John's paper
        w = 0
        if True:
            w = math.sqrt(f*f + n*(n+1)*h/(r*r))
        else:
            # symbolic alternative, kept for reference / cross-checking
            from sympy import Symbol, solve
            w = Symbol("w")
            s = solve(w*(w*w-f*f-n*(n+1)*h/(r*r)))

        # each degree n contributes n+1 gravity-wave pairs (+w, -w) ...
        for i in range(0, n+1):
            data_ref.append(w)
            data_ref.append(-w)

        # ... plus n+1 stationary (zero-frequency) modes
        for i in range(0, n+1):
            data_ref.append(0)

    data_ref.sort()
else:
    print("TODO")
    sys.exit(1)

##################################################################
##################################################################

# markers: collect every usable matplotlib marker symbol
markers = []
for m in Line2D.markers:
    try:
        if m != ' ':
            markers.append(m)
    except TypeError:
        pass

# BUGFIX: use the print() function consistently (was a Py2 print statement)
print(markers)

##################################################################
##################################################################

print("Time: "+str(p.time))
# BUGFIX: report the attributes load_data() actually sets; the short
# aliases would have printed the -1 placeholders.
print("g: "+str(p.gravitation))
print("h: "+str(p.h0))
print("r: "+str(p.sphere_radius))
print("f: "+str(p.sphere_rotating_coriolis_omega))
##################################################################
##################################################################
##################################################################
# Frequencies below this magnitude are treated as zero (stationary modes).
epsilon = 1e-14

#
# Relative dispersion error for single-stage two-level schemes
# (see Durran, Numerical Methods for Fluid Dynamics, page 46).
# Parameter a selects the type of two-level scheme:
#   0:   forward Euler
#   1:   backward Euler
#   0.5: Crank-Nicolson (trapezoidal)
#
def RSingle(
        omega,  # wave speed
        dt,     # time step size
        a       # alpha coefficient
):
    if abs(omega) < epsilon:
        return 1.0
    phase = omega * dt
    return 1.0/phase * math.atan(phase / (1.0 - a*(1.0-a)*(phase**2.0)))


def RSingleRK2(
        omega,  # wave speed
        dt      # time step size
):
    if abs(omega) < epsilon:
        return 1.0
    phase = omega * dt
    return 1.0/phase * math.atan(phase / (1.0 - 0.5*(phase**2.0)))
if True:
    # Figure scale factor and layout.
    s = 0.7
    fig, ax = plt.subplots(figsize=(10.0*s, 5.0*s))
    plt.subplots_adjust(left=0.15, right=0.95, top=0.85, bottom=0.15)
    ax.grid(linestyle='-', linewidth='0.5', color='grey')

    alpha_vals = [0.0, 1.0, 0.5]  # alpha values for relative dispersion shifts (see RSingle for a description)

    # number of total plots
    num_plots = len(alpha_vals)

    colormap = plt.cm.gist_ncar
    #plt.gca().set_color_cycle([colormap(i) for i in np.linspace(0, 0.9, num_plots)])
    colors = [colormap(i) for i in np.linspace(0, 0.9, num_plots)]

    legend_labels=[]

    # plt.yscale("log", nonposy='clip')

    # timestep numbers
    ik = 4
    for comp_file in comp_files:
        ik = ik+1

        print("Loading "+comp_file)
        (data_cmp, p_cmp) = load_data(comp_file)
        data_cmp = data_cmp.imag
        data_cmp.sort()

        if len(data_ref) != len(data_cmp):
            print("Number of Eigenvalues don't match")
            sys.exit(1)

        # Derive a short legend label by stripping the run-configuration
        # boilerplate from the (very long) file path.
        tmp = comp_file[:]
        tmp = tmp.replace('_rexim00000000', '')
        tmp = tmp.replace('_rexim0000000', '_M')
        tmp = tmp.replace('_rexim000000', '_M')
        tmp = tmp.replace('_rexim00000', '_M')
        tmp = tmp.replace('_rexim0000', '_M')
        tmp = tmp.replace('script_g1_h100000_f0.000145842_a6371220_u0_robert1_pdeid1_fsphere1_', '')
        tmp = tmp.replace('script_g1_h100000_f0.000145842_a6371220_u0_robert1_pdeid1_fsphere0_', '')
        tmp = tmp.replace('_t-0000001_o000.0001', '')
        tmp = tmp.replace('C0000', 'C')
        tmp = tmp.replace('_tsm00', '_TM')
        tmp = tmp.replace('_tsm', '_TM')
        tmp = tmp.replace('_tso0', '')
        tmp = tmp.replace('_rexih0.15', '')
        tmp = tmp.replace('_rexihalf0', '')
        tmp = tmp.replace('_rexihalf1', '')
        tmp = tmp.replace('_rexiextmodes02', '')
        tmp = tmp.replace('_rexiextmodes04', '')
        tmp = tmp.replace('_rexiprealloc0', '')
        tmp = tmp.replace('/output_normal_modes_physical_t00000002000.00000000.csv_evalues_complex.csv', '')
        tmp = tmp.replace('/output_normal_modes_physical_t00000000400.00000000.csv_evalues_complex.csv', '')
        legend_labels.append(tmp)

        datax = []
        datap = []
        for i in range(len(data_cmp)):
            # REXI runs: skip (near-)zero eigenvalues entirely.
            if 'TM_l_rexi' in tmp:
                if abs(data_cmp[i]) < epsilon:
                    continue

            datax.append(data_ref[i])
            # NOTE(review): data_ref[i] may be 0 for stationary modes;
            # presumably numpy scalar division yields inf/nan here rather
            # than raising — confirm inputs.
            rel_diff = (data_cmp[i]-data_ref[i])/data_ref[i]+1.0

            # Filter out this point
            if not 'TM_l_rexi' in tmp:
                if abs(data_cmp[i]) < epsilon:
                    rel_diff = 1.0

            datap.append(rel_diff)

        # Cycle marker/color per file.
        marker = markers[(ik-1) % len(markers)]
        color = colors[(ik-1) % len(colors)]

        plt.plot(
            datax,
            datap,
            linewidth=1,
            linestyle='-',
            marker=marker,
            markevery=10,
            markersize=4,
            color = color
        )

    # Shared line style for the analytical reference curves.
    ref_linewidth = 0.5
    ref_linestyle = '--'
    ref_color = 'k'
    ref_marker = ''
    ref_markevery = 10
    ref_markersize = 2

    if plot_reference:
        legend_labels.append("Analytical dispersion errors for RK1/2 and CN")
        for alpha in alpha_vals:
            ik = ik+1

            print("Plotting relative dispersion for alpha "+str(alpha))
            #legend_labels.append("Dispersion with alpha="+str(alpha))

            #
            # generate graph (datax, datap) for
            # omega vs. omega+relative error
            #
            datax = data_ref
            dt = p.time
            datap = [RSingle(i, dt, alpha) for i in data_ref]

            # ref_marker = markers[(ik-1) % len(markers)]

            plt.plot(
                datax,
                datap,
                linewidth=ref_linewidth,
                linestyle=ref_linestyle,
                color=ref_color,
                marker=ref_marker,
                markevery=ref_markevery,
                markersize=ref_markersize
            )

        if True:
            ik = ik+1

            print("Plotting relative dispersion for alpha "+str(alpha))
            #legend_labels.append("Dispersion for RK2")

            #
            # generate graph (datax, datap) for
            # omega vs. omega+relative error
            #
            datax = data_ref
            dt = p.time
            datap = [RSingleRK2(i, dt) for i in data_ref]

            plt.plot(
                datax,
                datap,
                linewidth=ref_linewidth,
                linestyle=ref_linestyle,
                color=ref_color,
                marker=ref_marker,
                markevery=ref_markevery,
                markersize=ref_markersize
            )

    ax.set_xlabel('Dispersion speed')
    ax.set_ylabel('Error in dispersion')

    leg = plt.legend(legend_labels, ncol=1, loc='lower right', fontsize=6)
    leg.get_frame().set_alpha(1)

    plt.savefig(output_file, dpi=300)
# ---------------------------------------------------------------------------
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.python_api_dispatcher."""
import collections
import numpy as np
from tensorflow.python.framework import _pywrap_python_api_dispatcher as dispatch
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
# PyTypeChecker::MatchType enum values, aliased for brevity in the tests.
NO_MATCH = dispatch.MatchType.NO_MATCH
MATCH = dispatch.MatchType.MATCH
MATCH_DISPATCHABLE = dispatch.MatchType.MATCH_DISPATCHABLE
@test_util.run_all_in_graph_and_eager_modes
class PythonTypeCheckerTest(test_util.TensorFlowTestCase):
  """Tests for PyTypeChecker objects (instance, union, and list checkers)."""

  def testInstanceChecker(self):
    """Instance checkers match by isinstance, including subclasses."""
    t = constant_op.constant([1, 2, 3])
    rt = ragged_factory_ops.constant([[1, 2], [3, 4, 5]])

    with self.subTest('int checker'):
      int_checker = dispatch.MakeInstanceChecker(int)
      self.assertEqual(int_checker.Check(3), MATCH)
      self.assertEqual(int_checker.Check(3.0), NO_MATCH)
      self.assertEqual(int_checker.Check(t), NO_MATCH)
      self.assertEqual(int_checker.cost(), 1)
      self.assertEqual(repr(int_checker), '<PyTypeChecker int>')

    with self.subTest('tensor checker'):
      tensor_checker = dispatch.MakeInstanceChecker(ops.Tensor)
      self.assertEqual(tensor_checker.Check(t), MATCH)
      self.assertEqual(tensor_checker.Check(3), NO_MATCH)
      self.assertEqual(tensor_checker.Check(3.0), NO_MATCH)
      self.assertEqual(tensor_checker.cost(), 1)
      self.assertEqual(repr(tensor_checker), '<PyTypeChecker Tensor>')

    with self.subTest('ragged checker'):
      # RaggedTensor is a registered dispatchable type, so it yields
      # MATCH_DISPATCHABLE rather than plain MATCH.
      ragged_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
      self.assertEqual(ragged_checker.Check(rt), MATCH_DISPATCHABLE)
      self.assertEqual(ragged_checker.Check(3), NO_MATCH)
      self.assertEqual(ragged_checker.Check(t), NO_MATCH)
      self.assertEqual(ragged_checker.cost(), 1)
      self.assertEqual(repr(ragged_checker), '<PyTypeChecker RaggedTensor>')

    with self.subTest('int or float checker'):
      int_checker = dispatch.MakeInstanceChecker(int, float)
      self.assertEqual(int_checker.Check(3), MATCH)
      self.assertEqual(int_checker.Check(3.0), MATCH)
      self.assertEqual(int_checker.Check(t), NO_MATCH)
      self.assertEqual(int_checker.cost(), 2)
      self.assertEqual(repr(int_checker), '<PyTypeChecker int, float>')

    with self.subTest('subclasses'):

      class A(object):
        pass

      class B(A):
        pass

      class C(object):
        pass

      class D(C, B):
        pass

      checker = dispatch.MakeInstanceChecker(A)
      self.assertEqual(checker.Check(A()), MATCH)
      self.assertEqual(checker.Check(B()), MATCH)
      self.assertEqual(checker.Check(C()), NO_MATCH)
      self.assertEqual(checker.Check(D()), MATCH)

  def testInstanceCheckerCache(self):
    """The checker caches match results per concrete type."""
    checker = dispatch.MakeInstanceChecker(tuple)
    MyTuple = collections.namedtuple('MyTuple', ['a', 'b'])  # Subclass of tuple

    self.assertEqual(checker.cache_size(), 0)
    self.assertEqual(checker.Check(5), NO_MATCH)
    self.assertEqual(checker.cache_size(), 1)  # cache miss
    self.assertEqual(checker.Check(12), NO_MATCH)
    self.assertEqual(checker.cache_size(), 1)  # cache hit
    self.assertEqual(checker.Check(1.3), NO_MATCH)
    self.assertEqual(checker.cache_size(), 2)  # cache miss
    self.assertEqual(checker.Check([1]), NO_MATCH)
    self.assertEqual(checker.cache_size(), 3)  # cache miss
    self.assertEqual(checker.Check((1,)), MATCH)
    self.assertEqual(checker.cache_size(), 4)  # cache miss
    self.assertEqual(checker.Check((1, 2, 3)), MATCH)
    self.assertEqual(checker.cache_size(), 4)  # cache hit
    self.assertEqual(checker.Check(MyTuple(1, 2)), MATCH)
    self.assertEqual(checker.cache_size(), 5)  # cache miss
    self.assertEqual(checker.Check(MyTuple(3, 4)), MATCH)
    self.assertEqual(checker.cache_size(), 5)  # cache miss
    self.assertEqual(checker.Check(()), MATCH)
    self.assertEqual(checker.cache_size(), 5)  # cache hit

  def testUnionChecker(self):
    """Union checkers match when any component checker matches."""
    int_checker = dispatch.MakeInstanceChecker(int)
    float_checker = dispatch.MakeInstanceChecker(float)
    str_checker = dispatch.MakeInstanceChecker(str)
    none_checker = dispatch.MakeInstanceChecker(type(None))
    tensor_checker = dispatch.MakeInstanceChecker(ops.Tensor)
    ragged_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
    t = constant_op.constant([1, 2, 3])
    rt = ragged_factory_ops.constant([[1, 2], [3, 4, 5]])

    with self.subTest('Union[int, float, str]'):
      checker = dispatch.MakeUnionChecker(
          [int_checker, float_checker, str_checker])
      self.assertEqual(checker.Check(3), MATCH)
      self.assertEqual(checker.Check(3.0), MATCH)
      self.assertEqual(checker.Check('x'), MATCH)
      self.assertEqual(checker.Check('x'), MATCH)
      self.assertEqual(checker.Check(None), NO_MATCH)
      self.assertEqual(checker.Check(t), NO_MATCH)
      self.assertEqual(checker.cost(), 4)
      self.assertEqual(repr(checker), '<PyTypeChecker Union[int, float, str]>')

    with self.subTest('Optional[int] (aka Union[int, None])'):
      checker = dispatch.MakeUnionChecker([int_checker, none_checker])
      self.assertEqual(checker.Check(3), MATCH)
      self.assertEqual(checker.Check(3.0), NO_MATCH)
      self.assertEqual(checker.Check(None), MATCH)
      self.assertEqual(checker.Check(t), NO_MATCH)
      self.assertEqual(checker.cost(), 3)
      self.assertEqual(repr(checker), '<PyTypeChecker Union[int, NoneType]>')

    with self.subTest('Union[Tensor, RaggedTensor]'):
      checker = dispatch.MakeUnionChecker([tensor_checker, ragged_checker])
      self.assertEqual(checker.Check(3), NO_MATCH)
      self.assertEqual(checker.Check(3.0), NO_MATCH)
      self.assertEqual(checker.Check(None), NO_MATCH)
      self.assertEqual(checker.Check(t), MATCH)
      self.assertEqual(checker.Check(rt), MATCH_DISPATCHABLE)
      self.assertEqual(checker.cost(), 3)
      self.assertEqual(
          repr(checker), '<PyTypeChecker Union[Tensor, RaggedTensor]>')

  def testListChecker(self):
    """List checkers match lists/tuples whose elements all match."""
    int_checker = dispatch.MakeInstanceChecker(int)
    tensor_checker = dispatch.MakeInstanceChecker(ops.Tensor)
    ragged_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
    np_int_checker = dispatch.MakeInstanceChecker(np.integer)
    t = constant_op.constant([1, 2, 3])
    rt = ragged_factory_ops.constant([[1, 2], [3, 4, 5]])

    a = [1, 2, 3]
    b = ['a', 2, t]
    c = [t, t * 2, t - 2]
    d = [t, rt]
    e = []
    f = (1, 2, 3)
    g = (rt,)
    h = {1: 2, 3: 4}
    i = np.array([1, 2, 3])

    with self.subTest('List[int]'):
      checker = dispatch.MakeListChecker(int_checker)
      self.assertEqual(checker.Check(a), MATCH)
      self.assertEqual(checker.Check(b), NO_MATCH)
      self.assertEqual(checker.Check(c), NO_MATCH)
      self.assertEqual(checker.Check(d), NO_MATCH)
      self.assertEqual(checker.Check(e), MATCH)
      self.assertEqual(checker.Check(f), MATCH)
      # Iterators, dicts, and ndarrays are not treated as lists.
      self.assertEqual(checker.Check(iter(a)), NO_MATCH)
      self.assertEqual(checker.Check(iter(b)), NO_MATCH)
      self.assertEqual(checker.Check(reversed(e)), NO_MATCH)
      self.assertEqual(checker.Check(h), NO_MATCH)
      self.assertEqual(checker.Check(i), NO_MATCH)
      self.assertEqual(checker.cost(), 10)
      self.assertEqual(repr(checker), '<PyTypeChecker List[int]>')

    with self.subTest('List[Tensor]'):
      checker = dispatch.MakeListChecker(tensor_checker)
      self.assertEqual(checker.Check(a), NO_MATCH)
      self.assertEqual(checker.Check(b), NO_MATCH)
      self.assertEqual(checker.Check(c), MATCH)
      self.assertEqual(checker.Check(d), NO_MATCH)
      self.assertEqual(checker.Check(e), MATCH)
      self.assertEqual(checker.cost(), 10)
      self.assertEqual(repr(checker), '<PyTypeChecker List[Tensor]>')

    with self.subTest('List[Union[Tensor, RaggedTensor]]'):
      checker = dispatch.MakeListChecker(
          dispatch.MakeUnionChecker([tensor_checker, ragged_checker]))
      self.assertEqual(checker.Check(a), NO_MATCH)
      self.assertEqual(checker.Check(b), NO_MATCH)
      self.assertEqual(checker.Check(c), MATCH)
      self.assertEqual(checker.Check(d), MATCH_DISPATCHABLE)
      self.assertEqual(checker.Check(e), MATCH)
      self.assertEqual(checker.Check(f), NO_MATCH)
      self.assertEqual(checker.Check(g), MATCH_DISPATCHABLE)
      self.assertEqual(checker.cost(), 30)
      self.assertEqual(
          repr(checker), '<PyTypeChecker List[Union[Tensor, RaggedTensor]]>')

    with self.subTest('List[Union[int, np.integer]]'):
      # Note: np.integer is a subtype of int in *some* Python versions.
      checker = dispatch.MakeListChecker(
          dispatch.MakeUnionChecker([int_checker, np_int_checker]))
      self.assertEqual(checker.Check(a), MATCH)
      self.assertEqual(checker.Check(np.array(a)), NO_MATCH)
      self.assertEqual(checker.Check(np.array(a) * 1.5), NO_MATCH)

  def testRegisterDispatchableType(self):
    """Registered types produce MATCH_DISPATCHABLE from instance checkers."""

    @dispatch.register_dispatchable_type
    class A(object):
      pass

    checker = dispatch.MakeInstanceChecker(A)
    self.assertEqual(checker.Check(A()), MATCH_DISPATCHABLE)

  def testRegisterDispatchableTypeError(self):
    """register_dispatchable_type rejects non-types and duplicates."""
    with self.assertRaisesRegex(ValueError, 'Expected a type object'):
      dispatch.register_dispatchable_type(3)
    with self.assertRaisesRegex(ValueError,
                                'Type .* has already been registered'):
      dispatch.register_dispatchable_type(ragged_tensor.RaggedTensor)
@test_util.run_all_in_graph_and_eager_modes
class PythonSignatureCheckerTest(test_util.TensorFlowTestCase):
  """Tests for PySignatureChecker over canonicalized argument tuples."""

  def check_signatures(self, canon_checker, canon_expected_pairs):
    """Assert CheckCanonicalizedArgs(args) == expected for each pair."""
    for (canon_args, expected) in canon_expected_pairs:
      with self.subTest(f'{canon_args} -> {expected}'):
        self.assertEqual(
            canon_checker.CheckCanonicalizedArgs(canon_args), expected)

  def testSimpleSignature(self):
    """A signature matches only if every positional checker matches."""
    int_checker = dispatch.MakeInstanceChecker(int)
    rt_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
    checker = dispatch.PySignatureChecker([(0, int_checker), (2, rt_checker)])
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    self.check_signatures(checker, [
        ((1, 2, rt), True),
        ((1, 2, 3), False),
        ((1, 2), False), ((), False),
        ((5, 'x', rt, None), True),
        (([5], 'x', rt, None), False),
        ((5, 'x', [rt], None), False),
    ])  # pyformat: disable
    self.assertEqual(
        repr(checker), '<PySignatureChecker args[0]:int, args[2]:RaggedTensor>')

  def testUnion(self):
    """Dispatch triggers when at least one union arm is dispatchable."""
    rt_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
    tensor_checker = dispatch.MakeInstanceChecker(ops.Tensor)
    rt_or_tensor = dispatch.MakeUnionChecker([rt_checker, tensor_checker])
    checker = dispatch.PySignatureChecker([(0, rt_or_tensor),
                                           (1, rt_or_tensor)])
    t = constant_op.constant([[1, 2], [3, 4]])
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    self.check_signatures(checker, [
        ((t, t), False),
        ((t, rt), True),
        ((rt, t), True),
        ((rt, rt), True),
        ((rt, [rt]), False),
        ((rt, rt, 1, 2, None), True),
    ])  # pyformat: disable
    self.assertEqual(
        repr(checker),
        '<PySignatureChecker args[0]:Union[RaggedTensor, Tensor], '
        'args[1]:Union[RaggedTensor, Tensor]>')

  def testList(self):
    """List checkers require every element to match the elem checker."""
    rt_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
    rt_list_checker = dispatch.MakeListChecker(rt_checker)
    checker = dispatch.PySignatureChecker([(0, rt_list_checker)])
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    self.check_signatures(checker, [
        (([rt],), True),
        (([],), False),
        ((rt,), False),
        (([rt, rt+3, rt*2],), True),
        (([rt, rt.values, rt*2],), False),
    ])  # pyformat: disable
    self.assertEqual(
        repr(checker), '<PySignatureChecker args[0]:List[RaggedTensor]>')

  def testSortByCost(self):
    """Checkers are evaluated cheapest-first, regardless of arg order."""
    a = dispatch.MakeInstanceChecker(int)
    b = dispatch.MakeInstanceChecker(float)
    c = dispatch.MakeUnionChecker([a, b])
    d = dispatch.MakeListChecker(a)
    e = dispatch.MakeListChecker(c)
    checker = dispatch.PySignatureChecker([(0, e), (1, c), (2, d), (3, a)])
    # Note: `repr(checker)` lists the args in the order they will be checked.
    self.assertEqual(
        repr(checker), '<PySignatureChecker '
        'args[3]:int, '  # a: cost=1
        'args[1]:Union[int, float], '  # c: cost=3
        'args[2]:List[int], '  # d: cost=10
        'args[0]:List[Union[int, float]]>'  # e: cost=30
    )  # pyformat: disable
@test_util.run_all_in_graph_and_eager_modes
class PythonAPIDispatcherTest(test_util.TensorFlowTestCase):
  """Tests for dispatch.PythonAPIDispatcher target registration/dispatch."""

  def testBasicDispatch(self):
    # Dispatcher for an API with signature `tf.foo(x, y, name=None)`.
    dispatcher = dispatch.PythonAPIDispatcher('tf.foo', ['x', 'y', 'name'],
                                              (None,))
    rt_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
    f1 = lambda x, y, name=None: 'f1'
    dispatcher.Register(dispatch.PySignatureChecker([(0, rt_checker)]), f1)
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    # f1 is chosen whenever `x` (positional or keyword) is a RaggedTensor.
    self.assertEqual(dispatcher.Dispatch((rt, 5), None), 'f1')
    self.assertEqual(dispatcher.Dispatch((rt, 5, 'my_name'), None), 'f1')
    self.assertEqual(dispatcher.Dispatch((), {'x': rt, 'y': 5}), 'f1')
    self.assertEqual(
        dispatcher.Dispatch((), {
            'x': rt,
            'y': 5,
            'name': 'x'
        }), 'f1')
    # No registered target matches -> NotImplemented (not an exception).
    self.assertEqual(dispatcher.Dispatch(('foo', rt), None), NotImplemented)
    self.assertEqual(dispatcher.Dispatch(('foo', 'bar'), None), NotImplemented)
    self.assertEqual(
        dispatcher.Dispatch(('foo', 'bar', 'baz'), None), NotImplemented)

  def testMultipleDispatchers(self):
    dispatcher = dispatch.PythonAPIDispatcher('tf.foo', ['x', 'y', 'name'],
                                              (None,))
    rt_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
    rt_x_checker = dispatch.PySignatureChecker([(0, rt_checker)])
    rt_y_checker = dispatch.PySignatureChecker([(1, rt_checker)])
    f1 = lambda x, y, name=None: 'f1'
    f2 = lambda x, y, name=None: 'f2'
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    dispatcher.Register(rt_x_checker, f1)
    dispatcher.Register(rt_y_checker, f2)
    self.assertEqual(dispatcher.Dispatch((rt, 5), None), 'f1')
    self.assertEqual(dispatcher.Dispatch(('foo', rt), None), 'f2')
    self.assertEqual(dispatcher.Dispatch(('foo',), {'y': rt}), 'f2')
    self.assertEqual(dispatcher.Dispatch(('foo', 'bar'), None), NotImplemented)
    # Ambiguity (both targets match) is an error, not a silent choice.
    with self.assertRaisesRegex(
        ValueError, 'Multiple dispatch targets .*'
        r'match the arguments to tf\.foo'):
      dispatcher.Dispatch((rt, rt), None)

  def testListAndUnionDispatch(self):
    # Target matches when x is RaggedTensor-or-Tensor AND ys is a list of
    # RaggedTensor-or-Tensor values.
    dispatcher = dispatch.PythonAPIDispatcher('tf.foo', ['x', 'ys', 'name'],
                                              (None,))
    rt_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
    tensor_checker = dispatch.MakeInstanceChecker(ops.Tensor)
    rt_or_t = dispatch.MakeUnionChecker([rt_checker, tensor_checker])
    list_of_rt_or_t = dispatch.MakeListChecker(rt_or_t)
    f1 = lambda x, ys, name=None: 'f1'
    dispatcher.Register(
        dispatch.PySignatureChecker([(0, rt_or_t), (1, list_of_rt_or_t)]), f1)
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    t = constant_op.constant(5)
    self.assertEqual(dispatcher.Dispatch((rt, [t]), None), 'f1')
    self.assertEqual(dispatcher.Dispatch((rt, [rt]), None), 'f1')
    self.assertEqual(dispatcher.Dispatch((t, [rt]), None), 'f1')
    self.assertEqual(dispatcher.Dispatch((rt, []), None), 'f1')
    self.assertEqual(dispatcher.Dispatch((t, [t, t, rt, t]), None), 'f1')
    self.assertEqual(dispatcher.Dispatch((rt, [t], 'my_name'), None), 'f1')
    self.assertEqual(dispatcher.Dispatch((), {'x': rt, 'ys': [t]}), 'f1')
    self.assertEqual(
        dispatcher.Dispatch((), {
            'x': rt,
            'ys': [t],
            'name': 'x'
        }), 'f1')
    # No RaggedTensor anywhere -> no dispatch.
    self.assertEqual(dispatcher.Dispatch((t, [t]), None), NotImplemented)
    self.assertEqual(dispatcher.Dispatch((t, []), None), NotImplemented)
    self.assertEqual(dispatcher.Dispatch(('foo', [rt]), None), NotImplemented)
    self.assertEqual(dispatcher.Dispatch(('foo', 'bar'), None), NotImplemented)
    self.assertEqual(
        dispatcher.Dispatch(('foo', 'bar', 'baz'), None), NotImplemented)
# Run the test suite when executed directly as a script.
if __name__ == '__main__':
  googletest.main()
| |
import time
import json
import random
import requests
from unittest import TestCase
from unittest.mock import Mock, call, patch
from pandora.errors import InvalidAuthToken, PandoraException
from tests.test_pandora.test_clientbuilder import TestSettingsDictBuilder
import pandora.transport as t
class SysCallError(Exception):
    """Local stand-in for pyOpenSSL's SysCallError.

    Lets the retry tests simulate low-level socket failures without
    requiring the OpenSSL bindings to be installed.
    """
class TestTransport(TestCase):
    """Tests for APITransport URL probing, retry behaviour and calls."""

    def test_test_url_should_return_true_if_request_okay(self):
        # test_url() issues a HEAD request and is True only for 200 OK.
        transport = t.APITransport(Mock())
        transport._http = Mock()
        transport._http.head.return_value = Mock(
            status_code=requests.codes.not_found
        )
        self.assertFalse(transport.test_url("foo"))
        transport._http.head.return_value = Mock(status_code=requests.codes.OK)
        self.assertTrue(transport.test_url("foo"))

    def test_call_should_retry_max_times_on_sys_call_error(self):
        with self.assertRaises(SysCallError):
            client = TestSettingsDictBuilder._build_minimal()
            # NOTE(review): time.sleep is replaced globally and never
            # restored; patch.object(time, "sleep") would be safer.
            time.sleep = Mock()
            client.transport._make_http_request = Mock(
                side_effect=SysCallError("error_mock")
            )
            client.transport._start_request = Mock()
            client("method")
        # Three attempts in total before the error propagates.
        client.transport._start_request.assert_has_calls([call("method")])
        assert client.transport._start_request.call_count == 3

    def test_call_should_not_retry_for_pandora_exceptions(self):
        with self.assertRaises(PandoraException):
            client = TestSettingsDictBuilder._build_minimal()
            time.sleep = Mock()
            client.transport._make_http_request = Mock(
                side_effect=PandoraException("error_mock")
            )
            client.transport._start_request = Mock()
            client("method")
        # API-level errors are not transient: exactly one attempt.
        client.transport._start_request.assert_has_calls([call("method")])
        assert client.transport._start_request.call_count == 1

    def test_call_should_retry_if_auth_token_expired(self):
        with self.assertRaises(InvalidAuthToken):
            client = TestSettingsDictBuilder._build_minimal()
            time.sleep = Mock()
            client.transport._make_http_request = Mock(
                side_effect=InvalidAuthToken("error_mock")
            )
            client.transport._start_request = Mock()
            client._authenticate = Mock()
            client("method")
        # One re-authentication, then one retry (two attempts total).
        client.transport._start_request.assert_has_calls([call("method")])
        assert client.transport._start_request.call_count == 2
        assert client._authenticate.call_count == 1

    def test_complete_request(self):
        # A successful round-trip returns the parsed "result" payload.
        transport = t.APITransport(Mock())
        transport._http = Mock()
        http_result = Mock()
        http_result.content = b'{"stat":"ok","result":"bar"}'
        transport._http.post.return_value = http_result
        self.assertEqual(
            "bar", transport(t.APITransport.NO_ENCRYPT[0], foo="bar")
        )
class TestTransportSetters(TestCase):
    """Tests for partner/user credential setters on APITransport."""

    def setUp(self):
        self.cryptor = Mock()
        self.transport = t.APITransport(self.cryptor)

    def test_set_partner(self):
        self.cryptor.decrypt_sync_time.return_value = 456
        self.transport.set_partner(
            {
                "syncTime": "123",
                "partnerAuthToken": "partner_auth_token",
                "partnerId": "partner_id",
            }
        )
        self.cryptor.decrypt_sync_time.assert_called_with("123")
        # Until a user is set, the partner token doubles as the auth token.
        self.assertEqual("partner_auth_token", self.transport.auth_token)
        self.assertEqual("partner_id", self.transport.partner_id)
        self.assertEqual(
            "partner_auth_token", self.transport.partner_auth_token
        )
        # sync_time = decrypted server time (456) + elapsed wall clock
        # (30 - 10) = 476.
        self.transport.start_time = 10
        with patch.object(time, "time", return_value=30):
            self.assertEqual(476, self.transport.sync_time)

    def test_set_user(self):
        self.transport.set_user(
            {
                "userId": "user",
                "userAuthToken": "auth",
            }
        )
        # A user token takes over as the effective auth token.
        self.assertEqual("user", self.transport.user_id)
        self.assertEqual("auth", self.transport.user_auth_token)
        self.assertEqual("auth", self.transport.auth_token)

    def test_getting_auth_token_no_login(self):
        # Before any login both derived properties are None.
        self.assertIsNone(self.transport.auth_token)
        self.assertIsNone(self.transport.sync_time)
class TestDelayExponential(TestCase):
    """Tests for the exponential-backoff delay helper."""

    def test_fixed_delay(self):
        # base * (growth ** attempts) = 2 * 2**2 = 8
        self.assertEqual(8, t.delay_exponential(2, 2, 3))

    def test_random_delay(self):
        # base="rand" uses random.random() as the base: 10 * 2 = 20.
        with patch.object(random, "random", return_value=10):
            self.assertEqual(20, t.delay_exponential("rand", 2, 2))

    def test_fails_with_base_zero_or_below(self):
        with self.assertRaises(ValueError):
            t.delay_exponential(0, 1, 1)
        with self.assertRaises(ValueError):
            t.delay_exponential(-1, 1, 1)
class TestRetries(TestCase):
    """Tests for the @retries decorator."""

    def test_no_retries_returns_none(self):
        # With zero allowed attempts the wrapped function is never invoked,
        # so its return value is lost and None comes back.
        @t.retries(0)
        def foo():
            return True

        self.assertIsNone(foo())
class TestParseResponse(TestCase):
    """Tests for APITransport._parse_response JSON handling."""

    # Canned Pandora JSON API payloads.
    VALID_MSG_NO_BODY_JSON = b'{"stat":"ok"}'
    VALID_MSG_JSON = b'{"stat":"ok", "result":{"foo":"bar"}}'
    ERROR_MSG_JSON = b'{"stat":"err", "code":1001, "message":"Details"}'

    def setUp(self):
        self.transport = t.APITransport(Mock())

    def test_with_valid_response(self):
        res = self.transport._parse_response(self.VALID_MSG_JSON)
        self.assertEqual({"foo": "bar"}, res)

    def test_with_valid_response_no_body(self):
        # "ok" with no "result" key parses to None, not an error.
        res = self.transport._parse_response(self.VALID_MSG_NO_BODY_JSON)
        self.assertIsNone(res)

    def test_with_error_response(self):
        # Error code 1001 maps to the InvalidAuthToken exception type.
        with self.assertRaises(InvalidAuthToken) as ex:
            self.transport._parse_response(self.ERROR_MSG_JSON)
        self.assertEqual(1001, ex.exception.code)
        self.assertEqual("Details", ex.exception.extended_message)
class TestTransportRequestPrep(TestCase):
    """Tests for request preparation: start/reset, HTTP posting, payloads."""

    def setUp(self):
        self.cryptor = Mock()
        self.transport = t.APITransport(self.cryptor)

    def test_start_request(self):
        # An existing start_time is preserved for ordinary methods.
        self.transport.start_time = 10
        self.transport._start_request("method_name")
        self.assertEqual(10, self.transport.start_time)

    def test_start_request_with_reset(self):
        # Methods in REQUIRE_RESET clear transport state first.
        self.transport.reset = Mock()
        self.transport._start_request(self.transport.REQUIRE_RESET[0])
        self.transport.reset.assert_called_with()

    def test_start_request_without_time(self):
        # With no start_time yet, the current wall clock is captured
        # (truncated to an int).
        with patch.object(time, "time", return_value=10.0):
            self.transport._start_request("method_name")
        self.assertEqual(10, self.transport.start_time)

    def test_make_http_request(self):
        # url, data, params
        http = Mock()
        retval = Mock()
        retval.content = "foo"
        http.post.return_value = retval
        self.transport._http = http
        res = self.transport._make_http_request(
            "/url", b"data", {"a": None, "b": "c"}
        )
        # None-valued params are dropped; HTTP errors are raised.
        http.post.assert_called_with("/url", data=b"data", params={"b": "c"})
        retval.raise_for_status.assert_called_with()
        self.assertEqual("foo", res)

    def test_build_data_not_logged_in(self):
        self.cryptor.encrypt = lambda x: x
        self.transport.partner_auth_token = "pat"
        self.transport.server_sync_time = 123
        self.transport.start_time = 23
        with patch.object(time, "time", return_value=20):
            val = self.transport._build_data("foo", {"a": "b", "c": None})
        val = json.loads(val)
        self.assertEqual("b", val["a"])
        # Without a user token the partner token is used instead.
        self.assertEqual("pat", val["partnerAuthToken"])
        self.assertEqual(120, val["syncTime"])

    def test_build_data_no_encrypt(self):
        self.transport.user_auth_token = "uat"
        self.transport.partner_auth_token = "pat"
        self.transport.server_sync_time = 123
        self.transport.start_time = 23
        with patch.object(time, "time", return_value=20):
            val = self.transport._build_data(
                t.APITransport.NO_ENCRYPT[0], {"a": "b", "c": None}
            )
        val = json.loads(val)
        self.assertEqual("b", val["a"])
        self.assertEqual("uat", val["userAuthToken"])
        self.assertEqual(120, val["syncTime"])
# All Cryptor implementations must pass these test cases unmodified
class CommonCryptorTestCases:
    """Shared PKCS#7-style padding tests; mix into a TestCase with a
    `self.cryptor` fixture. Not a TestCase itself so it isn't collected."""

    def test_decrypt_invalid_padding(self):
        with self.assertRaises(ValueError):
            # b"\x00" is not valid padding; decrypt must raise before the
            # assertEqual below ever compares anything.
            data = b"12345678\x00"
            self.assertEqual(b"12345678\x00", self.cryptor.decrypt(data))

    def test_decrypt_strip_padding(self):
        # Two bytes of \x02 padding are removed by default.
        data = b"123456\x02\x02"
        self.assertEqual(b"123456", self.cryptor.decrypt(data))

    def test_decrypt_preserve_padding(self):
        # strip_padding=False keeps the padding bytes intact.
        data = b"123456\x02\x02"
        self.assertEqual(b"123456\x02\x02", self.cryptor.decrypt(data, False))

    def test_encrypt(self):
        # Plaintext is padded out to the 8-byte block boundary.
        data = "123456"
        self.assertEqual(b"123456\x02\x02", self.cryptor.encrypt(data))
class TestPurePythonBlowfishCryptor(TestCase, CommonCryptorTestCases):
    """Runs the shared cryptor cases against PurePythonBlowfish with the
    cipher stubbed to the identity, isolating the padding logic."""

    def setUp(self):
        # Ugh... blowfish can't even be *imported* in python2
        if not t.blowfish:
            t.blowfish = Mock()

        # Identity "cipher": *_ecb return the input as a one-block list,
        # so only the padding/unpadding code paths are exercised.
        self.cipher = Mock()
        self.cipher.decrypt_ecb = lambda x: [x]
        self.cipher.encrypt_ecb = lambda x: [x]

        self.cryptor = t.PurePythonBlowfish("keys")
        self.cryptor.cipher = self.cipher
class TestEncryptor(TestCase):
    """Tests for the hex-encoding Encryptor wrapper, with the underlying
    block cipher replaced by a no-op so only hex/JSON handling is tested."""

    # hex encoding of b'{"foo":"bar"}'
    ENCODED_JSON = "7b22666f6f223a22626172227d"
    UNENCODED_JSON = b'{"foo":"bar"}'

    # ENCODED_TIME hex-decodes to "1507411159"; decrypt_sync_time skips the
    # 4-byte prefix, leaving 4111.
    EXPECTED_TIME = 4111
    ENCODED_TIME = "31353037343131313539"

    class NoopCrypto:
        """Pass-through cipher so the Encryptor's encoding is isolated."""

        def __init__(self, key):
            pass

        def decrypt(self, data, strip_padding=True):
            return data.decode("ascii")

        def encrypt(self, data):
            return data

    def setUp(self):
        self.cryptor = t.Encryptor("in", "out", self.NoopCrypto)

    def test_decrypt(self):
        self.assertEqual(
            {"foo": "bar"}, self.cryptor.decrypt(self.ENCODED_JSON)
        )

    def test_encrypt(self):
        self.assertEqual(
            self.ENCODED_JSON.encode("ascii"),
            self.cryptor.encrypt(self.UNENCODED_JSON),
        )

    def test_decrypt_sync_time(self):
        self.assertEqual(
            self.EXPECTED_TIME,
            self.cryptor.decrypt_sync_time(self.ENCODED_TIME),
        )
| |
"""
Models for working with remote translation data stored in a VCS.
"""
import logging
import os
import scandir
import shutil
import requests
from datetime import datetime
from itertools import chain
from pathlib import Path
from urllib.parse import urljoin, urlparse
from compare_locales.paths import (
ProjectFiles,
TOMLParser,
)
from django.utils import timezone
from django.utils.functional import cached_property
from pontoon.base import MOZILLA_REPOS
from pontoon.sync.exceptions import ParseError
from pontoon.sync.utils import (
is_hidden,
is_resource,
is_asymmetric_resource,
get_parent_directory,
uses_undercore_as_separator,
directory_contains_resources,
locale_directory_path,
locale_to_source_path,
source_to_locale_path,
)
from pontoon.sync.vcs.repositories import get_changed_files
log = logging.getLogger(__name__)
class DownloadTOMLParser(TOMLParser):
    """
    This wrapper is a workaround for the lack of the shared and persistent filesystem
    on Heroku workers.

    Related: https://bugzilla.mozilla.org/show_bug.cgi?id=1530988
    """

    def __init__(self, checkout_path, permalink_prefix, configuration_file):
        # The trailing separator from os.path.join(..., "") makes the prefix
        # replacement in get_remote_path() strip the full directory component.
        self.checkout_path = os.path.join(checkout_path, "")
        self.permalink_prefix = permalink_prefix
        self.config_path = urlparse(permalink_prefix).path
        self.config_file = configuration_file

    def get_local_path(self, path):
        """Return the directory in which the config file should be stored."""
        local_path = path.replace(self.config_path, "")
        return os.path.join(self.checkout_path, local_path)

    def get_remote_path(self, path):
        """Construct the link to the remote resource based on the local path."""
        remote_config_path = path.replace(self.checkout_path, "")
        return urljoin(self.permalink_prefix, remote_config_path)

    def get_project_config(self, path):
        """Download the project config file and return its local path.

        The remote file is fetched and validated *before* anything is
        written to disk. (Previously the local file was opened for writing
        first, so a failed download left behind an empty/truncated config
        file that a later parse could pick up.)

        :raises requests.HTTPError: if the download returns an error status.
        """
        remote_path = self.get_remote_path(path)
        config_file = requests.get(remote_path)
        config_file.raise_for_status()

        local_path = Path(self.get_local_path(path))
        local_path.parent.mkdir(parents=True, exist_ok=True)
        local_path.write_bytes(config_file.content)
        return str(local_path)

    def parse(self, path=None, env=None, ignore_missing_includes=True):
        """Download the config file before it gets parsed."""
        return super(DownloadTOMLParser, self).parse(
            self.get_project_config(path or self.config_file),
            env,
            ignore_missing_includes,
        )
class MissingRepositoryPermalink(Exception):
    """
    Raised for projects that use project config files whose source
    repository lacks the permalink needed to download those files.
    """
class MissingSourceRepository(Exception):
    """
    Raised when the repository containing a project's source files
    cannot be located.
    """
class MissingSourceDirectoryError(Exception):
    """Raised when sync cannot locate the directory holding the source
    strings for the locales."""
class MissingLocaleDirectoryError(IOError):
    """Raised when sync cannot locate a locale's directory.

    Subclasses IOError so generic filesystem error handling also
    catches it.
    """
class VCSProject(object):
    """
    Container for project data that is stored on the filesystem and
    pulled from a remote VCS.
    """

    # Candidate source-directory names, scored: higher scores win when
    # several candidates exist (see source_directory_path).
    SOURCE_DIR_SCORES = {
        "templates": 3,
        "en-US": 2,
        "en-us": 2,
        "en_US": 2,
        "en_us": 2,
        "en": 1,
    }
    SOURCE_DIR_NAMES = SOURCE_DIR_SCORES.keys()

    def __init__(
        self,
        db_project,
        now=None,
        locales=None,
        repo_locales=None,
        added_paths=None,
        changed_paths=None,
        force=False,
    ):
        """
        Load resource paths from the given db_project and parse them
        for translation data.

        :param Project db_project:
            Project model instance for the project we're going to be
            reading files for.
        :param datetime.datetime now:
            Sync start time.
        :param list locales:
            List of Locale model instances for the locales that we want
            to parse. Defaults to parsing resources for all enabled
            locales on the project.
        :param dict repo_locales:
            A dict of repository PKs and their currently checked out locales
            (not necessarily matching the ones stored in the DB).
        :param list added_paths:
            List of added source file paths
        :param list changed_paths:
            List of changed source file paths
        :param bool force:
            Scans all resources in repository
        :param VCSConfiguration configuration:
            Project configuration, provided by the optional configuration file.
        """
        self.db_project = db_project
        self.now = now
        self.locales = locales if locales is not None else db_project.locales.all()
        self.repo_locales = repo_locales
        self.added_paths = added_paths or []
        self.changed_paths = changed_paths or []
        self.force = force
        # Locales actually synced; populated while building `resources`.
        self.synced_locales = set()

        self.configuration = None
        if db_project.configuration_file:
            # Permalink is required to download project config files.
            if not db_project.source_repository.permalink_prefix:
                raise MissingRepositoryPermalink()

            self.configuration = VCSConfiguration(self)

    @cached_property
    def changed_files(self):
        """Changed files for this sync run; None means "treat everything
        as changed"."""
        if self.force or (
            self.db_project.configuration_file and self.changed_config_files
        ):
            # All files are marked as changed
            return None

        if self.locales:
            return self.changed_locales_files
        else:
            return self.changed_source_files[0]

    @cached_property
    def changed_source_files(self):
        """
        Returns a tuple of changed and removed source files in the project:
        (changed_files, removed_files)
        """
        source_resources_repo = self.db_project.source_repository
        if not source_resources_repo:
            raise MissingSourceRepository(self.db_project)

        source_directory = self.source_directory_path
        last_revision = source_resources_repo.get_last_synced_revisions()

        modified_files, removed_files = get_changed_files(
            source_resources_repo.type, source_directory, last_revision
        )

        # Unify filesystem and data model file extensions
        if not self.configuration:
            modified_files = map(source_to_locale_path, modified_files)
            removed_files = map(source_to_locale_path, removed_files)

        if source_resources_repo.source_repo or not last_revision:

            def get_path(path):
                return (path, [])

        else:
            # Strip the source-directory prefix so paths are relative to it.
            relative_source_path = source_directory[
                len(source_resources_repo.checkout_path) :
            ].lstrip(os.sep)

            def get_path(path):
                return (path[len(relative_source_path) :].lstrip(os.sep), [])

        return dict(map(get_path, modified_files)), dict(map(get_path, removed_files))

    @cached_property
    def changed_locales_files(self):
        """
        Map of changed files and locales they were changed for.
        """
        files = {}

        # VCS changes
        repos = self.db_project.translation_repositories()
        if self.repo_locales:
            repos = repos.filter(pk__in=self.repo_locales.keys())

        for repo in repos:
            if repo.multi_locale:
                locales = (
                    self.repo_locales[repo.pk]
                    if self.repo_locales
                    else self.db_project.locales.all()
                )
                for locale in locales:
                    changed_files = get_changed_files(
                        repo.type,
                        repo.locale_checkout_path(locale),
                        repo.get_last_synced_revisions(locale.code),
                    )[0]

                    for path in changed_files:
                        files.setdefault(path, []).append(locale)
            else:
                changed_files = get_changed_files(
                    repo.type, repo.checkout_path, repo.get_last_synced_revisions()
                )[0]

                log.info(
                    "Changed files in {} repository, all: {}".format(
                        self.db_project, changed_files
                    )
                )

                # Include only relevant (localizable) files
                # NOTE(review): `files` is reassigned (not merged) here, so
                # with several single-locale repositories only the last
                # repo's changes survive — confirm whether accumulation
                # across repos is intended.
                if self.configuration:
                    files = self.get_relevant_files_with_config(changed_files)
                else:
                    files = self.get_relevant_files_without_config(
                        changed_files, self.locale_path_locales(repo.checkout_path)
                    )

        log.info(
            "Changed files in {} repository, relevant for enabled locales: {}".format(
                self.db_project, files
            )
        )

        # DB changes
        vcs = files
        db = self.db_project.changed_resources(self.now)
        for path in set(list(vcs.keys()) + list(db.keys())):
            if path in vcs and path in db:
                vcs[path] = set(list(vcs[path]) + list(db[path]))
            else:
                vcs[path] = vcs[path] if path in vcs else db[path]

        return files

    @cached_property
    def changed_config_files(self):
        """
        A set of the changed project config files.
        """
        config_files = set(
            pc.path.replace(os.path.join(self.source_directory_path, ""), "")
            for pc in self.configuration.parsed_configuration.configs
        )
        changed_files = set(self.changed_source_files[0])
        return changed_files.intersection(config_files)

    def get_relevant_files_with_config(self, paths):
        """
        Check if given paths represent localizable files using project configuration.
        Return a dict of relative reference paths of such paths and corresponding Locale
        objects.
        """
        files = {}

        for locale in self.db_project.locales.all():
            for path in paths:
                absolute_path = os.path.join(self.source_directory_path, path)
                reference_path = self.configuration.reference_path(
                    locale, absolute_path
                )

                if reference_path:
                    relative_reference_path = reference_path[
                        len(self.source_directory_path) :
                    ].lstrip(os.sep)
                    files.setdefault(relative_reference_path, []).append(locale)

        return files

    def get_relevant_files_without_config(self, paths, locale_path_locales):
        """
        Check if given paths represent localizable files by matching them against locale
        repository paths. Return a dict of relative reference paths of such paths and
        corresponding Locale objects.
        """
        files = {}
        locale_paths = locale_path_locales.keys()

        for path in paths:
            if is_hidden(path):
                continue

            for locale_path in locale_paths:
                if path.startswith(locale_path):
                    locale = locale_path_locales[locale_path]

                    # Strip the locale-directory prefix from the path.
                    path = path[len(locale_path) :].lstrip(os.sep)
                    files.setdefault(path, []).append(locale)
                    break

        return files

    def locale_path_locales(self, repo_checkout_path):
        """
        A map of relative locale directory paths and their respective locales.
        """
        locale_path_locales = {}

        for locale in self.db_project.locales.all():
            locale_directory = self.locale_directory_paths[locale.code]
            path = locale_directory[len(repo_checkout_path) :].lstrip(os.sep)
            path = os.path.join(path, "")  # Ensure the path ends with os.sep
            locale_path_locales[path] = locale

        return locale_path_locales

    @cached_property
    def locale_directory_paths(self):
        """
        A map of locale codes and their absolute directory paths.
        Create locale directory, if not in repository yet.
        """
        locale_directory_paths = {}
        parent_directories = set()

        for locale in self.locales:
            try:
                if self.configuration:
                    locale_directory_paths[locale.code] = self.configuration.l10n_base
                else:
                    locale_directory_paths[locale.code] = locale_directory_path(
                        self.checkout_path, locale.code, parent_directories,
                    )
                parent_directory = get_parent_directory(
                    locale_directory_paths[locale.code]
                )

            except IOError:
                # Locale directory missing: create it, unless the project
                # uses multi-locale repositories (where that is ambiguous).
                if not self.db_project.has_multi_locale_repositories:
                    source_directory = self.source_directory_path
                    parent_directory = get_parent_directory(source_directory)

                    locale_code = locale.code
                    if uses_undercore_as_separator(parent_directory):
                        locale_code = locale_code.replace("-", "_")

                    locale_directory = os.path.join(parent_directory, locale_code)

                    # For asymmetric formats, create empty folder
                    if is_asymmetric_resource(next(self.relative_resource_paths())):
                        os.makedirs(locale_directory)

                    # For other formats, copy resources from source directory
                    else:
                        shutil.copytree(source_directory, locale_directory)

                        for root, dirnames, filenames in scandir.walk(locale_directory):
                            for filename in filenames:
                                path = os.path.join(root, filename)
                                if is_resource(filename):
                                    os.rename(path, source_to_locale_path(path))
                                else:
                                    os.remove(path)

                    locale_directory_paths[locale.code] = locale_directory

                else:
                    raise MissingLocaleDirectoryError(
                        "Directory for locale `{0}` not found".format(locale.code)
                    )

            parent_directories.add(parent_directory)

        return locale_directory_paths

    @cached_property
    def resources(self):
        """
        Lazy-loaded mapping of relative paths -> VCSResources that need to be synced:
        * changed in repository
        * changed in Pontoon DB
        * corresponding source file added
        * corresponding source file changed
        * all paths relevant for newly enabled (unsynced) locales
        Waiting until first access both avoids unnecessary file reads
        and allows tests that don't need to touch the resources to run
        with less mocking.
        """
        resources = {}

        log.info(
            "Changed files in {} repository and Pontoon, relevant for enabled locales: {}".format(
                self.db_project, self.changed_files
            )
        )

        for path in self.relative_resource_paths():
            # Syncing translations
            if self.locales:
                # Copy list instead of cloning
                locales = list(self.db_project.unsynced_locales)

                if self.changed_files is not None and (
                    (not self.changed_files or path not in self.changed_files)
                    and path not in self.added_paths
                    and path not in self.changed_paths
                ):
                    if not locales:
                        log.debug("Skipping unchanged file: {}".format(path))
                        continue

                else:
                    if (
                        self.changed_files is None
                        or path in self.added_paths
                        or path in self.changed_paths
                    ):
                        locales += self.locales
                    else:
                        locales += self.changed_files[path]

            # Syncing resources
            else:
                if self.changed_files is not None and path not in self.changed_files:
                    log.debug("Skipping unchanged resource file: {}".format(path))
                    continue
                locales = []

            # Restrict to locales enabled for this sync run.
            locales = set([l for l in locales if l in self.locales])
            self.synced_locales.update(locales)

            log.debug(
                "Detected resource file {} for {}".format(
                    path, ",".join([l.code for l in locales]) or "source"
                )
            )

            try:
                resources[path] = VCSResource(self, path, locales=locales)
            except ParseError as err:
                # Unparseable resources are skipped, not fatal.
                log.error(
                    u"Skipping resource {path} due to ParseError: {err}".format(
                        path=path, err=err
                    )
                )

        log.info(
            "Relative paths in {} that need to be synced: {}".format(
                self.db_project, resources.keys()
            )
        )

        return resources

    @property
    def entities(self):
        # All entities across all synced resources, flattened.
        return chain.from_iterable(
            resource.entities.values() for resource in self.resources.values()
        )

    @property
    def checkout_path(self):
        return self.db_project.checkout_path

    @cached_property
    def source_directory_path(self):
        """
        Path to the directory where source strings are stored.

        Paths are identified using a scoring system; more likely
        directory names get higher scores, as do directories with
        formats that only used for source strings.
        """
        source_repository = self.db_project.source_repository

        # If project configuration provided, files could be stored in multiple
        # directories, so we just use the source repository checkout path
        if self.configuration:
            return source_repository.checkout_path

        # If source repository explicitly marked
        if source_repository.source_repo:
            return source_repository.checkout_path

        possible_sources = []
        for root, dirnames, filenames in scandir.walk(self.checkout_path):
            for dirname in dirnames:
                if dirname in self.SOURCE_DIR_NAMES:
                    score = self.SOURCE_DIR_SCORES[dirname]

                    # Ensure the matched directory contains resources.
                    directory_path = os.path.join(root, dirname)
                    if directory_contains_resources(directory_path):
                        # Extra points for source resources!
                        if directory_contains_resources(
                            directory_path, source_only=True
                        ):
                            score += 3

                        possible_sources.append((directory_path, score))

        if possible_sources:
            # Highest-scoring candidate wins.
            return max(possible_sources, key=lambda s: s[1])[0]
        else:
            raise MissingSourceDirectoryError(
                "No source directory found for project {0}".format(self.db_project.slug)
            )

    def relative_resource_paths(self):
        """
        List of all source resource paths, relative to source_directory_path.
        """
        if self.configuration:
            paths = self.resource_paths_with_config()
        else:
            paths = self.resource_paths_without_config()

        for path in paths:
            if not self.configuration:
                path = source_to_locale_path(path)
            yield os.path.relpath(path, self.source_directory_path)

    def resource_paths_with_config(self):
        """
        List of absolute paths for all supported source resources
        as specified through project configuration.
        """
        path = self.source_directory_path
        project_files = self.configuration.get_or_set_project_files(None)

        for root, dirnames, filenames in scandir.walk(path):
            if is_hidden(root):
                continue

            for filename in filenames:
                absolute_path = os.path.join(root, filename)
                if project_files.match(absolute_path):
                    yield absolute_path

    def resource_paths_without_config(self):
        """
        List of absolute paths for all supported source resources
        found within the given path.
        """
        path = self.source_directory_path

        for root, dirnames, filenames in scandir.walk(path):
            if is_hidden(root):
                continue

            # Ignore certain files in Mozilla repositories.
            if self.db_project.repository_url in MOZILLA_REPOS:
                filenames = [
                    f for f in filenames if not f.endswith("region.properties")
                ]

            for filename in filenames:
                if is_resource(filename):
                    yield os.path.join(root, filename)
class VCSConfiguration(object):
    """
    Container for the project configuration, provided by the optional
    configuration file.

    For more information, see:
    https://moz-l10n-config.readthedocs.io/en/latest/fileformat.html.
    """

    def __init__(self, vcs_project):
        self.vcs_project = vcs_project
        self.configuration_file = vcs_project.db_project.configuration_file
        # Cache of locale_code -> ProjectFiles (see get_or_set_project_files).
        self.project_files = {}

    @cached_property
    def l10n_base(self):
        """
        If project configuration provided, files could be stored in multiple
        directories, so we just use the translation repository checkout path
        """
        return self.vcs_project.db_project.translation_repositories()[0].checkout_path

    @cached_property
    def parsed_configuration(self):
        """Return parsed project configuration file."""
        return DownloadTOMLParser(
            self.vcs_project.db_project.source_repository.checkout_path,
            self.vcs_project.db_project.source_repository.permalink_prefix,
            self.configuration_file,
        ).parse(env={"l10n_base": self.l10n_base})

    def add_locale(self, locale_code):
        """
        Add new locale to project configuration.
        """
        locales = self.parsed_configuration.locales or []
        locales.append(locale_code)
        self.parsed_configuration.set_locales(locales)

        """
        TODO: For now we don't make changes to the configuration file to
        avoid committing it to the VCS. The pytoml serializer messes with the
        file layout (indents and newlines) pretty badly. We should fix the
        serializer and replace the content of this method with the following
        code:

        # Update configuration file
        with open(self.configuration_path, 'r+b') as f:
            data = pytoml.load(f)
            data['locales'].append(locale_code)
            f.seek(0)
            f.write(pytoml.dumps(data, sort_keys=True))
            f.truncate()

        # Invalidate cached parsed configuration
        del self.__dict__['parsed_configuration']

        # Commit configuration file to VCS
        commit_message = 'Update configuration file'

        commit_author = User(
            first_name=settings.VCS_SYNC_NAME,
            email=settings.VCS_SYNC_EMAIL,
        )

        repo = self.vcs_project.db_project.source_repository
        repo.commit(commit_message, commit_author, repo.checkout_path)
        """

    def get_or_set_project_files(self, locale_code):
        """
        Get or set project files for the given locale code. This approach
        allows us to cache the files for later use.

        Also, make sure that the requested locale_code is available in the
        configuration file.
        """
        if (
            locale_code is not None
            and locale_code not in self.parsed_configuration.all_locales
        ):
            self.add_locale(locale_code)

        return self.project_files.setdefault(
            locale_code, ProjectFiles(locale_code, [self.parsed_configuration]),
        )

    def l10n_path(self, locale, reference_path):
        """
        Return l10n path for the given locale and reference path.
        """
        project_files = self.get_or_set_project_files(locale.code)

        # ProjectFiles.match returns (l10n_path, reference_path, ...).
        m = project_files.match(reference_path)
        return m[0] if m is not None else None

    def reference_path(self, locale, l10n_path):
        """
        Return reference path for the given locale and l10n path.
        """
        project_files = self.get_or_set_project_files(locale.code)

        m = project_files.match(l10n_path)
        return m[1] if m is not None else None

    def locale_resources(self, locale):
        """
        Return a list of Resource instances, which need to be enabled for the
        given locale.
        """
        resources = []
        project_files = self.get_or_set_project_files(locale.code)

        for resource in self.vcs_project.db_project.resources.all():
            absolute_resource_path = os.path.join(
                self.vcs_project.source_directory_path, resource.path,
            )

            if project_files.match(absolute_resource_path):
                resources.append(resource)

        return resources
class VCSResource(object):
    """Represents a single resource across multiple locales."""

    def __init__(self, vcs_project, path, locales=None):
        """
        Load the resource file for each enabled locale and store its
        translations in VCSEntity instances.

        :param vcs_project: VCSProject instance this resource belongs to.
        :param path: Relative path of the resource within the project.
        :param locales: Iterable of Locale objects to load translations for;
            defaults to no locales.
        """
        from pontoon.base.models import Locale
        from pontoon.sync import formats  # Avoid circular import.

        self.vcs_project = vcs_project
        self.path = path
        self.locales = locales or []
        self.files = {}
        self.entities = {}

        # Create entities using resources from the source directory.
        source_resource_path = os.path.join(
            vcs_project.source_directory_path, self.path
        )
        source_resource_path = locale_to_source_path(source_resource_path)
        source_resource_file = formats.parse(
            source_resource_path, locale=Locale.objects.get(code="en-US")
        )
        for index, translation in enumerate(source_resource_file.translations):
            vcs_entity = VCSEntity(
                resource=self,
                key=translation.key,
                string=translation.source_string,
                string_plural=translation.source_string_plural,
                comments=translation.comments,
                group_comments=(
                    translation.group_comments
                    if hasattr(translation, "group_comments")
                    else None
                ),
                resource_comments=(
                    translation.resource_comments
                    if hasattr(translation, "resource_comments")
                    else None
                ),
                source=translation.source,
                # Fall back to the enumeration index when the parsed
                # translation carries no explicit order.
                order=translation.order or index,
            )
            self.entities[vcs_entity.key] = vcs_entity

        # Fill in translations from the locale resources.
        # Iterate over self.locales (not the raw ``locales`` argument) so the
        # documented ``locales=None`` default does not raise a TypeError.
        for locale in self.locales:
            locale_directory = self.vcs_project.locale_directory_paths[locale.code]
            if self.vcs_project.configuration:
                # Some resources might not be available for this locale
                resource_path = self.vcs_project.configuration.l10n_path(
                    locale, source_resource_path,
                )
                if resource_path is None:
                    continue
            else:
                resource_path = os.path.join(locale_directory, self.path)
            log.debug("Parsing resource file: %s", resource_path)
            try:
                resource_file = formats.parse(
                    resource_path, source_resource_path, locale
                )
            # File doesn't exist or is invalid: log it and move on
            except (IOError, ParseError) as err:
                log.error(
                    u"Skipping resource {path} due to {type}: {err}".format(
                        path=path, type=type(err).__name__, err=err
                    )
                )
                continue
            self.files[locale] = resource_file
            log.debug("Discovered %s translations.", len(resource_file.translations))
            for translation in resource_file.translations:
                try:
                    self.entities[translation.key].translations[
                        locale.code
                    ] = translation
                except KeyError:
                    # If the source is missing an entity, we consider it
                    # deleted and don't add it.
                    pass

    def save(self, locale=None):
        """
        Save changes made to any of the translations in this resource
        back to the filesystem for all locales (or only the given one).
        """
        if locale:
            self.files[locale].save(locale)
        else:
            for locale, resource_file in self.files.items():
                resource_file.save(locale)
class VCSEntity(object):
    """
    An Entity is a single string to be translated, and a VCSEntity
    stores the translations for an entity from several locales.
    """

    def __init__(
        self,
        resource,
        key,
        string,
        comments,
        source,
        string_plural="",
        order=0,
        group_comments=None,
        resource_comments=None,
    ):
        # Identity and source-side content.
        self.resource = resource
        self.key = key
        self.string = string
        self.string_plural = string_plural
        self.source = source
        self.order = order

        # Comments attached at entity, group and resource level.
        self.comments = comments
        self.group_comments = group_comments or []
        self.resource_comments = resource_comments or []

        # Maps a locale code to this entity's translation for that locale.
        self.translations = {}

    def has_translation_for(self, locale_code):
        """Return True if a translation exists for the given locale."""
        return locale_code in self.translations
class VCSTranslation(object):
    """
    A single translation of a source string into another language.

    Since a string can have different translations based on plural
    forms, all of the different forms are stored under self.strings, a
    dict where the keys equal possible values for
    pontoon.base.models.Translation.plural_form and the values equal the
    translation for that plural form.
    """

    def __init__(
        self,
        key,
        strings,
        comments,
        fuzzy,
        source_string="",
        source_string_plural="",
        group_comments=None,
        resource_comments=None,
        order=0,
        source=None,
        last_translator=None,
        last_updated=None,
    ):
        self.key = key
        self.strings = strings
        self.fuzzy = fuzzy
        self.order = order

        # Source-side content.
        self.source_string = source_string
        self.source_string_plural = source_string_plural
        self.source = source or []

        # Comments attached at various levels of the resource file.
        self.comments = comments
        self.group_comments = group_comments
        self.resource_comments = resource_comments

        # Metadata about the most recent change to this translation.
        self.last_translator = last_translator
        self.last_updated = last_updated

    @property
    def extra(self):
        """
        Return a dict of custom properties to store in the database.
        Useful for subclasses from specific formats that have extra data
        that needs to be preserved.
        """
        return {}

    def update_from_db(self, db_translations):
        """
        Update translation with current DB state.
        """
        # Fuzzy iff at least one DB translation is flagged fuzzy.
        self.fuzzy = any(t for t in db_translations if t.fuzzy)

        if db_translations:
            newest = max(
                db_translations,
                key=lambda t: t.date or timezone.make_aware(datetime.min),
            )
            self.last_updated = newest.date
            self.last_translator = newest.user

        # Replace existing translations with ones from the database.
        self.strings = {t.plural_form: t.string for t in db_translations}
| |
# Copyright 2012-2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils import excutils
from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.nec.common import config
from neutron.plugins.nec.common import exceptions as nexc
from neutron.plugins.nec.db import packetfilter as pf_db
LOG = logging.getLogger(__name__)
class PacketFilterMixin(pf_db.PacketFilterDbMixin):
    """Mixin class to add packet filter to NECPluginV2."""

    @property
    def packet_filter_enabled(self):
        # Cached on first access: both the config flag and the OFC driver
        # capability are fixed for the process lifetime.
        if not hasattr(self, '_packet_filter_enabled'):
            self._packet_filter_enabled = (
                config.OFC.enable_packet_filter and
                self.ofc.driver.filter_supported())
        return self._packet_filter_enabled

    def remove_packet_filter_extension_if_disabled(self, aliases):
        """Remove 'packet-filter' from supported aliases when disabled."""
        if not self.packet_filter_enabled:
            LOG.debug('Disabled packet-filter extension.')
            aliases.remove('packet-filter')

    def create_packet_filter(self, context, packet_filter):
        """Create a new packet_filter entry on DB, then try to activate it."""
        LOG.debug("create_packet_filter() called, packet_filter=%s .",
                  packet_filter)

        if hasattr(self.ofc.driver, 'validate_filter_create'):
            pf = packet_filter['packet_filter']
            self.ofc.driver.validate_filter_create(context, pf)
        pf = super(PacketFilterMixin, self).create_packet_filter(
            context, packet_filter)

        return self.activate_packet_filter_if_ready(context, pf)

    def update_packet_filter(self, context, id, packet_filter):
        """Update packet_filter entry on DB, and recreate it if changed.

        If any rule of the packet_filter was changed, recreate it on OFC.
        """
        LOG.debug("update_packet_filter() called, "
                  "id=%(id)s packet_filter=%(packet_filter)s .",
                  {'id': id, 'packet_filter': packet_filter})

        pf_data = packet_filter['packet_filter']
        if hasattr(self.ofc.driver, 'validate_filter_update'):
            self.ofc.driver.validate_filter_update(context, pf_data)

        # validate ownership
        pf_old = self.get_packet_filter(context, id)

        pf = super(PacketFilterMixin, self).update_packet_filter(
            context, id, packet_filter)

        def _packet_filter_changed(old_pf, new_pf):
            LOG.debug('old_pf=%(old_pf)s, new_pf=%(new_pf)s',
                      {'old_pf': old_pf, 'new_pf': new_pf})
            # When the status is ERROR, force sync to OFC.
            if old_pf['status'] == pf_db.PF_STATUS_ERROR:
                LOG.debug('update_packet_filter: Force filter update '
                          'because the previous status is ERROR.')
                return True
            # Compare only the attributes that describe the filter rule;
            # identity/location/status fields do not require an OFC sync.
            for key in new_pf:
                if key in ('id', 'name', 'tenant_id', 'network_id',
                           'in_port', 'status'):
                    continue
                if old_pf[key] != new_pf[key]:
                    return True
            return False

        if _packet_filter_changed(pf_old, pf):
            if hasattr(self.ofc.driver, 'update_filter'):
                # admin_state is changed
                if pf_old['admin_state_up'] != pf['admin_state_up']:
                    LOG.debug('update_packet_filter: admin_state '
                              'is changed to %s', pf['admin_state_up'])
                    if pf['admin_state_up']:
                        self.activate_packet_filter_if_ready(context, pf)
                    else:
                        self.deactivate_packet_filter(context, pf)
                elif pf['admin_state_up']:
                    LOG.debug('update_packet_filter: admin_state is '
                              'unchanged (True)')
                    if self.ofc.exists_ofc_packet_filter(context, id):
                        pf = self._update_packet_filter(context, pf, pf_data)
                    else:
                        pf = self.activate_packet_filter_if_ready(context, pf)
                else:
                    LOG.debug('update_packet_filter: admin_state is unchanged '
                              '(False). No need to update OFC filter.')
            else:
                # Driver cannot update in place: recreate the OFC filter.
                pf = self.deactivate_packet_filter(context, pf)
                pf = self.activate_packet_filter_if_ready(context, pf)

        return pf

    def _update_packet_filter(self, context, new_pf, pf_data):
        """Push an in-place filter update to OFC and sync the DB status."""
        pf_id = new_pf['id']
        prev_status = new_pf['status']
        try:
            # If previous status is ERROR, try to sync all attributes.
            pf = new_pf if prev_status == pf_db.PF_STATUS_ERROR else pf_data
            self.ofc.update_ofc_packet_filter(context, pf_id, pf)
            new_status = pf_db.PF_STATUS_ACTIVE
            if new_status != prev_status:
                self._update_resource_status(context, "packet_filter",
                                             pf_id, new_status)
                new_pf['status'] = new_status
            return new_pf
        except Exception as exc:
            with excutils.save_and_reraise_exception():
                if (isinstance(exc, nexc.OFCException) or
                    isinstance(exc, nexc.OFCConsistencyBroken)):
                    # Fixed message: this is the update path, not create.
                    LOG.error(_LE("Failed to update packet_filter id=%(id)s "
                                  "on OFC: %(exc)s"),
                              {'id': pf_id, 'exc': exc})
                new_status = pf_db.PF_STATUS_ERROR
                if new_status != prev_status:
                    self._update_resource_status(context, "packet_filter",
                                                 pf_id, new_status)

    def delete_packet_filter(self, context, id):
        """Deactivate and delete packet_filter."""
        LOG.debug("delete_packet_filter() called, id=%s .", id)

        # validate ownership
        pf = self.get_packet_filter(context, id)

        # deactivate_packet_filter() raises an exception
        # if an error occurs during processing.
        pf = self.deactivate_packet_filter(context, pf)

        super(PacketFilterMixin, self).delete_packet_filter(context, id)

    def activate_packet_filter_if_ready(self, context, packet_filter):
        """Activate packet_filter by creating filter on OFC if ready.

        Conditions to create packet_filter on OFC are:
            * packet_filter admin_state is UP
            * (if 'in_port' is specified) portinfo is available
        """
        LOG.debug("activate_packet_filter_if_ready() called, "
                  "packet_filter=%s.", packet_filter)

        pf_id = packet_filter['id']
        current = packet_filter['status']

        pf_status = current
        if not packet_filter['admin_state_up']:
            LOG.debug("activate_packet_filter_if_ready(): skip pf_id=%s, "
                      "packet_filter.admin_state_up is False.", pf_id)
        elif self.ofc.exists_ofc_packet_filter(context, packet_filter['id']):
            LOG.debug("_activate_packet_filter_if_ready(): skip, "
                      "ofc_packet_filter already exists.")
        else:
            LOG.debug("activate_packet_filter_if_ready(): create "
                      "packet_filter id=%s on OFC.", pf_id)
            try:
                self.ofc.create_ofc_packet_filter(context, pf_id,
                                                  packet_filter)
                pf_status = pf_db.PF_STATUS_ACTIVE
            except nexc.PortInfoNotFound:
                # Not an error: the filter stays DOWN until the port shows up
                # and activate_packet_filters_by_port() retries.
                LOG.debug("Skipped to create a packet filter pf_id=%s "
                          "on OFC, no portinfo for the in_port.", pf_id)
            except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
                LOG.error(_LE("Failed to create packet_filter id=%(id)s on "
                              "OFC: %(exc)s"), {'id': pf_id, 'exc': exc})
                pf_status = pf_db.PF_STATUS_ERROR

        if pf_status != current:
            self._update_resource_status(context, "packet_filter", pf_id,
                                         pf_status)
            packet_filter.update({'status': pf_status})

        return packet_filter

    def deactivate_packet_filter(self, context, packet_filter):
        """Deactivate packet_filter by deleting filter from OFC if exixts."""
        LOG.debug("deactivate_packet_filter_if_ready() called, "
                  "packet_filter=%s.", packet_filter)
        pf_id = packet_filter['id']

        if not self.ofc.exists_ofc_packet_filter(context, pf_id):
            LOG.debug("deactivate_packet_filter(): skip, "
                      "Not found OFC Mapping for packet_filter id=%s.",
                      pf_id)
            return packet_filter

        LOG.debug("deactivate_packet_filter(): "
                  "deleting packet_filter id=%s from OFC.", pf_id)
        try:
            self.ofc.delete_ofc_packet_filter(context, pf_id)
            self._update_resource_status_if_changed(
                context, "packet_filter", packet_filter, pf_db.PF_STATUS_DOWN)
            return packet_filter
        except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to delete packet_filter id=%(id)s "
                              "from OFC: %(exc)s"),
                          {'id': pf_id, 'exc': str(exc)})
                self._update_resource_status_if_changed(
                    context, "packet_filter", packet_filter,
                    pf_db.PF_STATUS_ERROR)

    def activate_packet_filters_by_port(self, context, port_id):
        """Try to activate all DOWN filters bound to the given port."""
        if not self.packet_filter_enabled:
            return

        filters = {'in_port': [port_id], 'admin_state_up': [True],
                   'status': [pf_db.PF_STATUS_DOWN]}
        pfs = self.get_packet_filters(context, filters=filters)
        for pf in pfs:
            self.activate_packet_filter_if_ready(context, pf)

    def deactivate_packet_filters_by_port(self, context, port_id,
                                          raise_exc=True):
        """Deactivate all ACTIVE filters bound to the given port."""
        if not self.packet_filter_enabled:
            return

        filters = {'in_port': [port_id], 'status': [pf_db.PF_STATUS_ACTIVE]}
        pfs = self.get_packet_filters(context, filters=filters)
        # Collect errors and keep going so every filter gets a chance.
        error = False
        for pf in pfs:
            try:
                self.deactivate_packet_filter(context, pf)
            except (nexc.OFCException, nexc.OFCMappingNotFound):
                error = True

        if raise_exc and error:
            # NOTE(review): positional args are passed here; port_id is never
            # interpolated into the message. NeutronException-style classes
            # usually take keyword args only — confirm against nexc.
            raise nexc.OFCException(_('Error occurred while disabling packet '
                                      'filter(s) for port %s'), port_id)

    def get_packet_filters_for_port(self, context, port):
        """Return the filters for a port, or None when the feature is off."""
        if self.packet_filter_enabled:
            return super(PacketFilterMixin,
                         self).get_packet_filters_for_port(context, port)
| |
from gevent.pool import Pool
from gevent import monkey
monkey.patch_all()
from idb.postgres_backend.db import PostgresDB
from idb.helpers.storage import IDigBioStorage
from idb.helpers.media_validation import get_validator
from idb.helpers.conversions import get_accessuri, get_media_type
import psycopg2
import magic
import os
import requests
from requests.auth import HTTPBasicAuth
import traceback
import json
import datetime
import time
# Shared HTTP session with enlarged connection pools so many concurrent
# gevent greenlets can reuse connections.
s = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
s.mount('http://', adapter)
s.mount('https://', adapter)
# Credentials for the iDigBio media upload API, read from the environment.
auth = HTTPBasicAuth(os.environ.get("IDB_UUID"), os.environ.get("IDB_APIKEY"))
# Module-level database handles shared by every function below.
db = PostgresDB()
local_pg = db._pg
local_cur = db._cur
def create_schema():
    """Create the media, objects and media_objects tables if missing."""
    local_cur.execute("BEGIN")
    # One row per known media URL, with the outcome of the last fetch
    # attempt (HTTP status or a synthetic code >= 1000) and its timestamp.
    local_cur.execute("""CREATE TABLE IF NOT EXISTS media (
        id BIGSERIAL PRIMARY KEY,
        url text UNIQUE,
        type varchar(20),
        mime varchar(255),
        last_status integer,
        last_check timestamp
    )
    """)
    # One row per stored object, deduplicated by etag (md5 of the content).
    local_cur.execute("""CREATE TABLE IF NOT EXISTS objects (
        id BIGSERIAL PRIMARY KEY,
        bucket varchar(255) NOT NULL,
        etag varchar(41) NOT NULL UNIQUE,
        detected_mime varchar(255),
        derivatives boolean DEFAULT false
    )
    """)
    # Many-to-many link between media URLs and stored objects.
    local_cur.execute("""CREATE TABLE IF NOT EXISTS media_objects (
        id BIGSERIAL PRIMARY KEY,
        url text NOT NULL REFERENCES media(url),
        etag varchar(41) NOT NULL REFERENCES objects(etag),
        modified timestamp NOT NULL DEFAULT now()
    )
    """)
    local_pg.commit()
# URL prefixes that must never be fetched (already hosted by us / known bad).
ignore_prefix = [
    "http://media.idigbio.org/",
    "http://firuta.huh.harvard.edu/"
]
# SQL LIKE pattern restricting this run to Tropicos-hosted media.
tropicos = 'http://www.tropicos.org/%'
# Identify the crawler to remote servers.
user_agent = {'User-Agent': 'iDigBio Media Ingestor (idigbio@acis.ufl.edu https://www.idigbio.org/wiki/index.php/Media_Ingestor)'}
def get_media(tup, cache_bad=False):
    """Fetch one media URL, validate its content and upload it to the API.

    tup is a (url, mediatype, format) triple as yielded by
    get_media_generator(). Returns True when the media was fetched,
    validated and uploaded; False otherwise. The outcome is recorded in the
    media table, using synthetic status codes for non-HTTP outcomes:
    1000 = no HTTP response obtained, 1001 = failed content validation,
    1002 = URL matches an ignored prefix. Transient HTTP failures are
    retried with sleeps in between.
    """
    url, t, fmt = tup
    # Local file name for keeping a copy of invalid media when cache_bad.
    url_path = "bad_media/"+url.replace("/","^^")
    retries = 4 # number of times
    retry_sleep = 18 # seconds
    media_status = 1000
    # Sleep because tropicos is severely rate-limited
    # time.sleep(3)
    while retries > 0:
        try:
            # Never fetch ignored prefixes; mark them with code 1002.
            for p in ignore_prefix:
                if url.startswith(p):
                    local_cur.execute("UPDATE media SET last_status=%s, last_check=now() WHERE url=%s", (1002,url))
                    print "Skip", url, t, fmt, p
                    return False
            media_req = s.get(url, headers = user_agent)
            media_status = media_req.status_code
            media_req.raise_for_status()
            # Pick a content validator for the expected format.
            validator = get_validator(fmt)
            valid, detected_mime = validator(url,t,fmt,media_req.content)
            if valid:
                print datetime.datetime.now(), "Validated Media:", url, t, fmt, detected_mime
                # Upload the validated bytes to the media API, then record
                # the object (keyed by md5) and the url<->object link.
                apiimg_req = s.post("http://media.idigbio.org/upload/" + t, data={"filereference": url}, files={'file': media_req.content }, auth=auth)
                apiimg_req.raise_for_status()
                apiimg_o = apiimg_req.json()
                local_cur.execute("UPDATE media SET last_status=%s, last_check=now() WHERE url=%s", (200,url))
                local_cur.execute("INSERT INTO objects (etag,bucket,detected_mime) SELECT %(etag)s, %(type)s, %(mime)s WHERE NOT EXISTS (SELECT 1 FROM objects WHERE etag=%(etag)s)", {"etag": apiimg_o["file_md5"], "type": t, "mime": detected_mime})
                local_cur.execute("INSERT INTO media_objects (url,etag) VALUES (%s,%s)", (url,apiimg_o["file_md5"]))
                local_pg.commit()
                return True
            else:
                # Content failed validation: synthetic status 1001.
                local_cur.execute("UPDATE media SET last_status=%s, last_check=now() WHERE url=%s", (1001,url))
                local_pg.commit()
                if cache_bad:
                    with open(url_path,"wb") as outf:
                        outf.write(media_req.content)
                print datetime.datetime.now(), "Failure", url, t, valid, fmt, detected_mime
                return False
        except KeyboardInterrupt as e:
            raise e
        # NOTE(review): bare except — any error (HTTP, DB, validator) lands
        # here and is treated as a retryable fetch failure; confirm this is
        # intended for DB errors too.
        except:
            if media_status != 404:
                retries -= 1
                if retries > 0 and media_status != 503:
                    print datetime.datetime.now(), "Retrying. Last status: ", media_status, url
                    # NOTE(review): both branches sleep retry_sleep, so the
                    # 504 special case is currently a no-op — confirm intent.
                    if media_status == 504:
                        time.sleep(retry_sleep)
                    else:
                        time.sleep(retry_sleep)
                    continue
                elif media_status == 503:
                    print datetime.datetime.now(), "(Service Unavailable) Retrying. Last status: ", media_status, url
                    time.sleep(180)
            else:
                time.sleep(1)
            # Roll back any half-done statements, then record the failure.
            local_pg.rollback()
            local_cur.execute("UPDATE media SET last_status=%s, last_check=now() WHERE url=%s", (media_status, url))
            local_pg.commit()
            print url, t, fmt, media_status
            traceback.print_exc()
    return False
def write_urls_to_db(media_urls):
    """Scan all mediarecords and sync their access URIs into the media table.

    media_urls maps already-known url -> (type, mime), as returned by
    get_postgres_media_urls(). New URLs are batched for insert; known URLs
    whose type/format changed are batched for update (without nulling
    existing values).
    """
    print "Start Inserts"
    inserted_urls = set()
    inserts = 0
    scanned = 0
    total_inserts = 0
    cur = db._get_ss_cursor()
    cur.execute(
        "SELECT type,data FROM idigbio_uuids_data WHERE type='mediarecord' and deleted=false")
    local_cur.execute("BEGIN")
    to_insert = []
    to_update = []
    # NOTE(review): this output file is opened but never written to —
    # confirm whether the url dump is still wanted.
    with open("url_out.json_null", "wb") as outf:
        for r in cur:
            scanned += 1
            url = get_accessuri(r["type"], r["data"])["accessuri"]
            o = get_media_type(r["type"], r["data"])
            form = o["format"]
            t = o["mediatype"]
            if url is not None:
                # NOTE(review): replace("&", "&") is a no-op; this probably
                # was meant to decode "&amp;" HTML entities — confirm.
                url = url.replace("&", "&").strip()
                # for/else: the else branch runs only when no ignored
                # prefix matched (no break).
                for p in ignore_prefix:
                    if url.startswith(p):
                        break
                else:
                    if url in media_urls:
                        # We're going to change something, but only if we're adding/replacing things, not nulling existing values.
                        if not (t,form) == media_urls[url] and form is not None and (t is not None or media_urls[url][0] is None):
                            to_update.append((t, form, url))
                    elif url not in inserted_urls:
                        to_insert.append((url, t, form))
                        inserted_urls.add(url)
            if scanned % 100000 == 0:
                print len(to_insert), len(to_update), scanned
    local_cur.executemany("INSERT INTO media (url,type,mime) VALUES (%s,%s,%s)", to_insert)
    local_cur.executemany("UPDATE media SET type=%s, mime=%s, last_status=NULL, last_check=NULL WHERE url=%s", to_update)
    local_pg.commit()
    print len(to_insert), len(to_update), scanned
def get_postgres_media_urls():
media_urls = dict()
print "Get Media URLs"
local_cur = local_pg.cursor()
local_cur.execute("SELECT url,type,mime FROM media where url like {0}".format(tropicos))
for r in local_cur:
media_urls[r[0]] = (r[1],r[2])
return media_urls
def get_postgres_media_objects():
    """Backfill media_objects from the legacy idb_object_keys table.

    Rows are inserted only when the url and etag both already exist and
    the link is not present yet. Commits in batches of 10000 inserts.
    """
    cur = db._get_ss_cursor()
    cur.execute("SELECT lookup_key, etag, date_modified FROM idb_object_keys")
    count = 0
    rowcount = 0
    lrc = 0
    for r in cur:
        local_cur.execute("""INSERT INTO media_objects (url,etag,modified)
            SELECT %(url)s, %(etag)s, %(modified)s WHERE EXISTS (SELECT 1 FROM media WHERE url=%(url)s) AND EXISTS (SELECT 1 FROM objects WHERE etag=%(etag)s) AND NOT EXISTS (SELECT 1 FROM media_objects WHERE url=%(url)s AND etag=%(etag)s)
        """, {"url": r[0], "etag": r[1], "modified": r[2]})
        count += 1
        rowcount += local_cur.rowcount
        # lrc guards against committing twice at the same rowcount when a
        # scan iteration inserted nothing.
        if rowcount != lrc and rowcount % 10000 == 0:
            local_pg.commit()
            print count, rowcount
            lrc = rowcount
    local_pg.commit()
    print count, rowcount
def get_objects_from_ceph():
local_cur.execute("SELECT etag FROM objects")
existing_objects = set()
for r in local_cur:
existing_objects.add(r[0])
print len(existing_objects)
s = IDigBioStorage()
buckets = ["datasets","images"]
count = 0
rowcount = 0
lrc = 0
for b_k in buckets:
b = s.get_bucket("idigbio-" + b_k + "-prod")
for k in b.list():
if k.name not in existing_objects:
try:
ks = k.get_contents_as_string(headers={'Range' : 'bytes=0-100'})
detected_mime = magic.from_buffer(ks, mime=True)
local_cur.execute("INSERT INTO objects (bucket,etag,detected_mime) SELECT %(bucket)s,%(etag)s,%(dm)s WHERE NOT EXISTS (SELECT 1 FROM objects WHERE etag=%(etag)s)", {"bucket": b_k, "etag": k.name, "dm": detected_mime})
existing_objects.add(k.name)
rowcount += local_cur.rowcount
except:
print "Ceph Error", b_k, k.name
count += 1
if rowcount != lrc and rowcount % 10000 == 0:
print count, rowcount
local_pg.commit()
lrc = rowcount
print count, rowcount
local_pg.commit()
def set_deriv_from_ceph():
    """Mark objects as having derivatives when a thumbnail exists in Ceph.

    Thumbnail keys are named "<etag>.<ext>", so the etag is recovered by
    splitting on the first dot. Commits every 10000 keys.
    """
    s = IDigBioStorage()
    b = s.get_bucket("idigbio-images-prod-thumbnail")
    count = 0
    for k in b.list():
        local_cur.execute("UPDATE objects SET derivatives=true WHERE etag=%s", (k.name.split(".")[0],))
        count += 1
        if count % 10000 == 0:
            print count
            local_pg.commit()
    print count
    local_pg.commit()
def get_media_generator():
    """Yield (url, type, mime) triples for tropicos media needing a fetch.

    A URL needs fetching when it has no stored object, its last status is
    missing or an error (>= 400), and it was not checked in the last 10
    days. The outer query groups candidate URLs by host prefix.
    """
    local_cur.execute("""SELECT * FROM
        (
            SELECT substring(url from 'https?://[^/]*/'), count(*) FROM
            (
                SELECT media.url, media_objects.etag FROM media LEFT JOIN media_objects ON media.url = media_objects.url WHERE
                type IS NOT NULL AND
                (last_status IS NULL or last_status >= 400) AND
                media.url like 'http://www.tropicos.org/%' and last_check < now()-'10 days'::interval
            )
            AS a WHERE a.etag IS NULL GROUP BY substring(url from 'https?://[^/]*/')
        )
        AS b WHERE substring != '' ORDER BY count""")
    subs_rows = local_cur.fetchall()
    for sub_row in subs_rows:
        subs = sub_row[0]
        # NOTE(review): the inner query below does not filter by ``subs``,
        # so the same URL set is yielded once per host prefix — with the
        # tropicos-only LIKE there is a single prefix, but confirm this is
        # intended before broadening the pattern.
        local_cur.execute("""SELECT url,type,mime FROM (
            SELECT media.url,type,mime,etag FROM media LEFT JOIN media_objects ON media.url = media_objects.url
            WHERE media.url LIKE 'http://www.tropicos.org/%' AND type IS NOT NULL AND (last_status IS NULL OR last_status >= 400) and media.last_check < now()-'10 days'::interval
        ) AS a WHERE a.etag IS NULL""")
        url_rows = local_cur.fetchall()
        for url_row in url_rows:
            yield tuple(url_row[0:3])
def get_media_consumer():
    """Drain get_media_generator() through a small gevent pool.

    Prints running totals: processed count, successes (t), failures (f).
    """
    p = Pool(2)
    count = 0
    t = 0
    f = 0
    for r in p.imap_unordered(get_media, get_media_generator()):
        if r:
            t += 1
        else:
            f += 1
        count += 1
        if count % 10000 == 0:
            print count,t,f
    print count,t,f
def main():
    """Entry point.

    With a "get_media" argument, run the fetch/validate/upload pipeline;
    otherwise refresh the media url table from the mediarecord store.
    """
    import sys
    #create_schema()
    if len(sys.argv) > 1 and sys.argv[1] == "get_media":
        print "Starting get_media operations at ", datetime.datetime.now()
        get_media_consumer()
        print "Finished get_media operations at ", datetime.datetime.now()
    else:
        print "Starting media_urls operations at ", datetime.datetime.now()
        media_urls = get_postgres_media_urls()
        write_urls_to_db(media_urls)
        # get_objects_from_ceph()
        # get_postgres_media_objects()
        #set_deriv_from_ceph()
        print "Finished media_urls operations at ", datetime.datetime.now()
# SQL Queries to import from old table:
#insert into media (url,type,owner) (select lookup_key,type,user_uuid::uuid from (select media.url, idb_object_keys.lookup_key, idb_object_keys.type, idb_object_keys.user_uuid from idb_object_keys left join media on lookup_key=url) as a where url is null);
#insert into objects (bucket,etag) (select type,etag from (select lookup_key, type, a.etag, b.etag as n from idb_object_keys as a left join objects as b on a.etag=b.etag) as c where n is null);
#insert into media_objects (url,etag,modified) (select lookup_key,etag,date_modified from (select media_objects.url, idb_object_keys.lookup_key, idb_object_keys.etag, idb_object_keys.date_modified from idb_object_keys left join media_objects on lookup_key=url and media_objects.etag=idb_object_keys.etag) as a where url is null);
| |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tokenizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import REDACTED.transformer_lingvo.lingvo.compat as tf
from REDACTED.transformer_lingvo.lingvo.core import base_layer
from REDACTED.transformer_lingvo.lingvo.core import ops
from REDACTED.transformer_lingvo.lingvo.core import py_utils
from REDACTED.transformer_lingvo.lingvo.core import wpm_encoder
import numpy as np
class BaseTokenizer(base_layer.BaseLayer):
  """The base tokenizer."""
  @classmethod
  def Params(cls):
    """Defaults params for tokenizers."""
    p = super(BaseTokenizer, cls).Params()
    p.name = 'tokenizer'
    p.Define('vocab_size', 64, 'The size of the vocabuary.')
    p.Define(
        'append_eos', True, 'Whether to append </s> at the end and treat '
        'it as a non-padded label.')
    p.Define('pad_to_max_length', True,
             'If True, output ids will be padded to max_length.')
    # TODO(ciprianchelba): there should be a check in __init__ that the ids
    # below are consistent with the ones assigned by the vocabulary.
    p.Define('target_unk_id', 0, 'Target unknown token id.')
    p.Define('target_sos_id', 1, 'Target start of sequence id.')
    p.Define('target_eos_id', 2, 'Target end of sequence id.')
    p.Define('target_wb_id', -1, 'Target word boundary id.')
    return p
  @base_layer.initializer
  def __init__(self, params):
    super(BaseTokenizer, self).__init__(params)
    p = self.params
    # Cache the special token ids for convenient access by subclasses.
    self.sos_id = p.target_sos_id  # <S>
    self.eos_id = p.target_eos_id  # </S>
    self.unk_id = p.target_unk_id  # <UNK>
  def StringsToIds(self,
                   strs,
                   max_length,
                   external_append_eos=None,
                   languages=None):
    """Tokenize strs into vocab ids.
    Args:
      strs: A vector of strings.
      max_length: An int providing the max_length for strs.
      external_append_eos: Bool or None. If None, will be ignored and
        `params.append_eos` will be used. If bool, will determine if an eos
        symbol will be added to tokens.
      languages: A vector of strings with the same length as `strs`.
    Returns:
      A tuple (ids, labels, paddings) with the same shape [batch, maxlen].
      - ids[i, j] is the input token id of i-th sample for j-th step.
      - labels[i, j] is the target token id of i-th sample for j-th step.
      - paddings[i, j] is 1 iff i-th sample's j-th step is padded.
    Raises:
      ValueError: If unknown token type.
    """
    p = self.params
    # Resolve the effective append_eos, then delegate to the subclass hook.
    if external_append_eos is None:
      append_eos = p.append_eos
    else:
      append_eos = external_append_eos
    return self._StringsToIdsImpl(strs, max_length, append_eos, languages)
  def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
    # Subclass hook implementing the actual tokenization.
    raise NotImplementedError('Abstract method.')
  def IdsToStrings(self, ids, lens, languages=None):
    """Converts ids back to strings.
    Args:
      ids: A matrix of shape [batch, seqlen]. ids[i, :] is the i-th sample's
        ids.
      lens: A vector of shape [batch]. lens[i] is the sequence length of the
        i-th sample. Only the first lens[i] tokens in ids[i, :] are valid tokens
        for the i-th sequence.
      languages: A vector of strings of shape [batch].
    Returns:
      sequences - A vector of shape [batch]. The converted string sequence.
    Raises:
      ValueError: If unknown token type.
    """
    raise NotImplementedError('Abstract method.')
class AsciiTokenizer(BaseTokenizer):
  """A simple grapheme tokenizer.

  Maps a small vocabulary of character tokens for (lower case) letters, digits,
  and punctuation symbols.
  """

  def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
    """See `BaseTokenizer.StringsToIds`; `languages` is ignored."""
    p = self.params
    return ops.ascii_to_token_id(
        strs,
        maxlen=max_length,
        pad_to_maxlen=p.pad_to_max_length,
        append_eos=append_eos)

  def IdsToStrings(self, ids, lens, languages=None):
    """See `BaseTokenizer.IdsToStrings`.

    `languages` is accepted for signature compatibility with the base class
    (the original override omitted it) and is ignored.
    """
    del languages  # Unused by the ASCII tokenizer.
    return ops.id_to_ascii(ids, lens)
class VocabFileTokenizer(BaseTokenizer):
  """Tokenizers that use vocab files for look-up."""
  @classmethod
  def Params(cls):
    p = super(VocabFileTokenizer, cls).Params()
    p.Define('token_vocab_filepath', None,
             'If set, specifies a filepath to the token vocab file.')
    p.Define('ngram_vocab_filepath', None,
             'If set, specifies a filepath to the Ngram vocab file.')
    p.Define('ngram_separator', '',
             'string separator to use when joining ngrams.')
    p.Define('tokens_delimiter', ' ',
             'The delimiter to split a string to tokens with.')
    p.Define(
        'load_token_ids_from_vocab', True,
        'Whether token ids are present in vocab (i.e. vocab contains two '
        'colums, one for IDs and one for words). If false, line numbers '
        'are used.')
    return p
  @property
  def _vocab_file_params(self):
    # Names of the mutually exclusive vocab-file params (exactly one of
    # these must be set; enforced by _CheckParams).
    return ['token_vocab_filepath', 'ngram_vocab_filepath']
  def _CheckParams(self):
    # Validate that exactly one of the vocab file params is configured.
    p = self.params
    num_params_specified = sum(
        [getattr(p, x) is not None for x in self._vocab_file_params])
    if num_params_specified != 1:
      raise ValueError('Exactly one vocab file should be specified!')
  def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
    # See `BaseTokenizer.StringsToIds`; `languages` is ignored here.
    self._CheckParams()
    p = self.params
    if p.token_vocab_filepath:
      return ops.str_to_vocab_tokens(
          strs,
          maxlen=max_length,
          pad_to_maxlen=p.pad_to_max_length,
          append_eos=append_eos,
          vocab_filepath=p.token_vocab_filepath,
          load_token_ids_from_vocab=p.load_token_ids_from_vocab,
          delimiter=p.tokens_delimiter)
    elif p.ngram_vocab_filepath:
      raise NotImplementedError('ngram vocab StringsToIds is not supported.')
  def IdsToStrings(self, ids, lens):
    # _CheckParams guarantees exactly one of the branches below binds the
    # ngram_vocab_filepath/ngram_separator locals before they are used.
    self._CheckParams()
    p = self.params
    if p.token_vocab_filepath:
      ngram_vocab_filepath = p.token_vocab_filepath
      ngram_separator = p.tokens_delimiter
    elif p.ngram_vocab_filepath:
      ngram_vocab_filepath = p.ngram_vocab_filepath
      ngram_separator = p.ngram_separator
    return ops.ngram_id_to_token(
        token_ids=ids,
        seq_lengths=lens,
        ngram_vocab_filepath=ngram_vocab_filepath,
        ngram_separator=ngram_separator)
class BpeTokenizer(BaseTokenizer):
  """Tokenizers that use BPE vocab files and word to id lists for look-up."""

  @classmethod
  def Params(cls):
    p = super(BpeTokenizer, cls).Params()
    p.Define('codes_filepath', None,
             'Specifies a filepath to the list of bpe codes vocab file.')
    p.Define('words_to_ids_filepath', None,
             'Specifies a filepath to the word bpe vocab file.')
    return p

  def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
    """Maps `strs` to BPE token ids; `languages` is ignored."""
    tokenization_path = self.params.words_to_ids_filepath
    return ops.bpe_words_to_ids(
        strs,
        maxlen=max_length,
        append_eos=append_eos,
        tokenization_filepath=tokenization_path)

  def IdsToStrings(self, ids, lens):
    """Maps BPE token ids back to strings via the codes vocab."""
    vocab_path = self.params.codes_filepath
    return ops.bpe_ids_to_words(
        token_ids=ids, seq_lengths=lens, vocab_filepath=vocab_path)
class WpmTokenizer(BaseTokenizer):
  """Tokenizer for word-piece models."""

  @classmethod
  def Params(cls):
    p = super(WpmTokenizer, cls).Params()
    p.Define(
        'vocab_filepath', None,
        'Specifies a filepath to the WPM vocab. The vocab is sorted by '
        'descending merge score.')
    p.Define(
        'merge_prob', 1.,
        'Probability of merging WPMs. If less than 1, then decomposition '
        'of words into wordpieces will no longer be deterministic, and '
        'result in longer ID sequences. At 0, it will be graphemes.')
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(WpmTokenizer, self).__init__(params)
    p = self.params
    self._wpm_encoder = wpm_encoder.WpmEncoder(p.vocab_filepath, p.merge_prob)
    # The special token ids configured on this tokenizer must match the ones
    # exposed by the WPM vocab, or encode/decode would disagree about them.
    assert p.target_unk_id == self._wpm_encoder.unk_id
    assert p.target_sos_id == self._wpm_encoder.sentence_start_id
    assert p.target_eos_id == self._wpm_encoder.sentence_end_id

  def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
    """Takes a tensor of strings and returns id/padding tensors.

    This generates `token_ids`, `target_ids`, and `paddings` in the format
    that is expected for tokenizers. This performs padding to a fixed length
    and appends the end-of-sentence token as appropriate.

    Args:
      strs: a string Tensor.
      max_length: a python integer. The second dimension of the returned
        arrays. All sequences are padded or truncated to that length.
      append_eos: a python bool. See `BaseTokenizer` for explanation.
      languages: A vector of strings with the same length as `strs`.

    Returns:
      A tuple of 3 tensors:

      - token_ids: a tensor of sequences of WPM ids starting with SOS.
        Sequences always end with EOS unless the sequence exceeds the maximum
        length.  Always padded with EOS.
      - target_ids: a tensor of sequences of WPM ids not starting with SOS
        but ending with EOS. Always padded with EOS.
      - paddings: a tensor of floats indicating, at each position, whether
        the corresponding position is padded.
    """
    p = self.params
    if append_eos is None:
      append_eos = p.append_eos

    batch_size = py_utils.GetShape(strs)[0]
    token_ids_ta = tf.TensorArray(tf.int32, batch_size)
    target_ids_ta = tf.TensorArray(tf.int32, batch_size)
    paddings_ta = tf.TensorArray(tf.float32, batch_size)

    def _TokenizeOneSentence(i, strs, token_ids_ta, target_ids_ta, paddings_ta):
      """Tokenizes a single sentence."""
      ids, _ = self._wpm_encoder.Encode(strs[i])
      if append_eos:
        ids = tf.concat([ids, [self.eos_id]], axis=0)
      # This truncates after the eos is added, so some sentences might
      # not have </s> at the end.
      token_ids_ta = token_ids_ta.write(
          i,
          py_utils.PadOrTrimTo(
              tf.concat([[self.sos_id], ids], axis=0), [max_length],
              self.eos_id))
      target_ids_ta = target_ids_ta.write(
          i, py_utils.PadOrTrimTo(ids, [max_length], self.eos_id))
      paddings_ta = paddings_ta.write(
          i,
          py_utils.PadOrTrimTo(
              tf.zeros_like(ids, dtype=tf.float32), [max_length], 1.))
      return i + 1, strs, token_ids_ta, target_ids_ta, paddings_ta

    # Tokenization happens one sentence at a time inside a tf.while_loop
    # because WpmEncoder.Encode operates on a single string.
    _, _, token_ids_ta, target_ids_ta, paddings_ta = tf.while_loop(
        lambda i, *_: i < batch_size,
        _TokenizeOneSentence,
        loop_vars=(tf.constant(0, tf.int32), strs, token_ids_ta, target_ids_ta,
                   paddings_ta),
        parallel_iterations=30,
        back_prop=False)
    token_ids = token_ids_ta.stack()
    target_ids = target_ids_ta.stack()
    paddings = paddings_ta.stack()

    if not p.pad_to_max_length:
      # Trim the batch to the longest actual sequence (recovered by counting
      # non-padded positions per row).
      maxlen = tf.cast(
          tf.round(tf.reduce_max(tf.reduce_sum(1.0 - paddings, axis=1))),
          tf.int32)
      token_ids = token_ids[:, :maxlen]
      target_ids = target_ids[:, :maxlen]
      paddings = paddings[:, :maxlen]

    return token_ids, target_ids, paddings

  def IdsToStrings(self, ids, lens):
    """Takes integer matrices and returns vectors of strings."""
    # Guard against mismatched batch dimensions before decoding.
    ids = py_utils.with_dependencies([py_utils.assert_same_dim0([ids, lens])],
                                     ids)
    # Decode each row, restricted to its true length, back to a string.
    return tf.map_fn(
        lambda inputs: self._wpm_encoder.Decode(inputs[0][:inputs[1]]),
        (ids, lens),
        dtype=tf.string,
        parallel_iterations=30,
        back_prop=False)
| |
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.rdata
# Powers of ten used by the RFC 1876 base*10^exponent size encoding; the
# largest encodable exponent is 9 (values are centimeters).
_pows = (1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L,
         100000000L, 1000000000L, 10000000000L)

# default values are in centimeters
_default_size = 100.0
_default_hprec = 1000000.0
_default_vprec = 1000.0
def _exponent_of(what, desc):
    """Return the power-of-ten exponent used to encode *what*.

    *what* is a non-negative integer (centimeters); *desc* is the field name
    used in error messages.  Raises dns.exception.SyntaxError when the value
    is too large to encode.
    """
    if what == 0:
        return 0
    exp = None
    # The exponent is one less than the index of the first power of ten
    # that exceeds the value.
    for i in xrange(len(_pows)):
        if what // _pows[i] == 0L:
            exp = i - 1
            break
    if exp is None or exp < 0:
        raise dns.exception.SyntaxError("%s value out of bounds" % desc)
    return exp
def _float_to_tuple(what):
    """Convert decimal degrees into a (degrees, minutes, seconds,
    milliseconds) tuple; the sign is carried on the degrees component."""
    if what < 0:
        sign = -1
        what *= -1
    else:
        sign = 1
    # Work in integer milliseconds of arc to avoid float rounding drift.
    what = long(round(what * 3600000))
    degrees = int(what // 3600000)
    what -= degrees * 3600000
    minutes = int(what // 60000)
    what -= minutes * 60000
    seconds = int(what // 1000)
    what -= int(seconds * 1000)
    what = int(what)
    return (degrees * sign, minutes, seconds, what)
def _tuple_to_float(what):
if what[0] < 0:
sign = -1
value = float(what[0]) * -1
else:
sign = 1
value = float(what[0])
value += float(what[1]) / 60.0
value += float(what[2]) / 3600.0
value += float(what[3]) / 3600000.0
return sign * value
def _encode_size(what, desc):
    """Pack a centimeter value into the RFC 1876 one-byte form: the high
    nibble is the base digit, the low nibble the power-of-ten exponent."""
    what = long(what);
    exponent = _exponent_of(what, desc) & 0xF
    # // binds tighter than &, so this is (what // 10**exponent) & 0xF.
    base = what // pow(10, exponent) & 0xF
    return base * 16 + exponent
def _decode_size(what, desc):
    """Unpack an RFC 1876 base*10^exponent byte back into centimeters.

    Raises dns.exception.SyntaxError when either nibble exceeds 9, the
    maximum decimal digit the encoding allows.
    """
    exponent = what & 0x0F
    if exponent > 9:
        raise dns.exception.SyntaxError("bad %s exponent" % desc)
    base = (what & 0xF0) >> 4
    if base > 9:
        raise dns.exception.SyntaxError("bad %s base" % desc)
    return long(base) * pow(10, exponent)
class LOC(dns.rdata.Rdata):
    """LOC record
    @ivar latitude: latitude
    @type latitude: (int, int, int, int) tuple specifying the degrees, minutes,
    seconds, and milliseconds of the coordinate.
    @ivar longitude: longitude
    @type longitude: (int, int, int, int) tuple specifying the degrees,
    minutes, seconds, and milliseconds of the coordinate.
    @ivar altitude: altitude
    @type altitude: float
    @ivar size: size of the sphere
    @type size: float
    @ivar horizontal_precision: horizontal precision
    @type horizontal_precision: float
    @ivar vertical_precision: vertical precision
    @type vertical_precision: float
    @see: RFC 1876"""

    __slots__ = ['latitude', 'longitude', 'altitude', 'size',
                 'horizontal_precision', 'vertical_precision']

    def __init__(self, rdclass, rdtype, latitude, longitude, altitude,
                 size=_default_size, hprec=_default_hprec, vprec=_default_vprec):
        """Initialize a LOC record instance.

        The parameters I{latitude} and I{longitude} may be either a 4-tuple
        of integers specifying (degrees, minutes, seconds, milliseconds),
        or they may be floating point values specifying the number of
        degrees. The other parameters are floats. Size, horizontal precision,
        and vertical precision are specified in centimeters."""
        super(LOC, self).__init__(rdclass, rdtype)
        # Normalize numeric coordinates to the canonical 4-tuple form.
        if isinstance(latitude, int) or isinstance(latitude, long):
            latitude = float(latitude)
        if isinstance(latitude, float):
            latitude = _float_to_tuple(latitude)
        self.latitude = latitude
        if isinstance(longitude, int) or isinstance(longitude, long):
            longitude = float(longitude)
        if isinstance(longitude, float):
            longitude = _float_to_tuple(longitude)
        self.longitude = longitude
        self.altitude = float(altitude)
        self.size = float(size)
        self.horizontal_precision = float(hprec)
        self.vertical_precision = float(vprec)

    def to_text(self, origin=None, relativize=True, **kw):
        """Render the record in master-file text form."""
        # A zero degrees component renders as the N/E hemisphere.
        if self.latitude[0] > 0:
            lat_hemisphere = 'N'
            lat_degrees = self.latitude[0]
        else:
            lat_hemisphere = 'S'
            lat_degrees = -1 * self.latitude[0]
        if self.longitude[0] > 0:
            long_hemisphere = 'E'
            long_degrees = self.longitude[0]
        else:
            long_hemisphere = 'W'
            long_degrees = -1 * self.longitude[0]
        # Internal values are centimeters; text form is meters.
        text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % (
            lat_degrees, self.latitude[1], self.latitude[2], self.latitude[3],
            lat_hemisphere, long_degrees, self.longitude[1], self.longitude[2],
            self.longitude[3], long_hemisphere, self.altitude / 100.0
        )
        # do not print default values
        if self.size != _default_size or \
           self.horizontal_precision != _default_hprec or \
           self.vertical_precision != _default_vprec:
            text += " %0.2fm %0.2fm %0.2fm" % (
                self.size / 100.0, self.horizontal_precision / 100.0,
                self.vertical_precision / 100.0
            )
        return text

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        """Parse a LOC record from master-file text."""
        latitude = [0, 0, 0, 0]
        longitude = [0, 0, 0, 0]
        size = _default_size
        hprec = _default_hprec
        vprec = _default_vprec
        # Latitude: degrees [minutes [seconds[.milliseconds]]] hemisphere.
        latitude[0] = tok.get_int()
        t = tok.get_string()
        if t.isdigit():
            latitude[1] = int(t)
            t = tok.get_string()
            if '.' in t:
                (seconds, milliseconds) = t.split('.')
                if not seconds.isdigit():
                    raise dns.exception.SyntaxError('bad latitude seconds value')
                latitude[2] = int(seconds)
                if latitude[2] >= 60:
                    raise dns.exception.SyntaxError('latitude seconds >= 60')
                l = len(milliseconds)
                if l == 0 or l > 3 or not milliseconds.isdigit():
                    raise dns.exception.SyntaxError('bad latitude milliseconds value')
                # Scale 1-3 fractional digits up to milliseconds.
                if l == 1:
                    m = 100
                elif l == 2:
                    m = 10
                else:
                    m = 1
                latitude[3] = m * int(milliseconds)
                t = tok.get_string()
            elif t.isdigit():
                latitude[2] = int(t)
                t = tok.get_string()
        if t == 'S':
            latitude[0] *= -1
        elif t != 'N':
            raise dns.exception.SyntaxError('bad latitude hemisphere value')
        # Longitude: same shape as latitude, with E/W hemisphere.
        longitude[0] = tok.get_int()
        t = tok.get_string()
        if t.isdigit():
            longitude[1] = int(t)
            t = tok.get_string()
            if '.' in t:
                (seconds, milliseconds) = t.split('.')
                if not seconds.isdigit():
                    raise dns.exception.SyntaxError('bad longitude seconds value')
                longitude[2] = int(seconds)
                if longitude[2] >= 60:
                    raise dns.exception.SyntaxError('longitude seconds >= 60')
                l = len(milliseconds)
                if l == 0 or l > 3 or not milliseconds.isdigit():
                    raise dns.exception.SyntaxError('bad longitude milliseconds value')
                if l == 1:
                    m = 100
                elif l == 2:
                    m = 10
                else:
                    m = 1
                longitude[3] = m * int(milliseconds)
                t = tok.get_string()
            elif t.isdigit():
                longitude[2] = int(t)
                t = tok.get_string()
        if t == 'W':
            longitude[0] *= -1
        elif t != 'E':
            raise dns.exception.SyntaxError('bad longitude hemisphere value')
        t = tok.get_string()
        if t[-1] == 'm':
            t = t[0 : -1]
        altitude = float(t) * 100.0        # m -> cm
        # The trailing size / horizontal precision / vertical precision
        # fields are each optional, but only in left-to-right order.
        token = tok.get().unescape()
        if not token.is_eol_or_eof():
            value = token.value
            if value[-1] == 'm':
                value = value[0 : -1]
            size = float(value) * 100.0        # m -> cm
            token = tok.get().unescape()
            if not token.is_eol_or_eof():
                value = token.value
                if value[-1] == 'm':
                    value = value[0 : -1]
                hprec = float(value) * 100.0        # m -> cm
                token = tok.get().unescape()
                if not token.is_eol_or_eof():
                    value = token.value
                    if value[-1] == 'm':
                        value = value[0 : -1]
                    vprec = float(value) * 100.0        # m -> cm
        tok.get_eol()
        return cls(rdclass, rdtype, latitude, longitude, altitude,
                   size, hprec, vprec)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        """Write the packed RFC 1876 wire form to I{file}."""
        if self.latitude[0] < 0:
            sign = -1
            degrees = long(-1 * self.latitude[0])
        else:
            sign = 1
            degrees = long(self.latitude[0])
        milliseconds = (degrees * 3600000 +
                        self.latitude[1] * 60000 +
                        self.latitude[2] * 1000 +
                        self.latitude[3]) * sign
        # Wire coordinates are unsigned offsets from 2^31.
        latitude = 0x80000000L + milliseconds
        if self.longitude[0] < 0:
            sign = -1
            degrees = long(-1 * self.longitude[0])
        else:
            sign = 1
            degrees = long(self.longitude[0])
        milliseconds = (degrees * 3600000 +
                        self.longitude[1] * 60000 +
                        self.longitude[2] * 1000 +
                        self.longitude[3]) * sign
        longitude = 0x80000000L + milliseconds
        # Altitude is offset by 100,000 m (10,000,000 cm) on the wire.
        altitude = long(self.altitude) + 10000000L
        size = _encode_size(self.size, "size")
        hprec = _encode_size(self.horizontal_precision, "horizontal precision")
        vprec = _encode_size(self.vertical_precision, "vertical precision")
        # Leading 0 is the LOC VERSION field.
        wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude,
                           longitude, altitude)
        file.write(wire)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        """Build a LOC instance from its packed wire form."""
        (version, size, hprec, vprec, latitude, longitude, altitude) = \
            struct.unpack("!BBBBIII", wire[current : current + rdlen])
        # Undo the 2^31 offset and convert milliseconds of arc to degrees.
        if latitude > 0x80000000L:
            latitude = float(latitude - 0x80000000L) / 3600000
        else:
            latitude = -1 * float(0x80000000L - latitude) / 3600000
        if latitude < -90.0 or latitude > 90.0:
            raise dns.exception.FormError("bad latitude")
        if longitude > 0x80000000L:
            longitude = float(longitude - 0x80000000L) / 3600000
        else:
            longitude = -1 * float(0x80000000L - longitude) / 3600000
        if longitude < -180.0 or longitude > 180.0:
            raise dns.exception.FormError("bad longitude")
        altitude = float(altitude) - 10000000.0
        size = _decode_size(size, "size")
        hprec = _decode_size(hprec, "horizontal precision")
        vprec = _decode_size(vprec, "vertical precision")
        return cls(rdclass, rdtype, latitude, longitude, altitude,
                   size, hprec, vprec)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Compare via the canonical wire encodings of both records.
        f = cStringIO.StringIO()
        self.to_wire(f)
        wire1 = f.getvalue()
        f.seek(0)
        f.truncate()
        other.to_wire(f)
        wire2 = f.getvalue()
        f.close()
        return cmp(wire1, wire2)

    def _get_float_latitude(self):
        return _tuple_to_float(self.latitude)

    def _set_float_latitude(self, value):
        self.latitude = _float_to_tuple(value)

    float_latitude = property(_get_float_latitude, _set_float_latitude,
                              doc="latitude as a floating point value")

    def _get_float_longitude(self):
        return _tuple_to_float(self.longitude)

    def _set_float_longitude(self, value):
        self.longitude = _float_to_tuple(value)

    float_longitude = property(_get_float_longitude, _set_float_longitude,
                               doc="longitude as a floating point value")
| |
# -*- coding: utf-8 -*-
"""
Spectra analysis utilities
"""
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import six
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import RidgeCV
from sklearn.pipeline import make_pipeline
from sklearn.metrics import r2_score, median_absolute_error
def read_spectra(path_csv):
    """Read and parse data in pandas DataFrames.

    Parameters
    ----------
    path_csv : str
        Path to the CSV file to read.

    Returns
    -------
    spectra : pandas DataFrame, shape (n_spectra, n_freq_point)
        DataFrame containing all Raman spectra.

    concentration : pandas Series, shape (n_spectra,)
        Series containing the concentration of the molecule.

    molecule : pandas Series, shape (n_spectra,)
        Series containing the type of chemotherapeutic agent.

    Raises
    ------
    TypeError
        If ``path_csv`` is not a string.
    ValueError
        If ``path_csv`` does not point to a ``.csv`` file.
    """
    if not isinstance(path_csv, six.string_types):
        raise TypeError("'path_csv' needs to be string. Got {}"
                        " instead.".format(type(path_csv)))
    if not path_csv.endswith('.csv'):
        raise ValueError('Wrong file format. Expecting csv file')
    data = pd.read_csv(path_csv)
    concentration = data['concentration']
    molecule = data['molecule']
    spectra_string = data['spectra']
    spectra = []
    for spec in spectra_string:
        # Each spectrum is serialized as "[v1,v2,...]": strip the brackets
        # and parse the comma-separated values.  np.fromstring's text mode
        # is deprecated, so split explicitly instead.
        spectra.append(np.array(spec[1:-1].split(','), dtype=float))
    spectra = pd.DataFrame(spectra)
    return spectra, concentration, molecule
def _apply_axis_layout(ax, title):
    """Label the axis, set its title, and apply the despined look."""
    ax.set_xlabel('Frequency')
    ax.set_ylabel('Concentration')
    ax.set_title(title)
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    for side in ('left', 'bottom'):
        ax.spines[side].set_position(('outward', 10))
def plot_spectra(frequency, spectra, title):
    """Plot every Raman spectrum in *spectra* against *frequency*.

    Parameters
    ----------
    frequency : pandas Series, shape (n_freq_points,)
        Frequencies for which the Raman spectra were acquired.

    spectra : pandas DataFrame, shape (n_spectra, n_freq_points)
        DataFrame containing all Raman spectra.

    title : str
        Title added to the plot.

    Returns
    -------
    None
    """
    fig, axes = plt.subplots()
    axes.plot(frequency, spectra.T)
    _apply_axis_layout(axes, title)
def plot_spectra_by_type(frequency, spectra, classes, title):
    """Plot the mean spectrum of each class with a +/- one-std band.

    Parameters
    ----------
    frequency : pandas Series, shape (n_freq_points,)
        Frequencies for which the Raman spectra were acquired.

    spectra : pandas DataFrame, shape (n_spectra, n_freq_points)
        DataFrame containing all Raman spectra.

    classes : array-like, shape (n_classes,)
        Array containing the different spectra class which will be plotted.

    title : str
        Title added to the plot.

    Returns
    -------
    None
    """
    fig, ax = plt.subplots()
    for label in np.unique(classes):
        subset = spectra.iloc[np.flatnonzero(classes == label)]
        mean_curve = np.mean(subset, axis=0)
        std_curve = np.std(subset, axis=0)
        ax.plot(frequency, mean_curve, label=label)
        ax.fill_between(frequency,
                        mean_curve + std_curve,
                        mean_curve - std_curve,
                        alpha=0.2)
    _apply_axis_layout(ax, title)
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
def plot_cm(cm, classes, title):
    """Plot a confusion matrix with per-cell counts overlaid.

    Parameters
    ----------
    cm : ndarray, shape (n_classes, n_classes)
        Confusion matrix.

    classes : array-like, shape (n_classes,)
        Array containing the different spectra classes used in the
        classification problem.

    title : str
        Title added to the plot.

    Returns
    -------
    None
    """
    fig, ax = plt.subplots()
    plt.imshow(cm, interpolation='nearest', cmap='bwr')
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    # Switch the text color at half the maximum count for readability.
    cutoff = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        shade = "white" if cm[row, col] > cutoff else "black"
        plt.text(col, row, format(cm[row, col], 'd'),
                 horizontalalignment="center",
                 color=shade)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def plot_regression(y_true, y_pred, title):
    """Scatter predicted against actual targets with R^2/MAE annotation.

    Parameters
    ----------
    y_true : array-like, shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like, shape (n_samples,)
        Estimated targets as returned by a regressor.

    title : str
        Title added to the plot.

    Returns
    -------
    None
    """
    fig, ax = plt.subplots()
    ax.scatter(y_true, y_pred)
    # Identity line: perfect predictions would fall on it.
    ax.plot([0, 25000], [0, 25000], '--k')
    ax.set_ylabel('Target predicted')
    ax.set_xlabel('True Target')
    ax.set_title(title)
    metrics_label = r'$R^2$=%.2f, MAE=%.2f' % (
        r2_score(y_true, y_pred), median_absolute_error(y_true, y_pred))
    ax.text(1000, 20000, metrics_label)
    ax.set_xlim([0, 25000])
    ax.set_ylim([0, 25000])
def regression_experiment(X_train, X_test, y_train, y_test):
    """Fit PCA + regressor pipelines and plot their predictions.

    Build a pipeline using PCA and either a Ridge
    or a RandomForestRegressor model.

    Parameters
    ----------
    X_train : pandas DataFrame, shape (n_spectra, n_freq_points)
        DataFrame containing training Raman spectra.

    X_test : pandas DataFrame, shape (n_spectra, n_freq_points)
        DataFrame containing testing Raman spectra.

    y_train : pandas Series, shape (n_spectra,)
        Series containing the training concentrations acting as targets.

    y_test : pandas Series, shape (n_spectra,)
        Series containing the testing concentrations acting as targets.

    Returns
    -------
    None
    """
    regressors = (RidgeCV(), RandomForestRegressor(random_state=0))
    for reg in regressors:
        model = make_pipeline(PCA(n_components=100), reg)
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        plot_regression(y_test, y_pred,
                        'Regression using {}'.format(reg.__class__.__name__))
def fit_params(data):
    """Compute robust per-wavelength scaling statistics.

    Compute the median and the variance, i.e. the difference
    between the 75th and 25th percentiles.
    These statistics are used later to scale data.

    Parameters
    ----------
    data : pandas DataFrame, shape (n_spectra, n_freq_point)
        DataFrame containing all Raman spectra.

    Returns
    -------
    median : ndarray, shape (n_freq_point,)
        Median for each wavelength.

    variance : ndarray, shape (n_freq_point,)
        Variance (difference between the 75th and 25th
        percentiles) for each wavelength.
    """
    median = np.median(data, axis=0)
    quartile_25, quartile_75 = np.percentile(data, [25, 75], axis=0)
    return median, quartile_75 - quartile_25
def transform(data, median, var_25_75):
    """Robustly scale data with precomputed statistics.

    Scale the data by subtracting the median and dividing by the
    variance, i.e. the difference between the 75th and 25th percentiles.

    Parameters
    ----------
    data : pandas DataFrame, shape (n_spectra, n_freq_point)
        DataFrame containing all Raman spectra.

    median : ndarray, shape (n_freq_point,)
        Median for each wavelength.

    var_25_75 : ndarray, shape (n_freq_point,)
        Variance (difference between the 75th and 25th
        percentiles) for each wavelength.

    Returns
    -------
    data_scaled : pandas DataFrame, shape (n_spectra, n_freq_point)
        DataFrame containing all scaled Raman spectra.
    """
    centered = data - median
    return centered / var_25_75
| |
import contextlib
import sys
import inspect
import os
import shutil
import appuselfbot
import glob
import math
from PythonGists import PythonGists
from discord.ext import commands
from io import StringIO
from cogs.utils.checks import *
# Common imports that can be used by the debugger.
import requests
import json
import gc
import datetime
import time
import traceback
import prettytable
import re
import io
import asyncio
import discord
import random
import subprocess
from bs4 import BeautifulSoup
import urllib
import psutil
'''Module for the python interpreter as well as saving, loading, viewing, etc. the cmds/scripts ran with the interpreter.'''
# Used to get the output of exec()
@contextlib.contextmanager
def stdoutIO(stdout=None):
    """Temporarily redirect sys.stdout so exec() output can be captured.

    Args:
        stdout: optional file-like object to capture into; a fresh StringIO
            is created when None.

    Yields:
        The capture buffer; call .getvalue() after the block to read what
        the executed code printed.
    """
    old = sys.stdout
    if stdout is None:
        stdout = StringIO()
    sys.stdout = stdout
    try:
        yield stdout
    finally:
        # Restore in a finally so an exception raised by the managed block
        # cannot leave sys.stdout hijacked for the rest of the process.
        sys.stdout = old
class Debugger:
    """Cog exposing a live Python interpreter (``>py``) plus helpers to
    save, run, list, view, and delete the snippets executed with it, and
    commands to load/unload bot modules."""

    def __init__(self, bot):
        # The running discord client; every command sends through it.
        self.bot = bot

    # Executes/evaluates code. Got the idea from RoboDanny bot by Rapptz. RoboDanny uses eval() but I use exec() to cover a wider scope of possible inputs.
    async def interpreter(self, env, code):
        """Evaluate/execute ``code`` inside ``env`` and format the reply.

        A leading ``[m]`` marker suppresses the code-block formatting of the
        reply.  Output longer than 1950 characters (near Discord's message
        limit) is posted to a Gist instead of sent directly.
        """
        if code.startswith('[m]'):
            code = code[3:].strip()
            code_block = False
        else:
            code_block = True
        try:
            # Try eval() first so plain expressions return their value.
            result = eval(code, env)
            if inspect.isawaitable(result):
                result = await result
            if not result:
                # Falsy eval result: re-run under exec() and capture stdout.
                try:
                    with stdoutIO() as s:
                        result = exec(code, env)
                        if inspect.isawaitable(result):
                            result = await result
                        result = s.getvalue()
                except Exception as g:
                    return appuselfbot.bot_prefix + '```{}```'.format(type(g).__name__ + ': ' + str(g))
        except SyntaxError:
            # Statements are not valid under eval(); fall back to exec()
            # with stdout capture.
            try:
                with stdoutIO() as s:
                    result = exec(code, env)
                    if inspect.isawaitable(result):
                        result = await result
                    result = s.getvalue()
            except Exception as g:
                return appuselfbot.bot_prefix + '```{}```'.format(type(g).__name__ + ': ' + str(g))
        except Exception as e:
            return appuselfbot.bot_prefix + '```{}```'.format(type(e).__name__ + ': ' + str(e))
        if len(str(result)) > 1950:
            # Too long for one Discord message: post to a Gist and link it.
            url = PythonGists.Gist(description='Py output', content=str(result), name='output.txt')
            return appuselfbot.bot_prefix + 'Large output. Posted to Gist: %s' % url
        else:
            if code_block:
                return appuselfbot.bot_prefix + '```py\n{}\n```'.format(result)
            else:
                return result

    @commands.group(pass_context=True)
    async def py(self, ctx):
        """Python interpreter. See the wiki for more info."""
        if ctx.invoked_subcommand is None:
            pre = cmd_prefix_len()
            code = ctx.message.content[2 + pre:].strip().strip('` ')
            # Expose common discord objects plus this module's globals
            # (requests, json, etc.) to the executed snippet.
            env = {
                'bot': self.bot,
                'ctx': ctx,
                'message': ctx.message,
                'server': ctx.message.server,
                'channel': ctx.message.channel,
                'author': ctx.message.author
            }
            env.update(globals())
            result = await self.interpreter(env, code)
            os.chdir(os.getcwd())
            # Remember the last snippet so ">py save" can persist it later.
            with open('%s/cogs/utils/temp.txt' % os.getcwd(), 'w') as temp:
                temp.write(ctx.message.content[2 + pre:].strip())
            await self.bot.send_message(ctx.message.channel, result)

    # Save last >py cmd/script.
    @py.command(pass_context=True)
    async def save(self, ctx, *, msg):
        """Save the code you last ran. Ex: >py save stuff"""
        msg = msg.strip()[:-4] if msg.strip().endswith('.txt') else msg.strip()
        os.chdir(os.getcwd())
        if not os.path.exists('%s/cogs/utils/temp.txt' % os.getcwd()):
            return await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Nothing to save. Run a ``>py`` cmd/script first.')
        if not os.path.isdir('%s/cogs/utils/save/' % os.getcwd()):
            os.makedirs('%s/cogs/utils/save/' % os.getcwd())
        if os.path.exists('%s/cogs/utils/save/%s.txt' % (os.getcwd(), msg)):
            # Confirm before overwriting an existing saved script.
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + '``%s.txt`` already exists. Overwrite? ``y/n``.' % msg)
            reply = await self.bot.wait_for_message(author=ctx.message.author)
            if reply.content.lower().strip() != 'y':
                return await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Cancelled.')
            if os.path.exists('%s/cogs/utils/save/%s.txt' % (os.getcwd(), msg)):
                os.remove('%s/cogs/utils/save/%s.txt' % (os.getcwd(), msg))
        try:
            shutil.move('%s/cogs/utils/temp.txt' % os.getcwd(), '%s/cogs/utils/save/%s.txt' % (os.getcwd(), msg))
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Saved last run cmd/script as ``%s.txt``' % msg)
        except:
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Error saving file as ``%s.txt``' % msg)

    # Load a cmd/script saved with the >save cmd
    @py.command(aliases=['start'], pass_context=True)
    async def run(self, ctx, *, msg):
        """Run code that you saved with the save commmand. Ex: >py run stuff"""
        save_file = msg[:-4].strip() if msg.endswith('.txt') else msg.strip()
        if not os.path.exists('%s/cogs/utils/save/%s.txt' % (os.getcwd(), save_file)):
            return await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Could not find file ``%s.txt``' % save_file)
        script = open('%s/cogs/utils/save/%s.txt' % (os.getcwd(), save_file)).read()
        # Same execution environment as the bare >py command.
        env = {
            'bot': self.bot,
            'ctx': ctx,
            'message': ctx.message,
            'server': ctx.message.server,
            'channel': ctx.message.channel,
            'author': ctx.message.author
        }
        env.update(globals())
        result = await self.interpreter(env, script.strip('` '))
        await self.bot.send_message(ctx.message.channel, result)

    # List saved cmd/scripts
    @py.command(aliases=['ls'], pass_context=True)
    async def list(self, ctx, txt: str = None):
        """List all saved scripts. Ex: >py list or >py ls"""
        os.chdir('%s/cogs/utils/save/' % os.getcwd())
        try:
            if txt:
                numb = txt.strip()
                if numb.isdigit():
                    numb = int(numb)
                else:
                    # NOTE(review): on non-numeric input this message is sent
                    # but execution continues with ``numb`` still a str; the
                    # ``numb < 1`` comparison below then raises and is
                    # reported by the generic handler — presumably
                    # unintended.
                    await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Invalid syntax. Ex: ``>py list 1``')
            else:
                numb = 1
            filelist = glob.glob('*.txt')
            if len(filelist) == 0:
                return await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'No saved cmd/scripts.')
            filelist.sort()
            msg = ''
            pages = int(math.ceil(len(filelist) / 10))
            # Clamp the requested page into [1, pages].
            if numb < 1:
                numb = 1
            elif numb > pages:
                numb = pages
            # Collect the (up to) 10 entries for the requested page.
            for i in range(10):
                try:
                    msg += filelist[i + (10 * (numb-1))] + '\n'
                except:
                    break
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'List of saved cmd/scripts. Page ``%s of %s`` ```%s```' % (numb, pages, msg))
        except Exception as e:
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Error, something went wrong: ``%s``' % e)
        finally:
            # Walk back up to the bot's root directory.
            os.chdir('..')
            os.chdir('..')
            os.chdir('..')

    # View a saved cmd/script
    @py.group(aliases=['vi', 'vim'], pass_context=True)
    async def view(self, ctx, *, msg: str):
        """View a saved script's contents. Ex: >py view stuff"""
        msg = msg.strip()[:-4] if msg.strip().endswith('.txt') else msg.strip()
        os.chdir('%s/cogs/utils/save/' % os.getcwd())
        try:
            if os.path.exists('%s.txt' % msg):
                f = open('%s.txt' % msg, 'r').read()
                await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Viewing ``%s.txt``: ```%s```' % (msg, f.strip('` ')))
            else:
                await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + '``%s.txt`` does not exist.' % msg)
        except Exception as e:
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Error, something went wrong: ``%s``' % e)
        finally:
            # Walk back up to the bot's root directory.
            os.chdir('..')
            os.chdir('..')
            os.chdir('..')

    # Delete a saved cmd/script
    @py.group(aliases=['rm'], pass_context=True)
    async def delete(self, ctx, *, msg: str):
        """Delete a saved script. Ex: >py delete stuff"""
        msg = msg.strip()[:-4] if msg.strip().endswith('.txt') else msg.strip()
        os.chdir('%s/cogs/utils/save/' % os.getcwd())
        try:
            if os.path.exists('%s.txt' % msg):
                os.remove('%s.txt' % msg)
                await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Deleted ``%s.txt`` from saves.' % msg)
            else:
                await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + '``%s.txt`` does not exist.' % msg)
        except Exception as e:
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Error, something went wrong: ``%s``' % e)
        finally:
            # Walk back up to the bot's root directory.
            os.chdir('..')
            os.chdir('..')
            os.chdir('..')

    @commands.command(pass_context=True)
    async def load(self, ctx, *, msg):
        """Load a module"""
        try:
            self.bot.load_extension(msg)
        except Exception as e:
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Failed to load module: `{}`'.format(msg))
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + '{}: {}'.format(type(e).__name__, e))
        else:
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Loaded module: `{}`'.format(msg))
        # Remove the invoking message either way to keep the channel clean.
        await self.bot.delete_message(ctx.message)

    @commands.command(pass_context=True)
    async def unload(self, ctx, *, msg):
        """Unload a module"""
        try:
            self.bot.unload_extension(msg)
        except Exception as e:
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Failed to unload module: `{}`'.format(msg))
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + '{}: {}'.format(type(e).__name__, e))
        else:
            await self.bot.send_message(ctx.message.channel, appuselfbot.bot_prefix + 'Unloaded module: `{}`'.format(msg))
        # Remove the invoking message either way to keep the channel clean.
        await self.bot.delete_message(ctx.message)
def setup(bot):
    """Cog entry point: attach the Debugger cog to the bot."""
    debugger_cog = Debugger(bot)
    bot.add_cog(debugger_cog)
| |
# Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import yaml
import testtools
from armada.handlers.override import Override
from armada.exceptions import override_exceptions
from armada import const
class OverrideTestCase(testtools.TestCase):
def setUp(self):
super(OverrideTestCase, self).setUp()
self.basepath = os.path.join(os.path.dirname(__file__))
self.base_manifest = '{}/templates/base.yaml'.format(self.basepath)
def test_update_manifests_no_overrides_and_values_valid(self):
with open(self.base_manifest) as f:
documents = list(yaml.safe_load_all(f.read()))
ovr = Override(documents)
ovr.update_manifests()
# no updates since no overrides and values provided
self.assertEqual(documents, ovr.documents)
def test_update_manifests_with_values_valid(self):
original = "{}/templates/override-{}.yaml".format(self.basepath, '01')
values_yaml = "{}/templates/override-{}-expected.yaml".format(
self.basepath, '01')
with open(original) as f, open(values_yaml) as g:
original_documents = list(yaml.safe_load_all(f.read()))
documents_copy = copy.deepcopy(original_documents)
values_documents = list(yaml.safe_load_all(g.read()))
ovr = Override(original_documents, None, [values_yaml])
ovr.update_manifests()
# updating values changed the original document
self.assertNotEqual(original_documents, documents_copy)
# verifying that these documents have the same value now
self.assertEqual(original_documents, values_documents)
def test_update_manifests_with_values_and_overrides_valid(self):
values_yaml = "{}/templates/override-{}-expected.yaml".format(
self.basepath, '01')
comparison_yaml = "{}/templates/override-{}-expected.yaml".format(
self.basepath, '03')
with open(self.base_manifest) as f, open(values_yaml) as g:
original_documents = list(yaml.safe_load_all(f.read()))
documents_copy = copy.deepcopy(original_documents)
values_documents = list(yaml.safe_load_all(g.read()))
override = ('manifest:simple-armada:release_prefix='
'overridden',)
# Case 1: Checking if primitive gets updated.
ovr = Override(original_documents, override, [values_yaml])
ovr.update_manifests()
# updating values changed the original document
self.assertNotEqual(original_documents, documents_copy)
# since overrides done, these documents aren't same anymore
self.assertNotEqual(original_documents, values_documents)
target_doc = [x
for x
in ovr.documents
if x.get('metadata').get('name') == 'simple-armada'][0]
self.assertEqual('overridden',
target_doc['data']['release_prefix'])
override = ('manifest:simple-armada:chart_groups='
'blog-group3,blog-group4',)
# Case 2: Checking if list gets updated.
ovr = Override(original_documents, override, [values_yaml])
ovr.update_manifests()
# updating values changed the original document
self.assertNotEqual(original_documents, documents_copy)
# since overrides done, these documents aren't same anymore
self.assertNotEqual(original_documents, values_documents)
with open(comparison_yaml) as c:
comparison_documents = list(yaml.safe_load_all(c.read()))
# verifying that the override is correct
self.assertEqual(original_documents[2]['data']['chart_groups'],
comparison_documents[0]['data']['chart_groups'])
def test_update_manifests_invalid_override_format(self):
    """A JSON-looking override value must raise a JSON decoding error."""
    with open(self.base_manifest) as manifest_file:
        original_documents = list(yaml.safe_load_all(manifest_file.read()))
    original_documents[-1]['data']['test'] = {'foo': 'bar'}
    override = ('manifest:simple-armada:test='
                '{"foo": "bar"}',)
    ovr = Override(original_documents, override, [])
    self.assertRaises(json.decoder.JSONDecodeError, ovr.update_manifests)
def test_load_yaml_file(self):
    """_load_yaml_file parses an on-disk file into a list of documents."""
    with open(self.base_manifest) as manifest_file:
        docs = list(yaml.safe_load_all(manifest_file.read()))
    loaded = Override(docs)._load_yaml_file(self.base_manifest)
    self.assertIsInstance(loaded, list)
def test_find_document_type_valid(self):
    """Each known document alias maps to its schema constant."""
    with open(self.base_manifest) as manifest_file:
        docs = list(yaml.safe_load_all(manifest_file.read()))
    ovr = Override(docs)
    # Table-driven: alias -> expected schema constant.
    expectations = (('chart_group', const.DOCUMENT_GROUP),
                    ('chart', const.DOCUMENT_CHART),
                    ('manifest', const.DOCUMENT_MANIFEST))
    for alias, doc_type in expectations:
        self.assertEqual(ovr.find_document_type(alias), doc_type)
def test_update_chart_document_valid(self):
    """update_chart_document merges primitive, dict and list fields."""
    with open(self.base_manifest) as manifest_file:
        docs = list(yaml.safe_load_all(manifest_file.read()))
    modified = copy.deepcopy(docs)

    # Case 1: a primitive field is propagated.
    modified[0]['data']['chart_name'] = 'modified'
    # Before the update the two documents differ.
    self.assertNotEqual(docs[0], modified[0])
    ovr = Override(docs)
    ovr.update_chart_document(modified[0])
    # After the update the documents agree.
    self.assertEqual(ovr.documents[0]['data']['chart_name'],
                     modified[0]['data']['chart_name'])
    self.assertEqual(ovr.documents[0], modified[0])

    # Case 2: a dictionary field is propagated.
    modified[0]['data']['values'] = {'foo': 'bar'}
    ovr.update_chart_document(modified[0])
    self.assertEqual(ovr.documents[0]['data']['values'],
                     modified[0]['data']['values'])
    self.assertEqual(ovr.documents[0], modified[0])

    # Case 3: a list field is propagated.
    modified[0]['data']['dependencies'] = ['foo', 'bar']
    ovr.update_chart_document(modified[0])
    self.assertEqual(['foo', 'bar'],
                     ovr.documents[0]['data']['dependencies'])
    self.assertEqual(modified[0]['data']['dependencies'],
                     ovr.documents[0]['data']['dependencies'])
    self.assertEqual(ovr.documents[0], modified[0])
def test_update_chart_document_keys_not_removed_with_override(self):
    """A key absent from the override chart document is not deleted."""
    with open(self.base_manifest) as manifest_file:
        docs = list(yaml.safe_load_all(manifest_file.read()))
    modified = copy.deepcopy(docs)
    del modified[0]['data']['chart_name']
    # Deleting the key makes the two documents differ.
    self.assertNotEqual(docs[0], modified[0])
    ovr = Override(docs)
    ovr.update_chart_document(modified[0])
    # The original key survives the merge, so they still differ.
    self.assertIn('chart_name', ovr.documents[0]['data'])
    self.assertNotEqual(ovr.documents[0], modified[0])
def test_update_chart_group_document_valid(self):
    """update_chart_group_document propagates a modified field."""
    with open(self.base_manifest) as manifest_file:
        docs = list(yaml.safe_load_all(manifest_file.read()))
    modified = copy.deepcopy(docs)
    modified[1]['data']['sequenced'] = True
    # Before the update the two documents differ.
    self.assertNotEqual(docs[1], modified[1])
    ovr = Override(docs)
    ovr.update_chart_group_document(modified[1])
    # After the update the documents agree.
    self.assertEqual(ovr.documents[1]['data']['sequenced'],
                     modified[1]['data']['sequenced'])
    self.assertEqual(ovr.documents[1], modified[1])
def test_update_chart_group_document_keys_not_removed_with_override(self):
    """A key absent from the override chart-group document is not deleted."""
    with open(self.base_manifest) as manifest_file:
        docs = list(yaml.safe_load_all(manifest_file.read()))
    modified = copy.deepcopy(docs)
    del modified[1]['data']['sequenced']
    # Deleting the key makes the two documents differ.
    self.assertNotEqual(docs[1], modified[1])
    ovr = Override(docs)
    ovr.update_chart_group_document(modified[1])
    # The original key survives the merge, so they still differ.
    self.assertIn('sequenced', ovr.documents[1]['data'])
    self.assertNotEqual(ovr.documents[1], modified[1])
def test_update_armada_manifest_valid(self):
    """update_armada_manifest propagates a modified field."""
    with open(self.base_manifest) as manifest_file:
        docs = list(yaml.safe_load_all(manifest_file.read()))
    modified = copy.deepcopy(docs)
    modified[2]['data']['release_prefix'] = 'armada-modified'
    # Before the update the two documents differ.
    self.assertNotEqual(docs[2], modified[2])
    ovr = Override(docs)
    ovr.update_armada_manifest(modified[2])
    # After the update the documents agree.
    self.assertEqual(ovr.documents[2]['data']['release_prefix'],
                     modified[2]['data']['release_prefix'])
    self.assertEqual(ovr.documents[2], modified[2])
def test_update_armada_manifest_keys_not_removed_with_override(self):
    """A key absent from the override manifest document is not deleted."""
    with open(self.base_manifest) as manifest_file:
        docs = list(yaml.safe_load_all(manifest_file.read()))
    modified = copy.deepcopy(docs)
    del modified[2]['data']['release_prefix']
    # Deleting the key makes the two documents differ.
    self.assertNotEqual(docs[2], modified[2])
    ovr = Override(docs)
    ovr.update_armada_manifest(modified[2])
    # The original key survives the merge, so they still differ.
    self.assertIn('release_prefix', ovr.documents[2]['data'])
    self.assertNotEqual(ovr.documents[2], modified[2])
def test_update_dictionary_valid(self):
    """Merging a values file updates the targeted chart document.

    Loads the base manifest, merges the override-01 values file into it
    via ``update_document`` and verifies the ``blog-1`` chart document
    equals the first document of override-01-expected.yaml.
    """
    expected = "{}/templates/override-{}-expected.yaml".format(
        self.basepath, '01')
    merge = "{}/templates/override-{}.yaml".format(self.basepath, '01')
    with open(self.base_manifest) as f, open(expected) as e, open(
            merge) as m:
        merging_values = list(yaml.safe_load_all(m.read()))
        documents = list(yaml.safe_load_all(f.read()))
        doc_path = ['chart', 'blog-1']
        ovr = Override(documents)
        ovr.update_document(merging_values)
        ovr_doc = ovr.find_manifest_document(doc_path)
        # Use safe_load_all (not load_all without a Loader): consistent
        # with the rest of this suite and avoids PyYAML's deprecated,
        # arbitrary-object-constructing default loader.
        expect_doc = list(yaml.safe_load_all(e.read()))[0]
        self.assertEqual(ovr_doc, expect_doc)
def test_set_list_valid(self):
    """A CLI override replaces the manifest's chart_groups list.

    Applies a ``chart_groups`` override to the simple-armada manifest and
    compares its data against the armada/Manifest/v1 document of
    override-03-expected.yaml.
    """
    expected = "{}/templates/override-{}-expected.yaml".format(
        self.basepath, '03')
    with open(self.base_manifest) as f, open(expected) as e:
        documents = list(yaml.safe_load_all(f.read()))
        doc_path = ['manifest', 'simple-armada']
        # Implicit string concatenation, NOT a backslash continuation
        # inside the literal: the original continuation embedded the
        # next line's leading indentation into the override value.
        override = ('manifest:simple-armada:chart_groups='
                    'blog-group3,blog-group4',)
        ovr = Override(documents, override)
        ovr.update_manifests()
        ovr_doc = ovr.find_manifest_document(doc_path)
        # safe_load_all: consistent with the rest of the suite and avoids
        # PyYAML's deprecated default loader.
        target_docs = list(yaml.safe_load_all(e.read()))
        expected_doc = [x
                        for x
                        in target_docs
                        if x.get('schema') == 'armada/Manifest/v1'][0]
        self.assertEqual(expected_doc.get('data'), ovr_doc.get('data'))
def test_find_manifest_document_valid(self):
    """find_manifest_document locates a chart document by its path."""
    expected = "{}/templates/override-{}-expected.yaml".format(
        self.basepath, '02')
    with open(self.base_manifest) as manifest_file, open(expected) as exp_file:
        doc_path = ['chart', 'blog-1']
        docs = list(yaml.safe_load_all(manifest_file.read()))
        found = Override(docs).find_manifest_document(doc_path)
        expected_doc = list(yaml.safe_load_all(exp_file.read()))[0]
        self.assertEqual(found, expected_doc)
def test_convert_array_to_dict_valid(self):
    """array_to_dict nests the path keys around the new value."""
    result = Override(self.base_manifest).array_to_dict(
        ['a', 'b', 'c'], "dev")
    self.assertEqual(result, {'a': {'b': {'c': 'dev'}}})
class OverrideNegativeTestCase(testtools.TestCase):
    """Negative-path tests for the Override document handler."""

    def setUp(self):
        super(OverrideNegativeTestCase, self).setUp()
        # os.path.join with a single argument is a no-op; dirname alone
        # already yields the test directory.
        self.basepath = os.path.dirname(__file__)
        self.base_manifest = '{}/templates/base.yaml'.format(self.basepath)

    def test_update_manifests_invalid(self):
        """A non-existent values file raises InvalidOverrideValueException."""
        missing_yaml = "{}/templates/non_existing_yaml.yaml".format(
            self.basepath)
        # NOTE(review): the original wrapped this in
        # `with open(self.base_manifest):` but discarded the handle; the
        # unused open has been dropped.
        ovr = Override(missing_yaml)
        self.assertRaises(
            override_exceptions.InvalidOverrideValueException,
            ovr.update_manifests)

    def test_load_yaml_file_invalid(self):
        """_load_yaml_file raises for a missing file path."""
        missing_yaml = "{}/templates/non_existing_yaml.yaml".format(
            self.basepath)
        with open(self.base_manifest) as f:
            documents = list(yaml.safe_load_all(f.read()))
        ovr = Override(documents)
        self.assertRaises(override_exceptions.InvalidOverrideFileException,
                          ovr._load_yaml_file, missing_yaml)

    def test_find_document_type_invalid(self):
        """An unknown document alias raises ValueError."""
        with open(self.base_manifest) as f:
            documents = list(yaml.safe_load_all(f.read()))
        ovr = Override(documents)
        self.assertRaises(ValueError, ovr.find_document_type,
                          'non_existing_document')

    def test_convert_array_to_dict_invalid(self):
        """array_to_dict returns None for an empty value or empty path."""
        data_path = ['a', 'b', 'c']
        new_value = ""
        ovr = Override(self.base_manifest).array_to_dict(data_path, new_value)
        self.assertIsNone(ovr)
        ovr = Override(self.base_manifest).array_to_dict([], new_value)
        self.assertIsNone(ovr)
# NOTE: trailing non-source residue removed here — the lines "Subsets and
# Splits", "No community queries yet", etc. were dataset-viewer banner text
# accidentally appended to the file and were not valid Python.