gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from functools import wraps
import inspect
import sys
import textwrap
from fabric import state
from fabric.utils import abort, warn, error
from fabric.network import to_dict, normalize_to_string, disconnect_all
from fabric.context_managers import settings
from fabric.job_queue import JobQueue
from fabric.task_utils import crawl, merge, parse_kwargs
from fabric.exceptions import NetworkError
import collections
if sys.version_info[:2] == (2, 5):
    # Python 2.5 inspect.getargspec returns a tuple
    # instead of ArgSpec namedtuple.
    class ArgSpec(object):
        # Minimal stand-in for the ArgSpec namedtuple introduced in
        # Python 2.6: supports both attribute access and tuple-style
        # indexing so callers can treat the two APIs uniformly.
        def __init__(self, args, varargs, keywords, defaults):
            self.args = args
            self.varargs = varargs
            self.keywords = keywords
            self.defaults = defaults
            # Tuple mirror backing __getitem__ for old-style callers.
            self._tuple = (args, varargs, keywords, defaults)

        def __getitem__(self, idx):
            return self._tuple[idx]

    def patched_get_argspec(func):
        # Delegate to the stashed original (tuple-returning) implementation
        # and wrap its result in the attribute-friendly ArgSpec.
        return ArgSpec(*inspect._getargspec(func))

    # Monkeypatch at import time: keep the original under a private name,
    # then expose the wrapper as the public inspect.getargspec.
    inspect._getargspec = inspect.getargspec
    inspect.getargspec = patched_get_argspec
def get_task_details(task):
    """
    Return a human-readable description of ``task``.

    The result is the task's dedented docstring (or a placeholder when it
    has none) followed by a one-line summary of its call signature, e.g.
    ``Arguments: a, b, c=1``.
    """
    details = [
        textwrap.dedent(task.__doc__)
        if task.__doc__
        else 'No docstring provided']
    # inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() when it exists while preserving the Python 2 code
    # path (including the 2.5 monkeypatch applied at module import time).
    getargspec = getattr(inspect, 'getfullargspec', inspect.getargspec)
    argspec = getargspec(task)
    default_args = argspec.defaults or ()
    num_default_args = len(default_args)
    # Split positional args at the defaults boundary explicitly: a naive
    # negative slice (args[-0:]) would return *all* args when there are no
    # defaults.
    boundary = len(argspec.args) - num_default_args
    args_without_defaults = argspec.args[:boundary]
    args_with_defaults = argspec.args[boundary:]
    details.append('Arguments: %s' % (
        ', '.join(
            args_without_defaults + [
                '%s=%r' % (arg, default)
                for arg, default in zip(args_with_defaults, default_args)
            ])
    ))
    return '\n'.join(details)
def _get_list(env):
def inner(key):
return env.get(key, [])
return inner
class Task(object):
    """
    Abstract base class for objects wishing to be picked up as Fabric tasks.

    Instances of subclasses will be treated as valid tasks when present in
    fabfiles loaded by the :doc:`fab </usage/fab>` tool.

    For details on how to implement and use `~fabric.tasks.Task` subclasses,
    please see the usage documentation on :ref:`new-style tasks
    <new-style-tasks>`.

    .. versionadded:: 1.1
    """
    # Class-level defaults; instances may shadow any of these.
    name = 'undefined'
    use_task_objects = True
    aliases = None
    is_default = False

    # TODO: make it so that this wraps other decorators as expected
    def __init__(self, alias=None, aliases=None, default=False, name=None,
                 *args, **kwargs):
        # Apply the single ``alias`` first so an explicit ``aliases`` list
        # takes precedence when both are given.
        if alias is not None:
            self.aliases = [alias]
        if aliases is not None:
            self.aliases = aliases
        if name is not None:
            self.name = name
        self.is_default = default

    def __details__(self):
        return get_task_details(self.run)

    def run(self):
        raise NotImplementedError

    def get_hosts_and_effective_roles(self, arg_hosts, arg_roles, arg_exclude_hosts, env=None):
        """
        Return a tuple containing the host list the given task should be using
        and the roles being used.

        Sources are consulted in decreasing priority: per-task command-line
        values, then decorator-supplied values, then the global ``env``.

        See :ref:`host-lists` for detailed documentation on how host lists are
        set.

        .. versionchanged:: 1.9
        """
        if not env:
            env = {'hosts': [], 'roles': [], 'exclude_hosts': []}
        roledefs = env.get('roledefs', {})
        # 1) Command line per-task values trump everything else.
        if arg_hosts or arg_roles:
            merged = merge(arg_hosts, arg_roles, arg_exclude_hosts, roledefs)
            return merged, arg_roles
        # 2) Decorator-specific hosts/roles go next.
        deco_hosts = getattr(self, 'hosts', [])
        deco_roles = getattr(self, 'roles', [])
        if deco_hosts or deco_roles:
            merged = merge(deco_hosts, deco_roles, arg_exclude_hosts, roledefs)
            return merged, deco_roles
        # 3) Finally fall back to the env (globally set lists from the CLI or
        # module-level code). These default to empty lists, which correctly
        # yields an empty host list when nothing was set anywhere.
        merge_args = [env.get(key, [])
                      for key in ('hosts', 'roles', 'exclude_hosts')]
        merge_args.append(roledefs)
        return merge(*merge_args), env.get('roles', [])

    def get_pool_size(self, hosts, default):
        """
        Return the parallel pool size to use when running on ``hosts``.

        A per-task ``pool_size`` attribute overrides ``default``; the result
        is never larger than the number of hosts.
        """
        # Recomputed per task in case variables change; cast to int in case
        # somebody supplied a string.
        fallback = default or len(hosts)
        pool_size = int(getattr(self, 'pool_size', None) or fallback)
        pool_size = min(pool_size, len(hosts))
        # Inform user of final pool size for this task
        if state.output.debug:
            print("Parallel tasks now using pool size of %d" % pool_size)
        return pool_size
class WrappedCallableTask(Task):
    """
    Wraps a given callable transparently, while marking it as a valid Task.

    Generally used via `~fabric.decorators.task` and not directly.

    .. versionadded:: 1.1

    .. seealso:: `~fabric.docs.unwrap_tasks`, `~fabric.decorators.task`
    """
    def __init__(self, callable, *args, **kwargs):
        super(WrappedCallableTask, self).__init__(*args, **kwargs)
        self.wrapped = callable
        # Deliberately probe with hasattr() rather than getattr() with a
        # default: when the callable has no __name__ we must leave
        # self.name untouched so the superclass default survives.
        if hasattr(callable, '__name__'):
            if self.name != 'undefined':
                self.__name__ = self.name
            else:
                self.__name__ = self.name = callable.__name__
        # Mirror documentation/module metadata from the wrapped callable.
        for attr in ('__doc__', '__module__'):
            if hasattr(callable, attr):
                setattr(self, attr, getattr(callable, attr))

    def __call__(self, *args, **kwargs):
        # Calling the task object is the same as running it.
        return self.run(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.wrapped(*args, **kwargs)

    def __getattr__(self, k):
        # Delegate unknown attribute lookups to the wrapped callable.
        return getattr(self.wrapped, k)

    def __details__(self):
        # Unwind any chain of wrappers so we document the innermost callable.
        innermost = self
        while 'wrapped' in innermost.__dict__:
            innermost = innermost.__dict__.get('wrapped')
        return get_task_details(innermost)
def requires_parallel(task):
    """
    Returns True if given ``task`` should be run in parallel mode.

    Specifically:

    * It's been explicitly marked with ``@parallel``, or:
    * It's *not* been explicitly marked with ``@serial`` *and* the global
      parallel option (``env.parallel``) is set to ``True``.
    """
    marked_parallel = getattr(task, 'parallel', False)
    globally_parallel = (
        state.env.parallel and not getattr(task, 'serial', False))
    return globally_parallel or marked_parallel
def _parallel_tasks(commands_to_run):
    """
    Return True when any command in ``commands_to_run`` maps to a task
    that should execute in parallel.
    """
    return any(
        requires_parallel(crawl(spec[0], state.commands))
        for spec in commands_to_run
    )
def _is_network_error_ignored():
    """
    Return True when network errors should be skipped instead of raised.
    """
    ignoring_exceptions = not state.env.use_exceptions_for['network']
    return ignoring_exceptions and state.env.skip_bad_hosts
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()

    When ``queue`` is None, runs ``task`` serially under a per-host env and
    returns its result. Otherwise wraps the run in a
    ``multiprocessing.Process`` appended to ``jobs``, which later reports
    its result (or the raised exception) back over ``queue``.
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    # Handle parallel execution
    if queue is not None:  # Since queue is only set for parallel
        name = local_env['host_string']

        # Wrap in another callable that:
        # * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the tasks' return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name, env):
            state.env.update(env)

            def submit(result):
                queue.put({'name': name, 'result': result})
            try:
                # Fresh connections per child process; sharing cached
                # connections across processes is unsafe.
                state.connections.clear()
                submit(task.run(*args, **kwargs))
            except BaseException as e:  # We really do want to capture everything
                # SystemExit implies use of abort(), which prints its own
                # traceback, host info etc -- so we don't want to double up
                # on that. For everything else, though, we need to make
                # clear what host encountered the exception that will
                # print.
                if e.__class__ is not SystemExit:
                    if not (isinstance(e, NetworkError) and
                            _is_network_error_ignored()):
                        sys.stderr.write("!!! Parallel execution exception under host %r:\n" % name)
                    submit(e)
                # Here, anything -- unexpected exceptions, or abort()
                # driven SystemExits -- will bubble up and terminate the
                # child process.
                if not (isinstance(e, NetworkError) and
                        _is_network_error_ignored()):
                    raise

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name,
            'env': local_env,
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
    # Handle serial execution
    else:
        with settings(**local_env):
            return task.run(*args, **kwargs)
def _is_task(task):
    """
    Return True when ``task`` is already a `Task` instance (as opposed to a
    bare callable or a task-name string).
    """
    return isinstance(task, Task)
def execute(task, *args, **kwargs):
    """
    Execute ``task`` (callable or name), honoring host/role decorators, etc.

    ``task`` may be an actual callable object, or it may be a registered task
    name, which is used to look up a callable just as if the name had been
    given on the command line (including :ref:`namespaced tasks <namespaces>`,
    e.g. ``"deploy.migrate"``.

    The task will then be executed once per host in its host list, which is
    (again) assembled in the same manner as CLI-specified tasks: drawing from
    :option:`-H`, :ref:`env.hosts <hosts>`, the `~fabric.decorators.hosts` or
    `~fabric.decorators.roles` decorators, and so forth.

    ``host``, ``hosts``, ``role``, ``roles`` and ``exclude_hosts`` kwargs will
    be stripped out of the final call, and used to set the task's host list, as
    if they had been specified on the command line like e.g. ``fab
    taskname:host=hostname``.

    Any other arguments or keyword arguments will be passed verbatim into
    ``task`` (the function itself -- not the ``@task`` decorator wrapping your
    function!) when it is called, so ``execute(mytask, 'arg1',
    kwarg1='value')`` will (once per host) invoke ``mytask('arg1',
    kwarg1='value')``.

    :returns:
        a dictionary mapping host strings to the given task's return value for
        that host's execution run. For example, ``execute(foo, hosts=['a',
        'b'])`` might return ``{'a': None, 'b': 'bar'}`` if ``foo`` returned
        nothing on host `a` but returned ``'bar'`` on host `b`.

        In situations where a task execution fails for a given host but overall
        progress does not abort (such as when :ref:`env.skip_bad_hosts
        <skip-bad-hosts>` is True) the return value for that host will be the
        error object or message.

    .. seealso::
        :ref:`The execute usage docs <execute>`, for an expanded explanation
        and some examples.

    .. versionadded:: 1.3
    .. versionchanged:: 1.4
        Added the return value mapping; previously this function had no defined
        return value.
    """
    my_env = {'clean_revert': True}
    results = {}
    # Obtain task
    # NOTE: use the callable() builtin here rather than
    # isinstance(task, collections.Callable): the latter alias is deprecated
    # since Python 3.3 and was removed in 3.10 (it lives in collections.abc),
    # while callable() works on both Python 2 and 3.
    is_callable = callable(task)
    if not (is_callable or _is_task(task)):
        # Assume string, set env.command to it
        my_env['command'] = task
        task = crawl(task, state.commands)
        if task is None:
            msg = "%r is not callable or a valid task name" % (my_env['command'],)
            if state.env.get('skip_unknown_tasks', False):
                warn(msg)
                return
            else:
                abort(msg)
    # Set env.command if we were given a real function or callable task obj
    else:
        dunder_name = getattr(task, '__name__', None)
        my_env['command'] = getattr(task, 'name', dunder_name)
    # Normalize to Task instance if we ended up with a regular callable
    if not _is_task(task):
        task = WrappedCallableTask(task)
    # Filter out hosts/roles kwargs
    new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs)
    # Set up host list
    my_env['all_hosts'], my_env['effective_roles'] = \
        task.get_hosts_and_effective_roles(hosts, roles, exclude_hosts,
                                           state.env)

    parallel = requires_parallel(task)
    if parallel:
        # Import multiprocessing if needed, erroring out usefully
        # if it can't.
        try:
            import multiprocessing
        except ImportError:
            import traceback
            tb = traceback.format_exc()
            abort(tb + """
    At least one task needs to be run in parallel, but the
    multiprocessing module cannot be imported (see above
    traceback.) Please make sure the module is installed
    or that the above ImportError is fixed.""")
    else:
        multiprocessing = None

    # Get pool size for this task
    pool_size = task.get_pool_size(my_env['all_hosts'], state.env.pool_size)
    # Set up job queue in case parallel is needed
    queue = multiprocessing.Queue() if parallel else None
    jobs = JobQueue(pool_size, queue)
    if state.output.debug:
        jobs._debug = True

    # Call on host list
    if my_env['all_hosts']:
        # Attempt to cycle on hosts, skipping if needed
        for host in my_env['all_hosts']:
            try:
                results[host] = _execute(
                    task, host, my_env, args, new_kwargs, jobs, queue,
                    multiprocessing
                )
            except NetworkError as e:
                results[host] = e
                # Backwards compat test re: whether to use an exception or
                # abort
                if not state.env.use_exceptions_for['network']:
                    func = warn if state.env.skip_bad_hosts else abort
                    error(e.message, func=func, exception=e.wrapped)
                else:
                    raise

            # If requested, clear out connections here and not just at the end.
            if state.env.eagerly_disconnect:
                disconnect_all()

        # If running in parallel, block until job queue is emptied
        if jobs:
            err = "One or more hosts failed while executing task '%s'" % (
                my_env['command']
            )
            jobs.close()
            # Abort if any children did not exit cleanly (fail-fast).
            # This prevents Fabric from continuing on to any other tasks.
            # Otherwise, pull in results from the child run.
            ran_jobs = jobs.run()
            for name, d in ran_jobs.items():
                if d['exit_code'] != 0:
                    if isinstance(d['results'], NetworkError) and \
                            _is_network_error_ignored():
                        error(d['results'].message, func=warn,
                              exception=d['results'].wrapped)
                    elif isinstance(d['results'], BaseException):
                        error(err, exception=d['results'])
                    else:
                        error(err)
                results[name] = d['results']
    # Or just run once for local-only
    else:
        with settings(**my_env):
            results['<local-only>'] = task.run(*args, **new_kwargs)
    # Return what we can from the inner task executions
    return results
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.dom import minidom
from lxml import etree
from nova.api.openstack import xmlutil
from nova import exception
from nova import test
from nova.tests import utils as tests_utils
class SelectorTest(test.NoDBTestCase):
    """Tests for xmlutil's Selector, EmptyStringSelector and
    ConstantSelector classes."""

    # Shared fixture object that all selectors are applied against.
    obj_for_test = {
        'test': {
            'name': 'test',
            'values': [1, 2, 3],
            'attrs': {
                'foo': 1,
                'bar': 2,
                'baz': 3,
            },
        },
    }

    def test_repr(self):
        sel = xmlutil.Selector()
        self.assertEqual(repr(sel), "Selector()")

    def test_empty_selector(self):
        # An empty selector returns the input object unchanged.
        sel = xmlutil.EmptyStringSelector()
        self.assertEqual(len(sel.chain), 0)
        self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
        # NOTE(review): asserting an exact dict repr depends on CPython 2
        # hash ordering -- brittle on other interpreters/versions.
        self.assertEqual(
            repr(self.obj_for_test),
            "{'test': {'values': [1, 2, 3], 'name': 'test', 'attrs': "
            "{'baz': 3, 'foo': 1, 'bar': 2}}}")

    def test_dict_selector(self):
        sel = xmlutil.Selector('test')
        self.assertEqual(len(sel.chain), 1)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel(self.obj_for_test),
                         self.obj_for_test['test'])

    def test_datum_selector(self):
        # Chained keys drill into nested dicts.
        sel = xmlutil.Selector('test', 'name')
        self.assertEqual(len(sel.chain), 2)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'name')
        self.assertEqual(sel(self.obj_for_test), 'test')

    def test_list_selector(self):
        # Integer chain entries index into lists.
        sel = xmlutil.Selector('test', 'values', 0)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'values')
        self.assertEqual(sel.chain[2], 0)
        self.assertEqual(sel(self.obj_for_test), 1)

    def test_items_selector(self):
        # get_items in the chain yields (key, value) pairs of a dict.
        sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[2], xmlutil.get_items)
        for key, val in sel(self.obj_for_test):
            self.assertEqual(self.obj_for_test['test']['attrs'][key], val)

    def test_missing_key_selector(self):
        # Missing keys yield None by default; passing the extra truthy flag
        # makes the selector raise instead (presumably a do_raise switch --
        # confirm against xmlutil).
        sel = xmlutil.Selector('test2', 'attrs')
        self.assertIsNone(sel(self.obj_for_test))
        self.assertRaises(KeyError, sel, self.obj_for_test, True)

    def test_constant_selector(self):
        # A ConstantSelector ignores its input and returns its fixed value.
        sel = xmlutil.ConstantSelector('Foobar')
        self.assertEqual(sel.value, 'Foobar')
        self.assertEqual(sel(self.obj_for_test), 'Foobar')
        self.assertEqual(repr(sel), "'Foobar'")
class TemplateElementTest(test.NoDBTestCase):
    """Tests for xmlutil.TemplateElement: attribute handling, child
    management, selectors, text handling and rendering."""

    def test_element_initial_attributes(self):
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
                                       c=4, d=5, e=6)
        # Verify all the attributes are as expected
        # (the fixture shows keyword args overriding the attrib dict: c=4)
        expected = dict(a=1, b=2, c=4, d=5, e=6)
        for k, v in expected.items():
            self.assertEqual(elem.attrib[k].chain[0], v)
        self.assertTrue(repr(elem))

    def test_element_get_attributes(self):
        expected = dict(a=1, b=2, c=3)
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Verify that get() retrieves the attributes
        for k, v in expected.items():
            self.assertEqual(elem.get(k).chain[0], v)

    def test_element_set_attributes(self):
        attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
        # Create a bare template element with no attributes
        elem = xmlutil.TemplateElement('test')
        # Set the attribute values
        for k, v in attrs.items():
            elem.set(k, v)
        # Now verify what got set: None becomes a selector on the attribute
        # name, strings become single-entry selectors, selectors pass through.
        self.assertEqual(len(elem.attrib['a'].chain), 1)
        self.assertEqual(elem.attrib['a'].chain[0], 'a')
        self.assertEqual(len(elem.attrib['b'].chain), 1)
        self.assertEqual(elem.attrib['b'].chain[0], 'foo')
        self.assertEqual(elem.attrib['c'], attrs['c'])

    def test_element_attribute_keys(self):
        attrs = dict(a=1, b=2, c=3, d=4)
        expected = set(attrs.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Now verify keys
        self.assertEqual(set(elem.keys()), expected)

    def test_element_attribute_items(self):
        expected = dict(a=xmlutil.Selector(1),
                        b=xmlutil.Selector(2),
                        c=xmlutil.Selector(3))
        keys = set(expected.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Now verify items
        for k, v in elem.items():
            self.assertEqual(expected[k], v)
            keys.remove(k)
        # Did we visit all keys?
        self.assertEqual(len(keys), 0)

    def test_element_selector_none(self):
        # Create a template element with no selector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(len(elem.selector.chain), 0)

    def test_element_selector_string(self):
        # Create a template element with a string selector
        elem = xmlutil.TemplateElement('test', selector='test')
        self.assertEqual(len(elem.selector.chain), 1)
        self.assertEqual(elem.selector.chain[0], 'test')

    def test_element_selector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit selector
        elem = xmlutil.TemplateElement('test', selector=sel)
        self.assertEqual(elem.selector, sel)

    def test_element_subselector_none(self):
        # Create a template element with no subselector
        elem = xmlutil.TemplateElement('test')
        self.assertIsNone(elem.subselector)

    def test_element_subselector_string(self):
        # Create a template element with a string subselector
        elem = xmlutil.TemplateElement('test', subselector='test')
        self.assertEqual(len(elem.subselector.chain), 1)
        self.assertEqual(elem.subselector.chain[0], 'test')

    def test_element_subselector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit subselector
        elem = xmlutil.TemplateElement('test', subselector=sel)
        self.assertEqual(elem.subselector, sel)

    def test_element_append_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        # Verify that the child was added
        self.assertEqual(len(elem), 1)
        self.assertEqual(elem[0], child)
        self.assertIn('child', elem)
        self.assertEqual(elem['child'], child)
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child')
        self.assertRaises(KeyError, elem.append, child2)

    def test_element_extend_children(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Verify that the children were added
        self.assertEqual(len(elem), 3)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        children2 = [
            xmlutil.TemplateElement('child4'),
            xmlutil.TemplateElement('child1'),
        ]
        self.assertRaises(KeyError, elem.extend, children2)
        # Also ensure that child4 was not added (extend is atomic here)
        self.assertEqual(len(elem), 3)
        self.assertEqual(elem[-1].tag, 'child3')

    def test_element_insert_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a child to insert
        child = xmlutil.TemplateElement('child4')
        # Insert it
        elem.insert(1, child)
        # Ensure the child was inserted in the right place
        self.assertEqual(len(elem), 4)
        children.insert(1, child)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child2')
        self.assertRaises(KeyError, elem.insert, 2, child2)

    def test_element_remove_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a test child to remove: equal tag but a distinct object
        child = xmlutil.TemplateElement('child2')
        # Try to remove it -- removal is by identity, not by tag
        self.assertRaises(ValueError, elem.remove, child)
        # Ensure that no child was removed
        self.assertEqual(len(elem), 3)
        # Now remove a legitimate child
        elem.remove(children[1])
        # Ensure that the child was removed
        self.assertEqual(len(elem), 2)
        self.assertEqual(elem[0], children[0])
        self.assertEqual(elem[1], children[2])
        self.assertEqual('child2' in elem, False)

        # Ensure the child cannot be retrieved by name
        def get_key(elem, key):
            return elem[key]
        self.assertRaises(KeyError, get_key, elem, 'child2')

    def test_element_text(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Ensure that it has no text
        self.assertIsNone(elem.text)
        # Try setting it to a string and ensure it becomes a selector
        elem.text = 'test'
        self.assertEqual(hasattr(elem.text, 'chain'), True)
        self.assertEqual(len(elem.text.chain), 1)
        self.assertEqual(elem.text.chain[0], 'test')
        # Try resetting the text to None
        elem.text = None
        self.assertIsNone(elem.text)
        # Now make up a selector and try setting the text to that
        sel = xmlutil.Selector()
        elem.text = sel
        self.assertEqual(elem.text, sel)
        # Finally, try deleting the text and see what happens
        del elem.text
        self.assertIsNone(elem.text)

    def test_apply_attrs(self):
        # Create a template element
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2))
        tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the correct attributes were set (stringified)
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)

    def test_apply_text(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.ConstantSelector(1)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the text was set (stringified)
        self.assertEqual(str(tmpl_elem.text.value), elem.text)

    def test__render(self):
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2),
                     attr3=xmlutil.ConstantSelector(3))
        # Create a master template element
        master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
        # Create a couple of slave template element
        slave_elems = [
            xmlutil.TemplateElement('test', attr2=attrs['attr2']),
            xmlutil.TemplateElement('test', attr3=attrs['attr3']),
        ]
        # Try the render without a parent or nsmap
        elem = master_elem._render(None, None, slave_elems, None)
        # Verify the particulars of the render: slave attributes are merged
        # into the master's output
        self.assertEqual(elem.tag, 'test')
        self.assertEqual(len(elem.nsmap), 0)
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
        # Create a parent for the element to be rendered
        parent = etree.Element('parent')
        # Try the render again...
        elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
        # Verify the particulars of the render: attached to parent, nsmap
        # passed through
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], elem)
        self.assertEqual(len(elem.nsmap), 1)
        self.assertEqual(elem.nsmap['a'], 'foo')

    def test_render(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.Selector()
        # Create the object we're going to render
        obj = ['elem1', 'elem2', 'elem3', 'elem4']
        # Try a render with no object
        elems = tmpl_elem.render(None, None)
        self.assertEqual(len(elems), 0)
        # Try a render with one object -- yields (element, datum) pairs
        elems = tmpl_elem.render(None, 'foo')
        self.assertEqual(len(elems), 1)
        self.assertEqual(elems[0][0].text, 'foo')
        self.assertEqual(elems[0][1], 'foo')
        # Now, try rendering an object with multiple entries
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(len(elems), 4)
        # Check the results
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].text, obj[idx])
            self.assertEqual(elems[idx][1], obj[idx])
        # Check with a subselector
        tmpl_elem = xmlutil.TemplateElement(
            'test',
            subselector=xmlutil.ConstantSelector('foo'))
        parent = etree.Element('parent')
        # Try a render with no object
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(len(elems), 4)

    def test_subelement(self):
        # Try the SubTemplateElement constructor
        parent = xmlutil.SubTemplateElement(None, 'parent')
        self.assertEqual(parent.tag, 'parent')
        self.assertEqual(len(parent), 0)
        # Now try it with a parent element -- child is auto-appended
        child = xmlutil.SubTemplateElement(parent, 'child')
        self.assertEqual(child.tag, 'child')
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], child)

    def test_wrap(self):
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem.unwrap(), elem)
        self.assertEqual(elem.wrap().root, elem)

    def test_dyntag(self):
        obj = ['a', 'b', 'c']
        # Create a template element with a dynamic tag
        tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
        # Try the render
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        # Verify the particulars of the render: each datum becomes the tag
        self.assertEqual(len(elems), len(obj))
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].tag, obj[idx])

    def test_tree(self):
        # Create a template element
        elem = xmlutil.TemplateElement('test', attr3='attr3')
        elem.text = 'test'
        self.assertEqual(elem.tree(),
                         "<test !selector=Selector() "
                         "!text=Selector('test',) "
                         "attr3=Selector('attr3',)"
                         "/>")
        # Create a template element
        elem = xmlutil.TemplateElement('test2')
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        self.assertEqual(elem.tree(),
                         "<test2 !selector=Selector()>"
                         "<child !selector=Selector()/></test2>")
class TemplateTest(test.NoDBTestCase):
    def test_tree(self):
        # tree() on a bare template yields a truthy (non-empty)
        # representation.
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        self.assertTrue(tmpl.tree())
def test_wrap(self):
# These are strange methods, but they make things easier
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem)
self.assertEqual(tmpl.unwrap(), elem)
self.assertEqual(tmpl.wrap(), tmpl)
def test__siblings(self):
# Set up a basic template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem)
# Check that we get the right siblings
siblings = tmpl._siblings()
self.assertEqual(len(siblings), 1)
self.assertEqual(siblings[0], elem)
def test__nsmap(self):
# Set up a basic template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
# Check out that we get the right namespace dictionary
nsmap = tmpl._nsmap()
self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
self.assertEqual(len(nsmap), 1)
self.assertEqual(nsmap['a'], 'foo')
    def test_master_attach(self):
        # Set up a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1)
        # Make sure it has a root but no slaves
        self.assertEqual(tmpl.root, elem)
        self.assertEqual(len(tmpl.slaves), 0)
        self.assertTrue(repr(tmpl))
        # Try to attach an invalid slave (mismatched root tag)
        bad_elem = xmlutil.TemplateElement('test2')
        self.assertRaises(ValueError, tmpl.attach, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an invalid and a valid slave -- attach() must be
        # all-or-nothing, so neither is kept
        good_elem = xmlutil.TemplateElement('test')
        self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)

        # Try to attach an inapplicable template (apply() returns False)
        class InapplicableTemplate(xmlutil.Template):
            def apply(self, master):
                return False
        inapp_tmpl = InapplicableTemplate(good_elem)
        tmpl.attach(inapp_tmpl)
        self.assertEqual(len(tmpl.slaves), 0)
        # Now try attaching an applicable template
        tmpl.attach(good_elem)
        self.assertEqual(len(tmpl.slaves), 1)
        self.assertEqual(tmpl.slaves[0].root, good_elem)
    def test_master_copy(self):
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
        # Give it a slave
        slave = xmlutil.TemplateElement('test')
        tmpl.attach(slave)
        # Construct a copy
        copy = tmpl.copy()
        # Check to see if we actually managed a copy
        self.assertNotEqual(tmpl, copy)
        self.assertEqual(tmpl.root, copy.root)
        self.assertEqual(tmpl.version, copy.version)
        # Per these assertions the nsmap dict is shared between original and
        # copy, while the slaves list is a fresh (equal-content) list.
        self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
        self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
        self.assertEqual(len(tmpl.slaves), len(copy.slaves))
        self.assertEqual(tmpl.slaves[0], copy.slaves[0])
    def test_slave_apply(self):
        # A slave applies to a master when the master's version falls within
        # the slave's [min, max] version range.
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        master = xmlutil.MasterTemplate(elem, 3)
        # Construct a slave template with applicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 2)
        self.assertEqual(slave.apply(master), True)
        self.assertTrue(repr(slave))
        # Construct a slave template with equal minimum version
        slave = xmlutil.SlaveTemplate(elem, 3)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with inapplicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 4)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with applicable version range
        slave = xmlutil.SlaveTemplate(elem, 2, 4)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with low version range
        slave = xmlutil.SlaveTemplate(elem, 1, 2)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with high version range
        slave = xmlutil.SlaveTemplate(elem, 4, 5)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with matching version range
        slave = xmlutil.SlaveTemplate(elem, 3, 3)
        self.assertEqual(slave.apply(master), True)
    def test__serialize(self):
        """Walk the element tree produced by MasterTemplate._serialize()
        with one slave attached, checking tags, text, attributes and the
        merged namespace map.
        """
        # Our test object to serialize
        obj = {
            'test': {
                'name': 'foobar',
                'values': [1, 2, 3, 4],
                'attrs': {
                    'a': 1,
                    'b': 2,
                    'c': 3,
                    'd': 4,
                },
                'image': {
                    'name': 'image_foobar',
                    'id': 42,
                },
            },
        }
        # Set up our master template
        root = xmlutil.TemplateElement('test', selector='test',
                                       name='name')
        value = xmlutil.SubTemplateElement(root, 'value', selector='values')
        value.text = xmlutil.Selector()
        attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
        xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
                                   key=0, value=1)
        master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
        # Set up our slave template
        root_slave = xmlutil.TemplateElement('test', selector='test')
        image = xmlutil.SubTemplateElement(root_slave, 'image',
                                           selector='image', id='id')
        image.text = xmlutil.Selector('name')
        slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
        # Attach the slave to the master...
        master.attach(slave)
        # Try serializing our object
        siblings = master._siblings()
        nsmap = master._nsmap()
        result = master._serialize(None, obj, siblings, nsmap)
        # Now we get to manually walk the element tree...
        self.assertEqual(result.tag, 'test')
        # Both the master's and the slave's namespaces must be merged in.
        self.assertEqual(len(result.nsmap), 2)
        self.assertEqual(result.nsmap['f'], 'foo')
        self.assertEqual(result.nsmap['b'], 'bar')
        self.assertEqual(result.get('name'), obj['test']['name'])
        # First children: one <value> element per entry in 'values'.
        for idx, val in enumerate(obj['test']['values']):
            self.assertEqual(result[idx].tag, 'value')
            self.assertEqual(result[idx].text, str(val))
        # idx is left at the last <value>; step past it to <attrs>.
        idx += 1
        self.assertEqual(result[idx].tag, 'attrs')
        for attr in result[idx]:
            self.assertEqual(attr.tag, 'attr')
            self.assertEqual(attr.get('value'),
                             str(obj['test']['attrs'][attr.get('key')]))
        # The slave's <image> element comes after the master's children.
        idx += 1
        self.assertEqual(result[idx].tag, 'image')
        self.assertEqual(result[idx].get('id'),
                         str(obj['test']['image']['id']))
        self.assertEqual(result[idx].text, obj['test']['image']['name'])
        # A Template with no root serializes to the empty string.
        templ = xmlutil.Template(None)
        self.assertEqual(templ.serialize(None), '')
def test_serialize_with_colon_tagname_support(self):
# Our test object to serialize
obj = {'extra_specs': {'foo:bar': '999'}}
expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
'<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
'</extra_specs>'))
# Set up our master template
root = xmlutil.TemplateElement('extra_specs', selector='extra_specs',
colon_ns=True)
value = xmlutil.SubTemplateElement(root, 'foo:bar', selector='foo:bar',
colon_ns=True)
value.text = xmlutil.Selector()
master = xmlutil.MasterTemplate(root, 1)
result = master.serialize(obj)
self.assertEqual(expected_xml, result)
def test__serialize_with_empty_datum_selector(self):
# Our test object to serialize
obj = {
'test': {
'name': 'foobar',
'image': ''
},
}
root = xmlutil.TemplateElement('test', selector='test',
name='name')
master = xmlutil.MasterTemplate(root, 1)
root_slave = xmlutil.TemplateElement('test', selector='test')
image = xmlutil.SubTemplateElement(root_slave, 'image',
selector='image')
image.set('id')
xmlutil.make_links(image, 'links')
slave = xmlutil.SlaveTemplate(root_slave, 1)
master.attach(slave)
siblings = master._siblings()
result = master._serialize(None, obj, siblings)
self.assertEqual(result.tag, 'test')
self.assertEqual(result[0].tag, 'image')
self.assertEqual(result[0].get('id'), str(obj['test']['image']))
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
return xmlutil.MasterTemplate(elem, 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
return xmlutil.SlaveTemplate(elem, 1)
class TemplateBuilderTest(test.NoDBTestCase):
    """Caching behaviour of TemplateBuilder for master and slave templates."""

    def test_master_template_builder(self):
        # The class-level cache starts out empty.
        self.assertIsNone(MasterTemplateBuilder._tmpl)
        first = MasterTemplateBuilder()
        # Construction populates the cache with a prototype template...
        self.assertIsNotNone(MasterTemplateBuilder._tmpl)
        # ...while the caller receives a copy, not the prototype itself.
        self.assertNotEqual(MasterTemplateBuilder._tmpl, first)
        # A second construction reuses the cached prototype unchanged.
        prototype = MasterTemplateBuilder._tmpl
        second = MasterTemplateBuilder()
        self.assertEqual(MasterTemplateBuilder._tmpl, prototype)
        # Each call hands back a fresh copy...
        self.assertNotEqual(first, second)
        # ...unless copying is explicitly disabled by the argument.
        uncopied = MasterTemplateBuilder(False)
        self.assertEqual(MasterTemplateBuilder._tmpl, uncopied)

    def test_slave_template_builder(self):
        # The class-level cache starts out empty.
        self.assertIsNone(SlaveTemplateBuilder._tmpl)
        first = SlaveTemplateBuilder()
        # Construction populates the cache...
        self.assertIsNotNone(SlaveTemplateBuilder._tmpl)
        # ...and, unlike masters, slaves are returned uncopied.
        self.assertEqual(SlaveTemplateBuilder._tmpl, first)
        second = SlaveTemplateBuilder()
        self.assertEqual(SlaveTemplateBuilder._tmpl, first)
        # Every call yields the very same cached instance.
        self.assertEqual(first, second)
class MiscellaneousXMLUtilTests(test.NoDBTestCase):
    """Tests for the assorted helper functions in xmlutil."""
    def test_validate_schema(self):
        """validate_schema() accepts a well-formed metadata document."""
        xml = '''<?xml version='1.0' encoding='UTF-8'?>
        <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
        <meta key="key6">value6</meta><meta key="key4">value4</meta>
        </metadata>
        '''
        xmlutil.validate_schema(xml, 'metadata')
        # No way to test the return value of validate_schema.
        # It just raises an exception when something is wrong.
        self.assertTrue(True)
    def test_make_links(self):
        """make_links() returns a template element with a usable repr."""
        elem = xmlutil.TemplateElement('image', selector='image')
        self.assertTrue(repr(xmlutil.make_links(elem, 'links')))
    def test_make_flat_dict(self):
        """make_flat_dict() serializes a flat dict, with and without ns."""
        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                        '<wrapper><a>foo</a><b>bar</b></wrapper>')
        root = xmlutil.make_flat_dict('wrapper')
        tmpl = xmlutil.MasterTemplate(root, 1)
        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
        self.assertEqual(result, expected_xml)
        # With a namespace, every tag is prefixed with ns0.
        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
            '<ns0:wrapper xmlns:ns0="ns"><ns0:a>foo</ns0:a><ns0:b>bar</ns0:b>'
            "</ns0:wrapper>")
        root = xmlutil.make_flat_dict('wrapper', ns='ns')
        tmpl = xmlutil.MasterTemplate(root, 1)
        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
        self.assertEqual(result, expected_xml)
    def test_make_flat_dict_with_colon_tagname_support(self):
        """colon_ns=True maps 'foo:bar' keys onto namespaced tags."""
        # Our test object to serialize
        obj = {'extra_specs': {'foo:bar': '999'}}
        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
                         '</extra_specs>'))
        # Set up our master template
        root = xmlutil.make_flat_dict('extra_specs', colon_ns=True)
        master = xmlutil.MasterTemplate(root, 1)
        result = master.serialize(obj)
        self.assertEqual(expected_xml, result)
    def test_make_flat_dict_with_parent(self):
        """A flat dict attached to an explicit parent root element."""
        # Our test object to serialize
        obj = {"device": {"id": 1,
                          "extra_info": {"key1": "value1",
                                         "key2": "value2"}}}
        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<device id="1"><extra_info><key2>value2</key2>'
                         '<key1>value1</key1></extra_info></device>'))
        root = xmlutil.TemplateElement('device', selector='device')
        root.set('id')
        extra = xmlutil.make_flat_dict('extra_info', root=root)
        root.append(extra)
        master = xmlutil.MasterTemplate(root, 1)
        result = master.serialize(obj)
        self.assertEqual(expected_xml, result)
    def test_make_flat_dict_with_dicts(self):
        """ignore_sub_dicts=True skips nested dicts at the outer level so an
        inner flat-dict template can render them instead."""
        # Our test object to serialize
        obj = {"device": {"id": 1,
                          "extra_info": {"key1": "value1",
                                         "key2": "value2"}}}
        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<device><id>1</id><extra_info><key2>value2</key2>'
                         '<key1>value1</key1></extra_info></device>'))
        root = xmlutil.make_flat_dict('device', selector='device',
                                      ignore_sub_dicts=True)
        extra = xmlutil.make_flat_dict('extra_info', selector='extra_info')
        root.append(extra)
        master = xmlutil.MasterTemplate(root, 1)
        result = master.serialize(obj)
        self.assertEqual(expected_xml, result)
    def test_safe_parse_xml(self):
        """safe_minidom_parse_string() round-trips normal XML and rejects
        malicious ("billion laughs"-style) bodies."""
        normal_body = ('<?xml version="1.0" ?>'
                       '<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')
        dom = xmlutil.safe_minidom_parse_string(normal_body)
        # Some versions of minidom inject extra newlines so we ignore them
        result = str(dom.toxml()).replace('\n', '')
        self.assertEqual(normal_body, result)
        self.assertRaises(exception.MalformedRequestBody,
                          xmlutil.safe_minidom_parse_string,
                          tests_utils.killer_xml_body())
class SafeParserTestCase(test.NoDBTestCase):
    """Ensure ProtectedExpatParser rejects dangerous XML constructs."""
    def test_external_dtd(self):
        """An external DTD reference is rejected even with forbid_dtd=False."""
        xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
            <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
            "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
            <html>
            <head/>
            <body>html with dtd</body>
            </html>""")
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)
    def test_external_file(self):
        """An external-file entity expansion is rejected."""
        # The body must *reference* the declared entity (&ee;) so the
        # parser actually reaches the entity-expansion path; a literal
        # character there would never exercise the check.  (The reference
        # had been mangled into the decoded character by an earlier
        # encoding pass.)
        xml_string = """<!DOCTYPE external [
                <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
                ]>
                <root>&ee;</root>"""
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)
    def test_notation(self):
        """A NOTATION declaration in the internal DTD subset is rejected."""
        xml_string = """<?xml version="1.0" standalone="no"?>
                        <!-- comment data -->
                        <!DOCTYPE x [
                        <!NOTATION notation SYSTEM "notation.jpeg">
                        ]>
                        <root attr1="value1">
                        </root>"""
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)
| |
from __future__ import unicode_literals
import transaction
from pyramid.view import view_config
from pyramid.security import remember
from pyramid.security import forget
from pyramid.security import authenticated_userid
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPNotFound
from pyramid.httpexceptions import HTTPBadRequest
from pyramid.httpexceptions import HTTPForbidden
from ez2pay.models.user import UserModel
from ez2pay.mail import render_mail
from ez2pay.mail import send_mail
from ez2pay.i18n import LocalizerFactory
from ez2pay.utils import check_csrf_token
from .forms import FormFactory
get_localizer = LocalizerFactory()
@view_config(route_name='account.login',
             renderer='templates/login.genshi')
@view_config(context='pyramid.httpexceptions.HTTPForbidden',
             renderer='templates/login.genshi')
def login(request):
    """Display the login form, or perform the login on POST.

    Also registered as the HTTPForbidden view, so hitting a protected
    page presents the login form.  Mainly borrowed from

    https://docs.pylonsproject.org/projects/pyramid/1.1/tutorials/wiki/authorization.html?highlight=login#adding-login-and-logout-views
    """
    from ez2pay.models.user import BadPassword
    from ez2pay.models.user import UserNotExist
    _ = get_localizer(request)
    referrer = request.url
    my_url = request.route_url('account.login')
    if referrer == my_url:
        referrer = request.route_url('front.home')
    # never use the login form itself as came_from
    came_from = request.params.get('came_from', referrer)
    username_or_email = ''
    password = ''
    factory = FormFactory(_)
    LoginForm = factory.make_login_form()
    form = LoginForm(request.params)
    if request.method == 'POST' and form.validate():
        username_or_email = request.params['username_or_email']
        password = request.params['password']
        # NOTE(review): 'error' is never set to True below; it looks like a
        # leftover from the borrowed example code.
        error = False
        session = request.db_session
        user_model = UserModel(session)
        try:
            user_id = user_model.authenticate_user(username_or_email, password)
        except (UserNotExist, BadPassword):
            # Both failure modes map to the same generic message.
            msg = _(u'Wrong username or password')
            request.add_flash(msg, 'error')
            # NOTE(review): the submitted password is echoed back into the
            # template context here -- confirm this is intentional.
            return dict(
                came_from=came_from,
                username_or_email=username_or_email,
                password=password,
                form=form,
            )
        if user_id is not None and not error:
            # Issue the auth cookie headers and bounce back to came_from.
            headers = remember(request, user_id)
            user = user_model.get(user_id)
            msg = _(u"Welcome back, ${user_name}",
                    mapping=dict(user_name=user.user_name))
            request.add_flash(msg)
            return HTTPFound(location=came_from,
                             headers=headers)
    # GET, or invalid form: render the login page.
    return dict(
        came_from=came_from,
        username_or_email=username_or_email,
        password=password,
        form=form,
    )
@view_config(route_name='account.logout')
def logout(request):
    """Log the current user out and redirect back where they came from."""
    _ = get_localizer(request)
    fallback = request.referrer
    # Never bounce back to the logout URL itself, and cope with a
    # missing Referer header.
    if fallback == request.route_url('account.logout') or not fallback:
        fallback = request.route_url('front.home')
    came_from = request.params.get('came_from', fallback)
    user_model = UserModel(request.db_session)
    user_id = authenticated_userid(request)
    if user_id is None:
        raise HTTPBadRequest('You are not logged in')
    user = user_model.get(user_id)
    if user is None:
        raise HTTPBadRequest
    # Drop the authentication cookie and say goodbye.
    headers = forget(request)
    msg = _(u"Hope we will see you soon, ${user_name}",
            mapping=dict(user_name=user.display_name or user.user_name))
    request.add_flash(msg)
    return HTTPFound(location=came_from, headers=headers)
@view_config(route_name='account.register',
             renderer='templates/register.genshi')
def register(request):
    """Display the registration form, or create the user and send the
    account-activation e-mail on a valid POST.
    """
    _ = get_localizer(request)
    settings = request.registry.settings
    user_model = UserModel(request.db_session)
    factory = FormFactory(_)
    RegisterForm = factory.make_register_form()
    form = RegisterForm(request.params)
    if request.method == 'POST':
        check_csrf_token(request)
        validate_result = form.validate()
        # NOTE(review): these direct indexings assume the fields are always
        # present in the POST, even when form validation fails -- confirm.
        user_name = request.params['user_name']
        password = request.params['password']
        email = request.params['email']
        # Reject e-mail addresses from blacklisted domains (configured via
        # 'email_black_domain_list').
        black_domain = set(settings.get('email_black_domain_list', []))
        domain = email.split('@')[-1].lower()
        if domain in black_domain:
            msg = _(u'Invalid email address')
            form.email.errors.append(msg)
            validate_result = False
        # Enforce uniqueness of user name and e-mail address.
        by_name = user_model.get_by_name(user_name)
        if by_name is not None:
            msg = _(u'Username %s already exists') % user_name
            form.user_name.errors.append(msg)
            validate_result = False
        by_email = user_model.get_by_email(email)
        if by_email is not None:
            msg = _(u'Email %s already exists') % email
            form.email.errors.append(msg)
            validate_result = False
        if validate_result:
            # Create the user inside a transaction.
            with transaction.manager:
                user_id = user_model.create(
                    user_name=user_name,
                    display_name=user_name,
                    password=password,
                    email=email,
                )
            # Derive the activation code from the server-side secret and
            # e-mail the activation link to the new user.
            auth_secret_key = settings['auth_secret_key']
            code = user_model.get_verification_code(
                user_id=user_id,
                verify_type='create_user',
                secret=auth_secret_key
            )
            link = request.route_url(
                'account.activate',
                user_name=user_name,
                code=code
            )
            params = dict(link=link, user_name=user_name)
            html = render_mail(
                request,
                'ez2pay:templates/mails/register_link.genshi',
                params
            )
            subject = _('ez2pay account activation')
            send_mail(
                request=request,
                subject=subject,
                to_addresses=[email],
                format='html',
                body=html
            )
            msg = _(u"User ${user_name} has been registered",
                    mapping=dict(user_name=user_name))
            request.add_flash(msg, 'success')
            return HTTPFound(location=request.route_url('account.check_mailbox'))
    return dict(form=form)
@view_config(route_name='account.check_mailbox',
             renderer='templates/check_mailbox.genshi')
def check_mailbox(request):
    """Render the static "please check your mailbox" page."""
    return {}
@view_config(route_name='account.activate',
             renderer='templates/activate.genshi')
def activate(request):
    """Activate a newly registered user via the e-mailed verification link.

    Expects ``user_name`` and ``code`` in the route match.  Returns an
    empty dict for the template on success, 404 for an unknown user and
    403 when the code does not match.
    """
    _ = get_localizer(request)
    settings = request.registry.settings
    user_model = UserModel(request.db_session)
    code = request.matchdict['code']
    user_name = request.matchdict['user_name']
    user = user_model.get_by_name(user_name)
    # Guard against activation links for users that do not exist; without
    # this check the user.user_id access below raises AttributeError
    # (HTTP 500).  Mirrors the handling in recovery_password.
    if user is None:
        return HTTPNotFound(_('No such user %s') % user_name)
    # Recompute the expected verification code from the server-side
    # secret and compare it with the one supplied in the link.
    auth_secret_key = settings['auth_secret_key']
    valid_code = user_model.get_verification_code(
        user_id=user.user_id,
        verify_type='create_user',
        secret=auth_secret_key
    )
    if valid_code != code:
        # (The message has no ${user_name} placeholder, so the previous
        # mapping argument was a no-op and has been dropped.)
        msg = _(u"Invalid activation link")
        return HTTPForbidden(msg)
    if not user.verified:
        # Flip the flag (and flash) only the first time the link is used.
        with transaction.manager:
            user_model.update_user(user.user_id, verified=True)
        msg = _(u"User ${user_name} is activated",
                mapping=dict(user_name=user_name))
        request.add_flash(msg, 'success')
    return dict()
@view_config(route_name='account.forgot_password',
             renderer='templates/forgot_password.genshi')
def forgot_password(request):
    """Display the forgot-password form, or e-mail a recovery link on POST.
    """
    # NOTE(review): urllib.urlencode is the Python 2 location; under
    # Python 3 this would need urllib.parse.urlencode -- confirm the
    # target runtime.
    import urllib
    settings = request.registry.settings
    _ = get_localizer(request)
    factory = FormFactory(_)
    ForgotPasswordForm = factory.make_forgot_password_form()
    form = ForgotPasswordForm(request.params)
    session = request.db_session
    user_model = UserModel(session)
    if request.method == 'POST' and form.validate():
        email = request.params['email']
        user = user_model.get_by_email(email)
        if user is None:
            # NOTE(review): this reveals whether an e-mail address is
            # registered; consider a neutral message.
            msg = _(u'Cannot find the user')
            form.email.errors.append(msg)
            return dict(form=form)
        user_name = user.user_name
        user_id = user.user_id
        # TODO: limit frequency here
        # generate verification
        auth_secret_key = settings['auth_secret_key']
        code = user_model.get_recovery_code(auth_secret_key, user_id)
        # Build the recovery URL with the user name and code as query
        # parameters, then e-mail it.
        link = request.route_url('account.recovery_password')
        query = dict(user_name=user_name, code=code)
        link = link + '?' + urllib.urlencode(query)
        params = dict(link=link, user_name=user_name)
        html = render_mail(
            request,
            'ez2pay:templates/mails/password_recovery.genshi',
            params
        )
        send_mail(
            request=request,
            subject=_('ez2pay password recovery'),
            to_addresses=[email],
            format='html',
            body=html
        )
        request.add_flash(_(u'To reset your password, please check your '
                            'mailbox and click the password recovery link'))
    return dict(form=form)
@view_config(route_name='account.recovery_password',
             renderer='templates/recovery_password.genshi')
def recovery_password(request):
    """Display the password-recovery form, or change the password on POST.

    Validates the ``user_name``/``code`` pair from the query string
    against the server-side recovery code.  Returns 404 for an unknown
    user and 403 for a bad code.
    """
    _ = get_localizer(request)
    settings = request.registry.settings
    user_model = UserModel(request.db_session)
    user_name = request.params['user_name']
    code = request.params['code']
    user = user_model.get_by_name(user_name)
    if user is None:
        return HTTPNotFound(_('No such user %s') % user_name)
    user_id = user.user_id
    # Recompute the recovery code and compare it with the one supplied in
    # the link; a mismatch means a forged or stale link.
    auth_secret_key = settings['auth_secret_key']
    valid_code = user_model.get_recovery_code(auth_secret_key, user_id)
    if code != valid_code:
        return HTTPForbidden(_('Bad password recovery link'))
    factory = FormFactory(_)
    RecoveryPasswordForm = factory.make_recovery_password_form()
    form = RecoveryPasswordForm(request.params, user_name=user_name, code=code)
    # (The previous version looked the user up a second time here and
    # handled a None result; that branch was unreachable, since the
    # lookup above already returned 404 for unknown users.)
    redirect_url = request.route_url('front.home')
    if request.method == 'POST' and form.validate():
        new_password = request.POST['new_password']
        with transaction.manager:
            user_model.update_password(user_id, new_password)
        msg = _(u'Your password has been updated')
        request.add_flash(msg, 'success')
        raise HTTPFound(location=redirect_url)
    return dict(form=form)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pfor and for_loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import flags
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@test_util.run_all_in_graph_and_eager_modes
class PForTest(PForTestCase):
  """Basic pfor behaviour: while_loop fallback and parallel_iterations."""

  def test_op_conversion_fallback_to_while_loop(self):
    # Note that we used top_k op for this test. If a converter gets defined for
    # it, we will need to find another op for which a converter has not been
    # defined.
    x = random_ops.random_uniform([3, 2, 4])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return nn.top_k(x_i)

    with self.assertRaisesRegexp(ValueError, "No converter defined"):
      self._test_loop_fn(
          loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
    # Flip the fallback flag only for the duration of the check, and
    # restore it in a finally block: the previous code reset it after the
    # call unconditionally, so a failure would leak the setting into
    # other tests in the same process.
    flags.FLAGS.op_conversion_fallback_to_while_loop = True
    try:
      self._test_loop_fn(
          loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
    finally:
      flags.FLAGS.op_conversion_fallback_to_while_loop = False

  def test_parallel_iterations(self):
    for parallel_iterations in [2, 3, 8, 10]:
      x = random_ops.random_uniform([8, 3])

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        return array_ops.gather(x, i)
      # pylint: enable=cell-var-from-loop

      # Cover both a static and a tensor-valued iteration count.
      self._test_loop_fn(loop_fn, 8, parallel_iterations=parallel_iterations)
      self._test_loop_fn(loop_fn, 4 * constant_op.constant(2),
                         parallel_iterations=parallel_iterations)

  def test_parallel_iterations_zero(self):
    # parallel_iterations must be a positive integer for both entry points.
    with self.assertRaisesRegexp(ValueError, "positive integer"):
      pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=0)
    with self.assertRaisesRegexp(TypeError, "positive integer"):
      pfor_control_flow_ops.for_loop(lambda i: 1, dtypes.int32, 8,
                                     parallel_iterations=0)

  def test_parallel_iterations_one(self):
    # pfor rejects parallel_iterations=1; for_loop covers that case.
    with self.assertRaisesRegexp(ValueError, "Use for_loop instead"):
      pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=1)
@test_util.run_all_in_graph_and_eager_modes
class BitwiseTest(PForTestCase):
  """Vectorization of unary and binary bitwise ops."""
  def test_unary_cwise(self):
    for op in [bitwise_ops.invert]:
      x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x1 = array_ops.gather(x, i)
        return op(x1)
      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
  def test_binary_cwise(self):
    binary_ops = [
        bitwise_ops.bitwise_and,
        bitwise_ops.bitwise_or,
        bitwise_ops.bitwise_xor,
        bitwise_ops.left_shift,
        bitwise_ops.right_shift,
    ]
    for op in binary_ops:
      x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
      y = random_ops.random_uniform([3, 5], maxval=10, dtype=dtypes.int32)
      output_dtypes = []
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x1 = array_ops.gather(x, i)
        y1 = array_ops.gather(y, i)
        # Mix loop-variant and loop-invariant operands in every combination.
        outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
        # Capture the output dtypes for _test_loop_fn below; cleared first
        # in case loop_fn is traced more than once.
        del output_dtypes[:]
        output_dtypes.extend([t.dtype for t in outputs])
        return outputs
      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
@test_util.run_all_in_graph_and_eager_modes
class NNTest(PForTestCase):
  """Vectorization of nn ops (conv, pooling, batch norm, softmax) and,
  where applicable, their gradients computed through a persistent tape."""
  def test_conv2d(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    filt = random_ops.random_uniform([3, 3, 3, 7])
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return nn.conv2d(
          x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_conv2d_backprop_input(self):
    x_shape = [2, 12, 12, 3]
    filt = random_ops.random_uniform([3, 3, 3, 7])
    grad = random_ops.random_uniform([3, 2, 5, 5, 7])
    def loop_fn(i):
      grad1 = array_ops.gather(grad, i)
      return nn.conv2d_backprop_input(
          x_shape,
          filt,
          grad1,
          strides=[1, 2, 2, 1],
          padding="VALID",
          data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_conv2d_backprop_filter(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    x_0 = array_ops.gather(x, 0)
    filter_sizes = [3, 3, 3, 7]
    grad = random_ops.random_uniform([3, 2, 5, 5, 7])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      grad_i = array_ops.gather(grad, i)
      # Covers both a loop-variant (x_i) and loop-invariant (x_0) input.
      return [
          nn.conv2d_backprop_filter(
              inp,
              filter_sizes,
              grad_i,
              strides=[1, 2, 2, 1],
              padding="VALID",
              data_format="NHWC") for inp in [x_i, x_0]
      ]
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
  def test_avg_pool(self):
    # The persistent tape is entered again inside loop_fn so per-iteration
    # gradients can be taken.
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.avg_pool(
            x1, ksize, strides=[1, 2, 2, 1], padding="VALID",
            data_format="NHWC")
        loss = nn.l2_loss(output)
      return output, g.gradient(loss, x1)
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
  def test_max_pool(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]
      strides = [1, 2, 2, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.max_pool(
            x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
        loss = nn.l2_loss(output)
        ones = array_ops.ones_like(output)
        g.watch(ones)
        # First-order gradient is taken inside the tape so the
        # second-order gradient w.r.t. 'ones' can be computed below.
        grad = g.gradient(loss, x1, output_gradients=ones)
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
  def test_max_pool3d(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 1, 3, 3, 1]
      strides = [1, 1, 2, 2, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.max_pool3d(
            x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
        loss = nn.l2_loss(output)
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
  @test_util.disable_xla("This test never passed for XLA")
  def test_fused_batch_norm(self):
    data_formats = ["NHWC"]
    if test.is_gpu_available():
      data_formats.append("NCHW")
    # Exercise training and inference modes in every available layout.
    for is_training in (True, False):
      for data_format in data_formats:
        with backprop.GradientTape(persistent=True) as g:
          if data_format == "NCHW":
            x = random_ops.random_uniform([3, 1, 2, 5, 5])
          else:
            x = random_ops.random_uniform([3, 1, 5, 5, 2])
          g.watch(x)
          scale = random_ops.random_uniform([2])
          g.watch(scale)
          offset = random_ops.random_uniform([2])
          g.watch(offset)
          # Inference mode requires explicit moments.
          mean = None if is_training else random_ops.random_uniform([2])
          variance = None if is_training else random_ops.random_uniform([2])
        # pylint: disable=cell-var-from-loop
        def loop_fn(i):
          with g:
            x1 = array_ops.gather(x, i)
            outputs = nn.fused_batch_norm(
                x1,
                scale,
                offset,
                mean=mean,
                variance=variance,
                epsilon=0.01,
                data_format=data_format,
                is_training=is_training)
            outputs = list(outputs)
            # We only test the first value of outputs when is_training is False.
            # It looks like CPU and GPU have different outputs for batch_mean
            # and batch_variance for this case.
            if not is_training:
              outputs[1] = constant_op.constant(0.)
              outputs[2] = constant_op.constant(0.)
            loss = nn.l2_loss(outputs[0])
          if is_training:
            gradients = g.gradient(loss, [x1, scale, offset])
          else:
            gradients = [constant_op.constant(0.)] * 3
          return outputs + gradients
        # pylint: enable=cell-var-from-loop
        self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 6)
  def test_softmax_cross_entropy_with_logits(self):
    with backprop.GradientTape(persistent=True) as g:
      logits = random_ops.random_uniform([3, 2, 4])
      g.watch(logits)
      labels = random_ops.random_uniform([3, 2, 4])
      # Normalize so each row of labels is a probability distribution.
      labels /= math_ops.reduce_sum(labels, axis=[2], keepdims=True)
    def loop_fn(i):
      with g:
        logits_i = array_ops.gather(logits, i)
        labels_i = array_ops.gather(labels, i)
        loss = nn.softmax_cross_entropy_with_logits(
            labels=labels_i, logits=logits_i)
        total_loss = math_ops.reduce_sum(loss)
      return loss, g.gradient(total_loss, logits_i)
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
class RandomTest(PForTestCase):
  """Vectorization of random ops.

  The vectorized and per-iteration implementations need not generate the
  same random values, so only the output shapes are compared.
  """

  def run_and_assert_equal(self, targets1, targets2):
    # Override the base comparison: check shapes only, never values.
    results = self._run_targets(targets1, targets2)
    half = len(results) // 2
    for i in range(half):
      self.assertAllEqual(results[i].shape, results[i + half].shape)

  def test_random_uniform(self):
    self._test_loop_fn(lambda _: random_ops.random_uniform([3]), 5)

  def test_random_uniform_int(self):
    self._test_loop_fn(
        lambda _: random_ops.random_uniform([3], maxval=1, dtype=dtypes.int32),
        5, loop_fn_dtypes=dtypes.int32)

  def test_random_standard_normal(self):
    self._test_loop_fn(lambda _: random_ops.random_normal([3]), 5)

  def test_truncated_normal(self):
    self._test_loop_fn(lambda _: random_ops.truncated_normal([3]), 5)

  def test_random_gamma(self):
    self._test_loop_fn(lambda _: random_ops.random_gamma([3], alpha=[0.5]), 5)

  def test_random_poisson_v2(self):
    self._test_loop_fn(
        lambda _: random_ops.random_poisson(lam=[1.3], shape=[3]), 5)
class LoggingTest(PForTestCase):
  """Vectorization of logging and assertion ops."""
  @test_util.run_v1_only("b/122612051")
  def test_print(self):
    x = random_ops.random_uniform([3, 5])
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return logging_ops.Print(
          x1, [x1, "x1", array_ops.shape(x1)], summarize=10)
    self._test_loop_fn(loop_fn, 3)
  def test_assert(self):
    def loop_fn(i):
      return control_flow_ops.Assert(i < 10, [i, [10], [i + 1]])
    # TODO(agarwal): make this work with for_loop.
    with session.Session() as sess:
      sess.run(pfor_control_flow_ops.pfor(loop_fn, 3))
class TensorArrayTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_read(self):
ta = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
def loop_fn(i):
return ta.read(i), ta.read(0)
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_gather(self):
ta = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
def loop_fn(i):
return ta.gather([i]), ta.gather([0, 1])
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_write_and_scatter(self):
t = tensor_array_ops.TensorArray(dtypes.int32, 10, clear_after_read=False)
handle = t.handle
def loop_fn(i):
ta = t.write(i + 2, 2 * i).write(i, 5)
ta = ta.scatter([4 + i], [4]).scatter([6 + i, 8 + i], [6 + i, 8 + i])
return ta.flow
t1 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
out1 = tensor_array_ops.TensorArray(
dtypes.int32, handle=handle, flow=t1[-1]).stack()
output1 = self._run_targets(out1)
t2 = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, iters=2)
out2 = tensor_array_ops.TensorArray(
dtypes.int32, handle=handle, flow=t2[-1]).stack()
output2 = self._run_targets(out2)
self.assertAllClose(output2, output1)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_write(self):
def loop_fn(i):
# TODO(agarwal): switching the order of writes to ta1 does not work.
ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).write(0, i).write(
1, 1)
ta2 = tensor_array_ops.TensorArray(dtypes.int32, 1).write(0, 1)
return ta1.stack(), ta2.stack()
self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_scatter(self):
def loop_fn(i):
# TODO(agarwal): switching the order of scatter to ta1 does not work.
ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).scatter(
[0], [[i, 2]]).scatter([1], [[1, 2]])
ta2 = tensor_array_ops.TensorArray(dtypes.int32,
2).scatter([0], [3]).scatter([1], [4])
return ta1.stack(), ta2.stack()
self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)
  @test_util.run_v1_only("b/122612051")
  def test_create_inside_and_read(self):
    def loop_fn(i):
      # Arrays created inside the loop; reads use both constant and
      # loop-dependent indices. clear_after_read=False allows repeated reads.
      ta1 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
      # TODO(agarwal): ta1.read(i) currently is not supported.
      return ta1.read(0), ta2.read(0), ta2.read(i)
    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)
  @test_util.run_v1_only("b/122612051")
  def test_create_inside_and_gather(self):
    def loop_fn(i):
      # Same setup as test_create_inside_and_read, but exercising gather.
      ta1 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
      # TODO(agarwal): ta1.read(i) currently is not supported.
      return ta1.gather([0, 1]), ta2.gather([0, 1]), ta2.gather([i])
    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)
  @test_util.run_v1_only("b/122612051")
  def test_grad(self):
    # Checks pfor conversion of per-row gradients through a TensorArray.
    x = random_ops.random_uniform([3, 2])
    ta = tensor_array_ops.TensorArray(
        dtypes.float32, 3, clear_after_read=False).unstack(x)
    y = math_ops.square(ta.stack())
    def loop_fn(i):
      # Gradient of row i of y w.r.t. x, sliced back down to row i.
      y_i = array_ops.gather(y, i)
      grad = gradient_ops.gradients(y_i, x)[0]
      return array_ops.gather(grad, i)
    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=3)
    # y = x * x. Hence dy/dx = 2 * x.
    actual_grad = 2.0 * x
    with session.Session() as sess:
      # NOTE(review): the unpacking names are swapped here (t1, the pfor
      # result, lands in `actual_grad`) -- harmless since assertAllClose is
      # symmetric, but worth renaming for clarity.
      actual_grad, computed_grad = sess.run([t1, actual_grad])
      self.assertAllClose(actual_grad, computed_grad)
class StackTest(PForTestCase):
  """Tests pfor conversion of the v2 stack ops (push/pop).

  The explicit control dependencies fix the push/pop ordering, which the
  conversion must preserve; restructuring them would change semantics.
  """
  @test_util.run_v1_only("b/122612051")
  def test_stack_inside_loop_invariant(self):
    # Stack created inside the loop; all pushed values are loop-invariant.
    def loop_fn(_):
      s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
      op1 = data_flow_ops.stack_push_v2(s, 1)
      with ops.control_dependencies([op1]):
        op2 = data_flow_ops.stack_push_v2(s, 2)
      with ops.control_dependencies([op2]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e2]):
        e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2
    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
  @test_util.run_v1_only("b/122612051")
  def test_stack_inside_push_loop_dependent(self):
    # Same shape as above, but the first push depends on the loop index.
    def loop_fn(i):
      s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
      op1 = data_flow_ops.stack_push_v2(s, i)
      with ops.control_dependencies([op1]):
        op2 = data_flow_ops.stack_push_v2(s, 2)
      with ops.control_dependencies([op2]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e2]):
        e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2
    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
  @test_util.run_v1_only("b/122612051")
  def test_stack_outside_pop(self):
    # Stack is filled (5, 6, 7) outside the loop; each pfor iteration pops.
    s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
    op = data_flow_ops.stack_push_v2(s, 5)
    with ops.control_dependencies([op]):
      op = data_flow_ops.stack_push_v2(s, 6)
    with ops.control_dependencies([op]):
      op = data_flow_ops.stack_push_v2(s, 7)
    def loop_fn(_):
      e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e1]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2
    with ops.control_dependencies([op]):
      e1, e2 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
    with ops.control_dependencies([e1, e2]):
      e3 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
    v1, v2, v3 = self._run_targets([e1, e2, e3], run_init=False)
    # Pops inside the loop are broadcast: every iteration observes the same
    # top-of-stack values (7 then 6); the leftover 5 is popped outside.
    self.assertAllEqual([7, 7], v1)
    self.assertAllEqual([6, 6], v2)
    self.assertAllEqual(5, v3)
  @test_util.run_v1_only("b/122612051")
  def test_stack_outside_push(self):
    # Pushing to a stack created outside the loop must be rejected.
    s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
    def loop_fn(_):
      return data_flow_ops.stack_push_v2(s, 7)
    with self.assertRaisesRegexp(ValueError, "StackPushV2 not allowed.*"):
      pfor_control_flow_ops.pfor(loop_fn, iters=2)
# TODO(agarwal): test nested while_loops. This currently requires converting a
# tf.cond.
class ControlFlowTest(PForTestCase):
  """Tests pfor conversion of tf.while_loop in various stacking regimes."""
  def test_while_outside_loop(self):
    # while_loop built outside the pfor loop: used as a plain tensor input.
    x = control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
    def loop_fn(i):
      return x + i
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
  @test_util.run_v1_only("b/122612051")
  def test_invariant_while(self):
    # The while_loop is identical across iterations (fully loop-invariant).
    def loop_fn(_):
      return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
  @test_util.run_v1_only("b/122612051")
  def test_invariant_while_with_control_dependency(self):
    # Invariant loop body, but with a control dependency on the pfor index.
    def loop_fn(i):
      with ops.control_dependencies([i]):
        return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
                                           [0])
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
  @test_util.run_v1_only("b/122612051")
  def test_while_with_stateful_ops(self):
    # Body contains a stateful op (random_uniform); only the counter output
    # (index [0]) is compared.
    def loop_fn(_):
      return control_flow_ops.while_loop(
          lambda j, x: j < 4,
          lambda j, x: (j + 1, x + random_ops.random_uniform([])), [0, 0.])[0]
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
  @test_util.run_v1_only("b/122612051")
  def test_while_unstacked_condition(self):
    # Condition is loop-invariant while the body uses the pfor index.
    def loop_fn(i):
      return control_flow_ops.while_loop(lambda j, x: j < 4,
                                         lambda j, x: (j + 1, x + i), [0, 0])
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int32])
  @test_util.run_v1_only("b/122612051")
  def test_while(self):
    # Both the condition (per-row length) and body depend on the pfor index,
    # so different iterations run different trip counts.
    x = random_ops.random_uniform([3, 5])
    lengths = constant_op.constant([4, 0, 2])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      lengths_i = array_ops.gather(lengths, i)
      _, total = control_flow_ops.while_loop(
          lambda j, _: j < lengths_i,
          lambda j, t: (j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
      return total
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
  @test_util.run_v1_only("b/122612051")
  def test_while_jacobian(self):
    # Jacobian of a while_loop output w.r.t. its input, one column per
    # pfor iteration.
    x = random_ops.random_uniform([1, 3])
    y = random_ops.random_uniform([3, 3])
    # out = x @ y @ y @ y @ y, where @ is matmul operator.
    _, out = control_flow_ops.while_loop(
        lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
        [0, x])
    def loop_fn(i):
      out_i = array_ops.gather(out, i, axis=1)
      return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])
    out = pfor_control_flow_ops.pfor(loop_fn, iters=3)
    # The above code does not work with tf.while_loop instead of pfor. So we
    # manually compute the expected output here.
    # Note that gradient of output w.r.t is (y @ y @ y @ y)^T.
    expected_output = y
    for _ in range(3):
      expected_output = math_ops.matmul(expected_output, y)
    expected_output = array_ops.transpose(expected_output, [1, 0])
    with session.Session() as sess:
      out, expected = sess.run([out, expected_output])
      self.assertAllClose(expected, out)
  @test_util.run_v1_only("b/122612051")
  def test_tensor_array_as_loop_variable(self):
    # A TensorArray carried as a while_loop variable inside the pfor body.
    def loop_fn(i):
      def body(j, ta):
        ta = ta.write(j, i + j * j)
        return j + 1, ta
      _, ta = control_flow_ops.while_loop(
          lambda j, _: j < 4, body,
          (0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))
      return ta.stack()
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
  @test_util.run_v1_only("b/122612051")
  def test_read_tensor_array_partitioned_indices(self):
    # Note that tensor array values are pfor loop dependent, and the while loop
    # termination condition is also dependent on pfor iteration.
    def loop_fn(i):
      ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)
      ta = ta.unstack(i + list(range(5)))
      def body(j, s):
        return j + 1, s + ta.read(j)
      _, s = control_flow_ops.while_loop(lambda j, _: j < i,
                                         body,
                                         (0, 0))
      return s
    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
  @test_util.run_v1_only("b/122612051")
  def test_external_while_loop_grad(self):
    # Here we test that external while_loops that are extended from inside pfor
    # (due to gradient calls) are not actually converted. If the below was
    # converted all pfor iterations would write to the same tensor array
    # indices.
    x = constant_op.constant(1.)
    def body(j, ta):
      ta = ta.write(j, x)
      return j + 1, ta
    _, ta = control_flow_ops.while_loop(
        lambda j, _: j < 4, body,
        (0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))
    out = ta.stack()
    def loop_fn(i):
      out_i = array_ops.gather(out, i)
      return gradient_ops.gradients(out_i, x)[0]
    with session.Session() as sess:
      # out is [x, x, x]. Hence the gradients should be [1, 1, 1].
      self.assertAllEqual([1, 1, 1],
                          sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))
  @test_util.run_v1_only("b/122612051")
  def test_tensor_array_grad(self):
    # Gradients flowing through a TensorArray read inside a while_loop that
    # itself lives inside the pfor body.
    inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
    ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
    ta = ta.unstack(inp)
    def loop_fn(i):
      def body(j, x):
        value = ta.gather([j])
        value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
        return j + 1, x + value
      _, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
                                           (0, array_ops.zeros([2])))
      out = math_ops.reduce_prod(out)
      return out, gradient_ops.gradients(out, inp)[0]
    pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
    # Note that tf.while_loop does not work in the setup above. So we manually
    # construct the equivalent computation of the above loops here.
    real_out = math_ops.reduce_sum(inp, axis=[0])
    real_out = math_ops.reduce_prod(real_out, axis=[1])
    # Note that gradients of real_out will accumulate the gradients across the
    # output value. Hence we do the same aggregation on pfor_out_grad.
    real_out_grad = gradient_ops.gradients(real_out, inp)[0]
    sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])
    with session.Session() as sess:
      v1, v2, v1_grad, v2_grad = sess.run(
          [pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
      self.assertAllClose(v1, v2)
      self.assertAllClose(v1_grad, v2_grad)
def dynamic_lstm_input_fn(batch_size, state_size, max_steps):
  """Build constant (inputs, sequence_length) tensors for the RNN tests.

  Baking the random values into constants guarantees that repeated
  session.run calls observe identical data.
  """
  raw_inputs = np.random.rand(batch_size, max_steps, state_size)
  raw_lengths = np.random.randint(low=0, high=max_steps + 1, size=[batch_size])
  inputs = constant_op.constant(raw_inputs, dtype=dtypes.float32)
  sequence_length = constant_op.constant(raw_lengths, dtype=dtypes.int32)
  return inputs, sequence_length
def create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):
  """Builds a hand-rolled per-example RNN under pfor plus a dynamic_rnn ref.

  Returns (pfor_output, tf_output) so tests/benchmarks can compare the two.
  The while_loop/TensorArray wiring below is order-sensitive; keep as-is.
  """
  cell = cell_fn(state_size)
  inputs, sequence_length = dynamic_lstm_input_fn(batch_size,
                                                  state_size,
                                                  max_steps)
  inputs_ta = tensor_array_ops.TensorArray(
      dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])
  # Unstack time-major inputs so inputs_ta.read(t) yields the whole batch at t.
  inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])
  inputs_ta = inputs_ta.unstack(inputs_time_major)
  zeros = array_ops.zeros([state_size])
  def loop_fn(i):
    # One pfor iteration runs the full RNN for example i only.
    sequence_length_i = array_ops.gather(sequence_length, i)
    def body_fn(t, state, ta):
      inputs_t = array_ops.expand_dims(
          array_ops.gather(inputs_ta.read(t), i), 0)
      output, new_state = cell(inputs_t, state)
      output = array_ops.reshape(output, [-1])
      # TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
      # array_ops.where when t < min(sequence_length). Doing that requires
      # supporting tf.cond pfor conversion.
      done = t >= sequence_length_i
      # Past the example's length: emit zeros and freeze the state.
      output = array_ops.where(done, zeros, output)
      ta = ta.write(t, output)
      new_state = [array_ops.where(done, s, ns) for s, ns in
                   zip(nest.flatten(state), nest.flatten(new_state))]
      new_state = nest.pack_sequence_as(state, new_state)
      return t + 1, new_state, ta
    def condition_fn(t, _, unused):
      del unused
      return t < max_steps
    initial_state = cell.zero_state(1, dtypes.float32)
    _, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
        0, initial_state,
        tensor_array_ops.TensorArray(dtypes.float32, max_steps)
    ])
    new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
    new_state = nest.pack_sequence_as(initial_state, new_state)
    return ta.stack(), new_state
  pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)
  # Reference implementation over the full batch.
  tf_output = rnn.dynamic_rnn(
      cell,
      inputs,
      sequence_length=sequence_length,
      initial_state=cell.zero_state(batch_size, dtypes.float32))
  return pfor_output, tf_output
class RNNTest(PForTestCase):
  """Compares the pfor-built RNN against tf's dynamic_rnn for two cell types."""
  def _check_cell(self, cell_fn):
    # Fixed small configuration: batch=3, state=5, max_steps=7.
    outputs = create_dynamic_lstm(cell_fn, 3, 5, 7)
    self.run_and_assert_equal(outputs[0], outputs[1])
  @test_util.run_v1_only("b/122612051")
  def test_dynamic_rnn(self):
    self._check_cell(rnn_cell.BasicRNNCell)
  @test_util.run_v1_only("b/122612051")
  def test_dynamic_lstm(self):
    self._check_cell(rnn_cell.BasicLSTMCell)
# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop
# conversion don't look good. Some of it seems like lot of copies between host
# and device. Optimize that.
class Benchmarks(test.Benchmark):
  """Wall-time benchmarks comparing pfor, for_loop and manual graphs."""
  def _run(self, targets, iters, name=None):
    # Runs `targets` `iters` times and reports the average wall time in ms.
    def _done(t):
      # Note that we don't use tf.control_dependencies since that will not make
      # sure that the computation on GPU has actually finished. So we fetch the
      # first element of the output, and assume that this will not be called on
      # empty tensors.
      return array_ops.gather(array_ops.reshape(t, [-1]), 0)
    targets = [_done(x) for x in nest.flatten(targets)]
    sess = session.Session()
    with sess:
      init = variables.global_variables_initializer()
      sess.run(init)
      run_fn = sess.make_callable(targets)
      run_fn()  # Warm up
      begin = time.time()
      for _ in range(iters):
        run_fn()
      end = time.time()
      avg_time_ms = 1000 * (end - begin) / iters
      self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
      return avg_time_ms
  def benchmark_sess_run_overhead(self):
    # Baseline: cost of a session.run that fetches a single constant.
    with ops.Graph().as_default():
      x = constant_op.constant(1.0)
      self._run(x, 10000, name="session_run_overhead")
  def benchmark_add(self):
    with ops.Graph().as_default():
      n = 256
      params = 1000
      x = random_ops.random_normal([n, params])
      y = random_ops.random_normal([n, params])
      def loop_fn(i):
        x_i = array_ops.gather(x, i)
        y_i = array_ops.gather(y, i)
        return x_i + y_i
      pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
      while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
      manual = x + y
      self._run(manual, 1000, name="manual_add")
      self._run(pfor_outputs, 1000, name="pfor_add")
      self._run(while_outputs, 100, name="while_add")
  def benchmark_matmul(self):
    with ops.Graph().as_default():
      n = 1024
      params = 1000
      x = random_ops.random_normal([n, params])
      y = random_ops.random_normal([params, params])
      def loop_fn(i):
        x_i = array_ops.expand_dims(array_ops.gather(x, i), 0)
        return math_ops.matmul(x_i, y)
      pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
      while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
      manual = math_ops.matmul(x, y)
      self._run(manual, 1000, name="manual_matmul")
      self._run(pfor_outputs, 1000, name="pfor_matmul")
      self._run(while_outputs, 100, name="while_matmul")
  def benchmark_map_fn(self):
    # Compares tf.map_fn against an equivalent pfor-based map.
    with ops.Graph().as_default():
      b = 256
      params = 1000
      inp = random_ops.random_normal((b, params))
      fn = lambda x: x * x
      def pfor_map_fn(f, x):
        return pfor_control_flow_ops.pfor(
            lambda i: f(array_ops.gather(x, i)),
            array_ops.shape(x)[0])
      map_output = map_fn.map_fn(fn, inp)
      pfor_output = pfor_map_fn(fn, inp)
      self._run(map_output, 100, name="tf_map_fn")
      self._run(pfor_output, 100, name="pfor_map_fn")
  def benchmark_basic_while(self):
    with ops.Graph().as_default():
      def loop_fn(i):
        _, s = control_flow_ops.while_loop(
            lambda t, x: t < i,
            lambda t, x: (t + 1, x + i),
            [0, 0])
        return s
      iters = 50
      pfor_output = pfor_control_flow_ops.pfor(loop_fn, iters)
      for_loop_output = pfor_control_flow_ops.for_loop(loop_fn, dtypes.int32,
                                                       iters)
      self._run(pfor_output, 100, name="pfor_basic")
      self._run(for_loop_output, 100, name="for_loop_basic")
  def benchmark_dynamic_rnn(self):
    with ops.Graph().as_default():
      pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,
                                                     128, 512, 16)
      self._run(pfor_outputs, 100, name="pfor_rnn")
      self._run(tf_outputs, 100, name="tf_rnn")
class SparseTest(PForTestCase):
  """Tests pfor conversion when loop_fn returns SparseTensors.

  Covers every combination of stacked/unstacked indices, values and shapes.
  """
  @test_util.run_v1_only("b/122612051")
  def test_var_loop_len(self):
    # Loop length only known at run time (fed via placeholder).
    num_iters = array_ops.placeholder(dtypes.int32)
    def loop_fn(_):
      return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # [0, 2, 0]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    with self.cached_session() as sess:
      sess.run(pfor, feed_dict={num_iters: 3})
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_none_stacked(self):
    # Fully loop-invariant sparse output.
    num_iters = 10
    def loop_fn(_):
      return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # [0, 2, 0]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    indices = [[i, j] for i in range(num_iters) for j in range(3)]
    values = [4, 5, 6] * num_iters
    dense_shapes = [num_iters, 3]
    # Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]
    manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)
    self.run_and_assert_equal(pfor, manual)
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_all_stacked(self):
    # Indices, values and shape all depend on the loop index.
    num_iters = 10
    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      indices = array_ops.expand_dims(i, 0)
      return sparse_tensor.SparseTensor(indices, i, i + 1)  # [0, ..., 0, i]
    # Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
                                        list(range(num_iters)),
                                        (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_indices_stacked(self):
    # Only the indices vary per iteration.
    num_iters = 10
    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      indices = array_ops.expand_dims(i, 0)
      return sparse_tensor.SparseTensor(indices, [1], [num_iters])
    # Expected result: identity matrix size num_iters * num_iters
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
                                        [1] * num_iters, (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_values_stacked(self):
    # Only the values vary per iteration.
    num_iters = 10
    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      return sparse_tensor.SparseTensor([[0]], i, [num_iters])  # [i, 0, ..., 0]
    # Expected result: [[1, 0, ...], [2, 0, ...], [3, 0, ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
                                        list(range(num_iters)),
                                        (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_shapes_stacked(self):
    # Only the dense shape varies per iteration.
    num_iters = 10
    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      return sparse_tensor.SparseTensor([[0]], [1], i + 1)  # [1, 0, ..., 0]
    # Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
                                        [1] * num_iters, (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_shapes_stacked_2D(self):
    # Rank-2 output whose dense shape varies per iteration.
    num_iters = 10
    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)
      shape = array_ops.concat([i, i], 0)
      return sparse_tensor.SparseTensor([[0, 0]], [1], shape)  # [1, 0, ..., 0]
    # Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],
                                        [1] * num_iters,
                                        (num_iters, num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
class ParsingTest(PForTestCase):
  """Tests pfor conversion of parsing ops (decode_csv, parse_single_example)."""
  def test_decode_csv(self):
    csv_tensor = constant_op.constant([["1:2:3"], ["::"], ["7:8:9"]])
    # Empty fields (the "::" row) fall back to the record_defaults.
    kwargs = {"record_defaults": [[10], [20], [30]], "field_delim": ":"}
    def loop_fn(i):
      line = array_ops.gather(csv_tensor, i)
      return parsing_ops.decode_csv(line, **kwargs)
    self._test_loop_fn(loop_fn, iters=3, loop_fn_dtypes=[dtypes.int32] * 3)
  @test_util.run_v1_only("b/122612051")
  def test_parse_single_example(self):
    # per-iteration parse_single_example should match a batched parse_example.
    def _int64_feature(*values):
      return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
    def _bytes_feature(*values):
      return feature_pb2.Feature(
          bytes_list=feature_pb2.BytesList(
              value=[v.encode("utf-8") for v in values]))
    examples = constant_op.constant([
        example_pb2.Example(
            features=feature_pb2.Features(
                feature={
                    "dense_int": _int64_feature(i),
                    "dense_str": _bytes_feature(str(i)),
                    "sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
                    "sparse_str": _bytes_feature(*["abc"] * i)
                })).SerializeToString() for i in range(10)
    ])
    features = {
        "dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
        "dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
        "sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
        "sparse_str": parsing_ops.VarLenFeature(dtypes.string),
    }
    def loop_fn(i):
      example_proto = array_ops.gather(examples, i)
      f = parsing_ops.parse_single_example(example_proto, features)
      return f
    pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)
    manual = parsing_ops.parse_example(examples, features)
    self.run_and_assert_equal(pfor, manual)
# Standard TF test entry point.
if __name__ == "__main__":
  test.main()
| |
import signal
import time
import redis
from rq import get_current_job
from rq.job import JobStatus
from rq.timeouts import JobTimeoutException
from rq.exceptions import NoSuchJobError
from redash import models, redis_connection, settings
from redash.query_runner import InterruptException
from redash.tasks.worker import Queue, Job
from redash.tasks.alerts import check_alerts_for_query
from redash.tasks.failure_report import track_failure
from redash.utils import gen_query_hash, json_dumps, utcnow
from redash.worker import get_job_logger
# Module-level logger bound to this job module's name.
logger = get_job_logger(__name__)
# User-facing error recorded when rq kills a query via its job timeout.
TIMEOUT_MESSAGE = "Query exceeded Redash query execution time limit."
def _job_lock_id(query_hash, data_source_id):
return "query_hash_job:%s:%s" % (data_source_id, query_hash)
def _unlock(query_hash, data_source_id):
    # Release the Redis dedup lock so enqueue_query can schedule this
    # query/data-source pair again.
    redis_connection.delete(_job_lock_id(query_hash, data_source_id))
def enqueue_query(
    query, data_source, user_id, is_api_key=False, scheduled_query=None, metadata=None
):
    """Enqueue an execute_query job, deduplicating concurrent requests.

    A Redis lock keyed on (query hash, data source id) ensures at most one
    live job per query/source pair; if a usable job already exists it is
    returned instead of creating a new one. The check-and-set runs under
    WATCH and is retried up to 5 times on contention.

    Returns the rq Job, or None if no job could be created.
    """
    # BUG FIX: the default used to be the shared mutable literal `{}`. Since
    # this function mutates metadata (it adds the "Queue" key), state leaked
    # across unrelated calls. Use None and build a fresh dict per call.
    if metadata is None:
        metadata = {}
    query_hash = gen_query_hash(query)
    logger.info("Inserting job for %s with metadata=%s", query_hash, metadata)
    try_count = 0
    job = None
    while try_count < 5:
        try_count += 1
        pipe = redis_connection.pipeline()
        try:
            pipe.watch(_job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
            if job_id:
                logger.info("[%s] Found existing job: %s", query_hash, job_id)
                job_complete = None
                job_cancelled = None
                try:
                    job = Job.fetch(job_id)
                    job_exists = True
                    status = job.get_status()
                    job_complete = status in [JobStatus.FINISHED, JobStatus.FAILED]
                    job_cancelled = job.is_cancelled
                    if job_complete:
                        message = "job found is complete (%s)" % status
                    elif job_cancelled:
                        # Typo fix: "has ben cancelled" -> "has been cancelled".
                        message = "job found has been cancelled"
                except NoSuchJobError:
                    message = "job found has expired"
                    job_exists = False
                lock_is_irrelevant = job_complete or job_cancelled or not job_exists
                if lock_is_irrelevant:
                    # Stale lock: the job it points at is finished, cancelled
                    # or gone. Remove it and fall through to enqueue anew.
                    logger.info("[%s] %s, removing lock", query_hash, message)
                    redis_connection.delete(_job_lock_id(query_hash, data_source.id))
                    job = None
            if not job:
                pipe.multi()
                if scheduled_query:
                    queue_name = data_source.scheduled_queue_name
                    scheduled_query_id = scheduled_query.id
                else:
                    queue_name = data_source.queue_name
                    scheduled_query_id = None
                time_limit = settings.dynamic_settings.query_time_limit(
                    scheduled_query, user_id, data_source.org_id
                )
                metadata["Queue"] = queue_name
                queue = Queue(queue_name)
                enqueue_kwargs = {
                    "user_id": user_id,
                    "scheduled_query_id": scheduled_query_id,
                    "is_api_key": is_api_key,
                    "job_timeout": time_limit,
                    "failure_ttl": settings.JOB_DEFAULT_FAILURE_TTL,
                    "meta": {
                        "data_source_id": data_source.id,
                        "org_id": data_source.org_id,
                        "scheduled": scheduled_query_id is not None,
                        "query_id": metadata.get("query_id"),
                        "user_id": user_id,
                    },
                }
                if not scheduled_query:
                    # Ad-hoc results expire; scheduled jobs keep rq defaults.
                    enqueue_kwargs["result_ttl"] = settings.JOB_EXPIRY_TIME
                job = queue.enqueue(
                    execute_query, query, data_source.id, metadata, **enqueue_kwargs
                )
                logger.info("[%s] Created new job: %s", query_hash, job.id)
                pipe.set(
                    _job_lock_id(query_hash, data_source.id),
                    job.id,
                    settings.JOB_EXPIRY_TIME,
                )
                pipe.execute()
            break
        except redis.WatchError:
            # Lock key changed under us; retry the whole check-and-set.
            continue
    if not job:
        logger.error("[Manager][%s] Failed adding job for query.", query_hash)
    return job
def signal_handler(*args):
    # SIGINT handler installed by QueryExecutor.run: converts the signal into
    # an InterruptException so the query runner can abort the query cleanly.
    raise InterruptException
class QueryExecutionError(Exception):
    """Raised by QueryExecutor.run (and returned by execute_query) on failure."""
    pass
def _resolve_user(user_id, is_api_key, query_id):
    """Map the job's user_id onto a User/ApiUser model (None if anonymous)."""
    if user_id is None:
        return None
    if not is_api_key:
        return models.User.get_by_id(user_id)
    # For API-key access, user_id actually carries the API key itself.
    api_key = user_id
    if query_id is not None:
        query = models.Query.get_by_id(query_id)
    else:
        query = models.Query.by_api_key(api_key)
    return models.ApiUser(api_key, query.org, query.groups)
class QueryExecutor(object):
    """Runs a single query inside an rq job and persists its result.

    The DB session merge/commit ordering below is deliberate; do not reorder.
    """
    def __init__(
        self, query, data_source_id, user_id, is_api_key, metadata, is_scheduled_query
    ):
        self.job = get_current_job()
        self.query = query
        self.data_source_id = data_source_id
        self.metadata = metadata
        self.data_source = self._load_data_source()
        self.query_id = metadata.get("query_id")
        self.user = _resolve_user(user_id, is_api_key, metadata.get("query_id"))
        # "adhoc" marks one-off runs with no persisted Query model.
        self.query_model = (
            models.Query.query.get(self.query_id)
            if self.query_id and self.query_id != "adhoc"
            else None
        )
        # Close DB connection to prevent holding a connection for a long time while the query is executing.
        models.db.session.close()
        self.query_hash = gen_query_hash(self.query)
        self.is_scheduled_query = is_scheduled_query
        if self.is_scheduled_query:
            # Load existing tracker or create a new one if the job was created before code update:
            models.scheduled_queries_executions.update(self.query_model.id)
    def run(self):
        """Execute the query; returns the stored QueryResult id.

        Raises QueryExecutionError (after tracking the failure for scheduled
        queries) when the runner returns an error and no data.
        """
        signal.signal(signal.SIGINT, signal_handler)
        started_at = time.time()
        logger.debug("Executing query:\n%s", self.query)
        self._log_progress("executing_query")
        query_runner = self.data_source.query_runner
        annotated_query = self._annotate_query(query_runner)
        try:
            data, error = query_runner.run_query(annotated_query, self.user)
        except Exception as e:
            # A JobTimeoutException means rq killed us; report the friendly
            # timeout message instead of the raw exception text.
            if isinstance(e, JobTimeoutException):
                error = TIMEOUT_MESSAGE
            else:
                error = str(e)
            data = None
            logger.warning("Unexpected error while running query:", exc_info=1)
        run_time = time.time() - started_at
        logger.info(
            "job=execute_query query_hash=%s ds_id=%d data_length=%s error=[%s]",
            self.query_hash,
            self.data_source_id,
            data and len(data),
            error,
        )
        # Release the dedup lock regardless of outcome.
        _unlock(self.query_hash, self.data_source.id)
        if error is not None and data is None:
            result = QueryExecutionError(error)
            if self.is_scheduled_query:
                # Re-attach the detached model before recording the failure.
                self.query_model = models.db.session.merge(self.query_model, load=False)
                track_failure(self.query_model, error)
            raise result
        else:
            # Successful run: reset the failure counter without bumping
            # updated_at.
            if self.query_model and self.query_model.schedule_failures > 0:
                self.query_model = models.db.session.merge(self.query_model, load=False)
                self.query_model.schedule_failures = 0
                self.query_model.skip_updated_at = True
                models.db.session.add(self.query_model)
            query_result = models.QueryResult.store_result(
                self.data_source.org_id,
                self.data_source,
                self.query_hash,
                self.query,
                data,
                run_time,
                utcnow(),
            )
            updated_query_ids = models.Query.update_latest_result(query_result)
            models.db.session.commit()  # make sure that alert sees the latest query result
            self._log_progress("checking_alerts")
            for query_id in updated_query_ids:
                check_alerts_for_query.delay(query_id)
            self._log_progress("finished")
            result = query_result.id
            models.db.session.commit()
            return result
    def _annotate_query(self, query_runner):
        # Attach job/run context so the runner can embed it (e.g. as a SQL
        # comment) for traceability.
        self.metadata["Job ID"] = self.job.id
        self.metadata["Query Hash"] = self.query_hash
        self.metadata["Scheduled"] = self.is_scheduled_query
        return query_runner.annotate_query(self.query, self.metadata)
    def _log_progress(self, state):
        # Structured progress line; one per execution phase.
        logger.info(
            "job=execute_query state=%s query_hash=%s type=%s ds_id=%d "
            "job_id=%s queue=%s query_id=%s username=%s",
            state,
            self.query_hash,
            self.data_source.type,
            self.data_source.id,
            self.job.id,
            self.metadata.get("Queue", "unknown"),
            self.metadata.get("query_id", "unknown"),
            self.metadata.get("Username", "unknown"),
        )
    def _load_data_source(self):
        logger.info("job=execute_query state=load_ds ds_id=%d", self.data_source_id)
        return models.DataSource.query.get(self.data_source_id)
# user_id is added last as a keyword argument for backward compatability -- to support executing previously submitted
# jobs before the upgrade to this version.
def execute_query(
    query,
    data_source_id,
    metadata,
    user_id=None,
    scheduled_query_id=None,
    is_api_key=False,
):
    """rq entry point: run a query and return the QueryResult id.

    On failure the DB session is rolled back and the QueryExecutionError is
    returned (not raised) so rq records it as the job's result.
    """
    is_scheduled = scheduled_query_id is not None
    try:
        executor = QueryExecutor(
            query, data_source_id, user_id, is_api_key, metadata, is_scheduled
        )
        return executor.run()
    except QueryExecutionError as e:
        models.db.session.rollback()
        return e
| |
import ctypes
# Library
# Load the installed MIT Kerberos shared library (SONAME libkrb5.so.3).
libkrb5 = ctypes.cdll.LoadLibrary("libkrb5.so.3")
# Constants
# krb5_get_credentials option flags.
KRB5_GC_USER_USER = 1
KRB5_GC_CACHED = 2
# Types
# Python aliases for the libkrb5 scalar typedefs; widths must match the
# C declarations so struct layouts below line up with the library ABI.
krb5_int32 = ctypes.c_int32
krb5_error_code = krb5_int32
krb5_magic = krb5_error_code
krb5_flags = krb5_int32
krb5_enctype = krb5_int32
krb5_octet = ctypes.c_ubyte
krb5_timestamp = krb5_int32
krb5_boolean = ctypes.c_uint
krb5_addrtype = krb5_int32
krb5_authdatatype = krb5_int32
krb5_kvno = ctypes.c_uint
# Opaque handle types: the library only ever hands out pointers to these,
# so no fields are declared.
class _krb5_context(ctypes.Structure): pass
krb5_context = ctypes.POINTER(_krb5_context)
class _krb5_ccache(ctypes.Structure): pass
krb5_ccache = ctypes.POINTER(_krb5_ccache)
class _krb5_kt(ctypes.Structure): pass
krb5_keytab = ctypes.POINTER(_krb5_kt)
class krb5_data(ctypes.Structure):
    """ctypes mirror of C krb5_data: a counted byte buffer.

    Field order/types must match the installed libkrb5 ABI exactly.
    """
    _fields_ = [('magic', krb5_magic),
                ('length', ctypes.c_uint),
                ('data', ctypes.POINTER(ctypes.c_char))]
    def as_str(self):
        # Copy exactly `length` bytes; the buffer may contain NULs.
        return ctypes.string_at(self.data, self.length)
class krb5_principal_data(ctypes.Structure):
    """ctypes mirror of C krb5_principal_data (realm plus name components)."""
    _fields_ = [('magic', krb5_magic),
                ('realm', krb5_data),
                ('data', ctypes.POINTER(krb5_data)),
                ('length', krb5_int32),
                ('type', krb5_int32)]
# Both aliases point at the same layout; "const" has no ctypes equivalent.
krb5_principal = ctypes.POINTER(krb5_principal_data)
krb5_const_principal = ctypes.POINTER(krb5_principal_data)
class krb5_keyblock(ctypes.Structure):
    """ctypes mirror of C krb5_keyblock: enctype plus raw key bytes."""
    _fields_ = [('magic', krb5_magic),
                ('enctype', krb5_enctype),
                ('length', ctypes.c_uint),
                ('contents', ctypes.POINTER(krb5_octet))]
    def contents_as_str(self):
        # Copy the raw key material out of the C buffer.
        return ctypes.string_at(self.contents, self.length)
class krb5_ticket_times(ctypes.Structure):
    """ctypes mirror of C krb5_ticket_times (ticket lifetime timestamps)."""
    _fields_ = [('authtime', krb5_timestamp),
                ('starttime', krb5_timestamp),
                ('endtime', krb5_timestamp),
                ('renew_till', krb5_timestamp)]
class krb5_address(ctypes.Structure):
    """ctypes mirror of C krb5_address: typed network address bytes."""
    _fields_ = [('magic', krb5_magic),
                ('addrtype', krb5_addrtype),
                ('length', ctypes.c_uint),
                ('contents', ctypes.POINTER(krb5_octet))]
    def contents_as_str(self):
        # Copy the raw address bytes out of the C buffer.
        return ctypes.string_at(self.contents, self.length)
class krb5_authdata(ctypes.Structure):
    """ctypes mirror of C krb5_authdata: typed authorization-data blob."""
    _fields_ = [('magic', krb5_magic),
                ('ad_type', krb5_authdatatype),
                ('length', ctypes.c_uint),
                ('contents', ctypes.POINTER(krb5_octet))]
class krb5_creds(ctypes.Structure):
    """ctypes mirror of C krb5_creds (a full credential entry).

    `addresses` and `authdata` are NULL-terminated arrays of pointers,
    hence pointer-to-pointer.
    """
    _fields_ = [('magic', krb5_magic),
                ('client', krb5_principal),
                ('server', krb5_principal),
                ('keyblock', krb5_keyblock),
                ('times', krb5_ticket_times),
                ('is_skey', krb5_boolean),
                ('ticket_flags', krb5_flags),
                ('addresses', ctypes.POINTER(ctypes.POINTER(krb5_address))),
                ('ticket', krb5_data),
                ('second_ticket', krb5_data),
                ('authdata', ctypes.POINTER(ctypes.POINTER(krb5_authdata)))]
krb5_creds_ptr = ctypes.POINTER(krb5_creds)
class krb5_enc_data(ctypes.Structure):
    """ctypes mirror of C krb5_enc_data: ciphertext plus enctype/kvno."""
    _fields_ = [('magic', krb5_magic),
                ('enctype', krb5_enctype),
                ('kvno', krb5_kvno),
                ('ciphertext', krb5_data)]
class krb5_transited(ctypes.Structure):
    """ctypes mirror of C krb5_transited (transited-realms encoding)."""
    _fields_ = [('magic', krb5_magic),
                ('tr_type', krb5_octet),
                ('tr_contents', krb5_data)]
class krb5_enc_tkt_part(ctypes.Structure):
    """ctypes mirror of C krb5_enc_tkt_part (decrypted ticket payload)."""
    _fields_ = [('magic', krb5_magic),
                ('flags', krb5_flags),
                ('session', ctypes.POINTER(krb5_keyblock)),
                ('client', krb5_principal),
                ('transited', krb5_transited),
                ('times', krb5_ticket_times),
                ('caddrs', ctypes.POINTER(ctypes.POINTER(krb5_address))),
                ('authorization_data', ctypes.POINTER(ctypes.POINTER(krb5_authdata)))]
class krb5_ticket(ctypes.Structure):
    """ctypes mirror of C krb5_ticket.

    `enc_part2` is only populated after the encrypted part is decrypted.
    """
    _fields_ = [('magic', krb5_magic),
                ('server', krb5_principal),
                ('enc_part', krb5_enc_data),
                ('enc_part2', ctypes.POINTER(krb5_enc_tkt_part))]
krb5_ticket_ptr = ctypes.POINTER(krb5_ticket)
# Don't do the conversion on return.
# (Subclassing c_char_p stops ctypes auto-converting the result to bytes,
# which would lose the pointer we must later pass to krb5_free_error_message.)
class _c_char_p_noconv(ctypes.c_char_p): pass
# Functions
# ctypes prototypes for the libkrb5 entry points used by this module.
# Declaring restype/argtypes up front gives us argument conversion and
# basic type checking on every call.
krb5_init_context = libkrb5.krb5_init_context
krb5_init_context.restype = krb5_error_code
krb5_init_context.argtypes = (ctypes.POINTER(krb5_context),)
krb5_free_context = libkrb5.krb5_free_context
krb5_free_context.restype = None
krb5_free_context.argtypes = (krb5_context,)
krb5_cc_default = libkrb5.krb5_cc_default
krb5_cc_default.restype = krb5_error_code
krb5_cc_default.argtypes = (krb5_context, ctypes.POINTER(krb5_ccache))
krb5_cc_close = libkrb5.krb5_cc_close
krb5_cc_close.restype = krb5_error_code
krb5_cc_close.argtypes = (krb5_context, krb5_ccache)
krb5_cc_get_principal = libkrb5.krb5_cc_get_principal
krb5_cc_get_principal.restype = krb5_error_code
krb5_cc_get_principal.argtypes = (krb5_context,
                                  krb5_ccache,
                                  ctypes.POINTER(krb5_principal))
krb5_free_principal = libkrb5.krb5_free_principal
krb5_free_principal.restype = None
krb5_free_principal.argtypes = (krb5_context, krb5_principal)
krb5_unparse_name = libkrb5.krb5_unparse_name
krb5_unparse_name.restype = krb5_error_code
krb5_unparse_name.argtypes = (krb5_context,
                              krb5_const_principal,
                              ctypes.POINTER(ctypes.c_char_p))
krb5_free_unparsed_name = libkrb5.krb5_free_unparsed_name
krb5_free_unparsed_name.restype = None
krb5_free_unparsed_name.argtypes = (krb5_context, ctypes.c_char_p)
krb5_get_error_message = libkrb5.krb5_get_error_message
# _c_char_p_noconv keeps the raw pointer alive so it can be handed back
# to krb5_free_error_message below.
krb5_get_error_message.restype = _c_char_p_noconv
krb5_get_error_message.argtypes = (krb5_context, krb5_error_code)
krb5_free_error_message = libkrb5.krb5_free_error_message
krb5_free_error_message.restype = None
krb5_free_error_message.argtypes = (krb5_context, ctypes.c_char_p)
krb5_build_principal = libkrb5.krb5_build_principal
krb5_build_principal.restype = krb5_error_code
# This takes varargs. Supposedly things using the C calling convention
# can take extra args in ctypes?
krb5_build_principal.argtypes = (krb5_context,
                                 ctypes.POINTER(krb5_principal),
                                 ctypes.c_uint,
                                 ctypes.POINTER(ctypes.c_char))
krb5_get_credentials = libkrb5.krb5_get_credentials
krb5_get_credentials.restype = krb5_error_code
krb5_get_credentials.argtypes = (krb5_context,
                                 krb5_flags,
                                 krb5_ccache,
                                 krb5_creds_ptr,
                                 ctypes.POINTER(krb5_creds_ptr))
krb5_free_creds = libkrb5.krb5_free_creds
krb5_free_creds.restype = None
krb5_free_creds.argtypes = (krb5_context, krb5_creds_ptr)
krb5_decode_ticket = libkrb5.krb5_decode_ticket
krb5_decode_ticket.restype = krb5_error_code
krb5_decode_ticket.argtypes = (ctypes.POINTER(krb5_data),
                               ctypes.POINTER(krb5_ticket_ptr))
krb5_free_ticket = libkrb5.krb5_free_ticket
krb5_free_ticket.restype = None
krb5_free_ticket.argtypes = (krb5_context, krb5_ticket_ptr)
krb5_kt_default = libkrb5.krb5_kt_default
krb5_kt_default.restype = krb5_error_code
krb5_kt_default.argtypes = (krb5_context, ctypes.POINTER(krb5_keytab))
krb5_kt_resolve = libkrb5.krb5_kt_resolve
krb5_kt_resolve.restype = krb5_error_code
krb5_kt_resolve.argtypes = (krb5_context, ctypes.c_char_p,
                            ctypes.POINTER(krb5_keytab))
krb5_kt_close = libkrb5.krb5_kt_close
krb5_kt_close.restype = krb5_error_code
# Bug fix: this line previously reassigned krb5_kt_default.argtypes
# (a copy-paste slip), which both left krb5_kt_close without a prototype
# and clobbered krb5_kt_default's correct POINTER(krb5_keytab) signature
# declared above.
krb5_kt_close.argtypes = (krb5_context, krb5_keytab)
krb5_server_decrypt_ticket_keytab = libkrb5.krb5_server_decrypt_ticket_keytab
krb5_server_decrypt_ticket_keytab.restype = krb5_error_code
krb5_server_decrypt_ticket_keytab.argtypes = (krb5_context,
                                              krb5_keytab,
                                              ctypes.POINTER(krb5_ticket))
| |
from sympy import (Symbol, Rational, ln, exp, log, sqrt, E, O, pi, I, sinh,
sin, cosh, cos, tanh, coth, asinh, acosh, atanh, acoth, tan, cot, Integer,
PoleError, floor, ceiling, asin, symbols, limit, Piecewise, Eq, sign,
Derivative)
from sympy.abc import x, y, z
from sympy.utilities.pytest import raises, XFAIL
# nseries on atoms, constants, and expressions free of the expansion
# variable is the identity (no O() term is produced).
def test_simple_1():
    assert x.nseries(x, n=5) == x
    assert y.nseries(x, n=5) == y
    assert (1/(x*y)).nseries(y, n=5) == 1/(x*y)
    assert Rational(3, 4).nseries(x, n=5) == Rational(3, 4)
    assert x.nseries() == x
# x*log(x) is already its own expansion about 0.
def test_mul_0():
    assert (x*ln(x)).nseries(x, n=5) == x*ln(x)
def test_mul_1():
    assert (x*ln(2 + x)).nseries(x, n=5) == x*log(2) + x**2/2 - x**3/8 + \
        x**4/24 + O(x**5)
    assert (x*ln(1 + x)).nseries(
        x, n=5) == x**2 - x**3/2 + x**4/3 + O(x**5)
# Pure powers (including negative and fractional exponents) expand to
# themselves exactly.
def test_pow_0():
    assert (x**2).nseries(x, n=5) == x**2
    assert (1/x).nseries(x, n=5) == 1/x
    assert (1/x**2).nseries(x, n=5) == 1/x**2
    assert (x**Rational(2, 3)).nseries(x, n=5) == (x**Rational(2, 3))
    assert (sqrt(x)**3).nseries(x, n=5) == (sqrt(x)**3)
# Polynomials come back exactly, with no O() term.
def test_pow_1():
    assert ((1 + x)**2).nseries(x, n=5) == 1 + 2*x + x**2
def test_geometric_1():
    assert (1/(1 - x)).nseries(x, n=5) == 1 + x + x**2 + x**3 + x**4 + O(x**5)
    assert (x/(1 - x)).nseries(x, n=6) == x + x**2 + x**3 + x**4 + x**5 + O(x**6)
    assert (x**3/(1 - x)).nseries(x, n=8) == x**3 + x**4 + x**5 + x**6 + \
        x**7 + O(x**8)
def test_sqrt_1():
    assert sqrt(1 + x).nseries(x, n=5) == 1 + x/2 - x**2/8 + x**3/16 - 5*x**4/128 + O(x**5)
def test_exp_1():
    assert exp(x).nseries(x, n=5) == 1 + x + x**2/2 + x**3/6 + x**4/24 + O(x**5)
    assert exp(x).nseries(x, n=12) == 1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + \
        x**6/720 + x**7/5040 + x**8/40320 + x**9/362880 + x**10/3628800 + \
        x**11/39916800 + O(x**12)
    # exp(1/x) has an essential singularity at 0, so it is left unexpanded.
    assert exp(1/x).nseries(x, n=5) == exp(1/x)
    assert exp(1/(1 + x)).nseries(x, n=4) == \
        (E*(1 - x - 13*x**3/6 + 3*x**2/2)).expand() + O(x**4)
    assert exp(2 + x).nseries(x, n=5) == \
        (exp(2)*(1 + x + x**2/2 + x**3/6 + x**4/24)).expand() + O(x**5)
# Expansion in sqrt(x) produces half-integer powers.
def test_exp_sqrt_1():
    assert exp(1 + sqrt(x)).nseries(x, n=3) == \
        (exp(1)*(1 + sqrt(x) + x/2 + sqrt(x)*x/6)).expand() + O(sqrt(x)**3)
# x**x == exp(x*log(x)); both spellings must expand identically.
def test_power_x_x1():
    assert (exp(x*ln(x))).nseries(x, n=4) == \
        1 + x*log(x) + x**2*log(x)**2/2 + x**3*log(x)**3/6 + O(x**4*log(x)**4)
def test_power_x_x2():
    assert (x**x).nseries(x, n=4) == \
        1 + x*log(x) + x**2*log(x)**2/2 + x**3*log(x)**3/6 + O(x**4*log(x)**4)
def test_log_singular1():
    assert log(1 + 1/x).nseries(x, n=5) == x - log(x) - x**2/2 + x**3/3 - \
        x**4/4 + O(x**5)
def test_log_power1():
    e = 1 / (1/x + x ** (log(3)/log(2)))
    assert e.nseries(x, n=5) == x - x**(2 + log(3)/log(2)) + O(x**5)
# logx= substitutes a plain symbol for log(x) so logs stay unevaluated.
def test_log_series():
    l = Symbol('l')
    e = 1/(1 - log(x))
    assert e.nseries(x, n=5, logx=l) == 1/(1 - l)
def test_log2():
    e = log(-1/x)
    assert e.nseries(x, n=5) == -log(x) + log(-1)
def test_log3():
    l = Symbol('l')
    e = 1/log(-1/x)
    assert e.nseries(x, n=4, logx=l) == 1/(-l + log(-1))
# Explicit (var, point, n) form: the O() term must track n exactly.
def test_series1():
    e = sin(x)
    assert e.nseries(x, 0, 0) != 0
    assert e.nseries(x, 0, 0) == O(1, x)
    assert e.nseries(x, 0, 1) == O(x, x)
    assert e.nseries(x, 0, 2) == x + O(x**2, x)
    assert e.nseries(x, 0, 3) == x + O(x**3, x)
    assert e.nseries(x, 0, 4) == x - x**3/6 + O(x**4, x)
    e = (exp(x) - 1)/x
    assert e.nseries(x, 0, 3) == 1 + x/2 + O(x**2, x)
    assert x.nseries(x, 0, 2) == x
# Known failure: a bare symbol ignores the requested truncation order.
@XFAIL
def test_series1_failing():
    assert x.nseries(x, 0, 0) == O(1, x)
    assert x.nseries(x, 0, 1) == O(x, x)
def test_seriesbug1():
    assert (1/x).nseries(x, 0, 3) == 1/x
    assert (x + 1/x).nseries(x, 0, 3) == x + 1/x
# Integer powers of (x + 1) and rational functions with poles at 0.
def test_series2x():
    assert ((x + 1)**(-2)).nseries(x, 0, 4) == 1 - 2*x + 3*x**2 - 4*x**3 + O(x**4, x)
    assert ((x + 1)**(-1)).nseries(x, 0, 4) == 1 - x + x**2 - x**3 + O(x**4, x)
    assert ((x + 1)**0).nseries(x, 0, 3) == 1
    assert ((x + 1)**1).nseries(x, 0, 3) == 1 + x
    assert ((x + 1)**2).nseries(x, 0, 3) == 1 + 2*x + x**2
    assert ((x + 1)**3).nseries(
        x, 0, 3) == 1 + 3*x + 3*x**2 + x**3 # 1+3*x+3*x**2+O(x**3)
    assert (1/(1 + x)).nseries(x, 0, 4) == 1 - x + x**2 - x**3 + O(x**4, x)
    assert (x + 3/(1 + 2*x)).nseries(x, 0, 4) == 3 - 5*x + 12*x**2 - 24*x**3 + O(x**4, x)
    assert ((1/x + 1)**3).nseries(x, 0, 3) == 1 + x**(-3) + 3*x**(-2) + 3/x
    assert (1/(1 + 1/x)).nseries(x, 0, 4) == x - x**2 + x**3 - O(x**4, x)
    assert (1/(1 + 1/x**2)).nseries(x, 0, 6) == x**2 - x**4 + O(x**6, x)
def test_bug2(): # 1/log(0) * log(0) problem
    w = Symbol("w")
    e = (w**(-1) + w**(
        -log(3)*log(2)**(-1)))**(-1)*(3*w**(-log(3)*log(2)**(-1)) + 2*w**(-1))
    e = e.expand()
    assert e.nseries(w, 0, 4).subs(w, 0) == 3
def test_exp():
    e = (1 + x)**(1/x)
    assert e.nseries(x, n=3) == exp(1) - x*exp(1)/2 + O(x**2, x)
def test_exp2():
    w = Symbol("w")
    e = w**(1 - log(x)/(log(2) + log(x)))
    logw = Symbol("logw")
    assert e.nseries(
        w, 0, 1, logx=logw) == exp(logw - logw*log(x)/(log(2) + log(x)))
def test_bug3():
    e = (2/x + 3/x**2)/(1/x + 1/x**2)
    assert e.nseries(x, n=3) == 3 + O(x)
# Leading behaviour for both integer and fractional pole orders p.
def test_generalexponent():
    p = 2
    e = (2/x + 3/x**p)/(1/x + 1/x**p)
    assert e.nseries(x, 0, 3) == 3 + O(x)
    p = Rational(1, 2)
    e = (2/x + 3/x**p)/(1/x + 1/x**p)
    assert e.nseries(x, 0, 2) == 2 + sqrt(x) + O(x)
    e = 1 + sqrt(x)
    assert e.nseries(x, 0, 4) == 1 + sqrt(x)
# more complicated example
def test_genexp_x():
    e = 1/(1 + sqrt(x))
    assert e.nseries(x, 0, 2) == \
        1 + x - sqrt(x) - sqrt(x)**3 + O(x**2, x)
# more complicated example
def test_genexp_x2():
    p = Rational(3, 2)
    e = (2/x + 3/x**p)/(1/x + 1/x**p)
    assert e.nseries(x, 0, 3) == 3 - sqrt(x) + x + O(sqrt(x)**3)
def test_seriesbug2():
    w = Symbol("w")
    #simple case (1):
    e = ((2*w)/w)**(1 + w)
    assert e.nseries(w, 0, 1) == 2 + O(w, w)
    assert e.nseries(w, 0, 1).subs(w, 0) == 2
def test_seriesbug2b():
    w = Symbol("w")
    #test sin
    e = sin(2*w)/w
    assert e.nseries(w, 0, 3) == 2 + O(w**2, w)
def test_seriesbug2d():
    w = Symbol("w", real=True)
    e = log(sin(2*w)/w)
    assert e.series(w, n=5) == log(2) - 2*w**2/3 - 4*w**4/45 + O(w**5)
def test_seriesbug2c():
    w = Symbol("w", real=True)
    #more complicated case, but sin(x)~x, so the result is the same as in (1)
    e = (sin(2*w)/w)**(1 + w)
    assert e.series(w, 0, 1) == 2 + O(w)
    assert e.series(w, 0, 3) == 2 + 2*w*log(2) + \
        w**2*(-Rational(4, 3) + log(2)**2) + O(w**3)
    assert e.series(w, 0, 2).subs(w, 0) == 2
def test_expbug4():
    x = Symbol("x", real=True)
    assert (log(
        sin(2*x)/x)*(1 + x)).series(x, 0, 2) == log(2) + x*log(2) + O(x**2, x)
    assert exp(
        log(sin(2*x)/x)*(1 + x)).series(x, 0, 2) == 2 + 2*x*log(2) + O(x**2)
    # Series arithmetic on expressions that already contain O() terms.
    assert exp(log(2) + O(x)).nseries(x, 0, 2) == 2 + O(x)
    assert ((2 + O(x))**(1 + x)).nseries(x, 0, 2) == 2 + O(x)
def test_logbug4():
    assert log(2 + O(x)).nseries(x, 0, 2) == log(2) + O(x, x)
def test_expbug5():
    assert exp(log(1 + x)/x).nseries(x, n=3) == exp(1) + -exp(1)*x/2 + O(x**2)
    assert exp(O(x)).nseries(x, 0, 2) == 1 + O(x)
# Composed and quotient trig expansions (historical regression issues).
def test_sinsinbug():
    assert sin(sin(x)).nseries(x, 0, 8) == x - x**3/3 + x**5/10 - 8*x**7/315 + O(x**8)
def test_issue_3258():
    a = x/(exp(x) - 1)
    assert a.nseries(x, 0, 5) == 1 - x/2 - x**4/720 + x**2/12 + O(x**5)
def test_issue_3204():
    x = Symbol("x", nonnegative=True)
    f = sin(x**3)**Rational(1, 3)
    assert f.nseries(x, 0, 17) == x - x**7/18 - x**13/3240 + O(x**17)
def test_issue_3224():
    f = sqrt(1 - sqrt(y))
    assert f.nseries(y, 0, 2) == 1 - sqrt(y)/2 - y/8 - sqrt(y)**3/16 + O(y**2)
def test_issue_3463():
    from sympy import summation, symbols
    w, i = symbols('w,i')
    r = log(5)/log(3)
    p = w**(-1 + r)
    e = 1/x*(-log(w**(1 + r)) + log(w + w**r))
    e_ser = -r*log(w)/x + p/x - p**2/(2*x) + O(p**3)
    assert e.nseries(w, n=3) == e_ser
def test_sin():
    assert sin(8*x).nseries(x, n=4) == 8*x - 256*x**3/3 + O(x**4)
    assert sin(x + y).nseries(x, n=1) == sin(y) + O(x)
    assert sin(x + y).nseries(x, n=2) == sin(y) + cos(y)*x + O(x**2)
    assert sin(x + y).nseries(x, n=5) == sin(y) + cos(y)*x - sin(y)*x**2/2 - \
        cos(y)*x**3/6 + sin(y)*x**4/24 + O(x**5)
def test_issue_3515():
    e = sin(8*x)/x
    assert e.nseries(x, n=6) == 8 - 256*x**2/3 + 4096*x**4/15 + O(x**5)
def test_issue_3505():
    e = sin(x)**(-4)*(sqrt(cos(x))*sin(x)**2 -
        cos(x)**Rational(1, 3)*sin(x)**2)
    assert e.nseries(x, n=9) == -Rational(1)/12 - 7*x**2/288 - \
        43*x**4/10368 + O(x**5)
# More trig regressions, with free symbolic parameters in the expansion.
def test_issue_3501():
    a = Symbol("a")
    e = x**(-2)*(x*sin(a + x) - x*sin(a))
    assert e.nseries(x, n=6) == cos(a) - sin(a)*x/2 - cos(a)*x**2/6 + \
        sin(a)*x**3/24 + O(x**4)
    e = x**(-2)*(x*cos(a + x) - x*cos(a))
    assert e.nseries(x, n=6) == -sin(a) - cos(a)*x/2 + sin(a)*x**2/6 + \
        cos(a)*x**3/24 + O(x**4)
def test_issue_3502():
    e = sin(5*x)/sin(2*x)
    assert e.nseries(x, n=2) == Rational(5, 2) + O(x)
    assert e.nseries(x, n=6) == \
        Rational(5, 2) - 35*x**2/4 + 329*x**4/48 + O(x**5)
def test_issue_3503():
    e = sin(2 + x)/(2 + x)
    assert e.nseries(x, n=2) == sin(2)/2 + x*cos(2)/2 - x*sin(2)/4 + O(x**2)
def test_issue_3506():
    e = (x + sin(3*x))**(-2)*(x*(x + sin(3*x)) - (x + sin(3*x))*sin(2*x))
    assert e.nseries(x, n=7) == \
        -Rational(1, 4) + 5*x**2/96 + 91*x**4/768 + O(x**5)
def test_issue_3508():
    x = Symbol("x", real=True)
    assert log(sin(x)).series(x, n=5) == log(x) - x**2/6 - x**4/180 + O(x**5)
    e = -log(x) + x*(-log(x) + log(sin(2*x))) + log(sin(2*x))
    assert e.series(x, n=5) == \
        log(2) + log(2)*x - 2*x**2/3 - 2*x**3/3 - 4*x**4/45 + O(x**5)
def test_issue_3507():
    e = x**(-4)*(x**2 - x**2*sqrt(cos(x)))
    assert e.nseries(x, n=9) == \
        Rational(1, 4) + x**2/96 + 19*x**4/5760 + O(x**5)
def test_issue_3639():
    assert sin(cos(x)).nseries(x, n=5) == \
        sin(1) - x**2*cos(1)/2 - x**4*sin(1)/8 + x**4*cos(1)/24 + O(x**5)
# Hyperbolic and inverse-hyperbolic expansions, plus mixed-symbol bugs.
def test_hyperbolic():
    assert sinh(x).nseries(x, n=6) == x + x**3/6 + x**5/120 + O(x**6)
    assert cosh(x).nseries(x, n=5) == 1 + x**2/2 + x**4/24 + O(x**5)
    assert tanh(x).nseries(x, n=6) == x - x**3/3 + 2*x**5/15 + O(x**6)
    assert coth(x).nseries(x, n=6) == \
        1/x - x**3/45 + x/3 + 2*x**5/945 + O(x**6)
    assert asinh(x).nseries(x, n=6) == x - x**3/6 + 3*x**5/40 + O(x**6)
    assert acosh(x).nseries(x, n=6) == \
        pi*I/2 - I*x - 3*I*x**5/40 - I*x**3/6 + O(x**6)
    assert atanh(x).nseries(x, n=6) == x + x**3/3 + x**5/5 + O(x**6)
    assert acoth(x).nseries(x, n=6) == x + x**3/3 + x**5/5 + pi*I/2 + O(x**6)
def test_series2():
    w = Symbol("w", real=True)
    x = Symbol("x", real=True)
    e = w**(-2)*(w*exp(1/x - w) - w*exp(1/x))
    assert e.nseries(w, n=4) == -exp(1/x) + w * exp(1/x) / 2 + O(w**2)
def test_series3():
    w = Symbol("w", real=True)
    x = Symbol("x", real=True)
    e = w**(-6)*(w**3*tan(w) - w**3*sin(w))
    assert e.nseries(w, n=8) == Integer(1)/2 + O(w**2)
def test_bug4():
    w = Symbol("w")
    e = x/(w**4 + x**2*w**4 + 2*x*w**4)*w**4
    assert e.nseries(w, n=2) in [x/(1 + 2*x + x**2),
        1/(1 + x/2 + 1/x/2)/2, 1/x/(1 + 2/x + x**(-2))]
def test_bug5():
    w = Symbol("w")
    l = Symbol('l')
    e = (-log(w) + log(1 + w*log(x)))**(-2)*w**(-2)*((-log(w) +
        log(1 + x*w))*(-log(w) + log(1 + w*log(x)))*w - x*(-log(w) +
        log(1 + w*log(x)))*w)
    assert e.nseries(w, n=2, logx=l) == x/w/l + 1/w + O(1, w)
    assert e.nseries(w, n=3, logx=l) == x/w/l + 1/w - x/l + 1/l*log(x) \
        + x*log(x)/l**2 + O(w)
def test_issue_4115():
    assert (sin(x)/(1 - cos(x))).nseries(x, n=1) == O(1/x)
    assert (sin(x)**2/(1 - cos(x))).nseries(x, n=1) == O(1, x)
# Expansion at an essential singularity must raise PoleError, not hang.
def test_pole():
    raises(PoleError, lambda: sin(1/x).series(x, 0, 5))
    raises(PoleError, lambda: sin(1 + 1/x).series(x, 0, 5))
    raises(PoleError, lambda: (x*sin(1/x)).series(x, 0, 5))
def test_expsinbug():
    assert exp(sin(x)).series(x, 0, 0) == O(1, x)
    assert exp(sin(x)).series(x, 0, 1) == 1 + O(x)
    assert exp(sin(x)).series(x, 0, 2) == 1 + x + O(x**2)
    assert exp(sin(x)).series(x, 0, 3) == 1 + x + x**2/2 + O(x**3)
    assert exp(sin(x)).series(x, 0, 4) == 1 + x + x**2/2 + O(x**4)
    assert exp(sin(x)).series(x, 0, 5) == 1 + x + x**2/2 - x**4/8 + O(x**5)
# floor/ceiling are locally constant, so their series is a constant; the
# sign of the argument's leading term decides which side of a jump is used.
def test_floor():
    x = Symbol('x')
    assert floor(x).series(x) == 0
    assert floor(-x).series(x) == -1
    assert floor(sin(x)).series(x) == 0
    assert floor(sin(-x)).series(x) == -1
    assert floor(x**3).series(x) == 0
    assert floor(-x**3).series(x) == -1
    assert floor(cos(x)).series(x) == 0
    assert floor(cos(-x)).series(x) == 0
    assert floor(5 + sin(x)).series(x) == 5
    assert floor(5 + sin(-x)).series(x) == 4
    assert floor(x).series(x, 2) == 2
    assert floor(-x).series(x, 2) == -3
    x = Symbol('x', negative=True)
    assert floor(x + 1.5).series(x) == 1
def test_ceiling():
    assert ceiling(x).series(x) == 1
    assert ceiling(-x).series(x) == 0
    assert ceiling(sin(x)).series(x) == 1
    assert ceiling(sin(-x)).series(x) == 0
    assert ceiling(1 - cos(x)).series(x) == 1
    assert ceiling(1 - cos(-x)).series(x) == 1
    assert ceiling(x).series(x, 2) == 3
    assert ceiling(-x).series(x, 2) == -2
def test_abs():
    a = Symbol('a')
    assert abs(x).nseries(x, n=4) == x
    assert abs(-x).nseries(x, n=4) == x
    assert abs(x + 1).nseries(x, n=4) == x + 1
    assert abs(sin(x)).nseries(x, n=4) == x - Rational(1, 6)*x**3 + O(x**4)
    assert abs(sin(-x)).nseries(x, n=4) == x - Rational(1, 6)*x**3 + O(x**4)
    assert abs(x - a).nseries(x, 1) == Piecewise((x - 1, Eq(1 - a, 0)),
                                                 ((x - a)*sign(1 - a), True))
# dir="+"/"-" selects the one-sided limit used at a discontinuity.
def test_dir():
    assert abs(x).series(x, 0, dir="+") == x
    assert abs(x).series(x, 0, dir="-") == -x
    assert floor(x + 2).series(x, 0, dir='+') == 2
    assert floor(x + 2).series(x, 0, dir='-') == 1
    assert floor(x + 2.2).series(x, 0, dir='-') == 2
    assert ceiling(x + 2.2).series(x, 0, dir='-') == 3
    assert sin(x + y).series(x, 0, dir='-') == sin(x + y).series(x, 0, dir='+')
# Series about a non-zero expansion point (x0 = 4, pi/2, pi).
def test_issue_3504():
    a = Symbol("a")
    e = asin(a*x)/x
    assert e.series(x, 4, n=2).removeO().subs(x, x - 4) == \
        (x - 4)*(a/(4*sqrt(-16*a**2 + 1)) - asin(4*a)/16) + asin(4*a)/4
def test_issue_4441():
    a, b = symbols('a,b')
    f = 1/(1 + a*x)
    assert f.series(x, 0, 5) == 1 - a*x + a**2*x**2 - a**3*x**3 + \
        a**4*x**4 + O(x**5)
    f = 1/(1 + (a + b)*x)
    assert f.series(x, 0, 3) == 1 + x*(-a - b) + \
        x**2*(a**2 + 2*a*b + b**2) + O(x**3)
def test_issue_4329():
    assert tan(x).series(x, pi/2, n=3).removeO().subs(x, x - pi/2) == \
        -pi/6 + x/3 - 1/(x - pi/2)
    assert cot(x).series(x, pi, n=3).removeO().subs(x, x - pi) == \
        -x/3 + pi/3 + 1/(x - pi)
    assert limit(tan(x)**tan(2*x), x, pi/4) == exp(-1)
def test_issue_5183():
    assert abs(x + x**2).series(n=1) == O(x)
    assert abs(x + x**2).series(n=2) == x + O(x**2)
    assert ((1 + x)**2).series(x, n=6) == 1 + 2*x + x**2
    assert (1 + 1/x).series() == 1 + 1/x
    assert Derivative(exp(x).series(), x).doit() == \
        1 + x + x**2/2 + x**3/6 + x**4/24 + O(x**5)
# Laurent expansion about the pole x0 = I*a.
def test_issue_5654():
    a = Symbol('a')
    assert (1/(x**2+a**2)**2).nseries(x, x0=I*a, n=0) == \
        -I/(4*a**3*x) - 1/(4*a**2*x**2) + O(1, x)
    assert (1/(x**2+a**2)**2).nseries(x, x0=I*a, n=1) == \
        3/(16*a**4) - I/(4*a**3*x) - 1/(4*a**2*x**2) + O(x)
# Substituting into an expansion must agree with expanding the
# substituted expression.
def test_issue_5925():
    sx = sqrt(x + z).series(z, 0, 1)
    sxy = sqrt(x + y + z).series(z, 0, 1)
    s1, s2 = sx.subs(x, x + y), sxy
    assert (s1 - s2).expand().removeO().simplify() == 0
    sx = sqrt(x + z).series(z, 0, 1)
    sxy = sqrt(x + y + z).series(z, 0, 1)
    assert sxy.subs({x:1, y:2}) == sx.subs(x, 3)
| |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""A page model for LHN"""
from lib import base
from lib.constants import locator
from lib.element import lhn
from lib.page import modal
class _Programs(lhn.AccordionGroup):
  """Programs dropdown in LHN"""
  # Class-level configuration read by the lhn.AccordionGroup base:
  # spinner, create-new button, member list, and the modal opened by
  # create-new.
  _locator_spinny = locator.LhnMenu.SPINNY_PROGRAMS
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_PROGRAMS
  _locator_accordion_members = locator.LhnMenu.ACCORDION_MEMBERS_PROGRAMS
  _create_new_modal_cls = modal.create_new_object.Programs
class _Workflows(lhn.AccordionGroup):
  """Workflows dropdown in LHN"""
  _locator_spinny = locator.LhnMenu.SPINNY_WORKFLOWS
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_WORKFLOWS
  def __init__(self, driver):
    super(_Workflows, self).__init__(driver)
    # Workflow-state filter buttons (Active/Inactive/Draft) that only
    # this accordion exposes.
    self.button_active = base.Button(
        self._driver, locator.LhnMenu.BUTTON_WORKFLOWS_ACTIVE)
    self.button_inactive = base.Button(
        self._driver, locator.LhnMenu.BUTTON_WORKFLOWS_INACTIVE)
    self.button_draft = base.Button(
        self._driver, locator.LhnMenu.BUTTON_WORKFLOWS_DRAFT)
# Simple accordion groups: only locators differ from the base class.
class _Audits(lhn.AccordionGroup):
  """Audits dropdown in LHN"""
  _locator_spinny = locator.LhnMenu.SPINNY_AUDITS
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_AUDITS
class _Assessments(lhn.AccordionGroup):
  """Assessments dropdown in LHN"""
  _locator_spinny = locator.LhnMenu.SPINNY_ASSESSMENTS
  _locator_button_create_new = locator.LhnMenu \
      .BUTTON_CREATE_NEW_ASSESSMENTS
class _Issues(lhn.AccordionGroup):
  """Issues dropdown in LHN"""
  _locator_spinny = locator.LhnMenu.SPINNY_ISSUES
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_ISSUES
  _locator_accordion_members = locator.LhnMenu.ACCORDION_MEMBERS_ISSUES
  _create_new_modal_cls = modal.create_new_object.Issues
class _Directives(lhn.DropdownStatic):
  """Directives dropdown in LHN"""
  _locator_element = locator.LhnMenu.DIRECTIVES
  def __init__(self, driver):
    super(_Directives, self).__init__(driver)
    # One expandable toggle per directive type; each pairs the toggle
    # locator with the locator of its member count.
    self.toggle_regulations = lhn.Toggle(
        self._driver,
        locator.LhnMenu.REGULATIONS,
        locator.LhnMenu.REGULATIONS_COUNT)
    self.toggle_policies = lhn.Toggle(
        self._driver,
        locator.LhnMenu.POLICIES,
        locator.LhnMenu.POLICIES_COUNT)
    self.toggle_standards = lhn.Toggle(
        self._driver,
        locator.LhnMenu.STANDARDS,
        locator.LhnMenu.STANDARDS_COUNT)
    self.toggle_contracts = lhn.Toggle(
        self._driver,
        locator.LhnMenu.CONTRACTS,
        locator.LhnMenu.CONTRACTS_COUNT)
    self.toggle_clauses = lhn.Toggle(
        self._driver,
        locator.LhnMenu.CLAUSES,
        locator.LhnMenu.CLAUSES_COUNT)
    self.toggle_sections = lhn.Toggle(
        self._driver,
        locator.LhnMenu.SECTIONS,
        locator.LhnMenu.SECTIONS_COUNT)
  # Each select_* method expands the corresponding toggle and returns the
  # page object modelling that accordion.
  def select_regulations(self):
    """
    Returns:
        _Regulations
    """
    self.toggle_regulations.toggle()
    return _Regulations(self._driver)
  def select_policies(self):
    """
    Returns:
        _Policies
    """
    self.toggle_policies.toggle()
    return _Policies(self._driver)
  def select_standards(self):
    """
    Returns:
        _Standards
    """
    self.toggle_standards.toggle()
    return _Standards(self._driver)
  def select_contracts(self):
    """
    Returns:
        _Contracts
    """
    self.toggle_contracts.toggle()
    return _Contracts(self._driver)
  def select_clauses(self):
    """
    Returns:
        _Clauses
    """
    self.toggle_clauses.toggle()
    return _Clauses(self._driver)
  def select_sections(self):
    """
    Returns:
        _Sections
    """
    self.toggle_sections.toggle()
    return _Sections(self._driver)
# Accordion groups for the individual directive types.
class _Regulations(lhn.AccordionGroup):
  """Regulations dropdown in LHN"""
  _locator_spinny = locator.LhnMenu.SPINNY_REGULATIONS
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_REGULATIONS
class _Policies(lhn.AccordionGroup):
  """Policies dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_POLICIES
  _locator_spinny = locator.LhnMenu.SPINNY_POLICIES
class _Standards(lhn.AccordionGroup):
  """Standards dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_STANDARDS
  _locator_spinny = locator.LhnMenu.SPINNY_STANDARDS
class _Contracts(lhn.AccordionGroup):
  """Contracts dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_CONTRACTS
  # NOTE(review): looks like a copy-paste slip -- every sibling class uses
  # its own SPINNY_* locator, but this one points at SPINNY_REGULATIONS.
  # Should presumably be locator.LhnMenu.SPINNY_CONTRACTS; confirm the
  # constant exists in lib.constants.locator before fixing.
  _locator_spinny = locator.LhnMenu.SPINNY_REGULATIONS
class _Clauses(lhn.AccordionGroup):
  """Clauses dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_CLAUSES
  _locator_spinny = locator.LhnMenu.SPINNY_CLAUSES
class _Sections(lhn.AccordionGroup):
  """Sections dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_SECTIONS
  _locator_spinny = locator.LhnMenu.SPINNY_SECTIONS
class _ControlsOrObjectives(lhn.DropdownStatic):
  """Controls or objects dropdown in LHN"""
  _locator_element = locator.LhnMenu.TOGGLE_CONTROLS_OR_OBJECTIVES
  def __init__(self, driver):
    super(_ControlsOrObjectives, self).__init__(driver)
    # Sub-toggles with their member-count locators.
    self.toggle_controls = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_CONTROLS,
        locator.LhnMenu.COUNT_CONTROLS)
    self.toggle_objectives = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_OBJECTIVES,
        locator.LhnMenu.COUNT_OBJECTIVES)
  def select_controls(self):
    """
    Returns:
        Controls
    """
    self.toggle_controls.toggle()
    return Controls(self._driver)
  def select_objectives(self):
    """
    Returns:
        _Objectives
    """
    self.toggle_objectives.toggle()
    return _Objectives(self._driver)
# Controls is deliberately public (no leading underscore), unlike its
# siblings -- presumably referenced directly from tests; verify callers
# before renaming.
class Controls(lhn.AccordionGroup):
  """Controls dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_CONTROLS
  _locator_spinny = locator.LhnMenu.SPINNY_CONTROLS
  _locator_accordion_members = locator.LhnMenu.ACCORDION_MEMBERS_CONTROLS
  _create_new_modal_cls = modal.create_new_object.Controls
class _Objectives(lhn.AccordionGroup):
  """Objectives dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_OBJECTIVES
  _locator_spinny = locator.LhnMenu.SPINNY_OBJECTIVES
class _PeopleOrGroups(lhn.DropdownStatic):
  """People or groups dropdown in LHN"""
  _locator_element = locator.LhnMenu.TOGGLE_PEOPLE_OR_GROUPS
  def __init__(self, driver):
    super(_PeopleOrGroups, self).__init__(driver)
    # Sub-toggles with their member-count locators.
    self.toggle_people = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_PEOPLE,
        locator.LhnMenu.COUNT_PEOPLE)
    self.toggle_org_groups = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_ORG_GROUPS,
        locator.LhnMenu.COUNT_ORG_GROUPS)
    self.toggle_vendors = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_VENDORS,
        locator.LhnMenu.COUNT_VENDORS)
    self.toggle_access_groups = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_ACCESS_GROUPS,
        locator.LhnMenu.COUNT_ACCESS_GROUPS)
  def select_people(self):
    """
    Returns:
        _People
    """
    self.toggle_people.toggle()
    return _People(self._driver)
  def select_org_groups(self):
    """
    Returns:
        _OrgGroups
    """
    self.toggle_org_groups.toggle()
    return _OrgGroups(self._driver)
  def select_vendors(self):
    """
    Returns:
        _Vendors
    """
    self.toggle_vendors.toggle()
    return _Vendors(self._driver)
  def select_access_groups(self):
    """
    Returns:
        _AccessGroups
    """
    self.toggle_access_groups.toggle()
    return _AccessGroups(self._driver)
# Accordion groups for the people/groups section.
class _People(lhn.AccordionGroup):
  """People dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_PEOPLE
  _locator_spinny = locator.LhnMenu.SPINNY_PEOPLE
class _OrgGroups(lhn.AccordionGroup):
  """Org groups dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_ORG_GROUPS
  _locator_spinny = locator.LhnMenu.SPINNY_ORG_GROUPS
  _locator_accordion_members = locator.LhnMenu.ACCORDION_MEMBERS_ORG_GROUPS
  _create_new_modal_cls = modal.create_new_object.OrgGroups
class _Vendors(lhn.AccordionGroup):
  """Vendors dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_VENDORS
  _locator_spinny = locator.LhnMenu.SPINNY_VENDORS
class _AccessGroups(lhn.AccordionGroup):
  """Access groups dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_ACCESS_GROUPS
  _locator_spinny = locator.LhnMenu.SPINNY_ACCESS_GROUPS
class _AssetsOrBusiness(lhn.DropdownStatic):
  """Assets or business dropdown in LHN"""
  _locator_element = locator.LhnMenu.TOGGLE_ASSETS_OR_BUSINESS
  def __init__(self, driver):
    super(_AssetsOrBusiness, self).__init__(driver)
    # One sub-toggle per asset/business object type, each with its
    # member-count locator.
    self.toggle_systems = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_SYSTEMS,
        locator.LhnMenu.COUNT_SYSTEMS)
    self.toggle_processes = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_PROCESSES,
        locator.LhnMenu.COUNT_PROCESSES)
    self.toggle_data_assets = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_DATA_ASSETS,
        locator.LhnMenu.COUNT_DATA_ASSETS)
    self.toggle_products = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_PRODUCTS,
        locator.LhnMenu.COUNT_PRODUCTS)
    self.toggle_projects = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_PROJECTS,
        locator.LhnMenu.COUNT_PROJECTS)
    self.toggle_facilities = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_FACILITIES,
        locator.LhnMenu.COUNT_FACILITIES)
    self.toggle_markets = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_MARKETS,
        locator.LhnMenu.COUNT_MARKETS)
  def select_systems(self):
    """
    Returns:
        _Systems
    """
    self.toggle_systems.toggle()
    return _Systems(self._driver)
  def select_processes(self):
    """
    Returns:
        _Processes
    """
    self.toggle_processes.toggle()
    return _Processes(self._driver)
  def select_data_assets(self):
    """
    Returns:
        _DataAssets
    """
    self.toggle_data_assets.toggle()
    return _DataAssets(self._driver)
  def select_products(self):
    """
    Returns:
        _Products
    """
    self.toggle_products.toggle()
    return _Products(self._driver)
  def select_projects(self):
    """
    Returns:
        _Projects
    """
    self.toggle_projects.toggle()
    return _Projects(self._driver)
  def select_facilities(self):
    """
    Returns:
        _Facilities
    """
    self.toggle_facilities.toggle()
    return _Facilities(self._driver)
  def select_markets(self):
    """
    Returns:
        _Markets
    """
    self.toggle_markets.toggle()
    return _Markets(self._driver)
# Accordion groups for the assets/business section.
class _Systems(lhn.AccordionGroup):
  """Systems dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_SYSTEMS
  _locator_spinny = locator.LhnMenu.SPINNY_SYSTEMS
  _create_new_modal_cls = modal.create_new_object.Systems
  _locator_accordion_members = locator.LhnMenu.ACCORDION_MEMBERS_SYSTEMS
class _Processes(lhn.AccordionGroup):
  """Processes dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_PROCESSES
  _locator_spinny = locator.LhnMenu.SPINNY_PROCESSES
  _create_new_modal_cls = modal.create_new_object.Processes
  _locator_accordion_members = locator.LhnMenu.ACCORDION_MEMBERS_PROCESSES
class _DataAssets(lhn.AccordionGroup):
  """Data assets dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_DATA_ASSETS
  _locator_spinny = locator.LhnMenu.SPINNY_DATA_ASSETS
  _create_new_modal_cls = modal.create_new_object.DataAssets
  _locator_accordion_members = locator.LhnMenu.ACCORDION_MEMBERS_DATA_ASSETS
class _Products(lhn.AccordionGroup):
  """Products dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_PRODUCTS
  _locator_spinny = locator.LhnMenu.SPINNY_PRODUCTS
  _create_new_modal_cls = modal.create_new_object.Products
  _locator_accordion_members = locator.LhnMenu.ACCORDION_MEMBERS_PRODUCTS
class _Projects(lhn.AccordionGroup):
  """Projects dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_PROJECTS
  _locator_spinny = locator.LhnMenu.SPINNY_PROJECTS
  # NOTE(review): every other class pairs with its own modal class, but
  # this one uses ...create_new_object.Processes -- likely a copy-paste
  # slip.  Should presumably be modal.create_new_object.Projects; confirm
  # that class exists before fixing.
  _create_new_modal_cls = modal.create_new_object.Processes
  _locator_accordion_members = locator.LhnMenu.ACCORDION_MEMBERS_PROJECTS
class _Facilities(lhn.AccordionGroup):
  """Facilities dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_FACILITIES
  _locator_spinny = locator.LhnMenu.SPINNY_FACILITIES
class _Markets(lhn.AccordionGroup):
  """Markets dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_MARKETS
  _locator_spinny = locator.LhnMenu.SPINNY_MARKETS
class _RisksOrThreats(lhn.DropdownStatic):
  """Risks or threats dropdown in LHN"""
  _locator_element = locator.LhnMenu.TOGGLE_RISK_OR_THREATS
  def __init__(self, driver):
    super(_RisksOrThreats, self).__init__(driver)
    # Sub-toggles with their member-count locators.
    self.toggle_risks = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_RISKS,
        locator.LhnMenu.COUNT_RISKS)
    self.toggle_threats = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_THREATS,
        locator.LhnMenu.COUNT_THREATS)
  def select_risks(self):
    """
    Returns:
        _Risks
    """
    self.toggle_risks.toggle()
    return _Risks(self._driver)
  def select_threats(self):
    """
    Returns:
        _Threats
    """
    self.toggle_threats.toggle()
    return _Threats(self._driver)
# Accordion groups for the risks/threats section.
class _Risks(lhn.AccordionGroup):
  """Risks dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_RISKS
  _locator_spinny = locator.LhnMenu.SPINNY_RISKS
  _locator_accordion_members = locator.LhnMenu.ACCORDION_MEMBERS_RISKS
  _create_new_modal_cls = modal.create_new_object.Risks
class _Threats(lhn.AccordionGroup):
  """Threats dropdown in LHN"""
  _locator_button_create_new = locator.LhnMenu.BUTTON_CREATE_NEW_THREATS
  _locator_spinny = locator.LhnMenu.SPINNY_THREATS
class Menu(base.AnimatedComponent):
  """Model of the LHN (left-hand navigation) menu."""

  def __init__(self, driver):
    # Wait until both tab headers ("My Objects" / "All Objects") are
    # visible before the menu is considered ready.
    super(Menu, self).__init__(
        driver,
        [lhn.MyObjectsTab.locator_element,
         lhn.AllObjectsTab.locator_element],
        wait_until_visible=True)
    self.my_objects = lhn.MyObjectsTab(driver)
    self.all_objects = lhn.AllObjectsTab(driver)
    # Placeholders only; the real element wrappers are (re)created by
    # reload_elements() so they can be refreshed after DOM changes.
    self.pin = None
    self.filter = None
    self.toggle_programs = None
    self.toggle_workflows = None
    self.toggle_audits = None
    self.toggle_assessments = None
    self.toggle_issues = None
    self.toggle_directives = None
    self.toggle_controls_or_objectives = None
    self.toggle_people_or_groups = None
    self.toggle_assets_or_business = None
    self.toggle_risks_or_threats = None
    self.reload_elements()

  def reload_elements(self):
    """Each dropdown in LHN has a count of members in brackets which we
    update."""
    self.filter = base.Filter(
        self._driver,
        locator.LhnMenu.FILTER_TEXT_BOX,
        locator.LhnMenu.FILTER_SUBMIT_BUTTON,
        locator.LhnMenu.FILTER_CLEAR_BUTTON)
    self.pin = base.Toggle(self._driver, locator.LhnMenu.PIN)
    # lhn.Toggle wrappers additionally track the member count shown next
    # to the dropdown title.
    self.toggle_programs = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_PROGRAMS,
        locator.LhnMenu.COUNT_PROGRAMS)
    self.toggle_workflows = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_WORKFLOWS,
        locator.LhnMenu.COUNT_WORKFLOWS)
    self.toggle_audits = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_AUDITS,
        locator.LhnMenu.COUNT_AUDITS)
    self.toggle_assessments = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_ASSESSMENTS,
        locator.LhnMenu.COUNT_ASSESSMENTS)
    self.toggle_issues = lhn.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_ISSUES,
        locator.LhnMenu.COUNT_ISSUES)
    # Plain base.Toggle wrappers: these dropdowns carry no member count.
    self.toggle_directives = base.Toggle(
        self._driver,
        locator.LhnMenu.DIRECTIVES)
    self.toggle_controls_or_objectives = base.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_CONTROLS_OR_OBJECTIVES)
    self.toggle_people_or_groups = base.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_PEOPLE_OR_GROUPS)
    self.toggle_assets_or_business = base.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_ASSETS_OR_BUSINESS)
    self.toggle_risks_or_threats = base.Toggle(
        self._driver,
        locator.LhnMenu.TOGGLE_RISK_OR_THREATS)

  def filter_query(self, query):
    """Type ``query`` into the LHN filter text box (does not submit)."""
    self.filter.filter_query(query)

  def submit_query(self):
    """Submit the current filter query and refresh cached elements."""
    self.filter.submit_query()
    self.reload_elements()

  def select_programs(self):
    """Expand the Programs dropdown.

    Returns:
      _Programs
    """
    self.toggle_programs.toggle()
    return _Programs(self._driver)

  def select_workflows(self):
    """Expand the Workflows dropdown.

    Returns:
      _Workflows
    """
    self.toggle_workflows.toggle()
    return _Workflows(self._driver)

  def select_audits(self):
    """Expand the Audits dropdown.

    Returns:
      _Audits
    """
    self.toggle_audits.toggle()
    return _Audits(self._driver)

  def select_assessments(self):
    """Expand the Assessments dropdown.

    Returns:
      _Assessments
    """
    self.toggle_assessments.toggle()
    return _Assessments(self._driver)

  def select_issues(self):
    """Expand the Issues dropdown.

    Returns:
      _Issues
    """
    self.toggle_issues.toggle()
    return _Issues(self._driver)

  def select_directives(self):
    """Expand the Directives dropdown.

    Returns:
      _Directives
    """
    self.toggle_directives.toggle()
    return _Directives(self._driver)

  def select_controls_or_objectives(self):
    """Expand the Controls/Objectives dropdown.

    Returns:
      _ControlsOrObjectives
    """
    self.toggle_controls_or_objectives.toggle()
    return _ControlsOrObjectives(self._driver)

  def select_people_or_groups(self):
    """Expand the People/Groups dropdown.

    Returns:
      _PeopleOrGroups
    """
    self.toggle_people_or_groups.toggle()
    return _PeopleOrGroups(self._driver)

  def select_assets_or_business(self):
    """Expand the Assets/Business dropdown.

    Returns:
      _AssetsOrBusiness
    """
    self.toggle_assets_or_business.toggle()
    return _AssetsOrBusiness(self._driver)

  def select_risks_or_threats(self):
    """Expand the Risks/Threats dropdown.

    Returns:
      _RisksOrThreats
    """
    self.toggle_risks_or_threats.toggle()
    return _RisksOrThreats(self._driver)

  def select_my_objects(self):
    """In LHN selects the tab "My Objects"

    Returns:
      LhnContents
    """
    self.my_objects.click()
    self.all_objects.is_activated = False
    # NOTE(review): this returns a fresh instance of self.__class__; the
    # documented "LhnContents" return presumably refers to the subclass
    # this is invoked on -- confirm against callers.
    return self.__class__(self._driver)

  def select_all_objects(self):
    """ In LHN selects the tab "All Objects"

    Returns:
      LhnContents
    """
    self.all_objects.click()
    self.my_objects.is_activated = False
    # See NOTE in select_my_objects about the returned type.
    return self.__class__(self._driver)
# ---- boundary marker between unrelated concatenated sources ----
"""
Contains implementation interface for finding spots on one or many images
"""
from __future__ import annotations
import logging
import math
import pickle
from typing import Iterable, Tuple
import libtbx
from dxtbx.format.image import ImageBool
from dxtbx.imageset import ImageSequence, ImageSet
from dxtbx.model import ExperimentList
from dials.array_family import flex
from dials.model.data import PixelList, PixelListLabeller
from dials.util import Sorry, log
from dials.util.log import rehandle_cached_records
from dials.util.mp import available_cores, batch_multi_node_parallel_map
logger = logging.getLogger(__name__)
class ExtractPixelsFromImage:
    """
    A class to extract strong pixels from a single image of an imageset.
    """

    def __init__(
        self,
        imageset,
        threshold_function,
        mask,
        region_of_interest,
        max_strong_pixel_fraction,
        compute_mean_background,
    ):
        """
        Initialise the class

        :param imageset: The imageset to extract from
        :param threshold_function: The function to threshold with
        :param mask: The image mask (one panel mask per detector panel)
        :param region_of_interest: A region of interest to process
        :param max_strong_pixel_fraction: The maximum fraction of pixels allowed
        :param compute_mean_background: Whether to log the mean background
        """
        self.threshold_function = threshold_function
        self.imageset = imageset
        self.mask = mask
        self.region_of_interest = region_of_interest
        self.max_strong_pixel_fraction = max_strong_pixel_fraction
        self.compute_mean_background = compute_mean_background
        if self.mask is not None:
            # A static mask must supply exactly one entry per panel.
            detector = self.imageset.get_detector()
            assert len(self.mask) == len(detector)

    def __call__(self, index):
        """
        Extract strong pixels from an image

        :param index: The index of the image
        :return: A list with one PixelList per detector panel
        :raises RuntimeError: if more than max_strong_pixel_fraction of the
            pixels are flagged as strong
        """
        # Get the frame number
        if isinstance(self.imageset, ImageSequence):
            frame = self.imageset.get_array_range()[0] + index
        else:
            ind = self.imageset.indices()
            if len(ind) > 1:
                # BUG FIX: pair every index with its successor over the whole
                # list.  The previous code zipped ind[0:-1] with ind[1:-1],
                # silently dropping the final pair from the contiguity check.
                assert all(i1 + 1 == i2 for i1, i2 in zip(ind[0:-1], ind[1:]))
            frame = ind[index]

        # Create the list of pixel lists
        pixel_list = []

        # Get the image and mask
        image = self.imageset.get_corrected_data(index)
        mask = self.imageset.get_mask(index)

        # Combine the per-image mask with the static mask, if supplied
        if self.mask is not None:
            assert len(self.mask) == len(mask)
            mask = tuple(m1 & m2 for m1, m2 in zip(mask, self.mask))

        logger.debug(
            "Number of masked pixels for image %i: %i",
            index,
            sum(m.count(False) for m in mask),
        )

        # Add the images to the pixel lists
        num_strong = 0
        average_background = 0
        for i_panel, (im, mk) in enumerate(zip(image, mask)):
            if self.region_of_interest is not None:
                # Threshold only the requested window; everything outside is
                # left False in the final threshold mask.
                x0, x1, y0, y1 = self.region_of_interest
                height, width = im.all()
                assert x0 < x1, "x0 < x1"
                assert y0 < y1, "y0 < y1"
                assert x0 >= 0, "x0 >= 0"
                assert y0 >= 0, "y0 >= 0"
                assert x1 <= width, "x1 <= width"
                assert y1 <= height, "y1 <= height"
                im_roi = im[y0:y1, x0:x1]
                mk_roi = mk[y0:y1, x0:x1]
                tm_roi = self.threshold_function.compute_threshold(
                    im_roi,
                    mk_roi,
                    imageset=self.imageset,
                    i_panel=i_panel,
                    region_of_interest=self.region_of_interest,
                )
                threshold_mask = flex.bool(im.accessor(), False)
                threshold_mask[y0:y1, x0:x1] = tm_roi
            else:
                threshold_mask = self.threshold_function.compute_threshold(
                    im, mk, imageset=self.imageset, i_panel=i_panel
                )

            # Add the pixel list
            plist = PixelList(frame, im, threshold_mask)
            pixel_list.append(plist)

            # Get average background (mean of unmasked, non-strong pixels)
            if self.compute_mean_background:
                background = im.as_1d().select((mk & ~threshold_mask).as_1d())
                average_background += flex.mean(background)

            # Add to the spot count
            num_strong += len(plist)

        # Make average background
        # NOTE(review): assumes the image has at least one panel, otherwise
        # this divides by zero -- confirm callers guarantee this.
        average_background /= len(image)

        # Check total number of strong pixels
        if self.max_strong_pixel_fraction < 1:
            num_image = 0
            for im in image:
                num_image += len(im)
            max_strong = int(math.ceil(self.max_strong_pixel_fraction * num_image))
            if num_strong > max_strong:
                raise RuntimeError(
                    f"""
          The number of strong pixels found ({num_strong}) is greater than the
          maximum allowed ({max_strong}). Try changing spot finding parameters
        """
                )

        # Print some info
        if self.compute_mean_background:
            logger.info(
                "Found %d strong pixels on image %d with average background %f",
                num_strong,
                frame + 1,
                average_background,
            )
        else:
            logger.info("Found %d strong pixels on image %d", num_strong, frame + 1)

        # Return the result
        return pixel_list
class ExtractPixelsFromImage2DNoShoeboxes(ExtractPixelsFromImage):
    """
    Extract strong pixels from a single image and reduce them directly to a
    reflection table, dropping the shoeboxes to save memory.
    """

    def __init__(
        self,
        imageset,
        threshold_function,
        mask,
        region_of_interest,
        max_strong_pixel_fraction,
        compute_mean_background,
        min_spot_size,
        max_spot_size,
        filter_spots,
    ):
        """
        Initialise the class

        :param imageset: The imageset to extract from
        :param threshold_function: The function to threshold with
        :param mask: The image mask
        :param region_of_interest: A region of interest to process
        :param max_strong_pixel_fraction: The maximum fraction of pixels allowed
        :param min_spot_size: Minimum number of pixels in a spot
        :param max_spot_size: Maximum number of pixels in a spot
        :param filter_spots: The spot filtering strategy
        """
        super().__init__(
            imageset,
            threshold_function,
            mask,
            region_of_interest,
            max_strong_pixel_fraction,
            compute_mean_background,
        )
        # Parameters for the per-image reduction to a reflection table.
        self.min_spot_size = min_spot_size
        self.max_spot_size = max_spot_size
        self.filter_spots = filter_spots

    def __call__(self, index):
        """
        Extract strong pixels from an image

        :param index: The index of the image
        :return: A single-element list holding the reflection table
        """
        # One labeller per detector panel.
        labellers = [
            PixelListLabeller() for _ in range(len(self.imageset.get_detector()))
        ]

        # Extract the strong pixels via the parent implementation.
        pixel_lists = super().__call__(index)

        assert len(labellers) == len(pixel_lists), "Inconsistent size"
        for labeller, plist in zip(labellers, pixel_lists):
            labeller.add(plist)

        # Reduce the labelled pixels straight to a reflection table.
        reflections, _ = pixel_list_to_reflection_table(
            self.imageset,
            labellers,
            filter_spots=self.filter_spots,
            min_spot_size=self.min_spot_size,
            max_spot_size=self.max_spot_size,
            write_hot_pixel_mask=False,
        )

        # The shoeboxes are not needed downstream; free them immediately.
        del reflections["shoeboxes"]

        return [reflections]
class ExtractSpotsParallelTask:
    """
    Picklable callable that runs a spot-finding task on a (possibly remote)
    worker, capturing its cached log records alongside the result so the
    parent process can re-emit them.
    """

    def __init__(self, function):
        """Store the callable to execute for each task."""
        self.function = function

    def __call__(self, task):
        """Run the stored callable on *task*; return (result, log records)."""
        log.config_simple_cached()
        output = self.function(task)
        cached_handlers = logging.getLogger("dials").handlers
        assert len(cached_handlers) == 1, "Invalid number of logging handlers"
        return output, cached_handlers[0].records
def pixel_list_to_shoeboxes(
    imageset: ImageSet,
    pixel_labeller: Iterable[PixelListLabeller],
    min_spot_size: int,
    max_spot_size: int,
    write_hot_pixel_mask: bool,
) -> Tuple[flex.shoebox, Tuple[flex.size_t, ...]]:
    """Convert per-panel pixel labellers into spot shoeboxes.

    Returns the allocated shoeboxes together with a tuple of per-panel hot
    pixel index arrays (empty unless write_hot_pixel_mask is set).
    """
    shoeboxes = flex.shoebox()
    spotsizes = flex.size_t()
    hotpixels = tuple(flex.size_t() for _ in range(len(imageset.get_detector())))
    two_dimensional = (
        imageset.get_scan().is_still()
        if isinstance(imageset, ImageSequence)
        else True
    )
    for panel, (labeller, hot) in enumerate(zip(pixel_labeller, hotpixels)):
        if labeller.num_pixels() == 0:
            continue
        creator = flex.PixelListShoeboxCreator(
            labeller,
            panel,  # panel
            0,  # zrange
            two_dimensional,  # twod
            min_spot_size,  # min_pixels
            max_spot_size,  # max_pixels
            write_hot_pixel_mask,
        )
        shoeboxes.extend(creator.result())
        spotsizes.extend(creator.spot_size())
        hot.extend(creator.hot_pixels())
    logger.info("\nExtracted %d spots", len(shoeboxes))

    # Drop shoeboxes that the creator refused to allocate (out-of-range
    # spot sizes) and report how many fell on each side.
    allocated = shoeboxes.is_allocated()
    shoeboxes = shoeboxes.select(allocated)
    ntoosmall = (spotsizes < min_spot_size).count(True)
    ntoolarge = (spotsizes > max_spot_size).count(True)
    assert ntoosmall + ntoolarge == allocated.count(False)
    logger.info("Removed %d spots with size < %d pixels", ntoosmall, min_spot_size)
    logger.info("Removed %d spots with size > %d pixels", ntoolarge, max_spot_size)

    return shoeboxes, hotpixels
def shoeboxes_to_reflection_table(
    imageset: ImageSet, shoeboxes: flex.shoebox, filter_spots
) -> flex.reflection_table:
    """Compute centroids and intensities, filter them, and build a
    reflection table from the surviving shoeboxes."""
    centroid = shoeboxes.centroid_valid()
    logger.info("Calculated %d spot centroids", len(shoeboxes))

    intensity = shoeboxes.summed_intensity()
    logger.info("Calculated %d spot intensities", len(shoeboxes))

    # Bundle the per-spot measurements into observation objects.
    observed = flex.observation(shoeboxes.panels(), centroid, intensity)

    # Keep only the spots accepted by the filtering strategy.
    keep = filter_spots(
        None, sweep=imageset, observations=observed, shoeboxes=shoeboxes
    )
    return flex.reflection_table(observed.select(keep), shoeboxes.select(keep))
def pixel_list_to_reflection_table(
    imageset: ImageSet,
    pixel_labeller: Iterable[PixelListLabeller],
    filter_spots,
    min_spot_size: int,
    max_spot_size: int,
    write_hot_pixel_mask: bool,
) -> Tuple[flex.shoebox, Tuple[flex.size_t, ...]]:
    """Turn per-panel pixel labellers into (reflection table, hot pixels)."""
    boxes, hot = pixel_list_to_shoeboxes(
        imageset,
        pixel_labeller,
        min_spot_size=min_spot_size,
        max_spot_size=max_spot_size,
        write_hot_pixel_mask=write_hot_pixel_mask,
    )
    table = shoeboxes_to_reflection_table(imageset, boxes, filter_spots=filter_spots)
    return table, hot
class ExtractSpots:
    """
    Class to find spots in an image and extract them into shoeboxes.
    """

    def __init__(
        self,
        threshold_function=None,
        mask=None,
        region_of_interest=None,
        max_strong_pixel_fraction=0.1,
        compute_mean_background=False,
        mp_method=None,
        mp_nproc=1,
        mp_njobs=1,
        mp_chunksize=1,
        min_spot_size=1,
        max_spot_size=20,
        filter_spots=None,
        no_shoeboxes_2d=False,
        min_chunksize=50,
        write_hot_pixel_mask=False,
    ):
        """
        Initialise the class with the strategy

        :param threshold_function: The image thresholding strategy
        :param mask: The mask to use
        :param region_of_interest: Optional (x0, x1, y0, y1) search window
        :param max_strong_pixel_fraction: The maximum fraction of strong pixels
        :param compute_mean_background: Whether to log the mean background
        :param mp_method: The multi processing method
        :param mp_nproc: Processes per node (may be libtbx.Auto)
        :param mp_njobs: Number of parallel (cluster) jobs
        :param mp_chunksize: Task chunk size (may be libtbx.Auto)
        :param min_spot_size: Minimum number of pixels per spot
        :param max_spot_size: Maximum number of pixels per spot
        :param filter_spots: The spot filtering strategy
        :param no_shoeboxes_2d: Reduce per-image without keeping shoeboxes
        :param min_chunksize: Lower bound for auto-computed chunk sizes
        :param write_hot_pixel_mask: Whether to record hot pixels
        """
        # Set the required strategies
        self.threshold_function = threshold_function
        self.mask = mask
        self.mp_method = mp_method
        self.mp_chunksize = mp_chunksize
        self.mp_nproc = mp_nproc
        self.mp_njobs = mp_njobs
        self.max_strong_pixel_fraction = max_strong_pixel_fraction
        self.compute_mean_background = compute_mean_background
        self.region_of_interest = region_of_interest
        self.min_spot_size = min_spot_size
        self.max_spot_size = max_spot_size
        self.filter_spots = filter_spots
        self.no_shoeboxes_2d = no_shoeboxes_2d
        self.min_chunksize = min_chunksize
        self.write_hot_pixel_mask = write_hot_pixel_mask

    def __call__(self, imageset):
        """
        Find the spots in the imageset

        :param imageset: The imageset to process
        :return: The list of spot shoeboxes
        """
        if not self.no_shoeboxes_2d:
            return self._find_spots(imageset)
        else:
            return self._find_spots_2d_no_shoeboxes(imageset)

    def _compute_chunksize(self, nimg, nproc, min_chunksize):
        """
        Compute the chunk size for a given number of images and processes.

        Starting from ceil(nimg / nproc), searches downwards (but not below
        min_chunksize) for the chunk size that minimises the number of
        left-over images in the final incomplete round of chunks.
        """
        chunksize = int(math.ceil(nimg / nproc))
        remainder = nimg % (chunksize * nproc)
        test_chunksize = chunksize - 1
        while test_chunksize >= min_chunksize:
            test_remainder = nimg % (test_chunksize * nproc)
            if test_remainder <= remainder:
                chunksize = test_chunksize
                remainder = test_remainder
            test_chunksize -= 1
        return chunksize

    def _find_spots(self, imageset):
        """
        Find the spots in the imageset

        :param imageset: The imageset to process
        :return: The list of spot shoeboxes
        """
        # Change the number of processors if necessary
        mp_nproc = self.mp_nproc
        mp_njobs = self.mp_njobs
        if mp_nproc is libtbx.Auto:
            mp_nproc = available_cores()
            logger.info(f"Setting nproc={mp_nproc}")

        # Never run more workers than there are images.
        if mp_nproc * mp_njobs > len(imageset):
            mp_nproc = min(mp_nproc, len(imageset))
            mp_njobs = int(math.ceil(len(imageset) / mp_nproc))

        mp_method = self.mp_method
        mp_chunksize = self.mp_chunksize

        if mp_chunksize is libtbx.Auto:
            mp_chunksize = self._compute_chunksize(
                len(imageset), mp_njobs * mp_nproc, self.min_chunksize
            )
            logger.info("Setting chunksize=%i", mp_chunksize)

        len_by_nproc = int(math.floor(len(imageset) / (mp_njobs * mp_nproc)))
        if mp_chunksize > len_by_nproc:
            mp_chunksize = len_by_nproc
        # len_by_nproc can be 0 for very small imagesets; clamp to 1 so the
        # sanity assertion below holds.
        if mp_chunksize == 0:
            mp_chunksize = 1
        assert mp_nproc > 0, "Invalid number of processors"
        assert mp_njobs > 0, "Invalid number of jobs"
        assert mp_njobs == 1 or mp_method is not None, "Invalid cluster method"
        assert mp_chunksize > 0, "Invalid chunk size"

        # The extract pixels function
        function = ExtractPixelsFromImage(
            imageset=imageset,
            threshold_function=self.threshold_function,
            mask=self.mask,
            max_strong_pixel_fraction=self.max_strong_pixel_fraction,
            compute_mean_background=self.compute_mean_background,
            region_of_interest=self.region_of_interest,
        )

        # The indices to iterate over
        indices = list(range(len(imageset)))

        # Initialise the pixel labeller
        num_panels = len(imageset.get_detector())
        pixel_labeller = [PixelListLabeller() for p in range(num_panels)]

        # Do the processing
        logger.info("Extracting strong pixels from images")
        if mp_njobs > 1:
            logger.info(
                " Using %s with %d parallel job(s) and %d processes per node\n",
                mp_method,
                mp_njobs,
                mp_nproc,
            )
        else:
            logger.info(" Using multiprocessing with %d parallel job(s)\n", mp_nproc)
        if mp_nproc > 1 or mp_njobs > 1:

            def process_output(result):
                # Replay the worker's log records in this process, then fold
                # its pixel lists into the per-panel labellers.
                rehandle_cached_records(result[1])
                assert len(pixel_labeller) == len(result[0]), "Inconsistent size"
                for plabeller, plist in zip(pixel_labeller, result[0]):
                    plabeller.add(plist)

            batch_multi_node_parallel_map(
                func=ExtractSpotsParallelTask(function),
                iterable=indices,
                nproc=mp_nproc,
                njobs=mp_njobs,
                cluster_method=mp_method,
                chunksize=mp_chunksize,
                callback=process_output,
            )
        else:
            for task in indices:
                result = function(task)
                assert len(pixel_labeller) == len(result), "Inconsistent size"
                for plabeller, plist in zip(pixel_labeller, result):
                    plabeller.add(plist)
                result.clear()

        # Create shoeboxes from pixel list
        return pixel_list_to_reflection_table(
            imageset,
            pixel_labeller,
            filter_spots=self.filter_spots,
            min_spot_size=self.min_spot_size,
            max_spot_size=self.max_spot_size,
            write_hot_pixel_mask=self.write_hot_pixel_mask,
        )

    def _find_spots_2d_no_shoeboxes(self, imageset):
        """
        Find the spots in the imageset

        :param imageset: The imageset to process
        :return: (reflection table, None) -- no hot pixels in this mode
        """
        # Change the number of processors if necessary
        mp_nproc = self.mp_nproc
        mp_njobs = self.mp_njobs
        # Consistency fix: resolve libtbx.Auto exactly as _find_spots does;
        # previously an Auto value fell straight through to the arithmetic
        # below.
        if mp_nproc is libtbx.Auto:
            mp_nproc = available_cores()
            logger.info(f"Setting nproc={mp_nproc}")

        if mp_nproc * mp_njobs > len(imageset):
            mp_nproc = min(mp_nproc, len(imageset))
            mp_njobs = int(math.ceil(len(imageset) / mp_nproc))

        mp_method = self.mp_method
        mp_chunksize = self.mp_chunksize

        # Identity comparison for the libtbx.Auto singleton (was `==`),
        # matching _find_spots.
        if mp_chunksize is libtbx.Auto:
            mp_chunksize = self._compute_chunksize(
                len(imageset), mp_njobs * mp_nproc, self.min_chunksize
            )
            logger.info("Setting chunksize=%i", mp_chunksize)

        len_by_nproc = int(math.floor(len(imageset) / (mp_njobs * mp_nproc)))
        if mp_chunksize > len_by_nproc:
            mp_chunksize = len_by_nproc
        # Robustness fix (mirrors _find_spots): len_by_nproc can be 0 for
        # tiny imagesets, which would trip the assertion below.
        if mp_chunksize == 0:
            mp_chunksize = 1
        assert mp_nproc > 0, "Invalid number of processors"
        assert mp_njobs > 0, "Invalid number of jobs"
        assert mp_njobs == 1 or mp_method is not None, "Invalid cluster method"
        assert mp_chunksize > 0, "Invalid chunk size"

        # The extract pixels function
        function = ExtractPixelsFromImage2DNoShoeboxes(
            imageset=imageset,
            threshold_function=self.threshold_function,
            mask=self.mask,
            max_strong_pixel_fraction=self.max_strong_pixel_fraction,
            compute_mean_background=self.compute_mean_background,
            region_of_interest=self.region_of_interest,
            min_spot_size=self.min_spot_size,
            max_spot_size=self.max_spot_size,
            filter_spots=self.filter_spots,
        )

        # The indices to iterate over
        indices = list(range(len(imageset)))

        # The resulting reflections
        reflections = flex.reflection_table()

        # Do the processing
        logger.info("Extracting strong spots from images")
        if mp_njobs > 1:
            logger.info(
                " Using %s with %d parallel job(s) and %d processes per node\n",
                mp_method,
                mp_njobs,
                mp_nproc,
            )
        else:
            logger.info(" Using multiprocessing with %d parallel job(s)\n", mp_nproc)
        if mp_nproc > 1 or mp_njobs > 1:

            def process_output(result):
                # Re-emit worker log messages, collect its reflections, and
                # drop the reference so the table can be freed.
                for message in result[1]:
                    logger.log(message.levelno, message.msg)
                reflections.extend(result[0][0])
                result[0][0] = None

            batch_multi_node_parallel_map(
                func=ExtractSpotsParallelTask(function),
                iterable=indices,
                nproc=mp_nproc,
                njobs=mp_njobs,
                cluster_method=mp_method,
                chunksize=mp_chunksize,
                callback=process_output,
            )
        else:
            for task in indices:
                reflections.extend(function(task)[0])

        # Return the reflections
        return reflections, None
class SpotFinder:
    """
    A class to do spot finding and filtering.
    """

    def __init__(
        self,
        threshold_function=None,
        mask=None,
        region_of_interest=None,
        max_strong_pixel_fraction=0.1,
        compute_mean_background=False,
        mp_method=None,
        mp_nproc=1,
        mp_njobs=1,
        mp_chunksize=1,
        mask_generator=None,
        filter_spots=None,
        scan_range=None,
        write_hot_mask=True,
        hot_mask_prefix="hot_mask",
        min_spot_size=1,
        max_spot_size=20,
        no_shoeboxes_2d=False,
        min_chunksize=50,
        is_stills=False,
    ):
        """
        Initialise the class.

        :param find_spots: The spot finding algorithm
        :param filter_spots: The spot filtering algorithm
        :param scan_range: The scan range to find spots over
        :param is_stills: [ADVANCED] Force still-handling of experiment
                ID remapping for dials.stills_process.
        """
        # Set the filter and some other stuff
        self.threshold_function = threshold_function
        self.mask = mask
        self.region_of_interest = region_of_interest
        self.max_strong_pixel_fraction = max_strong_pixel_fraction
        self.compute_mean_background = compute_mean_background
        self.mask_generator = mask_generator
        self.filter_spots = filter_spots
        self.scan_range = scan_range
        self.write_hot_mask = write_hot_mask
        self.hot_mask_prefix = hot_mask_prefix
        self.min_spot_size = min_spot_size
        self.max_spot_size = max_spot_size
        self.mp_method = mp_method
        self.mp_chunksize = mp_chunksize
        self.mp_nproc = mp_nproc
        self.mp_njobs = mp_njobs
        self.no_shoeboxes_2d = no_shoeboxes_2d
        self.min_chunksize = min_chunksize
        self.is_stills = is_stills

    def find_spots(self, experiments: ExperimentList) -> flex.reflection_table:
        """
        Do spotfinding for a set of experiments.

        Args:
            experiments: The experiment list to process

        Returns:
            A new reflection table of found reflections
        """
        # Loop through all the experiments and get the unique imagesets
        # (several experiments may share a single imageset).
        imagesets = []
        for experiment in experiments:
            if experiment.imageset not in imagesets:
                imagesets.append(experiment.imageset)

        # Loop through all the imagesets and find the strong spots
        reflections = flex.reflection_table()
        for j, imageset in enumerate(imagesets):
            # Find the strong spots in the sequence
            logger.info(
                "-" * 80 + "\nFinding strong spots in imageset %d\n" + "-" * 80, j
            )
            table, hot_mask = self._find_spots_in_imageset(imageset)

            # Fix up the experiment ID's now.  Every row starts unassigned
            # (-1) and is then claimed by an experiment sharing this
            # imageset.
            table["id"] = flex.int(table.nrows(), -1)
            for i, experiment in enumerate(experiments):
                if experiment.imageset is not imageset:
                    continue
                if not self.is_stills and experiment.scan:
                    # Sequence data: assign rows whose z centroid lies
                    # strictly inside this experiment's scan array range.
                    z0, z1 = experiment.scan.get_array_range()
                    z = table["xyzobs.px.value"].parts()[2]
                    table["id"].set_selected((z > z0) & (z < z1), i)
                    if experiment.identifier:
                        table.experiment_identifiers()[i] = experiment.identifier
                else:
                    # Stills: the whole table belongs to this imageset's
                    # experiment.
                    table["id"] = flex.int(table.nrows(), j)
                    if experiment.identifier:
                        table.experiment_identifiers()[j] = experiment.identifier
            missed = table["id"] == -1
            assert missed.count(True) == 0, "Failed to remap {} experiment IDs".format(
                missed.count(True)
            )
            reflections.extend(table)

            # Write a hot pixel mask
            if self.write_hot_mask:
                if not imageset.external_lookup.mask.data.empty():
                    # Combine with any pre-existing external mask.
                    for m1, m2 in zip(hot_mask, imageset.external_lookup.mask.data):
                        m1 &= m2.data()
                    imageset.external_lookup.mask.data = ImageBool(hot_mask)
                else:
                    imageset.external_lookup.mask.data = ImageBool(hot_mask)
                # NOTE(review): the filename suffix reuses `i`, the loop
                # variable left over from the experiment loop above, rather
                # than the imageset index `j` -- confirm this is intended.
                imageset.external_lookup.mask.filename = "%s_%d.pickle" % (
                    self.hot_mask_prefix,
                    i,
                )
                # Write the hot mask
                with open(imageset.external_lookup.mask.filename, "wb") as outfile:
                    pickle.dump(hot_mask, outfile, protocol=pickle.HIGHEST_PROTOCOL)

        # Set the strong spot flag
        reflections.set_flags(
            flex.size_t_range(len(reflections)), reflections.flags.strong
        )

        # Check for overloads
        reflections.is_overloaded(experiments)

        # Return the reflections
        return reflections

    def _find_spots_in_imageset(self, imageset):
        """
        Do the spot finding.

        :param imageset: The imageset to process
        :return: The observed spots
        """
        # The input mask: combine the generated mask with the static one.
        mask = self.mask_generator(imageset)
        if self.mask is not None:
            mask = tuple(m1 & m2 for m1, m2 in zip(mask, self.mask))

        # Set the spot finding algorithm
        extract_spots = ExtractSpots(
            threshold_function=self.threshold_function,
            mask=mask,
            region_of_interest=self.region_of_interest,
            max_strong_pixel_fraction=self.max_strong_pixel_fraction,
            compute_mean_background=self.compute_mean_background,
            mp_method=self.mp_method,
            mp_nproc=self.mp_nproc,
            mp_njobs=self.mp_njobs,
            mp_chunksize=self.mp_chunksize,
            min_spot_size=self.min_spot_size,
            max_spot_size=self.max_spot_size,
            filter_spots=self.filter_spots,
            no_shoeboxes_2d=self.no_shoeboxes_2d,
            min_chunksize=self.min_chunksize,
            write_hot_pixel_mask=self.write_hot_mask,
        )

        # Get the max scan range
        if isinstance(imageset, ImageSequence):
            max_scan_range = imageset.get_array_range()
        else:
            max_scan_range = (0, len(imageset))

        # Get list of scan ranges.  Ranges are 1-based inclusive here; j0 is
        # converted back to a 0-based slice start in the loop below.
        if not self.scan_range or self.scan_range[0] is None:
            scan_range = [(max_scan_range[0] + 1, max_scan_range[1])]
        else:
            scan_range = self.scan_range

        # Get spots from bits of scan
        hot_pixels = tuple(flex.size_t() for i in range(len(imageset.get_detector())))
        reflections = flex.reflection_table()
        for j0, j1 in scan_range:
            # Make sure we were asked to do something sensible
            if j1 < j0:
                raise Sorry("Scan range must be in ascending order")
            elif j0 < max_scan_range[0] or j1 > max_scan_range[1]:
                raise Sorry(
                    "Scan range must be within image range {}..{}".format(
                        max_scan_range[0] + 1, max_scan_range[1]
                    )
                )
            logger.info("\nFinding spots in image %s to %s...", j0, j1)
            j0 -= 1
            if len(imageset) == 1:
                r, h = extract_spots(imageset)
            else:
                r, h = extract_spots(imageset[j0:j1])
            reflections.extend(r)
            # Accumulate per-panel hot pixel indices (h is None in the
            # no-shoeboxes mode).
            if h is not None:
                for h1, h2 in zip(hot_pixels, h):
                    h1.extend(h2)

        # Find hot pixels
        hot_mask = self._create_hot_mask(imageset, hot_pixels)

        # Return as a reflection list
        return reflections, hot_mask

    def _create_hot_mask(self, imageset, hot_pixels):
        """
        Find hot pixels in images

        :param imageset: The imageset being processed
        :param hot_pixels: Per-panel 1D indices of hot pixels
        :return: Per-panel boolean masks with hot positions set False, or
            None when hot-mask writing is disabled
        """
        # Write the hot mask
        if self.write_hot_mask:
            # Create the hot pixel mask, initially all True (all good).
            hot_mask = tuple(
                flex.bool(flex.grid(p.get_image_size()[::-1]), True)
                for p in imageset.get_detector()
            )
            num_hot = 0
            if hot_pixels:
                for hp, hm in zip(hot_pixels, hot_mask):
                    for i in range(len(hp)):
                        hm[hp[i]] = False
                    num_hot += len(hp)
            logger.info("Found %d possible hot pixel(s)", num_hot)
        else:
            hot_mask = None

        # Return the hot mask
        return hot_mask
# ---- boundary marker between unrelated concatenated sources ----
import sys
from numpy.testing import *
import numpy as np
# All numpy scalar types exercised by the tests below.
# NOTE: this module-level name shadows the stdlib ``types`` module for the
# remainder of this file.
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
         np.int_, np.uint, np.longlong, np.ulonglong,
         np.single, np.double, np.longdouble, np.csingle,
         np.cdouble, np.clongdouble]
# This compares scalarmath against ufuncs.
class TestTypes(TestCase):
    """Compare scalarmath operators against the equivalent ufuncs by
    checking that scalar OP scalar yields the same result dtype as
    array OP array."""

    def test_types(self, level=1):
        """Each scalar type must construct from, and equal, a Python int."""
        for atype in types:
            a = atype(1)
            assert a == 1, "error with %r: got %r" % (atype, a)

    def test_type_add(self, level=1):
        """scalar + scalar must match the dtype of array + array."""
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            for l, btype in enumerate(types):
                valb = btype(1)
                val2 = np.array([1], dtype=btype)
                val = vala + valb
                valo = val1 + val2
                assert val.dtype.num == valo.dtype.num and \
                       val.dtype.char == valo.dtype.char, \
                       "error with (%d,%d)" % (k, l)

    def test_type_subtract(self, level=1):
        """scalar - scalar must match the dtype of array - array."""
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            for l, btype in enumerate(types):
                valb = btype(1)
                val2 = np.array([1], dtype=btype)
                val = vala - valb
                valo = val1 - val2
                assert val.dtype.num == valo.dtype.num and \
                       val.dtype.char == valo.dtype.char, \
                       "error with (%d,%d)" % (k, l)

    def test_type_multiply(self, level=1):
        """scalar * scalar must match the dtype of array * array."""
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            for l, btype in enumerate(types):
                valb = btype(1)
                val2 = np.array([1], dtype=btype)
                val = vala * valb
                valo = val1 * val2
                assert val.dtype.num == valo.dtype.num and \
                       val.dtype.char == valo.dtype.char, \
                       "error with (%d,%d)" % (k, l)

    def test_type_divide(self, level=1):
        """scalar / scalar must match the dtype of array / array."""
        # Choose more interesting operands for this operation.
        for k, atype in enumerate(types):
            vala = atype(6)
            val1 = np.array([6], dtype=atype)
            for l, btype in enumerate(types):
                valb = btype(2)
                val2 = np.array([2], dtype=btype)
                val = vala / valb
                valo = val1 / val2
                assert val.dtype.num == valo.dtype.num and \
                       val.dtype.char == valo.dtype.char, \
                       "error with (%d,%d)" % (k, l)

    def test_type_remainder(self, level=1):
        """scalar % scalar must match the dtype of array % array."""
        # Choose more interesting operands for this operation.
        for k, atype in enumerate(types):
            vala = atype(6)
            val1 = np.array([6], dtype=atype)
            for l, btype in enumerate(types):
                valb = btype(2)
                val2 = np.array([2], dtype=btype)
                try:
                    val = vala % valb
                    valo = val1 % val2
                except TypeError:
                    # Some combos just don't work, like byte % complex.  We
                    # just don't worry about classifying the cases here, and
                    # instead just ignore these types of problems. <grin>
                    #
                    # BUG FIX: this used to `pass`, which fell through to the
                    # assert below with `val`/`valo` either undefined or left
                    # over from the previous iteration.
                    continue
                assert val.dtype.num == valo.dtype.num and \
                       val.dtype.char == valo.dtype.char, \
                       "error with (%d,%d)" % (k, l)

    def test_type_negative(self, level=1):
        """Unary minus on a scalar must match unary minus on an array."""
        # NOTE: unary operators don't require the double loop over types,
        # since there's only one operand.
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            val = -vala
            valo = -val1
            assert val.dtype.num == valo.dtype.num and \
                   val.dtype.char == valo.dtype.char, \
                   "error with (%d)" % (k)

    def test_type_positive(self, level=1):
        """Unary plus on a scalar must match unary plus on an array."""
        # NOTE: unary operators don't require the double loop over types,
        # since there's only one operand.
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            val = +vala
            valo = +val1
            assert val.dtype.num == valo.dtype.num and \
                   val.dtype.char == valo.dtype.char, \
                   "error with (%d)" % (k)

    def test_type_power(self, level=1):
        """scalar ** scalar must match the dtype of array ** array."""
        # Choose more interesting operands for this operation.
        for k, atype in enumerate(types):
            vala = atype(2)
            val1 = np.array([2], dtype=atype)
            # Skip the boolean types
            if vala.dtype.char == '?':
                continue
            for l, btype in enumerate(types):
                valb = btype(3)
                val2 = np.array([3], dtype=btype)
                # Skip the boolean types
                if valb.dtype.char == '?':
                    continue
                val = vala ** valb
                valo = val1 ** val2
                assert val.dtype.num == valo.dtype.num and \
                       val.dtype.char == valo.dtype.char, \
                       "error with (%d,%d)" % (k, l)

    def test_type_absolute(self, level=1):
        """abs() of a scalar must match the dtype of abs() of an array."""
        for k, atype in enumerate(types):
            vala = atype(-3)
            val1 = np.array([-3], dtype=atype)
            val = abs(vala)
            valo = abs(val1)
            assert val.dtype.num == valo.dtype.num and \
                   val.dtype.char == valo.dtype.char, \
                   "error with (%d)" % (k)
            # We can't really test for the right result here, unless we can
            # figure out how to exclude the unsigned types.

    def test_type_hex(self, level=1):
        """hex() must at least not blow up for types that support it."""
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            try:
                val = hex(vala)
                valo = hex(val1)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # friends are not swallowed.  Some types simply can't be
                # hexified, and we can't demand equivalent reprs either, so
                # all this checks is that nothing unexpected happens.
                pass

    def test_type_float(self, level=1):
        """float() of a scalar must equal float() of a 1-element array."""
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            try:
                val = float(vala)
                valo = float(val1)
            except TypeError:
                # The complex type, for example, can't be cast to float, so
                # just skip it.
                continue
            assert val == valo, "Trouble with float (%d)" % k
            # Skip over bool.
            if vala.dtype.char == '?':
                continue
            assert val == 3 and valo == 3, "Trouble with float (%d)" % k

    def test_misc_niggles(self, level=1):
        """Exercise truthiness, divmod and int() on longdouble and
        clongdouble scalars."""
        # Verify the nonzero behaviour on longdouble and clongdouble types
        # by evaluating appropriately typed objects as conditions.
        x = np.longdouble(4.4)
        y = np.nonzero(x)
        assert x, "Trouble with longdouble_nonzero"
        z = np.clongdouble(4 + 5j)
        assert z, "Trouble with clongdouble_nonzero"
        from operator import itruediv
        itruediv(z, x)
        # NOTE(review): int() of a complex scalar reflects the Python 2-era
        # numpy behaviour being tested here.
        q = int(z)
        divmod(x, 1.1)
        r = np.nonzero(z)
        s = np.longlong(99)
        t = int(s)

    def xtest_scalarmath_module_methods(self, level=1):
        """Placeholder: rename to test_* when ready.

        The purpose of this method is to exercise the scalarmath module's
        module-methods, whose names are:
            use_scalarmath
            use_pythonmath
            alter_pyscalars
            restore_pyscalars
        """
        pass

    def test_type_create(self, level=1):
        """Constructing from a list must equal np.array with that dtype."""
        for k, atype in enumerate(types):
            a = np.array([1, 2, 3], atype)
            b = atype([1, 2, 3])
            assert_equal(a, b)
class TestPower(TestCase):
    """Scalar exponentiation sanity checks across integer/float widths."""

    def test_small_types(self):
        for t in [np.int8, np.int16]:
            base = t(3)
            result = base ** 4
            assert result == 81, "error with %r: got %r" % (t, result)

    def test_large_types(self):
        for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
            base = t(51)
            result = base ** 4
            msg = "error with %r: got %r" % (t, result)
            if np.issubdtype(t, np.integer):
                assert result == 6765201, msg
            else:
                assert_almost_equal(result, 6765201, err_msg=msg)
class TestConversion(TestCase):
    def test_int_from_long(self):
        """int() of large float/int array elements must round-trip exactly."""
        l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
        li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
        for T in [None, np.float64, np.int64]:
            a = np.array(l, dtype=T)
            # A list comprehension instead of map(): under Python 3 map()
            # returns an iterator, which would never compare equal to li.
            assert_equal([int(_m) for _m in a], li)
        a = np.array(l[:3], dtype=np.uint64)
        assert_equal([int(_m) for _m in a], li[:3])
#class TestRepr(TestCase):
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
# val_repr = repr(val)
# val2 = eval(val_repr)
# assert_equal( val, val2 )
class TestRepr(TestCase):
    def _test_type_repr(self, t):
        """repr() of the smallest denormal/normal of t must round-trip
        through eval back to an equal value."""
        finfo = np.finfo(t)
        last_fraction_bit_idx = finfo.nexp + finfo.nmant
        last_exponent_bit_idx = finfo.nexp
        storage_bytes = np.dtype(t).itemsize * 8
        # could add some more types to the list below
        for which in ['small denorm', 'small norm']:
            # Values from http://en.wikipedia.org/wiki/IEEE_754
            constr = np.array([0x00] * storage_bytes, dtype=np.uint8)
            if which == 'small denorm':
                byte = last_fraction_bit_idx // 8
                bytebit = 7 - (last_fraction_bit_idx % 8)
                constr[byte] = 1 << bytebit
            elif which == 'small norm':
                byte = last_exponent_bit_idx // 8
                bytebit = 7 - (last_exponent_bit_idx % 8)
                constr[byte] = 1 << bytebit
            else:
                raise ValueError('hmm')
            val = constr.view(t)[0]
            val_repr = repr(val)
            val2 = t(eval(val_repr))
            if not (val2 == 0 and val < 1e-100):
                assert_equal(val, val2)

    def test_float_repr(self):
        # long double test cannot work, because eval goes through a python
        # float
        for t in [np.float32, np.float64]:
            # BUG FIX: previously yielded the undefined name
            # ``test_float_repr``; the nose-style parameterized test must
            # yield the bound helper instead.
            yield self._test_type_repr, t
if __name__ == "__main__":
    # Run this module's test suite when executed directly.
    run_module_suite()
| |
# MIT License
#
# Copyright (c) 2020 Tri Minh Cao
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Data Structures for LEF Parser
Author: Tri Minh Cao
Email: tricao@utdallas.edu
Date: August 2016
"""
from .util import *
class Statement:
    """Base class for every kind of statement parsed from a LEF file.

    This base implementation only recognizes the top-level section
    openers (MACRO, LAYER, VIA, UNITS) and END; subclasses override
    parse_next() to consume their own section contents.
    """

    def __init__(self):
        pass

    def parse_next(self, data):
        """Dispatch one tokenized LEF statement.

        :param data: list of string tokens for one statement
        :return: 1 when parsing is done, -1 on error, a new child
            Statement when a nested section opens, 0 otherwise.
        """
        # the program assumes the syntax of the LEF file is correct
        keyword = data[0]
        if keyword == "MACRO":
            return Macro(data[1])
        if keyword == "LAYER" and len(data) == 2:  # no trailing ';'
            return Layer(data[1])
        if keyword == "VIA":
            return Via(data[1])
        if keyword == "UNITS":
            return Units()
        if keyword == "END":
            return 1
        return 0

    def __str__(self):
        """Render the statement as '<TYPE> <name>'."""
        return self.type + " " + self.name
class Macro(Statement):
    """A MACRO (standard-cell) section of a LEF file."""

    def __init__(self, name):
        Statement.__init__(self)
        self.type = 'MACRO'
        self.name = name
        # miscellaneous attributes keyed by their LEF keyword
        self.info = {}
        # pin name -> Pin object
        self.pin_dict = {}

    def __str__(self):
        """Multi-line dump of the macro and everything parsed into it."""
        pieces = [self.type + " " + self.name + "\n"]
        for key in self.info:
            if key == "PIN":
                pieces.append("    " + key + ":\n")
                for pin in self.info[key]:
                    pieces.append("        " + str(pin) + "\n")
            else:
                pieces.append("    " + key + ": " + str(self.info[key]) + "\n")
        return "".join(pieces)

    def parse_next(self, data):
        """Consume one tokenized statement belonging to this macro.

        :param data: list of string tokens
        :return: 0 in progress, 1 done, -1 on a mismatched END name, or
            the nested Pin/Obs object to parse next.
        """
        keyword = data[0]
        if keyword == "CLASS":
            self.info["CLASS"] = data[1]
        elif keyword == "ORIGIN":
            self.info["ORIGIN"] = (float(data[1]), float(data[2]))
        elif keyword == "FOREIGN":
            self.info["FOREIGN"] = data[1:]
        elif keyword == "SIZE":
            # SIZE <width> BY <height>
            self.info["SIZE"] = (float(data[1]), float(data[3]))
        elif keyword == "SYMMETRY":
            self.info["SYMMETRY"] = data[1:]
        elif keyword == "SITE":
            self.info["SITE"] = data[1]
        elif keyword == "PIN":
            new_pin = Pin(data[1])
            self.pin_dict[data[1]] = new_pin
            self.info.setdefault("PIN", []).append(new_pin)
            return new_pin
        elif keyword == "OBS":
            new_obs = Obs()
            self.info["OBS"] = new_obs
            return new_obs
        elif keyword == "END":
            return 1 if data[1] == self.name else -1
        return 0

    def get_pin(self, pin_name):
        """Look up a Pin by name (KeyError when absent)."""
        return self.pin_dict[pin_name]
class Pin(Statement):
    """A PIN statement nested inside a MACRO."""

    def __init__(self, name):
        Statement.__init__(self)
        self.type = "PIN"
        self.name = name
        self.info = {}

    def __str__(self):
        """One 'TYPE name' line per layer of this pin's port."""
        pieces = []
        for layer in self.info["PORT"].info["LAYER"]:
            pieces.append(layer.type + " " + layer.name + "\n")
        return "".join(pieces)

    def parse_next(self, data):
        """Consume one tokenized statement belonging to this pin.

        :return: 0 in progress (unrecognized statements are ignored),
            1 done, -1 on a mismatched END name, or the nested Port.
        """
        keyword = data[0]
        if keyword == "DIRECTION":
            self.info["DIRECTION"] = data[1]
        elif keyword == "USE":
            self.info["USE"] = data[1]
        elif keyword == "PORT":
            new_port = Port()
            self.info["PORT"] = new_port
            return new_port
        elif keyword == "SHAPE":
            self.info["SHAPE"] = data[1]
        elif keyword == "END":
            return 1 if data[1] == self.name else -1
        return 0

    def is_lower_metal(self, split_layer):
        """True when every layer of this pin lies below split_layer."""
        return self.info["PORT"].is_lower_metal(split_layer)

    def get_top_metal(self):
        """Name of the highest metal layer used by this pin's port."""
        return self.info["PORT"].get_top_metal()
class Port(Statement):
    """A PORT statement inside a PIN (ports carry no name)."""

    def __init__(self):
        Statement.__init__(self)
        self.type = "PORT"
        self.name = ""
        self.info = {}

    def parse_next(self, data):
        """Consume one tokenized statement belonging to this port."""
        keyword = data[0]
        if keyword == "END":
            return 1
        if keyword == "LAYER":
            self.info.setdefault("LAYER", []).append(LayerDef(data[1]))
        elif keyword == "RECT":
            # raises if no LAYER was seen yet; [-1] is the latest layer
            self.info["LAYER"][-1].add_rect(data)
        elif keyword == "POLYGON":
            self.info["LAYER"][-1].add_polygon(data)
        return 0

    def is_lower_metal(self, split_layer):
        """True when every layer of this port lies below split_layer."""
        for layer in self.info["LAYER"]:
            if compare_metal(layer.name, split_layer) >= 0:
                return False
        return True

    def get_top_metal(self):
        """Name of the highest metal layer in this port."""
        highest = "poly"
        for layer in self.info["LAYER"]:
            if compare_metal(layer.name, highest) > 0:
                highest = layer.name
        return highest
class Obs(Statement):
    """An OBS (obstruction) statement inside a MACRO (no name)."""

    def __init__(self):
        Statement.__init__(self)
        self.type = "OBS"
        self.name = ""
        self.info = {}

    def __str__(self):
        """One 'TYPE name' line per obstructed layer."""
        pieces = []
        for layer in self.info["LAYER"]:
            pieces.append(layer.type + " " + layer.name + "\n")
        return "".join(pieces)

    def parse_next(self, data):
        """Consume one tokenized statement belonging to this obstruction."""
        keyword = data[0]
        if keyword == "END":
            return 1
        if keyword == "LAYER":
            self.info.setdefault("LAYER", []).append(LayerDef(data[1]))
        elif keyword == "RECT":
            # raises if no LAYER was seen yet; [-1] is the latest layer
            self.info["LAYER"][-1].add_rect(data)
        elif keyword == "POLYGON":
            self.info["LAYER"][-1].add_polygon(data)
        return 0
class LayerDef:
    """Per-layer geometry list inside a PORT, OBS, or VIA.

    Not a Statement subclass: a layer definition has no END line; it
    simply accumulates Rect/Polygon shapes for one named layer.
    """

    def __init__(self, name):
        self.type = "LayerDef"
        self.name = name
        self.shapes = []

    def add_rect(self, data):
        """Append a Rect from tokens ['RECT', x0, y0, x1, y1, ...]."""
        corners = [(float(data[1]), float(data[2])),
                   (float(data[3]), float(data[4]))]
        self.shapes.append(Rect(corners))

    def add_polygon(self, data):
        """Append a Polygon from tokens ['POLYGON', x1, y1, ..., ';'].

        Coordinate pairs run from index 1; the final two tokens are not
        coordinate data and are skipped.
        """
        vertices = []
        for idx in range(1, len(data) - 2, 2):
            vertices.append([float(data[idx]), float(data[idx + 1])])
        self.shapes.append(Polygon(vertices))
class Rect:
    """Axis-aligned rectangle: a list of two corner points on one layer."""

    def __init__(self, points):
        self.type = "RECT"
        self.points = points
class Polygon:
    """Arbitrary polygon: a list of [x, y] vertices on one layer."""

    def __init__(self, points):
        self.type = "POLYGON"
        self.points = points
class Layer(Statement):
    """A LAYER section of a LEF file (routing or cut layer properties)."""

    def __init__(self, name):
        Statement.__init__(self)
        self.type = "LAYER"
        self.name = name
        self.layer_type = None
        self.spacing_table = None
        self.spacing = None
        self.width = None
        self.pitch = None
        self.direction = None
        self.offset = None
        self.resistance = None
        self.thickness = None
        self.height = None
        self.capacitance = None
        self.edge_cap = None
        self.property = None
        # Becomes 1 once a SPACINGTABLE section starts, so WIDTH rows that
        # belong to the table do not clobber the layer's own WIDTH.
        self.spacingTable = 0

    def parse_next(self, data):
        """Consume one tokenized statement belonging to this layer.

        :param data: list of string tokens
        :return: 0 in progress, 1 done, -1 on a mismatched END name.
        """
        keyword = data[0]
        if keyword == "TYPE":
            self.layer_type = data[1]
        elif keyword == "SPACINGTABLE":
            self.spacingTable = 1
        elif keyword == "SPACING":
            self.spacing = float(data[1])
        elif keyword == "WIDTH":
            # ignore WIDTH rows inside a SPACINGTABLE section; they used
            # to overwrite the real layer width
            if not self.spacingTable:
                self.width = float(data[1])
        elif keyword == "PITCH":
            self.pitch = float(data[1])
        elif keyword == "DIRECTION":
            self.direction = data[1]
        elif keyword == "OFFSET":
            self.offset = float(data[1])
        elif keyword == "RESISTANCE":
            # routing layers give (unit, value); cut layers a bare value
            if self.layer_type == "ROUTING":
                self.resistance = (data[1], float(data[2]))
            elif self.layer_type == "CUT":
                self.resistance = float(data[1])
        elif keyword == "THICKNESS":
            self.thickness = float(data[1])
        elif keyword == "HEIGHT":
            self.height = float(data[1])
        elif keyword == "CAPACITANCE":
            self.capacitance = (data[1], float(data[2]))
        elif keyword == "EDGECAPACITANCE":
            self.edge_cap = float(data[1])
        elif keyword == "PROPERTY":
            if data[1] != "LEF58_TYPE":
                self.property = (data[1], float(data[2]))
        elif keyword == "END":
            return 1 if data[1] == self.name else -1
        return 0
class Via(Statement):
    """A VIA section of a LEF file: an ordered stack of layer geometries."""

    def __init__(self, name):
        # initiate the Statement superclass
        Statement.__init__(self)
        self.type = "VIA"
        self.name = name
        # LayerDef objects in the order their LAYER lines appear
        self.layers = []

    def parse_next(self, data):
        """Consume one tokenized statement belonging to this via.

        :param data: list of string tokens
        :return: 0 in progress, 1 when the section ends.
        """
        if data[0] == "END":
            return 1
        elif data[0] == "LAYER":
            self.layers.append(LayerDef(data[1]))
        elif data[0] == "RECT":
            self.layers[-1].add_rect(data)  # [-1] means the latest layer
        elif data[0] == "POLYGON":
            # BUG FIX: was ``self.layers.add_polygon(data)``, which called
            # add_polygon on the list itself and raised AttributeError.
            self.layers[-1].add_polygon(data)
        return 0
class Units(Statement):
    """The UNITS section of a LEF file (units carry no name)."""

    def __init__(self):
        Statement.__init__(self)
        self.type = "UNITS"
        self.name = ""
        # unit name -> (unit string, conversion-factor string)
        self.info = {}

    def __str__(self):
        """One '<name> <unit> <factor>' line per recorded unit."""
        s = ""
        # BUG FIX: was ``self.info.items`` (the bound method object, not a
        # call), which raised TypeError when iterated.
        for name, unit in self.info.items():
            s += name + " " + unit[0] + " " + unit[1] + "\n"
        return s

    def parse_next(self, data):
        """Record one '<name> <unit> <factor>' statement; END finishes."""
        if data[0] == "END":
            return 1
        self.info[data[0]] = (data[1], data[2])
        return 0
| |
###############################################################################
# Orkid SCONS Build System
# Copyright 2010, Michael T. Mayers
# email: michael@tweakoz.com
# The Orkid Build System is published under the GPL 2.0 license
# see http://www.gnu.org/licenses/gpl-2.0.html
###############################################################################
import glob
import re
import string
#import commands
import sys
import os
import shutil
import fnmatch
import platform
# Detect the host OS once at import time; everything below keys off it.
SYSTEM = platform.system()
print("SYSTEM<%s>" % SYSTEM)
###############################################################################
IsOsx = (SYSTEM=="Darwin")
IsIrix = (SYSTEM=="IRIX64")
IsLinux = (SYSTEM=="Linux")
# "Ix" means any unix-like host
IsIx = IsLinux or IsOsx or IsIrix
TargetPlatform = "ix"
if IsOsx:
    TargetPlatform = "osx"
###############################################################################
# Default SCons build arguments shared by all projects.
BuildArgs = dict()
BuildArgs["PLATFORM"] = TargetPlatform
BuildArgs["BUILD"] = "release"
###############################################################################
#if IsIx!=True:
#	import win32pipe
#	import win32api
#	import win32process
###############################################################################
# Python Module Export Declaration
__all__ = [
    "builddir_replace","globber", "DumpBuildEnv", "SetCompilerOptions",
    "SourceEnumerator", "RunUnitTest", "Project", "orkpath", "posixpath",
    "msplit", "recursive_glob", "deco"
    ]
__version__ = "1.0"
###############################################################################
# INIT local options
###############################################################################
# Name of the invoking script and the directory holding this module.
curpyname = sys.argv[0]
pydir = os.path.dirname(__file__)
#locopath = os.path.normpath( "%s/localopts.py"%pydir )
#if os.path.exists( locopath ) == False:
#	print "%s not found, creating from template... (feel free to edit it)" % locopath
#	shutil.copy( locotpath, locopath )
#	import localopts
#	localopts.dump()
###############################################################################
def builddir_replace( filelist, searchkey, replacekey ):
    """Return filelist with every occurrence of searchkey replaced by
    replacekey inside each entry."""
    return [entry.replace(searchkey, replacekey) for entry in filelist]
###############################################################################
def replace( file, searchkey, replacekey ):
    """Regex-substitute searchkey with replacekey in file's string form
    and return the result as a posix-style path."""
    regex = re.compile( searchkey )
    str_file = str(file)
    # ``string.join(str_file, '')`` was a Python-2-only identity operation
    # (re-joining the characters of the string with ''); the string module
    # helpers no longer exist on Python 3, so substitute directly.
    str_rep = regex.sub( replacekey, str_file )
    return posixpath(str_rep)
###############################################################################
def recursive_glob_get_dirs(path):
    """Return the basenames of the immediate subdirectories of path.

    ``path`` is expected to end with a path separator (recursive_glob
    guarantees this).  An unreadable or nonexistent path yields [].
    """
    d = []
    try:
        for entry in os.listdir(path):
            if os.path.isdir(path + entry):
                d.append(os.path.basename(entry))
    except OSError:
        # Narrowed from a bare ``except:``; filesystem errors mean "no
        # subdirectories", anything else should propagate.
        pass
    return d
###############################################################################
def recursive_glob(path, pattern):
    """Recursively collect the files under path whose basename matches
    the fnmatch pattern; returns a list of full paths (subdirectories
    first, then this directory's own files)."""
    matches = []
    if path[-1] != '/':
        path = path + '/'
    for sub in recursive_glob_get_dirs(path):
        matches = matches + recursive_glob(path + sub, pattern)
    try:
        for entry in os.listdir(path):
            full = path + entry
            if os.path.isfile(full) and fnmatch.fnmatch(entry, pattern):
                matches.append(full)
    except OSError:
        # Narrowed from a bare ``except:``; an unreadable directory
        # simply contributes nothing.
        pass
    return matches
###############################################################################
def globber( folderbase, wildcard, subdirlist, excludelist=[] ):
    """Glob ``wildcard`` under ``folderbase`` for each entry of
    subdirlist ('.' means folderbase itself), drop any hit matching a
    regex in excludelist, and return the survivors as posix paths.

    Note: excludelist defaults to a shared mutable list; callers must
    not mutate it.
    """
    raw_hits = []
    for subdir in subdirlist:
        pattern = folderbase
        if subdir != ".":
            pattern += subdir + '/'
        pattern += wildcard
        raw_hits += glob.glob( pattern )
    kept = []
    for candidate in raw_hits:
        excluded = False
        for expr in excludelist:
            if re.search( expr, candidate ):
                excluded = True
        if not excluded:
            kept.append( posixpath(candidate) )
    return kept
###############################################################################
def rmdirforce(basepath):
    """Recursively delete basepath and everything below it; does nothing
    when basepath is not a directory."""
    if not os.path.isdir(basepath):
        return
    # bottom-up walk so children are gone before their parent is removed
    for root, dirs, files in os.walk(basepath, topdown=False):
        for fname in files:
            os.remove(os.path.join(root, fname))
        for dname in dirs:
            os.rmdir(os.path.join(root, dname))
    os.rmdir(basepath)
###############################################################################
def rmtree( pathglob ):
    """Delete every file or directory tree matching pathglob; failures
    are printed rather than raised."""
    for match in glob.glob( os.path.normpath(pathglob) ):
        target = os.path.normpath(match)
        if os.path.isdir(target):
            try:
                rmdirforce(target)
            except OSError:
                print("cannot remove dir<%s>" % match)
        elif os.path.isfile(target):
            try:
                os.remove(target)
            except OSError:
                print("cannot remove file<%s>" % match)
###############################################################################
def msplit( str, sep=" " ):
    """Split str on sep, returning [str] unchanged when sep is absent.

    (The historical parameter name ``str`` shadows the builtin but is
    kept for call compatibility.)
    """
    if sep in str:
        # str.split() replaces the Python-2-only string.split() helper,
        # which no longer exists on Python 3; the results are identical.
        return str.split(sep)
    return [str]
###############################################################################
def RunUnitTest(env, target, source):
    """SCons action: run the built test binary (source[0]); on a zero
    exit status, stamp the target file with PASSED."""
    import subprocess
    app = str(source[0].abspath)
    if not subprocess.call(app):
        # use a context manager so the stamp file is closed (and flushed)
        # deterministically instead of leaking the handle until GC
        with open(str(target[0]), 'w') as stamp:
            stamp.write("PASSED\n")
###############################################################################
def orkpath(posix_path):
    """Convert a posix-style path to this platform's separator."""
    parts = posix_path.split('/')
    return os.sep.join(parts)
###############################################################################
def cygpath(output_type, str):
    """On cygwin, convert str with the ``cygpath -<output_type>`` tool;
    on every other platform return str unchanged."""
    if sys.platform == 'cygwin':
        if str[-1] == '\\':
            str = str[:-1]
        # os.popen4 was removed in Python 3; subprocess replaces it.
        import subprocess
        proc = subprocess.run('cygpath -%s "%s"' % (output_type, str),
                              shell=True, stdout=subprocess.PIPE,
                              universal_newlines=True)
        str = proc.stdout.rstrip()
    return str
###############################################################################
def posixpath(path):
    """Normalize path and convert its separators to forward slashes."""
    normalized = os.path.normpath(path)
    return '/'.join(normalized.split(os.sep))
###############################################################################
class deco:
    """ANSI 256-color terminal decorator.  With bash=True every escape is
    wrapped in \\[ \\] so bash prompt length accounting stays correct."""

    def __init__(self, bash=False):
        self.bash = bash

    def rgb256(self, r, g, b):
        """Escape selecting the 6x6x6 cube color nearest (r, g, b)."""
        levels = [int((channel * 5) / 255) for channel in (r, g, b)]
        index = 16 + 36 * levels[0] + 6 * levels[1] + levels[2]
        seq = "\033[38;5;%dm" % index
        if self.bash:
            seq = "\[" + seq + "\]"
        return seq

    def reset(self):
        """Escape restoring the terminal's default attributes."""
        seq = "\033[m"
        if self.bash:
            seq = "\[" + seq + "\]"
        return seq

    # basic colors
    def magenta(self, string):
        return self.rgb256(255, 0, 255) + str(string) + self.reset()
    def cyan(self, string):
        return self.rgb256(0, 255, 255) + str(string) + self.reset()
    def white(self, string):
        return self.rgb256(255, 255, 255) + str(string) + self.reset()
    def orange(self, string):
        return self.rgb256(255, 128, 0) + str(string) + self.reset()
    def yellow(self, string):
        return self.rgb256(255, 255, 0) + str(string) + self.reset()
    def red(self, string):
        return self.rgb256(255, 0, 0) + str(string) + self.reset()

    # semantic aliases used by the build scripts
    def key(self, string):
        # identical to yellow()
        return self.yellow(string)
    def val(self, string):
        # identical to white()
        return self.white(string)
    def path(self, string):
        return self.rgb256(255, 255, 128) + str(string) + self.reset()
    def inf(self, string):
        return self.rgb256(128, 128, 255) + str(string) + self.reset()
    def warn(self, string):
        # historical behavior: yellow() already resets, so this appends
        # a second (harmless) reset escape
        return self.yellow(string) + self.reset()
    def err(self, string):
        return self.red(string) + self.reset()
###############################
# Module-level smoke output: build a default decorator and echo the
# detected platform flags at import time.
adeco = deco()
print("IsLinux<%s>" % adeco.val(IsLinux))
print("IsIrix<%s>" % adeco.val(IsIrix))
print("IsOsx<%s>" % adeco.val(IsOsx))
print("IsIx<%s>" % adeco.val(IsIx))
| |
from toontown.toonbase.ToontownGlobals import *
zoneUtilNotify = directNotify.newCategory('ZoneUtil')
# When set (see overrideOn), maps the tutorial's private zones onto the
# normal street/interior logic: {'branch': ..., 'exteriors': [...], 'interiors': [...]}
tutorialDict = None
def isGoofySpeedwayZone(zoneId):
    """True only for the Goofy Speedway playground zone (8000)."""
    return 8000 == zoneId
def isCogHQZone(zoneId):
    """True for any Cog HQ zone id (10000 <= id < 15000)."""
    return 10000 <= zoneId < 15000
def isMintInteriorZone(zoneId):
    """True when zoneId is one of the three Cashbot mint interiors."""
    for mint in (CashbotMintIntA, CashbotMintIntB, CashbotMintIntC):
        if zoneId == mint:
            return True
    return False
def isDynamicZone(zoneId):
    """True when zoneId falls in the dynamically-allocated zone range."""
    return DynamicZonesBegin <= zoneId < DynamicZonesEnd
def getStreetName(branchId):
    """Display name of a street branch; during the tutorial every branch
    reports the Tutorial street (20000)."""
    lookup = 20000 if tutorialDict else branchId
    return StreetNames[lookup][-1]
def getLoaderName(zoneId):
    """Which loader handles zoneId: 'cogHQLoader', 'safeZoneLoader', or
    'townLoader'."""
    if tutorialDict:
        return 'safeZoneLoader' if zoneId == ToontownCentral else 'townLoader'
    # map interiors (suffix >= 500) back onto their street's suffix
    suffix = zoneId % 1000
    if suffix >= 500:
        suffix -= 500
    if isCogHQZone(zoneId):
        return 'cogHQLoader'
    if suffix < 100:
        return 'safeZoneLoader'
    return 'townLoader'
def getBranchLoaderName(zoneId):
    """Loader name for the street branch containing zoneId."""
    return getLoaderName(getBranchZone(zoneId))

def getSuitWhereName(zoneId):
    """'where' name for zoneId from a suit's point of view."""
    return getWhereName(zoneId, 0)

def getToonWhereName(zoneId):
    """'where' name for zoneId from a toon's point of view."""
    return getWhereName(zoneId, 1)
def isPlayground(zoneId):
    """True for safezone playgrounds and for Cog HQ exteriors."""
    if getWhereName(zoneId, False) == 'cogHQExterior':
        return True
    return zoneId % 1000 == 0 and zoneId < DynamicZonesBegin
def isPetshop(zoneId):
    """True for the six hard-coded pet shop interior zone ids."""
    return zoneId in (2522, 1510, 3511, 4508, 5505, 9508)
def getWhereName(zoneId, isToon):
    """Classify zoneId into the 'where' name used by loaders/teleport code.

    :param zoneId: zone id to classify
    :param isToon: interiors are 'toonInterior' for toons,
        'suitInterior' for suits
    :return: one of 'playground', 'street', 'toonInterior',
        'suitInterior', 'cogHQExterior', 'cogHQLobby', 'factoryExterior',
        'stageInterior', 'countryClubInterior', 'factoryInterior',
        'mintInterior'.
    """
    if tutorialDict:
        # the tutorial enumerates its zones explicitly
        if zoneId in tutorialDict['interiors']:
            where = 'toonInterior'
        elif zoneId in tutorialDict['exteriors']:
            where = 'street'
        elif zoneId == ToontownCentral or zoneId == WelcomeValleyToken:
            where = 'playground'
        else:
            # NOTE(review): presumably notify.error aborts; otherwise
            # ``where`` would be unbound here -- confirm.
            zoneUtilNotify.error('No known zone: ' + str(zoneId))
    else:
        # round the zone suffix down to its hundreds digit
        suffix = zoneId % 1000
        suffix = suffix - suffix % 100
        if isCogHQZone(zoneId):
            if suffix == 0:
                where = 'cogHQExterior'
            elif suffix == 100:
                where = 'cogHQLobby'
            elif suffix == 200:
                where = 'factoryExterior'
            elif getHoodId(zoneId) == LawbotHQ and suffix in (300, 400, 500, 600):
                where = 'stageInterior'
            elif getHoodId(zoneId) == BossbotHQ and suffix in (500, 600, 700):
                where = 'countryClubInterior'
            elif suffix >= 500:
                if getHoodId(zoneId) == SellbotHQ:
                    where = 'factoryInterior'
                elif getHoodId(zoneId) == CashbotHQ:
                    where = 'mintInterior'
                else:
                    zoneUtilNotify.error('unknown cogHQ interior for hood: ' + str(getHoodId(zoneId)))
            else:
                zoneUtilNotify.error('unknown cogHQ where: ' + str(zoneId))
        elif suffix == 0:
            where = 'playground'
        elif suffix >= 500:
            # interiors differ for toons vs suits
            if isToon:
                where = 'toonInterior'
            else:
                where = 'suitInterior'
        else:
            where = 'street'
    return where
def getBranchZone(zoneId):
    """Zone id of the street branch containing zoneId (interior zones map
    to the street they open onto)."""
    if tutorialDict:
        return tutorialDict['branch']
    branchId = zoneId - zoneId % 100
    if not isCogHQZone(zoneId) and zoneId % 1000 >= 500:
        branchId -= 500
    return branchId
def getCanonicalBranchZone(zoneId):
    """Branch zone of zoneId after mapping Welcome Valley ids back onto
    their canonical hood."""
    return getBranchZone(getCanonicalZoneId(zoneId))

def isWelcomeValley(zoneId):
    """True for the Welcome Valley token and every id inside its range."""
    if zoneId == WelcomeValleyToken:
        return True
    return WelcomeValleyBegin <= zoneId < WelcomeValleyEnd
def getCanonicalZoneId(zoneId):
    """Map a Welcome Valley zone id onto the real TTC/Speedway zone it
    mirrors; every other id passes through unchanged."""
    if zoneId == WelcomeValleyToken:
        return ToontownCentral
    if WelcomeValleyBegin <= zoneId < WelcomeValleyEnd:
        # even 2000-blocks mirror TTC, odd ones mirror Goofy Speedway
        zoneId = zoneId % 2000
        if zoneId < 1000:
            return zoneId + ToontownCentral
        return zoneId - 1000 + GoofySpeedway
    return zoneId
def getTrueZoneId(zoneId, currentZoneId):
    """Inverse of getCanonicalZoneId: when the avatar is currently inside
    a Welcome Valley instance, translate zoneId into that instance."""
    if (WelcomeValleyBegin <= zoneId < WelcomeValleyEnd
            or zoneId == WelcomeValleyToken):
        zoneId = getCanonicalZoneId(zoneId)
    if WelcomeValleyBegin <= currentZoneId < WelcomeValleyEnd:
        hoodId = getHoodId(zoneId)
        # base of the 2000-wide instance block we are standing in
        offset = currentZoneId - currentZoneId % 2000
        if hoodId == ToontownCentral:
            return zoneId - ToontownCentral + offset
        if hoodId == GoofySpeedway:
            return zoneId - GoofySpeedway + offset + 1000
    return zoneId
def getHoodId(zoneId):
    """Hood (playground) zone id containing zoneId; the tutorial has its
    own pseudo-hood."""
    if tutorialDict:
        return Tutorial
    return zoneId - zoneId % 1000
def getSafeZoneId(zoneId):
    """Hood id of zoneId, with Cog HQs mapped to their nearest safezone."""
    hoodId = getHoodId(zoneId)
    if hoodId in HQToSafezone:
        return HQToSafezone[hoodId]
    return hoodId
def getCanonicalHoodId(zoneId):
    """Hood id of zoneId after Welcome Valley canonicalization."""
    return getHoodId(getCanonicalZoneId(zoneId))

def getCanonicalSafeZoneId(zoneId):
    """Safezone id of zoneId after Welcome Valley canonicalization."""
    return getSafeZoneId(getCanonicalZoneId(zoneId))
def isInterior(zoneId):
    """Truthy when zoneId is an interior zone (1/0 in tutorial mode,
    bool otherwise)."""
    if tutorialDict:
        return 1 if zoneId in tutorialDict['interiors'] else 0
    return zoneId % 1000 >= 500
def overrideOn(branch, exteriorList, interiorList):
    """Install the tutorial zone mapping (see tutorialDict); warns if a
    mapping is already active."""
    global tutorialDict
    if tutorialDict:
        zoneUtilNotify.warning('setTutorialDict: tutorialDict is already set!')
    tutorialDict = {
        'branch': branch,
        'exteriors': exteriorList,
        'interiors': interiorList,
    }
def overrideOff():
    """Clear the tutorial zone mapping installed by overrideOn()."""
    global tutorialDict
    tutorialDict = None
def getWakeInfo(hoodId = None, zoneId = None):
    """Return (showWake, wakeWaterHeight) for the given (or current) area.

    Defaults hoodId/zoneId from the live ``base`` globals; returns (0, 0)
    when that state is unavailable or the area has no water wake.
    """
    showWake = 0
    wakeWaterHeight = 0
    try:
        if hoodId is None:
            hoodId = base.cr.playGame.getPlaceId()
        if zoneId is None:
            zoneId = base.cr.playGame.getPlace().getZoneId()
        canonicalZoneId = getCanonicalZoneId(zoneId)
        if canonicalZoneId == DonaldsDock:
            showWake = 1
            wakeWaterHeight = DDWakeWaterHeight
        elif canonicalZoneId == ToontownCentral:
            showWake = 1
            wakeWaterHeight = TTWakeWaterHeight
        elif canonicalZoneId == OutdoorZone:
            showWake = 1
            wakeWaterHeight = OZWakeWaterHeight
        elif hoodId == MyEstate:
            showWake = 1
            wakeWaterHeight = EstateWakeWaterHeight
    except AttributeError:
        # no active playGame/place yet: report "no wake"
        pass
    return (showWake, wakeWaterHeight)
| |
#!/usr/bin/env python
#-*- coding: utf8 -*-
from animations import BulletExplosion, FullSizeExplosion
from stuff_on_map import *
import ai
import math
import random
class World (object):
    """Owns everything in a running game: players, enemies, bullets,
    terrain sprite groups, animations, and the win/lose bookkeeping."""
    def __init__(self, game_map, players, texture_loader):
        self.players = players
        self.map = game_map
        self.texture_loader = texture_loader
        # sprite groups to blit this frame; rebuilt by tick()
        self._drawables = []
        self.enemies = []
        self.ai = ai.ZombieDriver(self)
        self.enemies_killed = 0
        # NOTE(review): attribute name is misspelled ("enemeis") but used
        # consistently throughout; renaming could break external callers.
        self.enemeis_to_kill = 20
        self.un_flags = []
        self._bullets = pygame.sprite.RenderUpdates()
        self._visible_terrain = pygame.sprite.RenderUpdates()
        self._all_unpassable = pygame.sprite.RenderUpdates()
        self._all_passable = pygame.sprite.RenderUpdates()
        self._movable = pygame.sprite.RenderUpdates()
        self._animations = pygame.sprite.RenderUpdates()
    def init(self):
        # Populate the sprite groups from the map and hand the static
        # terrain to the renderer as the background.
        for player in self.players:
            self._movable.add(player.tank)
        self._visible_terrain.add(*[self.map.objects + self.map.unpassable +
                                    self.map.un_flags + self.map.passable])
        self._all_unpassable.add(*[self.map.objects + self.map.unpassable +
                                   self.map.limits_guard + self.map.un_flags])
        self._all_passable.add(*self.map.passable)
        self.map.render.set_background(self._visible_terrain)
        for flag in self.map.un_flags:
            self.un_flags.append(flag)
    def get_end_game_stats(self):
        """Human-readable kill summary for the end-of-game screen."""
        return _("Enemies killed: %d / %d") % (self.enemies_killed, self.enemeis_to_kill)
    def tick_only_animations(self, deltat, events):
        '''
        Progress the currently active animations and nothing else.
        '''
        self.map.render.clear([self._movable, self._bullets, self._animations])
        # drop finished animations before advancing the remainder
        for anim in self._animations:
            if anim.finished:
                self._animations.remove(anim)
        self._animations.update(deltat)
        self._drawables = [self._movable, self._bullets, self._animations]
    def tick(self, deltat, events):
        '''
        Progresses the game world forward. This includes moving the game objects, processing
        events from players, trying for collisions and checking the game objectives.
        '''
        if self.enemies_killed >= self.enemeis_to_kill:
            return GAME_WON
        bullets = self._bullets
        unpassable = self._all_unpassable
        self.map.render.clear([bullets, self._movable, self._animations])
        for anim in self._animations:
            if anim.finished:
                self._animations.remove(anim)
        self._animations.update(deltat)
        players_tanks = []
        alive_enemies = len(self.enemies)
        # NOTE(review): randint(0, 100) < 0.05 is only true when the roll
        # is exactly 0 (~1% per tick); if a 5% spawn rate was intended
        # this should be random.random() < 0.05 -- confirm before changing.
        if alive_enemies < 6 and random.randint(0, 100) < 0.05 and \
                (self.enemies_killed + alive_enemies) < self.enemeis_to_kill:
            self.spawn_enemy()
        for player in self.players:
            player.process_events(events)
            if player.tank is None:
                continue
            players_tanks.append(player.tank)
            bullets.add(*player.tank.bullets)
        if len(players_tanks) < 1:
            return GAME_OVER
        self.ai.tick(deltat, self.enemies)
        for enemy in self.enemies:
            bullets.add(*enemy.bullets)
        tanks = pygame.sprite.RenderUpdates(*(players_tanks + self.enemies))
        # everything a bullet can hit, including other bullets
        bullet_stoppers = players_tanks + self.map.objects + self.enemies + \
            bullets.sprites() + self.map.limits_guard + self.map.un_flags
        bullet_stoppers = pygame.sprite.Group(bullet_stoppers)
        collisions = pygame.sprite.groupcollide(bullets, bullet_stoppers, False, False)
        for bullet in collisions:
            collided_with = collisions[bullet]
            # every bullet overlaps itself in bullet_stoppers; skip that case
            if len(collided_with) == 1 and bullet in collided_with:
                continue
            if bullet.owner is not None:
                bullet.owner.bullets.remove(bullet)
            bullet.explode_sound()
            bullets.remove(bullet)
            # place the explosion on the facing edge of a non-self object hit
            non_self = None
            for obj in collided_with:
                if obj is bullet:
                    continue
                non_self = obj
            ex, ey = bullet.rect.center
            if bullet.direction == DIRECTION_LEFT:
                ex = non_self.rect.centerx + non_self.rect.width * 0.5
            if bullet.direction == DIRECTION_RIGHT:
                ex = non_self.rect.centerx - non_self.rect.width * 0.5
            if bullet.direction == DIRECTION_UP:
                ey = non_self.rect.centery + non_self.rect.height * 0.5
            if bullet.direction == DIRECTION_DOWN:
                ey = non_self.rect.centery - non_self.rect.height * 0.5
            explosion_animation = BulletExplosion((ex, ey))
            self._animations.add(explosion_animation)
            for collided in collided_with:
                if collided == bullet:
                    continue
                if isinstance(collided, UnFlag):
                    # losing a UN flag ends the game immediately
                    self.map.un_flags.remove(collided)
                    self._visible_terrain.remove(collided)
                    self.map.render.set_background(self._visible_terrain)
                    explosion_animation = FullSizeExplosion(collided.rect.center)
                    self._animations.add(explosion_animation)
                    return GAME_OVER
                if not isinstance(collided, BasicTank):
                    continue
                if collided is bullet.owner:
                    continue
                # enemy fire does not hurt other enemies
                if not collided.is_player and not bullet.is_player_bullet:
                    continue
                # the dead tank's bullets live on, ownerless
                for orphan in collided.bullets:
                    orphan.owner = None
                self._movable.remove(collided)
                explosion_animation = FullSizeExplosion(collided.rect.center)
                self._animations.add(explosion_animation)
                if isinstance(collided, EnemyTank):
                    self.enemies.remove(collided)
                    collided.stop()
                    collided.explode_sound()
                    self.enemies_killed += 1
                if isinstance(collided, Tank):
                    tanks.remove(collided)
                    collided.stop()
                    for player in self.players:
                        if player.tank is collided:
                            player.tank.explode_sound()
                            player.tank = None
        bullets.update(deltat)
        # terrain effects: sand slows tanks, ice speeds them up
        for tank in tanks:
            tank.reset_speed_modifier()
            collisions = pygame.sprite.spritecollide(tank, self._all_passable, False, False)
            has_sand = False
            has_ice = False
            for collision in collisions:
                if isinstance(collision, Sand):
                    has_sand = True
                    break
                if isinstance(collision, Ice):
                    has_ice = True
            if has_sand:
                tank.set_speed_modifier(0.7)
            if has_ice:
                tank.set_speed_modifier(1.2)
        for tank in tanks:
            other_tanks = [t for t in tanks if t != tank]
            # tanks already overlapping before the move may keep
            # overlapping while they separate
            previously_collided = pygame.sprite.spritecollide(tank, other_tanks, False, False)
            tank.update(deltat)
            collision = pygame.sprite.spritecollideany(tank, unpassable)
            if collision is not None:
                tank.undo()
                continue
            others = pygame.sprite.spritecollide(tank, other_tanks, False, False)
            if len(others) < 1:
                continue
            for other in others:
                if other not in previously_collided:
                    tank.undo()
                    break
                dist = math.sqrt(
                    abs(tank.rect.centerx - other.rect.centerx) ** 2 +
                    abs(tank.rect.centery - other.rect.centery) ** 2
                )
                if dist < self.map.scaled_box_size * 0.75:
                    tank.undo()
                    break
        self._drawables = [self._movable, bullets, self._animations]
        return GAME_CONTINUE
    def active_animations_count(self):
        """Number of animations still playing (used to delay game over)."""
        return len(self._animations)
    def spawn_enemy(self):
        """Try up to 10 random spawn points; silently give up when every
        pick is blocked by a moving object."""
        player_objects = []
        for player in self.players:
            if player.tank is None:
                continue
            player_objects.append(player.tank)
            player_objects += player.tank.bullets
        for i in range(10):
            index = random.randint(0, len(self.map.enemy_starts) - 1)
            position = self.map.enemy_starts[index]
            new_enemy = EnemyTank(position, self.texture_loader)
            collisions = pygame.sprite.groupcollide(
                [new_enemy],
                self._movable,
                False,
                False
            )
            if len(collisions):
                # we should not spawn an enemy on top of an other enemy
                continue
            self._movable.add(new_enemy)
            self.enemies.append(new_enemy)
            break
    def get_drawables(self):
        """Sprite groups to blit this frame, in draw order."""
        return self._drawables
    def objects_at(self, coords):
        """Placeholder for point queries; currently reports nothing."""
        return []
# ---- (stray artifact line neutralized; was "| |", a syntax error) ----
"""
analysis/network.py
Functions to plot and analyze connectivity-related results
Contributors: salvadordura@gmail.com
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import open
from builtins import next
from builtins import range
from builtins import str
try:
basestring
except NameError:
basestring = str
from builtins import zip
from builtins import round
from future import standard_library
standard_library.install_aliases()
from netpyne import __gui__
if __gui__:
import matplotlib.pyplot as plt
import numpy as np
from numbers import Number
from .utils import colorList, exception, _roundFigures, getCellsInclude, getCellsIncludeTags
from .utils import _saveFigData, _showFigure
# -------------------------------------------------------------------------------------------------------------------
## Support function for plotConn() - calculate conn using data from sim object
# -------------------------------------------------------------------------------------------------------------------
def _plotConnCalculateFromSim(includePre, includePost, feature, orderBy, groupBy, groupByIntervalPre, groupByIntervalPost, synOrConn, synMech):
    """
    Support function for plotConn(): calculate the connectivity matrix using
    the network data stored in the sim object.

    - includePre / includePost: selectors of pre/postsynaptic cells (same format as getCellsInclude)
    - feature: 'weight'|'delay'|'numConns'|'probability'|'strength'|'convergence'|'divergence'
    - orderBy: numeric cell tag used to order axes (only used when groupBy == 'cell')
    - groupBy: 'cell', 'pop', or any numeric cell tag (e.g. 'y')
    - groupByIntervalPre / groupByIntervalPost: bin size when groupBy is a numeric tag
    - synOrConn: 'syn' includes every synapse; 'conn' keeps one conn per presynaptic cell
    - synMech: restrict to these syn mech labels (str or list; None = all)

    Returns (connMatrix, pre, post) where pre/post are the row/column labels
    (cells, pop names, or group values), or (None, None, None) on error.
    """
    from .. import sim

    def list_of_dict_unique_by_key(seq, key):
        # keep only the first conn for each unique value of conn[key]
        seen = set()
        seen_add = seen.add
        return [x for x in seq if x[key] not in seen and not seen_add(x[key])]

    # adapt indices/keys based on compact vs long conn format
    if sim.cfg.compactConnFormat:
        connsFormat = sim.cfg.compactConnFormat

        # set indices of fields to read compact format (no keys)
        missing = []
        preGidIndex = connsFormat.index('preGid') if 'preGid' in connsFormat else missing.append('preGid')
        synMechIndex = connsFormat.index('synMech') if 'synMech' in connsFormat else missing.append('synMech')
        weightIndex = connsFormat.index('weight') if 'weight' in connsFormat else missing.append('weight')
        delayIndex = connsFormat.index('delay') if 'delay' in connsFormat else missing.append('delay')
        preLabelIndex = connsFormat.index('preLabel') if 'preLabel' in connsFormat else -1

        if len(missing) > 0:
            print("  Error: cfg.compactConnFormat missing:")
            print(missing)
            return None, None, None
    else:
        # using long conn format (dict)
        preGidIndex = 'preGid'
        synMechIndex = 'synMech'
        weightIndex = 'weight'
        delayIndex = 'delay'
        preLabelIndex = 'preLabel'

    # Calculate pre and post cells involved
    cellsPre, cellGidsPre, netStimPopsPre = getCellsInclude(includePre)
    if includePre == includePost:
        cellsPost, cellGidsPost, netStimPopsPost = cellsPre, cellGidsPre, netStimPopsPre
    else:
        cellsPost, cellGidsPost, netStimPopsPost = getCellsInclude(includePost)

    if isinstance(synMech, basestring): synMech = [synMech]  # make sure synMech is a list

    # Calculate matrix if grouped by cell
    if groupBy == 'cell':
        if feature in ['weight', 'delay', 'numConns']:
            connMatrix = np.zeros((len(cellGidsPre), len(cellGidsPost)))
            countMatrix = np.zeros((len(cellGidsPre), len(cellGidsPost)))
        else:
            print('Conn matrix with groupBy="cell" only supports features= "weight", "delay" or "numConns"')
            # fix: previously `return fig`, but `fig` is undefined here (NameError);
            # return the error triple like the other failure paths
            return None, None, None
        cellIndsPre = {cell['gid']: ind for ind,cell in enumerate(cellsPre)}
        cellIndsPost = {cell['gid']: ind for ind,cell in enumerate(cellsPost)}

        # Order by
        if len(cellsPre) > 0 and len(cellsPost) > 0:
            if orderBy not in cellsPre[0]['tags'] or orderBy not in cellsPost[0]['tags']:  # if orderBy property doesn't exist or is not numeric, use gid
                orderBy = 'gid'
            elif not isinstance(cellsPre[0]['tags'][orderBy], Number) or not isinstance(cellsPost[0]['tags'][orderBy], Number):
                orderBy = 'gid'

        if orderBy == 'gid':
            yorderPre = [cell[orderBy] for cell in cellsPre]
            yorderPost = [cell[orderBy] for cell in cellsPost]
        else:
            yorderPre = [cell['tags'][orderBy] for cell in cellsPre]
            yorderPost = [cell['tags'][orderBy] for cell in cellsPost]

        sortedGidsPre = {gid:i for i,(y,gid) in enumerate(sorted(zip(yorderPre,cellGidsPre)))}
        cellIndsPre = sortedGidsPre
        if includePre == includePost:
            sortedGidsPost = sortedGidsPre
            cellIndsPost = cellIndsPre
        else:
            sortedGidsPost = {gid:i for i,(y,gid) in enumerate(sorted(zip(yorderPost,cellGidsPost)))}
            cellIndsPost = sortedGidsPost

        # Calculate conn matrix
        for cell in cellsPost:  # for each postsyn cell
            if synOrConn=='syn':
                cellConns = cell['conns']  # include all synapses
            else:
                cellConns = list_of_dict_unique_by_key(cell['conns'], preGidIndex)

            if synMech:
                cellConns = [conn for conn in cellConns if conn[synMechIndex] in synMech]

            for conn in cellConns:
                if conn[preGidIndex] != 'NetStim' and conn[preGidIndex] in cellIndsPre:
                    if feature in ['weight', 'delay']:
                        featureIndex = weightIndex if feature == 'weight' else delayIndex
                        if conn[preGidIndex] in cellIndsPre:
                            connMatrix[cellIndsPre[conn[preGidIndex]], cellIndsPost[cell['gid']]] += conn[featureIndex]
                    # count every conn regardless of feature (needed for numConns and averaging)
                    countMatrix[cellIndsPre[conn[preGidIndex]], cellIndsPost[cell['gid']]] += 1

        if feature in ['weight', 'delay']: connMatrix = connMatrix / countMatrix
        elif feature in ['numConns']: connMatrix = countMatrix

        pre, post = cellsPre, cellsPost

    # Calculate matrix if grouped by pop
    elif groupBy == 'pop':
        # get list of pops
        popsTempPre = list(set([cell['tags']['pop'] for cell in cellsPre]))
        popsPre = [pop for pop in sim.net.allPops if pop in popsTempPre]+netStimPopsPre
        popIndsPre = {pop: ind for ind,pop in enumerate(popsPre)}
        if includePre == includePost:
            popsPost = popsPre
            popIndsPost = popIndsPre
        else:
            popsTempPost = list(set([cell['tags']['pop'] for cell in cellsPost]))
            popsPost = [pop for pop in sim.net.allPops if pop in popsTempPost]+netStimPopsPost
            popIndsPost = {pop: ind for ind,pop in enumerate(popsPost)}

        # initialize matrices
        if feature in ['weight', 'strength']:
            weightMatrix = np.zeros((len(popsPre), len(popsPost)))
        elif feature == 'delay':
            delayMatrix = np.zeros((len(popsPre), len(popsPost)))
        countMatrix = np.zeros((len(popsPre), len(popsPost)))

        # calculate max num conns per pre and post pair of pops
        numCellsPopPre = {}
        for pop in popsPre:
            if pop in netStimPopsPre:
                numCellsPopPre[pop] = -1  # sentinel: NetStim "pops" have no fixed cell count
            else:
                numCellsPopPre[pop] = len([cell for cell in cellsPre if cell['tags']['pop']==pop])

        if includePre == includePost:
            numCellsPopPost = numCellsPopPre
        else:
            numCellsPopPost = {}
            for pop in popsPost:
                if pop in netStimPopsPost:
                    numCellsPopPost[pop] = -1
                else:
                    numCellsPopPost[pop] = len([cell for cell in cellsPost if cell['tags']['pop']==pop])

        maxConnMatrix = np.zeros((len(popsPre), len(popsPost)))
        if feature == 'convergence': maxPostConnMatrix = np.zeros((len(popsPre), len(popsPost)))
        if feature == 'divergence': maxPreConnMatrix = np.zeros((len(popsPre), len(popsPost)))
        for prePop in popsPre:
            for postPop in popsPost:
                if numCellsPopPre[prePop] == -1: numCellsPopPre[prePop] = numCellsPopPost[postPop]
                maxConnMatrix[popIndsPre[prePop], popIndsPost[postPop]] = numCellsPopPre[prePop]*numCellsPopPost[postPop]
                if feature == 'convergence': maxPostConnMatrix[popIndsPre[prePop], popIndsPost[postPop]] = numCellsPopPost[postPop]
                if feature == 'divergence': maxPreConnMatrix[popIndsPre[prePop], popIndsPost[postPop]] = numCellsPopPre[prePop]

        # Calculate conn matrix
        for cell in cellsPost:  # for each postsyn cell
            if synOrConn=='syn':
                cellConns = cell['conns']  # include all synapses
            else:
                cellConns = list_of_dict_unique_by_key(cell['conns'], preGidIndex)

            if synMech:
                cellConns = [conn for conn in cellConns if conn[synMechIndex] in synMech]

            for conn in cellConns:
                if conn[preGidIndex] == 'NetStim':
                    # fix: with compact format preLabelIndex is an int, so the original
                    # `preLabelIndex in conn` tested list membership instead of field presence
                    if sim.cfg.compactConnFormat:
                        prePopLabel = conn[preLabelIndex] if preLabelIndex >= 0 else 'NetStim'
                    else:
                        prePopLabel = conn.get(preLabelIndex, 'NetStim')
                else:
                    preCell = next((cell for cell in cellsPre if cell['gid']==conn[preGidIndex]), None)
                    prePopLabel = preCell['tags']['pop'] if preCell else None

                if prePopLabel in popIndsPre:
                    if feature in ['weight', 'strength']:
                        weightMatrix[popIndsPre[prePopLabel], popIndsPost[cell['tags']['pop']]] += conn[weightIndex]
                    elif feature == 'delay':
                        delayMatrix[popIndsPre[prePopLabel], popIndsPost[cell['tags']['pop']]] += conn[delayIndex]
                    countMatrix[popIndsPre[prePopLabel], popIndsPost[cell['tags']['pop']]] += 1

        pre, post = popsPre, popsPost

    # Calculate matrix if grouped by numeric tag (eg. 'y')
    elif groupBy in sim.net.allCells[0]['tags'] and isinstance(sim.net.allCells[0]['tags'][groupBy], Number):
        if not isinstance(groupByIntervalPre, Number) or not isinstance(groupByIntervalPost, Number):
            print('groupByIntervalPre or groupByIntervalPost not specified')
            # fix: bare `return` broke tuple-unpacking in callers
            return None, None, None

        # group cells by 'groupBy' feature (eg. 'y') in intervals of 'groupByInterval')
        cellValuesPre = [cell['tags'][groupBy] for cell in cellsPre]
        minValuePre = _roundFigures(groupByIntervalPre * np.floor(min(cellValuesPre) / groupByIntervalPre), 3)
        maxValuePre = _roundFigures(groupByIntervalPre * np.ceil(max(cellValuesPre) / groupByIntervalPre), 3)
        groupsPre = np.arange(minValuePre, maxValuePre, groupByIntervalPre)
        groupsPre = [_roundFigures(x,3) for x in groupsPre]

        if includePre == includePost:
            groupsPost = groupsPre
        else:
            cellValuesPost = [cell['tags'][groupBy] for cell in cellsPost]
            minValuePost = _roundFigures(groupByIntervalPost * np.floor(min(cellValuesPost) / groupByIntervalPost), 3)
            maxValuePost = _roundFigures(groupByIntervalPost * np.ceil(max(cellValuesPost) / groupByIntervalPost), 3)
            groupsPost = np.arange(minValuePost, maxValuePost, groupByIntervalPost)
            groupsPost = [_roundFigures(x,3) for x in groupsPost]

        # only allow matrix sizes >= 2x2 [why?]
        # if len(groupsPre) < 2 or len(groupsPost) < 2:
        #     print 'groupBy %s with groupByIntervalPre %s and groupByIntervalPost %s results in <2 groups'%(str(groupBy), str(groupByIntervalPre), str(groupByIntervalPre))
        #     return

        # set indices for pre and post groups
        groupIndsPre = {group: ind for ind,group in enumerate(groupsPre)}
        groupIndsPost = {group: ind for ind,group in enumerate(groupsPost)}

        # initialize matrices
        if feature in ['weight', 'strength']:
            weightMatrix = np.zeros((len(groupsPre), len(groupsPost)))
        elif feature == 'delay':
            delayMatrix = np.zeros((len(groupsPre), len(groupsPost)))
        countMatrix = np.zeros((len(groupsPre), len(groupsPost)))

        # calculate max num conns per pre and post pair of pops
        numCellsGroupPre = {}
        for groupPre in groupsPre:
            numCellsGroupPre[groupPre] = len([cell for cell in cellsPre if groupPre <= cell['tags'][groupBy] < (groupPre+groupByIntervalPre)])

        if includePre == includePost:
            numCellsGroupPost = numCellsGroupPre
        else:
            numCellsGroupPost = {}
            for groupPost in groupsPost:
                numCellsGroupPost[groupPost] = len([cell for cell in cellsPost if groupPost <= cell['tags'][groupBy] < (groupPost+groupByIntervalPost)])

        maxConnMatrix = np.zeros((len(groupsPre), len(groupsPost)))
        if feature == 'convergence': maxPostConnMatrix = np.zeros((len(groupsPre), len(groupsPost)))
        if feature == 'divergence': maxPreConnMatrix = np.zeros((len(groupsPre), len(groupsPost)))
        for preGroup in groupsPre:
            for postGroup in groupsPost:
                if numCellsGroupPre[preGroup] == -1: numCellsGroupPre[preGroup] = numCellsGroupPost[postGroup]
                maxConnMatrix[groupIndsPre[preGroup], groupIndsPost[postGroup]] = numCellsGroupPre[preGroup]*numCellsGroupPost[postGroup]
                if feature == 'convergence': maxPostConnMatrix[groupIndsPre[preGroup], groupIndsPost[postGroup]] = numCellsGroupPost[postGroup]
                if feature == 'divergence': maxPreConnMatrix[groupIndsPre[preGroup], groupIndsPost[postGroup]] = numCellsGroupPre[preGroup]

        # Calculate conn matrix
        for cell in cellsPost:  # for each postsyn cell
            if synOrConn=='syn':
                cellConns = cell['conns']  # include all synapses
            else:
                cellConns = list_of_dict_unique_by_key(cell['conns'], preGidIndex)

            if synMech:
                cellConns = [conn for conn in cellConns if conn[synMechIndex] in synMech]

            for conn in cellConns:
                if conn[preGidIndex] == 'NetStim':
                    prePopLabel = -1  # maybe add in future
                    # fix: preGroup was left stale (or unbound on the first conn) in
                    # the NetStim case; reset it so the conn is skipped below
                    preGroup = None
                else:
                    preCell = next((c for c in cellsPre if c['gid']==conn[preGidIndex]), None)
                    if preCell:
                        preGroup = _roundFigures(groupByIntervalPre * np.floor(preCell['tags'][groupBy] / groupByIntervalPre), 3)
                    else:
                        preGroup = None

                postGroup = _roundFigures(groupByIntervalPost * np.floor(cell['tags'][groupBy] / groupByIntervalPost), 3)

                if preGroup in groupIndsPre:
                    if feature in ['weight', 'strength']:
                        weightMatrix[groupIndsPre[preGroup], groupIndsPost[postGroup]] += conn[weightIndex]
                    elif feature == 'delay':
                        delayMatrix[groupIndsPre[preGroup], groupIndsPost[postGroup]] += conn[delayIndex]
                    countMatrix[groupIndsPre[preGroup], groupIndsPost[postGroup]] += 1

        pre, post = groupsPre, groupsPost

    # no valid groupBy
    else:
        print('groupBy (%s) is not valid'%(str(groupBy)))
        # fix: bare `return` broke tuple-unpacking in callers
        return None, None, None

    # normalize by number of postsyn cells
    if groupBy != 'cell':
        if feature == 'weight':
            connMatrix = weightMatrix / countMatrix  # avg weight per conn (fix to remove divide by zero warning)
        elif feature == 'delay':
            connMatrix = delayMatrix / countMatrix
        elif feature == 'numConns':
            connMatrix = countMatrix
        elif feature in ['probability', 'strength']:
            connMatrix = countMatrix / maxConnMatrix  # probability
            if feature == 'strength':
                connMatrix = connMatrix * weightMatrix  # strength
        elif feature == 'convergence':
            connMatrix = countMatrix / maxPostConnMatrix
        elif feature == 'divergence':
            connMatrix = countMatrix / maxPreConnMatrix

    return connMatrix, pre, post
# -------------------------------------------------------------------------------------------------------------------
## Support function for plotConn() - calculate conn using data from files with short format (no keys)
# -------------------------------------------------------------------------------------------------------------------
def _plotConnCalculateFromFile(includePre, includePost, feature, orderBy, groupBy, groupByIntervalPre, groupByIntervalPost, synOrConn, synMech, connsFile, tagsFile):
    """
    Support function for plotConn(): calculate the connectivity matrix using
    tags and conns loaded from JSON files saved in compact format (no keys).

    Same parameters as _plotConnCalculateFromSim() plus:
    - connsFile: path to JSON file containing a 'conns' dict in compact format
    - tagsFile: path to JSON file containing a 'tags' dict in compact format

    Only groupBy == 'pop' is implemented for file-based data.
    Returns (connMatrix, pre, post) or (None, None, None) on error.
    """
    from .. import sim
    import json
    from time import time

    def list_of_dict_unique_by_key(seq, index):
        # keep only the first conn for each unique value of conn[index]
        seen = set()
        seen_add = seen.add
        return [x for x in seq if x[index] not in seen and not seen_add(x[index])]

    # load files with tags and conns
    start = time()
    tags, conns = None, None
    if tagsFile:
        print('Loading tags file...')
        with open(tagsFile, 'r') as fileObj: tagsTmp = json.load(fileObj)['tags']
        tagsFormat = tagsTmp.pop('format', [])
        tags = {int(k): v for k,v in tagsTmp.items()}  # find method to load json with int keys?
        del tagsTmp
    if connsFile:
        print('Loading conns file...')
        with open(connsFile, 'r') as fileObj: connsTmp = json.load(fileObj)['conns']
        connsFormat = connsTmp.pop('format', [])
        conns = {int(k): v for k,v in connsTmp.items()}
        del connsTmp

    print('Finished loading; total time (s): %.2f'%(time()-start))

    # find pre and post cells
    if tags and conns:
        cellGidsPre = getCellsIncludeTags(includePre, tags, tagsFormat)
        if includePre == includePost:
            cellGidsPost = cellGidsPre
        else:
            cellGidsPost = getCellsIncludeTags(includePost, tags, tagsFormat)
    else:
        print('Error loading tags and conns from file')
        return None, None, None

    # set indices of fields to read compact format (no keys)
    missing = []
    popIndex = tagsFormat.index('pop') if 'pop' in tagsFormat else missing.append('pop')
    preGidIndex = connsFormat.index('preGid') if 'preGid' in connsFormat else missing.append('preGid')
    synMechIndex = connsFormat.index('synMech') if 'synMech' in connsFormat else missing.append('synMech')
    weightIndex = connsFormat.index('weight') if 'weight' in connsFormat else missing.append('weight')
    delayIndex = connsFormat.index('delay') if 'delay' in connsFormat else missing.append('delay')
    preLabelIndex = connsFormat.index('preLabel') if 'preLabel' in connsFormat else -1

    if len(missing) > 0:
        print("Missing:")
        print(missing)
        return None, None, None

    if isinstance(synMech, basestring): synMech = [synMech]  # make sure synMech is a list

    # Calculate matrix if grouped by cell
    if groupBy == 'cell':
        print('plotConn from file for groupBy=cell not implemented yet')
        return None, None, None

    # Calculate matrix if grouped by pop
    elif groupBy == 'pop':
        # get list of pops
        print('  Obtaining list of populations ...')
        popsPre = list(set([tags[gid][popIndex] for gid in cellGidsPre]))
        popIndsPre = {pop: ind for ind,pop in enumerate(popsPre)}
        netStimPopsPre = []  # netstims not yet supported
        netStimPopsPost = []
        if includePre == includePost:
            popsPost = popsPre
            popIndsPost = popIndsPre
        else:
            popsPost = list(set([tags[gid][popIndex] for gid in cellGidsPost]))
            popIndsPost = {pop: ind for ind,pop in enumerate(popsPost)}

        # initialize matrices
        if feature in ['weight', 'strength']:
            weightMatrix = np.zeros((len(popsPre), len(popsPost)))
        elif feature == 'delay':
            delayMatrix = np.zeros((len(popsPre), len(popsPost)))
        countMatrix = np.zeros((len(popsPre), len(popsPost)))

        # calculate max num conns per pre and post pair of pops
        print('  Calculating max num conns for each pair of population ...')
        numCellsPopPre = {}
        for pop in popsPre:
            if pop in netStimPopsPre:
                numCellsPopPre[pop] = -1
            else:
                numCellsPopPre[pop] = len([gid for gid in cellGidsPre if tags[gid][popIndex]==pop])

        if includePre == includePost:
            numCellsPopPost = numCellsPopPre
        else:
            numCellsPopPost = {}
            for pop in popsPost:
                if pop in netStimPopsPost:
                    numCellsPopPost[pop] = -1
                else:
                    numCellsPopPost[pop] = len([gid for gid in cellGidsPost if tags[gid][popIndex]==pop])

        maxConnMatrix = np.zeros((len(popsPre), len(popsPost)))
        if feature == 'convergence': maxPostConnMatrix = np.zeros((len(popsPre), len(popsPost)))
        if feature == 'divergence': maxPreConnMatrix = np.zeros((len(popsPre), len(popsPost)))
        for prePop in popsPre:
            for postPop in popsPost:
                if numCellsPopPre[prePop] == -1: numCellsPopPre[prePop] = numCellsPopPost[postPop]
                maxConnMatrix[popIndsPre[prePop], popIndsPost[postPop]] = numCellsPopPre[prePop]*numCellsPopPost[postPop]
                if feature == 'convergence': maxPostConnMatrix[popIndsPre[prePop], popIndsPost[postPop]] = numCellsPopPost[postPop]
                if feature == 'divergence': maxPreConnMatrix[popIndsPre[prePop], popIndsPost[postPop]] = numCellsPopPre[prePop]

        # Calculate conn matrix
        print('  Calculating weights, strength, prob, delay etc matrices ...')
        cellGidsPreSet = set(cellGidsPre)  # O(1) membership test instead of scanning the list per conn
        for postGid in cellGidsPost:  # for each postsyn cell
            print('    cell %d'%(int(postGid)))
            if synOrConn=='syn':
                cellConns = conns[postGid]  # include all synapses
            else:
                # fix: previously called undefined name list_of_dict_unique_by_index (NameError)
                cellConns = list_of_dict_unique_by_key(conns[postGid], preGidIndex)

            if synMech:
                cellConns = [conn for conn in cellConns if conn[synMechIndex] in synMech]

            for conn in cellConns:
                if conn[preGidIndex] == 'NetStim':
                    prePopLabel = conn[preLabelIndex] if preLabelIndex >=0 else 'NetStims'
                else:
                    preCellGid = conn[preGidIndex] if conn[preGidIndex] in cellGidsPreSet else None
                    # fix: `if preCellGid` silently dropped conns from gid 0; test for None explicitly
                    prePopLabel = tags[preCellGid][popIndex] if preCellGid is not None else None

                if prePopLabel in popIndsPre:
                    if feature in ['weight', 'strength']:
                        weightMatrix[popIndsPre[prePopLabel], popIndsPost[tags[postGid][popIndex]]] += conn[weightIndex]
                    elif feature == 'delay':
                        delayMatrix[popIndsPre[prePopLabel], popIndsPost[tags[postGid][popIndex]]] += conn[delayIndex]
                    countMatrix[popIndsPre[prePopLabel], popIndsPost[tags[postGid][popIndex]]] += 1

        pre, post = popsPre, popsPost

    # Calculate matrix if grouped by numeric tag (eg. 'y')
    elif groupBy in sim.net.allCells[0]['tags'] and isinstance(sim.net.allCells[0]['tags'][groupBy], Number):
        print('plotConn from file for groupBy=[arbitrary property] not implemented yet')
        return None, None, None

    # no valid groupBy
    else:
        print('groupBy (%s) is not valid'%(str(groupBy)))
        # fix: bare `return` broke tuple-unpacking in callers
        return None, None, None

    # normalize
    if groupBy != 'cell':
        if feature == 'weight':
            connMatrix = weightMatrix / countMatrix  # avg weight per conn (fix to remove divide by zero warning)
        elif feature == 'delay':
            connMatrix = delayMatrix / countMatrix
        elif feature == 'numConns':
            connMatrix = countMatrix
        elif feature in ['probability', 'strength']:
            connMatrix = countMatrix / maxConnMatrix  # probability
            if feature == 'strength':
                connMatrix = connMatrix * weightMatrix  # strength
        elif feature == 'convergence':
            connMatrix = countMatrix / maxPostConnMatrix
        elif feature == 'divergence':
            connMatrix = countMatrix / maxPreConnMatrix

    print('    plotting ...')
    return connMatrix, pre, post
# -------------------------------------------------------------------------------------------------------------------
## Plot connectivity
# -------------------------------------------------------------------------------------------------------------------
@exception
def plotConn (includePre = ['all'], includePost = ['all'], feature = 'strength', orderBy = 'gid', figSize = (10,10), groupBy = 'pop', groupByIntervalPre = None, groupByIntervalPost = None, graphType = 'matrix', synOrConn = 'syn', synMech = None, connsFile = None, tagsFile = None, clim = None, fontSize = 12, saveData = None, saveFig = None, showFig = True):
    '''
    Plot network connectivity
    - includePre (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to show (default: ['all'])
    - includePost (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to show (default: ['all'])
    - feature ('weight'|'delay'|'numConns'|'probability'|'strength'|'convergence'|'divergence'): Feature to show in connectivity matrix;
        the only features applicable to groupBy='cell' are 'weight', 'delay' and 'numConns';  'strength' = weight * probability (default: 'strength')
    - groupBy ('pop'|'cell'|'y'|: Show matrix for individual cells, populations, or by other numeric tag such as 'y' (default: 'pop')
    - groupByInterval (int or float): Interval of groupBy feature to group cells by in conn matrix, e.g. 100 to group by cortical depth in steps of 100 um   (default: None)
    - orderBy ('gid'|'y'|'ynorm'|...): Unique numeric cell property to order x and y axes by, e.g. 'gid', 'ynorm', 'y' (requires groupBy='cells') (default: 'gid')
    - graphType ('matrix','bar','pie'): Type of graph to represent data (default: 'matrix')
    - synOrConn ('syn'|'conn'): Use synapses or connections; note 1 connection can have multiple synapses (default: 'syn')
    - figSize ((width, height)): Size of figure (default: (10,10))
    - synMech (['AMPA', 'GABAA',...]): Show results only for these syn mechs (default: None)
    - saveData (None|True|'fileName'): File name where to save the final data used to generate the figure;
        if set to True uses filename from simConfig (default: None)
    - saveFig (None|True|'fileName'): File name where to save the figure;
        if set to True uses filename from simConfig (default: None)
    - showFig (True|False): Whether to show the figure or not (default: True)

    - Returns figure handles
    '''
    from .. import sim

    print('Plotting connectivity matrix...')

    if connsFile and tagsFile:
        connMatrix, pre, post = _plotConnCalculateFromFile(includePre, includePost, feature, orderBy, groupBy, groupByIntervalPre, groupByIntervalPost, synOrConn, synMech, connsFile, tagsFile)
    else:
        connMatrix, pre, post = _plotConnCalculateFromSim(includePre, includePost, feature, orderBy, groupBy, groupByIntervalPre, groupByIntervalPost, synOrConn, synMech)

    if connMatrix is None:
        print("Error calculating connMatrix in plotConn()")
        return None

    # set font size
    plt.rcParams.update({'font.size': fontSize})

    # fix: `fig` was unbound (NameError at the return below) for graphType='pie'
    # or for graphType='bar' with groupBy='cell'
    fig = None

    # matrix plot
    if graphType == 'matrix':
        # Create plot
        fig = plt.figure(figsize=figSize)
        fig.subplots_adjust(right=0.98)  # Less space on right
        fig.subplots_adjust(top=0.96)  # Less space on top
        fig.subplots_adjust(bottom=0.02)  # Less space on bottom
        h = plt.axes()

        plt.imshow(connMatrix, interpolation='nearest', cmap='viridis', vmin=np.nanmin(connMatrix), vmax=np.nanmax(connMatrix))  #_bicolormap(gap=0)

        # Plot grid lines
        if groupBy == 'cell':
            cellsPre, cellsPost = pre, post

            # Make pretty
            stepy = max(1, int(len(cellsPre)/10.0))
            basey = 100 if stepy>100 else 10
            stepy = max(1, int(basey * np.floor(float(stepy)/basey)))
            stepx = max(1, int(len(cellsPost)/10.0))
            basex = 100 if stepx>100 else 10
            stepx = max(1, int(basex * np.floor(float(stepx)/basex)))

            h.set_xticks(np.arange(0,len(cellsPost),stepx))
            h.set_yticks(np.arange(0,len(cellsPre),stepy))
            h.set_xticklabels(np.arange(0,len(cellsPost),stepx))
            # fix: y tick labels used len(cellsPost) even though the y axis is the pre cells
            h.set_yticklabels(np.arange(0,len(cellsPre),stepy))
            h.xaxis.set_ticks_position('top')
            plt.xlim(-0.5,len(cellsPost)-0.5)
            plt.ylim(len(cellsPre)-0.5,-0.5)

        elif groupBy == 'pop':
            popsPre, popsPost = pre, post

            # horizontal separators (one per presyn pop) span the post (x) axis;
            # vertical separators (one per postsyn pop) span the pre (y) axis
            # (fix: the two extents were swapped, giving wrong-length grid lines
            # when the pre and post pop lists differ)
            for ipop, pop in enumerate(popsPre):
                plt.plot(np.array([0,len(popsPost)])-0.5,np.array([ipop,ipop])-0.5,'-',c=(0.7,0.7,0.7))
            for ipop, pop in enumerate(popsPost):
                plt.plot(np.array([ipop,ipop])-0.5,np.array([0,len(popsPre)])-0.5,'-',c=(0.7,0.7,0.7))

            # Make pretty
            h.set_xticks(list(range(len(popsPost))))
            h.set_yticks(list(range(len(popsPre))))
            h.set_xticklabels(popsPost)
            h.set_yticklabels(popsPre)
            h.xaxis.set_ticks_position('top')
            plt.xlim(-0.5,len(popsPost)-0.5)
            plt.ylim(len(popsPre)-0.5,-0.5)

        else:
            groupsPre, groupsPost = pre, post

            # same extent fix as the 'pop' branch above
            for igroup, group in enumerate(groupsPre):
                plt.plot(np.array([0,len(groupsPost)])-0.5,np.array([igroup,igroup])-0.5,'-',c=(0.7,0.7,0.7))
            for igroup, group in enumerate(groupsPost):
                plt.plot(np.array([igroup,igroup])-0.5,np.array([0,len(groupsPre)])-0.5,'-',c=(0.7,0.7,0.7))

            # Make pretty
            h.set_xticks([i-0.5 for i in range(len(groupsPost))])
            h.set_yticks([i-0.5 for i in range(len(groupsPre))])
            h.set_xticklabels([int(x) if x>1 else x for x in groupsPost])
            h.set_yticklabels([int(x) if x>1 else x for x in groupsPre])
            h.xaxis.set_ticks_position('top')
            plt.xlim(-0.5,len(groupsPost)-0.5)
            plt.ylim(len(groupsPre)-0.5,-0.5)

        if not clim: clim = [np.nanmin(connMatrix), np.nanmax(connMatrix)]
        plt.clim(clim[0], clim[1])
        plt.colorbar(label=feature, shrink=0.8)  #.set_label(label='Fitness',size=20,weight='bold')
        plt.xlabel('post')
        h.xaxis.set_label_coords(0.5, 1.06)
        plt.ylabel('pre')
        plt.title ('Connection '+feature+' matrix', y=1.08)

    # stacked bar graph
    elif graphType == 'bar':
        if groupBy == 'pop':
            popsPre, popsPost = pre, post

            from netpyne.support import stackedBarGraph
            SBG = stackedBarGraph.StackedBarGrapher()

            fig = plt.figure(figsize=figSize)
            ax = fig.add_subplot(111)
            SBG.stackedBarPlot(ax, connMatrix.transpose(), colorList, xLabels=popsPost, gap = 0.1, scale=False, xlabel='postsynaptic', ylabel = feature)
            plt.title ('Connection '+feature+' stacked bar graph')
            plt.legend(popsPre)
            plt.tight_layout()

        elif groupBy == 'cell':
            print('Error: plotConn graphType="bar" with groupBy="cell" not implemented')

    elif graphType == 'pie':
        print('Error: plotConn graphType="pie" not yet implemented')

    #save figure data
    if saveData:
        figData = {'connMatrix': connMatrix, 'feature': feature, 'groupBy': groupBy,
         'includePre': includePre, 'includePost': includePost, 'saveData': saveData, 'saveFig': saveFig, 'showFig': showFig}

        _saveFigData(figData, saveData, 'conn')

    # save figure
    if saveFig:
        if isinstance(saveFig, basestring):
            filename = saveFig
        else:
            filename = sim.cfg.filename+'_'+'conn_'+feature+'.png'
        plt.savefig(filename)

    # show fig
    if showFig: _showFigure()

    return fig, {'connMatrix': connMatrix, 'feature': feature, 'groupBy': groupBy, 'includePre': includePre, 'includePost': includePost}
# -------------------------------------------------------------------------------------------------------------------
## Plot 2D representation of network cell positions and connections
# -------------------------------------------------------------------------------------------------------------------
@exception
def plot2Dnet (include = ['allCells'], figSize = (12,12), view = 'xy', showConns = True, popColors = None, fontSize = 12,
    tagsFile = None, saveData = None, saveFig = None, showFig = True):
    '''
    Plot 2D representation of network cell positions and connections
    - include (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to show (default: ['all'])
    - showConns (True|False): Whether to show connections or not (default: True)
    - figSize ((width, height)): Size of figure (default: (12,12))
    - view ('xy', 'xz'): Perspective view: front ('xy') or top-down ('xz')
    - popColors (dict): Dictionary with color (value) used for each population (key) (default: None)
    - saveData (None|'fileName'): File name where to save the final data used to generate the figure (default: None)
    - saveFig (None|'fileName'): File name where to save the figure;
        if set to True uses filename from simConfig (default: None)(default: None)
    - showFig (True|False): Whether to show the figure or not;
        if set to True uses filename from simConfig (default: None)

    - Returns figure handles
    '''
    from .. import sim

    print('Plotting 2D representation of network cell locations and connections...')

    fig = plt.figure(figsize=figSize)

    # front view
    if view == 'xy':
        ycoord = 'y'
    elif view == 'xz':
        ycoord = 'z'
    else:
        # fix: an invalid `view` previously left ycoord undefined (NameError later)
        raise ValueError("view must be 'xy' or 'xz'")

    if tagsFile:
        print('Loading tags file...')
        import json
        with open(tagsFile, 'r') as fileObj: tagsTmp = json.load(fileObj)['tags']
        tagsFormat = tagsTmp.pop('format', [])
        tags = {int(k): v for k,v in tagsTmp.items()}  # find method to load json with int keys?
        del tagsTmp

        # set indices of fields to read compact format (no keys)
        missing = []
        popIndex = tagsFormat.index('pop') if 'pop' in tagsFormat else missing.append('pop')
        xIndex = tagsFormat.index('x') if 'x' in tagsFormat else missing.append('x')
        yIndex = tagsFormat.index('y') if 'y' in tagsFormat else missing.append('y')
        zIndex = tagsFormat.index('z') if 'z' in tagsFormat else missing.append('z')

        if len(missing) > 0:
            print("Missing:")
            print(missing)
            # fix: was `return None, None, None`, inconsistent with the single-None
            # error return used by the other failure path of this function
            return None

        # find pre and post cells
        if tags:
            cellGids = getCellsIncludeTags(include, tags, tagsFormat)
            popLabels = list(set([tags[gid][popIndex] for gid in cellGids]))

            # pop and cell colors
            popColorsTmp = {popLabel: colorList[ipop%len(colorList)] for ipop,popLabel in enumerate(popLabels)}  # dict with color for each pop
            if popColors: popColorsTmp.update(popColors)
            popColors = popColorsTmp
            cellColors = [popColors[tags[gid][popIndex]] for gid in cellGids]

            # cell locations
            posX = [tags[gid][xIndex] for gid in cellGids]  # get all x positions
            if ycoord == 'y':
                posY = [tags[gid][yIndex] for gid in cellGids]  # get all y positions
            elif ycoord == 'z':
                posY = [tags[gid][zIndex] for gid in cellGids]  # get all y positions
        else:
            print('Error loading tags from file')
            return None

    else:
        cells, cellGids, _ = getCellsInclude(include)
        selectedPops = [cell['tags']['pop'] for cell in cells]
        popLabels = [pop for pop in sim.net.allPops if pop in selectedPops]  # preserves original ordering

        # pop and cell colors
        popColorsTmp = {popLabel: colorList[ipop%len(colorList)] for ipop,popLabel in enumerate(popLabels)}  # dict with color for each pop
        if popColors: popColorsTmp.update(popColors)
        popColors = popColorsTmp
        cellColors = [popColors[cell['tags']['pop']] for cell in cells]

        # cell locations
        posX = [cell['tags']['x'] for cell in cells]  # get all x positions
        posY = [cell['tags'][ycoord] for cell in cells]  # get all y positions

    plt.scatter(posX, posY, s=60, color = cellColors)  # plot cell soma positions
    posXpre, posYpre = [], []
    posXpost, posYpost = [], []
    if showConns and not tagsFile:
        for postCell in cells:
            for con in postCell['conns']:  # plot connections between cells
                if not isinstance(con['preGid'], basestring) and con['preGid'] in cellGids:
                    # the `in cellGids` guard guarantees next() finds a match, so
                    # the None default is never unpacked here
                    posXpre,posYpre = next(((cell['tags']['x'],cell['tags'][ycoord]) for cell in cells if cell['gid']==con['preGid']), None)
                    posXpost,posYpost = postCell['tags']['x'], postCell['tags'][ycoord]
                    color='red'
                    if con['synMech'] in ['inh', 'GABA', 'GABAA', 'GABAB']:
                        color = 'blue'
                    width = 0.1  #50*con['weight']
                    plt.plot([posXpre, posXpost], [posYpre, posYpost], color=color, linewidth=width)  # plot line from pre to post

    plt.xlabel('x (um)')
    plt.ylabel(ycoord+' (um)')
    plt.xlim([min(posX)-0.05*max(posX),1.05*max(posX)])
    plt.ylim([min(posY)-0.05*max(posY),1.05*max(posY)])
    fontsiz = fontSize

    for popLabel in popLabels:
        plt.plot(0,0,color=popColors[popLabel],label=popLabel)
    plt.legend(fontsize=fontsiz, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    ax = plt.gca()
    ax.invert_yaxis()

    # save figure data
    if saveData:
        # fix: cellColors was stored under a duplicate 'posX' key, silently
        # clobbering the x positions in the saved data
        figData = {'posX': posX, 'posY': posY, 'cellColors': cellColors, 'posXpre': posXpre, 'posXpost': posXpost, 'posYpre': posYpre, 'posYpost': posYpost,
         'include': include, 'saveData': saveData, 'saveFig': saveFig, 'showFig': showFig}

        _saveFigData(figData, saveData, '2Dnet')

    # save figure
    if saveFig:
        if isinstance(saveFig, basestring):
            filename = saveFig
        else:
            filename = sim.cfg.filename+'_'+'2Dnet.png'
        plt.savefig(filename)

    # show fig
    if showFig: _showFigure()

    return fig, {'include': include, 'posX': posX, 'posY': posY, 'posXpre': posXpre, 'posXpost': posXpost, 'posYpre': posYpre, 'posYpost': posYpost}
# -------------------------------------------------------------------------------------------------------------------
## Plot cell shape
# -------------------------------------------------------------------------------------------------------------------
@exception
def plotShape (includePost = ['all'], includePre = ['all'], showSyns = False, showElectrodes = False, synStyle = '.', synSiz=3, dist=0.6, cvar=None, cvals=None,
        iv=False, ivprops=None, includeAxon=True, bkgColor = None, fontSize = 12, figSize = (10,8), saveData = None, dpi = 300, saveFig = None, showFig = True):
    '''
    Plot 3D cell shape using NEURON Interview PlotShape
        - includePre: (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): List of presynaptic cells to consider
            when plotting connections (default: ['all'])
        - includePost: (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): List of cells to show shape of (default: ['all'])
        - showSyns (True|False): Show synaptic connections in 3D view (default: False)
        - showElectrodes (True|False): Show LFP electrodes in 3D view (default: False)
        - synStyle: Style of marker to show synapses (default: '.')
        - dist: 3D distance (like zoom) (default: 0.6)
        - synSize: Size of marker to show synapses (default: 3)
        - cvar: ('numSyns'|'weightNorm') Variable to represent in shape plot (default: None)
        - cvals: List of values to represent in shape plot; must be same as num segments (default: None)
        - iv: Use NEURON Interviews (instead of matplotlib) to show shape plot (default: None)
        - ivprops: Dict of properties to plot using Interviews (default: None)
        - includeAxon: Include axon in shape plot (default: True)
        - bkgColor (list/tuple with 4 floats): RGBA list/tuple with bakcground color eg. (0.5, 0.2, 0.1, 1.0) (default: None)
        - figSize ((width, height)): Size of figure (default: (10,8))
        - saveData (None|True|'fileName'): File name where to save the final data used to generate the figure;
            if set to True uses filename from simConfig (default: None)
        - saveFig (None|True|'fileName'): File name where to save the figure;
            if set to True uses filename from simConfig (default: None)
        - showFig (True|False): Whether to show the figure or not (default: True)

        - Returns figure handles

    NOTE(review): the marker-size parameter is spelled `synSiz` in the
    signature although documented as `synSize` above; callers must use
    `synSiz`. Also, `saveData` is accepted but never used in this function.
    '''

    from .. import sim
    from neuron import h

    print('Plotting 3D cell shape ...')

    # Gids of presynaptic cells (used to count synapses for cvar='numSyns')
    cellsPreGids = [c.gid for c in sim.getCellsList(includePre)] if includePre else []
    cellsPost = sim.getCellsList(includePost)

    if not hasattr(sim.net, 'compartCells'): sim.net.compartCells = [c for c in cellsPost if type(c) is sim.CompartCell]
    sim.net.defineCellShapes()  # in case some cells had stylized morphologies without 3d pts

    if not iv: # plot using Python instead of interviews
        from mpl_toolkits.mplot3d import Axes3D
        from netpyne.support import morphology as morph # code adapted from https://github.com/ahwillia/PyNeuron-Toolbox

        # create secList from include
        secs = None

        # Set cvals and secs
        if not cvals and cvar:
            cvals = []
            secs = []

            # weighNorm: color each segment by its normalized weight scaling
            if cvar == 'weightNorm':
                for cellPost in cellsPost:
                    cellSecs = list(cellPost.secs.values()) if includeAxon else [s for s in list(cellPost.secs.values()) if 'axon' not in s['hObj'].hname()]
                    for sec in cellSecs:
                        if 'weightNorm' in sec:
                            secs.append(sec['hObj'])
                            cvals.extend(sec['weightNorm'])

                cvals = np.array(cvals)
                cvals = cvals/min(cvals)  # normalize so the smallest value is 1

            # numSyns: color each segment by the number of synapses it receives
            elif cvar == 'numSyns':
                for cellPost in cellsPost:
                    cellSecs = cellPost.secs if includeAxon else {k:s for k,s in cellPost.secs.items() if 'axon' not in s['hObj'].hname()}
                    for secLabel,sec in cellSecs.items():
                        nseg=sec['hObj'].nseg
                        nsyns = [0] * nseg
                        secs.append(sec['hObj'])
                        # count conns from the selected presynaptic gids landing on this section
                        conns = [conn for conn in cellPost.conns if conn['sec']==secLabel and conn['preGid'] in cellsPreGids]
                        for conn in conns: nsyns[int(round(conn['loc']*nseg))-1] += 1
                        cvals.extend(nsyns)

                cvals = np.array(cvals)

        if not secs: secs = [s['hObj'] for cellPost in cellsPost for s in list(cellPost.secs.values())]
        if not includeAxon:
            secs = [sec for sec in secs if 'axon' not in sec.hname()]

        # Plot shapeplot
        cbLabels = {'numSyns': 'number of synapses per segment', 'weightNorm': 'weight scaling'}
        plt.rcParams.update({'font.size': fontSize})
        fig=plt.figure(figsize=figSize)
        shapeax = plt.subplot(111, projection='3d')
        shapeax.elev=90 # 90
        shapeax.azim=-90 # -90
        shapeax.dist=dist*shapeax.dist
        plt.axis('equal')
        cmap = plt.cm.viridis #plt.cm.jet #plt.cm.rainbow #plt.cm.jet #YlOrBr_r
        morph.shapeplot(h,shapeax, sections=secs, cvals=cvals, cmap=cmap)
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
        if cvals is not None and len(cvals)>0:
            sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=np.min(cvals), vmax=np.max(cvals)))
            sm._A = []  # fake up the array of the scalar mappable
            cb = plt.colorbar(sm, fraction=0.15, shrink=0.5, pad=0.05, aspect=20)
            if cvar: cb.set_label(cbLabels[cvar], rotation=90, fontsize=fontSize)

        if bkgColor:
            shapeax.w_xaxis.set_pane_color(bkgColor)
            shapeax.w_yaxis.set_pane_color(bkgColor)
            shapeax.w_zaxis.set_pane_color(bkgColor)
        #shapeax.grid(False)

        # Synapses
        if showSyns:
            synColor='red'
            for cellPost in cellsPost:
                for sec in list(cellPost.secs.values()):
                    for synMech in sec['synMechs']:
                        morph.mark_locations(h, sec['hObj'], synMech['loc'], markspec=synStyle, color=synColor, markersize=synSiz)

        # Electrodes
        if showElectrodes:
            ax = plt.gca()
            colorOffset = 0
            if 'avg' in showElectrodes:
                showElectrodes.remove('avg')
                colorOffset = 1
            coords = sim.net.recXElectrode.pos.T[np.array(showElectrodes).astype(int),:]
            ax.scatter(coords[:,0],coords[:,1],coords[:,2], s=150, c=colorList[colorOffset:len(coords)+colorOffset],
                marker='v', depthshade=False, edgecolors='k', linewidth=2)
            for i in range(coords.shape[0]):
                ax.text(coords[i,0],coords[i,1],coords[i,2], '  '+str(showElectrodes[i]), fontweight='bold' )
            # NOTE(review): `cb` is only assigned above when cvals is non-empty;
            # this line raises NameError otherwise — confirm intended usage.
            cb.set_label('Segment total transfer resistance to electrodes (kiloohm)', rotation=90, fontsize=fontSize)

        #plt.title(str(includePre)+' -> '+str(includePost) + ' ' + str(cvar))
        shapeax.set_xticklabels([])
        shapeax.set_yticklabels([])
        shapeax.set_zticklabels([])
        #shapeax.set_ylabel('y location (um)')

        # save figure
        if saveFig:
            if isinstance(saveFig, basestring):
                filename = saveFig
            else:
                filename = sim.cfg.filename+'_shape.png'
            plt.savefig(filename, dpi=dpi)

        # show fig
        if showFig: _showFigure()

    else:  # Plot using Interviews
        # colors: 0 white, 1 black, 2 red, 3 blue, 4 green, 5 orange, 6 brown, 7 violet, 8 yellow, 9 gray
        from neuron import gui
        fig = h.Shape()
        secList = h.SectionList()
        if not ivprops:
            ivprops = {'colorSecs': 1, 'colorSyns':2 ,'style': 'O', 'siz':5}

        for cell in [c for c in cellsPost]:
            for sec in list(cell.secs.values()):
                if 'axon' in sec['hObj'].hname() and not includeAxon: continue
                sec['hObj'].push()
                secList.append()
                h.pop_section()
                if showSyns:
                    for synMech in sec['synMechs']:
                        if synMech['hObj']:
                            # find pre pop using conn[preGid]
                            # create dict with color for each pre pop; check if exists; increase color counter
                            # colorsPre[prePop] = colorCounter

                            # find synMech using conn['loc'], conn['sec'] and conn['synMech']
                            fig.point_mark(synMech['hObj'], ivprops['colorSyns'], ivprops['style'], ivprops['siz'])

        fig.observe(secList)
        fig.color_list(secList, ivprops['colorSecs'])
        fig.flush()
        fig.show(0)  # show real diam

        # save figure
        if saveFig:
            if isinstance(saveFig, basestring):
                filename = saveFig
            else:
                filename = sim.cfg.filename+'_'+'shape.ps'
            fig.printfile(filename)

    return fig, {}
# -------------------------------------------------------------------------------------------------------------------
## Calculate number of disynaptic connections
# -------------------------------------------------------------------------------------------------------------------
@exception
def calculateDisynaptic(includePost = ['allCells'], includePre = ['allCells'], includePrePre = ['allCells'],
        tags=None, conns=None, tagsFile=None, connsFile=None):
    '''
    Count disynaptic connections: pre -> post pairs where the pre cell also
    receives input from a cell (in includePrePre) that projects to the post
    cell as well.

        - includePost / includePre / includePrePre: cell selections (same
          format accepted by getCellsInclude / getCellsIncludeTags)
        - tags, conns: pre-loaded tag/conn dicts keyed by gid (optional)
        - tagsFile, connsFile: JSON files to load tags/conns from (optional)

        - Returns the number of disynaptic connections found, or -1 when
          cfg.compactConnFormat lacks a 'preGid' entry.
    '''
    import json
    from time import time

    from .. import sim

    numDis = 0
    totCon = 0

    start = time()
    if tagsFile:
        print('Loading tags file...')
        with open(tagsFile, 'r') as fileObj: tagsTmp = json.load(fileObj)['tags']
        tags = {int(k): v for k,v in tagsTmp.items()}  # JSON keys are strings; convert back to int gids
        del tagsTmp
    if connsFile:
        print('Loading conns file...')
        with open(connsFile, 'r') as fileObj: connsTmp = json.load(fileObj)['conns']
        conns = {int(k): v for k,v in connsTmp.items()}
        del connsTmp

    print('  Calculating disynaptic connections...')
    # loading from json files
    if tags and conns:
        cellsPreGids = getCellsIncludeTags(includePre, tags)
        cellsPrePreGids = getCellsIncludeTags(includePrePre, tags)
        cellsPostGids = getCellsIncludeTags(includePost, tags)

        # NOTE(review): after the int-key conversion above, 'format' can never
        # be a key of conns, so preGidIndex is always 0 here — confirm the
        # saved conns file stores its 'format' entry elsewhere.
        preGidIndex = conns['format'].index('preGid') if 'format' in conns else 0
        for postGid in cellsPostGids:
            preGidsAll = [conn[preGidIndex] for conn in conns[postGid] if isinstance(conn[preGidIndex], Number) and conn[preGidIndex] in cellsPreGids+cellsPrePreGids]
            preGids = [gid for gid in preGidsAll if gid in cellsPreGids]
            for preGid in preGids:
                prePreGids = [conn[preGidIndex] for conn in conns[preGid] if conn[preGidIndex] in cellsPrePreGids]
                totCon += 1
                # disynaptic if any pre-pre cell also connects directly to the post cell
                if not set(prePreGids).isdisjoint(preGidsAll):
                    numDis += 1
    else:
        # using the live network: conns are either compact lists or dicts
        if sim.cfg.compactConnFormat:
            if 'preGid' in sim.cfg.compactConnFormat:
                preGidIndex = sim.cfg.compactConnFormat.index('preGid')  # using compact conn format (list)
            else:
                print('   Error: cfg.compactConnFormat does not include "preGid"')
                return -1
        else:
            preGidIndex = 'preGid'  # using long conn format (dict)

        _, cellsPreGids, _ = getCellsInclude(includePre)
        _, cellsPrePreGids, _ = getCellsInclude(includePrePre)
        cellsPost, _, _ = getCellsInclude(includePost)

        for postCell in cellsPost:
            print(postCell['gid'])
            preGidsAll = [conn[preGidIndex] for conn in postCell['conns'] if isinstance(conn[preGidIndex], Number) and conn[preGidIndex] in cellsPreGids+cellsPrePreGids]
            preGids = [gid for gid in preGidsAll if gid in cellsPreGids]
            for preGid in preGids:
                preCell = sim.net.allCells[preGid]
                prePreGids = [conn[preGidIndex] for conn in preCell['conns'] if conn[preGidIndex] in cellsPrePreGids]
                totCon += 1
                if not set(prePreGids).isdisjoint(preGidsAll):
                    numDis += 1

    print('  Total disynaptic connections: %d / %d (%.2f%%)' % (numDis, totCon, float(numDis)/float(totCon)*100 if totCon>0 else 0.0))
    try:
        sim.allSimData['disynConns'] = numDis
    except Exception:
        # best-effort: sim.allSimData may not exist yet (narrowed from a bare except)
        pass

    print('  time ellapsed (s): ', time() - start)

    return numDis
| |
import wx
from copy import deepcopy
from collections import namedtuple
from src.wizard.view.clsDataConfigPanel \
import DataConfigPanelView, MyColLabelRenderer
from src.handlers.csvHandler import CSVReader
from src.common.functions import searchDict
from src.controllers.Database import Database
from src.wizard.controller.frmSeriesWizard import SeriesWizardController
from src.wizard.controller.frmVirtualGrid import GridBase
from src.wizard.controller.frmSeriesDialog import SeriesSelectDialog
from src.wizard.models.ResultMapping import ResultMapping
from odm2api.ODM2.services.readService import ReadODM2
class DataConfigPanelController(DataConfigPanelView):
    """
    Wizard panel controller for configuring a CSV data file: previewing the
    data in a grid, choosing the date/time column, and mapping file columns
    to ODM2 results.

    NOTE(review): this module uses Python 2 idioms (`iteritems`, `print e`,
    `wx.EVT_MENU(...)` binding) — confirm the target runtime before porting.
    """

    def __init__(self, daddy, **kwargs):
        super(DataConfigPanelController, self).__init__(daddy, **kwargs)
        self.parent = daddy          # owning wizard; provides .db access
        self.prev_data = {}
        self.inputDict = {}          # config dict shared between panels
        self.lstMappings.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.rightClick)

    def rightClick(self, event):
        # Context menu on the mappings list: edit (single selection only)
        # or delete the selected mapping(s).
        if len(event.GetEventObject().GetSelectedObjects()) < 1:
            return
        menu = wx.Menu()
        menu.Append(11, "Edit Mapping")
        menu.Append(12, "Delete Mapping")
        wx.EVT_MENU(menu, 11, self.editMapping)
        wx.EVT_MENU(menu, 12, self.deleteMapping)
        if len(event.GetEventObject().GetSelectedObjects()) > 1:
            # Editing multiple mappings at once is not supported.
            menu.Enable(11, False)
            menu.SetLabel(12, "Delete Mappings")
        self.PopupMenu(menu)
        menu.Destroy()
        event.Skip()

    def editMapping(self, event):
        # Re-open the series selection dialog for the selected column.
        self.selectedColumn = \
            self.lstMappings.GetSelectedObject().variableName
        self.runSeriesSelectDialog()
        event.Skip()

    def deleteMapping(self, event):
        names = [obj.variableName for obj in \
            self.lstMappings.GetSelectedObjects()]
        # Reset the column-header highlight for the removed mappings.
        for i in range(0, self.m_listCtrl1.GetNumberCols()):
            if str(self.m_listCtrl1.GetColLabelValue(i)) in names:
                self.m_listCtrl1.SetColLabelRenderer(\
                    i,
                    MyColLabelRenderer('#f0f0f0'))
        self.m_listCtrl1.Refresh()
        # Instead of deleting from mapping right now,
        # add to a list that can be deleted when finish
        # button is clicked.
        for name in self.lstMappings.GetSelectedObjects():
            self.deletedMappings.append(name.variableName)
        #for name in self.m_listCtrl3.GetSelectedObjects():
        #    self.inputDict['Mappings'].pop(name.variableName)
        self.lstMappings.RemoveObjects(\
            self.lstMappings.GetSelectedObjects())
        self.lstMappings.RepopulateList()
        event.Skip()

    def getInput(self):
        '''
        A method which returns a dict of data.
        Used to share data between panels.

        Applies the pending newMappings / deletedMappings collected since
        setInput() and stamps the chosen time column, time spacing, unit
        and UTC offset into inputDict.
        '''
        # NOTE(review): lastUpdate is never used, and if 'Schedule' itself is
        # missing the KeyError handler re-raises on the assignment below —
        # confirm 'Schedule' is always present by this point.
        try:
            lastUpdate = str(self.inputDict['Schedule']['LastUpdate'])
        except KeyError:
            self.inputDict['Schedule']['LastUpdate'] = "--"
        i = self.choiceTimeCol.GetSelection()
        self.inputDict['Settings']['DateTimeColumnName'] = \
            str(self.choiceTimeCol.GetString(i))
        self.inputDict['Settings']['FillGaps'] = 'false'
        # Add mappings.
        for mapping in self.newMappings:
            self.inputDict['Mappings'].update(mapping)
        # Delete mappings.
        for mapping in self.deletedMappings:
            self.inputDict['Mappings'].pop(mapping)
        # Every mapping shares the single time-spacing value/unit chosen in the UI.
        for k,v in self.inputDict['Mappings'].iteritems():
            #print v
            v['IntendedTimeSpacing'] = self.spinTimeSpacing.GetValue()
            i = self.choiceUnitID.GetSelection()
            unitID = self.timeUnits[str(self.choiceUnitID.GetString(i))]
            v['IntendedTimeSpacingUnitID'] = unitID
        self.inputDict['Settings']['UTCOffset'] = \
            self.spinUTCOffset.GetValue()
        return self.inputDict

    def setInput(self, data):
        """
        setInput: Populates the form with pre-existing data.

        Resets the pending mapping edit lists and fills every sub-control
        from `data` (grid, time column, mapping list, spacing, offset, unit).
        """
        self.deletedMappings = []
        self.newMappings = []
        read = self.parent.db.getReadSession()
        self.inputDict.update(data)
        self.setInputGrid(data)
        self.setInputTimeColumn()
        self.setInputMappingList(self.inputDict, read)
        self.setInputIntendedTimeSpacing()
        self.setInputUTCOffset()
        self.setInputUnit(read)

    def setInputGrid(self, data):
        """
        setInputGrid: Populate the grid that
        displays the csv data file.
        """
        csv = CSVReader()
        df = csv.dataFrameReader(searchDict(data, 'FileLocation'),
            header=searchDict(data, 'HeaderRowPosition'), sep=searchDict(data, 'Delimiter'),
            dataBegin=searchDict(data, 'DataRowPosition'))
        self.columns = csv.getColumnNames(df)
        # Create the underlying table of data for
        # the grid control. Having a virtual table
        # enables the control to display millions of
        # cells of data efficiently.
        base = GridBase(csv.getData(df[:50]), self.columns)  # preview first 50 rows only
        # Assign the table to the grid control.
        self.m_listCtrl1.setTable(base)
        self.m_listCtrl1.AutoSizeColumns()

    def setInputMappingList(self, existingData, read):
        """
        setInputMappingList: Populates the list of mappings
        with any mappings that already exist *and*
        match the variables from the configuration file.
        """
        # Determine if any mappings exist.
        try:
            existingData["Mappings"]
        except KeyError:
            existingData.update({"Mappings": {}})
        self.lstMappings.DeleteAllItems()
        self.lstMappings.RepopulateList()
        popThese = []
        # Iterate through the mappings
        for variableName, values in existingData["Mappings"].iteritems():
            # Fist check if the variable name appears in the data file.
            if str(variableName) not in self.columns:
                # If it doesn't exist, then remove it.
                popThese.append(variableName)
                continue
            # Add the variable name to the mapping list.
            mapping = read.getDetailedResultInfo("Time series coverage", values['ResultID'])
            # mapped = mapping[0]
            for mapped in mapping:
                self.lstMappings.AddObject(
                    ResultMapping(mapped.ResultID,
                        mapped.SamplingFeatureCode,
                        mapped.SamplingFeatureName,
                        mapped.MethodCode,
                        mapped.MethodName,
                        mapped.VariableCode,
                        mapped.VariableNameCV,
                        mapped.ProcessingLevelCode,
                        mapped.ProcessingLevelDefinition,
                        mapped.UnitsName,
                        variableName))
        if popThese:
            # Warn the user, then drop the stale mappings.
            wx.MessageBox("Mappings for the following variables exist, but do not appear in the selected data file:\n'%s'\n\nThese mappings will be deleted if you continue." \
                % (", ".join(popThese)),
                "No matching variables")
            for var in popThese:
                existingData["Mappings"].pop(var)
        # Color columns with mappings.
        for i in range(0, self.m_listCtrl1.GetNumberCols()):
            if str(self.m_listCtrl1.GetColLabelValue(i)) \
                in existingData["Mappings"].keys():
                self.m_listCtrl1.SetColLabelRenderer(\
                    i,
                    MyColLabelRenderer('#50c061'))
            else:
                self.m_listCtrl1.SetColLabelRenderer(i,
                    MyColLabelRenderer('#f0f0f0'))

    def setInputTimeColumn(self):
        """
        setInputTimeColumn: Populates the combo box
        used to define which column is the time column.
        """
        self.choiceTimeCol.Clear()
        [self.choiceTimeCol.Append(column) \
            for column in self.columns]
        try:
            dateCol = self.inputDict['Settings']['DateTimeColumnName']
            i = self.choiceTimeCol.FindString(str(dateCol))
            self.choiceTimeCol.SetSelection(i)
        except Exception as e:
            # Fall back to the first column when no previous choice exists.
            self.choiceTimeCol.SetSelection(0)

    def setInputIntendedTimeSpacing(self):
        """
        setInputIntendedTimeSpacing: Attempts to set the value of
        intended time spacing spin ctrl to a pre-existing number.
        """
        try:
            self.spinTimeSpacing.SetValue(\
                searchDict(self.inputDict['Mappings'],
                'IntendedTimeSpacing'))
        except KeyError:
            self.spinTimeSpacing.SetValue(0)

    def setInputUTCOffset(self):
        """
        setInputUTCOffset: Attempts to set the value of
        UTC offset to a pre-existing number.
        """
        try:
            offset = self.inputDict['Settings']['UTCOffset']
            self.spinUTCOffset.SetValue(int(offset))
        except KeyError:
            self.spinUTCOffset.SetValue(0)

    def setInputUnit(self, read):
        """
        setInputUnit: Attempts to set the units
        combo box to a pre-existing value.
        """
        self.choiceUnitID.Clear()
        timeUnits = read.getUnits(type='Time')
        self.timeUnits = {}   # UnitsName -> UnitsID lookup used by getInput()
        try:
            [self.timeUnits.update({i.UnitsName:i.UnitsID}) for i in timeUnits]
        except Exception as e:
            print e
            wx.MessageBox("Error reading time units from database.", "Time Units Error")
        for unit in timeUnits:
            self.choiceUnitID.Append(unit.UnitsName)
        self.choiceUnitID.Append("Create New ...")  # sentinel entry handled in onSelectUnit
        try:
            unitID = searchDict(self.inputDict['Mappings'],
                'IntendedTimeSpacingUnitID')
            #unit = read.getUnitById(int(unitID))
            #unit = read.getUnits(ids = [int(unitID)])[0]
            unit = read.getUnits(ids = [int(unitID)])
            if(unit is not None and len(unit) > 0):
                i = self.choiceUnitID.FindString(unit[0].UnitsName)
                self.choiceUnitID.SetSelection(i)
            # else:
            #     defaultTimeUnits = ['seconds', 'minutes', 'hours', 'days']
            #     for unit in defaultTimeUnits:
            #         self.choiceUnitID.Append(unit)
            #     self.choiceUnitID.SetSelection(0)
        except KeyError:
            self.choiceUnitID.SetSelection(0)

    def onAddNew(self, event):
        # Toolbar/button handler: start a new mapping for the selected column.
        self.runSeriesSelectDialog()
        # event.Skip()

    def onCellClick(self, event):
        event.Skip()

    def onColClick(self, event):
        # Remember which column header was clicked (for later mapping).
        if event.GetCol() > -1:
            # Get the column header.
            self.selectedColumn = \
                self.m_listCtrl1.GetColLabelValue(event.GetCol())
        # event.Skip

    def onColDoubleClick(self, event):
        # Double-clicking a column header opens the mapping dialog for it,
        # unless the column is currently chosen as the date/time column.
        if event.GetCol() > -1:
            self.selectedColumn = \
                self.m_listCtrl1.GetColLabelValue(event.GetCol())
            if self.selectedColumn == str(self.choiceTimeCol.GetStringSelection()):
                msg = wx.MessageBox('This column is not mappable because you have chosen it as your date time column.', 'Configuration Error')
            else:
                if self.runSeriesSelectDialog():
                    # Highlight the header green to show it is now mapped.
                    self.m_listCtrl1.SetColLabelRenderer(\
                        int(event.GetCol()),
                        MyColLabelRenderer('#50c061'))
                    self.m_listCtrl1.Refresh()
        event.Skip()

    def onTimeSelect(self, event):
        index = self.choiceTimeCol.GetSelection()
        self.selectedDateColumn = self.choiceTimeCol.GetString(index)
        event.Skip()

    def onTimeChoice(self, event):
        self.selectedDateColumn = event.GetEventObject().GetString(event.GetEventObject().GetSelection())
        event.Skip()

    def onSelectUnit(self, event):
        # Choosing the "Create New ..." sentinel opens a sub-dialog to add a
        # new time unit; the new unit is inserted before the sentinel entry.
        value = event.GetEventObject().GetString(event.GetEventObject().GetSelection())
        if value == "Create New ...":
            from src.wizard.controller.frmAddNewUnitPanel import AddNewUnitPanelController
            from src.wizard.controller.frmNewSeriesDialog import NewSeriesDialog
            dlg = NewSeriesDialog(self, 'Create New Unit')
            controller = AddNewUnitPanelController(dlg, self.parent.db, type = "Time")
            dlg.addPanel(controller)
            dlg.CenterOnScreen()
            if dlg.ShowModal() == wx.ID_OK and controller.unit is not None:
                newUnit = controller.unit
                i = self.choiceUnitID.InsertItems([newUnit.UnitsName], len(self.choiceUnitID.Items)-1)
                # i = self.choiceUnitID.FindString(newUnit.UnitName)
                self.choiceUnitID.Select(i)
            dlg.Destroy()
        event.Skip()

    def runSeriesSelectDialog(self):
        """Open the series-selection dialog for self.selectedColumn.

        Returns True when the user confirmed a result (the mapping is queued
        in newMappings and shown in the list), False when cancelled.
        """
        unitid = self.parent.db.getReadSession().getUnits(name = self.choiceUnitID.GetString(self.choiceUnitID.Selection))[0].UnitsID
        dlg = SeriesSelectDialog(self,
            variable=self.selectedColumn,
            database=self.parent.db,
            time_spacing = {"value": self.spinTimeSpacing.Value, "unit": unitid}
            )
        #dlg.CenterOnParent()
        if dlg.ShowModal() == wx.ID_OK:
            dlg.selectedResult.variableName = self.selectedColumn
            #print dlg.selectedResult.variableNameCV
            #import pprint
            # Instead of adding immediately to mappings
            # add it to a list to be added when finish button
            # is clicked.
            self.newMappings.append({str(self.selectedColumn): {\
                'ResultID': int(dlg.selectedResult.resultID),
                'LastByteRead': 0,
                'CalculateAggInterval': 'false'}})
            #pprint.pprint(self.inputDict)
            # Replace any existing list entry for the same column.
            for m in self.lstMappings.GetObjects():
                if m.variableName == dlg.selectedResult.variableName:
                    self.lstMappings.RemoveObjects([m])
                    break
            self.lstMappings.AddObject(dlg.selectedResult)
            dlg.Destroy()
            return True
        dlg.Destroy()
        return False
| |
import requests # For the core of the program
import json # For the core of the program
from coinmarketcap import Market # For exchange prices
import os # Clear screen command
import sys # For collecting the flags
from time import gmtime, strftime, sleep # For time stamps
# Clear the terminal at startup (Windows 'cls' vs POSIX 'clear')
os.system('cls' if os.name == 'nt' else 'clear') # Clears screen

""" Varibles """

# Usage text printed by main(); kept verbatim because it is runtime output
# (NOTE(review): it contains the typos "defintion" and "trasactions").
helptext = """
DogeBalanceChecker 1.4
python DogeBalanceChecker [FLAG]
-[letter] : defintion : usage <default: -[LETTER]>
Help:
-b : shows balance of recorded addresses
-l : lets you lookup the balance of any address in dogecoin only : -l [ADDRESS]
-a : adds address to the list of addresses ; -a [ADDRESS]
-h : display help text
-v : display the program version
-B : display balance of bitcoin addresses
-L : display balance of litecoin addresses
-d : tracks an address for future trasactions : -d [ADDRESS]
"""

# Version string printed by the -v flag
version = "DogeBalanceChecker 1.4"
""" Price Data """
coinmarketcap = Market()
class doge: # For dogecoin prices
    # Namespace of Dogecoin exchange rates, one class attribute per currency.
    # NOTE(review): this class body runs at import time and performs six
    # CoinMarketCap API requests; the `dogecoin` ticker dict is re-fetched
    # for each conversion target. Prices are used via float(...) by callers.
    dogecoin = coinmarketcap.ticker("Dogecoin", limit=3, convert="USD")[0]
    usdprice = dogecoin["price_usd"]
    btcprice = dogecoin["price_btc"]
    dogecoin = coinmarketcap.ticker("Dogecoin", limit=3, convert="EUR")[0]
    eurprice = dogecoin["price_eur"]
    dogecoin = coinmarketcap.ticker("Dogecoin", limit=3, convert="GBP")[0]
    gbpprice = dogecoin["price_gbp"]
    dogecoin = coinmarketcap.ticker("Dogecoin", limit=3, convert="AUD")[0]
    audprice = dogecoin["price_aud"]
    dogecoin = coinmarketcap.ticker("Dogecoin", limit=3, convert="CAD")[0]
    cadprice = dogecoin["price_cad"]
    dogecoin = coinmarketcap.ticker("Dogecoin", limit=3, convert="LTC")[0]
    ltcprice = dogecoin["price_ltc"]
class btc: # For bitcoin prices
    # Namespace of Bitcoin exchange rates; same import-time API-call pattern
    # as the `doge` class above.
    bitcoin = coinmarketcap.ticker("Bitcoin", limit=3, convert="USD")[0]
    usdprice = bitcoin["price_usd"]
    btcprice = bitcoin["price_btc"]
    bitcoin = coinmarketcap.ticker("Bitcoin", limit=3, convert="EUR")[0]
    eurprice = bitcoin["price_eur"]
    bitcoin = coinmarketcap.ticker("Bitcoin", limit=3, convert="GBP")[0]
    gbpprice = bitcoin["price_gbp"]
    bitcoin = coinmarketcap.ticker("Bitcoin", limit=3, convert="AUD")[0]
    audprice = bitcoin["price_aud"]
    bitcoin = coinmarketcap.ticker("Bitcoin", limit=3, convert="CAD")[0]
    cadprice = bitcoin["price_cad"]
    bitcoin = coinmarketcap.ticker("Bitcoin", limit=3, convert="DOGE")[0]
    dogeprice = bitcoin["price_doge"]
class ltc: # For litecoin data
    # Namespace of Litecoin exchange rates; same import-time API-call pattern
    # as the `doge` class above.
    litecoin = coinmarketcap.ticker("Litecoin", limit=3, convert="USD")[0]
    usdprice = litecoin["price_usd"]
    btcprice = litecoin["price_btc"]
    litecoin = coinmarketcap.ticker("Litecoin", limit=3, convert="EUR")[0]
    eurprice = litecoin["price_eur"]
    litecoin = coinmarketcap.ticker("Litecoin", limit=3, convert="GBP")[0]
    gbpprice = litecoin["price_gbp"]
    litecoin = coinmarketcap.ticker("Litecoin", limit=3, convert="AUD")[0]
    audprice = litecoin["price_aud"]
    litecoin = coinmarketcap.ticker("Litecoin", limit=3, convert="CAD")[0]
    cadprice = litecoin["price_cad"]
    litecoin = coinmarketcap.ticker("Litecoin", limit=3, convert="DOGE")[0]
    dogeprice = litecoin["price_doge"]
""" Functions """
def verifyAddresses(currency="addresses",prefix=["D","A","9"], documented=True, address=None): #Verifys addresses
    """Validate address syntax, raising ValueError on the first bad address.

    With documented=True the addresses are loaded from the ``currency``
    record file; with documented=False only the single ``address`` argument
    is checked. A valid address starts with one of ``prefix`` and is exactly
    34 characters long.
    """
    # Choose the address source
    candidates = importAddresses(currency) if documented else [address]
    # Enforce the prefix and fixed-length rules on every candidate
    for candidate in candidates:
        if candidate[0] not in prefix or len(candidate) != 34:
            raise ValueError("Invalid Address")
def dogebalance(): # Finding the balance of Dogecoin addresses
    """Print per-address and total Dogecoin balances for all recorded
    addresses, with fiat/crypto conversions from the `doge` price class.

    Balances come from the BlockCypher API in koinu (1e-8 DOGE) and are
    divided by 100000000 for display.
    """
    addresses = importAddresses() # Import addresses
    verifyAddresses() # Verifys addresses (re-reads the same file internally)
    os.system('cls' if os.name == 'nt' else 'clear')
    balance = []
    # Finds the balance of addresses
    for i in addresses:
        get_address_info = requests.get('https://api.blockcypher.com/v1/doge/main/addrs/'+i+'/full?limit=99999')
        address_info = get_address_info.text
        j_address_info = json.loads(address_info)
        balance.append(j_address_info['balance'])
        # Prints balances and conversions
        print('address : '+str(i)+' - balance : '+str(j_address_info['balance']/100000000)+' doge')
    print('total balance : '+str(sum(balance)/100000000)+' doge')
    totalBalance = sum(balance)/100000000
    print("balance usd :", str(float(doge.usdprice) * totalBalance))
    print("balance btc :", str(float(doge.btcprice) * totalBalance))
    print("balance ltc :", str(float(doge.ltcprice) * totalBalance))
    print("balance gbp :", str(float(doge.gbpprice) * totalBalance))
    print("balance eur :", str(float(doge.eurprice) * totalBalance))
    print("balance aud :", str(float(doge.audprice) * totalBalance))
    print("balance cad :", str(float(doge.cadprice) * totalBalance))
def btcbalance(): # Finding the balance of Bitcoin addresses
    """Print per-address and total Bitcoin balances for all recorded
    addresses (from btc.txt), with conversions from the `btc` price class.
    """
    addresses = importAddresses('btc')
    verifyAddresses("btc", ["1", "3"])  # BTC addresses start with '1' or '3'
    os.system('cls' if os.name == 'nt' else 'clear')
    balance = []
    for i in addresses:
        get_address_info = requests.get('https://api.blockcypher.com/v1/btc/main/addrs/'+i+'/full?limit=99999')
        address_info = get_address_info.text
        j_address_info = json.loads(address_info)
        balance.append(j_address_info['balance'])
        # per-address balance in BTC (API reports satoshi)
        print('address : '+str(i)+' - balance : '+str(j_address_info['balance']/100000000)+' btc')
    print('total balance : '+str(sum(balance)/100000000)+' btc')
    totalBalance = sum(balance)/100000000
    print("balance doge :", str(float(btc.dogeprice) * totalBalance))
    print("balance usd :", str(float(btc.usdprice) * totalBalance))
    print("balance btc :", str(float(btc.btcprice) * totalBalance))
    print("balance gbp :", str(float(btc.gbpprice) * totalBalance))
    print("balance eur :", str(float(btc.eurprice) * totalBalance))
    print("balance aud :", str(float(btc.audprice) * totalBalance))
    print("balance cad :", str(float(btc.cadprice) * totalBalance))
def ltcbalance(): # Finding the balance of Litecoin addresses
    """Print per-address and total Litecoin balances for all recorded
    addresses (from ltc.txt), with conversions from the `ltc` price class.
    """
    addresses = importAddresses('ltc')
    verifyAddresses("ltc", ["L"])  # LTC addresses start with 'L'
    os.system('cls' if os.name == 'nt' else 'clear')
    balance = []
    for i in addresses:
        get_address_info = requests.get('https://api.blockcypher.com/v1/ltc/main/addrs/'+i+'/full?limit=99999')
        address_info = get_address_info.text
        j_address_info = json.loads(address_info)
        balance.append(j_address_info['balance'])
        # per-address balance in LTC (API reports litoshi)
        print('address : '+str(i)+' - balance : '+str(j_address_info['balance']/100000000)+' ltc')
    print('total balance : '+str(sum(balance)/100000000)+' ltc')
    totalBalance = sum(balance)/100000000
    print("balance doge :", str(float(ltc.dogeprice) * totalBalance))
    print("balance usd :", str(float(ltc.usdprice) * totalBalance))
    print("balance btc :", str(float(ltc.btcprice) * totalBalance))
    print("balance gbp :", str(float(ltc.gbpprice) * totalBalance))
    print("balance eur :", str(float(ltc.eurprice) * totalBalance))
    print("balance aud :", str(float(ltc.audprice) * totalBalance))
    print("balance cad :", str(float(ltc.cadprice) * totalBalance))
def lookup(address): # Looks up addresses that aren't recorded
    """Interactively look up the Dogecoin balance of arbitrary addresses.

    Prints the balance of ``address`` (plus fiat/crypto conversions) and
    keeps prompting for further addresses until the user answers 'n'.
    """
    loop = True # Sets the loop value
    while(loop): # Loops function
        # Sets up function more
        addresses = []
        addresses.append(address)
        os.system('cls' if os.name == 'nt' else 'clear')
        balance = []
        # Finds balance
        for i in addresses:
            # BUGFIX: the original URL referenced an undefined name `currency`
            # (NameError); this function only ever reports Dogecoin balances.
            get_address_info = requests.get('https://api.blockcypher.com/v1/doge/main/addrs/'+i+'/full?limit=99999')
            address_info = get_address_info.text
            j_address_info = json.loads(address_info)
            balance.append(j_address_info['balance'])
        # Prints balance
        print(addresses[0]+"'s balance : "+str(sum(balance)/100000000)+' doge')
        totalBalance = sum(balance)/100000000
        print("balance usd :", str(float(doge.usdprice) * totalBalance))
        print("balance btc :", str(float(doge.btcprice) * totalBalance))
        print("balance gbp :", str(float(doge.gbpprice) * totalBalance))
        print("balance eur :", str(float(doge.eurprice) * totalBalance))
        print("balance aud :", str(float(doge.audprice) * totalBalance))
        print("balance cad :", str(float(doge.cadprice) * totalBalance))
        # Asks the user if they would like to search for another address
        if input("Do you want to search another address(Y/n)? ").lower() == "n":
            loop = False
            break
        # BUGFIX: the original wrapped the reply in a list, so the next
        # iteration built the request URL from a list and crashed.
        address = input("What address balance do you want to lookup? ")
def importAddresses(currency="addresses"): # Imports addresses to the function
    """Read recorded addresses from ``<currency>.txt``, one per line.

    Returns the list of addresses with trailing newlines stripped.
    """
    # BUGFIX: the original called `addresses.close` without parentheses, so
    # the file handle was never closed; a context manager closes it reliably.
    with open(currency + ".txt", "r") as address_file:
        # BUGFIX: stripping the newline (instead of unconditionally slicing
        # off the last character) no longer corrupts the final address when
        # the file lacks a trailing newline.
        return [line.rstrip('\n') for line in address_file]
def addAddress(address, currency="addresses"): # Adds an address to the record
    """Append ``address`` to the ``<currency>.txt`` record file."""
    # BUGFIX: the original rewrote the whole file in "r+" mode, re-read the
    # *default* file via importAddresses() regardless of `currency`, wrote
    # the new address without a trailing newline, and never closed the
    # handle (`addresses.close` lacked parentheses). Appending one
    # newline-terminated line is equivalent and safe.
    with open(currency + ".txt", "a") as address_file:
        address_file.write(address + '\n')
    print("Success!") # Prints out verfication
def detect(address): # Tracks the balance of an address
    """Poll one Dogecoin address forever, printing every balance change.

    Runs until interrupted; sleeps 60 seconds between polls. Balances are
    truncated to whole doge via int().
    """
    verifyAddresses(documented=False, address=address) # Verifys the address
    os.system('cls' if os.name == 'nt' else 'clear') # Clears screen
    # Sets up function
    addresses = []
    theBalance = -1  # sentinel: no balance observed yet
    addresses.append(address)
    while True: # Sets up loop
        balance = []
        time = strftime("%Y-%m-%d %H:%M:%S", gmtime()) # Grabs the time
        # Gets the balance
        for i in addresses:
            get_address_info = requests.get('https://api.blockcypher.com/v1/doge/main/addrs/'+i+'/full?limit=99999')
            address_info = get_address_info.text
            j_address_info = json.loads(address_info)
            balance.append(j_address_info['balance'])
            newBalance = int(j_address_info['balance']/100000000)
        # Figures out what to print
        if newBalance != theBalance:
            if theBalance == -1:
                print("["+time+"]", "Initial balance:", newBalance)
            elif newBalance > theBalance:
                print("["+time+"]", "Address has received", newBalance - theBalance, "New balance:", newBalance)
            elif newBalance < theBalance:
                print("["+time+"]", "Address has withdrawn", str(theBalance - newBalance) + "doge", "New balance:", newBalance)
        theBalance = newBalance
        sleep(60) # Sleeps the program for 60 seconds to prevent timeout
def main(flag): # Decides what function of the program to run
    """Dispatch on the first command-line flag.

    ``flag`` is sys.argv[1:]; flags taking a value read it from flag[1].
    Unknown (or missing) flags print the help text.
    """
    # BUGFIX: guard against being run with no arguments at all, which
    # previously raised IndexError on flag[0].
    if not flag:
        print(helptext)
        return
    if flag[0] == "-h":
        print(helptext)
    elif flag[0] == "-a":
        try:
            addAddress(flag[1])
        except IndexError:
            print(helptext)
            print("Error: Please use the correct usage")
    elif flag[0] == "-l":
        try:
            lookup(flag[1])
        except IndexError:
            print(helptext)
            print("Error: Please use the correct usage")
    elif flag[0] == "-b":
        dogebalance()
    elif flag[0] == "-v":
        print(version)
    elif flag[0] == "-B":
        btcbalance()
    elif flag[0] == "-L":
        ltcbalance()
    elif flag[0] == "-d":
        try:
            detect(flag[1])
        except IndexError:
            print(helptext)
            print("Error: Please use the correct usage")
    else:
        print(helptext)
""" Rest of program """
if __name__ == "__main__":
main(sys.argv[1:])
| |
'''
Created on 22/01/2014
@author: Dani
'''
#from ConfigParser import ConfigParser
import codecs
import os, sys, traceback
import json
from lpentities.observation import Observation
from lpentities.value import Value
from lpentities.indicator import Indicator
from lpentities.computation import Computation
from lpentities.instant import Instant
from lpentities.interval import Interval
from lpentities.measurement_unit import MeasurementUnit
from lpentities.dataset import Dataset
from lpentities.user import User
from lpentities.data_source import DataSource
from lpentities.license import License
from lpentities.organization import Organization
from es.weso.landmatrix.translator.deals_analyser import DealsAnalyser
from es.weso.landmatrix.translator.deals_builder import DealsBuilder
from .keys_dicts import KeyDicts
from datetime import datetime
from lpentities.year_interval import YearInterval
from model2xml.model2xml import ModelToXMLTransformer
try:
import xml.etree.cElementTree as ETree
except:
import xml.etree.ElementTree as ETree
class LandMatrixTranslator(object):
    '''
    Translates downloaded Land Matrix XML data into model entity objects
    (Organization/DataSource/Dataset/Indicator/Observation/...) and sends
    the resulting dataset to the receiver module via ModelToXMLTransformer.
    '''

    # Tag name of per-record nodes in the source XML (not referenced in the
    # visible code; presumably used by collaborators -- TODO confirm).
    INFO_NODE = "item"

    def __init__(self, log, config, look_for_historical):
        """
        Constructor.

        log -- logger used for progress/warning messages.
        config -- ConfigParser-like object holding the app configuration.
        look_for_historical -- when True, old information is considered and
            all entity id counters restart from 0; when False, counters
            resume from the values persisted in the [TRANSLATOR] section and
            observations older than the configured first_valid_year are
            filtered out.
        """
        self._log = log
        self._config = config
        self._look_for_historical = look_for_historical
        #Initializing variable ids
        if self._look_for_historical:
            # Historical run: start every entity counter from scratch.
            self._obs_int = 0
            self._sli_int = 0
            self._dat_int = 0
            self._igr_int = 0
        else:
            # Incremental run: resume counters persisted in the config file.
            self._obs_int = int(self._config.get("TRANSLATOR", "obs_int"))
            self._sli_int = int(self._config.get("TRANSLATOR", "sli_int"))
            self._dat_int = int(self._config.get("TRANSLATOR", "dat_int"))
            self._igr_int = int(self._config.get("TRANSLATOR", "igr_int"))
        # Organization (included acronym)
        self._default_organization = self._build_default_organization()
        #Indicators's dict
        self._indicators_dict = self._build_indicators_dict()
        #Common objects
        self._default_user = self._build_default_user()
        self._default_datasource = self._build_default_datasource()
        self._default_dataset = self._build_default_dataset()
        self._default_license = self._build_default_license()
        self._default_computation = self._build_default_computation()
        self._relate_common_objects()

    @staticmethod
    def _build_default_user():
        """Return the fixed importer user that every upload is attributed to."""
        return User(user_login="LANDMATRIXIMPORTER")

    @staticmethod
    def _build_default_computation():
        """Return the default computation type (RAW, i.e. untransformed data)."""
        return Computation(Computation.RAW)

    def _build_default_organization(self):
        """Build the Organization from the [ORGANIZATION] config section."""
        acronym = self._read_config_value("ORGANIZATION", "acronym")
        result = Organization(chain_for_id=acronym)
        result.acronym = acronym
        result.name = self._read_config_value("ORGANIZATION", "name")
        result.url = self._read_config_value("ORGANIZATION", "url")
        result.description_en = self._read_config_value("ORGANIZATION", "description_en")
        result.description_es = self._read_config_value("ORGANIZATION", "description_es")
        result.description_fr = self._read_config_value("ORGANIZATION", "description_fr")
        return result

    def _build_default_datasource(self):
        """Build the DataSource from the [DATASOURCE] config section."""
        result = DataSource(chain_for_id=self._default_organization.acronym,
                            int_for_id=self._config.get("DATASOURCE", "id"))
        result.name = self._config.get("DATASOURCE", "name")
        return result

    def _build_default_dataset(self):
        """Build the Dataset holding every indicator; frequency is yearly.

        NOTE(review): the dataset id reuses the DATASOURCE id, and the
        incremented _dat_int below is not used for it -- confirm intended.
        """
        result = Dataset(chain_for_id=self._default_organization.acronym, int_for_id=self._config.get("DATASOURCE", "id")
                         )
        self._dat_int += 1  # Needed increment
        for key in self._indicators_dict:
            result.add_indicator(self._indicators_dict[key])
        result.frequency = Dataset.YEARLY
        return result

    def _build_default_license(self):
        """Build the License from the [LICENSE] config section."""
        result = License()
        result.republish = self._config.get("LICENSE", "republish")
        result.description = self._config.get("LICENSE", "description")
        result.name = self._config.get("LICENSE", "name")
        result.url = self._config.get("LICENSE", "url")
        return result

    def _relate_common_objects(self):
        """Wire user -> organization -> datasource -> dataset -> license."""
        self._default_organization.add_user(self._default_user)
        self._default_organization.add_data_source(self._default_datasource)
        self._default_datasource.add_dataset(self._default_dataset)
        self._default_dataset.license_type = self._default_license
        #No return needed

    def run(self):
        """
        Translates the downloaded data into model objects. look_for_historical is a boolean
        that indicates if we have to consider old information or only bear in mind actual one

        Raises RuntimeError when either the model building phase or the
        hand-off to the receiver module fails.
        """
        try:
            # The XML root contains a <deals> element whose children are the
            # per-deal info nodes.
            info_nodes = self._get_info_nodes_from_file().find("deals")
            self._log.info("Number of info_nodes read = %i" %len(info_nodes))
            deals = self._turn_info_nodes_into_deals(info_nodes)
            self._log.info("Number of info_nodes turn into deals = %i" %len(deals))
            deal_entrys = self._turn_deals_into_deal_entrys(deals)
            self._log.info("Number of deals turn into deal_entries = %i" %len(deal_entrys))
            observations = self._turn_deal_entrys_into_obs_objects(deal_entrys)
            self._log.info("Number of observations generated = %i" %len(observations))
            for obs in observations:
                self._default_dataset.add_observation(obs)
        except BaseException as e:
            # e.message is Python 2 only.
            raise RuntimeError("Error while trying to build model objects: " + e.message)
        m2x = ModelToXMLTransformer(dataset=self._default_dataset,
                                    import_process=ModelToXMLTransformer.XML,
                                    user=self._default_user,
                                    path_to_original_file=self._path_to_original_file())
        try:
            m2x.run()
            # Persist the advanced id counters only after a successful send.
            self._actualize_config_values()
        except BaseException as e:
            raise RuntimeError("Error while sendig info to te receiver module: " + e.message)

    def _path_to_original_file(self):
        """Return the absolute path of the configured source XML file."""
        raw_path = self._config.get("LAND_MATRIX", "target_file")
        return os.path.abspath(raw_path)

    def _actualize_config_values(self):
        """Persist the current entity id counters to a new config file."""
        self._config.set("TRANSLATOR", "obs_int", self._obs_int)
        self._config.set("TRANSLATOR", "dat_int", self._dat_int)
        self._config.set("TRANSLATOR", "sli_int", self._sli_int)
        self._config.set("TRANSLATOR", "igr_int", self._igr_int)
        with open("./config/configuration.ini.new", 'wb') as config_file:
            self._config.write(config_file)

    def _turn_deal_entrys_into_obs_objects(self, deal_entrys):
        """Turn every deal entry into an Observation, keeping those that pass the filters."""
        result = []
        for key in deal_entrys:
            new_obs = self._turn_deal_entry_into_obs(deal_entrys[key])
            if self._pass_filters(new_obs):
                result.append(new_obs)  # The method returns a list
        return result

    def _pass_filters(self, obs):
        """Decide whether an observation should be kept.

        Historical runs keep everything; otherwise observations dated before
        the configured first valid year are dropped.

        NOTE(review): on the very first call _target_date is initialised and
        the observation is accepted WITHOUT the year check (the elif is
        skipped) -- confirm this is intended.
        """
        if self._look_for_historical:
            return True
        if not "_target_date" in self.__dict__:
            self._target_date = self._get_current_date()
        elif self._get_year_of_observation(obs) < self._target_date:
            return False
        return True

    @staticmethod
    def _get_year_of_observation(obs):
        """Return the observation's year; only YearInterval ref times are supported."""
        date_obj = obs.ref_time
        if type(date_obj) == YearInterval:
            return int(date_obj.year)
        else:
            raise RuntimeError("Unexpected object date. Impossible to build observation from it: " + type(date_obj))

    def _get_current_date(self):
        """Return the first valid year from the [HISTORICAL] config section."""
        return int(self._config.get("HISTORICAL", "first_valid_year"))

    def _turn_deal_entry_into_obs(self, deal_entry):
        """Build an Observation from a deal entry and link it to its country."""
        result = Observation(chain_for_id=self._default_organization.acronym, int_for_id=self._obs_int)
        self._obs_int += 1  # Updating obs id
        #Indicator
        result.indicator = deal_entry.indicator
        #Value
        result.value = self._build_value_object(deal_entry)  # Done
        #Computation
        result.computation = self._default_computation
        #Issued
        result.issued = self._build_issued_object()  # No param needed
        #ref_time
        result.ref_time = self._build_ref_time_object(deal_entry)  # Done
        #country
        deal_entry.country.add_observation(result)  # And that establish the relationship in both directions
        return result

    @staticmethod
    def _build_issued_object():
        """Return an Instant stamped with the current local time."""
        return Instant(datetime.now())

    def _build_ref_time_object(self, deal_entry):
        """Return the deal's reference period as an Interval."""
        return Interval(start_time=deal_entry.first_date, end_time=deal_entry.last_date)

    @staticmethod
    def _build_value_object(deal_entry):
        """Wrap the deal's raw value as an available integer Value."""
        result = Value()
        result.value = deal_entry.value
        result.value_type = Value.INTEGER
        result.obs_status = Value.AVAILABLE
        return result

    def _turn_deals_into_deal_entrys(self, deals):
        """Delegate deal aggregation to DealsAnalyser and return its result."""
        analyser = DealsAnalyser(deals, self._indicators_dict)
        result = analyser.run()
        return result

    def _turn_info_nodes_into_deals(self, info_nodes):
        """Parse every XML info node into a deal; broken nodes are logged and skipped."""
        result = []
        for info_node in info_nodes:
            try:
                self._log.debug("Parsing deal id = " + info_node.findtext("./field[@name='Deal ID']").strip())
                deal = DealsBuilder.turn_node_into_deal_object(info_node)
                self._log.debug("Parsing finished of deal = " + str(deal))
                result.append(deal)
            except BaseException as ex:
                # Best-effort: one bad node must not abort the whole import.
                self._log.warning("Problem while parsing a node of a deal. Deal will be ignored. Cause: " + ex.message)
                e = sys.exc_info()[0]
                print "Error: %s" % e
                traceback.print_exc(file=sys.stdout)
        return result

    def _get_info_nodes_from_file(self):
        """
        Parse the configured LAND_MATRIX target_file (UTF-8 XML) and return
        the root element of the parsed tree.

        Raises RuntimeError when the file cannot be parsed as XML.
        """
        file_path = self._config.get("LAND_MATRIX", "target_file")
        try:
            content_file = codecs.open(file_path, encoding="utf-8")
            lines = content_file.read()
            content_file.close()
            return ETree.fromstring(lines.encode(encoding="utf-8"))
        except Exception as e:
            print e.message, e.args
            raise RuntimeError("Impossible to parse xml in path: {0}. \
            It looks that it is not a valid xml file.".format(file_path))

    def _build_indicators_dict(self):
        """Build {indicator_id: Indicator} from the config's [INDICATORS] codes list."""
        # Possibilities. Putting this ugly and huge code here, or refactor it, charging properties using
        # patterns: *_name_en, *_name_fr...
        # If i do that, we will have much less code, but we must to ensure that the property names never change.
        # I am not sure of which one is the most dangerous option, but also i'm not sure about if
        # that is a question that deserves to waste time with it. So huge and ugly code.
        # NOTE(review): the two locals below are unused here (units come from
        # _get_unit, which builds its own copies).
        hectares = MeasurementUnit(name="hectares",
                                   convert_to=MeasurementUnit.SQ_KM,
                                   factor=0.01)
        units = MeasurementUnit(name="units",
                                convert_to=MeasurementUnit.UNITS)
        default_topic = 'LAND_USE'
        result = {}
        indicator_codes = json.loads(self._config.get("INDICATORS", "codes"))
        self._log.info("Init process to add %d indicators in the indicators dictionary" %len(indicator_codes))
        for indicator in indicator_codes:
            try:
                id = self._read_config_value(indicator, "id")
                ind = Indicator(chain_for_id=self._default_organization.acronym, int_for_id=id)
                ind.name_en = self._read_config_value(indicator, "name_en")
                ind.name_es = self._read_config_value(indicator, "name_es")
                ind.name_fr = self._read_config_value(indicator, "name_fr")
                ind.description_en = self._read_config_value(indicator, "desc_en")
                ind.description_es = self._read_config_value(indicator, "desc_es")
                ind.description_fr = self._read_config_value(indicator, "desc_fr")
                ind.topic = default_topic  # TODO improve
                ind.preferable_tendency = Indicator.IRRELEVANT  # TODO improve
                ind.measurement_unit = self._get_unit(self._read_config_value(indicator, "unit"))
                generated_code = id
                result[generated_code] = ind  # Add the indicator in the dictionary
            except:
                print("exception on id = %s" %id)  #TODO improve exception
        self._log.info("Added %d indicators in the indicators dictionary" % len(result))
        return result

    def _read_config_value(self, section, field):
        """Read a config value and decode it from UTF-8 (Python 2 str -> unicode)."""
        return (self._config.get(section, field)).decode(encoding="utf-8")

    @staticmethod
    def _get_unit(unit):
        """Map a config unit name ('units'/'hectares', any case) to a MeasurementUnit."""
        hectares = MeasurementUnit(name="hectares",
                                   convert_to=MeasurementUnit.SQ_KM,
                                   factor=0.01)
        units = MeasurementUnit(name="units",
                                convert_to=MeasurementUnit.UNITS)
        unit = unit.upper()
        if unit == "UNITS":
            return units
        elif unit == "HECTARES":
            return hectares
        else:
            raise ValueError("No valid units value: %s" %unit)
| |
from datetime import datetime
import unittest
from trac.core import *
from trac.test import EnvironmentStub
from trac.util.datefmt import utc, to_timestamp
from trac.wiki import WikiPage, IWikiChangeListener
class TestWikiChangeListener(Component):
    """Records every wiki change notification so tests can assert on them."""
    implements(IWikiChangeListener)

    def __init__(self):
        # One list per event type; each callback appends its payload.
        self.added = []
        self.changed = []
        self.deleted = []
        self.deleted_version = []

    def wiki_page_added(self, page):
        """Record a page creation event."""
        self.added.append(page)

    def wiki_page_changed(self, page, version, t, comment, author, ipnr):
        """Record a page modification event with its full metadata."""
        self.changed.append((page, version, t, comment, author, ipnr))

    def wiki_page_deleted(self, page):
        """Record a whole-page deletion event."""
        self.deleted.append(page)

    def wiki_page_version_deleted(self, page):
        """Record a single-version deletion event."""
        self.deleted_version.append(page)
class WikiPageTestCase(unittest.TestCase):
    """Exercise WikiPage CRUD behaviour against a stub Trac environment."""

    def setUp(self):
        self.env = EnvironmentStub()
        self.db = self.env.get_db_cnx()

    def tearDown(self):
        self.env.reset_db()

    def test_new_page(self):
        """A page that was never saved reports empty/default attributes."""
        page = WikiPage(self.env)
        self.assertEqual(False, page.exists)
        self.assertEqual(None, page.name)
        self.assertEqual(0, page.version)
        self.assertEqual('', page.text)
        self.assertEqual(0, page.readonly)
        self.assertEqual('', page.author)
        self.assertEqual('', page.comment)
        self.assertEqual(None, page.time)

    def test_existing_page(self):
        """Loading a stored page exposes its fields and single-entry history."""
        t = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        cursor = self.db.cursor()
        cursor.execute("INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
                       ('TestPage', 1, to_timestamp(t), 'joe', '::1',
                        'Bla bla', 'Testing', 0))
        page = WikiPage(self.env, 'TestPage')
        self.assertEqual(True, page.exists)
        self.assertEqual('TestPage', page.name)
        self.assertEqual(1, page.version)
        self.assertEqual(None, page.resource.version)  # FIXME: Intentional?
        self.assertEqual('Bla bla', page.text)
        self.assertEqual(0, page.readonly)
        self.assertEqual('joe', page.author)
        self.assertEqual('Testing', page.comment)
        self.assertEqual(t, page.time)
        history = list(page.get_history())
        self.assertEqual(1, len(history))
        self.assertEqual((1, t, 'joe', 'Testing', '::1'), history[0])
        page = WikiPage(self.env, 'TestPage', 1)
        self.assertEqual(1, page.resource.version)

    def test_create_page(self):
        """Saving a new page stores a row and notifies change listeners."""
        page = WikiPage(self.env)
        page.name = 'TestPage'
        page.text = 'Bla bla'
        t = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        page.save('joe', 'Testing', '::1', t)
        self.assertEqual(True, page.exists)
        self.assertEqual(1, page.version)
        self.assertEqual(1, page.resource.version)
        self.assertEqual(0, page.readonly)
        self.assertEqual('joe', page.author)
        self.assertEqual('Testing', page.comment)
        self.assertEqual(t, page.time)
        cursor = self.db.cursor()
        cursor.execute("SELECT version,time,author,ipnr,text,comment,"
                       "readonly FROM wiki WHERE name=%s", ('TestPage',))
        self.assertEqual((1, to_timestamp(t), 'joe', '::1', 'Bla bla',
                          'Testing', 0),
                         cursor.fetchone())
        listener = TestWikiChangeListener(self.env)
        self.assertEqual(page, listener.added[0])

    def test_update_page(self):
        """Saving an existing page creates a new version, keeping the old row."""
        cursor = self.db.cursor()
        t = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        t2 = datetime(2002, 1, 1, 1, 1, 1, 0, utc)
        cursor.execute("INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
                       ('TestPage', 1, to_timestamp(t), 'joe', '::1',
                        'Bla bla', 'Testing', 0))
        page = WikiPage(self.env, 'TestPage')
        page.text = 'Bla'
        page.save('kate', 'Changing', '192.168.0.101', t2)
        self.assertEqual(2, page.version)
        self.assertEqual(2, page.resource.version)
        self.assertEqual(0, page.readonly)
        self.assertEqual('kate', page.author)
        self.assertEqual('Changing', page.comment)
        self.assertEqual(t2, page.time)
        cursor.execute("SELECT version,time,author,ipnr,text,comment,"
                       "readonly FROM wiki WHERE name=%s", ('TestPage',))
        self.assertEqual((1, to_timestamp(t), 'joe', '::1', 'Bla bla',
                          'Testing', 0),
                         cursor.fetchone())
        self.assertEqual((2, to_timestamp(t2), 'kate', '192.168.0.101', 'Bla',
                          'Changing', 0), cursor.fetchone())
        listener = TestWikiChangeListener(self.env)
        self.assertEqual((page, 2, t2, 'Changing', 'kate', '192.168.0.101'),
                         listener.changed[0])
        page = WikiPage(self.env, 'TestPage')
        history = list(page.get_history())
        self.assertEqual(2, len(history))
        self.assertEqual((2, t2, 'kate', 'Changing', '192.168.0.101'),
                         history[0])
        self.assertEqual((1, t, 'joe', 'Testing', '::1'), history[1])

    def test_delete_page(self):
        """Deleting a page removes all its rows and notifies listeners."""
        cursor = self.db.cursor()
        cursor.execute("INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
                       ('TestPage', 1, 42, 'joe', '::1', 'Bla bla', 'Testing',
                        0))
        page = WikiPage(self.env, 'TestPage')
        page.delete()
        self.assertEqual(False, page.exists)
        cursor.execute("SELECT version,time,author,ipnr,text,comment,"
                       "readonly FROM wiki WHERE name=%s", ('TestPage',))
        self.assertEqual(None, cursor.fetchone())
        listener = TestWikiChangeListener(self.env)
        self.assertEqual(page, listener.deleted[0])

    def test_delete_page_version(self):
        """Deleting one of several versions keeps the page alive."""
        cursor = self.db.cursor()
        cursor.executemany("INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
                           [('TestPage', 1, 42, 'joe', '::1', 'Bla bla',
                             'Testing', 0),
                            ('TestPage', 2, 43, 'kate', '192.168.0.101', 'Bla',
                             'Changing', 0)])
        page = WikiPage(self.env, 'TestPage')
        page.delete(version=2)
        self.assertEqual(True, page.exists)
        cursor.execute("SELECT version,time,author,ipnr,text,comment,"
                       "readonly FROM wiki WHERE name=%s", ('TestPage',))
        self.assertEqual((1, 42, 'joe', '::1', 'Bla bla', 'Testing', 0),
                         cursor.fetchone())
        self.assertEqual(None, cursor.fetchone())
        listener = TestWikiChangeListener(self.env)
        self.assertEqual(page, listener.deleted_version[0])

    def test_delete_page_last_version(self):
        """Deleting the only remaining version deletes the whole page."""
        cursor = self.db.cursor()
        cursor.execute("INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
                       ('TestPage', 1, 42, 'joe', '::1', 'Bla bla', 'Testing',
                        0))
        page = WikiPage(self.env, 'TestPage')
        page.delete(version=1)
        self.assertEqual(False, page.exists)
        cursor.execute("SELECT version,time,author,ipnr,text,comment,"
                       "readonly FROM wiki WHERE name=%s", ('TestPage',))
        self.assertEqual(None, cursor.fetchone())
        listener = TestWikiChangeListener(self.env)
        self.assertEqual(page, listener.deleted[0])
def suite():
    """Assemble and return the test suite for this module."""
    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'test'
    return loader.loadTestsFromTestCase(WikiPageTestCase)
if __name__ == '__main__':
    # Run this module's suite() when executed directly.
    unittest.main(defaultTest='suite')
| |
import unittest
import re
import sys
import os
from test import test_support
from subprocess import Popen, PIPE
# Skip this test if the _tkinter module wasn't built.
_tkinter = test_support.import_module('_tkinter')
import Tkinter as tkinter
from Tkinter import Tcl
from _tkinter import TclError
# INT_MAX / PY_SSIZE_T_MAX come from the C-API test helper when available;
# fall back to sys.maxsize when _testcapi is not built.
try:
    from _testcapi import INT_MAX, PY_SSIZE_T_MAX
except ImportError:
    INT_MAX = PY_SSIZE_T_MAX = sys.maxsize
# Linked Tcl version as a comparable tuple, e.g. (8, 5).
tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.')))
# Lazily-filled cache for get_tk_patchlevel().
_tk_patchlevel = None
def get_tk_patchlevel():
    """Return the Tcl patchlevel as a comparable 5-tuple.

    The result is (major, minor, micro, releaselevel, serial), e.g.
    (8, 5, 8, 'final', 0), and is cached in the module-level _tk_patchlevel
    after the first call (which needs a live Tcl interpreter).
    """
    global _tk_patchlevel
    if _tk_patchlevel is None:
        tcl = Tcl()
        patchlevel = tcl.call('info', 'patchlevel')
        # NOTE(review): assumes the patchlevel string matches the standard
        # 'X.Y[ab.]Z' form; anything else raises AttributeError on m.groups().
        m = re.match(r'(\d+)\.(\d+)([ab.])(\d+)$', patchlevel)
        major, minor, releaselevel, serial = m.groups()
        major, minor, serial = int(major), int(minor), int(serial)
        releaselevel = {'a': 'alpha', 'b': 'beta', '.': 'final'}[releaselevel]
        if releaselevel == 'final':
            _tk_patchlevel = major, minor, serial, releaselevel, 0
        else:
            # Pre-releases have micro 0 and carry the serial instead.
            _tk_patchlevel = major, minor, 0, releaselevel, serial
    return _tk_patchlevel
class TkinterTest(unittest.TestCase):
    """Tests for module-level _tkinter helpers."""

    def testFlattenLen(self):
        """_flatten rejects objects that have no length."""
        # flatten(<object with no length>)
        self.assertRaises(TypeError, _tkinter._flatten, True)
class TclTest(unittest.TestCase):
def setUp(self):
    """Create a fresh Tcl interpreter and record its wantobjects setting."""
    self.interp = Tcl()
    self.wantobjects = self.interp.tk.wantobjects()
def testEval(self):
    """eval() sets and reads back a Tcl variable."""
    tcl = self.interp
    tcl.eval('set a 1')
    self.assertEqual(tcl.eval('set a'), '1')
def testEvalException(self):
    """eval() raises TclError for an unset variable."""
    tcl = self.interp
    self.assertRaises(TclError, tcl.eval, 'set a')
def testEvalException2(self):
    """eval() raises TclError for an invalid command."""
    tcl = self.interp
    self.assertRaises(TclError, tcl.eval, 'this is wrong')
def testCall(self):
    """call() sets and reads back a Tcl variable."""
    tcl = self.interp
    tcl.call('set', 'a', '1')
    self.assertEqual(tcl.call('set', 'a'), '1')
def testCallException(self):
    """call() raises TclError for an unset variable."""
    tcl = self.interp
    self.assertRaises(TclError, tcl.call, 'set', 'a')
def testCallException2(self):
    """call() raises TclError for an invalid command."""
    tcl = self.interp
    self.assertRaises(TclError, tcl.call, 'this', 'is', 'wrong')
def testSetVar(self):
    """setvar() stores a value visible to Tcl eval."""
    tcl = self.interp
    tcl.setvar('a', '1')
    self.assertEqual(tcl.eval('set a'), '1')
def testSetVarArray(self):
    """setvar() works with Tcl array elements."""
    tcl = self.interp
    tcl.setvar('a(1)', '1')
    self.assertEqual(tcl.eval('set a(1)'), '1')
def testGetVar(self):
    """getvar() reads a variable set via Tcl eval."""
    tcl = self.interp
    tcl.eval('set a 1')
    self.assertEqual(tcl.getvar('a'), '1')
def testGetVarArray(self):
    """getvar() reads a Tcl array element."""
    tcl = self.interp
    tcl.eval('set a(1) 1')
    self.assertEqual(tcl.getvar('a(1)'), '1')
def testGetVarException(self):
    """getvar() raises TclError for an unset variable."""
    tcl = self.interp
    self.assertRaises(TclError, tcl.getvar, 'a')
def testGetVarArrayException(self):
    """getvar() raises TclError for an unset array element."""
    tcl = self.interp
    self.assertRaises(TclError, tcl.getvar, 'a(1)')
def testUnsetVar(self):
    """unsetvar() removes an existing scalar variable."""
    tcl = self.interp
    tcl.setvar('a', 1)
    self.assertEqual(tcl.eval('info exists a'), '1')
    tcl.unsetvar('a')
    self.assertEqual(tcl.eval('info exists a'), '0')
def testUnsetVarArray(self):
    """unsetvar() removes only the named array element."""
    tcl = self.interp
    tcl.setvar('a(1)', 1)
    tcl.setvar('a(2)', 2)
    self.assertEqual(tcl.eval('info exists a(1)'), '1')
    self.assertEqual(tcl.eval('info exists a(2)'), '1')
    tcl.unsetvar('a(1)')
    self.assertEqual(tcl.eval('info exists a(1)'), '0')
    self.assertEqual(tcl.eval('info exists a(2)'), '1')
def testUnsetVarException(self):
    """unsetvar() raises TclError for a variable that does not exist."""
    tcl = self.interp
    self.assertRaises(TclError, tcl.unsetvar, 'a')
def get_integers(self):
    """Return sample integer values supported by the linked Tcl build."""
    values = [0, 1, -1, 2**31-1, -2**31]
    if tcl_version >= (8, 4):  # wideInt was added in Tcl 8.4
        values.extend([2**31, -2**31-1, 2**63-1, -2**63])
    # bignum was added in Tcl 8.5, but is only usable since 8.5.8
    patchlevel = get_tk_patchlevel()
    if (patchlevel >= (8, 6, 0, 'final')
            or (8, 5, 8) <= patchlevel < (8, 6)):
        values.extend([2**63, -2**63-1, 2**1000, -2**1000])
    return tuple(values)
def test_getint(self):
    """tk.getint: accepted string/int forms, error cases, bignum limits."""
    tcl = self.interp.tk
    for i in self.get_integers():
        result = tcl.getint(' %d ' % i)
        self.assertEqual(result, i)
        self.assertIsInstance(result, type(int(result)))
        if tcl_version >= (8, 5):
            self.assertEqual(tcl.getint(' {:#o} '.format(i)), i)
        self.assertEqual(tcl.getint(' %#o ' % i), i)
        self.assertEqual(tcl.getint(' %#x ' % i), i)
    if tcl_version < (8, 5):  # bignum was added in Tcl 8.5
        self.assertRaises(TclError, tcl.getint, str(2**1000))
    self.assertEqual(tcl.getint(42), 42)
    self.assertRaises(TypeError, tcl.getint)
    self.assertRaises(TypeError, tcl.getint, '42', '10')
    self.assertRaises(TypeError, tcl.getint, 42.0)
    self.assertRaises(TclError, tcl.getint, 'a')
    self.assertRaises((TypeError, ValueError, TclError),
                      tcl.getint, '42\0')
    if test_support.have_unicode:
        self.assertEqual(tcl.getint(unicode('42')), 42)
        self.assertRaises((UnicodeEncodeError, ValueError, TclError),
                          tcl.getint, '42' + unichr(0xd800))
def test_getdouble(self):
    """tk.getdouble: accepted forms and error cases."""
    tcl = self.interp.tk
    self.assertEqual(tcl.getdouble(' 42 '), 42.0)
    self.assertEqual(tcl.getdouble(' 42.5 '), 42.5)
    self.assertEqual(tcl.getdouble(42.5), 42.5)
    self.assertRaises(TypeError, tcl.getdouble)
    self.assertRaises(TypeError, tcl.getdouble, '42.5', '10')
    self.assertRaises(TypeError, tcl.getdouble, 42)
    self.assertRaises(TclError, tcl.getdouble, 'a')
    self.assertRaises((TypeError, ValueError, TclError),
                      tcl.getdouble, '42.5\0')
    if test_support.have_unicode:
        self.assertEqual(tcl.getdouble(unicode('42.5')), 42.5)
        self.assertRaises((UnicodeEncodeError, ValueError, TclError),
                          tcl.getdouble, '42.5' + unichr(0xd800))
def test_getboolean(self):
    """tk.getboolean: truthy/falsy Tcl forms and error cases."""
    tcl = self.interp.tk
    self.assertIs(tcl.getboolean('on'), True)
    self.assertIs(tcl.getboolean('1'), True)
    self.assertIs(tcl.getboolean(u'on'), True)
    self.assertIs(tcl.getboolean(u'1'), True)
    self.assertIs(tcl.getboolean(42), True)
    self.assertIs(tcl.getboolean(0), False)
    self.assertIs(tcl.getboolean(42L), True)
    self.assertIs(tcl.getboolean(0L), False)
    self.assertRaises(TypeError, tcl.getboolean)
    self.assertRaises(TypeError, tcl.getboolean, 'on', '1')
    self.assertRaises(TypeError, tcl.getboolean, 1.0)
    self.assertRaises(TclError, tcl.getboolean, 'a')
    self.assertRaises((TypeError, ValueError, TclError),
                      tcl.getboolean, 'on\0')
    if test_support.have_unicode:
        self.assertIs(tcl.getboolean(unicode('on')), True)
        self.assertRaises((UnicodeEncodeError, ValueError, TclError),
                          tcl.getboolean, 'on' + unichr(0xd800))
def testEvalFile(self):
    """evalfile() runs a script file and its variables become visible.

    Uses a context manager so the script file is closed even if the write
    fails (the original leaked the handle on error).
    """
    tcl = self.interp
    filename = "testEvalFile.tcl"
    script = """set a 1
    set b 2
    set c [ expr $a + $b ]
    """
    # 'with' guarantees the handle is closed before evalfile reads the file.
    with open(filename, 'w') as fd:
        fd.write(script)
    tcl.evalfile(filename)
    os.remove(filename)
    self.assertEqual(tcl.eval('set a'), '1')
    self.assertEqual(tcl.eval('set b'), '2')
    self.assertEqual(tcl.eval('set c'), '3')
def test_evalfile_null_in_result(self):
    """NUL bytes in evalfile results come back as Tcl's modified UTF-8 (\\xc0\\x80)."""
    tcl = self.interp
    with open(test_support.TESTFN, 'wb') as f:
        self.addCleanup(test_support.unlink, test_support.TESTFN)
        f.write("""
        set a "a\0b"
        set b "a\\0b"
        """)
    tcl.evalfile(test_support.TESTFN)
    self.assertEqual(tcl.eval('set a'), 'a\xc0\x80b')
    self.assertEqual(tcl.eval('set b'), 'a\xc0\x80b')
def testEvalFileException(self):
    """evalfile() raises TclError for a missing file."""
    tcl = self.interp
    filename = "doesnotexists"
    try:
        # Best-effort cleanup in case a previous run left the file behind.
        os.remove(filename)
    except Exception,e:
        pass
    self.assertRaises(TclError, tcl.evalfile, filename)
def testPackageRequireException(self):
    """'package require' of a nonexistent package raises TclError."""
    tcl = self.interp
    self.assertRaises(TclError, tcl.eval, 'package require DNE')
@unittest.skipUnless(sys.platform == 'win32', "only applies to Windows")
def testLoadWithUNC(self):
    """Tkinter can be imported when Python runs from a UNC path (Windows)."""
    # Build a UNC path from the regular path.
    # Something like
    #   \\%COMPUTERNAME%\c$\python27\python.exe
    fullname = os.path.abspath(sys.executable)
    if fullname[1] != ':':
        self.skipTest('unusable path: %r' % fullname)
    unc_name = r'\\%s\%s$\%s' % (os.environ['COMPUTERNAME'],
                                 fullname[0],
                                 fullname[3:])
    with test_support.EnvironmentVarGuard() as env:
        # Unset so the child resolves the Tcl library relative to unc_name.
        env.unset("TCL_LIBRARY")
        cmd = '%s -c "import Tkinter; print Tkinter"' % (unc_name,)
        try:
            p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        except WindowsError as e:
            if e.winerror == 5:
                self.skipTest('Not permitted to start the child process')
            else:
                raise
        out_data, err_data = p.communicate()
        msg = '\n\n'.join(['"Tkinter.py" not in output',
                           'Command:', cmd,
                           'stdout:', out_data,
                           'stderr:', err_data])
        self.assertIn('Tkinter.py', out_data, msg)
        self.assertEqual(p.wait(), 0, 'Non-zero exit code')
def test_exprstring(self):
    """exprstring() evaluates Tcl expressions and always returns str."""
    tcl = self.interp
    tcl.call('set', 'a', 3)
    tcl.call('set', 'b', 6)
    def check(expr, expected):
        result = tcl.exprstring(expr)
        self.assertEqual(result, expected)
        self.assertIsInstance(result, str)
    self.assertRaises(TypeError, tcl.exprstring)
    self.assertRaises(TypeError, tcl.exprstring, '8.2', '+6')
    self.assertRaises(TclError, tcl.exprstring, 'spam')
    check('', '0')
    check('8.2 + 6', '14.2')
    check('3.1 + $a', '6.1')
    check('2 + "$a.$b"', '5.6')
    check('4*[llength "6 2"]', '8')
    check('{word one} < "word $a"', '0')
    check('4*2 < 7', '0')
    check('hypot($a, 4)', '5.0')
    check('5 / 4', '1')
    check('5 / 4.0', '1.25')
    check('5 / ( [string length "abcd"] + 0.0 )', '1.25')
    check('20.0/5.0', '4.0')
    check('"0x03" > "2"', '1')
    check('[string length "a\xc2\xbd\xe2\x82\xac"]', '3')
    check(r'[string length "a\xbd\u20ac"]', '3')
    check('"abc"', 'abc')
    check('"a\xc2\xbd\xe2\x82\xac"', 'a\xc2\xbd\xe2\x82\xac')
    check(r'"a\xbd\u20ac"', 'a\xc2\xbd\xe2\x82\xac')
    check(r'"a\0b"', 'a\xc0\x80b')
    if tcl_version >= (8, 5):  # bignum was added in Tcl 8.5
        check('2**64', str(2**64))
def test_exprdouble(self):
    """exprdouble() evaluates Tcl expressions and always returns float."""
    tcl = self.interp
    tcl.call('set', 'a', 3)
    tcl.call('set', 'b', 6)
    def check(expr, expected):
        result = tcl.exprdouble(expr)
        self.assertEqual(result, expected)
        self.assertIsInstance(result, float)
    self.assertRaises(TypeError, tcl.exprdouble)
    self.assertRaises(TypeError, tcl.exprdouble, '8.2', '+6')
    self.assertRaises(TclError, tcl.exprdouble, 'spam')
    check('', 0.0)
    check('8.2 + 6', 14.2)
    check('3.1 + $a', 6.1)
    check('2 + "$a.$b"', 5.6)
    check('4*[llength "6 2"]', 8.0)
    check('{word one} < "word $a"', 0.0)
    check('4*2 < 7', 0.0)
    check('hypot($a, 4)', 5.0)
    check('5 / 4', 1.0)
    check('5 / 4.0', 1.25)
    check('5 / ( [string length "abcd"] + 0.0 )', 1.25)
    check('20.0/5.0', 4.0)
    check('"0x03" > "2"', 1.0)
    check('[string length "a\xc2\xbd\xe2\x82\xac"]', 3.0)
    check(r'[string length "a\xbd\u20ac"]', 3.0)
    self.assertRaises(TclError, tcl.exprdouble, '"abc"')
    if tcl_version >= (8, 5):  # bignum was added in Tcl 8.5
        check('2**64', float(2**64))
def test_exprlong(self):
    """exprlong() evaluates Tcl expressions and always returns int."""
    tcl = self.interp
    tcl.call('set', 'a', 3)
    tcl.call('set', 'b', 6)
    def check(expr, expected):
        result = tcl.exprlong(expr)
        self.assertEqual(result, expected)
        self.assertIsInstance(result, int)
    self.assertRaises(TypeError, tcl.exprlong)
    self.assertRaises(TypeError, tcl.exprlong, '8.2', '+6')
    self.assertRaises(TclError, tcl.exprlong, 'spam')
    check('', 0)
    check('8.2 + 6', 14)
    check('3.1 + $a', 6)
    check('2 + "$a.$b"', 5)
    check('4*[llength "6 2"]', 8)
    check('{word one} < "word $a"', 0)
    check('4*2 < 7', 0)
    check('hypot($a, 4)', 5)
    check('5 / 4', 1)
    check('5 / 4.0', 1)
    check('5 / ( [string length "abcd"] + 0.0 )', 1)
    check('20.0/5.0', 4)
    check('"0x03" > "2"', 1)
    check('[string length "a\xc2\xbd\xe2\x82\xac"]', 3)
    check(r'[string length "a\xbd\u20ac"]', 3)
    self.assertRaises(TclError, tcl.exprlong, '"abc"')
    if tcl_version >= (8, 5):  # bignum was added in Tcl 8.5
        # A bignum result does not fit in a long -> TclError.
        self.assertRaises(TclError, tcl.exprlong, '2**64')
def test_exprboolean(self):
    """exprboolean() returns a plain int (0/1), never a bool."""
    tcl = self.interp
    tcl.call('set', 'a', 3)
    tcl.call('set', 'b', 6)
    def check(expr, expected):
        result = tcl.exprboolean(expr)
        self.assertEqual(result, expected)
        self.assertIsInstance(result, int)
        self.assertNotIsInstance(result, bool)
    self.assertRaises(TypeError, tcl.exprboolean)
    self.assertRaises(TypeError, tcl.exprboolean, '8.2', '+6')
    self.assertRaises(TclError, tcl.exprboolean, 'spam')
    check('', False)
    for value in ('0', 'false', 'no', 'off'):
        check(value, False)
        check('"%s"' % value, False)
        check('{%s}' % value, False)
    for value in ('1', 'true', 'yes', 'on'):
        check(value, True)
        check('"%s"' % value, True)
        check('{%s}' % value, True)
    check('8.2 + 6', True)
    check('3.1 + $a', True)
    check('2 + "$a.$b"', True)
    check('4*[llength "6 2"]', True)
    check('{word one} < "word $a"', False)
    check('4*2 < 7', False)
    check('hypot($a, 4)', True)
    check('5 / 4', True)
    check('5 / 4.0', True)
    check('5 / ( [string length "abcd"] + 0.0 )', True)
    check('20.0/5.0', True)
    check('"0x03" > "2"', True)
    check('[string length "a\xc2\xbd\xe2\x82\xac"]', True)
    check(r'[string length "a\xbd\u20ac"]', True)
    self.assertRaises(TclError, tcl.exprboolean, '"abc"')
    if tcl_version >= (8, 5):  # bignum was added in Tcl 8.5
        check('2**64', True)
@unittest.skipUnless(tcl_version >= (8, 5), 'requires Tcl version >= 8.5')
def test_booleans(self):
    """Tcl boolean literals come back as int (wantobjects) or str."""
    tcl = self.interp
    def check(expr, expected):
        result = tcl.call('expr', expr)
        if tcl.wantobjects():
            self.assertEqual(result, expected)
            self.assertIsInstance(result, int)
        else:
            # String mode may return the literal itself or '0'/'1'.
            self.assertIn(result, (expr, str(int(expected))))
            self.assertIsInstance(result, str)
    check('true', True)
    check('yes', True)
    check('on', True)
    check('false', False)
    check('no', False)
    check('off', False)
    check('1 < 2', True)
    check('1 > 2', False)
def test_expr_bignum(self):
    """expr round-trips every supported integer size (int/long in py2)."""
    tcl = self.interp
    for i in self.get_integers():
        result = tcl.call('expr', str(i))
        if self.wantobjects:
            self.assertEqual(result, i)
            self.assertIsInstance(result, (int, long))
            if abs(result) < 2**31:
                # Small values must come back as plain int, not long.
                self.assertIsInstance(result, int)
        else:
            self.assertEqual(result, str(i))
            self.assertIsInstance(result, str)
    if tcl_version < (8, 5):  # bignum was added in Tcl 8.5
        self.assertRaises(TclError, tcl.call, 'expr', str(2**1000))
def test_passing_values(self):
    """Python values survive a round trip through a Tcl variable."""
    def passValue(value):
        # Store and read back via 'set'; returns Tcl's view of the value.
        return self.interp.call('set', '_', value)
    self.assertEqual(passValue(True), True if self.wantobjects else '1')
    self.assertEqual(passValue(False), False if self.wantobjects else '0')
    self.assertEqual(passValue('string'), 'string')
    self.assertEqual(passValue('string\xbd'), 'string\xbd')
    self.assertEqual(passValue('string\xe2\x82\xac'), u'string\u20ac')
    self.assertEqual(passValue(u'string'), u'string')
    self.assertEqual(passValue(u'string\xbd'), u'string\xbd')
    self.assertEqual(passValue(u'string\u20ac'), u'string\u20ac')
    self.assertEqual(passValue('str\x00ing'), 'str\x00ing')
    self.assertEqual(passValue('str\xc0\x80ing'), 'str\x00ing')
    self.assertEqual(passValue(u'str\x00ing'), u'str\x00ing')
    self.assertEqual(passValue(u'str\x00ing\xbd'), u'str\x00ing\xbd')
    self.assertEqual(passValue(u'str\x00ing\u20ac'), u'str\x00ing\u20ac')
    for i in self.get_integers():
        self.assertEqual(passValue(i), i if self.wantobjects else str(i))
    if tcl_version < (8, 5):  # bignum was added in Tcl 8.5
        self.assertEqual(passValue(2**1000), str(2**1000))
    for f in (0.0, 1.0, -1.0, 1//3, 1/3.0,
              sys.float_info.min, sys.float_info.max,
              -sys.float_info.min, -sys.float_info.max):
        if self.wantobjects:
            self.assertEqual(passValue(f), f)
        else:
            self.assertEqual(float(passValue(f)), f)
    if self.wantobjects:
        f = passValue(float('nan'))
        self.assertNotEqual(f, f)
        self.assertEqual(passValue(float('inf')), float('inf'))
        self.assertEqual(passValue(-float('inf')), -float('inf'))
    else:
        self.assertEqual(float(passValue(float('inf'))), float('inf'))
        self.assertEqual(float(passValue(-float('inf'))), -float('inf'))
        # XXX NaN representation can be not parsable by float()
    self.assertEqual(passValue((1, '2', (3.4,))),
                     (1, '2', (3.4,)) if self.wantobjects else '1 2 3.4')
def test_user_command(self):
    """Values passed to a Python command registered in Tcl arrive as strings."""
    result = []
    def testfunc(arg):
        # Records the argument Tcl handed to the Python callback.
        result.append(arg)
        return arg
    self.interp.createcommand('testfunc', testfunc)
    self.addCleanup(self.interp.tk.deletecommand, 'testfunc')
    def check(value, expected=None, eq=self.assertEqual):
        if expected is None:
            expected = value
        del result[:]
        r = self.interp.call('testfunc', value)
        self.assertEqual(len(result), 1)
        self.assertIsInstance(result[0], (str, unicode))
        eq(result[0], expected)
        self.assertIsInstance(r, (str, unicode))
        eq(r, expected)
    def float_eq(actual, expected):
        # Loose comparison for values whose string form loses precision.
        self.assertAlmostEqual(float(actual), expected,
                               delta=abs(expected) * 1e-10)
    check(True, '1')
    check(False, '0')
    check('string')
    check('string\xbd')
    check('string\xe2\x82\xac', u'string\u20ac')
    check('')
    check(u'string')
    check(u'string\xbd')
    check(u'string\u20ac')
    check(u'')
    check('str\xc0\x80ing', u'str\x00ing')
    check('str\xc0\x80ing\xe2\x82\xac', u'str\x00ing\u20ac')
    check(u'str\x00ing')
    check(u'str\x00ing\xbd')
    check(u'str\x00ing\u20ac')
    for i in self.get_integers():
        check(i, str(i))
    if tcl_version < (8, 5):  # bignum was added in Tcl 8.5
        check(2**1000, str(2**1000))
    for f in (0.0, 1.0, -1.0):
        check(f, repr(f))
    for f in (1/3.0, sys.float_info.min, sys.float_info.max,
              -sys.float_info.min, -sys.float_info.max):
        check(f, eq=float_eq)
    check(float('inf'), eq=float_eq)
    check(-float('inf'), eq=float_eq)
    # XXX NaN representation can be not parsable by float()
    check((), '')
    check((1, (2,), (3, 4), '5 6', ()), '1 2 {3 4} {5 6} {}')
def test_splitlist(self):
    # splitlist() must convert a Tcl list (given as a string, tuple, or
    # Tcl object) into a flat Python tuple, one level deep only.
    splitlist = self.interp.tk.splitlist
    call = self.interp.tk.call
    self.assertRaises(TypeError, splitlist)
    self.assertRaises(TypeError, splitlist, 'a', 'b')
    self.assertRaises(TypeError, splitlist, 2)
    # Each entry is (input, expected tuple).
    testcases = [
        ('2', ('2',)),
        ('', ()),
        ('{}', ('',)),
        ('""', ('',)),
        ('a\n b\t\r c\n ', ('a', 'b', 'c')),
        (u'a\n b\t\r c\n ', ('a', 'b', 'c')),
        ('a \xe2\x82\xac', ('a', '\xe2\x82\xac')),
        (u'a \u20ac', ('a', '\xe2\x82\xac')),
        ('a\xc0\x80b c\xc0\x80d', ('a\xc0\x80b', 'c\xc0\x80d')),
        ('a {b c}', ('a', 'b c')),
        (r'a b\ c', ('a', 'b c')),
        (('a', 'b c'), ('a', 'b c')),
        ('a 2', ('a', '2')),
        (('a', 2), ('a', 2)),
        ('a 3.4', ('a', '3.4')),
        (('a', 3.4), ('a', 3.4)),
        ((), ()),
        (call('list', 1, '2', (3.4,)),
            (1, '2', (3.4,)) if self.wantobjects else
            ('1', '2', '3.4')),
    ]
    if tcl_version >= (8, 5):
        if not self.wantobjects:
            expected = ('12', '\xe2\x82\xac', '\xe2\x82\xac', '3.4')
        elif get_tk_patchlevel() < (8, 5, 5):
            # Before 8.5.5 dicts were converted to lists through string
            expected = ('12', u'\u20ac', u'\u20ac', '3.4')
        else:
            expected = (12, u'\u20ac', u'\u20ac', (3.4,))
        testcases += [
            (call('dict', 'create', 12, u'\u20ac', '\xe2\x82\xac', (3.4,)),
                expected),
        ]
    for arg, res in testcases:
        self.assertEqual(splitlist(arg), res)
    # An unbalanced brace is not a valid Tcl list.
    self.assertRaises(TclError, splitlist, '{')
def test_split(self):
    # split() is like splitlist() but recurses into nested Tcl lists,
    # and a single-element value collapses to the element itself
    # (e.g. '{}' -> '' rather than ('',)).
    split = self.interp.tk.split
    call = self.interp.tk.call
    self.assertRaises(TypeError, split)
    self.assertRaises(TypeError, split, 'a', 'b')
    self.assertRaises(TypeError, split, 2)
    # Each entry is (input, expected result).
    testcases = [
        ('2', '2'),
        ('', ''),
        ('{}', ''),
        ('""', ''),
        ('{', '{'),
        ('a\n b\t\r c\n ', ('a', 'b', 'c')),
        (u'a\n b\t\r c\n ', ('a', 'b', 'c')),
        ('a \xe2\x82\xac', ('a', '\xe2\x82\xac')),
        (u'a \u20ac', ('a', '\xe2\x82\xac')),
        ('a\xc0\x80b', 'a\xc0\x80b'),
        ('a\xc0\x80b c\xc0\x80d', ('a\xc0\x80b', 'c\xc0\x80d')),
        ('a {b c}', ('a', ('b', 'c'))),
        (r'a b\ c', ('a', ('b', 'c'))),
        (('a', 'b c'), ('a', ('b', 'c'))),
        (('a', u'b c'), ('a', ('b', 'c'))),
        ('a 2', ('a', '2')),
        (('a', 2), ('a', 2)),
        ('a 3.4', ('a', '3.4')),
        (('a', 3.4), ('a', 3.4)),
        (('a', (2, 3.4)), ('a', (2, 3.4))),
        ((), ()),
        (call('list', 1, '2', (3.4,)),
            (1, '2', (3.4,)) if self.wantobjects else
            ('1', '2', '3.4')),
    ]
    if tcl_version >= (8, 5):
        if not self.wantobjects:
            expected = ('12', '\xe2\x82\xac', '\xe2\x82\xac', '3.4')
        elif get_tk_patchlevel() < (8, 5, 5):
            # Before 8.5.5 dicts were converted to lists through string
            expected = ('12', u'\u20ac', u'\u20ac', '3.4')
        else:
            expected = (12, u'\u20ac', u'\u20ac', (3.4,))
        testcases += [
            (call('dict', 'create', 12, u'\u20ac', '\xe2\x82\xac', (3.4,)),
                expected),
        ]
    for arg, res in testcases:
        self.assertEqual(split(arg), res)
def test_splitdict(self):
    # tkinter._splitdict() converts a Tcl key-value list into a Python
    # dict. When its third argument is false, keys are kept verbatim;
    # by default a leading '-' is stripped from each key.
    splitdict = tkinter._splitdict
    tcl = self.interp.tk
    arg = '-a {1 2 3} -something foo status {}'
    self.assertEqual(splitdict(tcl, arg, False),
                     {'-a': '1 2 3', '-something': 'foo', 'status': ''})
    self.assertEqual(splitdict(tcl, arg),
                     {'a': '1 2 3', 'something': 'foo', 'status': ''})
    # Tuple input: elements are taken pairwise without re-parsing values.
    arg = ('-a', (1, 2, 3), '-something', 'foo', 'status', '{}')
    self.assertEqual(splitdict(tcl, arg, False),
                     {'-a': (1, 2, 3), '-something': 'foo', 'status': '{}'})
    self.assertEqual(splitdict(tcl, arg),
                     {'a': (1, 2, 3), 'something': 'foo', 'status': '{}'})
    # An odd number of elements must be rejected.
    self.assertRaises(RuntimeError, splitdict, tcl, '-a b -c ')
    self.assertRaises(RuntimeError, splitdict, tcl, ('-a', 'b', '-c'))
    arg = tcl.call('list',
                   '-a', (1, 2, 3), '-something', 'foo', 'status', ())
    self.assertEqual(splitdict(tcl, arg),
                     {'a': (1, 2, 3) if self.wantobjects else '1 2 3',
                      'something': 'foo', 'status': ''})
    if tcl_version >= (8, 5):
        arg = tcl.call('dict', 'create',
                       '-a', (1, 2, 3), '-something', 'foo', 'status', ())
        if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5):
            # Before 8.5.5 dicts were converted to lists through string
            expected = {'a': '1 2 3', 'something': 'foo', 'status': ''}
        else:
            expected = {'a': (1, 2, 3), 'something': 'foo', 'status': ''}
        self.assertEqual(splitdict(tcl, arg), expected)
# Bytes per unicode character assumed by the bigmem memory estimates below:
# 4 on wide (UCS-4) builds, 2 on narrow (UTF-16) builds of Python 2.
character_size = 4 if sys.maxunicode > 0xFFFF else 2
class BigmemTclTest(unittest.TestCase):
    """Check that strings longer than INT_MAX raise OverflowError when
    passed into the Tcl interpreter, for every tkapp entry point that
    converts a Python string to a Tcl string."""

    def setUp(self):
        self.interp = Tcl()

    @test_support.cpython_only
    @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
    @test_support.precisionbigmemtest(size=INT_MAX + 1, memuse=5, dry_run=False)
    def test_huge_string_call(self, size):
        # A byte string longer than INT_MAX must be rejected by call().
        value = ' ' * size
        self.assertRaises(OverflowError, self.interp.call, 'set', '_', value)

    @test_support.cpython_only
    @unittest.skipUnless(test_support.have_unicode, 'requires unicode support')
    @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
    @test_support.precisionbigmemtest(size=INT_MAX + 1,
                                      memuse=2*character_size + 2,
                                      dry_run=False)
    def test_huge_unicode_call(self, size):
        # Same check for a unicode string longer than INT_MAX.
        value = unicode(' ') * size
        self.assertRaises(OverflowError, self.interp.call, 'set', '_', value)

    @test_support.cpython_only
    @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
    @test_support.precisionbigmemtest(size=INT_MAX + 1, memuse=9, dry_run=False)
    def test_huge_string_builtins(self, size):
        value = '1' + ' ' * size
        self.check_huge_string_builtins(value)

    @test_support.cpython_only
    @unittest.skipUnless(test_support.have_unicode, 'requires unicode support')
    @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
    @test_support.precisionbigmemtest(size=INT_MAX + 1,
                                      memuse=2*character_size + 7,
                                      dry_run=False)
    def test_huge_unicode_builtins(self, size):
        value = unicode('1' + ' ' * size)
        self.check_huge_string_builtins(value)

    def check_huge_string_builtins(self, value):
        # Exercise every string-accepting tkapp method with the over-long
        # value; each one must raise OverflowError.
        self.assertRaises(OverflowError, self.interp.tk.getint, value)
        self.assertRaises(OverflowError, self.interp.tk.getdouble, value)
        self.assertRaises(OverflowError, self.interp.tk.getboolean, value)
        self.assertRaises(OverflowError, self.interp.eval, value)
        self.assertRaises(OverflowError, self.interp.evalfile, value)
        self.assertRaises(OverflowError, self.interp.record, value)
        self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
        self.assertRaises(OverflowError, self.interp.setvar, value, 'x', 'a')
        self.assertRaises(OverflowError, self.interp.setvar, 'x', value, 'a')
        self.assertRaises(OverflowError, self.interp.unsetvar, value)
        self.assertRaises(OverflowError, self.interp.unsetvar, 'x', value)
        self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
        self.assertRaises(OverflowError, self.interp.exprstring, value)
        self.assertRaises(OverflowError, self.interp.exprlong, value)
        self.assertRaises(OverflowError, self.interp.exprboolean, value)
        self.assertRaises(OverflowError, self.interp.splitlist, value)
        self.assertRaises(OverflowError, self.interp.split, value)
        self.assertRaises(OverflowError, self.interp.createcommand, value, max)
        self.assertRaises(OverflowError, self.interp.deletecommand, value)
def setUpModule():
if test_support.verbose:
tcl = Tcl()
print 'patchlevel =', tcl.call('info', 'patchlevel')
def test_main():
    """Entry point: run every test case defined in this module."""
    cases = (TclTest, TkinterTest, BigmemTclTest)
    test_support.run_unittest(*cases)
# Allow invoking this test module directly as a script.
if __name__ == "__main__":
    test_main()
| |
from __future__ import division
"""
Author: Emmett Butler
"""
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["SimpleConsumer"]
import itertools
import logging
import time
import threading
from collections import defaultdict
from Queue import Queue, Empty
from .common import OffsetType
from .utils.compat import Semaphore
from .exceptions import (OffsetOutOfRangeError, UnknownTopicOrPartition,
OffsetMetadataTooLarge, OffsetsLoadInProgress,
NotCoordinatorForConsumer, SocketDisconnectedError,
ConsumerStoppedException, KafkaException,
OffsetRequestFailedError, ERROR_CODES)
from .protocol import (PartitionFetchRequest, PartitionOffsetCommitRequest,
PartitionOffsetFetchRequest, PartitionOffsetRequest)
from .utils.error_handlers import (handle_partition_responses, raise_error,
build_parts_by_error)
log = logging.getLogger(__name__)
# NOTE(fix): inherit from `object`. With empty bases this was an old-style
# (classic) class under Python 2, where descriptor-based features such as
# the @property accessors below do not behave like on new-style classes.
class SimpleConsumer(object):
    """
    A non-balancing consumer for Kafka
    """
    def __init__(self,
                 topic,
                 cluster,
                 consumer_group=None,
                 partitions=None,
                 fetch_message_max_bytes=1024 * 1024,
                 num_consumer_fetchers=1,
                 auto_commit_enable=False,
                 auto_commit_interval_ms=60 * 1000,
                 queued_max_messages=2000,
                 fetch_min_bytes=1,
                 fetch_wait_max_ms=100,
                 offsets_channel_backoff_ms=1000,
                 offsets_commit_max_retries=5,
                 auto_offset_reset=OffsetType.LATEST,
                 consumer_timeout_ms=-1,
                 auto_start=True,
                 reset_offset_on_start=False):
        """Create a SimpleConsumer.

        Settings and default values are taken from the Scala
        consumer implementation.  Consumer group is included
        because it's necessary for offset management, but doesn't imply
        that this is a balancing consumer. Use a BalancedConsumer for
        that.

        :param topic: The topic this consumer should consume
        :type topic: :class:`pykafka.topic.Topic`
        :param cluster: The cluster to which this consumer should connect
        :type cluster: :class:`pykafka.cluster.Cluster`
        :param consumer_group: The name of the consumer group this consumer
            should use for offset committing and fetching.
        :type consumer_group: str
        :param partitions: Existing partitions to which to connect
        :type partitions: Iterable of :class:`pykafka.partition.Partition`
        :param fetch_message_max_bytes: The number of bytes of messages to
            attempt to fetch
        :type fetch_message_max_bytes: int
        :param num_consumer_fetchers: The number of workers used to make
            FetchRequests
        :type num_consumer_fetchers: int
        :param auto_commit_enable: If true, periodically commit to kafka the
            offset of messages already fetched by this consumer. This also
            requires that `consumer_group` is not `None`.
        :type auto_commit_enable: bool
        :param auto_commit_interval_ms: The frequency (in milliseconds) at which the
            consumer offsets are committed to kafka. This setting is ignored if
            `auto_commit_enable` is `False`.
        :type auto_commit_interval_ms: int
        :param queued_max_messages: Maximum number of messages buffered for
            consumption
        :type queued_max_messages: int
        :param fetch_min_bytes: The minimum amount of data (in bytes) the server
            should return for a fetch request. If insufficient data is available
            the request will block until sufficient data is available.
        :type fetch_min_bytes: int
        :param fetch_wait_max_ms: The maximum amount of time (in milliseconds)
            the server will block before answering the fetch request if there
            isn't sufficient data to immediately satisfy `fetch_min_bytes`.
        :type fetch_wait_max_ms: int
        :param offsets_channel_backoff_ms: Backoff time (in milliseconds) to
            retry offset commits/fetches
        :type offsets_channel_backoff_ms: int
        :param offsets_commit_max_retries: Retry the offset commit up to this
            many times on failure.
        :type offsets_commit_max_retries: int
        :param auto_offset_reset: What to do if an offset is out of range. This
            setting indicates how to reset the consumer's internal offset
            counter when an `OffsetOutOfRangeError` is encountered.
        :type auto_offset_reset: :class:`pykafka.common.OffsetType`
        :param consumer_timeout_ms: Amount of time (in milliseconds) the
            consumer may spend without messages available for consumption
            before returning None.
        :type consumer_timeout_ms: int
        :param auto_start: Whether the consumer should begin communicating
            with kafka after __init__ is complete. If false, communication
            can be started with `start()`.
        :type auto_start: bool
        :param reset_offset_on_start: Whether the consumer should reset its
            internal offset counter to `self._auto_offset_reset` and commit that
            offset immediately upon starting up
        :type reset_offset_on_start: bool
        """
        self._cluster = cluster
        self._consumer_group = consumer_group
        self._topic = topic
        self._fetch_message_max_bytes = fetch_message_max_bytes
        self._fetch_min_bytes = fetch_min_bytes
        self._queued_max_messages = queued_max_messages
        self._num_consumer_fetchers = num_consumer_fetchers
        self._fetch_wait_max_ms = fetch_wait_max_ms
        self._consumer_timeout_ms = consumer_timeout_ms
        self._offsets_channel_backoff_ms = offsets_channel_backoff_ms
        self._auto_offset_reset = auto_offset_reset
        self._offsets_commit_max_retries = offsets_commit_max_retries
        # not directly configurable
        self._offsets_fetch_max_retries = offsets_commit_max_retries
        self._offsets_reset_max_retries = offsets_commit_max_retries
        self._auto_start = auto_start
        self._reset_offset_on_start = reset_offset_on_start
        # incremented for any message arrival from any partition
        # the initial value is 0 (no messages waiting)
        self._messages_arrived = Semaphore(value=0)
        self._auto_commit_enable = auto_commit_enable
        self._auto_commit_interval_ms = auto_commit_interval_ms
        self._last_auto_commit = time.time()
        self._discover_offset_manager()
        if partitions:
            self._partitions = {p: OwnedPartition(p, self._messages_arrived)
                                for p in partitions}
        else:
            self._partitions = {topic.partitions[k]:
                                OwnedPartition(p, self._messages_arrived)
                                for k, p in topic.partitions.iteritems()}
        self._partitions_by_id = {p.partition.id: p
                                  for p in self._partitions.itervalues()}
        # Organize partitions by leader for efficient queries
        self._partitions_by_leader = defaultdict(list)
        for p in self._partitions.itervalues():
            self._partitions_by_leader[p.partition.leader].append(p)
        # round-robin cycle used by consume() to drain partitions fairly
        self.partition_cycle = itertools.cycle(self._partitions.values())
        self._default_error_handlers = self._build_default_error_handlers()
        self._running = False
        if self._auto_start:
            self.start()

    def __repr__(self):
        return "<{module}.{name} at {id_} (consumer_group={group})>".format(
            module=self.__class__.__module__,
            name=self.__class__.__name__,
            id_=hex(id(self)),
            group=self._consumer_group
        )

    def start(self):
        """Begin communicating with Kafka, including setting up worker threads

        Fetches offsets, starts an offset autocommitter worker pool, and
        starts a message fetcher worker pool.
        """
        self._running = True
        # Figure out which offset we're starting on
        if self._reset_offset_on_start:
            self.reset_offsets()
        elif self._consumer_group is not None:
            self.fetch_offsets()
        self._fetch_workers = self._setup_fetch_workers()
        if self._auto_commit_enable:
            self._autocommit_worker_thread = self._setup_autocommit_worker()

    def _build_default_error_handlers(self):
        """Set up the error handlers to use for partition errors.

        :returns: dict mapping kafka error codes to handler callables, each
            taking a list of (owned_partition, response) pairs
        """
        def _handle_OffsetOutOfRangeError(parts):
            log.info("Resetting offsets in response to OffsetOutOfRangeError")
            self.reset_offsets(
                partition_offsets=[(owned_partition.partition, self._auto_offset_reset)
                                   for owned_partition, pres in parts]
            )

        def _handle_NotCoordinatorForConsumer(parts):
            # the offset manager moved; rediscover it before retrying
            self._discover_offset_manager()

        return {
            UnknownTopicOrPartition.ERROR_CODE: lambda p: raise_error(UnknownTopicOrPartition),
            OffsetOutOfRangeError.ERROR_CODE: _handle_OffsetOutOfRangeError,
            OffsetMetadataTooLarge.ERROR_CODE: lambda p: raise_error(OffsetMetadataTooLarge),
            NotCoordinatorForConsumer.ERROR_CODE: _handle_NotCoordinatorForConsumer
        }

    def _discover_offset_manager(self):
        """Set the offset manager for this consumer.

        If a consumer group is not supplied to __init__, this method does nothing
        """
        if self._consumer_group is not None:
            self._offset_manager = self._cluster.get_offset_manager(self._consumer_group)

    @property
    def topic(self):
        """The topic this consumer consumes"""
        return self._topic

    @property
    def partitions(self):
        """A list of the partitions that this consumer consumes"""
        return {id_: partition.partition
                for id_, partition in self._partitions_by_id.iteritems()}

    @property
    def held_offsets(self):
        """Return a map from partition id to held offset for each partition"""
        return {p.partition.id: p.last_offset_consumed
                for p in self._partitions_by_id.itervalues()}

    def __del__(self):
        """Stop consumption and workers when object is deleted"""
        self.stop()

    def stop(self):
        """Flag all running workers for deletion."""
        self._running = False

    def _setup_autocommit_worker(self):
        """Start the autocommitter thread"""
        def autocommitter():
            while True:
                if not self._running:
                    break
                if self._auto_commit_enable:
                    self._auto_commit()
                time.sleep(self._auto_commit_interval_ms / 1000)
            log.debug("Autocommitter thread exiting")
        log.debug("Starting autocommitter thread")
        return self._cluster.handler.spawn(autocommitter)

    def _setup_fetch_workers(self):
        """Start the fetcher threads"""
        def fetcher():
            while True:
                if not self._running:
                    break
                self.fetch()
                time.sleep(.0001)
            log.debug("Fetcher thread exiting")
        log.info("Starting %s fetcher threads", self._num_consumer_fetchers)
        return [self._cluster.handler.spawn(fetcher)
                for i in xrange(self._num_consumer_fetchers)]

    def __iter__(self):
        """Yield an infinite stream of messages until the consumer times out"""
        while True:
            message = self.consume(block=True)
            if not message:
                # NOTE(fix): `return` instead of `raise StopIteration` --
                # identical semantics in a generator, and forward-compatible
                # with PEP 479 where raising StopIteration becomes an error.
                return
            yield message

    def consume(self, block=True):
        """Get one message from the consumer.

        :param block: Whether to block while waiting for a message
        :type block: bool
        """
        timeout = None
        if block:
            if self._consumer_timeout_ms > 0:
                timeout = float(self._consumer_timeout_ms) / 1000
            else:
                # poll in 1s intervals so a stop() is noticed promptly
                timeout = 1.0

        while True:
            if self._messages_arrived.acquire(blocking=block, timeout=timeout):
                # by passing through this semaphore, we know that at
                # least one message is waiting in some queue.
                message = None
                while not message:
                    owned_partition = self.partition_cycle.next()
                    message = owned_partition.consume()
                return message
            else:
                if not self._running:
                    raise ConsumerStoppedException()
                elif not block or self._consumer_timeout_ms > 0:
                    return None

    def _auto_commit(self):
        """Commit offsets only if it's time to do so"""
        if not self._auto_commit_enable or self._auto_commit_interval_ms == 0:
            return

        if (time.time() - self._last_auto_commit) * 1000.0 >= self._auto_commit_interval_ms:
            log.info("Autocommitting consumer offset for consumer group %s and topic %s",
                     self._consumer_group, self._topic.name)
            if self._consumer_group is not None:
                self.commit_offsets()
            self._last_auto_commit = time.time()

    def commit_offsets(self):
        """Commit offsets for this consumer's partitions

        Uses the offset commit/fetch API
        """
        if not self._consumer_group:
            raise Exception("consumer group must be specified to commit offsets")

        reqs = [p.build_offset_commit_request() for p in self._partitions.values()]
        log.debug("Committing offsets for %d partitions to broker id %s", len(reqs),
                  self._offset_manager.id)
        for i in xrange(self._offsets_commit_max_retries):
            if i > 0:
                log.debug("Retrying")
            time.sleep(i * (self._offsets_channel_backoff_ms / 1000))

            response = self._offset_manager.commit_consumer_group_offsets(
                self._consumer_group, 1, 'pykafka', reqs)
            parts_by_error = handle_partition_responses(
                self._default_error_handlers,
                response=response,
                partitions_by_id=self._partitions_by_id)
            if len(parts_by_error) == 1 and 0 in parts_by_error:
                break
            log.error("Error committing offsets for topic %s (errors: %s)",
                      self._topic.name,
                      {ERROR_CODES[err]: [op.partition.id for op, _ in parts]
                       for err, parts in parts_by_error.iteritems()})

            # retry only the partitions that errored
            if 0 in parts_by_error:
                parts_by_error.pop(0)
            errored_partitions = [op for code, err_group in parts_by_error.iteritems()
                                  for op, res in err_group]
            reqs = [p.build_offset_commit_request() for p in errored_partitions]

    def fetch_offsets(self):
        """Fetch offsets for this consumer's topic

        Uses the offset commit/fetch API

        :return: List of (id, :class:`pykafka.protocol.OffsetFetchPartitionResponse`)
            tuples
        """
        if not self._consumer_group:
            raise Exception("consumer group must be specified to fetch offsets")

        def _handle_success(parts):
            for owned_partition, pres in parts:
                log.debug("Set offset for partition %s to %s",
                          owned_partition.partition.id,
                          pres.offset)
                owned_partition.set_offset(pres.offset)

        reqs = [p.build_offset_fetch_request() for p in self._partitions.values()]
        success_responses = []

        log.debug("Fetching offsets for %d partitions from broker id %s", len(reqs),
                  self._offset_manager.id)

        for i in xrange(self._offsets_fetch_max_retries):
            if i > 0:
                log.debug("Retrying offset fetch")

            res = self._offset_manager.fetch_consumer_group_offsets(self._consumer_group, reqs)
            parts_by_error = handle_partition_responses(
                self._default_error_handlers,
                response=res,
                success_handler=_handle_success,
                partitions_by_id=self._partitions_by_id)
            success_responses.extend([(op.partition.id, r)
                                      for op, r in parts_by_error.get(0, [])])
            if len(parts_by_error) == 1 and 0 in parts_by_error:
                return success_responses
            log.error("Error fetching offsets for topic %s (errors: %s)",
                      self._topic.name,
                      {ERROR_CODES[err]: [op.partition.id for op, _ in parts]
                       for err, parts in parts_by_error.iteritems()})

            time.sleep(i * (self._offsets_channel_backoff_ms / 1000))

            # retry only specific error responses
            to_retry = []
            to_retry.extend(parts_by_error.get(OffsetsLoadInProgress.ERROR_CODE, []))
            to_retry.extend(parts_by_error.get(NotCoordinatorForConsumer.ERROR_CODE, []))
            reqs = [p.build_offset_fetch_request() for p, _ in to_retry]

    def reset_offsets(self, partition_offsets=None):
        """Reset offsets for the specified partitions

        Issue an OffsetRequest for each partition and set the appropriate
        returned offset in the consumer's internal offset counter.

        :param partition_offsets: (`partition`, `timestamp_or_offset`) pairs to
            reset where `partition` is the partition for which to reset the offset
            and `timestamp_or_offset` is EITHER the timestamp of the message
            whose offset the partition should have OR the new offset the
            partition should have
        :type partition_offsets: Iterable of
            (:class:`pykafka.partition.Partition`, int)

        NOTE: If an instance of `timestamp_or_offset` is treated by kafka as
        an invalid offset timestamp, this function directly sets the consumer's
        internal offset counter for that partition to that instance of
        `timestamp_or_offset`. On the next fetch request, the consumer attempts
        to fetch messages starting from that offset. See the following link
        for more information on what kafka treats as a valid offset timestamp:
        https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetRequest
        """
        def _handle_success(parts):
            for owned_partition, pres in parts:
                if len(pres.offset) > 0:
                    # offset requests return the next offset to consume,
                    # so account for this here by passing offset - 1
                    owned_partition.set_offset(pres.offset[0] - 1)
                else:
                    # If the number specified in partition_offsets is an invalid
                    # timestamp value for the partition, kafka does the
                    # following:
                    #   returns an empty array in pres.offset
                    #   returns error code 0
                    # Here, we detect this case and set the consumer's internal
                    # offset to that value. Thus, the next fetch request will
                    # attempt to fetch from that offset. If it succeeds, all is
                    # well; if not, reset_offsets is called again by the error
                    # handlers in fetch() and fetching continues from
                    # self._auto_offset_reset..
                    # This amounts to a hacky way to support user-specified
                    # offsets in reset_offsets by working around a bug or bad
                    # design decision in kafka.
                    given_offset = owned_partition_offsets[owned_partition]
                    log.warning(
                        "Offset reset for partition {id_} to timestamp {offset}"
                        " failed. Setting partition {id_}'s internal counter"
                        " to {offset}".format(
                            id_=owned_partition.partition.id, offset=given_offset))
                    owned_partition.set_offset(given_offset)
                # release locks on succeeded partitions to allow fetching
                # to resume
                owned_partition.fetch_lock.release()

        if partition_offsets is None:
            partition_offsets = [(a, self._auto_offset_reset)
                                 for a in self._partitions.keys()]

        # turn Partitions into their corresponding OwnedPartitions
        try:
            owned_partition_offsets = {self._partitions[p]: offset
                                       for p, offset in partition_offsets}
        except KeyError as e:
            # NOTE(fix): format the message eagerly -- previously the format
            # string and argument were passed as separate exception args and
            # were never interpolated.
            raise KafkaException("Unknown partition supplied to reset_offsets\n%s" % e)

        log.info("Resetting offsets for %s partitions", len(list(owned_partition_offsets)))
        for i in xrange(self._offsets_reset_max_retries):
            # group partitions by leader
            by_leader = defaultdict(list)
            for partition, offset in owned_partition_offsets.iteritems():
                # acquire lock for each partition to stop fetching during offset
                # reset
                if partition.fetch_lock.acquire(True):
                    # empty the queue for this partition to avoid sending
                    # emitting messages from the old offset
                    partition.flush()
                by_leader[partition.partition.leader].append((partition, offset))

            # get valid offset ranges for each partition
            for broker, offsets in by_leader.iteritems():
                reqs = [owned_partition.build_offset_request(offset)
                        for owned_partition, offset in offsets]
                response = broker.request_offset_limits(reqs)
                parts_by_error = handle_partition_responses(
                    self._default_error_handlers,
                    response=response,
                    success_handler=_handle_success,
                    partitions_by_id=self._partitions_by_id)

                if 0 in parts_by_error:
                    # drop successfully reset partitions for next retry
                    successful = [part for part, _ in parts_by_error.pop(0)]
                    map(owned_partition_offsets.pop, successful)
                if not parts_by_error:
                    continue
                log.error("Error resetting offsets for topic %s (errors: %s)",
                          self._topic.name,
                          {ERROR_CODES[err]: [op.partition.id for op, _ in parts]
                           for err, parts in parts_by_error.iteritems()})

                time.sleep(i * (self._offsets_channel_backoff_ms / 1000))

                for errcode, owned_partitions in parts_by_error.iteritems():
                    if errcode != 0:
                        for owned_partition in owned_partitions:
                            owned_partition.fetch_lock.release()

            if not owned_partition_offsets:
                break
            log.debug("Retrying offset reset")

        if owned_partition_offsets:
            # NOTE(fix): format the message eagerly (same issue as above).
            raise OffsetRequestFailedError("reset_offsets failed after %d retries"
                                           % self._offsets_reset_max_retries)

        if self._consumer_group is not None:
            self.commit_offsets()

    def fetch(self):
        """Fetch new messages for all partitions

        Create a FetchRequest for each broker and send it. Enqueue each of the
        returned messages in the approprate OwnedPartition.
        """
        def _handle_success(parts):
            for owned_partition, pres in parts:
                if len(pres.messages) > 0:
                    log.debug("Fetched %s messages for partition %s",
                              len(pres.messages), owned_partition.partition.id)
                    owned_partition.enqueue_messages(pres.messages)
                    log.debug("Partition %s queue holds %s messages",
                              owned_partition.partition.id,
                              owned_partition.message_count)

        for broker, owned_partitions in self._partitions_by_leader.iteritems():
            partition_reqs = {}
            for owned_partition in owned_partitions:
                # attempt to acquire lock, just pass if we can't
                if owned_partition.fetch_lock.acquire(False):
                    partition_reqs[owned_partition] = None
                    if owned_partition.message_count < self._queued_max_messages:
                        fetch_req = owned_partition.build_fetch_request(
                            self._fetch_message_max_bytes)
                        partition_reqs[owned_partition] = fetch_req
                    else:
                        log.debug("Partition %s above max queued count (queue has %d)",
                                  owned_partition.partition.id,
                                  owned_partition.message_count)
            if partition_reqs:
                try:
                    response = broker.fetch_messages(
                        [a for a in partition_reqs.itervalues() if a],
                        timeout=self._fetch_wait_max_ms,
                        min_bytes=self._fetch_min_bytes
                    )
                except SocketDisconnectedError:
                    # If the broker dies while we're supposed to stop,
                    # it's fine, and probably an integration test.
                    if not self._running:
                        return
                    else:
                        raise
                parts_by_error = build_parts_by_error(response, self._partitions_by_id)
                # release the lock in these cases, since resolving the error
                # requires an offset reset and not releasing the lock would
                # lead to a deadlock in reset_offsets. For successful requests
                # or requests with different errors, we still assume that
                # it's ok to retain the lock since no offset_reset can happen
                # before this function returns
                out_of_range = parts_by_error.get(OffsetOutOfRangeError.ERROR_CODE, [])
                for owned_partition, res in out_of_range:
                    owned_partition.fetch_lock.release()
                    # remove them from the dict of partitions to unlock to avoid
                    # double-unlocking
                    partition_reqs.pop(owned_partition)
                # handle the rest of the errors that don't require deadlock
                # management
                handle_partition_responses(
                    self._default_error_handlers,
                    parts_by_error=parts_by_error,
                    success_handler=_handle_success)
                # unlock the rest of the partitions
                for owned_partition in partition_reqs.iterkeys():
                    owned_partition.fetch_lock.release()
class OwnedPartition(object):
    """A partition that is owned by a SimpleConsumer.

    Used to keep track of offsets and the internal message queue.
    """
    def __init__(self,
                 partition,
                 semaphore=None):
        """
        :param partition: The partition to hold
        :type partition: :class:`pykafka.partition.Partition`
        :param semaphore: A Semaphore that counts available messages and
            facilitates non-busy blocking
        :type semaphore: :class:`pykafka.utils.compat.Semaphore`
        """
        self.partition = partition
        # internal FIFO of fetched-but-unconsumed messages
        self._messages = Queue()
        # shared with the owning consumer; released once per enqueued message
        self._messages_arrived = semaphore
        self.last_offset_consumed = 0
        self.next_offset = 0
        # held while a fetch for this partition is in flight, and taken by
        # reset_offsets to suspend fetching
        self.fetch_lock = threading.Lock()

    @property
    def message_count(self):
        """Count of messages currently in this partition's internal queue"""
        return self._messages.qsize()

    def flush(self):
        """Flush internal queue"""
        # Swap out _messages so a concurrent consume/enqueue won't interfere
        tmp = self._messages
        self._messages = Queue()
        while True:
            try:
                tmp.get_nowait()
                # keep the shared semaphore consistent with the number of
                # messages actually queued: one non-blocking acquire per
                # discarded message
                self._messages_arrived.acquire(blocking=False)
            except Empty:
                break
        log.info("Flushed queue for partition %d", self.partition.id)

    def set_offset(self, last_offset_consumed):
        """Set the internal offset counters

        :param last_offset_consumed: The last committed offset for this
            partition
        :type last_offset_consumed: int
        """
        self.last_offset_consumed = last_offset_consumed
        self.next_offset = last_offset_consumed + 1

    def build_offset_request(self, new_offset):
        """Create a :class:`pykafka.protocol.PartitionOffsetRequest` for this
            partition

        :param new_offset: The offset to which to set this partition. This
            setting indicates how to reset the consumer's internal offset
            counter when an OffsetOutOfRangeError is encountered.
            There are two special values. Specify -1 to receive the latest
            offset (i.e. the offset of the next coming message) and -2 to
            receive the earliest available offset.
        :type new_offset: :class:`pykafka.common.OffsetType` or int
        """
        return PartitionOffsetRequest(
            self.partition.topic.name, self.partition.id,
            new_offset, 1)

    def build_fetch_request(self, max_bytes):
        """Create a :class:`pykafka.protocol.FetchPartitionRequest` for this
            partition.

        :param max_bytes: The number of bytes of messages to
            attempt to fetch
        :type max_bytes: int
        """
        return PartitionFetchRequest(
            self.partition.topic.name, self.partition.id,
            self.next_offset, max_bytes)

    def build_offset_commit_request(self):
        """Create a :class:`pykafka.protocol.PartitionOffsetCommitRequest`
            for this partition
        """
        return PartitionOffsetCommitRequest(
            self.partition.topic.name,
            self.partition.id,
            self.last_offset_consumed,
            int(time.time() * 1000),
            'pykafka'
        )

    def build_offset_fetch_request(self):
        """Create a PartitionOffsetFetchRequest for this partition
        """
        return PartitionOffsetFetchRequest(
            self.partition.topic.name,
            self.partition.id
        )

    def consume(self):
        """Get a single message from this partition"""
        try:
            message = self._messages.get_nowait()
            self.last_offset_consumed = message.offset
            return message
        except Empty:
            # no message buffered; caller decides whether to try another
            # partition or block
            return None

    def enqueue_messages(self, messages):
        """Put a set of messages into the internal message queue

        :param messages: The messages to enqueue
        :type messages: Iterable of :class:`pykafka.common.Message`
        """
        for message in messages:
            # drop messages the broker re-sent that we already consumed
            if message.offset < self.last_offset_consumed:
                log.debug("Skipping enqueue for offset (%s) "
                          "less than last_offset_consumed (%s)",
                          message.offset, self.last_offset_consumed)
                continue
            message.partition = self.partition
            if message.partition_id != self.partition.id:
                log.error("Partition %s enqueued a message meant for partition %s",
                          self.partition.id, message.partition_id)
            message.partition_id = self.partition.id
            self._messages.put(message)
            self.next_offset = message.offset + 1
            # signal the owning consumer that a message is available
            if self._messages_arrived is not None:
                self._messages_arrived.release()
| |
"""Correlogram PSD estimates
.. topic:: This module provides Correlograms methods
.. autosummary::
CORRELOGRAMPSD
pcorrelogram
.. codeauthor:: Thomas Cokelaer 2011
:References: See [Marple]_
"""
import numpy
from spectrum.correlation import CORRELATION, xcorr
from spectrum.window import Window
from numpy.fft import fft
from spectrum.psd import FourierSpectrum
from spectrum import tools
__all__ = ["CORRELOGRAMPSD", "pcorrelogram"]
def CORRELOGRAMPSD(X, Y=None, lag=-1, window='hamming',
                   norm='unbiased', NFFT=4096, window_params={},
                   correlation_method='xcorr'):
    r"""PSD estimate using correlogram method.
    :param array X: complex or real data samples X(1) to X(N)
    :param array Y: complex data samples Y(1) to Y(N). If provided, computes
        the cross PSD, otherwise the PSD is returned
    :param int lag: highest lag index to compute. Must be less than N
    :param str window_name: see :mod:`window` for list of valid names
    :param str norm: one of the valid normalisation of :func:`xcorr` (biased,
        unbiased, coeff, None)
    :param int NFFT: total length of the final data sets (padded with zero
        if needed; default is 4096)
    :param str correlation_method: either `xcorr` or `CORRELATION`.
        CORRELATION should be removed in the future.
    :return:
        * Array of real (cross) power spectral density estimate values. This is
          a two sided array with negative values following the positive ones
          whatever is the input data (real or complex).
    .. rubric:: Description:
    The exact power spectral density is the Fourier transform of the
    autocorrelation sequence:
    .. math:: P_{xx}(f) = T \sum_{m=-\infty}^{\infty} r_{xx}[m] exp^{-j2\pi fmT}
    The correlogram method of PSD estimation substitutes a finite sequence of
    autocorrelation estimates :math:`\hat{r}_{xx}` in place of :math:`r_{xx}`.
    This estimation can be computed with :func:`xcorr` or :func:`CORRELATION` by
    choosing a proper lag `L`. The estimated PSD is then
    .. math:: \hat{P}_{xx}(f) = T \sum_{m=-L}^{L} \hat{r}_{xx}[m] exp^{-j2\pi fmT}
    The lag index must be less than the number of data samples `N`. Ideally, it
    should be around `L/10` [Marple]_ so as to avoid greater statistical
    variance associated with higher lags.
    To reduce the leakage of the implicit rectangular window and therefore to
    reduce the bias in the estimate, a tapering window is normally used and lead
    to the so-called Blackman and Tukey correlogram:
    .. math:: \hat{P}_{BT}(f) = T \sum_{m=-L}^{L} w[m] \hat{r}_{xx}[m] exp^{-j2\pi fmT}
    The correlogram for the cross power spectral estimate is
    .. math:: \hat{P}_{xx}(f) = T \sum_{m=-L}^{L} \hat{r}_{xx}[m] exp^{-j2\pi fmT}
    which is computed if :attr:`Y` is not provided. In such case,
    :math:`r_{yx} = r_{xy}` so we compute the correlation only once.
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import CORRELOGRAMPSD, marple_data
        from spectrum.tools import cshift
        from pylab import log10, axis, grid, plot,linspace
        psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15)
        f = linspace(-0.5, 0.5, len(psd))
        psd = cshift(psd, len(psd)/2)
        plot(f, 10*log10(psd/max(psd)))
        axis([-0.5,0.5,-50,0])
        grid(True)
    .. seealso:: :func:`create_window`, :func:`CORRELATION`, :func:`xcorr`,
        :class:`pcorrelogram`.
    """
    N = len(X)
    assert lag<N, 'lag must be < size of input data'
    assert correlation_method in ['CORRELATION', 'xcorr']
    if Y is None:
        # autocorrelation case: reuse X (copied so X is never mutated)
        Y = numpy.array(X)
        crosscorrelation = False
    else:
        crosscorrelation = True
    if NFFT is None:
        NFFT = N
    psd = numpy.zeros(NFFT, dtype=complex)
    # Window should be centered around zero. Moreover, we want only the
    # positive values. So, we need to use 2*lag + 1 window and keep values on
    # the right side.
    w = Window(2*lag+1, window, **window_params)
    w = w.data[lag+1:]
    # compute the cross correlation
    if correlation_method == 'CORRELATION':
        rxy = CORRELATION (X, Y, maxlags=lag, norm=norm)
    elif correlation_method == 'xcorr':
        # xcorr returns lags -lag..lag; keep the non-negative half only
        rxy, _l = xcorr (X, Y, maxlags=lag, norm=norm)
        rxy = rxy[lag:]
    # keep track of the first elt.
    psd[0] = rxy[0]
    # create the first part of the PSD (positive lags, tapered)
    psd[1:lag+1] = rxy[1:] * w
    # create the second part.
    # First, we need to compute the auto or cross correlation ryx
    if crosscorrelation is True:
        # compute the cross correlation
        if correlation_method == 'CORRELATION':
            ryx = CORRELATION(Y, X, maxlags=lag, norm=norm)
        elif correlation_method == 'xcorr':
            ryx, _l = xcorr(Y, X, maxlags=lag, norm=norm)
            ryx = ryx[lag:]
        #print len(ryx), len(psd[-1:NPSD-lag-1:-1])
        # negative lags go at the tail of the FFT buffer, in reverse order
        psd[-1:NFFT-lag-1:-1] = ryx[1:].conjugate() * w
    else: #autocorrelation no additional correlation call required
        psd[-1:NFFT-lag-1:-1] = rxy[1:].conjugate() * w
    # the imaginary part is zero up to roundoff, so only the real part is kept
    psd = numpy.real(fft(psd))
    return psd
class pcorrelogram(FourierSpectrum):
    """Object-oriented interface to :func:`CORRELOGRAMPSD`.

    Inherits from :class:`FourierSpectrum`, which eases plotting and
    manipulation of the resulting PSD.

    .. plot::
        :width: 80%
        :include-source:
        from spectrum import pcorrelogram, data_cosine
        p = pcorrelogram(data_cosine(N=1024), lag=15)
        p.plot()
        p.plot(sides='twosided')
    """
    def __init__(self, data, sampling=1., lag=-1,
                 window='hamming', NFFT=None, scale_by_freq=True,
                 detrend=None):
        """**Correlogram Constructor**
        :param array data: input data (list or numpy.array)
        :param float sampling: sampling frequency of the input :attr:`data`.
        :param int lag:
        :param str window: a tapering window. See :class:`~spectrum.window.Window`.
        :param int NFFT: total length of the final data sets (padded with
            zero if needed; default is 4096)
        :param bool scale_by_freq:
        :param str detrend:
        """
        super(pcorrelogram, self).__init__(
            data,
            window=window,
            sampling=sampling,
            NFFT=NFFT,
            scale_by_freq=scale_by_freq,
            lag=lag,
            detrend=detrend)
    def __call__(self):
        # scale_by_freq is handled by self.scale(), not by CORRELOGRAMPSD
        estimate = CORRELOGRAMPSD(self.data, self.data_y,
                                  lag=self.lag,
                                  window=self.window,
                                  NFFT=self.NFFT)
        if self.datatype == 'real':
            #FIXME. do we want to use same syntax/code as in burg/pminvar/pcovar
            # to handle odd data ?
            self.psd = tools.twosided_2_onesided(estimate)
        else:
            self.psd = estimate
        self.scale()
        return self
    def _str_title(self):
        return "Correlogram PSD estimate\n"
    def __str__(self):
        return super(pcorrelogram, self).__str__()
| |
"""
Copyright 2017 The Johns Hopkins University Applied Physics Laboratory LLC
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'jhuapl'
__version__ = 0.1
import json
import os
import errno
import numpy as np
import string
import dateutil.parser as dparser
from PIL import Image
from sklearn.utils import class_weight
from keras.preprocessing import image
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from tqdm import tqdm
import warnings
import cv2
def prepare_data(params):
    """
    Saves sub images, converts metadata to feature vectors and saves in JSON files,
    calculates dataset statistics, and keeps track of saved files so they can be loaded as batches
    while training the CNN.
    :param params: global parameters, used to find location of the dataset and json file
    :return:
    """
    # suppress decompression bomb warnings for Pillow
    warnings.simplefilter('ignore', Image.DecompressionBombWarning)
    walkDirs = ['train', 'val', 'test']
    executor = ProcessPoolExecutor(max_workers=params.num_workers)
    futures = []
    # Workers only need a small, picklable subset of the global parameters.
    paramsDict = vars(params)
    keysToKeep = ['image_format', 'target_img_size', 'metadata_length', 'category_names']
    paramsDict = {keepKey: paramsDict[keepKey] for keepKey in keysToKeep}
    for currDir in walkDirs:
        # 'train' and 'val' sub images both go to the training output directory.
        isTrain = (currDir == 'train') or (currDir == 'val')
        if isTrain:
            outDir = params.directories['train_data']
        else:
            outDir = params.directories['test_data']
        print('Queuing sequences in: ' + currDir)
        for root, dirs, files in tqdm(os.walk(os.path.join(params.directories['dataset'], currDir))):
            if len(files) > 0:
                slashes = [i for i, ltr in enumerate(root) if ltr == '/']
                for file in files:
                    if file.endswith('_rgb.json'):  # skip _msrgb images
                        task = partial(_process_file, file, slashes, root, isTrain, outDir, paramsDict)
                        futures.append(executor.submit(task))
    print('Wait for all preprocessing tasks to complete...')
    results = []
    # Fixed: was a side-effecting list comprehension; a plain loop is the
    # idiomatic form for accumulation with side effects.
    for future in tqdm(futures):
        results.extend(future.result())
    allTrainFeatures = [np.array(r[0]) for r in results if r[0] is not None]
    metadataTrainSum = np.zeros(params.metadata_length)
    for features in allTrainFeatures:
        metadataTrainSum += features
    trainingData = [r[1] for r in results if r[1] is not None]
    trainCount = len(trainingData)
    testData = [r[2] for r in results if r[2] is not None]
    # Shutdown the executor and free resources
    executor.shutdown()
    # Per-feature mean, and max absolute deviation from the mean (used later
    # to normalize metadata features into roughly [-1, 1]).
    metadataMean = metadataTrainSum / trainCount
    metadataMax = np.zeros(params.metadata_length)
    for currFeat in allTrainFeatures:
        currFeat = currFeat - metadataMean
        for i in range(params.metadata_length):
            if abs(currFeat[i]) > metadataMax[i]:
                metadataMax[i] = abs(currFeat[i])
    # Avoid division by zero for constant features.
    for i in range(params.metadata_length):
        if metadataMax[i] == 0:
            metadataMax[i] = 1.0
    metadataStats = {}
    metadataStats['metadata_mean'] = metadataMean.tolist()
    metadataStats['metadata_max'] = metadataMax.tolist()
    # Fixed: use context managers so the output files are flushed and closed.
    with open(params.files['test_struct'], 'w') as fp:
        json.dump(testData, fp)
    with open(params.files['training_struct'], 'w') as fp:
        json.dump(trainingData, fp)
    with open(params.files['dataset_stats'], 'w') as fp:
        json.dump(metadataStats, fp)
def _process_file(file, slashes, root, isTrain, outDir, params):
    """
    Helper for prepare_data that actually loads and resizes each image and computes
    feature vectors. This function is designed to be called in parallel for each file
    :param file: file to process (a *_rgb.json metadata file)
    :param slashes: location of slashes from root walk path
    :param root: root walk path
    :param isTrain: flag on whether or not the current file is from the train set
    :param outDir: output directory for processed data
    :param params: dict of the global parameters with only the necessary fields
    :return (allFeatures, allTrainResults, allTestResults)
    """
    noResult = [(None, None, None)]
    # strip the '.json' suffix to get the sample's base name
    baseName = file[:-5]
    imgFile = baseName + '.' + params['image_format']
    if not os.path.isfile(os.path.join(root, imgFile)):
        return noResult
    try:
        # img = image.load_img(os.path.join(root, imgFile))
        # img = image.img_to_array(img)
        # cv2.imread returns None for unreadable files, making .astype raise.
        img = cv2.imread(os.path.join(root, imgFile)).astype(np.float32)
    except:
        return noResult
    jsonData = json.load(open(os.path.join(root, file)))
    # normalize: a single bounding box may be stored as a bare dict
    if not isinstance(jsonData['bounding_boxes'], list):
        jsonData['bounding_boxes'] = [jsonData['bounding_boxes']]
    allResults = []
    for bb in jsonData['bounding_boxes']:
        if isTrain:
            category = bb['category']
        # box is [x, y, width, height] in pixel coordinates (from usage below)
        box = bb['box']
        outBaseName = '%d' % bb['ID']
        if isTrain:
            outBaseName = ('%s_' % category) + outBaseName
        # train paths keep three trailing path components, test paths keep two
        if isTrain:
            currOut = os.path.join(outDir, root[slashes[-3] + 1:], outBaseName)
        else:
            currOut = os.path.join(outDir, root[slashes[-2] + 1:], outBaseName)
        if not os.path.isdir(currOut):
            try:
                os.makedirs(currOut)
            except OSError as e:
                # another worker may have created it concurrently
                if e.errno == errno.EEXIST:
                    pass
        featuresPath = os.path.join(currOut, baseName + '_features.json')
        imgPath = os.path.join(currOut, imgFile)
        # don't train on tiny boxes
        if box[2] <= 2 or box[3] <= 2:
            continue
        # train with context around box
        contextMultWidth = 0.15
        contextMultHeight = 0.15
        # NOTE(review): box width is divided by img.shape[0] (rows/height) and
        # box height by img.shape[1] (cols/width) -- this looks like the axes
        # are swapped; confirm against the original fMoW baseline.
        wRatio = float(box[2]) / img.shape[0]
        hRatio = float(box[3]) / img.shape[1]
        # smaller boxes get proportionally more surrounding context
        if wRatio < 0.5 and wRatio >= 0.4:
            contextMultWidth = 0.2
        if wRatio < 0.4 and wRatio >= 0.3:
            contextMultWidth = 0.3
        if wRatio < 0.3 and wRatio >= 0.2:
            contextMultWidth = 0.5
        if wRatio < 0.2 and wRatio >= 0.1:
            contextMultWidth = 1
        if wRatio < 0.1:
            contextMultWidth = 2
        if hRatio < 0.5 and hRatio >= 0.4:
            contextMultHeight = 0.2
        if hRatio < 0.4 and hRatio >= 0.3:
            contextMultHeight = 0.3
        if hRatio < 0.3 and hRatio >= 0.2:
            contextMultHeight = 0.5
        if hRatio < 0.2 and hRatio >= 0.1:
            contextMultHeight = 1
        if hRatio < 0.1:
            contextMultHeight = 2
        widthBuffer = int((box[2] * contextMultWidth) / 2.0)
        heightBuffer = int((box[3] * contextMultHeight) / 2.0)
        # expanded crop bounds (rows r1:r2, cols c1:c2), clipped to the image
        r1 = box[1] - heightBuffer
        r2 = box[1] + box[3] + heightBuffer
        c1 = box[0] - widthBuffer
        c2 = box[0] + box[2] + widthBuffer
        if r1 < 0:
            r1 = 0
        if r2 > img.shape[0]:
            r2 = img.shape[0]
        if c1 < 0:
            c1 = 0
        if c2 > img.shape[1]:
            c2 = img.shape[1]
        # skip degenerate crops
        if r1 >= r2 or c1 >= c2:
            continue
        subImg = img[r1:r2, c1:c2, :]
        # subImg = image.array_to_img(subImg)
        # subImg = subImg.resize(params['target_img_size'])
        # subImg.save(imgPath)
        subImg = cv2.resize(subImg, params['target_img_size']).astype(np.uint8)
        cv2.imwrite(imgPath, subImg)
        features = json_to_feature_vector(params, jsonData, bb)
        features = features.tolist()
        json.dump(features, open(featuresPath, 'w'))
        # train results carry (features, train-record, None);
        # test results carry (None, None, test-record)
        if isTrain:
            allResults.append((features, {"features_path": featuresPath, "img_path": imgPath, "category": params['category_names'].index(category)}, None))
        else:
            allResults.append((None, None, {"features_path": featuresPath, "img_path": imgPath}))
    return allResults
def json_to_feature_vector(params, jsonData, bb):
    """Convert one image's JSON metadata plus one bounding box into a flat
    numeric feature vector of length params['metadata_length'].

    Slots 0-8 are individually derived values, slots 9-34 are metadata
    fields (some normalized by 360 or 90 degrees), and slots 35-44 are
    box/image geometry and timestamp extras.
    """
    features = np.zeros(params['metadata_length'], dtype=float)
    features[0] = float(jsonData['gsd'])
    features[1], features[2] = utm_to_xy(jsonData['utm'])
    features[3] = float(jsonData['cloud_cover']) / 100.0
    date = dparser.parse(jsonData['timestamp'])
    features[4] = float(date.year)
    features[5] = float(date.month) / 12.0
    features[6] = float(date.day) / 31.0
    features[7] = float(date.hour) + float(date.minute) / 60.0
    features[8] = 0.0 if jsonData['scan_direction'].lower() == 'forward' else 1.0
    # (slot, metadata key, normalisation divisor); dividing by 1.0 is exact.
    scaled_fields = [
        (9, 'pan_resolution_dbl', 1.0),
        (10, 'pan_resolution_start_dbl', 1.0),
        (11, 'pan_resolution_end_dbl', 1.0),
        (12, 'pan_resolution_min_dbl', 1.0),
        (13, 'pan_resolution_max_dbl', 1.0),
        (14, 'multi_resolution_dbl', 1.0),
        (15, 'multi_resolution_min_dbl', 1.0),
        (16, 'multi_resolution_max_dbl', 1.0),
        (17, 'multi_resolution_start_dbl', 1.0),
        (18, 'multi_resolution_end_dbl', 1.0),
        (19, 'target_azimuth_dbl', 360.0),
        (20, 'target_azimuth_min_dbl', 360.0),
        (21, 'target_azimuth_max_dbl', 360.0),
        (22, 'target_azimuth_start_dbl', 360.0),
        (23, 'target_azimuth_end_dbl', 360.0),
        (24, 'sun_azimuth_dbl', 360.0),
        (25, 'sun_azimuth_min_dbl', 360.0),
        (26, 'sun_azimuth_max_dbl', 360.0),
        (27, 'sun_elevation_min_dbl', 90.0),
        (28, 'sun_elevation_dbl', 90.0),
        (29, 'sun_elevation_max_dbl', 90.0),
        (30, 'off_nadir_angle_dbl', 90.0),
        (31, 'off_nadir_angle_min_dbl', 90.0),
        (32, 'off_nadir_angle_max_dbl', 90.0),
        (33, 'off_nadir_angle_start_dbl', 90.0),
        (34, 'off_nadir_angle_end_dbl', 90.0),
    ]
    for slot, key, divisor in scaled_fields:
        features[slot] = float(jsonData[key]) / divisor
    features[35] = float(bb['box'][2])
    features[36] = float(bb['box'][3])
    features[37] = float(jsonData['img_width'])
    features[38] = float(jsonData['img_height'])
    features[39] = float(date.weekday())
    features[40] = min(features[35], features[36]) / max(features[37], features[38])
    features[41] = features[35] / features[37]
    features[42] = features[36] / features[38]
    features[43] = date.second
    features[44] = 1.0 if len(jsonData['bounding_boxes']) == 1 else 0.0
    return features
def utm_to_xy(zone):
"""
Converts UTM zone to x,y values between 0 and 1.
:param zone: UTM zone (string)
:return (x,y): values between 0 and 1
"""
nums = range(1,61);
letters = string.ascii_lowercase[2:-2]
if len(zone) == 2:
num = int(zone[0:1])
else:
num = int(zone[0:2])
letter = zone[-1].lower()
numIndex = nums.index(num)
letterIndex = letters.index(letter)
x = float(numIndex) / float(len(nums)-1)
y = float(letterIndex) / float(len(letters)-1)
return (x,y)
def get_batch_inds(batch_size, idx, N):
    """
    Split `idx` into consecutive slices of exactly `batch_size` entries.

    The final slice is shifted back so that it ends at N, which means the
    last two batches may overlap (and when N divides evenly, the last batch
    is a duplicate of the previous one -- preserved original behavior).
    :param batch_size: the size of training batches
    :param idx: data to split into batches
    :param N: Maximum size
    :return batchInds: list of slices of idx, each of length batch_size
    """
    batches = []
    start = 0
    while True:
        end = start + batch_size
        if end > N:
            # clamp and shift back so the batch is still full-sized
            end = N
            start = end - batch_size
            batches.append(idx[start:end])
            break
        batches.append(idx[start:end])
        start = end
    return batches
def calculate_class_weights(params):
    """
    Computes balanced class weights for the training data and writes them out
    to a json file.
    :param params: global parameters, used to find location of the dataset and json file
    :return:
    """
    trainingData = json.load(open(params.files['training_struct']))
    # Fixed: removed a leftover per-row debug print(i) (spammed one line per
    # training sample) and an unused `counts` tally.
    ytrain = [currData['category'] for currData in trainingData]
    classWeights = class_weight.compute_class_weight('balanced', np.unique(ytrain), np.array(ytrain))
    with open(params.files['class_weight'], 'w') as json_file:
        json.dump(classWeights.tolist(), json_file)
| |
__author__ = 'M@Campbell'
from ooiservices.app import create_celery_app
from ooiservices.app.main.c2 import _compile_c2_toc
from flask.globals import current_app
import requests
from flask.ext.cache import Cache
CACHE_TIMEOUT = 172800
'''
Create the celery app, and configure it to talk to the redis broker.
Then initialize it.
'''
celery = create_celery_app('PRODUCTION')
celery.config_from_object('ooiservices.app.celeryconfig')
"""
Define the list of processes to run, either on a heartbeat schedule or on demand.
Caches created/utilized:
asset_list
asset_rds
c2_toc
stream_list
event_list
glider_tracks
cam_images
bad_asset_list
vocab_dict
vocab_codes
"""
from ooiservices.app.uframe.assetController import _compile_assets, _compile_bad_assets
from ooiservices.app.uframe.assetController import _compile_events
from ooiservices.app.uframe.controller import dfs_streams
from ooiservices.app.uframe.controller import _compile_glider_tracks
from ooiservices.app.uframe.controller import _compile_cam_images
from ooiservices.app.uframe.controller import _compile_large_format_files
from ooiservices.app.uframe.vocab import _compile_vocab
from ooiservices.app.main.alertsalarms_tools import _compile_asset_rds, get_assets_dict_from_list
@celery.task(name='tasks.compile_assets')
def compile_assets():
try:
print '\n debug - *** tasks - compile_assets()'
with current_app.test_request_context():
print "[+] Starting asset cache reset..."
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_DB': 0})
cache.init_app(current_app)
url = current_app.config['UFRAME_ASSETS_URL'] + '/%s' % ('assets')
payload = requests.get(url)
if payload.status_code is 200:
# Cache assets_list
data = payload.json()
assets, asset_rds = _compile_assets(data)
if "error" not in assets:
cache.set('asset_list', assets, timeout=CACHE_TIMEOUT)
print "[+] Asset list cache reset"
# Cache assets_dict (based on success of _compile_assets returning assets)
assets_dict = get_assets_dict_from_list(assets)
if not assets_dict:
message = 'Warning: get_assets_dict_from_list returned empty assets_dict.'
print '\n debug -- message: ', message
current_app.logger.info(message)
if isinstance(assets_dict, dict):
cache.set('assets_dict', assets_dict, timeout=CACHE_TIMEOUT)
print "[+] Assets dictionary cache reset"
else:
print "[-] Error in Assets dictionary cache update"
else:
print "[-] Error in asset_list and asset_dict cache update"
# Cache assets_rd
if asset_rds:
cache.set('asset_rds', asset_rds, timeout=CACHE_TIMEOUT)
print "[+] Asset reference designators cache reset..."
else:
print "[-] Error in asset_rds cache update"
else:
print "[-] Error in cache update"
except Exception as err:
message = 'compile_assets exception: %s' % err.message
current_app.logger.warning(message)
raise Exception(message)
@celery.task(name='tasks.compile_asset_rds')
def compile_assets_rd():
try:
asset_rds = {}
with current_app.test_request_context():
print "[+] Starting asset reference designators cache reset..."
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_DB': 0})
cache.init_app(current_app)
try:
asset_rds, _ = _compile_asset_rds()
except Exception as err:
message = 'Error processing _compile_asset_rds: ', err.message
current_app.logger.warning(message)
if asset_rds:
cache.set('asset_rds', asset_rds, timeout=CACHE_TIMEOUT)
print "[+] Asset reference designators cache reset..."
else:
print "[-] Error in cache update"
except Exception as err:
message = 'compile_asset_rds exception: %s' % err.message
current_app.logger.warning(message)
raise Exception(message)
@celery.task(name='tasks.compile_streams')
def compile_streams():
try:
with current_app.test_request_context():
print "[+] Starting stream cache reset..."
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_DB': 0})
cache.init_app(current_app)
streams = dfs_streams()
if "error" not in streams:
cache.set('stream_list', streams, timeout=CACHE_TIMEOUT)
print "[+] Streams cache reset."
else:
print "[-] Error in cache update"
except Exception as err:
message = 'compile_streams exception: %s' % err.message
current_app.logger.warning(message)
@celery.task(name='tasks.compile_events')
def compile_events():
try:
with current_app.test_request_context():
print "[+] Starting events cache reset..."
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_DB': 0})
cache.init_app(current_app)
url = current_app.config['UFRAME_ASSETS_URL'] + '/events'
payload = requests.get(url)
if payload.status_code is 200:
data = payload.json()
events = _compile_events(data)
if "error" not in events:
cache.set('event_list', events, timeout=CACHE_TIMEOUT)
print "[+] Events cache reset."
else:
print "[-] Error in cache update"
except Exception as err:
message = 'compile_cam_images exception: %s' % err.message
current_app.logger.warning(message)
@celery.task(name='tasks.compile_glider_tracks')
def compile_glider_tracks():
try:
with current_app.test_request_context():
print "[+] Starting glider tracks cache reset..."
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_DB': 0})
cache.init_app(current_app)
glider_tracks = _compile_glider_tracks(True)
if "error" not in glider_tracks:
cache.set('glider_tracks', glider_tracks, timeout=CACHE_TIMEOUT)
print "[+] Glider tracks cache reset."
else:
print "[-] Error in cache update"
except Exception as err:
message = 'compile_glider_tracks exception: %s' % err.message
current_app.logger.warning(message)
@celery.task(name='tasks.compile_cam_images')
def compile_cam_images():
try:
with current_app.test_request_context():
print "[+] Starting cam images cache reset..."
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_DB': 0})
cache.init_app(current_app)
cam_images = _compile_cam_images()
if "error" not in cam_images:
cache.set('cam_images', cam_images, timeout=CACHE_TIMEOUT)
print "[+] cam images cache reset."
else:
print "[-] Error in cache update"
except Exception as err:
message = 'compile_cam_images exception: %s' % err.message
current_app.logger.warning(message)
"""
'get-large-format-files-every': {
'task': 'tasks.compile_large_format_files',
'schedule': crontab(minute=0, hour='*/12'),
'args': (),
},
"""
@celery.task(name='tasks.compile_large_format_files')
def compile_large_format_files():
try:
with current_app.test_request_context():
print "[+] Starting large format file cache reset..."
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_DB': 0})
cache.init_app(current_app)
data = _compile_large_format_files()
if "error" not in data:
cache.set('large_format', data, timeout=CACHE_TIMEOUT)
print "[+] large format files updated."
else:
print "[-] Error in large file format update"
except Exception as err:
message = 'compile_large_format_files exception: %s' % err.message
current_app.logger.warning(message)
@celery.task(name='tasks.compile_c2_toc')
def compile_c2_toc():
try:
c2_toc = {}
with current_app.test_request_context():
print "[+] Starting c2 toc cache reset..."
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_DB': 0})
cache.init_app(current_app)
try:
c2_toc = _compile_c2_toc()
except Exception as err:
message = 'Error processing compile_c2_toc: ', err.message
current_app.logger.warning(message)
if c2_toc is not None:
cache.set('c2_toc', c2_toc, timeout=CACHE_TIMEOUT)
print "[+] C2 toc cache reset..."
else:
print "[-] Error in cache update"
except Exception as err:
message = 'compile_c2_toc exception: ', err.message
current_app.logger.warning(message)
@celery.task(name='tasks.compile_bad_assets')
def compile_bad_assets():
try:
with current_app.test_request_context():
print "[+] Starting bad asset cache reset..."
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_DB': 0})
cache.init_app(current_app)
url = current_app.config['UFRAME_ASSETS_URL'] + '/assets'
payload = requests.get(url)
if payload.status_code is 200:
data = payload.json()
bad_assets = _compile_bad_assets(data)
if "error" not in bad_assets:
cache.set('bad_asset_list', bad_assets, timeout=CACHE_TIMEOUT)
print "[+] Bad asset cache reset"
else:
print "[-] Error in cache update"
except Exception as err:
message = 'compile_bad_assets exception: %s' % err.message
current_app.logger.warning(message)
@celery.task(name='tasks.compile_vocabulary')
def compile_vocabulary():
try:
with current_app.test_request_context():
print "[+] Starting vocabulary cache reset..."
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_DB': 0})
cache.init_app(current_app)
url = current_app.config['UFRAME_VOCAB_URL'] + '/vocab'
payload = requests.get(url)
if payload.status_code is 200:
data = payload.json()
vocab_dict, vocab_codes = _compile_vocab(data)
if "error" not in vocab_dict:
cache.set('vocab_dict', vocab_dict, timeout=CACHE_TIMEOUT)
cache.set('vocab_codes', codes, timeout=CACHE_TIMEOUT)
print "[+] Vocabulary cache reset"
else:
print "[-] Error in cache update"
except Exception as err:
message = 'compile_vocabulary exception: %s' % err.message
current_app.logger.warning(message)
| |
#!/usr/bin/env python
import sys
import math
import time
import random
import numpy
import transformations
import cv2.cv as cv
def clamp(a, x, b):
    """Clamp x into [a, b], elementwise for arrays."""
    upper_bounded = numpy.minimum(x, b)
    return numpy.maximum(a, upper_bounded)
def norm(v):
    """Return v scaled to unit length."""
    squared_mag = sum([component * component for component in v])
    return v / numpy.sqrt(squared_mag)
class Vec3:
    """A 3-component vector stored as the tuple ``self.v``.

    Components may be scalars or numpy arrays (the ray tracer uses arrays
    to trace many rays at once). Supports +, -, * (elementwise or scalar),
    dot, cross, and Euclidean norm via abs().
    """
    def __init__(self, x, y, z):
        self.v = (x, y, z)
    def x(self):
        return self.v[0]
    def y(self):
        return self.v[1]
    def z(self):
        return self.v[2]
    def __repr__(self):
        return "<Vec3 (%s,%s,%s)>" % (repr(self.v[0]), repr(self.v[1]), repr(self.v[2]))
    def __add__(self, other):
        a, b = self.v, other.v
        return Vec3(a[0] + b[0], a[1] + b[1], a[2] + b[2])
    def __sub__(self, other):
        a, b = self.v, other.v
        return Vec3(a[0] - b[0], a[1] - b[1], a[2] - b[2])
    def __mul__(self, other):
        # Vec3 * Vec3 is elementwise; Vec3 * scalar scales every component.
        if isinstance(other, Vec3):
            return Vec3(*[p * q for p, q in zip(self.v, other.v)])
        return Vec3(*[p * other for p in self.v])
    def mag2(self):
        return sum(e * e for e in self.v)
    def __abs__(self):
        return numpy.sqrt(self.mag2())
    def norm(self):
        return self * (1.0 / abs(self))
    def dot(self, other):
        return sum(p * q for p, q in zip(self.v, other.v))
    def cross(self, other):
        (ax, ay, az) = self.v
        (bx, by, bz) = other.v
        return Vec3(ay * bz - by * az, az * bx - bz * ax, ax * by - bx * ay)
class Ray:
    """A ray with origin ``o`` and direction ``d``; project(t) = o + d*t."""
    def __init__(self, o, d):
        self.o = o
        self.d = d
    def project(self, d):
        # Point reached after travelling distance d along the ray.
        return self.o + (self.d * d)
class Camera:
    """Axis-aligned camera at the origin with its image plane at z = F,
    applying a radial distortion remap when generating rays."""
    def __init__(self, F):
        # right/up basis vectors of the image plane; view direction is +z
        R = Vec3(1., 0., 0.)
        U = Vec3(0, 1., 0)
        self.center = Vec3(0, 0, 0)
        self.pcenter = Vec3(0, 0, F)
        self.up = U
        self.right = R
    def genray(self, x, y):
        """ -1 <= y <= 1 """
        # remap the radial distance r -> rprime to simulate lens distortion
        r = numpy.sqrt(x * x + y * y)
        if 0:
            rprime = r + (0.17 * r**2)
        else:
            # NOTE(review): appears to be the closed-form inverse of the
            # r + 0.17*r**2 model in the dead branch above -- confirm.
            # Also note: divides by r below, so r == 0 would divide by zero.
            rprime = (10 * numpy.sqrt(17 * r + 25) - 50) / 17
        print "scale", rprime / r
        x *= rprime / r
        y *= rprime / r
        o = self.center
        # direction from the eye through the (distorted) image-plane point
        r = (self.pcenter + (self.right * x) + (self.up * y)) - o
        return Ray(o, r.norm())
class Sphere:
    """Sphere given by a center vector and scalar radius, intersectable by rays."""
    def __init__(self, center, radius):
        self.center = center
        self.radius = radius
    def hit(self, r):
        """Intersect ray r; returns (hit_mask, distance-or-999999, surface normal).

        Solves |r.o + t*r.d - center|^2 = radius^2 assuming |r.d| == 1,
        so the quadratic's leading coefficient is 1.
        """
        oc = r.o - self.center
        b = 2 * r.d.dot(oc)
        c = self.center.mag2() + r.o.mag2() + -2 * self.center.dot(r.o) - (self.radius ** 2)
        disc = (b * b) - (4 * c)
        hit_mask = 0 < disc
        root = numpy.sqrt(abs(disc))
        # nearer of the two quadratic roots
        t = numpy.minimum((-b - root) / (2), (-b + root) / (2))
        hit_mask = hit_mask & (t > 0)
        surface_normal = (r.project(t) - self.center) * (1.0 / self.radius)
        return (hit_mask, numpy.where(hit_mask, t, 999999.), surface_normal)
def pt2plane(p, plane):
    """Projection of point p onto the (unnormalized) plane normal, scaled by 1/|plane|."""
    inv_len = 1. / abs(plane)
    return p.dot(plane) * inv_len
class Plane:
    """Infinite plane through point p with normal n, carrying an in-plane
    (right, up) frame so hit points can be mapped to 2-D texture coordinates."""
    def __init__(self, p, n, right):
        # Plane equation: Pn . X + D = 0, with p on the plane.
        self.D = -pt2plane(p, n)
        self.Pn = n
        self.right = right
        self.rightD = -pt2plane(p, right)
        self.up = n.cross(right)
        self.upD = -pt2plane(p, self.up)
    def hit(self, r):
        """Intersect ray r; returns (hit_mask, distance-or-999999, plane normal)."""
        denom = self.Pn.dot(r.d)
        numer = -(self.Pn.dot(r.o) + self.D)
        t = numer / denom
        mask = (0 <= t)
        return (mask, numpy.where(mask, t, 999999.), self.Pn)
    def localxy(self, loc):
        """Map a world-space point on the plane to its (right, up) coordinates."""
        return (loc.dot(self.right) + self.rightD,
                loc.dot(self.up) + self.upD)
# lena = numpy.fromstring(cv.LoadImage("../samples/c/lena.jpg", 0).tostring(), numpy.uint8) / 255.0
def texture(xy):
    """Sample the checkerboard test texture at plane coordinates (x, y).

    Coordinates outside the unit square return the background value 0.5;
    inside, an 11x11 checker pattern (with a solid one-cell border) yields
    0/1 values. Works on scalars and numpy arrays alike.
    """
    x, y = xy
    safe = (0 <= x) & (0 <= y) & (x < 1) & (y < 1)
    # Fixed: removed the dead `if 0:` branch that sampled a 512x512 'lena'
    # image -- the image load itself was already commented out above, so the
    # branch referenced an undefined name. Also replaced the removed
    # numpy.int alias (deprecated in numpy 1.20, removed in 1.24) with int.
    xi = numpy.floor(x * 11).astype(int)
    yi = numpy.floor(y * 11).astype(int)
    inside = (1 <= xi) & (xi < 10) & (2 <= yi) & (yi < 9)
    checker = (xi & 1) ^ (yi & 1)
    final = numpy.where(inside, checker, 1.0)
    return numpy.where(safe, final, 0.5)
def under(vv, m):
    """Apply the 4x4 homogeneous transform m to Vec3 vv, returning a Vec3."""
    homogeneous = vv.v + (1,)
    transformed = numpy.dot(m, homogeneous)
    return Vec3(*transformed[:3])
class Renderer:
    """Renders anti-aliased frames of an animated textured plane, tracing
    every pixel at once through the module-level `cam` (defined below)."""
    def __init__(self, w, h, oversample):
        self.w = w
        self.h = h
        random.seed(1)
        # row-major pixel coordinates for the whole w*h frame at once
        x = numpy.arange(self.w*self.h) % self.w
        y = numpy.floor(numpy.arange(self.w*self.h) / self.w)
        h2 = h / 2.0
        w2 = w / 2.0
        # one jittered ray bundle per oversample pass (stochastic AA)
        self.r = [ None ] * oversample
        for o in range(oversample):
            stoch_x = numpy.random.rand(self.w * self.h)
            stoch_y = numpy.random.rand(self.w * self.h)
            # normalize to camera coordinates; both axes divided by h2 so the
            # aspect ratio is preserved
            nx = (x + stoch_x - 0.5 - w2) / h2
            ny = (y + stoch_y - 0.5 - h2) / h2
            self.r[o] = cam.genray(nx, ny)
        self.rnds = [random.random() for i in range(10)]
    def frame(self, i):
        rnds = self.rnds
        # animated pose; the fixed assignments below overwrite everything
        # except z, which keeps oscillating with the frame index
        roll = math.sin(i * .01 * rnds[0] + rnds[1])
        pitch = math.sin(i * .01 * rnds[2] + rnds[3])
        yaw = math.pi * math.sin(i * .01 * rnds[4] + rnds[5])
        x = math.sin(i * 0.01 * rnds[6])
        y = math.sin(i * 0.01 * rnds[7])
        x,y,z = -0.5,0.5,1
        roll,pitch,yaw = (0,0,0)
        z = 4 + 3 * math.sin(i * 0.1 * rnds[8])
        print z
        rz = transformations.euler_matrix(roll, pitch, yaw)
        p = Plane(Vec3(x, y, z), under(Vec3(0,0,-1), rz), under(Vec3(1, 0, 0), rz))
        # average shading over all jittered ray bundles
        acc = 0
        for r in self.r:
            (pred, h, norm) = p.hit(r)
            l = numpy.where(pred, texture(p.localxy(r.project(h))), 0.0)
            acc += l
        acc *= (1.0 / len(self.r))
        # print "took", time.time() - st
        # pack the [0,1] intensities into an 8-bit single-channel cv matrix
        img = cv.CreateMat(self.h, self.w, cv.CV_8UC1)
        cv.SetData(img, (clamp(0, acc, 1) * 255).astype(numpy.uint8).tostring(), self.w)
        return img
#########################################################################
# Chessboard geometry used for calibration: 8x6 interior corners.
num_x_ints = 8
num_y_ints = 6
num_pts = num_x_ints * num_y_ints
def get_corners(mono, refine = False):
    """Find chessboard corners in a mono image.

    Returns (ok, corners); when *refine* is set and detection succeeded,
    corners are refined to sub-pixel accuracy.
    """
    detect_flags = cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_NORMALIZE_IMAGE
    (ok, corners) = cv.FindChessboardCorners(
        mono, (num_x_ints, num_y_ints), detect_flags)
    if refine and ok:
        term_criteria = ( cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER, 30, 0.1 )
        corners = cv.FindCornerSubPix(mono, corners, (5,5), (-1,-1), term_criteria)
    return (ok, corners)
def mk_object_points(nimages, squaresize = 1):
    """Build the (nimages * num_pts) x 3 matrix of ideal board-plane corner
    positions (z = 0), spaced *squaresize* apart, repeated per image.
    """
    opts = cv.CreateMat(nimages * num_pts, 3, cv.CV_32FC1)
    for i in range(nimages):
        for j in range(num_pts):
            # Explicit floor division: the original relied on Python-2
            # integer `/`; `//` is identical for ints and stays correct
            # under Python 3 (j // num_x_ints = row, j % num_x_ints = col).
            opts[i * num_pts + j, 0] = (j // num_x_ints) * squaresize
            opts[i * num_pts + j, 1] = (j % num_x_ints) * squaresize
            opts[i * num_pts + j, 2] = 0
    return opts
def mk_image_points(goodcorners):
    """Flatten per-image corner lists into one (N * num_pts) x 2 CvMat."""
    ipts = cv.CreateMat(len(goodcorners) * num_pts, 2, cv.CV_32FC1)
    row_base = 0
    for co in goodcorners:
        # Copy exactly num_pts (x, y) pairs for this image.
        for j in range(num_pts):
            ipts[row_base + j, 0] = co[j][0]
            ipts[row_base + j, 1] = co[j][1]
        row_base += num_pts
    return ipts
def mk_point_counts(nimages):
    """Return an nimages x 1 CvMat where each row holds num_pts."""
    counts = cv.CreateMat(nimages, 1, cv.CV_32SC1)
    for row in range(nimages):
        counts[row, 0] = num_pts
    return counts
def cvmat_iterator(cvmat):
    """Yield every element of *cvmat* in row-major order."""
    n_rows = cvmat.rows
    n_cols = cvmat.cols
    for row in range(n_rows):
        for col in range(n_cols):
            yield cvmat[row, col]
# Synthetic calibration setup: render virtual checkerboard frames from the
# pinhole camera model above, then detect chessboard corners in them.
cam = Camera(3.0)
rend = Renderer(640, 480, 2)
cv.NamedWindow("snap")
#images = [rend.frame(i) for i in range(0, 2000, 400)]
images = [rend.frame(i) for i in [1200]]
if 0:
    # Disabled: dump rendered frames to disk for inspection.
    for i,img in enumerate(images):
        cv.SaveImage("final/%06d.png" % i, img)
size = cv.GetSize(images[0])
corners = [get_corners(i) for i in images]
# Keep only corner sets where the full chessboard was detected (ok == True).
goodcorners = [co for (im, (ok, co)) in zip(images, corners) if ok]
def checkerboard_error(xformed):
    """Sum the distances of interior corners from their chessboard row lines.

    *xformed* is a list of images, each a flat row-major list of 8x6 corner
    (x, y) points.  For every row, the straightness error is the distance of
    the six interior corners from the line through the row's two endpoints.
    """
    def _dist_to_line(pt, end_a, end_b):
        # Perpendicular distance from pt to the infinite line end_a--end_b.
        px, py = pt
        ax, ay = end_a
        bx, by = end_b
        numer = abs((bx - ax) * (ay - py) - (ax - px) * (by - ay))
        return numer / math.sqrt((bx - ax) ** 2 + (by - ay) ** 2)
    total = 0.
    for im in xformed:
        for row in range(6):
            first = im[8 * row]
            last = im[8 * row + 7]
            for col in range(1, 7):
                total += _dist_to_line(im[8 * row + col], first, last)
    return total
# Fit a radial distortion polynomial by minimizing checkerboard row
# straightness over the detected corners, then display the corrected
# corners and exit.
if True:
    from scipy.optimize import fmin
    def xf(pt, poly):
        """Radially remap *pt* about the image center (320, 240) by poly(r)/r."""
        x, y = pt
        r = math.sqrt((x - 320) ** 2 + (y - 240) ** 2)
        fr = poly(r) / r
        return (320 + (x - 320) * fr, 240 + (y - 240) * fr)
    def silly(p, goodcorners):
        """Objective: straightness error of all corners under coefficients *p*."""
        # print "eval", p
        d = 1.0 # - sum(p)
        # Polynomial has fixed linear term d and zero constant term.
        poly = numpy.poly1d(list(p) + [d, 0.])
        xformed = [[xf(pt, poly) for pt in co] for co in goodcorners]
        return checkerboard_error(xformed)
    x0 = [ 0. ]  # initial guess: no distortion
    #print silly(x0, goodcorners)
    print "initial error", silly(x0, goodcorners)
    xopt = fmin(silly, x0, args=(goodcorners,))
    print "xopt", xopt
    print "final error", silly(xopt, goodcorners)
    d = 1.0 # - sum(xopt)
    poly = numpy.poly1d(list(xopt) + [d, 0.])
    print "final polynomial"
    print poly
    # Visualize the undistorted corner positions for each image.
    for co in goodcorners:
        scrib = cv.CreateMat(480, 640, cv.CV_8UC3)
        cv.SetZero(scrib)
        cv.DrawChessboardCorners(scrib, (num_x_ints, num_y_ints), [xf(pt, poly) for pt in co], True)
        cv.ShowImage("snap", scrib)
        cv.WaitKey()
    sys.exit(0)
# Show each frame with its detected corners overlaid.
for (i, (img, (ok, co))) in enumerate(zip(images, corners)):
    scrib = cv.CreateMat(img.rows, img.cols, cv.CV_8UC3)
    cv.CvtColor(img, scrib, cv.CV_GRAY2BGR)
    if ok:
        cv.DrawChessboardCorners(scrib, (num_x_ints, num_y_ints), co, True)
    cv.ShowImage("snap", scrib)
    cv.WaitKey()
print len(goodcorners)
# Assemble calibration inputs: image points, ideal object points (square
# size 0.1), and per-image point counts.
ipts = mk_image_points(goodcorners)
opts = mk_object_points(len(goodcorners), .1)
npts = mk_point_counts(len(goodcorners))
intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
distortion = cv.CreateMat(4, 1, cv.CV_64FC1)
cv.SetZero(intrinsics)
cv.SetZero(distortion)
# focal lengths have 1/1 ratio
intrinsics[0,0] = 1.0
intrinsics[1,1] = 1.0
# Rotation/translation outputs are discarded (throwaway matrices).
cv.CalibrateCamera2(opts, ipts, npts,
           cv.GetSize(images[0]),
           intrinsics,
           distortion,
           cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
           cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
           flags = 0) # cv.CV_CALIB_ZERO_TANGENT_DIST)
print "D =", list(cvmat_iterator(distortion))
print "K =", list(cvmat_iterator(intrinsics))
# Build the undistortion maps and show each remapped (undistorted) frame.
mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(intrinsics, distortion, mapx, mapy)
for img in images:
    r = cv.CloneMat(img)
    cv.Remap(img, r, mapx, mapy)
    cv.ShowImage("snap", r)
    cv.WaitKey()
| |
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from neutron.common import constants as q_const
from neutron.common import utils as n_utils
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db as l3agent_sch_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import db as ml2_db
LOG = logging.getLogger(__name__)
class CentralizedSnatL3AgentBinding(model_base.BASEV2):
    """Represents binding between Neutron Centralized SNAT and L3 agents."""
    __tablename__ = "csnat_l3_agent_bindings"
    # Router being bound; row disappears with the router (CASCADE).
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey("routers.id", ondelete='CASCADE'),
                          primary_key=True)
    # L3 agent hosting the centralized SNAT for this router.
    l3_agent_id = sa.Column(sa.String(36),
                            sa.ForeignKey("agents.id", ondelete='CASCADE'),
                            nullable=False)
    host_id = sa.Column(sa.String(255))
    # Gateway port used by the centralized SNAT namespace.
    csnat_gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
    l3_agent = orm.relationship(agents_db.Agent)
    csnat_gw_port = orm.relationship(models_v2.Port)
class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
    """Mixin class for L3 DVR scheduler.
    DVR currently supports the following use cases:
     - East/West (E/W) traffic between VMs: this is handled in a
    distributed manner across Compute Nodes without a centralized element.
    This includes E/W traffic between VMs on the same Compute Node.
     - North/South traffic for Floating IPs (FIP N/S): this is supported on the
    distributed routers on Compute Nodes without any centralized element.
     - North/South traffic for SNAT (SNAT N/S): this is supported via a
    centralized element that handles the SNAT traffic.
    To support these use cases,  DVR routers rely on an L3 agent that runs on a
    central node (also known as Network Node or Service Node),  as well as, L3
    agents that run individually on each Compute Node of an OpenStack cloud.
    Each L3 agent creates namespaces to route traffic according to the use
    cases outlined above.  The mechanism adopted for creating and managing
    these namespaces is via (Router,  Agent) binding and Scheduling in general.
    The main difference between distributed routers and centralized ones is
    that in the distributed case,  multiple bindings will exist,  one for each
    of the agents participating in the routed topology for the specific router.
    These bindings are created in the following circumstances:
    - A subnet is added to a router via router-interface-add, and that subnet
      has running VM's deployed in it.  A binding will be created between the
      router and any L3 agent whose Compute Node is hosting the VM(s).
    - An external gateway is set to a router via router-gateway-set.  A binding
      will be created between the router and the L3 agent running centrally
      on the Network Node.
    Therefore,  any time a router operation occurs (create, update or delete),
    scheduling will determine whether the router needs to be associated to an
    L3 agent,  just like a regular centralized router,  with the difference
    that,  in the distributed case,  the bindings required are established
    based on the state of the router and the Compute Nodes.
    """
    def dvr_update_router_addvm(self, context, port):
        # Notify the routers of every DVR-connected subnet that a VM port
        # was added, so compute-node namespaces can be created/updated.
        ips = port['fixed_ips']
        for ip in ips:
            subnet = ip['subnet_id']
            filter_sub = {'fixed_ips': {'subnet_id': [subnet]},
                          'device_owner':
                          [q_const.DEVICE_OWNER_DVR_INTERFACE]}
            router_id = None
            ports = self._core_plugin.get_ports(context, filters=filter_sub)
            for port in ports:
                router_id = port['device_id']
                router_dict = self.get_router(context, router_id)
                # Only distributed routers need the per-subnet notification.
                if router_dict.get('distributed', False):
                    payload = {'subnet_id': subnet}
                    self.l3_rpc_notifier.routers_updated(
                        context, [router_id], None, payload)
                    break
        LOG.debug('DVR: dvr_update_router_addvm %s ', router_id)
    def get_dvr_routers_by_portid(self, context, port_id):
        """Gets the dvr routers on vmport subnets."""
        router_ids = set()
        port_dict = self._core_plugin.get_port(context, port_id)
        fixed_ips = port_dict['fixed_ips']
        for fixedip in fixed_ips:
            vm_subnet = fixedip['subnet_id']
            # DVR interface ports on the VM's subnet identify its routers.
            filter_sub = {'fixed_ips': {'subnet_id': [vm_subnet]},
                          'device_owner':
                          [q_const.DEVICE_OWNER_DVR_INTERFACE]}
            subnet_ports = self._core_plugin.get_ports(
                context, filters=filter_sub)
            for subnet_port in subnet_ports:
                router_ids.add(subnet_port['device_id'])
        return router_ids
    def get_subnet_ids_on_router(self, context, router_id):
        """Return subnet IDs for interfaces attached to the given router."""
        subnet_ids = set()
        filter_rtr = {'device_id': [router_id]}
        int_ports = self._core_plugin.get_ports(context, filters=filter_rtr)
        for int_port in int_ports:
            int_ips = int_port['fixed_ips']
            # Only the first fixed IP's subnet is considered per port.
            int_subnet = int_ips[0]['subnet_id']
            subnet_ids.add(int_subnet)
        return subnet_ids
    def check_ports_active_on_host_and_subnet(self, context, host,
                                              port_id, subnet_id):
        """Check if there is any dvr serviceable port on the subnet_id."""
        filter_sub = {'fixed_ips': {'subnet_id': [subnet_id]}}
        ports = self._core_plugin.get_ports(context, filters=filter_sub)
        for port in ports:
            # A port other than the one being removed keeps the namespace
            # alive if it is DVR-serviced, ACTIVE, and bound to this host.
            if (n_utils.is_dvr_serviced(port['device_owner'])
                and port['status'] == 'ACTIVE'
                and port['binding:host_id'] == host
                and port['id'] != port_id):
                LOG.debug('DVR: Active port exists for subnet %(subnet_id)s '
                          'on host %(host)s', {'subnet_id': subnet_id,
                          'host': host})
                return True
        return False
    def dvr_deletens_if_no_port(self, context, port_id):
        """Delete the DVR namespace if no dvr serviced port exists."""
        router_ids = self.get_dvr_routers_by_portid(context, port_id)
        port_host = ml2_db.get_port_binding_host(port_id)
        if not router_ids:
            LOG.debug('No namespaces available for this DVR port %(port)s '
                      'on host %(host)s', {'port': port_id,
                                           'host': port_host})
            return []
        removed_router_info = []
        for router_id in router_ids:
            subnet_ids = self.get_subnet_ids_on_router(context, router_id)
            # Keep the namespace if any router subnet still has an active
            # DVR-serviced port on this host.
            port_exists_on_subnet = False
            for subnet in subnet_ids:
                if self.check_ports_active_on_host_and_subnet(context,
                                                              port_host,
                                                              port_id,
                                                              subnet):
                    port_exists_on_subnet = True
                    break
            if port_exists_on_subnet:
                continue
            filter_rtr = {'device_id': [router_id],
                          'device_owner':
                          [q_const.DEVICE_OWNER_DVR_INTERFACE]}
            int_ports = self._core_plugin.get_ports(
                context, filters=filter_rtr)
            for prt in int_ports:
                dvr_binding = (ml2_db.
                               get_dvr_port_binding_by_host(context.session,
                                                            prt['id'],
                                                            port_host))
                if dvr_binding:
                    # unbind this port from router
                    dvr_binding['router_id'] = None
                    dvr_binding.update(dvr_binding)
            agent = self._get_agent_by_type_and_host(context,
                                                     q_const.AGENT_TYPE_L3,
                                                     port_host)
            info = {'router_id': router_id, 'host': port_host,
                    'agent_id': str(agent.id)}
            removed_router_info.append(info)
            LOG.debug('Router namespace %(router_id)s on host %(host)s '
                      'to be deleted', info)
        return removed_router_info
    def bind_snat_router(self, context, router_id, chosen_agent):
        """Bind the router to the chosen l3 agent."""
        with context.session.begin(subtransactions=True):
            binding = CentralizedSnatL3AgentBinding()
            binding.l3_agent = chosen_agent
            binding.router_id = router_id
            context.session.add(binding)
            LOG.debug('SNAT Router %(router_id)s is scheduled to L3 agent '
                      '%(agent_id)s', {'router_id': router_id,
                                       'agent_id': chosen_agent.id})
    def bind_dvr_router_servicenode(self, context, router_id,
                                    chosen_snat_agent):
        """Bind the IR router to service node if not already hosted."""
        query = (context.session.query(l3agent_sch_db.RouterL3AgentBinding).
                 filter_by(router_id=router_id))
        for bind in query:
            if bind.l3_agent_id == chosen_snat_agent.id:
                LOG.debug('Distributed Router %(router_id)s already hosted '
                          'on snat l3_agent %(snat_id)s',
                          {'router_id': router_id,
                           'snat_id': chosen_snat_agent.id})
                return
        with context.session.begin(subtransactions=True):
            binding = l3agent_sch_db.RouterL3AgentBinding()
            binding.l3_agent = chosen_snat_agent
            binding.router_id = router_id
            context.session.add(binding)
            LOG.debug('Binding the distributed router %(router_id)s to '
                      'the snat agent %(snat_id)s',
                      {'router_id': router_id,
                       'snat_id': chosen_snat_agent.id})
    def bind_snat_servicenode(self, context, router_id, snat_candidates):
        """Bind the snat router to the chosen l3 service agent."""
        # Candidates are equally eligible; pick one at random.
        chosen_snat_agent = random.choice(snat_candidates)
        self.bind_snat_router(context, router_id, chosen_snat_agent)
        return chosen_snat_agent
    def unbind_snat_servicenode(self, context, router_id):
        """Unbind the snat router to the chosen l3 service agent."""
        vm_ports = []
        with context.session.begin(subtransactions=True):
            query = (context.session.
                     query(CentralizedSnatL3AgentBinding).
                     filter_by(router_id=router_id))
            try:
                binding = query.one()
            except exc.NoResultFound:
                LOG.debug('no snat router binding found for %s', router_id)
                return
            host = binding.l3_agent.host
            subnet_ids = self.get_subnet_ids_on_router(context, router_id)
            # If any router subnet still has ports on the SNAT host, keep
            # the router/agent binding and only drop the SNAT binding.
            for subnet in subnet_ids:
                vm_ports = (
                    self._core_plugin.get_ports_on_host_by_subnet(
                        context, host, subnet))
                if vm_ports:
                    LOG.debug('One or more ports exist on the snat enabled '
                              'l3_agent host %(host)s and router_id %(id)s',
                              {'host': host, 'id': router_id})
                    break
            agent_id = binding.l3_agent_id
            LOG.debug('Delete binding of the SNAT router %(router_id)s '
                      'from agent %(id)s', {'router_id': router_id,
                                            'id': agent_id})
            context.session.delete(binding)
            if not vm_ports:
                query = (context.session.
                         query(l3agent_sch_db.RouterL3AgentBinding).
                         filter_by(router_id=router_id,
                                   l3_agent_id=agent_id).
                         delete(synchronize_session=False))
        self.l3_rpc_notifier.router_removed_from_agent(
            context, router_id, host)
        LOG.debug('Removed binding for router %(router_id)s and '
                  'agent %(id)s', {'router_id': router_id, 'id': agent_id})
    def get_snat_bindings(self, context, router_ids):
        """Retrieve the dvr snat bindings for the given routers."""
        if not router_ids:
            return []
        query = context.session.query(CentralizedSnatL3AgentBinding)
        # Eager-load the agent to avoid per-row lazy loads in callers.
        query = query.options(joinedload('l3_agent')).filter(
            CentralizedSnatL3AgentBinding.router_id.in_(router_ids))
        return query.all()
    def schedule_snat_router(self, context, router_id, sync_router):
        """Schedule the snat router on l3 service agent."""
        active_l3_agents = self.get_l3_agents(context, active=True)
        if not active_l3_agents:
            LOG.warn(_('No active L3 agents found for SNAT'))
            return
        snat_candidates = self.get_snat_candidates(sync_router,
                                                   active_l3_agents)
        if snat_candidates:
            chosen_agent = self.bind_snat_servicenode(
                context, router_id, snat_candidates)
            self.bind_dvr_router_servicenode(
                context, router_id, chosen_agent)
| |
"""Config flow for ezviz."""
import logging
from pyezviz.client import EzvizClient, HTTPError, InvalidURL, PyEzvizError
from pyezviz.test_cam_rtsp import AuthTestResultFailed, InvalidHost, TestRTSPAuth
import voluptuous as vol
from homeassistant.config_entries import CONN_CLASS_CLOUD_POLL, ConfigFlow, OptionsFlow
from homeassistant.const import (
CONF_CUSTOMIZE,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_TYPE,
CONF_URL,
CONF_USERNAME,
)
from homeassistant.core import callback
from .const import ( # pylint: disable=unused-import
ATTR_SERIAL,
ATTR_TYPE_CAMERA,
ATTR_TYPE_CLOUD,
CONF_FFMPEG_ARGUMENTS,
DEFAULT_CAMERA_USERNAME,
DEFAULT_FFMPEG_ARGUMENTS,
DEFAULT_TIMEOUT,
DOMAIN,
EU_URL,
RUSSIA_URL,
)
_LOGGER = logging.getLogger(__name__)
def _get_ezviz_client_instance(data):
    """Create a logged-in EzvizClient from the given config data."""
    username = data[CONF_USERNAME]
    password = data[CONF_PASSWORD]
    api_url = data.get(CONF_URL, EU_URL)
    timeout = data.get(CONF_TIMEOUT, DEFAULT_TIMEOUT)
    client = EzvizClient(username, password, api_url, timeout)
    client.login()
    return client
def _test_camera_rtsp_creds(data):
    """Try DESCRIBE on RTSP camera with credentials."""
    auth_test = TestRTSPAuth(
        data[CONF_IP_ADDRESS],
        data[CONF_USERNAME],
        data[CONF_PASSWORD],
    )
    auth_test.main()
class EzvizConfigFlow(ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Ezviz.

    Manages one cloud-account entry plus any number of per-camera RTSP
    entries (keyed by camera serial).
    """
    VERSION = 1
    CONNECTION_CLASS = CONN_CLASS_CLOUD_POLL
    async def _validate_and_create_auth(self, data):
        """Try to login to ezviz cloud account and create entry if successful."""
        # The cloud account entry is unique per username.
        await self.async_set_unique_id(data[CONF_USERNAME])
        self._abort_if_unique_id_configured()
        # Verify cloud credentials by attempting a login request.
        try:
            await self.hass.async_add_executor_job(_get_ezviz_client_instance, data)
        except InvalidURL as err:
            raise InvalidURL from err
        except HTTPError as err:
            # Map transport failures to InvalidHost for the caller's handlers.
            raise InvalidHost from err
        except PyEzvizError as err:
            raise PyEzvizError from err
        auth_data = {
            CONF_USERNAME: data[CONF_USERNAME],
            CONF_PASSWORD: data[CONF_PASSWORD],
            CONF_URL: data.get(CONF_URL, EU_URL),
            CONF_TYPE: ATTR_TYPE_CLOUD,
        }
        return self.async_create_entry(title=data[CONF_USERNAME], data=auth_data)
    async def _validate_and_create_camera_rtsp(self, data):
        """Try DESCRIBE on RTSP camera with credentials."""
        # Get Ezviz cloud credentials from config entry
        ezviz_client_creds = {
            CONF_USERNAME: None,
            CONF_PASSWORD: None,
            CONF_URL: None,
        }
        for item in self._async_current_entries():
            if item.data.get(CONF_TYPE) == ATTR_TYPE_CLOUD:
                ezviz_client_creds = {
                    CONF_USERNAME: item.data.get(CONF_USERNAME),
                    CONF_PASSWORD: item.data.get(CONF_PASSWORD),
                    CONF_URL: item.data.get(CONF_URL),
                }
        # Abort flow if user removed cloud account before adding camera.
        if ezviz_client_creds[CONF_USERNAME] is None:
            return self.async_abort(reason="ezviz_cloud_account_missing")
        # We need to wake hibernating cameras.
        # First create EZVIZ API instance.
        try:
            ezviz_client = await self.hass.async_add_executor_job(
                _get_ezviz_client_instance, ezviz_client_creds
            )
        except InvalidURL as err:
            raise InvalidURL from err
        except HTTPError as err:
            raise InvalidHost from err
        except PyEzvizError as err:
            raise PyEzvizError from err
        # Secondly try to wake hybernating camera.
        try:
            await self.hass.async_add_executor_job(
                ezviz_client.get_detection_sensibility, data[ATTR_SERIAL]
            )
        except HTTPError as err:
            raise InvalidHost from err
        # Thirdly attempts an authenticated RTSP DESCRIBE request.
        try:
            await self.hass.async_add_executor_job(_test_camera_rtsp_creds, data)
        except InvalidHost as err:
            raise InvalidHost from err
        except AuthTestResultFailed as err:
            raise AuthTestResultFailed from err
        return self.async_create_entry(
            title=data[ATTR_SERIAL],
            data={
                CONF_USERNAME: data[CONF_USERNAME],
                CONF_PASSWORD: data[CONF_PASSWORD],
                CONF_TYPE: ATTR_TYPE_CAMERA,
            },
        )
    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return EzvizOptionsFlowHandler(config_entry)
    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        # Check if ezviz cloud account is present in entry config,
        # abort if already configured.
        for item in self._async_current_entries():
            if item.data.get(CONF_TYPE) == ATTR_TYPE_CLOUD:
                return self.async_abort(reason="already_configured_account")
        errors = {}
        if user_input is not None:
            # "Customize" region: stash credentials and ask for a custom URL.
            if user_input[CONF_URL] == CONF_CUSTOMIZE:
                self.context["data"] = {
                    CONF_USERNAME: user_input[CONF_USERNAME],
                    CONF_PASSWORD: user_input[CONF_PASSWORD],
                }
                return await self.async_step_user_custom_url()
            if CONF_TIMEOUT not in user_input:
                user_input[CONF_TIMEOUT] = DEFAULT_TIMEOUT
            try:
                return await self._validate_and_create_auth(user_input)
            except InvalidURL:
                errors["base"] = "invalid_host"
            except InvalidHost:
                errors["base"] = "cannot_connect"
            except PyEzvizError:
                errors["base"] = "invalid_auth"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                return self.async_abort(reason="unknown")
        data_schema = vol.Schema(
            {
                vol.Required(CONF_USERNAME): str,
                vol.Required(CONF_PASSWORD): str,
                vol.Required(CONF_URL, default=EU_URL): vol.In(
                    [EU_URL, RUSSIA_URL, CONF_CUSTOMIZE]
                ),
            }
        )
        return self.async_show_form(
            step_id="user", data_schema=data_schema, errors=errors
        )
    async def async_step_user_custom_url(self, user_input=None):
        """Handle a flow initiated by the user for custom region url."""
        errors = {}
        if user_input is not None:
            # Re-attach the credentials collected in the previous step.
            user_input[CONF_USERNAME] = self.context["data"][CONF_USERNAME]
            user_input[CONF_PASSWORD] = self.context["data"][CONF_PASSWORD]
            if CONF_TIMEOUT not in user_input:
                user_input[CONF_TIMEOUT] = DEFAULT_TIMEOUT
            try:
                return await self._validate_and_create_auth(user_input)
            except InvalidURL:
                errors["base"] = "invalid_host"
            except InvalidHost:
                errors["base"] = "cannot_connect"
            except PyEzvizError:
                errors["base"] = "invalid_auth"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                return self.async_abort(reason="unknown")
        data_schema_custom_url = vol.Schema(
            {
                vol.Required(CONF_URL, default=EU_URL): str,
            }
        )
        return self.async_show_form(
            step_id="user_custom_url", data_schema=data_schema_custom_url, errors=errors
        )
    async def async_step_discovery(self, discovery_info):
        """Handle a flow for discovered camera without rtsp config entry."""
        # One entry per camera serial.
        await self.async_set_unique_id(discovery_info[ATTR_SERIAL])
        self._abort_if_unique_id_configured()
        self.context["title_placeholders"] = {"serial": self.unique_id}
        self.context["data"] = {CONF_IP_ADDRESS: discovery_info[CONF_IP_ADDRESS]}
        return await self.async_step_confirm()
    async def async_step_confirm(self, user_input=None):
        """Confirm and create entry from discovery step."""
        errors = {}
        if user_input is not None:
            user_input[ATTR_SERIAL] = self.unique_id
            user_input[CONF_IP_ADDRESS] = self.context["data"][CONF_IP_ADDRESS]
            try:
                return await self._validate_and_create_camera_rtsp(user_input)
            except (InvalidHost, InvalidURL):
                errors["base"] = "invalid_host"
            except (PyEzvizError, AuthTestResultFailed):
                errors["base"] = "invalid_auth"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                return self.async_abort(reason="unknown")
        discovered_camera_schema = vol.Schema(
            {
                vol.Required(CONF_USERNAME, default=DEFAULT_CAMERA_USERNAME): str,
                vol.Required(CONF_PASSWORD): str,
            }
        )
        return self.async_show_form(
            step_id="confirm",
            data_schema=discovered_camera_schema,
            errors=errors,
            description_placeholders={
                "serial": self.unique_id,
                CONF_IP_ADDRESS: self.context["data"][CONF_IP_ADDRESS],
            },
        )
    async def async_step_import(self, import_config):
        """Handle config import from yaml."""
        _LOGGER.debug("import config: %s", import_config)
        # Check importing camera.
        if ATTR_SERIAL in import_config:
            return await self.async_step_import_camera(import_config)
        # Validate and setup of main ezviz cloud account.
        try:
            return await self._validate_and_create_auth(import_config)
        except InvalidURL:
            _LOGGER.error("Error importing Ezviz platform config: invalid host")
            return self.async_abort(reason="invalid_host")
        except InvalidHost:
            _LOGGER.error("Error importing Ezviz platform config: cannot connect")
            return self.async_abort(reason="cannot_connect")
        except (AuthTestResultFailed, PyEzvizError):
            _LOGGER.error("Error importing Ezviz platform config: invalid auth")
            return self.async_abort(reason="invalid_auth")
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Error importing ezviz platform config: unexpected exception"
            )
            return self.async_abort(reason="unknown")
    async def async_step_import_camera(self, data):
        """Create RTSP auth entry per camera in config."""
        await self.async_set_unique_id(data[ATTR_SERIAL])
        self._abort_if_unique_id_configured()
        _LOGGER.debug("Create camera with: %s", data)
        # The serial becomes the entry title; the stored data holds only
        # credentials and the camera type marker.
        cam_serial = data.pop(ATTR_SERIAL)
        data[CONF_TYPE] = ATTR_TYPE_CAMERA
        return self.async_create_entry(title=cam_serial, data=data)
class EzvizOptionsFlowHandler(OptionsFlow):
    """Handle Ezviz client options."""
    def __init__(self, config_entry):
        """Initialize options flow."""
        self.config_entry = config_entry
    async def async_step_init(self, user_input=None):
        """Manage Ezviz options."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)
        # Pre-fill the form with the current option values (or defaults).
        options = {
            vol.Optional(
                CONF_TIMEOUT,
                default=self.config_entry.options.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
            ): int,
            vol.Optional(
                CONF_FFMPEG_ARGUMENTS,
                default=self.config_entry.options.get(
                    CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS
                ),
            ): str,
        }
        return self.async_show_form(step_id="init", data_schema=vol.Schema(options))
| |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
import unicodedata
import warnings
import pyglet
from pyglet.window import WindowException, NoSuchDisplayException, \
MouseCursorException, MouseCursor, \
DefaultMouseCursor, ImageMouseCursor, BaseWindow, _PlatformEventHandler, \
_ViewEventHandler
from pyglet.window import key
from pyglet.window import mouse
from pyglet.event import EventDispatcher
from pyglet.canvas.xlib import XlibCanvas
from pyglet.libs.x11 import xlib
from pyglet.libs.x11 import cursorfont
# Optional XSYNC extension support (resize synchronisation with the WM).
# Only an ImportError means "not available"; a bare `except:` would also
# swallow KeyboardInterrupt/SystemExit and hide real bugs in xsync.
try:
    from pyglet.libs.x11 import xsync
    _have_xsync = True
except ImportError:
    _have_xsync = False
class mwmhints_t(Structure):
    """ctypes mirror of the Motif _MOTIF_WM_HINTS window property struct."""
    _fields_ = [
        ('flags', c_uint32),
        ('functions', c_uint32),
        ('decorations', c_uint32),
        ('input_mode', c_int32),
        ('status', c_uint32)
    ]
XA_CARDINAL = 6 # Xatom.h:14
# Do we have the November 2000 UTF8 extension?
_have_utf8 = hasattr(xlib._lib, 'Xutf8TextListToTextProperty')
# symbol,ctrl -> motion mapping
# Maps (key symbol, ctrl-held?) to the text-caret motion event it should
# generate in text widgets.
_motion_map = {
    (key.UP, False):        key.MOTION_UP,
    (key.RIGHT, False):     key.MOTION_RIGHT,
    (key.DOWN, False):      key.MOTION_DOWN,
    (key.LEFT, False):      key.MOTION_LEFT,
    (key.RIGHT, True):      key.MOTION_NEXT_WORD,
    (key.LEFT, True):       key.MOTION_PREVIOUS_WORD,
    (key.HOME, False):      key.MOTION_BEGINNING_OF_LINE,
    (key.END, False):       key.MOTION_END_OF_LINE,
    (key.PAGEUP, False):    key.MOTION_PREVIOUS_PAGE,
    (key.PAGEDOWN, False):  key.MOTION_NEXT_PAGE,
    (key.HOME, True):       key.MOTION_BEGINNING_OF_FILE,
    (key.END, True):        key.MOTION_END_OF_FILE,
    (key.BACKSPACE, False): key.MOTION_BACKSPACE,
    (key.DELETE, False):    key.MOTION_DELETE,
}
class XlibException(WindowException):
    '''An X11-specific exception.  This exception is probably a programming
    error in pyglet.'''
    pass
class XlibMouseCursor(MouseCursor):
    '''Mouse cursor backed by a native Xlib Cursor handle.'''
    # Native cursors are drawn by the X server, not by pyglet.
    drawable = False
    def __init__(self, cursor):
        # cursor: the Xlib Cursor resource to install on the window.
        self.cursor = cursor
# Platform event data is single item, so use platform event handler directly.
# These decorators tag methods with the X event type(s) they handle; the
# dispatcher routes window vs. view events to the matching handler maps.
XlibEventHandler = _PlatformEventHandler
ViewEventHandler = _ViewEventHandler
class XlibWindow(BaseWindow):
_x_display = None # X display connection
_x_screen_id = None # X screen index
_x_ic = None # X input context
_window = None # Xlib window handle
_minimum_size = None
_maximum_size = None
_override_redirect = False
_x = 0
_y = 0 # Last known window position
_width = 0
_height = 0 # Last known window size
_mouse_exclusive_client = None # x,y of "real" mouse during exclusive
_mouse_buttons = [False] * 6 # State of each xlib button
_keyboard_exclusive = False
_active = True
_applied_mouse_exclusive = False
_applied_keyboard_exclusive = False
_mapped = False
_lost_context = False
_lost_context_state = False
_enable_xsync = False
_current_sync_value = None
_current_sync_valid = False
_needs_resize = False # True when resize event has been received but not
# dispatched
_default_event_mask = (0x1ffffff
& ~xlib.PointerMotionHintMask
& ~xlib.ResizeRedirectMask
& ~xlib.SubstructureNotifyMask)
def __init__(self, *args, **kwargs):
# Bind event handlers
self._event_handlers = {}
self._view_event_handlers = {}
for name in self._platform_event_names:
if not hasattr(self, name):
continue
func = getattr(self, name)
for message in func._platform_event_data:
if hasattr(func, '_view'):
self._view_event_handlers[message] = func
else:
self._event_handlers[message] = func
super(XlibWindow, self).__init__(*args, **kwargs)
def _recreate(self, changes):
# If flipping to/from fullscreen, need to recreate the window. (This
# is the case with both override_redirect method and
# _NET_WM_STATE_FULLSCREEN).
#
# A possible improvement could be to just hide the top window,
# destroy the GLX window, and reshow it again when leaving fullscreen.
# This would prevent the floating window from being moved by the
# WM.
if ('fullscreen' in changes or 'resizable' in changes):
# clear out the GLX context
self.context.detach()
xlib.XDestroyWindow(self._x_display, self._window)
del self.display._window_map[self._window]
del self.display._window_map[self._view]
self._window = None
self._mapped = False
# TODO: detect state loss only by examining context share.
if 'context' in changes:
self._lost_context = True
self._lost_context_state = True
self._create()
def _create(self):
# Unmap existing window if necessary while we fiddle with it.
if self._window and self._mapped:
self._unmap()
self._x_display = self.display._display
self._x_screen_id = self.display.x_screen
# Create X window if not already existing.
if not self._window:
root = xlib.XRootWindow(self._x_display, self._x_screen_id)
visual_info = self.config.get_visual_info()
visual = visual_info.visual
visual_id = xlib.XVisualIDFromVisual(visual)
default_visual = xlib.XDefaultVisual(
self._x_display, self._x_screen_id)
default_visual_id = xlib.XVisualIDFromVisual(default_visual)
window_attributes = xlib.XSetWindowAttributes()
if visual_id != default_visual_id:
window_attributes.colormap = xlib.XCreateColormap(
self._x_display, root, visual, xlib.AllocNone)
else:
window_attributes.colormap = xlib.XDefaultColormap(
self._x_display, self._x_screen_id)
window_attributes.bit_gravity = xlib.StaticGravity
# Issue 287: Compiz on Intel/Mesa doesn't draw window decoration
# unless CWBackPixel is given in mask. Should have
# no effect on other systems, so it's set
# unconditionally.
mask = xlib.CWColormap | xlib.CWBitGravity | xlib.CWBackPixel
if self._fullscreen:
width, height = self.screen.width, self.screen.height
self._view_x = (width - self._width) // 2
self._view_y = (height - self._height) // 2
else:
width, height = self._width, self._height
self._view_x = self._view_y = 0
self._window = xlib.XCreateWindow(self._x_display, root,
0, 0, width, height, 0, visual_info.depth,
xlib.InputOutput, visual, mask,
byref(window_attributes))
self._view = xlib.XCreateWindow(self._x_display,
self._window, self._view_x, self._view_y,
self._width, self._height, 0, visual_info.depth,
xlib.InputOutput, visual, mask,
byref(window_attributes));
xlib.XMapWindow(self._x_display, self._view)
xlib.XSelectInput(
self._x_display, self._view, self._default_event_mask)
self.display._window_map[self._window] = \
self.dispatch_platform_event
self.display._window_map[self._view] = \
self.dispatch_platform_event_view
self.canvas = XlibCanvas(self.display, self._view)
self.context.attach(self.canvas)
self.context.set_vsync(self._vsync) # XXX ?
# Setting null background pixmap disables drawing the background,
# preventing flicker while resizing (in theory).
#
# Issue 287: Compiz on Intel/Mesa doesn't draw window decoration if
# this is called. As it doesn't seem to have any
# effect anyway, it's just commented out.
#xlib.XSetWindowBackgroundPixmap(self._x_display, self._window, 0)
self._enable_xsync = (pyglet.options['xsync'] and
self.display._enable_xsync and
self.config.double_buffer)
# Set supported protocols
protocols = []
protocols.append(xlib.XInternAtom(self._x_display,
'WM_DELETE_WINDOW', False))
if self._enable_xsync:
protocols.append(xlib.XInternAtom(self._x_display,
'_NET_WM_SYNC_REQUEST',
False))
protocols = (c_ulong * len(protocols))(*protocols)
xlib.XSetWMProtocols(self._x_display, self._window,
protocols, len(protocols))
# Create window resize sync counter
if self._enable_xsync:
value = xsync.XSyncValue()
self._sync_counter = xlib.XID(
xsync.XSyncCreateCounter(self._x_display, value))
atom = xlib.XInternAtom(self._x_display,
'_NET_WM_SYNC_REQUEST_COUNTER', False)
ptr = pointer(self._sync_counter)
xlib.XChangeProperty(self._x_display, self._window,
atom, XA_CARDINAL, 32,
xlib.PropModeReplace,
cast(ptr, POINTER(c_ubyte)), 1)
# Set window attributes
attributes = xlib.XSetWindowAttributes()
attributes_mask = 0
self._override_redirect = False
if self._fullscreen:
if pyglet.options['xlib_fullscreen_override_redirect']:
# Try not to use this any more, it causes problems; disabled
# by default in favour of _NET_WM_STATE_FULLSCREEN.
attributes.override_redirect = self._fullscreen
attributes_mask |= xlib.CWOverrideRedirect
self._override_redirect = True
else:
self._set_wm_state('_NET_WM_STATE_FULLSCREEN')
if self._fullscreen:
xlib.XMoveResizeWindow(self._x_display, self._window,
self.screen.x, self.screen.y,
self.screen.width, self.screen.height)
else:
xlib.XResizeWindow(self._x_display, self._window,
self._width, self._height)
xlib.XChangeWindowAttributes(self._x_display, self._window,
attributes_mask, byref(attributes))
# Set style
styles = {
self.WINDOW_STYLE_DEFAULT: '_NET_WM_WINDOW_TYPE_NORMAL',
self.WINDOW_STYLE_DIALOG: '_NET_WM_WINDOW_TYPE_DIALOG',
self.WINDOW_STYLE_TOOL: '_NET_WM_WINDOW_TYPE_UTILITY',
}
if self._style in styles:
self._set_atoms_property('_NET_WM_WINDOW_TYPE',
(styles[self._style],))
elif self._style == self.WINDOW_STYLE_BORDERLESS:
MWM_HINTS_DECORATIONS = 1 << 1
PROP_MWM_HINTS_ELEMENTS = 5
mwmhints = mwmhints_t()
mwmhints.flags = MWM_HINTS_DECORATIONS
mwmhints.decorations = 0
name = xlib.XInternAtom(self._x_display, '_MOTIF_WM_HINTS', False)
xlib.XChangeProperty(self._x_display, self._window,
name, name, 32, xlib.PropModeReplace,
cast(pointer(mwmhints), POINTER(c_ubyte)),
PROP_MWM_HINTS_ELEMENTS)
# Set resizeable
if not self._resizable and not self._fullscreen:
self.set_minimum_size(self._width, self._height)
self.set_maximum_size(self._width, self._height)
# Set caption
self.set_caption(self._caption)
# Create input context. A good but very outdated reference for this
# is http://www.sbin.org/doc/Xlib/chapt_11.html
if _have_utf8 and not self._x_ic:
if not self.display._x_im:
xlib.XSetLocaleModifiers('@im=none')
self.display._x_im = \
xlib.XOpenIM(self._x_display, None, None, None)
xlib.XFlush(self._x_display);
# Need to set argtypes on this function because it's vararg,
# and ctypes guesses wrong.
xlib.XCreateIC.argtypes = [xlib.XIM,
c_char_p, c_int,
c_char_p, xlib.Window,
c_char_p, xlib.Window,
c_void_p]
self._x_ic = xlib.XCreateIC(self.display._x_im,
'inputStyle', xlib.XIMPreeditNothing|xlib.XIMStatusNothing,
'clientWindow', self._window,
'focusWindow', self._window,
None)
filter_events = c_ulong()
xlib.XGetICValues(self._x_ic,
'filterEvents', byref(filter_events),
None)
self._default_event_mask |= filter_events.value
xlib.XSetICFocus(self._x_ic)
self.switch_to()
if self._visible:
self.set_visible(True)
self.set_mouse_platform_visible()
self._applied_mouse_exclusive = None
self._update_exclusivity()
    def _map(self):
        """Map (show) the window and block until MapNotify arrives.

        The event mask is temporarily narrowed to StructureNotify so the
        blocking XNextEvent loop cannot consume unrelated events.
        """
        if self._mapped:
            return
        # Map the window, wait for map event before continuing.
        xlib.XSelectInput(
            self._x_display, self._window, xlib.StructureNotifyMask)
        xlib.XMapRaised(self._x_display, self._window)
        e = xlib.XEvent()
        while True:
            xlib.XNextEvent(self._x_display, e)
            if e.type == xlib.MapNotify:
                break
        # Restore the normal event mask now that the window is mapped.
        xlib.XSelectInput(
            self._x_display, self._window, self._default_event_mask)
        self._mapped = True
        if self._override_redirect:
            # Possibly an override_redirect issue.
            self.activate()
        self.dispatch_event('on_resize', self._width, self._height)
        self.dispatch_event('on_show')
        self.dispatch_event('on_expose')
    def _unmap(self):
        """Unmap (hide) the window and block until UnmapNotify arrives.

        Mirror image of _map(): narrows the event mask so the blocking
        XNextEvent loop only sees structure events.
        """
        if not self._mapped:
            return
        xlib.XSelectInput(
            self._x_display, self._window, xlib.StructureNotifyMask)
        xlib.XUnmapWindow(self._x_display, self._window)
        e = xlib.XEvent()
        while True:
            xlib.XNextEvent(self._x_display, e)
            if e.type == xlib.UnmapNotify:
                break
        xlib.XSelectInput(
            self._x_display, self._window, self._default_event_mask)
        self._mapped = False
    def _get_root(self):
        """Return the root window of the screen this window is on."""
        attributes = xlib.XWindowAttributes()
        xlib.XGetWindowAttributes(self._x_display, self._window,
                                  byref(attributes))
        return attributes.root
    def close(self):
        """Destroy the GL context, window, and input context; idempotent.

        NOTE(review): only the ``_window`` entry is removed from
        ``display._window_map``; the ``_view`` entry registered at creation
        appears to be left behind — confirm whether that is intentional.
        """
        if not self._window:
            return
        self.context.destroy()
        self._unmap()
        if self._window:
            xlib.XDestroyWindow(self._x_display, self._window)
        del self.display._window_map[self._window]
        self._window = None
        if _have_utf8:
            xlib.XDestroyIC(self._x_ic)
            self._x_ic = None
        super(XlibWindow, self).close()
def switch_to(self):
if self.context:
self.context.set_current()
def flip(self):
self.draw_mouse_cursor()
# TODO canvas.flip?
if self.context:
self.context.flip()
self._sync_resize()
def set_vsync(self, vsync):
if pyglet.options['vsync'] is not None:
vsync = pyglet.options['vsync']
self._vsync = vsync
self.context.set_vsync(vsync)
    def set_caption(self, caption):
        """Set the window title; None is treated as the empty string.

        Both the legacy ICCCM properties (WM_NAME/WM_ICON_NAME, no UTF-8)
        and the EWMH ones (_NET_WM_NAME/_NET_WM_ICON_NAME) are updated.
        """
        if caption is None:
            caption = ''
        self._caption = caption
        self._set_text_property('WM_NAME', caption, allow_utf8=False)
        self._set_text_property('WM_ICON_NAME', caption, allow_utf8=False)
        self._set_text_property('_NET_WM_NAME', caption)
        self._set_text_property('_NET_WM_ICON_NAME', caption)
    def get_caption(self):
        """Return the caption last set with set_caption()."""
        return self._caption
    def set_size(self, width, height):
        """Resize the window's client area.

        :raises WindowException: when the window is fullscreen.

        For non-resizable windows the WM min/max size hints are pinned to
        the new size so the user cannot resize it afterwards.
        """
        if self._fullscreen:
            raise WindowException('Cannot set size of fullscreen window.')
        self._width = width
        self._height = height
        if not self._resizable:
            self.set_minimum_size(width, height)
            self.set_maximum_size(width, height)
        xlib.XResizeWindow(self._x_display, self._window, width, height)
        self._update_view_size()
        self.dispatch_event('on_resize', width, height)
    def _update_view_size(self):
        """Keep the inner view window the same size as the client area."""
        xlib.XResizeWindow(self._x_display, self._view,
                           self._width, self._height)
    def get_size(self):
        """Return the cached (width, height) of the client area."""
        # XGetGeometry and XWindowAttributes seem to always return the
        # original size of the window, which is wrong after the user
        # has resized it.
        # XXX this is probably fixed now, with fix of resize.
        return self._width, self._height
    def set_location(self, x, y):
        """Move the window so its client area is at (x, y) root coords."""
        # Assume the window manager has reparented our top-level window
        # only once, in which case attributes.x/y give the offset from
        # the frame to the content window.  Better solution would be
        # to use _NET_FRAME_EXTENTS, where supported.
        attributes = xlib.XWindowAttributes()
        xlib.XGetWindowAttributes(self._x_display, self._window,
                                  byref(attributes))
        # XXX at least under KDE's WM these attrs are both 0
        x -= attributes.x
        y -= attributes.y
        xlib.XMoveWindow(self._x_display, self._window, x, y)
    def get_location(self):
        """Return the (x, y) position of the client area in root-window
        coordinates, via XTranslateCoordinates."""
        child = xlib.Window()
        x = c_int()
        y = c_int()
        xlib.XTranslateCoordinates(self._x_display,
                                   self._window,
                                   self._get_root(),
                                   0, 0,
                                   byref(x),
                                   byref(y),
                                   byref(child))
        return x.value, y.value
    def activate(self):
        """Give this window keyboard input focus."""
        xlib.XSetInputFocus(self._x_display, self._window,
                            xlib.RevertToParent, xlib.CurrentTime)
def set_visible(self, visible=True):
if visible:
self._map()
else:
self._unmap()
self._visible = visible
def set_minimum_size(self, width, height):
self._minimum_size = width, height
self._set_wm_normal_hints()
def set_maximum_size(self, width, height):
self._maximum_size = width, height
self._set_wm_normal_hints()
    def minimize(self):
        """Iconify (minimize) the window via the window manager."""
        xlib.XIconifyWindow(self._x_display, self._window, self._x_screen_id)
    def maximize(self):
        """Maximize the window via the EWMH _NET_WM_STATE protocol."""
        self._set_wm_state('_NET_WM_STATE_MAXIMIZED_HORZ',
                           '_NET_WM_STATE_MAXIMIZED_VERT')
def set_mouse_platform_visible(self, platform_visible=None):
if platform_visible is None:
platform_visible = self._mouse_visible and \
not self._mouse_cursor.drawable
if not platform_visible:
# Hide pointer by creating an empty cursor
black = xlib.XBlackPixel(self._x_display, self._x_screen_id)
black = xlib.XColor()
bmp = xlib.XCreateBitmapFromData(self._x_display, self._window,
c_buffer(8), 8, 8)
cursor = xlib.XCreatePixmapCursor(self._x_display, bmp, bmp,
black, black, 0, 0)
xlib.XDefineCursor(self._x_display, self._window, cursor)
xlib.XFreeCursor(self._x_display, cursor)
xlib.XFreePixmap(self._x_display, bmp)
else:
# Restore cursor
if isinstance(self._mouse_cursor, XlibMouseCursor):
xlib.XDefineCursor(self._x_display, self._window,
self._mouse_cursor.cursor)
else:
xlib.XUndefineCursor(self._x_display, self._window)
    def _update_exclusivity(self):
        """Apply or release pointer/keyboard grabs so the actual grab state
        matches the requested exclusivity flags.

        Grabs are only held while the window is active; the *_applied_*
        attributes cache what has actually been done so this is cheap to
        call repeatedly.
        """
        mouse_exclusive = self._active and self._mouse_exclusive
        keyboard_exclusive = self._active and self._keyboard_exclusive
        if mouse_exclusive != self._applied_mouse_exclusive:
            if mouse_exclusive:
                self.set_mouse_platform_visible(False)
                # Restrict to client area
                xlib.XGrabPointer(self._x_display, self._window,
                                  True,
                                  0,
                                  xlib.GrabModeAsync,
                                  xlib.GrabModeAsync,
                                  self._window,
                                  0,
                                  xlib.CurrentTime)
                # Move pointer to center of window
                x = self._width / 2
                y = self._height / 2
                self._mouse_exclusive_client = x, y
                xlib.XWarpPointer(self._x_display,
                                  0,              # src window
                                  self._window,   # dst window
                                  0, 0,           # src x, y
                                  0, 0,           # src w, h
                                  x, y)
            elif self._fullscreen and not self.screen._xinerama:
                # Restrict to fullscreen area (prevent viewport scrolling)
                xlib.XWarpPointer(self._x_display,
                                  0,              # src window
                                  self._view,     # dst window
                                  0, 0,           # src x, y
                                  0, 0,           # src w, h
                                  0, 0)
                r = xlib.XGrabPointer(self._x_display, self._view,
                                      True, 0,
                                      xlib.GrabModeAsync,
                                      xlib.GrabModeAsync,
                                      self._view,
                                      0,
                                      xlib.CurrentTime)
                # Non-zero return means the grab failed.
                if r:
                    # Failed to grab, try again later
                    self._applied_mouse_exclusive = None
                    return
                self.set_mouse_platform_visible()
            else:
                # Unclip
                xlib.XUngrabPointer(self._x_display, xlib.CurrentTime)
                self.set_mouse_platform_visible()
            self._applied_mouse_exclusive = mouse_exclusive
        if keyboard_exclusive != self._applied_keyboard_exclusive:
            if keyboard_exclusive:
                xlib.XGrabKeyboard(self._x_display,
                                   self._window,
                                   False,
                                   xlib.GrabModeAsync,
                                   xlib.GrabModeAsync,
                                   xlib.CurrentTime)
            else:
                xlib.XUngrabKeyboard(self._x_display, xlib.CurrentTime)
            self._applied_keyboard_exclusive = keyboard_exclusive
def set_exclusive_mouse(self, exclusive=True):
if exclusive == self._mouse_exclusive:
return
self._mouse_exclusive = exclusive
self._update_exclusivity()
def set_exclusive_keyboard(self, exclusive=True):
if exclusive == self._keyboard_exclusive:
return
self._keyboard_exclusive = exclusive
self._update_exclusivity()
    def get_system_mouse_cursor(self, name):
        """Return a platform cursor for the given CURSOR_* name.

        :raises MouseCursorException: for unrecognized names.
        """
        if name == self.CURSOR_DEFAULT:
            return DefaultMouseCursor()
        # NQR means default shape is not pretty... surely there is another
        # cursor font?
        cursor_shapes = {
            self.CURSOR_CROSSHAIR:       cursorfont.XC_crosshair,
            self.CURSOR_HAND:            cursorfont.XC_hand2,
            self.CURSOR_HELP:            cursorfont.XC_question_arrow,  # NQR
            self.CURSOR_NO:              cursorfont.XC_pirate,          # NQR
            self.CURSOR_SIZE:            cursorfont.XC_fleur,
            self.CURSOR_SIZE_UP:         cursorfont.XC_top_side,
            self.CURSOR_SIZE_UP_RIGHT:   cursorfont.XC_top_right_corner,
            self.CURSOR_SIZE_RIGHT:      cursorfont.XC_right_side,
            self.CURSOR_SIZE_DOWN_RIGHT: cursorfont.XC_bottom_right_corner,
            self.CURSOR_SIZE_DOWN:       cursorfont.XC_bottom_side,
            self.CURSOR_SIZE_DOWN_LEFT:  cursorfont.XC_bottom_left_corner,
            self.CURSOR_SIZE_LEFT:       cursorfont.XC_left_side,
            self.CURSOR_SIZE_UP_LEFT:    cursorfont.XC_top_left_corner,
            self.CURSOR_SIZE_UP_DOWN:    cursorfont.XC_sb_v_double_arrow,
            self.CURSOR_SIZE_LEFT_RIGHT: cursorfont.XC_sb_h_double_arrow,
            self.CURSOR_TEXT:            cursorfont.XC_xterm,
            self.CURSOR_WAIT:            cursorfont.XC_watch,
            self.CURSOR_WAIT_ARROW:      cursorfont.XC_watch,           # NQR
        }
        if name not in cursor_shapes:
            raise MouseCursorException('Unknown cursor name "%s"' % name)
        cursor = xlib.XCreateFontCursor(self._x_display, cursor_shapes[name])
        return XlibMouseCursor(cursor)
def set_icon(self, *images):
# Careful! XChangeProperty takes an array of long when data type
# is 32-bit (but long can be 64 bit!), so pad high bytes of format if
# necessary.
import sys
format = {
('little', 4): 'BGRA',
('little', 8): 'BGRAAAAA',
('big', 4): 'ARGB',
('big', 8): 'AAAAARGB'
}[(sys.byteorder, sizeof(c_ulong))]
data = ''
for image in images:
image = image.get_image_data()
pitch = -(image.width * len(format))
s = c_buffer(sizeof(c_ulong) * 2)
memmove(s, cast((c_ulong * 2)(image.width, image.height),
POINTER(c_ubyte)), len(s))
data += s.raw + image.get_data(format, pitch)
buffer = (c_ubyte * len(data))()
memmove(buffer, data, len(data))
atom = xlib.XInternAtom(self._x_display, '_NET_WM_ICON', False)
xlib.XChangeProperty(self._x_display, self._window, atom, XA_CARDINAL,
32, xlib.PropModeReplace, buffer, len(data)/sizeof(c_ulong))
# Private utility
    def _set_wm_normal_hints(self):
        """Publish the current min/max size constraints as WM_NORMAL_HINTS."""
        hints = xlib.XAllocSizeHints().contents
        if self._minimum_size:
            hints.flags |= xlib.PMinSize
            hints.min_width, hints.min_height = self._minimum_size
        if self._maximum_size:
            hints.flags |= xlib.PMaxSize
            hints.max_width, hints.max_height = self._maximum_size
        xlib.XSetWMNormalHints(self._x_display, self._window, byref(hints))
    def _set_text_property(self, name, value, allow_utf8=True):
        """Set a text window property (e.g. WM_NAME) on this window.

        :param name: property name to intern.
        :param value: str or unicode value; encoded UTF-8 when allowed,
            otherwise ASCII with unencodable characters dropped.
        :param allow_utf8: whether the property may use the UTF8 style.
        :raises XlibException: if the atom is undefined or conversion fails.
        """
        atom = xlib.XInternAtom(self._x_display, name, False)
        if not atom:
            raise XlibException('Undefined atom "%s"' % name)
        assert type(value) in (str, unicode)
        property = xlib.XTextProperty()
        if _have_utf8 and allow_utf8:
            buf = create_string_buffer(value.encode('utf8'))
            result = xlib.Xutf8TextListToTextProperty(self._x_display,
                cast(pointer(buf), c_char_p), 1, xlib.XUTF8StringStyle,
                byref(property))
            if result < 0:
                raise XlibException('Could not create UTF8 text property')
        else:
            buf = create_string_buffer(value.encode('ascii', 'ignore'))
            result = xlib.XStringListToTextProperty(
                cast(pointer(buf), c_char_p), 1, byref(property))
            if result < 0:
                raise XlibException('Could not create text property')
        xlib.XSetTextProperty(self._x_display,
                              self._window, byref(property), atom)
        # XXX <rj> Xlib doesn't like us freeing this
        #xlib.XFree(property.value)
def _set_atoms_property(self, name, values, mode=xlib.PropModeReplace):
name_atom = xlib.XInternAtom(self._x_display, name, False)
atoms = []
for value in values:
atoms.append(xlib.XInternAtom(self._x_display, value, False))
atom_type = xlib.XInternAtom(self._x_display, 'ATOM', False)
if len(atoms):
atoms_ar = (xlib.Atom * len(atoms))(*atoms)
xlib.XChangeProperty(self._x_display, self._window,
name_atom, atom_type, 32, mode,
cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
else:
xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)
    def _set_wm_state(self, *states):
        """Add the given _NET_WM_STATE_* atoms to the window state and
        notify the window manager with a ClientMessage (per EWMH)."""
        # Set property
        net_wm_state = xlib.XInternAtom(self._x_display, '_NET_WM_STATE', False)
        atoms = []
        for state in states:
            atoms.append(xlib.XInternAtom(self._x_display, state, False))
        atom_type = xlib.XInternAtom(self._x_display, 'ATOM', False)
        if len(atoms):
            atoms_ar = (xlib.Atom * len(atoms))(*atoms)
            xlib.XChangeProperty(self._x_display, self._window,
                net_wm_state, atom_type, 32, xlib.PropModePrepend,
                cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
        else:
            xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)
        # Nudge the WM
        e = xlib.XEvent()
        e.xclient.type = xlib.ClientMessage
        e.xclient.message_type = net_wm_state
        e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
        e.xclient.window = self._window
        e.xclient.format = 32
        e.xclient.data.l[0] = xlib.PropModePrepend
        for i, atom in enumerate(atoms):
            e.xclient.data.l[i + 1] = atom
        xlib.XSendEvent(self._x_display, self._get_root(),
                        False, xlib.SubstructureRedirectMask, byref(e))
# Event handling
    def dispatch_events(self):
        """Poll and dispatch all pending X events for this window and its
        inner view, then emit any deferred on_resize/on_expose."""
        self.dispatch_pending_events()
        self._allow_dispatch_event = True
        e = xlib.XEvent()
        # Cache these in case window is closed from an event handler
        _x_display = self._x_display
        _window = self._window
        _view = self._view
        # Check for the events specific to this window
        while xlib.XCheckWindowEvent(_x_display, _window,
                                     0x1ffffff, byref(e)):
            # Key events are filtered by the xlib window event
            # handler so they get a shot at the prefiltered event.
            if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease):
                if xlib.XFilterEvent(e, 0):
                    continue
            self.dispatch_platform_event(e)
        # Check for the events specific to this view
        while xlib.XCheckWindowEvent(_x_display, _view,
                                     0x1ffffff, byref(e)):
            # Key events are filtered by the xlib window event
            # handler so they get a shot at the prefiltered event.
            if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease):
                if xlib.XFilterEvent(e, 0):
                    continue
            self.dispatch_platform_event_view(e)
        # Generic events for this window (the window close event).
        while xlib.XCheckTypedWindowEvent(_x_display, _window,
                                          xlib.ClientMessage, byref(e)):
            self.dispatch_platform_event(e)
        if self._needs_resize:
            self.dispatch_event('on_resize', self._width, self._height)
            self.dispatch_event('on_expose')
            self._needs_resize = False
        self._allow_dispatch_event = False
    def dispatch_pending_events(self):
        """Flush the queued pyglet events, then any deferred GL context
        loss notifications."""
        while self._event_queue:
            EventDispatcher.dispatch_event(self, *self._event_queue.pop(0))
        # Dispatch any context-related events
        if self._lost_context:
            self._lost_context = False
            EventDispatcher.dispatch_event(self, 'on_context_lost')
        if self._lost_context_state:
            self._lost_context_state = False
            EventDispatcher.dispatch_event(self, 'on_context_state_lost')
def dispatch_platform_event(self, e):
if self._applied_mouse_exclusive is None:
self._update_exclusivity()
event_handler = self._event_handlers.get(e.type)
if event_handler:
event_handler(e)
def dispatch_platform_event_view(self, e):
event_handler = self._view_event_handlers.get(e.type)
if event_handler:
event_handler(e)
@staticmethod
def _translate_modifiers(state):
modifiers = 0
if state & xlib.ShiftMask:
modifiers |= key.MOD_SHIFT
if state & xlib.ControlMask:
modifiers |= key.MOD_CTRL
if state & xlib.LockMask:
modifiers |= key.MOD_CAPSLOCK
if state & xlib.Mod1Mask:
modifiers |= key.MOD_ALT
if state & xlib.Mod2Mask:
modifiers |= key.MOD_NUMLOCK
if state & xlib.Mod4Mask:
modifiers |= key.MOD_WINDOWS
if state & xlib.Mod5Mask:
modifiers |= key.MOD_SCROLLLOCK
return modifiers
# Event handlers
'''
def _event_symbol(self, event):
# pyglet.self.key keysymbols are identical to X11 keysymbols, no
# need to map the keysymbol.
symbol = xlib.XKeycodeToKeysym(self._x_display, event.xkey.keycode, 0)
if symbol == 0:
# XIM event
return None
elif symbol not in key._key_names.keys():
symbol = key.user_key(event.xkey.keycode)
return symbol
'''
    def _event_text_symbol(self, ev):
        """Decode a key event into a (text, symbol) pair.

        Returns the typed unicode text (or None) and the pyglet key symbol
        (or None for XIM-virtual keysyms).  XIM-filtered events keep their
        symbol but never yield text.
        """
        text = None
        symbol = xlib.KeySym()
        buffer = create_string_buffer(128)
        # Look up raw keysym before XIM filters it (default for keypress and
        # keyrelease)
        count = xlib.XLookupString(ev.xkey,
                                   buffer, len(buffer) - 1,
                                   byref(symbol), None)
        # Give XIM a shot
        filtered = xlib.XFilterEvent(ev, ev.xany.window)
        if ev.type == xlib.KeyPress and not filtered:
            status = c_int()
            if _have_utf8:
                encoding = 'utf8'
                count = xlib.Xutf8LookupString(self._x_ic,
                                               ev.xkey,
                                               buffer, len(buffer) - 1,
                                               byref(symbol), byref(status))
                if status.value == xlib.XBufferOverflow:
                    raise NotImplementedError('TODO: XIM buffer resize')
            else:
                encoding = 'ascii'
                count = xlib.XLookupString(ev.xkey,
                                           buffer, len(buffer) - 1,
                                           byref(symbol), None)
                if count:
                    status.value = xlib.XLookupBoth
            if status.value & (xlib.XLookupChars | xlib.XLookupBoth):
                text = buffer.value[:count].decode(encoding)
            # Don't treat Unicode command codepoints as text, except Return.
            if text and unicodedata.category(text) == 'Cc' and text != '\r':
                text = None
        symbol = symbol.value
        # If the event is a XIM filtered event, the keysym will be virtual
        # (e.g., aacute instead of A after a dead key). Drop it, we don't
        # want these kind of key events.
        if ev.xkey.keycode == 0 and not filtered:
            symbol = None
        # pyglet.self.key keysymbols are identical to X11 keysymbols, no
        # need to map the keysymbol. For keysyms outside the pyglet set, map
        # raw key code to a user key.
        if symbol and symbol not in key._key_names and ev.xkey.keycode:
            # Issue 353: Symbol is uppercase when shift key held down.
            symbol = ord(unichr(symbol).lower())
            # If still not recognised, use the keycode
            if symbol not in key._key_names:
                symbol = key.user_key(ev.xkey.keycode)
        if filtered:
            # The event was filtered, text must be ignored, but the symbol is
            # still good.
            return None, symbol
        return text, symbol
def _event_text_motion(self, symbol, modifiers):
if modifiers & key.MOD_ALT:
return None
ctrl = modifiers & key.MOD_CTRL != 0
return _motion_map.get((symbol, ctrl), None)
    @ViewEventHandler
    @XlibEventHandler(xlib.KeyPress)
    @XlibEventHandler(xlib.KeyRelease)
    def _event_key_view(self, ev):
        """Handle KeyPress/KeyRelease.

        Auto-repeat arrives as a KeyRelease immediately followed by a
        KeyPress with the same keycode; such pairs are collapsed into
        on_text / on_text_motion events without key press/release events.
        Otherwise dispatches on_key_press / on_key_release plus any
        on_text / on_text_motion(_select) events.
        """
        if ev.type == xlib.KeyRelease:
            # Look in the queue for a matching KeyPress with same timestamp,
            # indicating an auto-repeat rather than actual key event.
            saved = []
            while True:
                auto_event = xlib.XEvent()
                result = xlib.XCheckWindowEvent(self._x_display,
                    self._window, xlib.KeyPress|xlib.KeyRelease,
                    byref(auto_event))
                if not result:
                    break
                saved.append(auto_event)
                if auto_event.type == xlib.KeyRelease:
                    # just save this off for restoration back to the queue
                    continue
                if ev.xkey.keycode == auto_event.xkey.keycode:
                    # Found a key repeat: dispatch EVENT_TEXT* event
                    text, symbol = self._event_text_symbol(auto_event)
                    modifiers = self._translate_modifiers(ev.xkey.state)
                    modifiers_ctrl = modifiers & (key.MOD_CTRL | key.MOD_ALT)
                    motion = self._event_text_motion(symbol, modifiers)
                    if motion:
                        if modifiers & key.MOD_SHIFT:
                            self.dispatch_event(
                                'on_text_motion_select', motion)
                        else:
                            self.dispatch_event('on_text_motion', motion)
                    elif text and not modifiers_ctrl:
                        self.dispatch_event('on_text', text)
                    # Drop the repeat's KeyPress (the last event saved);
                    # earlier saved events go back onto the queue.
                    ditched = saved.pop()
                    for auto_event in reversed(saved):
                        xlib.XPutBackEvent(self._x_display, byref(auto_event))
                    return
                else:
                    # Key code of press did not match, therefore no repeating
                    # is going on, stop searching.
                    break
            # Whoops, put the events back, it's for real.
            for auto_event in reversed(saved):
                xlib.XPutBackEvent(self._x_display, byref(auto_event))
        text, symbol = self._event_text_symbol(ev)
        modifiers = self._translate_modifiers(ev.xkey.state)
        modifiers_ctrl = modifiers & (key.MOD_CTRL | key.MOD_ALT)
        motion = self._event_text_motion(symbol, modifiers)
        if ev.type == xlib.KeyPress:
            if symbol:
                self.dispatch_event('on_key_press', symbol, modifiers)
            if motion:
                if modifiers & key.MOD_SHIFT:
                    self.dispatch_event('on_text_motion_select', motion)
                else:
                    self.dispatch_event('on_text_motion', motion)
            elif text and not modifiers_ctrl:
                self.dispatch_event('on_text', text)
        elif ev.type == xlib.KeyRelease:
            if symbol:
                self.dispatch_event('on_key_release', symbol, modifiers)
    @XlibEventHandler(xlib.KeyPress)
    @XlibEventHandler(xlib.KeyRelease)
    def _event_key(self, ev):
        # Window-level key events are handled identically to view-level ones.
        return self._event_key_view(ev)
    @ViewEventHandler
    @XlibEventHandler(xlib.MotionNotify)
    def _event_motionnotify_view(self, ev):
        """Handle pointer motion inside the view: track position, compute
        deltas, and dispatch on_mouse_drag or on_mouse_motion.

        In exclusive-mouse mode the pointer is warped back to the window
        centre after each event, and the warp's own event is ignored.
        """
        # Flip y to pyglet's bottom-left origin.
        x = ev.xmotion.x
        y = self.height - ev.xmotion.y
        if self._mouse_in_window:
            dx = x - self._mouse_x
            dy = y - self._mouse_y
        else:
            dx = dy = 0
        if self._applied_mouse_exclusive and \
                (ev.xmotion.x, ev.xmotion.y) == self._mouse_exclusive_client:
            # Ignore events caused by XWarpPointer
            self._mouse_x = x
            self._mouse_y = y
            return
        if self._applied_mouse_exclusive:
            # Reset pointer position
            ex, ey = self._mouse_exclusive_client
            xlib.XWarpPointer(self._x_display,
                              0,
                              self._window,
                              0, 0,
                              0, 0,
                              ex, ey)
        self._mouse_x = x
        self._mouse_y = y
        self._mouse_in_window = True
        buttons = 0
        if ev.xmotion.state & xlib.Button1MotionMask:
            buttons |= mouse.LEFT
        if ev.xmotion.state & xlib.Button2MotionMask:
            buttons |= mouse.MIDDLE
        if ev.xmotion.state & xlib.Button3MotionMask:
            buttons |= mouse.RIGHT
        if buttons:
            # Drag event
            modifiers = self._translate_modifiers(ev.xmotion.state)
            self.dispatch_event('on_mouse_drag',
                                x, y, dx, dy, buttons, modifiers)
        else:
            # Motion event
            self.dispatch_event('on_mouse_motion', x, y, dx, dy)
    @XlibEventHandler(xlib.MotionNotify)
    def _event_motionnotify(self, ev):
        # Window motion looks for drags that are outside the view but within
        # the window.
        buttons = 0
        if ev.xmotion.state & xlib.Button1MotionMask:
            buttons |= mouse.LEFT
        if ev.xmotion.state & xlib.Button2MotionMask:
            buttons |= mouse.MIDDLE
        if ev.xmotion.state & xlib.Button3MotionMask:
            buttons |= mouse.RIGHT
        if buttons:
            # Drag event
            # Translate window coords into view coords (y flipped).
            x = ev.xmotion.x - self._view_x
            y = self._height - (ev.xmotion.y - self._view_y)
            if self._mouse_in_window:
                dx = x - self._mouse_x
                dy = y - self._mouse_y
            else:
                dx = dy = 0
            self._mouse_x = x
            self._mouse_y = y
            modifiers = self._translate_modifiers(ev.xmotion.state)
            self.dispatch_event('on_mouse_drag',
                                x, y, dx, dy, buttons, modifiers)
    @XlibEventHandler(xlib.ClientMessage)
    def _event_clientmessage(self, ev):
        """Handle WM protocol messages: window close requests and
        _NET_WM_SYNC_REQUEST resize-sync counter updates."""
        atom = ev.xclient.data.l[0]
        if atom == xlib.XInternAtom(ev.xclient.display,
                                    'WM_DELETE_WINDOW', False):
            self.dispatch_event('on_close')
        elif (self._enable_xsync and
              atom == xlib.XInternAtom(ev.xclient.display,
                                       '_NET_WM_SYNC_REQUEST', False)):
            # 64-bit sync counter value split over two 32-bit longs.
            lo = ev.xclient.data.l[2]
            hi = ev.xclient.data.l[3]
            self._current_sync_value = xsync.XSyncValue(hi, lo)
    def _sync_resize(self):
        """Acknowledge a completed redraw after a resize by updating the
        _NET_WM_SYNC_REQUEST_COUNTER the WM is waiting on."""
        if self._enable_xsync and self._current_sync_valid:
            if xsync.XSyncValueIsZero(self._current_sync_value):
                self._current_sync_valid = False
                return
            xsync.XSyncSetCounter(self._x_display,
                                  self._sync_counter,
                                  self._current_sync_value)
            self._current_sync_value = None
            self._current_sync_valid = False
    @ViewEventHandler
    @XlibEventHandler(xlib.ButtonPress)
    @XlibEventHandler(xlib.ButtonRelease)
    def _event_button(self, ev):
        """Handle mouse button press/release; X buttons 4 and 5 are the
        scroll wheel and dispatch on_mouse_scroll instead."""
        x = ev.xbutton.x
        y = self.height - ev.xbutton.y
        button = 1 << (ev.xbutton.button - 1)  # 1, 2, 3 -> 1, 2, 4
        modifiers = self._translate_modifiers(ev.xbutton.state)
        if ev.type == xlib.ButtonPress:
            # override_redirect issue: manually activate this window if
            # fullscreen.
            if self._override_redirect and not self._active:
                self.activate()
            if ev.xbutton.button == 4:
                self.dispatch_event('on_mouse_scroll', x, y, 0, 1)
            elif ev.xbutton.button == 5:
                self.dispatch_event('on_mouse_scroll', x, y, 0, -1)
            elif ev.xbutton.button < len(self._mouse_buttons):
                self._mouse_buttons[ev.xbutton.button] = True
                self.dispatch_event('on_mouse_press',
                                    x, y, button, modifiers)
        else:
            if ev.xbutton.button < 4:
                self._mouse_buttons[ev.xbutton.button] = False
                self.dispatch_event('on_mouse_release',
                                    x, y, button, modifiers)
@ViewEventHandler
@XlibEventHandler(xlib.Expose)
def _event_expose(self, ev):
# Ignore all expose events except the last one. We could be told
# about exposure rects - but I don't see the point since we're
# working with OpenGL and we'll just redraw the whole scene.
if ev.xexpose.count > 0: return
self.dispatch_event('on_expose')
@ViewEventHandler
@XlibEventHandler(xlib.EnterNotify)
def _event_enternotify(self, ev):
# figure active mouse buttons
# XXX ignore modifier state?
state = ev.xcrossing.state
self._mouse_buttons[1] = state & xlib.Button1Mask
self._mouse_buttons[2] = state & xlib.Button2Mask
self._mouse_buttons[3] = state & xlib.Button3Mask
self._mouse_buttons[4] = state & xlib.Button4Mask
self._mouse_buttons[5] = state & xlib.Button5Mask
# mouse position
x = self._mouse_x = ev.xcrossing.x
y = self._mouse_y = self.height - ev.xcrossing.y
self._mouse_in_window = True
# XXX there may be more we could do here
self.dispatch_event('on_mouse_enter', x, y)
@ViewEventHandler
@XlibEventHandler(xlib.LeaveNotify)
def _event_leavenotify(self, ev):
x = self._mouse_x = ev.xcrossing.x
y = self._mouse_y = self.height - ev.xcrossing.y
self._mouse_in_window = False
self.dispatch_event('on_mouse_leave', x, y)
    @XlibEventHandler(xlib.ConfigureNotify)
    def _event_configurenotify(self, ev):
        """Track window move/resize; resize dispatch is deferred to
        dispatch_events() via the _needs_resize flag."""
        if self._enable_xsync and self._current_sync_value:
            self._current_sync_valid = True
        if self._fullscreen:
            return
        self.switch_to()
        w, h = ev.xconfigure.width, ev.xconfigure.height
        x, y = ev.xconfigure.x, ev.xconfigure.y
        if self._width != w or self._height != h:
            self._update_view_size()
            self._width = w
            self._height = h
            self._needs_resize = True
        if self._x != x or self._y != y:
            self.dispatch_event('on_move', x, y)
            self._x = x
            self._y = y
    @XlibEventHandler(xlib.FocusIn)
    def _event_focusin(self, ev):
        """Window gained focus: re-apply grabs and focus the XIM context."""
        self._active = True
        self._update_exclusivity()
        self.dispatch_event('on_activate')
        # NOTE(review): _x_ic may be None when UTF-8/XIM is unavailable —
        # confirm XSetICFocus tolerates a null input context.
        xlib.XSetICFocus(self._x_ic)
    @XlibEventHandler(xlib.FocusOut)
    def _event_focusout(self, ev):
        """Window lost focus: release grabs and unfocus the XIM context."""
        self._active = False
        self._update_exclusivity()
        self.dispatch_event('on_deactivate')
        # NOTE(review): _x_ic may be None when UTF-8/XIM is unavailable —
        # confirm XUnsetICFocus tolerates a null input context.
        xlib.XUnsetICFocus(self._x_ic)
    @XlibEventHandler(xlib.MapNotify)
    def _event_mapnotify(self, ev):
        """Window became mapped: update state, dispatch on_show, and
        re-apply any grabs."""
        self._mapped = True
        self.dispatch_event('on_show')
        self._update_exclusivity()
    @XlibEventHandler(xlib.UnmapNotify)
    def _event_unmapnotify(self, ev):
        """Window was unmapped (hidden/iconified): dispatch on_hide."""
        self._mapped = False
        self.dispatch_event('on_hide')
| |
import pylons
import difflib
pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
from pylons import g #g is a namespace for globally accessible app helpers
from pylons import c as context
from ming import schema
from ming.orm import FieldProperty, ForeignIdProperty, Mapper, session
from ming.orm.declarative import MappedClass
from allura.model import VersionedArtifact, Snapshot, Feed, Thread, Post, User, BaseAttachment
from allura.model import Notification, project_orm_session
from allura.model.timeline import ActivityObject
from allura.lib import helpers as h
from allura.lib import utils
config = utils.ConfigProxy(
common_suffix='forgemail.domain')
class Globals(MappedClass):
    """Per-wiki-app mapped state (collection 'wiki-globals'), holding the
    name of the wiki's root page."""
    class __mongometa__:
        name = 'wiki-globals'
        session = project_orm_session
        indexes = [ 'app_config_id' ]
    type_s = 'WikiGlobals'
    _id = FieldProperty(schema.ObjectId)
    # Defaults to the current app's config id, taken from the pylons context.
    app_config_id = ForeignIdProperty('AppConfig', if_missing=lambda:context.app.config._id)
    root = FieldProperty(str)
class PageHistory(Snapshot):
    """A point-in-time snapshot (version) of a wiki Page."""
    class __mongometa__:
        name='page_history'
    def original(self):
        """Return the live Page this snapshot was taken from."""
        return Page.query.get(_id=self.artifact_id)
    def authors(self):
        """Delegate to the live page's author list."""
        return self.original().authors()
    def shorthand_id(self):
        return '%s#%s' % (self.original().shorthand_id(), self.version)
    def url(self):
        return self.original().url() + '?version=%d' % self.version
    def index(self):
        """Build the search-index document for this snapshot."""
        result = Snapshot.index(self)
        result.update(
            title_s='Version %d of %s' % (
                self.version,self.original().title),
            type_s='WikiPage Snapshot',
            text=self.data.text)
        return result
    @property
    def html_text(self):
        """A markdown processed version of the page text"""
        return g.markdown_wiki.convert(self.data.text)
    @property
    def attachments(self):
        return self.original().attachments
    @property
    def email_address(self):
        return self.original().email_address
class Page(VersionedArtifact, ActivityObject):
    """A versioned wiki page artifact (collection 'page')."""
    class __mongometa__:
        name='page'
        history_class = PageHistory
    title=FieldProperty(str)
    text=FieldProperty(schema.String, if_missing='')
    viewable_by=FieldProperty([str])
    type_s = 'Wiki'
    @property
    def activity_name(self):
        return 'wiki page %s' % self.title
    def commit(self):
        """Save a new version, then post a feed entry and a notification
        whose body is a unified diff against the previous version (or the
        full text for a brand-new page)."""
        ss = VersionedArtifact.commit(self)
        session(self).flush()
        if self.version > 1:
            v1 = self.get_version(self.version-1)
            v2 = self
            la = [ line + '\n' for line in v1.text.splitlines() ]
            lb = [ line + '\n' for line in v2.text.splitlines() ]
            diff = ''.join(difflib.unified_diff(
                    la, lb,
                    'v%d' % v1.version,
                    'v%d' % v2.version))
            description = '<pre>' + diff + '</pre>'
            if v1.title != v2.title:
                subject = '%s renamed page %s to %s' % (
                    context.user.username, v1.title, v2.title)
            else:
                subject = '%s modified page %s' % (
                    context.user.username, self.title)
        else:
            description = self.text
            subject = '%s created page %s' % (
                context.user.username, self.title)
        Feed.post(self, title=None, description=description)
        Notification.post(
            artifact=self, topic='metadata', text=description, subject=subject)
        return ss
    @property
    def email_address(self):
        # Reverse the app URL path into a dotted domain component,
        # with underscores mapped to hyphens for mail-safety.
        domain = '.'.join(reversed(self.app.url[1:-1].split('/'))).replace('_', '-')
        return '%s@%s%s' % (self.title.replace('/', '.'), domain, config.common_suffix)
    @property
    def email_subject(self):
        return 'Discussion for %s page' % self.title
    def url(self):
        """URL of this page; deleted pages get a ?deleted=True marker."""
        s = self.app_config.url() + h.urlquote(self.title.encode('utf-8')) + '/'
        if self.deleted:
            s += '?deleted=True'
        return s
    def shorthand_id(self):
        return self.title
    def index(self):
        """Build the search-index document for the current page version."""
        result = VersionedArtifact.index(self)
        result.update(
            title_s='WikiPage %s' % self.title,
            version_i=self.version,
            type_s='WikiPage',
            text=self.text)
        return result
    @property
    def attachments(self):
        return WikiAttachment.query.find(dict(artifact_id=self._id, type='attachment'))
    @classmethod
    def upsert(cls, title, version=None):
        """Update page with `title` or insert new page with that name"""
        if version is None:
            #Check for existing page object
            obj = cls.query.get(
                app_config_id=context.app.config._id,
                title=title)
            if obj is None:
                obj = cls(
                    title=title,
                    app_config_id=context.app.config._id,
                    )
                # New pages get a discussion thread immediately.
                Thread.new(discussion_id=obj.app_config.discussion_id,
                       ref_id=obj.index_id())
            return obj
        else:
            # Fetch the snapshot of a specific version of the page.
            pg = cls.upsert(title)
            HC = cls.__mongometa__.history_class
            ss = HC.query.find({'artifact_id':pg._id, 'version':int(version)}).one()
            return ss
    @classmethod
    def attachment_class(cls):
        return WikiAttachment
    def reply(self, text):
        """Post *text* to the feed and as a reply in this page's thread."""
        Feed.post(self, text)
        # Get thread
        thread = Thread.query.get(artifact_id=self._id)
        return Post(
            discussion_id=thread.discussion_id,
            thread_id=thread._id,
            text=text)
    @property
    def html_text(self):
        """A markdown processed version of the page text"""
        return g.markdown_wiki.convert(self.text)
    def authors(self):
        """All the users that have edited this page"""
        def uniq(users):
            # Dedupe by username.
            # NOTE(review): `user.id` looks suspicious — ming models normally
            # expose `_id`, and the result is queried against '_id' below.
            # Confirm User.id exists.
            t = {}
            for user in users:
                t[user.username] = user.id
            return t.values()
        user_ids = uniq([r.author for r in self.history().all()])
        return User.query.find({'_id':{'$in':user_ids}}).all()
class WikiAttachment(BaseAttachment):
    """File attachment bound to a wiki Page artifact."""
    # Artifact class this attachment type belongs to.
    ArtifactType=Page

    class __mongometa__:
        # Discriminator value for polymorphic attachment storage.
        polymorphic_identity='WikiAttachment'

    attachment_type=FieldProperty(str, if_missing='WikiAttachment')
Mapper.compile_all()
| |
import uuid
import pytest
import stix2
from stix2.exceptions import (
AtLeastOnePropertyError, CustomContentError, DictionaryKeyError,
ExtraPropertiesError, ParseError,
)
from stix2.properties import (
DictionaryProperty, EmbeddedObjectProperty, ExtensionsProperty,
HashesProperty, IDProperty, ListProperty, ObservableProperty,
ReferenceProperty, STIXObjectProperty,
)
from stix2.v20.common import MarkingProperty
from . import constants
# Shared fixtures: an IDProperty bound to a custom type, plus a known-good ID.
ID_PROP = IDProperty('my-type', spec_version="2.0")
MY_ID = 'my-type--232c9d3f-49fc-4440-bb01-607f638778e7'


@pytest.mark.parametrize(
    "value", [
        MY_ID,
        'my-type--00000000-0000-4000-8000-000000000000',
    ],
)
def test_id_property_valid(value):
    # clean() returns (value, has_custom); valid IDs pass through unchanged.
    assert ID_PROP.clean(value) == (value, False)
# Every well-known constant ID, plus the marking/relationship ID lists.
CONSTANT_IDS = [
    constants.ATTACK_PATTERN_ID,
    constants.CAMPAIGN_ID,
    constants.COURSE_OF_ACTION_ID,
    constants.IDENTITY_ID,
    constants.INDICATOR_ID,
    constants.INTRUSION_SET_ID,
    constants.MALWARE_ID,
    constants.MARKING_DEFINITION_ID,
    constants.OBSERVED_DATA_ID,
    constants.RELATIONSHIP_ID,
    constants.REPORT_ID,
    constants.SIGHTING_ID,
    constants.THREAT_ACTOR_ID,
    constants.TOOL_ID,
    constants.VULNERABILITY_ID,
]
CONSTANT_IDS.extend(constants.MARKING_IDS)
CONSTANT_IDS.extend(constants.RELATIONSHIP_IDS)


@pytest.mark.parametrize("value", CONSTANT_IDS)
def test_id_property_valid_for_type(value):
    # Each constant ID must clean successfully against its own type prefix.
    # Local renamed from `type` to `obj_type` so the builtin isn't shadowed.
    obj_type = value.split('--', 1)[0]
    assert IDProperty(type=obj_type, spec_version="2.0").clean(value) == (value, False)
def test_id_property_wrong_type():
    # An ID whose type prefix doesn't match the property's type is rejected
    # with an exact, caller-facing message.
    with pytest.raises(ValueError) as excinfo:
        ID_PROP.clean('not-my-type--232c9d3f-49fc-4440-bb01-607f638778e7')
    assert str(excinfo.value) == "must start with 'my-type--'."


@pytest.mark.parametrize(
    "value", [
        'my-type--foo',
        # Not a v4 UUID
        'my-type--00000000-0000-0000-0000-000000000000',
        'my-type--' + str(uuid.uuid1()),
        'my-type--' + str(uuid.uuid3(uuid.NAMESPACE_DNS, "example.org")),
        'my-type--' + str(uuid.uuid5(uuid.NAMESPACE_DNS, "example.org")),
    ],
)
def test_id_property_not_a_valid_hex_uuid(value):
    # Non-UUID suffixes and non-version-4 UUIDs (v1/v3/v5, nil) are rejected.
    with pytest.raises(ValueError):
        ID_PROP.clean(value)
def test_id_property_default():
    # The property's generated default ID must itself pass validation.
    default = ID_PROP.default()
    assert ID_PROP.clean(default) == (default, False)
def test_reference_property_whitelist_standard_type():
    # Whitelisting a standard type: matching refs pass; any other type fails
    # regardless of the allow-custom flag (second clean() argument).
    ref_prop = ReferenceProperty(valid_types="identity", spec_version="2.0")
    result = ref_prop.clean(
        "identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
    )
    assert result == ("identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    with pytest.raises(ValueError):
        ref_prop.clean("foo--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)

    with pytest.raises(ValueError):
        ref_prop.clean("foo--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)


def test_reference_property_whitelist_custom_type():
    # Whitelisting a custom type still requires allow-custom to be True.
    ref_prop = ReferenceProperty(valid_types="my-type", spec_version="2.0")
    with pytest.raises(ValueError):
        ref_prop.clean("not-my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    with pytest.raises(ValueError):
        ref_prop.clean("not-my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)

    with pytest.raises(CustomContentError):
        # This is the whitelisted type, but it's still custom, and
        # customization is disallowed here.
        ref_prop.clean("my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    result = ref_prop.clean("my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)


def test_reference_property_whitelist_generic_type():
    # Generic category names ("SCO", "SRO") admit any concrete type in that
    # category; unknown types are treated as custom members of a category.
    ref_prop = ReferenceProperty(
        valid_types=["SCO", "SRO"], spec_version="2.0",
    )
    result = ref_prop.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    result = ref_prop.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    result = ref_prop.clean(
        "sighting--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
    )
    assert result == ("sighting--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    result = ref_prop.clean(
        "sighting--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("sighting--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    # The prop assumes some-type is a custom type of one of the generic
    # type categories.
    result = ref_prop.clean(
        "some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)

    with pytest.raises(ValueError):
        ref_prop.clean("some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    # "identity" is an SDO, which is outside both whitelisted categories.
    with pytest.raises(ValueError):
        ref_prop.clean("identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    with pytest.raises(ValueError):
        ref_prop.clean("identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
def test_reference_property_blacklist_standard_type():
    # Blacklisting one standard type: everything else passes; unknown types
    # additionally need allow-custom=True.
    ref_prop = ReferenceProperty(invalid_types="identity", spec_version="2.0")
    result = ref_prop.clean(
        "malware--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
    )
    assert result == ("malware--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    result = ref_prop.clean(
        "malware--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("malware--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    with pytest.raises(CustomContentError):
        ref_prop.clean(
            "some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
        )

    result = ref_prop.clean(
        "some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)

    # The blacklisted type is rejected with either flag value.
    with pytest.raises(ValueError):
        ref_prop.clean(
            "identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
        )

    with pytest.raises(ValueError):
        ref_prop.clean(
            "identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
        )


def test_reference_property_blacklist_generic_type():
    # Blacklisting whole categories (SDO, SRO) rejects all of their concrete
    # members (identity, relationship) while SCOs like file still pass.
    ref_prop = ReferenceProperty(
        invalid_types=["SDO", "SRO"], spec_version="2.0",
    )
    result = ref_prop.clean(
        "file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
    )
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    result = ref_prop.clean(
        "file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    with pytest.raises(CustomContentError):
        ref_prop.clean(
            "some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
        )

    result = ref_prop.clean(
        "some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)

    with pytest.raises(ValueError):
        ref_prop.clean(
            "identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
        )

    with pytest.raises(ValueError):
        ref_prop.clean(
            "identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
        )

    with pytest.raises(ValueError):
        ref_prop.clean(
            "relationship--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
        )

    with pytest.raises(ValueError):
        ref_prop.clean(
            "relationship--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
        )
def test_reference_property_whitelist_hybrid_type():
    # Mixing a concrete custom type with a generic category in the whitelist.
    p = ReferenceProperty(valid_types=["a", "SCO"], spec_version="2.0")
    result = p.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    result = p.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    with pytest.raises(CustomContentError):
        # although whitelisted, "a" is a custom type
        p.clean("a--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    result = p.clean("a--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("a--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)

    with pytest.raises(ValueError):
        p.clean("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    # should just assume "b" is a custom SCO type.
    result = p.clean("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)


def test_reference_property_blacklist_hybrid_type():
    # Blacklisting a custom type together with the whole SCO category.
    p = ReferenceProperty(invalid_types=["a", "SCO"], spec_version="2.0")
    with pytest.raises(ValueError):
        p.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    with pytest.raises(ValueError):
        p.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)

    with pytest.raises(ValueError):
        p.clean("a--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    with pytest.raises(ValueError):
        p.clean("a--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)

    with pytest.raises(CustomContentError):
        p.clean("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)

    # should just assume "b" is a custom type which is not an SCO
    result = p.clean("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)


def test_reference_property_impossible_constraint():
    # An empty whitelist can never be satisfied; constructor must refuse it.
    with pytest.raises(ValueError):
        ReferenceProperty(valid_types=[], spec_version="2.0")
@pytest.mark.parametrize(
    "d", [
        {'description': 'something'},
        [('abc', 1), ('bcd', 2), ('cde', 3)],
    ],
)
def test_dictionary_property_valid(d):
    # Plain dicts and key/value tuple lists are both accepted.
    dict_prop = DictionaryProperty(spec_version="2.0")
    assert dict_prop.clean(d)


@pytest.mark.parametrize(
    "d", [
        [{'a': 'something'}, "Invalid dictionary key a: (shorter than 3 characters)."],
        [
            {'a'*300: 'something'}, "Invalid dictionary key aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            "aaaaaaaaaaaaaaaaaaaaaaa: (longer than 256 characters).",
        ],
        [
            {'Hey!': 'something'}, "Invalid dictionary key Hey!: (contains characters other than lowercase a-z, "
            "uppercase A-Z, numerals 0-9, hyphen (-), or underscore (_)).",
        ],
    ],
)
def test_dictionary_property_invalid_key(d):
    # Each case is [input, exact expected error message]; keys must be 3-256
    # characters drawn from [a-zA-Z0-9_-].
    dict_prop = DictionaryProperty(spec_version="2.0")
    with pytest.raises(DictionaryKeyError) as excinfo:
        dict_prop.clean(d[0])
    assert str(excinfo.value) == d[1]


@pytest.mark.parametrize(
    "d", [
        # TODO: This error message could be made more helpful. The error is caused
        # because `json.loads()` doesn't like the *single* quotes around the key
        # name, even though they are valid in a Python dictionary. While technically
        # accurate (a string is not a dictionary), if we want to be able to load
        # string-encoded "dictionaries" that are, we need a better error message
        # or an alternative to `json.loads()` ... and preferably *not* `eval()`. :-)
        # Changing the following to `'{"description": "something"}'` does not cause
        # any ValueError to be raised.
        ("{'description': 'something'}", "The dictionary property must contain a dictionary"),
    ],
)
def test_dictionary_property_invalid(d):
    # Non-dictionary input (here, a non-JSON string) is rejected outright.
    dict_prop = DictionaryProperty(spec_version="2.0")
    with pytest.raises(ValueError) as excinfo:
        dict_prop.clean(d[0])
    assert str(excinfo.value) == d[1]
def test_property_list_of_dictionary():
    # DictionaryProperty must compose with ListProperty on a custom object.
    @stix2.v20.CustomObject(
        'x-new-obj-4', [
            ('property1', ListProperty(DictionaryProperty(spec_version="2.0"), required=True)),
        ],
    )
    class NewObj():
        pass

    test_obj = NewObj(property1=[{'foo': 'bar'}])
    assert test_obj.property1[0]['foo'] == 'bar'
@pytest.mark.parametrize(
    "key", [
        "aaa",
        "a"*256,
        "a-1_b",
    ],
)
def test_hash_property_valid_key(key):
    # Boundary-valid keys: minimum length 3, maximum length 256, and the
    # allowed hyphen/underscore characters.
    p = HashesProperty(["foo"], spec_version="2.0")
    result = p.clean({key: "bar"}, True)
    assert result == ({key: "bar"}, True)


@pytest.mark.parametrize(
    "key", [
        "aa",
        "a"*257,
        "funny%chars?",
    ],
)
def test_hash_property_invalid_key(key):
    # Boundary-invalid keys: too short, too long, or disallowed characters.
    p = HashesProperty(["foo"], spec_version="2.0")
    with pytest.raises(DictionaryKeyError):
        p.clean({key: "foo"}, True)
def test_embedded_property():
    # A compliant embedded object passes with either allow-custom setting,
    # and the returned has-custom flag stays False.
    emb_prop = EmbeddedObjectProperty(type=stix2.v20.EmailMIMEComponent)
    mime = stix2.v20.EmailMIMEComponent(
        content_type="text/plain; charset=utf-8",
        content_disposition="inline",
        body="Cats are funny!",
    )
    result = emb_prop.clean(mime, False)
    assert result == (mime, False)

    result = emb_prop.clean(mime, True)
    assert result == (mime, False)

    with pytest.raises(ValueError):
        emb_prop.clean("string", False)


def test_embedded_property_dict():
    # A plain dict is promoted to the embedded object's type.
    emb_prop = EmbeddedObjectProperty(type=stix2.v20.EmailMIMEComponent)
    mime = {
        "content_type": "text/plain; charset=utf-8",
        "content_disposition": "inline",
        "body": "Cats are funny!",
    }
    result = emb_prop.clean(mime, False)
    assert isinstance(result[0], stix2.v20.EmailMIMEComponent)
    assert result[0]["body"] == "Cats are funny!"
    assert not result[1]

    result = emb_prop.clean(mime, True)
    assert isinstance(result[0], stix2.v20.EmailMIMEComponent)
    assert result[0]["body"] == "Cats are funny!"
    assert not result[1]


def test_embedded_property_custom():
    # An embedded object carrying a custom property ("foo") requires
    # allow-custom=True and is reported via the returned flag.
    emb_prop = EmbeddedObjectProperty(type=stix2.v20.EmailMIMEComponent)
    mime = stix2.v20.EmailMIMEComponent(
        content_type="text/plain; charset=utf-8",
        content_disposition="inline",
        body="Cats are funny!",
        foo=123,
        allow_custom=True,
    )
    with pytest.raises(CustomContentError):
        emb_prop.clean(mime, False)

    result = emb_prop.clean(mime, True)
    assert result == (mime, True)


def test_embedded_property_dict_custom():
    # Same as above, but starting from a dict: the extra key raises unless
    # allow-custom is set, in which case the promoted object keeps it.
    emb_prop = EmbeddedObjectProperty(type=stix2.v20.EmailMIMEComponent)
    mime = {
        "content_type": "text/plain; charset=utf-8",
        "content_disposition": "inline",
        "body": "Cats are funny!",
        "foo": 123,
    }
    with pytest.raises(ExtraPropertiesError):
        emb_prop.clean(mime, False)

    result = emb_prop.clean(mime, True)
    assert isinstance(result[0], stix2.v20.EmailMIMEComponent)
    assert result[0]["body"] == "Cats are funny!"
    assert result[1]
def test_extension_property_valid():
    # A registered extension dict is promoted to its extension class.
    ext_prop = ExtensionsProperty(spec_version="2.0")
    result = ext_prop.clean(
        {
            'windows-pebinary-ext': {
                'pe_type': 'exe',
            },
        }, False,
    )
    assert isinstance(
        result[0]["windows-pebinary-ext"], stix2.v20.WindowsPEBinaryExt,
    )
    assert not result[1]

    result = ext_prop.clean(
        {
            'windows-pebinary-ext': {
                'pe_type': 'exe',
            },
        }, True,
    )
    assert isinstance(
        result[0]["windows-pebinary-ext"], stix2.v20.WindowsPEBinaryExt,
    )
    assert not result[1]


def test_extension_property_invalid1():
    # Non-mapping input is rejected outright.
    ext_prop = ExtensionsProperty(spec_version="2.0")
    with pytest.raises(ValueError):
        ext_prop.clean(1, False)


def test_extension_property_invalid2():
    # An unregistered extension name is custom content: rejected unless
    # allow-custom is set, in which case it is passed through as-is.
    ext_prop = ExtensionsProperty(spec_version="2.0")
    with pytest.raises(CustomContentError):
        ext_prop.clean(
            {
                'foobar-ext': {
                    'pe_type': 'exe',
                },
            },
            False,
        )

    result = ext_prop.clean(
        {
            'foobar-ext': {
                'pe_type': 'exe',
            },
        }, True,
    )
    assert result == ({"foobar-ext": {"pe_type": "exe"}}, True)


def test_extension_property_invalid3():
    # A registered extension with an extra custom property ("abc") needs
    # allow-custom; the promoted object then retains the extra property.
    ext_prop = ExtensionsProperty(spec_version="2.0")
    with pytest.raises(ExtraPropertiesError):
        ext_prop.clean(
            {
                'windows-pebinary-ext': {
                    'pe_type': 'exe',
                    'abc': 123,
                },
            },
            False,
        )

    result = ext_prop.clean(
        {
            'windows-pebinary-ext': {
                'pe_type': 'exe',
                'abc': 123,
            },
        }, True,
    )
    assert isinstance(
        result[0]["windows-pebinary-ext"], stix2.v20.WindowsPEBinaryExt,
    )
    assert result[0]["windows-pebinary-ext"]["abc"] == 123
    assert result[1]


def test_extension_at_least_one_property_constraint():
    # TCPExt declares an at-least-one constraint; constructing it empty fails.
    with pytest.raises(AtLeastOnePropertyError):
        stix2.v20.TCPExt()
def test_marking_property_error():
    # Arbitrary strings are not acceptable marking definitions.
    mark_prop = MarkingProperty()
    with pytest.raises(ValueError) as excinfo:
        mark_prop.clean('my-marking')
    assert str(excinfo.value) == "must be a Statement, TLP Marking or a registered marking."


def test_stix_property_not_compliant_spec():
    # This is a 2.0 test only...
    # A 2.0 bundle must not contain an object that carries its own
    # spec_version property.
    indicator = stix2.v20.Indicator(spec_version="2.0", allow_custom=True, **constants.INDICATOR_KWARGS)
    stix_prop = STIXObjectProperty(spec_version="2.0")

    with pytest.raises(ValueError) as excinfo:
        stix_prop.clean(indicator, False)
    assert "Spec version 2.0 bundles don't yet support containing objects of a different spec version." in str(excinfo.value)
def test_observable_property_obj():
    # An already-constructed observable passes through unchanged.
    prop = ObservableProperty(spec_version="2.0")

    obs = stix2.v20.File(name="data.dat")
    obs_dict = {
        "0": obs,
    }

    result = prop.clean(obs_dict, False)
    assert result[0]["0"] == obs
    assert not result[1]

    result = prop.clean(obs_dict, True)
    assert result[0]["0"] == obs
    assert not result[1]


def test_observable_property_dict():
    # A dict-valued observable is promoted to its observable class by type.
    prop = ObservableProperty(spec_version="2.0")

    obs_dict = {
        "0": {
            "type": "file",
            "name": "data.dat",
        },
    }

    result = prop.clean(obs_dict, False)
    assert isinstance(result[0]["0"], stix2.v20.File)
    assert result[0]["0"]["name"] == "data.dat"
    assert not result[1]

    result = prop.clean(obs_dict, True)
    assert isinstance(result[0]["0"], stix2.v20.File)
    assert result[0]["0"]["name"] == "data.dat"
    assert not result[1]


def test_observable_property_obj_custom():
    # An observable with a custom property requires allow-custom=True.
    prop = ObservableProperty(spec_version="2.0")

    obs = stix2.v20.File(name="data.dat", foo=True, allow_custom=True)
    obs_dict = {
        "0": obs,
    }

    with pytest.raises(ExtraPropertiesError):
        prop.clean(obs_dict, False)

    result = prop.clean(obs_dict, True)
    assert result[0]["0"] == obs
    assert result[1]


def test_observable_property_dict_custom():
    # Dict form of the custom-property case; promotion keeps the extra key.
    prop = ObservableProperty(spec_version="2.0")

    obs_dict = {
        "0": {
            "type": "file",
            "name": "data.dat",
            "foo": True,
        },
    }

    with pytest.raises(ExtraPropertiesError):
        prop.clean(obs_dict, False)

    result = prop.clean(obs_dict, True)
    assert isinstance(result[0]["0"], stix2.v20.File)
    assert result[0]["0"]["foo"]
    assert result[1]


def test_stix_object_property_custom_prop():
    # A known SDO type with an extra property parses only with allow-custom.
    prop = STIXObjectProperty(spec_version="2.0")

    obj_dict = {
        "type": "identity",
        "name": "alice",
        "identity_class": "supergirl",
        "foo": "bar",
    }

    with pytest.raises(ExtraPropertiesError):
        prop.clean(obj_dict, False)

    result = prop.clean(obj_dict, True)
    assert isinstance(result[0], stix2.v20.Identity)
    assert result[0]["foo"] == "bar"
    assert result[1]


def test_stix_object_property_custom_obj():
    # A completely unknown object type can't be parsed; with allow-custom it
    # is passed through as the raw dict.
    prop = STIXObjectProperty(spec_version="2.0")

    obj_dict = {
        "type": "something",
        "abc": 123,
        "xyz": ["a", 1],
    }

    with pytest.raises(ParseError):
        prop.clean(obj_dict, False)

    result = prop.clean(obj_dict, True)
    assert result[0] == {"type": "something", "abc": 123, "xyz": ["a", 1]}
    assert result[1]
| |
#!/usr/bin/env python3
"""
This is -80 (minus80), a tool for long-term archival backup to Amazon S3 and Glacier.
If you're reading this on Amazon, this file both describes the data format and
contains code for restoring the backup to your local computer. Keep reading.
Dependencies
============
Minus80 was developed and tested with Boto3 1.9.23 and Python 3.6.
Hopefully backwards-compatible versions will still be available
by the time you want to restore from this backup...
Getting Started
===============
Sign up for Amazon Web Services (AWS) at http://aws.amazon.com/
Create a configuration file in JSON format that looks like this:
{
"credentials": {
"profile_name": "XXX",
"aws_access_key_id": "XXX",
"aws_secret_access_key": "XXX"
},
"aws_s3_bucket": "XXX",
"restore_for_days": 30,
"file_database": "~/.minus80.sqlite3"
}
Replace the "XXX" with appropriate values from your new AWS account.
You can supply a profile, or access key and secret key, or none of the above
(in which case defaults will be used, based on boto3's search process).
Your bucket name must be globally unique, so pick something no one else
is likely to have used.
Usage - Backup
==============
Storage costs can be reduced by setting up a bucket lifecycle rule that transfers
data from S3 storage to Glacier storage. Since most of the bytes are in the data/
folder, I would suggest only archiving that folder, say, 7 days after upload.
See http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
A CloudFormation template is provided to create a bucket with a suitable policy.
The easiest way to use it is through the CloudFormation web console.
Usage - Restore
===============
Restore proceeds in three stages: 'thaw', 'download', and 'rebuild'.
1. Thaw. The thaw command traverses the entire S3 bucket. Any object
that has been moved to Glacier is restored to S3, for a period of
`restore_for_days` days. According to Glacier documentation,
each thaw command will take up to 12 hours to complete; after that, files
can be downloaded from S3 normally.
2. Download. The download command will pull down a perfect copy of all
the data in your S3 bucket. If any of it has been transferred to
Glacier, you should run 'thaw' first and wait 12 hours after it finishes.
Existing files of the right size will be skipped, allowing you to
resume an interrupted download.
3. Rebuild. The rebuild command will re-organize the downloaded data into
a reasonable approximation of the structure you archived originally.
Existing files of the right size will be skipped, so you'll usually
want to target the restore to an empty directory.
Deleting Things
===============
Minus80 is designed to back up things that you want to keep, well, forever.
But sometimes forever is too long. Here are some options:
- If you're moving to a different backup solution and no longer want this
one, you can delete the whole S3 bucket through the AWS control panel.
- If you want to get rid of very old stuff, you can use an Amazon
lifecycle rule to e.g. delete everything over 10 years old.
- If you want to get rid of an individual file that should have never
been committed to backup, you need to know its hash. If you still have
the file, you can calculate its hash with the widely available `shasum`
command line tool. If you have the file name and the SQLite database,
you can look up the hash. If you have neither, you'll have to download
all of index/ and search through it to find the file you want. With the
hash in hand, you can then remove data/DATA_HASH using the S3 web control
panel. To remove all trace of the file, remove index/DATA_HASH as well.
Data Format
===========
All data is stored in an Amazon S3 bucket, using a simple content-addressable
storage scheme. The contents of each file are stored in
data/DATA_HASH
where DATA_HASH is the hexadecimal SHA-1 hash of the file contents. This provides
automatic de-duplication -- if you have multiple copies of a single file, it is
only uploaded and stored once.
Metadata about each file is stored in JSON format in
index/DATA_HASH/INFO_HASH.json
where INFO_HASH is the hexadecimal SHA-1 of the contents of the metadata JSON file.
The metadata includes a timestamp, so even if the contents of a particular file
change over time, one can recover previous versions from the backup if needed.
All the metadata for various instances of a particular piece of content are stored
under a common prefix (DATA_HASH) to make it feasible to purge all aliases to a
particular bit of content from the archive if e.g. it was included by mistake.
The metadata is also stored in
stream/TIMESTAMP_INFO_HASH.json
where TIMESTAMP is an ISO8601 basic datetime in UTC, like 2015-07-23T16:17:00Z.
This enables file sync across multiple computers, assuming your clock is correct,
because temporal order matches Amazon's traversal order (lexicographic).
Once files have been backed up, this is recorded in a local SQLite database.
This is purely for efficiency -- if a file's size and mtime have not changed
since the last backup, it is assumed to already be in the archive, and is skipped.
This saves recalculating hashes for thousands of files. However, if the local
database is deleted, the next backup attempt will verify that each file is in S3.
For convenience, the S3 bucket also contains a copy of the current version of
minus80 (this file), as README.txt, and a file LAST_UPDATE.txt containing the
time of the last backup run as YYYY-MM-DD HH:MM:SS (UTC time zone).
Philosophy
==========
Minus80 is a tool in the Unix tradition. This means it tries to be simple and modular,
and not re-invent functionality that's available elsewhere. Thus:
- The only way to specify files is as a list of names, one per line, on stdin.
Expected usage is to pipe the output of `find` to minus80,
but you could also have a manually curated backup list.
It does not currently support null-separated output (-print0), because
I assume you are a sane human and do not have newlines in your filenames.
- Minus80 does not have built-in bandwidth limits, because tools like
`trickle` (http://monkey.org/~marius/pages/?page=trickle)
and `throttled` (https://github.com/zquestz/throttled) are available.
- Minus80 does not encourage encryption, because it is designed for disaster recovery
of files that are very important to you but of low importance to others, such as
your personal photo library. The risk of forgetting the encryption password
is higher than the risk of the data being compromised in a damaging way.
Of course, I can't stop you from using `gpg` to encrypt files before backing them up.
- The archive format is as simple as possible. An average coder should be able to
easily whip up a script to restore the data, given a description of the format.
Hopefully, s/he could even reverse-engineer the format if necessary.
- Once backed up, data is never deleted. This is not a backup tool for many small,
frequently-changing files. It is not for reconstructing a flawless image of your
entire disk exactly as it was before the crash. It is for worst-case-scenario
recovery of your most precious digital assets if all your other backups have failed.
Minus80 was named for the -80 C freezers that scientists use for long-term cold storage.
Although it talks only to S3, the intention is to use bucket lifecycle rules to move
all data into Glacier for permanent storage.
"""
# MAKE SURE to increment this when code is updated,
# so a new copy gets stored in the archive!
# (do_archive() uploads this file to the bucket as README_<VERSION>.txt.)
VERSION = '0.4.0'
import argparse, datetime, hashlib, json, logging, os, shutil, sqlite3, sys
import os.path as osp
import boto3
from botocore.exceptions import ClientError
def init_db(dbpath):
    """Open (creating if needed) the local SQLite cache of backed-up files.

    PARSE_DECLTYPES makes TIMESTAMP columns come back as datetime objects.
    Returns a connection whose rows support access by column name.
    """
    conn = sqlite3.connect(
        osp.expanduser(dbpath),
        detect_types=sqlite3.PARSE_DECLTYPES,
    )
    conn.row_factory = sqlite3.Row
    with conn:
        conn.executescript("""
        CREATE TABLE IF NOT EXISTS files (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            abspath TEXT NOT NULL,
            mtime NUMERIC NOT NULL,
            size INTEGER NOT NULL,
            infohash TEXT NOT NULL,
            datahash TEXT NOT NULL,
            updated TIMESTAMP NOT NULL DEFAULT (DATETIME('now'))
        );
        CREATE UNIQUE INDEX IF NOT EXISTS idx_files_1 ON files (abspath, mtime, size);
        """)
    return conn
def hash_string(s, hashname='sha1'):
    """Hex digest of the UTF-8 encoding of `s` (SHA-1 by default)."""
    digest = hashlib.new(hashname, s.encode())
    return digest.hexdigest()
def hash_file_content(absfile, hashname='sha1', chunk=1024*1024):
    """Return the hex digest of the contents of `absfile`.

    Reads in `chunk`-byte pieces so arbitrarily large files can be hashed
    without loading them into memory.
    """
    hashfunc = hashlib.new(hashname)
    # Use a context manager so the handle is closed even if read() raises;
    # the previous version leaked the file object.
    with open(absfile, 'rb') as infile:
        while True:
            data = infile.read(chunk)
            if not data:
                break
            hashfunc.update(data)
    return hashfunc.hexdigest()
def key_exists(key):
    """Return True if the S3 object behind `key` exists.

    Uses a HEAD request (key.load()); a 404 means "no such object", any
    other API error is re-raised.
    """
    try:
        key.load()
    except ClientError as ex:
        if ex.response['Error']['Code'] == '404':
            return False
        raise  # some other API failure
    return True
def upload_string(s3bucket, key_name, s, replace=False):
    """Returns True if data was transferred, False if it was already there."""
    obj = s3bucket.Object(key_name)
    if replace or not key_exists(obj):
        obj.put(Body=s.encode())
        return True
    return False
def upload_file(s3bucket, key_name, filename, replace=False):
    """Returns True if data was transferred, False if it was already there."""
    obj = s3bucket.Object(key_name)
    if replace or not key_exists(obj):
        obj.upload_file(filename)
        return True
    return False
def get_file_info(absfile, datahash):
    """Serialize file metadata (path, size, mtime, content hash) to compact JSON.

    Deliberately excludes a "stored" timestamp: including one would change
    the JSON -- and therefore its hash -- on every run, even for unchanged
    files; mtime is enough to reconstruct the file system state.
    """
    info = {
        'path': absfile,
        'size': osp.getsize(absfile),
        'mtime': osp.getmtime(absfile),
        'data': datahash,
    }
    return json.dumps(info, separators=(',', ':'), sort_keys=True)
def s3_path_to_local(localroot, path):
    """
    Map an S3 key to a local path, fanning out data/ and index/ objects.

    The hash component of data/ and index/ keys is split after its second
    and fourth characters (ab/cd/rest) so no single directory accumulates
    more than ~1,000 entries, which degrades many filesystems.
    """
    parts = path.lstrip("/").split("/")  # tolerate an unexpected leading slash
    fan_out = (
        parts[0] in ("data", "index")
        and len(parts) >= 2
        and len(parts[1]) > 4
    )
    if fan_out:
        digest = parts[1]
        parts = [parts[0], digest[0:2], digest[2:4], digest[4:]] + parts[2:]
    return osp.join(localroot, *parts)
def do_archive(filename_iter, s3bucket, db):
    """Archive every file named by `filename_iter` into `s3bucket`.

    Per file: skip if (abspath, mtime, size) is already recorded in `db`;
    otherwise hash it, upload content to data/DATAHASH, metadata to
    index/DATAHASH/INFOHASH.json and stream/TIMESTAMP_INFOHASH.json, then
    record the upload in `db`. Per-file errors are logged and never abort
    the overall run.
    """
    # Make sure current documentation exists in bucket.
    # We actually upload this whole file, to provide unambiguous info and a way to restore.
    upload_file(s3bucket, "README_%s.txt" % VERSION, __file__)
    upload_string(s3bucket, "LAST_UPDATE.txt", datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), replace=True)
    for relfile in filename_iter:
        try:
            absfile = osp.realpath(relfile) # eliminates symbolic links and does abspath()
            if osp.isdir(absfile):
                logger.info("SKIP_DIR %s" % absfile)
                continue
            if not osp.exists(absfile):
                logger.warning("DOES_NOT_EXIST file %s" % absfile)
                # BUG FIX: previously fell through to getmtime(), which raised
                # and logged a second, spurious ERROR for the same file.
                continue
            # If file of same name, modification date, and size is in database, we can skip it.
            mtime = osp.getmtime(absfile)
            fsize = osp.getsize(absfile)
            known_file = db.execute("SELECT updated FROM files WHERE abspath = ? AND mtime = ? AND size = ?", (absfile, mtime, fsize)).fetchone() # or None
            if known_file is not None:
                logger.info("SKIP_KNOWN %s %s" % (known_file[0].strftime("%Y-%m-%d %H:%M:%S"), absfile))
                continue
            # Can't skip it. Calculate the hashes!
            datahash = hash_file_content(absfile)
            fileinfo = get_file_info(absfile, datahash)
            infohash = hash_string(fileinfo)
            indexkey = "index/%s/%s.json" % (datahash, infohash)
            datakey = "data/%s" % datahash
            # Upload the actual file contents, if needed.
            logger.debug("UPLOAD_READY %s %s" % (datakey, absfile))
            did_upload = upload_file(s3bucket, datakey, absfile)
            if did_upload:
                # If file has been modified while we were hashing, abort. We'll get it next time through.
                if mtime != osp.getmtime(absfile) or fsize != osp.getsize(absfile):
                    logger.warning("CONCURENT_MODIFICATION %s" % absfile)
                    # We uploaded on this pass, so content is likely wrong. Remove it.
                    s3bucket.Object(datakey).delete()
                    continue
                else:
                    logger.info("UPLOAD_DONE %s %s" % (datakey, absfile))
            else:
                logger.info("UPLOAD_EXISTS %s %s" % (datakey, absfile))
            # Upload the metadata, if needed (replaced if the content was re-uploaded).
            logger.debug("INDEX_READY %s %s" % (indexkey, absfile))
            if upload_string(s3bucket, indexkey, fileinfo, replace=did_upload):
                logger.info("INDEX_DONE %s %s" % (indexkey, absfile))
                # Post timestamped metadata to allow syncing with the archive.
                now = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
                streamkey = "stream/%sZ_%s.json" % (now, infohash)
                logger.debug("STREAM_READY %s %s" % (streamkey, absfile))
                upload_string(s3bucket, streamkey, fileinfo)
                logger.info("STREAM_DONE %s %s" % (streamkey, absfile))
            else:
                logger.info("INDEX_EXISTS %s %s" % (indexkey, absfile))
            # Note to self: we've now backed this file up.
            with db:
                db.execute("INSERT INTO files (abspath, mtime, size, infohash, datahash) VALUES (?, ?, ?, ?, ?)",
                           (absfile, mtime, fsize, infohash, datahash))
        except Exception as ex:
            logger.exception("ERROR %s %s" % (relfile, ex))
def is_thawed(key):
    """Return truthy when a Glacier restore on this S3 Object has finished.

    A completed restore reports ongoing-request="false" plus an expiry-date
    in the object's `restore` header; returns a falsy value otherwise
    (including when no restore was ever requested and `restore` is None).
    """
    status = key.restore
    return status and 'ongoing-request="false"' in status and 'expiry-date=' in status
def is_thawing(key):
    """Return truthy while a Glacier restore request is still in progress.

    S3 reports ongoing-request="true" in the `restore` header until the
    restore completes; falsy when no restore is pending.
    """
    status = key.restore
    return status and 'ongoing-request="true"' in status
def do_thaw(s3bucket, for_days):
    """Request a Glacier restore ("thaw") for every frozen data/ object.

    Objects already restored are skipped; objects with a restore still
    running are counted but not re-requested.  Finishes by logging whether
    a download can start yet.
    """
    pending = 0
    for summary in s3bucket.objects.filter(Prefix="data/"):
        # Objects in a live storage class need no restore at all.
        if summary.storage_class not in ('GLACIER', 'DEEP_ARCHIVE'):
            logger.debug("NOT_FROZEN %s" % summary.key)
            continue
        # Promote the ObjectSummary to a full Object to read restore status.
        obj = summary.Object()
        if is_thawed(obj):
            logger.debug("THAW_DONE %s" % obj.key)
            continue
        pending += 1
        if is_thawing(obj):
            logger.debug("THAW_IN_PROGRESS %s" % obj.key)
            continue
        logger.debug("READY_THAW %s" % obj.key)
        obj.restore_object(RestoreRequest={
            'Days': for_days,
            'GlacierJobParameters': {
                # Cheapest retrieval tier; 'Standard'/'Expedited' also valid.
                'Tier': 'Bulk'
            }})
        logger.info("THAW_STARTED %s" % obj.key)
    if pending:
        logger.warning("Thawing %i objects; should be complete by %s" % (pending, datetime.datetime.now() + datetime.timedelta(hours=12)))
    else:
        logger.warning("All objects thawed; ready to start download")
def do_download(s3bucket, destdir):
    """Mirror all index/ and data/ objects from the bucket into destdir.

    A local file whose size matches the remote object is assumed current
    and skipped.  Per-object failures are logged and do not stop the run.
    """
    for prefix in ["index/", "data/"]:
        for summary in s3bucket.objects.filter(Prefix=prefix):
            try:
                target = s3_path_to_local(destdir, summary.key)
                # Size match is the (cheap) up-to-date heuristic.
                if osp.lexists(target) and osp.getsize(target) == summary.size:
                    logger.debug("EXISTS %s" % target)
                    continue
                parent = osp.dirname(target)
                if not osp.isdir(parent):
                    os.makedirs(parent)
                logger.debug("DOWNLOAD_READY %s %s" % (summary.key, target))
                summary.Object().download_file(target)
                logger.info("DOWNLOAD_DONE %s %s" % (summary.key, target))
            except Exception as ex:
                # Best effort: log and continue with the next object.
                logger.exception("ERROR %s %s" % (summary.key, ex))
def do_rebuild(srcdir, destdir):
    """Rebuild the original directory tree from downloaded index/data files.

    Reads every JSON index record under srcdir/index, orders them newest
    first, and copies the matching content-addressed data file to the
    recorded original path under destdir, restoring mtimes.
    """
    # Collect every index record describing a backed-up file.
    records = []
    for dirpath, dirnames, filenames in os.walk(osp.join(srcdir, "index")):
        for name in filenames:
            records.append(json.load(open(osp.join(dirpath, name))))
    # Newest records first, so the most recent version of a path wins below.
    records = sorted(records, key=lambda rec: rec['mtime'], reverse=True)
    for rec in records:
        s3name = "data/%s" % rec['data']
        srcname = s3_path_to_local(srcdir, s3name)
        destname = osp.join(destdir, rec['path'].lstrip(os.sep))
        destbase = osp.dirname(destname)
        if not osp.isdir(destbase):
            os.makedirs(destbase)
        # Because we only copy if not exists, info from newer indexes takes precedence.
        if osp.lexists(destname) and osp.getsize(srcname) == osp.getsize(destname):
            logger.debug("SKIP_EXISTS %s %s" % (srcname, destname))
            continue
        logger.info("COPY %s %s" % (srcname, destname))
        try:
            shutil.copy2(srcname, destname)
            # Restore the original modification time from the index record.
            os.utime(destname, (rec['mtime'], rec['mtime']))
        except Exception as ex:
            logger.exception("ERROR %s %s %s" % (srcname, destname, ex))
def main(argv):
    """Command-line entry point: parse args, set up logging, run a subcommand.

    argv is the argument list without the program name (e.g. sys.argv[1:]).
    Returns the process exit status (always 0; errors are logged per-item).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', default=0, action='count')
    subparsers = parser.add_subparsers(dest='cmd_name', help='command to run')
    p_archive = subparsers.add_parser('archive', help='store files listed on stdin to S3/Glacier')
    p_archive.add_argument("config", metavar="CONFIG.json", type=argparse.FileType('r'))
    p_thaw = subparsers.add_parser('thaw', help='restore files from Glacier to normal S3')
    p_thaw.add_argument("config", metavar="CONFIG.json", type=argparse.FileType('r'))
    p_download = subparsers.add_parser('download', help='pull down all files from normal S3')
    p_download.add_argument("config", metavar="CONFIG.json", type=argparse.FileType('r'))
    p_download.add_argument("download_dir", metavar="DOWNLOAD_DIR")
    p_rebuild = subparsers.add_parser('rebuild', help='convert hashed names back to original structure')
    p_rebuild.add_argument("download_dir", metavar="DOWNLOAD_DIR")
    p_rebuild.add_argument("rebuild_dir", metavar="REBUILD_DIR")
    args = parser.parse_args(argv)
    # boto's own loggers are noisy, so they are capped at INFO (-v); the
    # application logger below goes all the way to DEBUG at -vv.
    boto_log_level = [logging.WARNING, logging.INFO, logging.DEBUG][min(1, args.verbose)] # max of INFO
    logging.basicConfig(level=boto_log_level, format='%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s')
    global logger
    logger = logging.getLogger("minus80")
    app_log_level = [logging.WARNING, logging.INFO, logging.DEBUG][min(2, args.verbose)]
    logger.setLevel(app_log_level)
    # Shared initialization for subcommands:
    # 'rebuild' is the only subcommand without a config argument -- it works
    # purely on local files, so db/s3bucket stay undefined in that case.
    if 'config' in args:
        config = json.load(args.config)
        db = init_db(config['file_database'])
        session = boto3.Session(**config['credentials'])
        s3conn = session.resource('s3')
        s3bucket = s3conn.Bucket(config['aws_s3_bucket']) # Bucket must already exist
    # Run subcommand
    if args.cmd_name == 'archive':
        filename_iter = (line.rstrip("\r\n") for line in sys.stdin)
        do_archive(filename_iter, s3bucket, db)
    elif args.cmd_name == 'thaw':
        do_thaw(s3bucket, config['restore_for_days'])
    elif args.cmd_name == 'download':
        do_download(s3bucket, args.download_dir)
    elif args.cmd_name == 'rebuild':
        do_rebuild(args.download_dir, args.rebuild_dir)
    else:
        # No (or unknown) subcommand: show usage instead of failing.
        parser.print_help()
    return 0
if __name__ == "__main__":
    # Strip the program name; main() returns the process exit status.
    sys.exit(main(sys.argv[1:]))
| |
import random
import time
import scipy
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import multiprocessing
import threading
import scipy.signal as signal
import requests
import gym
import tensorflow.contrib.layers as layers
from tensorflow.python.training import training_ops
from tensorflow.python.training import slot_creator
class RMSPropApplier(object):
  """Applies accumulated A3C gradients to shared variables with RMSProp.

  Unlike a stock tf.train.Optimizer this takes precomputed gradients (one
  per variable) in apply_gradients() rather than calling compute_gradients.
  Keeps per-variable "rms" and "momentum" slot variables, mirroring the
  slot machinery of tf.train.Optimizer via slot_creator.
  """
  def __init__(self,
               learning_rate,
               decay=0.9,
               momentum=0.0,
               epsilon=1e-10,
               clip_norm=40.0,
               device="/cpu:0",
               name="RMSPropApplier"):
    # clip_norm bounds each gradient's L2 norm before the update is applied.
    self._name = name
    self._learning_rate = learning_rate
    self._decay = decay
    self._momentum = momentum
    self._epsilon = epsilon
    self._clip_norm = clip_norm
    self._device = device
    # Tensors for learning rate and momentum. Created in _prepare.
    self._learning_rate_tensor = None
    self._decay_tensor = None
    self._momentum_tensor = None
    self._epsilon_tensor = None
    # Maps slot_name -> {variable: slot Variable}; see _slot_dict.
    self._slots = {}
  def _create_slots(self, var_list):
    """Create the "rms" (ones-initialized) and "momentum" (zeros) slots."""
    for v in var_list:
      # 'val' is Variable's initial value tensor (ones, matching RMSProp).
      val = tf.constant(1.0, dtype=v.dtype, shape=v.get_shape())
      self._get_or_make_slot(v, val, "rms", self._name)
      self._zeros_slot(v, "momentum", self._name)
  def _prepare(self):
    """Convert scalar hyper-parameters into graph tensors."""
    self._learning_rate_tensor = tf.convert_to_tensor(self._learning_rate,
                                                      name="learning_rate")
    self._decay_tensor = tf.convert_to_tensor(self._decay, name="decay")
    self._momentum_tensor = tf.convert_to_tensor(self._momentum,
                                                 name="momentum")
    self._epsilon_tensor = tf.convert_to_tensor(self._epsilon,
                                                name="epsilon")
  def _slot_dict(self, slot_name):
    """Return (creating if needed) the {variable: slot} map for slot_name."""
    named_slots = self._slots.get(slot_name, None)
    if named_slots is None:
      named_slots = {}
      self._slots[slot_name] = named_slots
    return named_slots
  def _get_or_make_slot(self, var, val, slot_name, op_name):
    """Return var's slot, creating it from initial value val if missing."""
    named_slots = self._slot_dict(slot_name)
    if var not in named_slots:
      named_slots[var] = slot_creator.create_slot(var, val, op_name)
    return named_slots[var]
  def get_slot(self, var, name):
    """Public lookup: the named slot variable for var, or None if absent."""
    named_slots = self._slots.get(name, None)
    if not named_slots:
      return None
    return named_slots.get(var, None)
  def _zeros_slot(self, var, slot_name, op_name):
    """Return var's slot, creating a zeros-initialized slot if missing."""
    named_slots = self._slot_dict(slot_name)
    if var not in named_slots:
      named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
    return named_slots[var]
  # TODO: in RMSProp native code, memcpy() (for CPU) and
  # cudaMemcpyAsync() (for GPU) are used when updating values,
  # and values might tend to be overwritten with results from other threads.
  # (Need to check the learning performance with replacing it)
  def _apply_dense(self, grad, var):
    """Build the fused RMSProp update op for one (grad, var) pair."""
    rms = self.get_slot(var, "rms")
    mom = self.get_slot(var, "momentum")
    # use_locking=False: lock-free "Hogwild"-style updates across worker
    # threads (see TODO above about possible overwrites).
    return training_ops.apply_rms_prop(
      var, rms, mom,
      self._learning_rate_tensor,
      self._decay_tensor,
      self._momentum_tensor,
      self._epsilon_tensor,
      grad,
      use_locking=False).op
  # Apply accumulated gradients to var.
  def apply_gradients(self, var_list, accum_grad_list, name=None):
    """Return a grouped op clipping each accumulated gradient to clip_norm
    and applying it to the matching variable with RMSProp.

    var_list and accum_grad_list must be parallel lists (zip pairs them).
    """
    update_ops = []
    with tf.device(self._device):
      # control_dependencies(None) escapes any surrounding control scope so
      # slot creation is unconditional.
      with tf.control_dependencies(None):
        self._create_slots(var_list)
      with tf.name_scope(name, self._name, []) as name:
        self._prepare()
        for var, accum_grad in zip(var_list, accum_grad_list):
          with tf.name_scope("update_" + var.op.name), tf.device(var.device):
            clipped_accum_grad = tf.clip_by_norm(accum_grad, self._clip_norm)
            update_ops.append(self._apply_dense(clipped_accum_grad, var))
        return tf.group(*update_ops, name=name)
def sendStatElastic(data, endpoint="http://35.187.182.237:9200/reinforce/games"):
    """Post one game-stats document to an Elasticsearch index (best effort).

    Side effect: stamps data['step_time'] with the current UTC time in
    ISO-8601 format before posting.  Any failure is printed and swallowed so
    a stats outage can never kill a worker thread.
    """
    data['step_time'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    try:
        requests.post(endpoint, json=data)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; `except Exception` keeps the best-effort behavior
        # without eating interpreter-exit signals.  (Dead `finally: pass`
        # and the unused response binding were removed.)
        print("Elasticsearch exception")
class FrameBuffer():
    """Fixed-length FIFO of flattened frames for frame-stacked network input.

    Starts filled with zero frames; add() pushes the newest frame and drops
    the oldest, and frames() returns the whole window as a (1, frame_size *
    buffer_size) array ready to feed the policy network.
    """
    def __init__(self, buffer_size=4, frame_size=None):
        self.buffer_size = buffer_size
        self.frame_size = frame_size
        # Seed with all-zero frames so frames() is valid before any add().
        self._frames = [[0] * frame_size for _ in range(buffer_size)]
    def add(self, frame):
        """Append the newest frame, evicting the oldest beyond buffer_size."""
        self._frames.append(frame)
        while len(self._frames) > self.buffer_size:
            del self._frames[0]
    def frames(self):
        """Return the stacked window, flattened to shape (1, size*count)."""
        stacked = np.array(self._frames)
        return np.reshape(stacked, [1, self.frame_size * self.buffer_size])
def make_gym_env(name):
    """Create the named gym environment with a fixed frameskip of 3.

    Reaches through the wrapper (env.env) to set frameskip on the underlying
    environment -- assumes an Atari-style env exposing that attribute; TODO
    confirm for non-Atari game names.
    """
    env = gym.make(name)
    env.env.frameskip=3
    return env
def process_frame(f, last_f=None, height=84,width=84):
    """Convert a raw RGB frame to a flattened height*width grayscale vector.

    If last_f is given, the two frames are first max-pooled pixelwise
    (the standard trick to undo Atari sprite flicker).  Output values are
    scaled to [0, 1].
    """
    if last_f is not None:
        f = np.amax(np.array([f, last_f]), axis=0)
    # Crop rows 34:194 / cols :160 -- presumably the Atari playing field;
    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3, so this
    # requires an old SciPy (plus Pillow); confirm the pinned environment.
    f = scipy.misc.imresize(f[34:194,:160,:], (height, width))
    # Luminosity (ITU-R 601) grayscale conversion, scaled to [0, 1].
    f = np.dot(f[...,:3], [0.299, 0.587, 0.114])/255.0
    return np.reshape(f,[-1])
def clip_reward(r):
    """Clip a raw game reward to its sign: 1.0, -1.0 or 0.0.

    Fix: the zero branch previously returned the int 0 while the other
    branches returned floats, making downstream reward arrays type-mixed;
    all branches now return a float.
    """
    if r > 0:
        return 1.0
    elif r < 0:
        return -1.0
    return 0.0
def clip_reward_tan(r):
    """Squash a raw reward into (-pi/2, pi/2) with arctan.

    A smooth alternative to hard sign-clipping that preserves relative
    reward magnitudes.
    """
    return np.arctan(r)
def discount_reward(rs, gamma):
    """Return discounted cumulative rewards: out[t] = sum_k gamma^k * rs[t+k].

    Implemented as an IIR filter y[n] = x[n] + gamma*y[n-1] run over the
    reversed sequence, then reversed back -- identical to the classic loop
    but vectorized.
    """
    reversed_rs = rs[::-1]
    discounted = signal.lfilter([1], [1, -gamma], reversed_rs, axis=0)
    return discounted[::-1]
def exp_coeff(vs, gamma):
    """Scale vs[k] by gamma**(k+1), in place; returns the same list."""
    for idx, v in enumerate(vs):
        vs[idx] = v * gamma ** (idx + 1)
    return vs
def update_target_graph(from_scope, to_scope):
    """Build ops copying every trainable variable from one scope to another.

    Variables are paired positionally, which assumes both scopes define the
    same network in the same order.  Returns the list of assign ops.
    """
    src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
    dst_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
    return [dst.assign(src) for src, dst in zip(src_vars, dst_vars)]
def normalized_columns_initializer(std=1.0):
    """Return a TF initializer drawing columns with L2 norm equal to std.

    Common for A3C policy/value output layers: uniform noise per column,
    rescaled so each column's Euclidean norm is exactly `std`.
    """
    def _initializer(shape, dtype=None, partition_info=None):
        sample = np.random.rand(*shape).astype(np.float32)
        sample *= std / np.sqrt(np.square(sample).sum(axis=0, keepdims=True))
        return tf.constant(sample)
    return _initializer
class ACNetwork():
    """A3C actor-critic network: conv torso, softmax policy and value heads.

    The "master" scope holds only the shared forward network; every other
    scope additionally builds the loss, gradient clipping, a manually
    decayed learning rate, the RMSProp update into the master variables,
    and TensorBoard summaries.
    """
    def __init__(self, act_size, scope, grad_applier=None, init_learn_rate=1e-3, learn_rate_decay_step=1e8,frame_count=4,im_size=84, h_size=256, global_step=None):
        # Input: a batch of frame stacks, flattened to one vector per sample.
        self.inputs = tf.placeholder(tf.float32, [None, im_size*im_size*frame_count], name="in_frames")
        # Un-flatten to NHWC with the stacked frames as channels.
        img_in = tf.reshape(self.inputs, [-1, im_size, im_size, frame_count])
        #conv1 = slim.convolution2d(activation_fn=tf.nn.relu,scope="conv1",inputs=img_in, num_outputs=32, kernel_size=[8,8], stride=[4, 4], padding="VALID", biases_initializer=None)
        #conv2 = slim.convolution2d(activation_fn=tf.nn.relu,scope="conv2",inputs=conv1, num_outputs=64, kernel_size=[4, 4], stride=[2, 2], padding="VALID", biases_initializer=None)
        #conv3 = slim.convolution2d(activation_fn=tf.nn.relu,scope="conv3",inputs=conv2, num_outputs=64, kernel_size=[3, 3], stride=[1, 1], padding="VALID", biases_initializer=None)
        #conv4 = slim.convolution2d(activation_fn=tf.nn.relu,scope="conv4",inputs=conv3, num_outputs=h_size, kernel_size=[7, 7], stride=[1, 1], padding="VALID", biases_initializer=None)
        with tf.name_scope("conv"):
            # conv1 = layers.conv2d(img_in, num_outputs=32, kernel_size=[5,5], stride=1, padding="VALID", weights_initializer=layers.xavier_initializer())
            # pool1 = layers.max_pool2d(conv1, kernel_size=[2,2], stride=2)
            # conv2 = layers.conv2d(pool1, num_outputs=32, kernel_size=[5,5], stride=1, padding="VALID", weights_initializer=layers.xavier_initializer())
            # pool2 = layers.max_pool2d(conv2, kernel_size=[2,2], stride=2)
            # conv3 = layers.conv2d(pool2, num_outputs=64, kernel_size=[4,4], stride=1, padding="VALID", weights_initializer=layers.xavier_initializer())
            # pool3 = layers.max_pool2d(conv3, kernel_size=[2,2], stride=2)
            # conv4 = layers.conv2d(pool3, num_outputs=64, kernel_size=3, stride=1, padding="VALID", weights_initializer=layers.xavier_initializer())
            # pool4 = layers.max_pool2d(conv4, kernel_size=[2,2], stride=2)
            # hidden = layers.fully_connected(layers.flatten(pool4), h_size, weights_initializer=layers.xavier_initializer())
            # Small two-conv torso (16x8x8/4, 32x4x4/2) plus one FC layer.
            conv1 = layers.conv2d(img_in, num_outputs=16, kernel_size=[8, 8], stride=[4, 4])
            conv2 = layers.conv2d(conv1, num_outputs=32, kernel_size=[4, 4], stride=[2, 2])
            hidden = layers.fully_connected(layers.flatten(conv2), h_size)
            #hidden = slim.flatten(conv4)
        with tf.variable_scope("va_split"):
            # Separate linear heads: per-action logits ("advantage") and a
            # scalar state value.
            advantage = slim.fully_connected(hidden, act_size, activation_fn=None, weights_initializer=normalized_columns_initializer(std=0.01))
            self.value = slim.fully_connected(hidden, 1, activation_fn=None, weights_initializer=normalized_columns_initializer(std=1.0))
            # salience = tf.gradients(advantage, img_in)
        with tf.variable_scope("predict"):
            #self.q_out = value + tf.subtract(advantage, tf.reduce_mean(advantage, axis=1, keep_dims=True))
            self.pred = tf.argmax(advantage, axis=1)
            self.policy = tf.nn.softmax(advantage)
            #self.policy = tf.clip_by_value(self.policy, 1e-13,1.0)
        # master network updated by copying values;
        # workers learn by gradient descent pushed into master's variables
        if scope!="master":
            self.actions = tf.placeholder(tf.int32, [None],name="actions")
            act_onehot = tf.one_hot(self.actions, act_size, dtype=tf.float32)
            # Training targets: discounted returns and advantages.
            self.target_v = tf.placeholder(tf.float32, [None],name="target_v")
            self.target_adv = tf.placeholder(tf.float32, [None],name="target_advantage")
            #self.entropy_scale = tf.placeholder(tf.float32,[],name="entrypy_scale")
            # Probability the policy assigned to the action actually taken.
            resp_outputs = tf.reduce_sum(self.policy * act_onehot, [1])
            value_loss = tf.reduce_mean(tf.square(self.target_v - self.value))
            # +1e-13 guards log(0).
            entropy = -tf.reduce_mean(tf.reduce_sum(self.policy * tf.log(self.policy+1e-13), axis=1))
            policy_loss = - tf.reduce_mean(tf.log(resp_outputs+1e-13) * self.target_adv)
            # Standard A3C loss: 0.5*value + policy - 0.005*entropy bonus.
            loss = 0.5 * value_loss + policy_loss - entropy * 0.005
            local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
            gradients = tf.gradients(loss, local_vars)
            #var_norms = tf.global_norm(local_vars)
            grads, grad_norms = tf.clip_by_global_norm(gradients, 5.0)
            # Gradients computed locally are applied to the MASTER variables.
            master_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'master')
            # Manual linear learning-rate decay: one step per training call.
            learning_rate = tf.Variable(init_learn_rate, trainable=False,dtype=tf.float32, name="learning_rate")
            delta_learn_rate = init_learn_rate/learn_rate_decay_step
            self.decay_learn_rate = tf.assign(learning_rate,learning_rate.value() - delta_learn_rate)
            #trainer = tf.train.RMSPropOptimizer(learning_rate=learning_rate, momentum=0.0, decay=0.99, epsilon=1e-6)
            self.train_op = grad_applier.apply_gradients(master_vars, grads)
            self.incr_global_step = global_step.assign(global_step.value()+1)
            with tf.name_scope("summary"):
                s_lr = tf.summary.scalar("learning_rate", learning_rate)
                s_loss = tf.summary.scalar("loss", loss)
                s_val = tf.summary.scalar("mean_value", tf.reduce_mean(self.value))
                s_max_adv = tf.summary.scalar("max_advantage", tf.reduce_max(advantage))
                s_min_adv = tf.summary.scalar("min_advantage", tf.reduce_min(advantage))
                s_tar_q = tf.summary.scalar("mean_target_q", tf.reduce_mean(self.target_v))
                s_v_l = tf.summary.scalar("value_loss", value_loss)
                s_p_l = tf.summary.scalar("policy_loss", policy_loss)
                s_en = tf.summary.scalar("entropy", entropy)
                #s_pred_q = tf.summary.scalar("mean_pred_q", tf.reduce_mean(self.q_out))
                self.summary_op = tf.summary.merge([ s_lr,s_loss, s_val, s_max_adv, s_min_adv, s_tar_q, s_v_l,s_p_l,s_en])
def get_exp_prob(step, max_step=500000):
    """Anneal an exploration probability from 1.0 down to a random floor.

    The floor is resampled from {0.1, 0.01, 0.5} (weights 0.4/0.3/0.3) on
    every call; the probability decays linearly over max_step steps and then
    stays at the floor.
    """
    floor = np.random.choice([0.1,0.01,0.5],1,p=[0.4,0.3,0.3])[0]
    #min_p = 0.1
    if step > max_step:
        return floor
    slope = (1.0 - floor) / max_step
    return 1.0 - slope * step
class Worker():
    """One A3C actor-learner thread.

    Owns a local ACNetwork in its own variable scope, runs episodes in its
    own gym environment, and periodically pushes RMSProp updates into the
    shared "master" scope, then re-syncs its local weights from master.
    """
    def __init__(self, act_size , name, grad_applier=None, game_name=None,global_step=None,summary_writer=None):
        self.name = str(name)
        #self.trainer = trainer
        self.act_size = act_size
        # Local network lives in a scope named after the worker so
        # update_target_graph can pair its variables with master's.
        with tf.variable_scope(self.name):
            self.local_ac = ACNetwork(act_size, self.name, grad_applier, global_step=global_step)
        self.game_name = game_name
        # copy values from master graph to local
        self.update_local_ops = update_target_graph('master', self.name)
        self.global_step = global_step
        self.summary_writer = summary_writer
    def train(self, rollout,gamma, bootstrap_val, sess):
        """Run one gradient update from a rollout.

        rollout is a list of [obs, action, reward, next_obs, done, value]
        rows; bootstrap_val estimates the value past the truncated rollout
        (0 when the episode ended or the estimate is NaN).
        """
        rollout = np.array(rollout)
        obs = rollout[:,0]
        acts = rollout[:,1]
        rewards = rollout[:,2]
        nxt_obs = rollout[:, 3]
        values = rollout[:, 5]
        if bootstrap_val is None or np.isnan(bootstrap_val)==True:
            bootstrap_val = 0
        # Append the bootstrap value so the discounted return includes the
        # estimated tail, then drop the extra element.
        reward_plus = np.asarray(rewards.tolist()+[bootstrap_val])
        disc_rew = discount_reward(reward_plus, gamma)[:-1]
        value_plus = np.asarray(values.tolist()+[bootstrap_val])
        #print(value_plus)
        #advantages = disc_rew + exp_coeff(value_plus[1:], gamma) - value_plus[:-1]
        #advantages = disc_rew + gamma * value_plus[1:] - value_plus[:-1]
        #advantages = discount_reward(advantages, gamma)
        # Simple advantage: discounted return minus the critic's estimate.
        advantages = disc_rew - values
        feed_dict = {
            self.local_ac.inputs:np.vstack(obs),
            self.local_ac.target_v:disc_rew,
            self.local_ac.actions:acts,
            self.local_ac.target_adv:advantages,
        }
        # One run does everything: summary, gradient step into master,
        # global-step increment, and one tick of learning-rate decay.
        summ,_ ,step,_ = sess.run([self.local_ac.summary_op, self.local_ac.train_op,self.local_ac.incr_global_step, self.local_ac.decay_learn_rate], feed_dict=feed_dict)
        if self.summary_writer is not None:
            self.summary_writer.add_summary(summ,step)
    def play(self, sess, coord, render=False):
        """Evaluation loop: act greedily-stochastically, never train."""
        print("Starting worker {}".format(self.name))
        env = make_gym_env(self.game_name)
        total_step = 0
        with sess.as_default():
            ep_count = 0
            while not coord.should_stop():
                frame_buffer = FrameBuffer(frame_size=84 * 84)
                s = env.reset()
                s = process_frame(s)
                frame_buffer.add(s)
                ep_score = 0.0
                t_ep_start = time.time()
                ep_len = 0
                while True:
                    total_step += 1
                    ep_len += 1
                    if render:
                        env.render()
                    # Sample an action from the current policy distribution.
                    pred = sess.run(self.local_ac.policy,feed_dict={self.local_ac.inputs: frame_buffer.frames()})
                    act = np.random.choice(range(self.act_size), p=pred[0])
                    #act = pred[0]
                    s, reward, done, obs = env.step(act)
                    ep_score += reward
                    s = process_frame(s)
                    frame_buffer.add(s)
                    if done:
                        ep_count += 1
                        print("Agent {} finished episode {} finished with total reward: {} in {} seconds, total step {}".format(self.name,ep_count, ep_score,
                              time.time() - t_ep_start,total_step))
                        sendStatElastic({"score": ep_score,'agent_name':self.name, 'game_name': 'ac3-Breakout-v0', 'episode': ep_count,'frame_count':total_step,'episode_length':ep_len})
                        break
    def work(self, gamma, sess, coord, max_ep_buffer_size=8, max_episode_count=5000):
        """Training loop: sync from master, roll out, train every
        max_ep_buffer_size steps (bootstrapping mid-episode) and at episode
        end, until the coordinator stops or max_episode_count is reached."""
        print("Starting worker {}".format(self.name))
        env = make_gym_env(self.game_name)
        total_step = 0
        with sess.as_default():
            ep_count = 0
            while not coord.should_stop() and ep_count<max_episode_count:
                # Pull the freshest master weights before each episode.
                sess.run(self.update_local_ops)
                frame_buffer = FrameBuffer(frame_size=84 * 84)
                s = env.reset()
                s = process_frame(s)
                frame_buffer.add(s)
                episode_buffer = []
                ep_score = 0.0
                t_ep_start = time.time()
                # e is only reported to stats; actions are policy-sampled.
                e = get_exp_prob(total_step)
                ep_len = 0
                while True:
                    total_step += 1
                    ep_len += 1
                    begin_frames = frame_buffer.frames()
                    pred, val = sess.run([self.local_ac.policy, self.local_ac.value],feed_dict={self.local_ac.inputs:begin_frames})
                    val = val[0,0]
                    #e = get_exp_prob(total_step)
                    #if random.random() < e:
                    #    act = np.random.choice(range(self.act_size))
                    #else:
                    act = np.random.choice(range(self.act_size), p=pred[0])
                    #act = pred[0]
                    s, reward, done, obs = env.step(act)
                    ep_score += reward
                    s = process_frame(s)
                    # Clip for training; ep_score keeps the raw total above.
                    reward = clip_reward(reward)
                    frame_buffer.add(s)
                    next_frames = frame_buffer.frames()
                    episode_buffer.append([begin_frames, act, reward, next_frames, done, val])
                    if len(episode_buffer) >= max_ep_buffer_size and not done:
                        # Mid-episode update: bootstrap the tail with the
                        # critic's value of the latest state.
                        v_pred = sess.run(self.local_ac.value,feed_dict={self.local_ac.inputs:next_frames})
                        self.train(episode_buffer, gamma,bootstrap_val=v_pred[0,0], sess=sess)
                        episode_buffer = []
                        #sess.run(self.update_local_ops)
                    if done:
                        ep_count += 1
                        print("Agent {} finished episode {} finished with total reward: {} in {} seconds, total step {}".format(self.name,ep_count, ep_score, time.time()-t_ep_start, total_step))
                        sendStatElastic({"score": ep_score,'game_name': 'ac3-Breakout-v0','episode':ep_count,'rand_e_prob':100.0*e,'agent_name':self.name,'frame_count':total_step,'episode_length':ep_len})
                        break
                # Flush any remaining transitions; terminal state, so the
                # bootstrap value is 0.
                if len(episode_buffer) != 0:
                    self.train(episode_buffer, gamma, 0.0, sess)
if __name__=="__main__":
    # Experiment configuration.
    game_name = 'Breakout-v0'
    logdir = "./checkpoints/a3c-dqn"
    max_episode_len = 10000
    action_count = 4
    gamma = 0.99
    #num_workers = multiprocessing.cpu_count() - 2
    num_workers = 32
    train_step = 8
    print("Running with {} workers".format(num_workers))
    graph = tf.Graph()
    with graph.as_default():
        global_step = tf.get_variable("global_step",(),tf.int64,initializer=tf.zeros_initializer(), trainable=False)
        #learning_rate = tf.Variable(0.00025, trainable=False, dtype=tf.float32, name="learning_rate")
        #trainer = tf.train.AdamOptimizer(learning_rate=1e-3)
        #trainer = tf.train.RMSPropOptimizer(learning_rate=0.00025, momentum=0.0, decay=0.99, epsilon=1e-6)
        #trainer = tf.train.MomentumOptimizer(learning_rate=1e-3, momentum=0.95)
        #trainer = tf.train.AdadeltaOptimizer(learning_rate=1e-4)
        grad_applier = RMSPropApplier(learning_rate=0.00025)
        #with tf.variable_scope("master"):
        # The master worker only holds the shared parameters; it never trains.
        master_worker = Worker(action_count,"master", game_name=game_name)
        #master_network = ACNetwork(act_size=action_count, scope="master", trainer=None)
        summ_writer = tf.summary.FileWriter(logdir)
        workers = []
        for k in range(num_workers):
            w_name = "worker_"+str(k)
            #with tf.variable_scope(w_name):
            w = Worker(action_count,w_name, grad_applier, game_name, summary_writer=summ_writer, global_step=global_step)
            workers.append(w)
    sv = tf.train.Supervisor(logdir=logdir, graph=graph, summary_op=None)
    with sv.managed_session() as sess:
    #with tf.Session() as sess:
        coord = tf.train.Coordinator()
        #sess.run(tf.global_variables_initializer())
        worker_threads = []
        for wk in workers:
            # BUGFIX: bind wk as a default argument.  A bare `lambda: wk.work(...)`
            # late-binds the loop variable, so a thread that is slow to start could
            # run the wrong (later) worker -- previously only the 0.5 s sleep below
            # made this race unlikely.
            work = lambda wk=wk: wk.work(gamma, sess, coord, max_episode_count=max_episode_len, max_ep_buffer_size=train_step)
            t = threading.Thread(target=(work))
            t.start()
            time.sleep(0.5)  # stagger thread start-up
            worker_threads.append(t)
        #master_worker.play(sess, coord, render=True)
        print("Started all threads")
        coord.join(worker_threads)
| |
#
# For more information, please see: http://software.sci.utah.edu
#
# The MIT License
#
# Copyright (c) 2005-2006
# Scientific Computing and Imaging Institute, University of Utah
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Import wxManta gui and some system modules.
import wxManta
import getopt, sys
# Import the manta module, the mantainterface module which was %import'ed
# into swig/example.i is automatically included by the manta module.
from manta import *
from pycallback import *
# Module-level options: filled in by main() from the command line and read
# by initialize_scene() once the engine calls it back.
filename = ""
ignore_vn = False
default_material = None;
triangle_type = MeshTriangle.KENSLER_SHIRLEY_TRI
# Re-create the default scene using the example texture.
def initialize_scene( frame, engine ):
    """Scene-creation callback for wxManta (Python 2 / SWIG Manta API).

    Loads the OBJ file named by the module-global `filename`, wraps it in a
    DynBVH acceleration structure, attaches a headlight plus black ambient,
    and installs the finished scene on the render engine.
    """
    # Create a scene object.
    scene = manta_new(Scene())
    scene.setBackground(manta_new(ConstantBackground(ColorDB.getNamedColor("white"))))
    # Load an obj file
    global filename, ignore_vn
    print "File: " + filename
    try:
        obj = manta_new( ObjGroup( filename, default_material, triangle_type ) )
    except InputError,e:
        print "Error: " + e.message()
        exit(1)
    # Turn off vertex normals (code appears broken in some models)
    if (ignore_vn):
        obj.discardVertexNormals();
    # Create a bvh.
    bvh = manta_new( DynBVH() )
    bvh.setGroup( obj )
    bvh.rebuild()
    # scene.setObject(world)
    scene.setObject( bvh )
    # Lights.
    lights = manta_new(LightSet())
    lights.add(manta_new(HeadLight(1.0, Color(RGBColor(.8,.8,.9)) )))
    lights.setAmbientLight(manta_new(ConstantAmbient(Color.black())))
    #
    scene.setLights(lights)
    scene.getRenderParameters().maxDepth = 5
    engine.setScene( scene )
def usage():
print "Usage: python test.py [options]"
print "Where options contains one or more of:"
print "-n --np=<threads>"
print "-f --file=<filename"
print " --ignore_vn Ignore vertex normals in file."
def main():
# Default options.
num_workers = 1
# Value of None indicates that a default will be used.
camera = "pinhole( -normalizeRays -createCornerRays )";
imagetype = None;
shadows = "noshadows";
imagetraverser = "tiled( -tilesize 8x8 -square )";
loadbalancer = None;
pixelsampler = None;
renderer = None;
autoview = False;
# Parse command line options. Note these have to conform to getopt
# So -np would be parsed as -n <space> p. Use --np=<threads> instead.
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["np=",
"file=",
"camera=",
"imagetype=",
"shadows=",
"imagetraverser=",
"loadbalancer=",
"pixelsampler=",
"renderer=",
"ignore_vn"]
)
except getopt.GetoptError,e:
print e
usage()
sys.exit(2)
global filename, ignore_vn
for o, a in opts:
if o in ("-n", "--np"):
try:
num_workers = int(a)
except ValueError:
usage()
sys.exit(2)
elif o in ("-f", "--file"):
filename = a
elif o in ("--ignore_vn"):
ignore_vn = True;
elif o in ("--camera"):
camera = str(a).replace(';',' ');
elif o in ("--imagetype"):
imagetype = str(a).replace(';',' ');
elif o in ("--shadows"):
shadows = str(a).replace(';',' ');
elif o in ("--imagetraverser"):
imagetraverser = str(a).replace(';',' ');
elif o in ("--loadbalancer"):
loadbalancer = str(a).replace(';',' ');
elif o in ("--imagetraverser"):
imagetraverser = str(a).replace(';',' ');
elif o in ("--pixelsampler"):
pixelsampler = str(a).replace(';',' ');
elif o in ("--renderer"):
renderer = str(a).replace(';',' ');
# Add additional command line args here.
###########################################################################
# Create the application.
app = wxManta.MantaApp( initialize_scene,
num_workers )
factory = Factory(app.frame.engine,True)
if (camera):
cam = factory.createCamera(camera);
if (not cam):
print "Invalid camera, choices:"
for key in factory.listCameras():
print key
sys.exit(2)
else:
app.frame.engine.setCamera(0,cam);
if (imagetype):
if (not factory.selectImageType( imagetype )):
print "Invalid image type, choices:"
for key in factory.listImageTypes():
print key
sys.exit(2)
if (shadows):
if (not factory.selectShadowAlgorithm( shadows )):
print "Invalid shadow algorithm, choices:"
for key in factory.listShadowAlgorithms():
print key
sys.exit(2)
if (imagetraverser):
if (not factory.selectImageTraverser( imagetraverser )):
print "Invalid image traverser, choices:"
for key in factory.listImageTraversers():
print key
sys.exit(2)
if (loadbalancer):
if (not factory.selectLoadBalancer( loadbalancer )):
print "Invalid load balancer, choices:"
for key in factory.listLoadBalancers():
print key
sys.exit(2)
if (pixelsampler):
if (not factory.selectPixelSampler( pixelsampler )):
print "Invalid pixel sampler, choices:"
for key in factory.listPixelSamplers():
print key
sys.exit(2)
if (renderer):
if (not factory.selectRenderer( renderer )):
print "Invalid renderer, choices:"
for key in factory.listRenderers():
print key
sys.exit(2)
###########################################################################
# Perform any additional setup
# cbArgs = ( manta_new( TiledImageTraverser( 64, 64 ) ), )
# app.frame.engine.addTransaction("set image traverser",
# manta_new(createMantaTransaction(app.frame.engine.setImageTraverser, cbArgs)))
# app.frame.engine.setShadowAlgorithm( manta_new( HardShadows( True ) ) )
# Start rendering.
app.MainLoop()
if __name__ == "__main__":
    # Script entry point: parse args and start the wxManta GUI.
    main()
| |
# Copyright (C) 2003-2007 Robey Pointer <robey@lag.net>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Client-mode SFTP support.
"""
from binascii import hexlify
import errno
import os
import threading
import time
import weakref
from paramiko.sftp import *
from paramiko.sftp_attr import SFTPAttributes
from paramiko.ssh_exception import SSHException
from paramiko.sftp_file import SFTPFile
def _to_unicode(s):
    """
    decode a string as ascii or utf8 if possible (as required by the sftp
    protocol).  if neither works, just return a byte string because the server
    probably doesn't know the filename's encoding.
    """
    try:
        # Python 2: encoding a byte string triggers an implicit ascii decode
        # first, so this succeeds only when s is pure ASCII.
        return s.encode('ascii')
    except UnicodeError:
        try:
            return s.decode('utf-8')
        except UnicodeError:
            # Unknown encoding: hand the raw bytes back unchanged.
            return s
class SFTPClient (BaseSFTP):
"""
SFTP client object. C{SFTPClient} is used to open an sftp session across
an open ssh L{Transport} and do remote file operations.
"""
    def __init__(self, sock):
        """
        Create an SFTP client from an existing L{Channel}.  The channel
        should already have requested the C{"sftp"} subsystem.
        An alternate way to create an SFTP client context is by using
        L{from_transport}.
        @param sock: an open L{Channel} using the C{"sftp"} subsystem
        @type sock: L{Channel}
        @raise SSHException: if there's an exception while negotiating
            sftp
        """
        BaseSFTP.__init__(self)
        self.sock = sock
        self.ultra_debug = False
        # SFTP request sequence numbers start at 1.
        self.request_number = 1
        # lock for request_number
        self._lock = threading.Lock()
        # Emulated current working directory (None = paths used as given).
        self._cwd = None
        # request # -> SFTPFile
        # Weak values: a garbage-collected SFTPFile silently drops its
        # pending responses instead of keeping entries alive here.
        self._expecting = weakref.WeakValueDictionary()
        if type(sock) is Channel:
            # override default logger
            transport = self.sock.get_transport()
            self.logger = util.get_logger(transport.get_log_channel() + '.sftp')
            self.ultra_debug = transport.get_hexdump()
        try:
            server_version = self._send_version()
        except EOFError, x:
            # Server closed the channel before version negotiation finished.
            raise SSHException('EOF during negotiation')
        self._log(INFO, 'Opened sftp connection (server version %d)' % server_version)
    def from_transport(cls, t):
        """
        Create an SFTP client channel from an open L{Transport}.
        @param t: an open L{Transport} which is already authenticated
        @type t: L{Transport}
        @return: a new L{SFTPClient} object, referring to an sftp session
            (channel) across the transport; C{None} if no session channel
            could be opened
        @rtype: L{SFTPClient}
        """
        chan = t.open_session()
        if chan is None:
            return None
        chan.invoke_subsystem('sftp')
        return cls(chan)
    # Pre-decorator classmethod idiom (this code predates @classmethod use).
    from_transport = classmethod(from_transport)
    def _log(self, level, msg, *args):
        # Prefix every message with the channel name so concurrent SFTP
        # sessions can be told apart in the transport's shared log.
        super(SFTPClient, self)._log(level, "[chan %s] " + msg, *([ self.sock.get_name() ] + list(args)))
    def close(self):
        """
        Close the SFTP session and its underlying channel.
        @since: 1.4
        """
        self._log(INFO, 'sftp session closed.')
        # Closing the channel tears down the whole session.
        self.sock.close()
    def get_channel(self):
        """
        Return the underlying L{Channel} object for this SFTP session.  This
        might be useful for doing things like setting a timeout on the channel.
        @return: the SSH channel
        @rtype: L{Channel}
        @since: 1.7.1
        """
        return self.sock
    def listdir(self, path='.'):
        """
        Return a list containing the names of the entries in the given C{path}.
        The list is in arbitrary order.  It does not include the special
        entries C{'.'} and C{'..'} even if they are present in the folder.
        This method is meant to mirror C{os.listdir} as closely as possible.
        For a list of full L{SFTPAttributes} objects, see L{listdir_attr}.
        @param path: path to list (defaults to C{'.'})
        @type path: str
        @return: list of filenames
        @rtype: list of str
        """
        # Thin convenience wrapper: strip the attribute objects to names.
        return [f.filename for f in self.listdir_attr(path)]
    def listdir_attr(self, path='.'):
        """
        Return a list containing L{SFTPAttributes} objects corresponding to
        files in the given C{path}.  The list is in arbitrary order.  It does
        not include the special entries C{'.'} and C{'..'} even if they are
        present in the folder.
        The returned L{SFTPAttributes} objects will each have an additional
        field: C{longname}, which may contain a formatted string of the file's
        attributes, in unix format.  The content of this string will probably
        depend on the SFTP server implementation.
        @param path: path to list (defaults to C{'.'})
        @type path: str
        @return: list of attributes
        @rtype: list of L{SFTPAttributes}
        @since: 1.2
        """
        path = self._adjust_cwd(path)
        self._log(DEBUG, 'listdir(%r)' % path)
        # open a directory handle on the server
        t, msg = self._request(CMD_OPENDIR, path)
        if t != CMD_HANDLE:
            raise SFTPError('Expected handle')
        handle = msg.get_string()
        filelist = []
        while True:
            try:
                t, msg = self._request(CMD_READDIR, handle)
            except EOFError, e:
                # done with handle
                break
            if t != CMD_NAME:
                raise SFTPError('Expected name response')
            # each CMD_NAME response carries a batch of directory entries
            count = msg.get_int()
            for i in range(count):
                filename = _to_unicode(msg.get_string())
                longname = _to_unicode(msg.get_string())
                attr = SFTPAttributes._from_msg(msg, filename, longname)
                if (filename != '.') and (filename != '..'):
                    filelist.append(attr)
        # release the server-side directory handle before returning
        self._request(CMD_CLOSE, handle)
        return filelist
    def open(self, filename, mode='r', bufsize=-1):
        """
        Open a file on the remote server.  The arguments are the same as for
        python's built-in C{file} (aka C{open}).  A file-like object is
        returned, which closely mimics the behavior of a normal python file
        object.
        The mode indicates how the file is to be opened: C{'r'} for reading,
        C{'w'} for writing (truncating an existing file), C{'a'} for appending,
        C{'r+'} for reading/writing, C{'w+'} for reading/writing (truncating an
        existing file), C{'a+'} for reading/appending.  The python C{'b'} flag
        is ignored, since SSH treats all files as binary.  The C{'U'} flag is
        supported in a compatible way.
        Since 1.5.2, an C{'x'} flag indicates that the operation should only
        succeed if the file was created and did not previously exist.  This has
        no direct mapping to python's file flags, but is commonly known as the
        C{O_EXCL} flag in posix.
        The file will be buffered in standard python style by default, but
        can be altered with the C{bufsize} parameter.  C{0} turns off
        buffering, C{1} uses line buffering, and any number greater than 1
        (C{>1}) uses that specific buffer size.
        @param filename: name of the file to open
        @type filename: str
        @param mode: mode (python-style) to open in
        @type mode: str
        @param bufsize: desired buffering (-1 = default buffer size)
        @type bufsize: int
        @return: a file object representing the open file
        @rtype: SFTPFile
        @raise IOError: if the file could not be opened.
        """
        filename = self._adjust_cwd(filename)
        self._log(DEBUG, 'open(%r, %r)' % (filename, mode))
        # translate the python-style mode string into SFTP open flags
        imode = 0
        if ('r' in mode) or ('+' in mode):
            imode |= SFTP_FLAG_READ
        if ('w' in mode) or ('+' in mode) or ('a' in mode):
            imode |= SFTP_FLAG_WRITE
        if ('w' in mode):
            imode |= SFTP_FLAG_CREATE | SFTP_FLAG_TRUNC
        if ('a' in mode):
            imode |= SFTP_FLAG_CREATE | SFTP_FLAG_APPEND
        if ('x' in mode):
            imode |= SFTP_FLAG_CREATE | SFTP_FLAG_EXCL
        # no file attributes are forced on the newly opened/created file
        attrblock = SFTPAttributes()
        t, msg = self._request(CMD_OPEN, filename, imode, attrblock)
        if t != CMD_HANDLE:
            raise SFTPError('Expected handle')
        handle = msg.get_string()
        self._log(DEBUG, 'open(%r, %r) -> %s' % (filename, mode, hexlify(handle)))
        return SFTPFile(self, handle, mode, bufsize)
    # python continues to vacillate about "open" vs "file"...
    file = open
def remove(self, path):
"""
Remove the file at the given path. This only works on files; for
removing folders (directories), use L{rmdir}.
@param path: path (absolute or relative) of the file to remove
@type path: str
@raise IOError: if the path refers to a folder (directory)
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'remove(%r)' % path)
self._request(CMD_REMOVE, path)
unlink = remove
def rename(self, oldpath, newpath):
"""
Rename a file or folder from C{oldpath} to C{newpath}.
@param oldpath: existing name of the file or folder
@type oldpath: str
@param newpath: new name for the file or folder
@type newpath: str
@raise IOError: if C{newpath} is a folder, or something else goes
wrong
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, 'rename(%r, %r)' % (oldpath, newpath))
self._request(CMD_RENAME, oldpath, newpath)
    def mkdir(self, path, mode=0777):
        """
        Create a folder (directory) named C{path} with numeric mode C{mode}.
        The default mode is 0777 (octal).  On some systems, mode is ignored.
        Where it is used, the current umask value is first masked out.
        @param path: name of the folder to create
        @type path: str
        @param mode: permissions (posix-style) for the newly-created folder
        @type mode: int
        """
        path = self._adjust_cwd(path)
        self._log(DEBUG, 'mkdir(%r, %r)' % (path, mode))
        # only st_mode is sent; all other attributes stay unset
        attr = SFTPAttributes()
        attr.st_mode = mode
        self._request(CMD_MKDIR, path, attr)
def rmdir(self, path):
"""
Remove the folder named C{path}.
@param path: name of the folder to remove
@type path: str
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'rmdir(%r)' % path)
self._request(CMD_RMDIR, path)
def stat(self, path):
"""
Retrieve information about a file on the remote system. The return
value is an object whose attributes correspond to the attributes of
python's C{stat} structure as returned by C{os.stat}, except that it
contains fewer fields. An SFTP server may return as much or as little
info as it wants, so the results may vary from server to server.
Unlike a python C{stat} object, the result may not be accessed as a
tuple. This is mostly due to the author's slack factor.
The fields supported are: C{st_mode}, C{st_size}, C{st_uid}, C{st_gid},
C{st_atime}, and C{st_mtime}.
@param path: the filename to stat
@type path: str
@return: an object containing attributes about the given file
@rtype: SFTPAttributes
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'stat(%r)' % path)
t, msg = self._request(CMD_STAT, path)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg)
def lstat(self, path):
"""
Retrieve information about a file on the remote system, without
following symbolic links (shortcuts). This otherwise behaves exactly
the same as L{stat}.
@param path: the filename to stat
@type path: str
@return: an object containing attributes about the given file
@rtype: SFTPAttributes
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'lstat(%r)' % path)
t, msg = self._request(CMD_LSTAT, path)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg)
    def symlink(self, source, dest):
        """
        Create a symbolic link (shortcut) of the C{source} path at
        C{destination}.
        @param source: path of the original file
        @type source: str
        @param dest: path of the newly created symlink
        @type dest: str
        """
        # only dest is cwd-adjusted; source is sent to the server as given
        dest = self._adjust_cwd(dest)
        self._log(DEBUG, 'symlink(%r, %r)' % (source, dest))
        if type(source) is unicode:
            source = source.encode('utf-8')
        self._request(CMD_SYMLINK, source, dest)
def chmod(self, path, mode):
"""
Change the mode (permissions) of a file. The permissions are
unix-style and identical to those used by python's C{os.chmod}
function.
@param path: path of the file to change the permissions of
@type path: str
@param mode: new permissions
@type mode: int
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'chmod(%r, %r)' % (path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_SETSTAT, path, attr)
def chown(self, path, uid, gid):
"""
Change the owner (C{uid}) and group (C{gid}) of a file. As with
python's C{os.chown} function, you must pass both arguments, so if you
only want to change one, use L{stat} first to retrieve the current
owner and group.
@param path: path of the file to change the owner and group of
@type path: str
@param uid: new owner's uid
@type uid: int
@param gid: new group id
@type gid: int
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'chown(%r, %r, %r)' % (path, uid, gid))
attr = SFTPAttributes()
attr.st_uid, attr.st_gid = uid, gid
self._request(CMD_SETSTAT, path, attr)
def utime(self, path, times):
"""
Set the access and modified times of the file specified by C{path}. If
C{times} is C{None}, then the file's access and modified times are set
to the current time. Otherwise, C{times} must be a 2-tuple of numbers,
of the form C{(atime, mtime)}, which is used to set the access and
modified times, respectively. This bizarre API is mimicked from python
for the sake of consistency -- I apologize.
@param path: path of the file to modify
@type path: str
@param times: C{None} or a tuple of (access time, modified time) in
standard internet epoch time (seconds since 01 January 1970 GMT)
@type times: tuple(int)
"""
path = self._adjust_cwd(path)
if times is None:
times = (time.time(), time.time())
self._log(DEBUG, 'utime(%r, %r)' % (path, times))
attr = SFTPAttributes()
attr.st_atime, attr.st_mtime = times
self._request(CMD_SETSTAT, path, attr)
def truncate(self, path, size):
"""
Change the size of the file specified by C{path}. This usually extends
or shrinks the size of the file, just like the C{truncate()} method on
python file objects.
@param path: path of the file to modify
@type path: str
@param size: the new size of the file
@type size: int or long
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'truncate(%r, %r)' % (path, size))
attr = SFTPAttributes()
attr.st_size = size
self._request(CMD_SETSTAT, path, attr)
def readlink(self, path):
"""
Return the target of a symbolic link (shortcut). You can use
L{symlink} to create these. The result may be either an absolute or
relative pathname.
@param path: path of the symbolic link file
@type path: str
@return: target path
@rtype: str
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'readlink(%r)' % path)
t, msg = self._request(CMD_READLINK, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count == 0:
return None
if count != 1:
raise SFTPError('Readlink returned %d results' % count)
return _to_unicode(msg.get_string())
def normalize(self, path):
"""
Return the normalized path (on the server) of a given path. This
can be used to quickly resolve symbolic links or determine what the
server is considering to be the "current folder" (by passing C{'.'}
as C{path}).
@param path: path to be normalized
@type path: str
@return: normalized form of the given path
@rtype: str
@raise IOError: if the path can't be resolved on the server
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'normalize(%r)' % path)
t, msg = self._request(CMD_REALPATH, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count != 1:
raise SFTPError('Realpath returned %d results' % count)
return _to_unicode(msg.get_string())
    def chdir(self, path):
        """
        Change the "current directory" of this SFTP session.  Since SFTP
        doesn't really have the concept of a current working directory, this
        is emulated by paramiko.  Once you use this method to set a working
        directory, all operations on this SFTPClient object will be relative
        to that path.
        @param path: new current working directory
        @type path: str
        @raise IOError: if the requested path doesn't exist on the server
        @since: 1.4
        """
        # normalize() round-trips to the server, so a bad path raises here
        self._cwd = self.normalize(path)
    def getcwd(self):
        """
        Return the "current working directory" for this SFTP session, as
        emulated by paramiko.  If no directory has been set with L{chdir},
        this method will return C{None}.
        @return: the current working directory on the server, or C{None}
        @rtype: str
        @since: 1.4
        """
        # set only by chdir(); None means "no emulated cwd"
        return self._cwd
    def put(self, localpath, remotepath, callback=None):
        """
        Copy a local file (C{localpath}) to the SFTP server as C{remotepath}.
        Any exception raised by operations will be passed through.  This
        method is primarily provided as a convenience.
        The SFTP operations use pipelining for speed.
        @param localpath: the local file to copy
        @type localpath: str
        @param remotepath: the destination path on the SFTP server
        @type remotepath: str
        @param callback: optional callback function that accepts the bytes
            transferred so far and the total bytes to be transferred
            (since 1.7.4)
        @type callback: function(int, int)
        @return: an object containing attributes about the given file
            (since 1.7.4)
        @rtype: SFTPAttributes
        @since: 1.4
        """
        file_size = os.stat(localpath).st_size
        # NOTE: file() is the Python 2 builtin alias for open()
        fl = file(localpath, 'rb')
        fr = self.file(remotepath, 'wb')
        # pipelining streams writes without waiting for each server ack
        fr.set_pipelined(True)
        size = 0
        while True:
            # copy in 32 KB chunks
            data = fl.read(32768)
            if len(data) == 0:
                break
            fr.write(data)
            size += len(data)
            if callback is not None:
                callback(size, file_size)
        fl.close()
        fr.close()
        # sanity-check the remote size against the number of bytes sent
        s = self.stat(remotepath)
        if s.st_size != size:
            raise IOError('size mismatch in put! %d != %d' % (s.st_size, size))
        return s
    def get(self, remotepath, localpath, callback=None):
        """
        Copy a remote file (C{remotepath}) from the SFTP server to the local
        host as C{localpath}.  Any exception raised by operations will be
        passed through.  This method is primarily provided as a convenience.
        @param remotepath: the remote file to copy
        @type remotepath: str
        @param localpath: the destination path on the local host
        @type localpath: str
        @param callback: optional callback function that accepts the bytes
            transferred so far and the total bytes to be transferred
            (since 1.7.4)
        @type callback: function(int, int)
        @since: 1.4
        """
        fr = self.file(remotepath, 'rb')
        file_size = self.stat(remotepath).st_size
        # prefetch pipelines read requests ahead of the read() calls below
        fr.prefetch()
        # NOTE: file() is the Python 2 builtin alias for open()
        fl = file(localpath, 'wb')
        size = 0
        while True:
            # copy in 32 KB chunks
            data = fr.read(32768)
            if len(data) == 0:
                break
            fl.write(data)
            size += len(data)
            if callback is not None:
                callback(size, file_size)
        fl.close()
        fr.close()
        # sanity-check the local size against the number of bytes received
        s = os.stat(localpath)
        if s.st_size != size:
            raise IOError('size mismatch in get! %d != %d' % (s.st_size, size))
### internals...
    def _request(self, t, *arg):
        # Synchronous request: issue it with no async consumer (type(None)
        # is the sentinel for "no file object") and block until the matching
        # response arrives.
        num = self._async_request(type(None), t, *arg)
        return self._read_response(num)
    def _async_request(self, fileobj, t, *arg):
        # this method may be called from other threads (prefetch)
        self._lock.acquire()
        try:
            msg = Message()
            msg.add_int(self.request_number)
            # serialize each argument according to its python type
            for item in arg:
                if type(item) is int:
                    msg.add_int(item)
                elif type(item) is long:
                    msg.add_int64(item)
                elif type(item) is str:
                    msg.add_string(item)
                elif type(item) is SFTPAttributes:
                    item._pack(msg)
                else:
                    raise Exception('unknown type for %r type %r' % (item, type(item)))
            num = self.request_number
            # register the consumer BEFORE sending, so the response can never
            # arrive for an unknown request number (weak dict: closed files
            # simply drop out of _expecting)
            self._expecting[num] = fileobj
            self._send_packet(t, str(msg))
            self.request_number += 1
        finally:
            self._lock.release()
        return num
    def _read_response(self, waitfor=None):
        # Pump incoming packets, dispatching each to whichever request issued
        # it.  If waitfor is a request number, block until that response
        # arrives and return (type, message); if waitfor is None, process at
        # most one packet and return (None, None).
        while True:
            try:
                t, data = self._read_packet()
            except EOFError, e:
                raise SSHException('Server connection dropped: %s' % (str(e),))
            msg = Message(data)
            num = msg.get_int()
            if num not in self._expecting:
                # might be response for a file that was closed before responses came back
                self._log(DEBUG, 'Unexpected response #%d' % (num,))
                if waitfor is None:
                    # just doing a single check
                    break
                continue
            fileobj = self._expecting[num]
            del self._expecting[num]
            if num == waitfor:
                # synchronous
                if t == CMD_STATUS:
                    self._convert_status(msg)
                return t, msg
            # asynchronous: hand the response to the file object that issued
            # the request (type(None) is the "no consumer" sentinel)
            if fileobj is not type(None):
                fileobj._async_response(t, msg)
            if waitfor is None:
                # just doing a single check
                break
        return (None, None)
    def _finish_responses(self, fileobj):
        # Drain responses until none remain outstanding for this file object,
        # then surface any exception recorded by its async responses.
        while fileobj in self._expecting.values():
            self._read_response()
        fileobj._check_exception()
def _convert_status(self, msg):
"""
Raises EOFError or IOError on error status; otherwise does nothing.
"""
code = msg.get_int()
text = msg.get_string()
if code == SFTP_OK:
return
elif code == SFTP_EOF:
raise EOFError(text)
elif code == SFTP_NO_SUCH_FILE:
# clever idea from john a. meinel: map the error codes to errno
raise IOError(errno.ENOENT, text)
elif code == SFTP_PERMISSION_DENIED:
raise IOError(errno.EACCES, text)
else:
raise IOError(text)
def _adjust_cwd(self, path):
"""
Return an adjusted path if we're emulating a "current working
directory" for the server.
"""
if type(path) is unicode:
path = path.encode('utf-8')
if self._cwd is None:
return path
if (len(path) > 0) and (path[0] == '/'):
# absolute path
return path
if self._cwd == '/':
return self._cwd + path
return self._cwd + '/' + path
class SFTP (SFTPClient):
    "an alias for L{SFTPClient}, kept for backwards compatibility"
    pass
| |
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.test import TestCase
from django.template import Template, Context
from django.template.exceptions import TemplateSyntaxError
from django.utils import translation
from django.utils.timezone import make_aware, utc
from geotrek.tourism.factories import TouristicEventFactory
from datetime import datetime, timedelta
import json
import os
import shutil
class ValueListTest(TestCase):
    # Rendering tests for the {% valuelist %} template tag (list display
    # helper from mapentity_tags).
    def test_empty_list_should_show_none(self):
        # i18n is deactivated so the untranslated string "None" is expected.
        translation.deactivate()
        out = Template(
            '{% load mapentity_tags %}'
            '{% valuelist items %}'
        ).render(Context({
            'items': []
        }))
        self.assertEqual(out.strip(), '<span class="none">None</span>')
    def test_simple_usage_outputs_list_of_items(self):
        out = Template(
            '{% load mapentity_tags %}'
            '{% valuelist items %}'
        ).render(Context({
            'items': ['blah']
        }))
        self.assertEqual(out.strip(), """<ul>\n    <li>blah</li>\n    </ul>""")
    def test_can_specify_field_to_be_used(self):
        obj = TouristicEventFactory.create(name='blah')
        out = Template(
            '{% load mapentity_tags %}'
            '{% valuelist items field="name" %}'
        ).render(Context({
            'items': [obj]
        }))
        # items are rendered as links; only the anchor fragment is asserted
        self.assertIn("""title="blah">blah</a></li>""", out.strip())
    def test_can_specify_an_enumeration4(self):
        # enumeration=True prefixes items with letters: A., B., C. ...
        out = Template(
            '{% load mapentity_tags %}'
            '{% valuelist items enumeration=True %}'
        ).render(Context({
            'items': range(1, 4)
        }))
        self.assertIn('<li><span class="enumeration-value">A.&nbsp;</span>1</li>'.replace('&nbsp;', ' ') if False else '<li><span class="enumeration-value">A. </span>1</li>', out)
        self.assertIn('<li><span class="enumeration-value">B. </span>2</li>', out)
        self.assertIn('<li><span class="enumeration-value">C. </span>3</li>', out)
    def test_can_specify_an_enumeration30(self):
        # beyond 26 items the prefix becomes two letters (AA., AZ., BA., ...)
        out = Template(
            '{% load mapentity_tags %}'
            '{% valuelist items enumeration=True %}'
        ).render(Context({
            'items': range(1, 30)
        }))
        self.assertIn('<li><span class="enumeration-value">AA. </span>1</li>', out)
        self.assertIn('<li><span class="enumeration-value">AZ. </span>26</li>', out)
        self.assertIn('<li><span class="enumeration-value">BA. </span>27</li>', out)
        self.assertIn('<li><span class="enumeration-value">BB. </span>28</li>', out)
    def test_can_specify_an_enumeration300(self):
        # beyond 26*26 items the prefix grows to three letters
        out = Template(
            '{% load mapentity_tags %}'
            '{% valuelist items enumeration=True %}'
        ).render(Context({
            'items': range(1, 678)
        }))
        self.assertIn('<li><span class="enumeration-value">AAA. </span>1</li>', out)
        self.assertIn('<li><span class="enumeration-value">AAZ. </span>26</li>', out)
        self.assertIn('<li><span class="enumeration-value">ABA. </span>27</li>', out)
        self.assertIn('<li><span class="enumeration-value">ABB. </span>28</li>', out)
        self.assertIn('<li><span class="enumeration-value">BAA. </span>677</li>', out)
class SmartIncludeTest(TestCase):
    """Argument-validation errors raised by the {% smart_include %} tag."""

    def test_smart_include_no_argument(self):
        # The tag requires exactly one argument.
        with self.assertRaisesRegex(TemplateSyntaxError, "'smart_include' tag requires one argument"):
            Template('{% load mapentity_tags %}{% smart_include %}').render(Context())

    def test_smart_include_no_quotes(self):
        # The viewname argument must be a quoted literal.
        message = "'smart_include' tag's viewname argument should be in quotes"
        with self.assertRaisesRegex(TemplateSyntaxError, message):
            Template('{% load mapentity_tags %}{% smart_include test %}').render(Context())
class LatLngBoundsTest(TestCase):
    """The |latlngbounds filter, on None and on a real touristic event."""

    def test_latlngbound_null(self):
        # A missing object renders the literal JSON null.
        rendered = Template(
            '{% load mapentity_tags %}{{ object|latlngbounds }}'
        ).render(Context({'object': None}))
        self.assertEqual('null', rendered)

    def test_latlngbound_object(self):
        # A geometry-bearing object renders its [[south, west], [north, east]]
        # bounds as JSON.
        event = TouristicEventFactory.create()
        rendered = Template(
            '{% load mapentity_tags %}{{ object|latlngbounds }}'
        ).render(Context({'object': event}))
        bounds = json.loads(rendered)
        self.assertAlmostEqual(bounds[0][0], -5.9838563092087576)
        self.assertAlmostEqual(bounds[0][1], -1.363081210117898)
        self.assertAlmostEqual(bounds[1][0], -5.9838563092087576)
        self.assertAlmostEqual(bounds[1][1], -1.363081210117898)
class FieldVerboseNameTest(TestCase):
    """The |verbose filter: fallback attribute vs. missing model field."""

    def test_field_no_field_but_verbose_name_field(self):
        # When a `<name>_verbose_name` attribute exists, it is used directly.
        event = TouristicEventFactory.create()
        setattr(event, 'do_not_exist_verbose_name', "test")
        rendered = Template(
            '{% load mapentity_tags %}'
            '{{ object|verbose:"do_not_exist" }}'
        ).render(Context({'object': event}))
        self.assertEqual(rendered, "test")

    def test_field_verbose_name_field_does_not_exist(self):
        # Without the fallback attribute, the model field lookup fails.
        event = TouristicEventFactory.create()
        with self.assertRaisesRegex(FieldDoesNotExist, "TouristicEvent has no field named 'do_not_exist'"):
            Template(
                '{% load mapentity_tags %}'
                '{{ object|verbose:"do_not_exist" }}'
            ).render(Context({'object': event}))
class MediasFallbackExistTest(TestCase):
    """media_static_fallback tags when the media file exists on disk."""

    def setUp(self):
        # Create an empty media file for the tags to discover.
        media_dir = os.path.join('var', 'media', 'testx3')
        os.mkdir(media_dir)
        with open(os.path.join(media_dir, 'logo-login.png'), 'wb') as stub:
            stub.write(b'')

    def tearDown(self):
        shutil.rmtree(os.path.join('var', 'media', 'testx3'))

    def test_media_static_fallback_exist(self):
        rendered = Template(
            '{% load mapentity_tags %}'
            '{% media_static_fallback "testx3/logo-login.png" "images/logo-login.png" %}'
        ).render(Context())
        self.assertEqual('/media/testx3/logo-login.png', rendered)

    def test_media_static_fallback_path_exist(self):
        rendered = Template(
            '{% load mapentity_tags %}'
            '{% media_static_fallback_path "testx3/logo-login.png" "images/logo-login.png" %}'
        ).render(Context())
        self.assertEqual('%s/testx3/logo-login.png' % settings.MEDIA_ROOT, rendered)
class TimeSinceTest(TestCase):
    """
    End-to-end checks of the |timesince filter across every supported
    granularity (years down to seconds).

    The original tests repeated the same factory + template boilerplate in
    every method; it is factored into two private helpers so each test only
    states its time offset and the expected phrase.  Test method names,
    assertions, and behavior are unchanged.
    """

    def setUp(self):
        # Force English so the expected phrases match.
        translation.activate('en')

    def _render(self, **context):
        # Render the shared timesince template with the given context.
        return Template(
            '{% load mapentity_tags %}'
            '{{ object.begin_date|timesince }}'
        ).render(Context(context))

    def _event_ago(self, **offset):
        # Create an event whose begin_date lies timedelta(**offset) in the past.
        date = make_aware(datetime.now() - timedelta(**offset), utc)
        return TouristicEventFactory.create(begin_date=date)

    def test_time_since_years(self):
        self.assertEqual('2 years ago', self._render(object=self._event_ago(days=800)))

    def test_time_since_year(self):
        self.assertEqual('1 year ago', self._render(object=self._event_ago(days=366)))

    def test_time_since_weeks(self):
        self.assertIn('2 weeks ago', self._render(object=self._event_ago(days=15)))

    def test_time_since_week(self):
        self.assertIn('1 week ago', self._render(object=self._event_ago(days=13)))

    def test_time_since_days(self):
        self.assertIn('3 days ago', self._render(object=self._event_ago(days=3)))

    def test_time_since_day(self):
        self.assertIn('1 day ago', self._render(object=self._event_ago(days=1)))

    def test_time_since_hours(self):
        self.assertIn('4 hours ago', self._render(object=self._event_ago(hours=4)))

    def test_time_since_hour(self):
        self.assertIn('1 hour ago', self._render(object=self._event_ago(hours=1)))

    def test_time_since_minutes(self):
        self.assertIn('3 minutes ago', self._render(object=self._event_ago(minutes=3)))

    def test_time_since_minute(self):
        self.assertIn('1 minute ago', self._render(object=self._event_ago(minutes=1)))

    def test_time_since_seconds(self):
        self.assertIn('just a few seconds ago', self._render(object=self._event_ago(seconds=15)))

    def test_time_since_now(self):
        self.assertIn('just a few seconds ago', self._render(object=self._event_ago(seconds=0)))

    def test_time_since_wrong_object(self):
        # No object in context: the filter must degrade gracefully.
        self.assertIn('', self._render())
| |
"""
Reads a parsed corpus (data_path) and a model report (report_path) from a model
that produces latent tree structures and computes the unlabeled F1 score between
the model's latent trees and:
- The ground-truth trees in the parsed corpus
- Strictly left-branching trees for the sentences in the parsed corpus
- Strictly right-branching trees for the sentences in the parsed corpus
Note that for binary-branching trees like these, precision, recall, and F1 are
equal by definition, so only one number is shown.
Usage:
$ python scripts/parse_comparison.py \
--data_path ./snli_1.0/snli_1.0_dev.jsonl \
--report_path ./logs/example-nli.report \
"""
import gflags
import sys
import codecs
import json
import random
import re
import glob
import math
from collections import Counter
# NLI gold label -> integer class index.
LABEL_MAP = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
FLAGS = gflags.FLAGS
# ListOps operator tokens as they appear in parses (also defined locally
# inside count_parse).
mathops = ["[MAX", "[MIN", "[MED", "[SM"]
def spaceify(parse):
    """
    Identity hook: parses are assumed to already be space-separated.
    (Historically this inserted spaces around parentheses; the call sites
    are kept so the behavior can be restored in one place.)
    """
    return parse
def balance(parse, lowercase=False):
    """
    Rebuild *parse* as a "half-full" binary tree without padding; when the
    leaf count is uneven, the right subtrees are the fuller ones.
    """
    tokens = tokenize_parse(parse)
    if len(tokens) <= 1:
        # nothing to combine: a single leaf is its own tree
        return tokens[0]
    stack = []
    for transition in full_transitions(len(tokens), right_full=True):
        if transition == 0:
            # shift: move the next leaf onto the stack
            stack.append(tokens.pop(0))
        elif transition == 1:
            # reduce: merge the top two stack entries
            right = stack.pop()
            left = stack.pop()
            stack.append("( " + left + " " + right + " )")
    assert len(stack) == 1
    return stack[0]
def roundup2(N):
    """Smallest power of two >= N, computed via the base-2 log ceiling."""
    exponent = math.ceil(math.log(N, 2))
    return int(2 ** exponent)
def full_transitions(N, left_full=False, right_full=False):
    """
    Recursively build the shift/reduce transition sequence (0 = shift,
    1 = reduce) of a full binary tree over N leaves.  With C{left_full}
    (resp. C{right_full}) the left (resp. right) subtree is rounded up to
    a full power-of-two tree; otherwise N itself must be a power of two.
    """
    if N == 1:
        return [0]
    if N == 2:
        return [0, 0, 1]
    assert not (left_full and right_full), "Please only choose one."
    if left_full:
        left_N = roundup2(N) / 2
        right_N = N - left_N
    elif right_full:
        right_N = roundup2(N) / 2
        left_N = N - right_N
    else:
        N = float(N)
        # Constrain to full binary trees.
        assert math.log(N, 2) % 1 == 0, \
            "Bad value. N={}".format(N)
        left_N = N / 2
        right_N = N - left_N
    left = full_transitions(left_N, left_full=left_full, right_full=right_full)
    right = full_transitions(right_N, left_full=left_full, right_full=right_full)
    return left + right + [1]
def tokenize_parse(parse):
    """Return the leaf tokens of a parse, skipping parenthesis tokens."""
    leaves = []
    for token in spaceify(parse).split():
        if token not in ('(', ')'):
            leaves.append(token)
    return leaves
def to_string(parse):
    """Render a nested-list binary tree back into '( left right )' notation."""
    if type(parse) is not list:
        # already a leaf string
        return parse
    if len(parse) == 1:
        return parse[0]
    return '( %s %s )' % (to_string(parse[0]), to_string(parse[1]))
def tokens_to_rb(tree):
    """Build a strictly right-branching nested-list tree from a token list."""
    if type(tree) is not list:
        return tree
    if len(tree) == 1:
        return tree[0]
    head, tail = tree[0], tree[1:]
    return [head, tokens_to_rb(tail)]
def to_rb(gt_table):
    """Map every parse in gt_table to its strictly right-branching variant."""
    return dict(
        (key, to_string(tokens_to_rb(tokenize_parse(gt_table[key]))))
        for key in gt_table
    )
def tokens_to_lb(tree):
    """Build a strictly left-branching nested-list tree from a token list."""
    if type(tree) is not list:
        return tree
    if len(tree) == 1:
        return tree[0]
    init, last = tree[:-1], tree[-1]
    return [tokens_to_lb(init), last]
def to_lb(gt_table):
    """Map every parse in gt_table to its strictly left-branching variant."""
    return dict(
        (key, to_string(tokens_to_lb(tokenize_parse(gt_table[key]))))
        for key in gt_table
    )
def average_depth(parse):
    """
    Mean parenthesis depth of the word tokens in *parse*, or None when the
    parse contains no word tokens at all.
    """
    depths = []
    current_depth = 0
    for token in parse.split():
        if token == '(':
            current_depth += 1
        elif token == ')':
            current_depth -= 1
        else:
            depths.append(current_depth)
    if not depths:
        return None
    return float(sum(depths)) / len(depths)
def corpus_average_depth(corpus):
    """
    Mean over the corpus of each parse's average token depth.

    Parses with no countable tokens (where average_depth returns None) are
    skipped.  Fix: the original computed average_depth twice per parse
    (once for the None check, once for the append); it is now computed once.
    """
    local_averages = []
    for key in corpus:
        depth = average_depth(corpus[key])
        if depth is not None:
            local_averages.append(depth)
    return float(sum(local_averages)) / len(local_averages)
def average_length(parse):
    """Token count of *parse*, parentheses included."""
    return len(spaceify(parse).split())
def corpus_average_length(corpus):
    """
    Mean parse length (token count, parentheses included) over the corpus.

    Bug fix: the original body referenced an undefined name ``s`` (it never
    bound the parse for the current key), raising NameError on first use.
    The parse is now looked up per key, and its length is computed once.
    """
    local_averages = []
    for key in corpus:
        length = average_length(corpus[key])
        if length is not None:
            local_averages.append(length)
    return float(sum(local_averages)) / len(local_averages)
def corpus_stats(corpus_1, corpus_2, first_two=False, neg_pair=False, const_parse=False):
    """
    Note: If a few examples in one dataset are missing from the other (i.e., some examples from the source corpus were not included
    in a model corpus), the shorter dataset must be supplied as corpus_1.
    corpus_1 is the report being evaluated (important for counting complete constituents)
    """
    f1_accum = 0.0
    count = 0.0
    # optional bracketing statistics: first-two / last-two token spans
    first_two_count = 0.0
    last_two_count = 0.0
    three_count = 0.0
    # optional negation-attachment statistics
    neg_pair_count = 0.0
    neg_count = 0.0
    const_parsed_1 = 0
    if const_parse:
        const_parsed_2 = 0
    else:
        # metric disabled: 1 avoids a zero division in the final ratio
        const_parsed_2 = 1
    for key in corpus_2:
        c1, cp1 = to_indexed_contituents(corpus_1[key], const_parse)
        c2, cp2 = to_indexed_contituents(corpus_2[key], const_parse)
        f1_accum += example_f1(c1, c2)
        count += 1
        const_parsed_1 += cp1
        const_parsed_2 += cp2
        if first_two and len(c1) > 1:
            # does the model bracket the first two words together?
            if (0, 2) in c1:
                first_two_count += 1
            num_words = len(c1) + 1
            # ... and the last two words?
            if (num_words - 2, num_words) in c1:
                last_two_count += 1
            three_count += 1
        if neg_pair:
            word_index = 0
            s = spaceify(corpus_1[key])
            tokens = s.split()
            for token_index, token in enumerate(tokens):
                if token in ['(', ')']:
                    continue
                if token in ["n't", "not", "never", "no", "none", "Not", "Never", "No", "None"]:
                    # negation word found: is its right neighbor a word token?
                    if tokens[token_index + 1] not in ['(', ')']:
                        neg_pair_count += 1
                    neg_count += 1
                word_index += 1
    # unlabeled F1, optionally extended with the extra tab-separated stats
    stats = f1_accum / count
    if first_two:
        stats = str(stats) + '\t' + str(first_two_count / three_count) + '\t' + str(last_two_count / three_count)
    if neg_pair:
        stats = str(stats) + '\t' + str(neg_pair_count / neg_count)
    return stats, const_parsed_1 / const_parsed_2
def corpus_stats_labeled(corpus_unlabeled, corpus_labeled):
    """
    Note: If a few examples in one dataset are missing from the other (i.e., some examples from the source corpus were not included
    in a model corpus), the shorter dataset must be supplied as corpus_1.
    """
    # NOTE(review): to_indexed_contituents is declared with two required
    # parameters (parse, const_parse) but is called here with one — confirm
    # whether a default was dropped or this call should pass False.
    correct = Counter()
    total = Counter()
    for key in corpus_labeled:
        c1 = to_indexed_contituents(corpus_unlabeled[key])
        c2 = to_indexed_contituents_labeled(corpus_labeled[key])
        if len(c2) == 0:
            # skip examples with no labeled constituents
            continue
        ex_correct, ex_total = example_labeled_acc(c1, c2)
        correct.update(ex_correct)
        total.update(ex_total)
    return correct, total
def count_parse(parse, index, const_parsed=None):
    """Compute the Constituents Parsed metric for ListOps style examples.

    Args:
        parse: Tokenized parse as a list of tokens. Mutated in place:
            each correctly parsed constituent is collapsed to a "-" token
            so enclosing recursive calls see it as a single leaf.
        index: Index in ``parse`` of the math-operator token to check.
        const_parsed: Accumulator list; one entry appended per correctly
            parsed constituent. Defaults to a fresh list per call.

    Returns:
        Total number of constituents counted so far.
    """
    # BUG FIX: the previous signature used a mutable default
    # (const_parsed=[]), so counts leaked across separate top-level calls.
    if const_parsed is None:
        const_parsed = []
    mathops = ["[MAX", "[MIN", "[MED", "[SM"]
    if "]" in parse:
        after = parse[index:]
        before = parse[:index]
        between = after[: after.index("]")]
        # If another operator opens before this one closes, recurse into the
        # nested constituent first, then re-check this one (the nested span
        # will have been collapsed to "-" by then).
        nest_check = [m in between[1:] for m in mathops]
        if True in nest_check:
            op_i = nest_check.index(True)
            nested_i = after[1:].index(mathops[op_i]) + 1
            count_parse(parse, index + nested_i, const_parsed)
            count_parse(parse, index, const_parsed)
        else:
            o_b = between.count("(")  # open brackets between op and "]"
            c_b = between.count(")")  # close brackets between op and "]"
            end = after.index("]")
            cafter = after[end + 1:]
            # Count the unbroken run of ")" immediately following "]".
            stop = None
            stop_list = []
            for item in cafter:
                stop_list.append(")" == item)
                if stop_list[-1] == False:
                    break
            if False in stop_list:
                stop = stop_list.index(False)
            else:
                stop = None
            cafter = cafter[: stop]
            c_a = cafter.count(")")
            # Count the unbroken run of "(" immediately preceding the op.
            stop = None
            stop_list = []
            for item in before[::-1]:
                stop_list.append("(" == item)
                if stop_list[-1] == False:
                    break
            if False in stop_list:
                stop = len(before) - stop_list.index(False) - 1
            else:
                stop = None
            cbefore = before[stop:]
            o_a = cbefore.count("(")
            # Leaves inside the span: digit tokens plus already-collapsed
            # ("-") sub-constituents.
            ints = sum(c.isdigit() for c in between) + between.count("-")
            op = o_a + o_b
            cl = c_a + c_b
            if op >= ints and cl >= ints:
                if op == ints + 1 or cl == ints + 1:
                    const_parsed.append(1)
            # Collapse the examined span so callers see it as one leaf.
            parse[index - o_a: index + len(between) + 1 + c_a] = '-'
    return sum(const_parsed)
def to_indexed_contituents(parse, const_parse):
    """Convert a binary-bracketed parse string into a set of (start, end)
    word-index spans.

    Args:
        parse: Parse string using "(" / ")" brackets.
        const_parse: If truthy, also compute the ListOps "constituents
            parsed" count via ``count_parse``.

    Returns:
        Tuple of (set of (start, end) spans, constituents-parsed count).
    """
    if parse.count("(") != parse.count(")"):
        print(parse)  # flag unbalanced parses for debugging
    parse = spaceify(parse)
    sp = parse.split()
    if len(sp) == 1:
        # BUG FIX: previously returned a bare set here, which broke callers
        # that unpack (constituents, count).
        return set([(0, 1)]), 0
    backpointers = []
    indexed_constituents = set()
    word_index = 0
    first_op = -1  # index of the first ListOps operator token, if any
    for index, token in enumerate(sp):
        if token == '(':
            backpointers.append(word_index)
        elif token == ')':
            start = backpointers.pop()
            end = word_index
            indexed_constituents.add((start, end))
        elif "[" in token:
            if first_op == -1:
                first_op = index
        else:
            word_index += 1
    cp = 0
    if const_parse:
        cp = count_parse(sp, first_op, [])
    return indexed_constituents, cp
def to_indexed_contituents_labeled(parse):
sp = re.findall(r'\([^ ]+| [^\(\) ]+|\)', parse)
if len(sp) == 1:
return set([(0, 1)])
backpointers = []
indexed_constituents = set()
word_index = 0
for index, token in enumerate(sp):
if token[0] == '(':
backpointers.append((word_index, token[1:]))
elif token == ')':
start, typ = backpointers.pop()
end = word_index
constituent = (start, end, typ)
if end - start > 1:
indexed_constituents.add(constituent)
else:
word_index += 1
return indexed_constituents
def example_f1(c1, c2):
    """F1 between a predicted span set ``c1`` and a reference span set ``c2``.

    For strictly binary trees precision equals recall, so precision alone
    is returned as F1. Returns 0.0 for an empty reference set instead of
    raising ZeroDivisionError.
    """
    if not c2:
        return 0.0
    return float(len(c1.intersection(c2))) / len(c2)  # TODO: More efficient.
def example_labeled_acc(c1, c2):
    '''Compute the number of non-unary constituents of each type in the labeled (non-binirized) parse appear in the model output.'''
    correct = Counter()
    total = Counter()
    for start, end, label in c2:
        total[label] += 1
        # Credit the label when the unlabeled span matches.
        if (start, end) in c1:
            correct[label] += 1
    return correct, total
def randomize(parse):
    """Rebuild the tree over *parse*'s tokens as a random binary tree."""
    tokens = tokenize_parse(parse)
    # Repeatedly merge a uniformly chosen adjacent pair until one tree is left.
    while len(tokens) > 1:
        pick = random.choice(list(range(len(tokens) - 1)))
        tokens[pick:pick + 2] = ["( " + tokens[pick] + " " + tokens[pick + 1] + " )"]
    return tokens[0]
def to_latex(parse):
    """Render a bracketed parse as a LaTeX qtree string."""
    latex = "\\Tree " + parse
    latex = latex.replace('(', '[').replace(')', ']')
    return latex.replace(' . ', ' $.$ ')
def read_nli_report(path):
    """Load sentence-pair trees from an NLI report jsonl file.

    Returns a dict mapping example_id + "_1"/"_2" to unpadded trees.
    """
    report = {}
    with codecs.open(path, encoding='utf-8') as report_file:
        for raw_line in report_file:
            record = json.loads(raw_line)
            eid = record['example_id']
            report[eid + "_1"] = unpad(record['sent1_tree'])
            report[eid + "_2"] = unpad(record['sent2_tree'])
    return report
def read_sst_report(path):
    """Load single-sentence trees from an SST report jsonl file."""
    trees = {}
    with codecs.open(path, encoding='utf-8') as handle:
        for raw in handle:
            record = json.loads(raw)
            trees[record['example_id'] + "_1"] = unpad(record['sent1_tree'])
    return trees
def read_mt_report(path):
    """Load single-sentence trees from an MT report jsonl file."""
    parses = {}
    with codecs.open(path, encoding='utf-8') as infile:
        for entry_line in infile:
            entry = json.loads(entry_line)
            parses[entry['example_id'] + "_1"] = unpad(entry['sent1_tree'])
    return parses
def read_listops_report(path):
    """Load a ListOps report jsonl file.

    Prints prediction accuracy over the report and returns a dict mapping
    example_id -> unpadded parse tree.
    """
    report = {}
    correct = 0
    num = 0
    with codecs.open(path, encoding='utf-8') as f:
        for line in f:
            loaded_example = json.loads(line)
            report[loaded_example['example_id']] = unpad(loaded_example['sent1_tree'])
            num += 1
            if loaded_example['truth'] == loaded_example['prediction']:
                correct += 1
    # Guard against an empty report file (previously ZeroDivisionError).
    if num:
        print("Accuracy = ", correct / num)
    else:
        print("Accuracy = undefined (empty report)")
    return report
def read_nli_report_padded(path):
    """Load sentence-pair trees (still padded) from an NLI report jsonl file.

    Lines that cannot be UTF-8 encoded are logged and skipped.
    """
    report = {}
    with codecs.open(path, encoding='utf-8') as f:
        for line in f:
            try:
                # Surrogates from a mis-encoded report raise UnicodeError here.
                line = line.encode('UTF-8')
            except UnicodeError as e:
                print("ENCODING ERROR:", line, e)
                # BUG FIX: previously substituted "{}" and fell through,
                # which raised KeyError on 'example_id' below. Skip instead.
                continue
            loaded_example = json.loads(line)
            report[loaded_example['example_id'] + "_1"] = loaded_example['sent1_tree']
            report[loaded_example['example_id'] + "_2"] = loaded_example['sent2_tree']
    return report
def read_ptb_report(path):
    """Load trees from a PTB report jsonl file, keyed by example_id."""
    parsed = {}
    with codecs.open(path, encoding='utf-8') as handle:
        for raw in handle:
            entry = json.loads(raw)
            parsed[entry['example_id']] = unpad(entry['sent1_tree'])
    return parsed
def unpad(parse):
    """Strip trailing _PAD tokens from *parse* and re-balance parentheses."""
    boundary_tokens = ["(", ")", "_PAD"]
    tokens = parse.split()
    pad_positions = [i for i, tok in enumerate(tokens) if tok == "_PAD"]
    if pad_positions:
        first_pad = pad_positions[0]
        # Also drop a structural token sitting right before the first pad.
        if tokens[first_pad - 1] in boundary_tokens:
            kept = tokens[:first_pad - 1]
        else:
            kept = tokens[:first_pad]
    else:
        kept = tokens
    sent = " ".join(kept)
    # Close any brackets the truncation left open.
    while sent.count("(") != sent.count(")"):
        sent += " )"
    return sent
def ConvertBinaryBracketedSeq(seq):
    """Split a bracketed token sequence into (tokens, shift/reduce transitions)."""
    T_SHIFT = 0
    T_REDUCE = 1
    tokens = []
    transitions = []
    for item in seq:
        if item == "(":
            # Open brackets yield neither a token nor a transition.
            continue
        if item == ")":
            transitions.append(T_REDUCE)
        else:
            tokens.append(item)
            transitions.append(T_SHIFT)
    return tokens, transitions
def run():
    """Load gold parses and model reports, then print parse statistics.

    Reads the main data file according to FLAGS.data_type, builds the
    ground-truth tree dictionary (plus left/right-branching baselines),
    optionally loads PTB data, then evaluates every report against them.
    """
    gt = {}
    # BUG FIX: gt_labeled was written below for NLI data but never
    # initialized, which raised NameError on the first example.
    gt_labeled = {}
    with codecs.open(FLAGS.main_data_path, encoding='utf-8') as f:
        for example_id, line in enumerate(f):
            if FLAGS.data_type == "nli":
                loaded_example = json.loads(line)
                if loaded_example["gold_label"] not in LABEL_MAP:
                    continue
                if '512-4841' in loaded_example['sentence1_binary_parse'] \
                        or '512-8581' in loaded_example['sentence1_binary_parse'] \
                        or '412-4841' in loaded_example['sentence1_binary_parse'] \
                        or '512-4841' in loaded_example['sentence2_binary_parse'] \
                        or '512-8581' in loaded_example['sentence2_binary_parse'] \
                        or '412-4841' in loaded_example['sentence2_binary_parse']:
                    continue  # Stanford parser tree binarizer doesn't handle phone numbers properly.
                gt[loaded_example['pairID'] + "_1"] = loaded_example['sentence1_binary_parse']
                gt[loaded_example['pairID'] + "_2"] = loaded_example['sentence2_binary_parse']
                gt_labeled[loaded_example['pairID'] + "_1"] = loaded_example['sentence1_parse']
                gt_labeled[loaded_example['pairID'] + "_2"] = loaded_example['sentence2_parse']
            elif FLAGS.data_type == "sst":
                # Rebuild a binary tree from the SST s-expression, dropping
                # node labels and unary productions.
                line = line.strip()
                stack = []
                words = line.replace(')', ' )')
                words = words.split(' ')
                for index, word in enumerate(words):
                    if word[0] != "(":
                        if word == ")":
                            # Ignore unary merges
                            if words[index - 1] == ")":
                                newg = "( " + stack.pop() + " " + stack.pop() + " )"
                                stack.append(newg)
                        else:
                            stack.append(word)
                gt[str(example_id) + "_1"] = stack[0]
            elif FLAGS.data_type == "listops":
                line = line.strip()
                label, seq = line.split('\t')
                if len(seq) <= 1:
                    continue
                tokens, transitions = ConvertBinaryBracketedSeq(
                    seq.split(' '))
                example = {}
                example["label"] = label
                example["sentence"] = seq
                example["tokens"] = tokens
                example["transitions"] = transitions
                example["example_id"] = str(example_id)
                gt[example["example_id"]] = example["sentence"]
            elif FLAGS.data_type == "mt":
                line = line.strip()
                gt[str(example_id) + "_1"] = line
    # Purely left- and right-branching baselines over the ground truth.
    lb = to_lb(gt)
    rb = to_rb(gt)
    print("GT average depth", corpus_average_depth(gt))
    ptb = {}
    ptb_labeled = {}
    if FLAGS.ptb_data_path != "_":
        with codecs.open(FLAGS.ptb_data_path, encoding='utf-8') as f:
            for line in f:
                loaded_example = json.loads(line)
                if loaded_example["gold_label"] not in LABEL_MAP:
                    continue
                ptb[loaded_example['pairID']] = loaded_example['sentence1_binary_parse']
                ptb_labeled[loaded_example['pairID']] = loaded_example['sentence1_parse']
    reports = []
    ptb_reports = []
    ptb_report_paths = []
    if FLAGS.use_random_parses:
        print("Creating five sets of random parses for the main data.")
        report_paths = list(range(5))
        for _ in report_paths:
            report = {}
            for sentence in gt:
                report[sentence] = randomize(gt[sentence])
            reports.append(report)
        print("Creating five sets of random parses for the PTB data.")
        ptb_report_paths = list(range(5))
        for _ in ptb_report_paths:
            report = {}
            for sentence in ptb:
                report[sentence] = randomize(ptb[sentence])
            ptb_reports.append(report)
    # BUG FIX: this was a separate `if`, so --use_random_parses also fell
    # through to the glob-loading `else` below and mixed random parses with
    # report files (the flag help says templates are ignored in that mode).
    elif FLAGS.use_balanced_parses:
        print("Creating five sets of balanced binary parses for the main data.")
        report_paths = list(range(5))
        for _ in report_paths:
            report = {}
            for sentence in gt:
                report[sentence] = balance(gt[sentence])
            reports.append(report)
        print("Creating five sets of balanced binary parses for the PTB data.")
        ptb_report_paths = list(range(5))
        for _ in ptb_report_paths:
            report = {}
            for sentence in ptb:
                report[sentence] = balance(ptb[sentence])
            ptb_reports.append(report)
    else:
        report_paths = glob.glob(FLAGS.main_report_path_template)
        for path in report_paths:
            print("Loading", path)
            if FLAGS.data_type == "nli":
                reports.append(read_nli_report(path))
            elif FLAGS.data_type == "sst":
                reports.append(read_sst_report(path))
            elif FLAGS.data_type == "listops":
                reports.append(read_listops_report(path))
            elif FLAGS.data_type == "mt":
                reports.append(read_mt_report(path))
        # BUG FIX: previously guarded on main_report_path_template, so a
        # missing PTB template ("_") was still globbed.
        if FLAGS.ptb_report_path_template != "_":
            ptb_report_paths = glob.glob(FLAGS.ptb_report_path_template)
            for path in ptb_report_paths:
                print("Loading", path)
                ptb_reports.append(read_ptb_report(path))
    if len(reports) > 1 and FLAGS.compute_self_f1:
        f1s = []
        for i in range(len(report_paths) - 1):
            for j in range(i + 1, len(report_paths)):
                # BUG FIX: corpus_stats returns (f1, const_parsed_ratio);
                # summing the raw tuples crashed in the mean below.
                f1 = corpus_stats(reports[i], reports[j])[0]
                f1s.append(f1)
        print("Mean Self F1:\t" + str(sum(f1s) / len(f1s)))
    correct = Counter()
    total = Counter()
    for i, report in enumerate(reports):
        print(report_paths[i])
        if FLAGS.print_latex > 0:
            for index, sentence in enumerate(gt):
                if index == FLAGS.print_latex:
                    break
                print(to_latex(gt[sentence]))
                print(to_latex(report[sentence]))
                print()
        if FLAGS.data_type == "listops":
            gtf1, gtcp = corpus_stats(report, gt, first_two=FLAGS.first_two, neg_pair=FLAGS.neg_pair, const_parse=True)
        else:
            gtf1, gtcp = corpus_stats(report, gt, first_two=FLAGS.first_two, neg_pair=FLAGS.neg_pair, const_parse=False)
        print("Left:", str(corpus_stats(report, lb)[0]) + '\t' + "Right:", str(corpus_stats(report, rb)[0]) + '\t' + "Groud-truth", str(gtf1) + '\t' + "Tree depth:", str(corpus_average_depth(report)), '\t', "Constituent parsed:", str(gtcp))
    correct = Counter()
    total = Counter()
    for i, report in enumerate(ptb_reports):
        print(ptb_report_paths[i])
        if FLAGS.print_latex > 0:
            for index, sentence in enumerate(ptb):
                if index == FLAGS.print_latex:
                    break
                print(to_latex(ptb[sentence]))
                print(to_latex(report[sentence]))
                print()
        print(str(corpus_stats(report, ptb)) + '\t' + str(corpus_average_depth(report)))
        set_correct, set_total = corpus_stats_labeled(report, ptb_labeled)
        correct.update(set_correct)
        total.update(set_total)
    # Per-label accuracy over all PTB reports.
    for key in sorted(total):
        print(key + '\t' + str(correct[key] * 1. / total[key]))
if __name__ == '__main__':
    # Command-line flags for the parse-evaluation script (python-gflags).
    gflags.DEFINE_string("main_report_path_template", "./checkpoints/*.report", "A template (with wildcards input as \*) for the paths to the main reports.")
    # NOTE(review): the help text below looks copy-pasted from the flag
    # above; main_data_path is a single data file, not a template.
    gflags.DEFINE_string("main_data_path", "./snli_1.0/snli_1.0_dev.jsonl", "A template (with wildcards input as \*) for the paths to the main reports.")
    gflags.DEFINE_string("ptb_report_path_template", "_", "A template (with wildcards input as \*) for the paths to the PTB reports, or '_' if not available.")
    gflags.DEFINE_string("ptb_data_path", "_", "The path to the PTB data in SNLI format, or '_' if not available.")
    gflags.DEFINE_boolean("compute_self_f1", True, "Compute self F1 over all reports matching main_report_path_template.")
    gflags.DEFINE_boolean("use_random_parses", False, "Replace all report trees with randomly generated trees. Report path template flags are not used when this is set.")
    gflags.DEFINE_boolean("use_balanced_parses", False, "Replace all report trees with roughly-balanced binary trees. Report path template flags are not used when this is set.")
    gflags.DEFINE_boolean("first_two", False, "Show 'first two' and 'last two' metrics.")
    gflags.DEFINE_boolean("neg_pair", False, "Show 'neg_pair' metric.")
    gflags.DEFINE_enum("data_type", "nli", ["nli", "sst", "listops", "mt"], "Data Type")
    gflags.DEFINE_integer("print_latex", 0, "Print this many trees in LaTeX format for each report.")
    # Parse command-line arguments into FLAGS, then run the evaluation.
    FLAGS(sys.argv)
    run()
| |
# coding=utf-8
# Copyright 2022 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Temperature scaling on CIFAR-10 and CIFAR-100.
It takes a SavedModel, adds a temperature parameter to its predictions, and
minimizes negative log-likelihood with respect to the parameter by grid search.
"""
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
from experimental.marginalization_mixup import data_utils # local file import
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from uncertainty_baselines.baselines.cifar import utils
import uncertainty_metrics as um
# Model / calibration flags.
flags.DEFINE_integer('ensemble_size', 4, 'Size of ensemble.')
flags.DEFINE_integer('per_core_batch_size', 64, 'Batch size per TPU core/GPU.')
flags.DEFINE_bool('ensemble_then_calibrate', False,
                  'Whether to ensemble before or after scaling by temperature.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_enum('dataset', 'cifar10',
                  enum_values=['cifar10', 'cifar100'],
                  help='Dataset.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE.')
# Input/output locations.
flags.DEFINE_string('output_dir', '/tmp/cifar',
                    'The directory where summaries are stored.')
flags.DEFINE_string('model_dir', '/tmp/cifar/model',
                    'The directory with SavedModel.')
flags.DEFINE_bool('corruptions', True, 'Whether to evaluate on corruptions.')
# TODO(ghassen): consider adding CIFAR-100-C to TFDS.
flags.DEFINE_string('cifar100_c_path', None,
                    'Path to the TFRecords files for CIFAR-100-C. Only valid '
                    '(and required) if dataset is cifar100 and corruptions.')
flags.DEFINE_bool('save_predictions', False, 'Whether to save predictions.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_bool('use_bfloat16', False, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 8, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
                    'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
def main(argv):
  """Grid-search softmax temperatures for a SavedModel on CIFAR.

  Evaluates every temperature in [0.1, 5.0] (step 0.1) on the validation,
  clean-test and (optionally) corrupted-test splits, writes all metrics to
  TensorBoard summaries, and optionally dumps logits/labels as .npy files.
  """
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving summaries at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)
  # Pick the distribution strategy: mirrored GPUs, or a TPU slice.
  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
  # Global batch size across all cores.
  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  # The temperature is fit on the validation split and reported on test.
  validation_input_fn = data_utils.load_input_fn(
      split=tfds.Split.VALIDATION,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=FLAGS.use_bfloat16,
      validation_set=True)
  clean_test_input_fn = data_utils.load_input_fn(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  test_datasets = {
      'validation': strategy.experimental_distribute_datasets_from_function(
          validation_input_fn),
      'clean': strategy.experimental_distribute_datasets_from_function(
          clean_test_input_fn),
  }
  if FLAGS.corruptions:
    # Add one distributed dataset per (corruption type, intensity) pair.
    if FLAGS.dataset == 'cifar10':
      load_c_dataset = utils.load_cifar10_c
    else:
      load_c_dataset = functools.partial(utils.load_cifar100_c,
                                         path=FLAGS.cifar100_c_path)
    corruption_types, max_intensity = utils.load_corrupted_test_info(
        FLAGS.dataset)
    for corruption in corruption_types:
      for intensity in range(1, max_intensity + 1):
        dataset = load_c_dataset(
            corruption_name=corruption,
            corruption_intensity=intensity,
            batch_size=batch_size,
            use_bfloat16=FLAGS.use_bfloat16)
        test_datasets['{0}_{1}'.format(corruption, intensity)] = (
            strategy.experimental_distribute_dataset(dataset))
  ds_info = tfds.builder(FLAGS.dataset).info
  # TODO(ywenxu): Remove hard-coding validation images.
  # NOTE(review): assumes a 2500-example validation split — confirm against
  # data_utils.load_input_fn's validation carve-out.
  steps_per_val = 2500 // (FLAGS.per_core_batch_size * FLAGS.num_cores)
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  if FLAGS.use_bfloat16:
    tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))
  with strategy.scope():
    logging.info('Building Keras model')
    model = tf.keras.models.load_model(FLAGS.model_dir)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Compute grid search over [0.1, ..., 4.9, 5.0].
    temperatures = [x * 0.1 for x in range(1, 51)]
    # One metrics dict (and one corruption-metrics dict) per temperature.
    temperature_metrics = []
    temperature_corrupt_metrics = []
    for _ in temperatures:
      metrics = {
          'val/negative_log_likelihood': tf.keras.metrics.Mean(),
          'val/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
          'val/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
          'test/negative_log_likelihood': tf.keras.metrics.Mean(),
          'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
          'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
      }
      temperature_metrics.append(metrics)
      corrupt_metrics = {}
      if FLAGS.corruptions:
        for intensity in range(1, max_intensity + 1):
          for corruption in corruption_types:
            dataset_name = '{0}_{1}'.format(corruption, intensity)
            corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                tf.keras.metrics.Mean())
            corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
                tf.keras.metrics.SparseCategoricalAccuracy())
            corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
        temperature_corrupt_metrics.append(corrupt_metrics)

    @tf.function
    def test_step(iterator, dataset_name):
      """Evaluation StepFn."""
      def step_fn(inputs):
        """Per-Replica StepFn."""
        images, labels = inputs
        # Tile the batch so one forward pass scores all ensemble members.
        images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
        logits = model(images, training=False)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        # Update the metrics for every candidate temperature.
        for (temperature,
             metrics,
             corrupt_metrics) in zip(temperatures,
                                     temperature_metrics,
                                     temperature_corrupt_metrics):
          tempered_logits = logits
          if not FLAGS.ensemble_then_calibrate:
            tempered_logits = logits / temperature
          probs = tf.nn.softmax(tempered_logits)
          # Average member probabilities back into a single prediction.
          per_probs = tf.split(probs,
                               num_or_size_splits=FLAGS.ensemble_size,
                               axis=0)
          probs = tf.reduce_mean(per_probs, axis=0)
          if FLAGS.ensemble_then_calibrate:
            probs = tf.nn.softmax(tf.math.log(probs) / temperature)
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
          if dataset_name == 'validation':
            metrics['val/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['val/accuracy'].update_state(labels, probs)
            metrics['val/ece'].update_state(labels, probs)
          elif dataset_name == 'clean':
            metrics['test/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['test/accuracy'].update_state(labels, probs)
            metrics['test/ece'].update_state(labels, probs)
          else:
            corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
                negative_log_likelihood)
            corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
                labels, probs)
            corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
                labels, probs)
        return logits, labels
      return strategy.run(step_fn, args=(next(iterator),))

    start_time = time.time()
    for i, (dataset_name, test_dataset) in enumerate(test_datasets.items()):
      logging.info('Testing on dataset %s', dataset_name)
      test_iterator = iter(test_dataset)
      if dataset_name == 'validation':
        steps = steps_per_val
      else:
        steps = steps_per_eval
      full_logits = []
      full_labels = []
      for step in range(steps):
        logits, labels = test_step(test_iterator, dataset_name)
        full_logits.append(logits)
        full_labels.append(labels)
        # NOTE(review): progress math uses steps_per_eval for every dataset,
        # so the ETA is only approximate for the (shorter) validation split.
        current_step = i * steps_per_eval + (step + 1)
        max_steps = steps_per_eval * len(test_datasets)
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: dataset {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps,
                       i + 1,
                       len(test_datasets),
                       steps_per_sec,
                       eta_seconds / 60,
                       time_elapsed / 60))
        if step % 20 == 0:
          logging.info(message)
      if FLAGS.save_predictions:
        # Gather per-replica results and dump raw logits/labels as .npy.
        full_logits = tf.concat([
            tf.concat(strategy.experimental_local_results(logits), axis=0)
            for logits in full_logits], axis=0)
        full_labels = tf.cast(tf.concat([
            tf.concat(strategy.experimental_local_results(labels), axis=0)
            for labels in full_labels], axis=0), tf.int64)
        with tf.io.gfile.GFile(os.path.join(
            FLAGS.output_dir, f'{dataset_name}_logits.npy'), 'wb') as f:
          np.save(f, full_logits.numpy())
        with tf.io.gfile.GFile(os.path.join(
            FLAGS.output_dir, f'{dataset_name}_labels.npy'), 'wb') as f:
          np.save(f, full_labels.numpy())
      logging.info('Done with testing on %s', dataset_name)

    # Report results for every candidate temperature.
    for i, metrics in enumerate(temperature_metrics):
      total_results = {name: metric.result() for name, metric in metrics.items()}
      if FLAGS.corruptions:
        corrupt_results = utils.aggregate_corrupt_metrics(
            temperature_corrupt_metrics[i],
            corruption_types,
            max_intensity,
            output_dir=FLAGS.output_dir)
        total_results.update(corrupt_results)
      logging.info('Temperature: %.2f, Test NLL: %.4f, Accuracy: %.2f%%',
                   temperatures[i],
                   total_results['test/negative_log_likelihood'],
                   total_results['test/accuracy'] * 100)
      # Use step counter as an alternative to temperature on x-axis. Can't do the
      # latter.
      with summary_writer.as_default():
        for name, result in total_results.items():
          tf.summary.scalar(name, result, step=i)
  logging.info('Completed script')
if __name__ == '__main__':
  # absl entry point: parses FLAGS from argv, then calls main.
  app.run(main)
| |
import pytest
from django.utils import timezone
from api.base.settings.defaults import API_BASE
from django.contrib.auth.models import Permission
from osf.models import RegistrationSchema
from osf_tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
DraftRegistrationFactory,
)
from osf.utils import permissions
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION
from website.project.metadata.utils import create_jsonschema_from_metaschema
@pytest.mark.django_db
class DraftRegistrationTestCase:
    """Shared fixtures for draft-registration API tests: users with
    different permission levels, a public project, and a metadata builder
    for the Prereg schema."""

    @pytest.fixture()
    def user(self):
        # Project creator / admin and draft initiator.
        return AuthUserFactory()

    @pytest.fixture()
    def user_write_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_read_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_non_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def project_public(self, user, user_write_contrib, user_read_contrib):
        project_public = ProjectFactory(is_public=True, creator=user)
        project_public.add_contributor(
            user_write_contrib,
            permissions=[permissions.WRITE])
        project_public.add_contributor(
            user_read_contrib,
            permissions=[permissions.READ])
        project_public.save()
        return project_public

    @pytest.fixture()
    def prereg_metadata(self):
        def metadata(draft):
            test_metadata = {}
            json_schema = create_jsonschema_from_metaschema(
                draft.registration_schema.schema)
            # BUG FIX: dict.iteritems() does not exist on Python 3;
            # items() behaves identically on both Python 2 and 3.
            for key, value in json_schema['properties'].items():
                response = 'Test response'
                if value['properties']['value'].get('enum'):
                    response = value['properties']['value']['enum'][0]
                if value['properties']['value'].get('properties'):
                    response = {'question': {'value': 'Test Response'}}
                test_metadata[key] = {'value': response}
            return test_metadata
        return metadata
@pytest.mark.django_db
class TestDraftRegistrationList(DraftRegistrationTestCase):
    """Permissions and filtering for the node draft-registrations list."""

    @pytest.fixture()
    def schema(self):
        # Latest version of the Open-Ended Registration schema.
        return RegistrationSchema.objects.get(
            name='Open-Ended Registration',
            schema_version=LATEST_SCHEMA_VERSION)

    @pytest.fixture()
    def draft_registration(self, user, project_public, schema):
        return DraftRegistrationFactory(
            initiator=user,
            registration_schema=schema,
            branched_from=project_public
        )

    @pytest.fixture()
    def url_draft_registrations(self, project_public):
        return '/{}nodes/{}/draft_registrations/'.format(
            API_BASE, project_public._id)

    def test_admin_can_view_draft_list(
            self, app, user, draft_registration,
            schema, url_draft_registrations):
        resp = app.get(url_draft_registrations, auth=user.auth)
        assert resp.status_code == 200
        drafts = resp.json['data']
        assert len(drafts) == 1
        draft = drafts[0]
        assert draft['attributes']['registration_supplement'] == schema._id
        assert draft['id'] == draft_registration._id
        assert draft['attributes']['registration_metadata'] == {}

    def test_cannot_view_draft_list(
            self, app, user_write_contrib,
            user_read_contrib, user_non_contrib,
            url_draft_registrations):
        # Read-only, read-write and logged-in non-contributors: forbidden.
        for viewer in (user_read_contrib, user_write_contrib, user_non_contrib):
            resp = app.get(
                url_draft_registrations,
                auth=viewer.auth,
                expect_errors=True)
            assert resp.status_code == 403
        # Unauthenticated requests are rejected outright.
        resp = app.get(url_draft_registrations, expect_errors=True)
        assert resp.status_code == 401

    def test_deleted_draft_registration_does_not_show_up_in_draft_list(
            self, app, user, draft_registration, url_draft_registrations):
        draft_registration.deleted = timezone.now()
        draft_registration.save()
        resp = app.get(url_draft_registrations, auth=user.auth)
        assert resp.status_code == 200
        assert len(resp.json['data']) == 0

    def test_draft_with_registered_node_does_not_show_up_in_draft_list(
            self, app, user, project_public, draft_registration, url_draft_registrations):
        registration = RegistrationFactory(project=project_public)
        draft_registration.registered_node = registration
        draft_registration.save()
        resp = app.get(url_draft_registrations, auth=user.auth)
        assert resp.status_code == 200
        assert len(resp.json['data']) == 0

    def test_draft_with_deleted_registered_node_shows_up_in_draft_list(
            self, app, user, project_public,
            draft_registration, schema,
            url_draft_registrations):
        # Register the node, then mark the registration itself deleted.
        registration = RegistrationFactory(project=project_public)
        draft_registration.registered_node = registration
        draft_registration.save()
        registration.is_deleted = True
        registration.save()
        resp = app.get(url_draft_registrations, auth=user.auth)
        assert resp.status_code == 200
        drafts = resp.json['data']
        assert len(drafts) == 1
        assert drafts[0]['attributes']['registration_supplement'] == schema._id
        assert drafts[0]['id'] == draft_registration._id
        assert drafts[0]['attributes']['registration_metadata'] == {}
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestDraftRegistrationCreate(DraftRegistrationTestCase):
    @pytest.fixture()
    def metaschema_open_ended(self):
        # Latest version of the Open-Ended Registration schema.
        return RegistrationSchema.objects.get(
            name='Open-Ended Registration',
            schema_version=LATEST_SCHEMA_VERSION)
    @pytest.fixture()
    def payload(self, metaschema_open_ended):
        # Minimal valid JSON-API body for creating a draft registration.
        return {
            'data': {
                'type': 'draft_registrations',
                'attributes': {
                    'registration_supplement': metaschema_open_ended._id
                }
            }
        }
    @pytest.fixture()
    def url_draft_registrations(self, project_public):
        # Collection endpoint for the public project's draft registrations.
        return '/{}nodes/{}/draft_registrations/'.format(
            API_BASE, project_public._id)
def test_type_is_draft_registrations(
self, app, user, metaschema_open_ended,
url_draft_registrations):
draft_data = {
'data': {
'type': 'nodes',
'attributes': {
'registration_supplement': metaschema_open_ended._id
}
}
}
res = app.post_json_api(
url_draft_registrations,
draft_data, auth=user.auth,
expect_errors=True)
assert res.status_code == 409
def test_admin_can_create_draft(
self, app, user, project_public,
payload, metaschema_open_ended):
url = '/{}nodes/{}/draft_registrations/?embed=branched_from&embed=initiator'.format(
API_BASE, project_public._id)
res = app.post_json_api(url, payload, auth=user.auth)
assert res.status_code == 201
data = res.json['data']
assert data['attributes']['registration_supplement'] == metaschema_open_ended._id
assert data['attributes']['registration_metadata'] == {}
assert data['embeds']['branched_from']['data']['id'] == project_public._id
assert data['embeds']['initiator']['data']['id'] == user._id
    def test_cannot_create_draft(
            self, app, user_write_contrib,
            user_read_contrib, user_non_contrib,
            project_public, payload,
            url_draft_registrations):
        """Only admins may create drafts: contributors without admin rights
        and logged-in non-contributors get 403; anonymous requests get 401."""
        # test_write_only_contributor_cannot_create_draft
        assert user_write_contrib in project_public.contributors.all()
        res = app.post_json_api(
            url_draft_registrations,
            payload,
            auth=user_write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        # test_read_only_contributor_cannot_create_draft
        assert user_read_contrib in project_public.contributors.all()
        res = app.post_json_api(
            url_draft_registrations,
            payload,
            auth=user_read_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        # test_non_authenticated_user_cannot_create_draft
        res = app.post_json_api(
            url_draft_registrations,
            payload, expect_errors=True)
        assert res.status_code == 401
        # test_logged_in_non_contributor_cannot_create_draft
        res = app.post_json_api(
            url_draft_registrations,
            payload,
            auth=user_non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
    def test_registration_supplement_errors(
            self, app, user, url_draft_registrations):
        """Invalid registration supplements are rejected: unknown schema ids
        404, inactive or outdated schema versions 400."""
        # test_registration_supplement_not_found
        draft_data = {
            'data': {
                'type': 'draft_registrations',
                'attributes': {
                    'registration_supplement': 'Invalid schema'
                }
            }
        }
        res = app.post_json_api(
            url_draft_registrations,
            draft_data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 404
        # test_registration_supplement_must_be_active_metaschema
        schema = RegistrationSchema.objects.get(
            name='Election Research Preacceptance Competition', active=False)
        draft_data = {
            'data': {
                'type': 'draft_registrations',
                'attributes': {
                    'registration_supplement': schema._id
                }
            }
        }
        res = app.post_json_api(
            url_draft_registrations,
            draft_data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Registration supplement must be an active schema.'
        # test_registration_supplement_must_be_most_recent_metaschema
        schema = RegistrationSchema.objects.get(
            name='Open-Ended Registration', schema_version=1)
        draft_data = {
            'data': {
                'type': 'draft_registrations',
                'attributes': {
                    'registration_supplement': schema._id
                }
            }
        }
        res = app.post_json_api(
            url_draft_registrations,
            draft_data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Registration supplement must be an active schema.'
def test_cannot_create_draft_errors(
self, app, user, project_public, payload):
# test_cannot_create_draft_from_a_registration
registration = RegistrationFactory(
project=project_public, creator=user)
url = '/{}nodes/{}/draft_registrations/'.format(
API_BASE, registration._id)
res = app.post_json_api(
url, payload, auth=user.auth,
expect_errors=True)
assert res.status_code == 404
# test_cannot_create_draft_from_deleted_node
project = ProjectFactory(is_public=True, creator=user)
project.is_deleted = True
project.save()
url_project = '/{}nodes/{}/draft_registrations/'.format(
API_BASE, project._id)
res = app.post_json_api(
url_project, payload,
auth=user.auth, expect_errors=True)
assert res.status_code == 410
assert res.json['errors'][0]['detail'] == 'The requested node is no longer available.'
# test_cannot_create_draft_from_collection
collection = CollectionFactory(creator=user)
url = '/{}nodes/{}/draft_registrations/'.format(
API_BASE, collection._id)
res = app.post_json_api(
url, payload, auth=user.auth,
expect_errors=True)
assert res.status_code == 404
    def test_required_metaschema_questions_not_required_on_post(
            self, app, user, project_public, prereg_metadata):
        """A draft can be created even when a required schema question
        (q1) is missing from registration_metadata."""
        prereg_schema = RegistrationSchema.objects.get(
            name='Prereg Challenge',
            schema_version=LATEST_SCHEMA_VERSION)
        prereg_draft_registration = DraftRegistrationFactory(
            initiator=user,
            registration_schema=prereg_schema,
            branched_from=project_public
        )
        url = '/{}nodes/{}/draft_registrations/?embed=initiator&embed=branched_from'.format(
            API_BASE, project_public._id)
        registration_metadata = prereg_metadata(prereg_draft_registration)
        # Drop a required question; the POST below must still succeed.
        del registration_metadata['q1']
        prereg_draft_registration.registration_metadata = registration_metadata
        prereg_draft_registration.save()
        payload = {
            'data': {
                'type': 'draft_registrations',
                'attributes': {
                    'registration_supplement': prereg_schema._id,
                    'registration_metadata': registration_metadata
                }
            }
        }
        # expect_errors=True merely tolerates an error response; the 201
        # is asserted explicitly below.
        res = app.post_json_api(
            url, payload, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 201
        data = res.json['data']
        assert res.json['data']['attributes']['registration_metadata']['q2']['value'] == 'Test response'
        assert data['attributes']['registration_supplement'] == prereg_schema._id
        assert data['embeds']['branched_from']['data']['id'] == project_public._id
        assert data['embeds']['initiator']['data']['id'] == user._id

    def test_registration_supplement_must_be_supplied(
            self, app, user, url_draft_registrations):
        """Omitting registration_supplement yields 400 with a JSON-API
        source pointer to the missing attribute."""
        draft_data = {
            'data': {
                'type': 'draft_registrations',
                'attributes': {
                }
            }
        }
        res = app.post_json_api(
            url_draft_registrations,
            draft_data, auth=user.auth,
            expect_errors=True)
        errors = res.json['errors'][0]
        assert res.status_code == 400
        assert errors['detail'] == 'This field is required.'
        assert errors['source']['pointer'] == '/data/attributes/registration_supplement'
    def test_registration_metadata_must_be_a_dictionary(
            self, app, user, payload, url_draft_registrations):
        """registration_metadata must be a dict, not a plain string."""
        payload['data']['attributes']['registration_metadata'] = 'Registration data'
        res = app.post_json_api(
            url_draft_registrations,
            payload, auth=user.auth,
            expect_errors=True)
        errors = res.json['errors'][0]
        assert res.status_code == 400
        assert errors['source']['pointer'] == '/data/attributes/registration_metadata'
        assert errors['detail'] == 'Expected a dictionary of items but got type "unicode".'

    def test_registration_metadata_question_values_must_be_dictionaries(
            self, app, user, payload, url_draft_registrations):
        """Each question's value must itself be an object, not a bare
        string (enforced by jsonschema validation)."""
        schema = RegistrationSchema.objects.get(
            name='OSF-Standard Pre-Data Collection Registration',
            schema_version=LATEST_SCHEMA_VERSION)
        payload['data']['attributes']['registration_supplement'] = schema._id
        payload['data']['attributes']['registration_metadata'] = {}
        payload['data']['attributes']['registration_metadata']['datacompletion'] = 'No, data collection has not begun'
        res = app.post_json_api(
            url_draft_registrations,
            payload, auth=user.auth,
            expect_errors=True)
        errors = res.json['errors'][0]
        assert res.status_code == 400
        assert errors['detail'] == 'u\'No, data collection has not begun\' is not of type \'object\''

    def test_registration_metadata_question_keys_must_be_value(
            self, app, user, payload, url_draft_registrations):
        """A question dict may only use the schema-defined key
        ('value'); any other key is rejected."""
        schema = RegistrationSchema.objects.get(
            name='OSF-Standard Pre-Data Collection Registration',
            schema_version=LATEST_SCHEMA_VERSION)
        payload['data']['attributes']['registration_supplement'] = schema._id
        payload['data']['attributes']['registration_metadata'] = {}
        payload['data']['attributes']['registration_metadata']['datacompletion'] = {
            'incorrect_key': 'No, data collection has not begun'}
        res = app.post_json_api(
            url_draft_registrations,
            payload, auth=user.auth,
            expect_errors=True)
        errors = res.json['errors'][0]
        assert res.status_code == 400
        assert errors['detail'] == 'Additional properties are not allowed (u\'incorrect_key\' was unexpected)'

    def test_question_in_registration_metadata_must_be_in_schema(
            self, app, user, payload, url_draft_registrations):
        """Questions not defined by the chosen schema are rejected."""
        schema = RegistrationSchema.objects.get(
            name='OSF-Standard Pre-Data Collection Registration',
            schema_version=LATEST_SCHEMA_VERSION)
        payload['data']['attributes']['registration_supplement'] = schema._id
        payload['data']['attributes']['registration_metadata'] = {}
        payload['data']['attributes']['registration_metadata']['q11'] = {
            'value': 'No, data collection has not begun'
        }
        res = app.post_json_api(
            url_draft_registrations,
            payload, auth=user.auth,
            expect_errors=True)
        errors = res.json['errors'][0]
        assert res.status_code == 400
        assert errors['detail'] == 'Additional properties are not allowed (u\'q11\' was unexpected)'
    def test_multiple_choice_question_value_must_match_value_in_schema(
            self, app, user, payload, url_draft_registrations):
        """A multiple-choice answer must be one of the schema's
        enumerated options."""
        schema = RegistrationSchema.objects.get(
            name='OSF-Standard Pre-Data Collection Registration',
            schema_version=LATEST_SCHEMA_VERSION)
        payload['data']['attributes']['registration_supplement'] = schema._id
        payload['data']['attributes']['registration_metadata'] = {}
        payload['data']['attributes']['registration_metadata']['datacompletion'] = {
            'value': 'Nope, data collection has not begun'}
        res = app.post_json_api(
            url_draft_registrations,
            payload, auth=user.auth,
            expect_errors=True)
        errors = res.json['errors'][0]
        assert res.status_code == 400
        assert errors['detail'] == 'u\'Nope, data collection has not begun\' is not one of [u\'No, data collection has not begun\', u\'Yes, data collection is underway or complete\']'

    def test_reviewer_cannot_create_draft_registration(
            self, app, user_read_contrib, project_public,
            payload, url_draft_registrations):
        """Holding the prereg-reviewer permission does not grant draft
        creation rights on someone else's node."""
        user = AuthUserFactory()
        administer_permission = Permission.objects.get(
            codename='administer_prereg')
        user.user_permissions.add(administer_permission)
        user.save()
        assert user_read_contrib in project_public.contributors.all()
        res = app.post_json_api(
            url_draft_registrations,
            payload, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 403
| |
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
# max code word in this release
MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
    """Exception raised for invalid patterns (exposed as sre.error)."""
    pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
    """Return a dict mapping each item of *list* to its position."""
    # NOTE: the parameter name shadows the builtin 'list'; kept for
    # interface compatibility with existing callers.
    return dict((item, i) for i, item in enumerate(list))
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
    # Regenerate the C header so the native engine and this module stay
    # in sync on opcode/flag numbering.
    def dump(fp, d, prefix):
        # Emit one #define per entry, ordered by numeric value.
        for k, v in sorted(d.items(), key=lambda a: a[1]):
            fp.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
    from sys import argv
    try:
        header = argv[1]
    except IndexError:
        # Default output path when none is given on the command line.
        header = "include/corgi/constants.h"
    with open(header, "w") as fp:
        fp.write("""\
#if !defined(CORGI_CONSTANTS_H_INCLUDED)
#define CORGI_CONSTANTS_H_INCLUDED
/**
 * Secret Labs' Regular Expression Engine
 *
 * regular expression matching engine
 *
 * NOTE: This file is generated by sre_constants.py. If you need
 * to change anything in here, edit sre_constants.py and run it.
 *
 * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
 *
 * See the _sre.c file for information on usage and redistribution.
 */
""")
        fp.write("#define SRE_MAGIC %d\n" % MAGIC)
        dump(fp, OPCODES, "SRE_OP")
        dump(fp, ATCODES, "SRE")
        dump(fp, CHCODES, "SRE")
        fp.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
        fp.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
        fp.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
        fp.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
        fp.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
        fp.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
        fp.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
        fp.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
        fp.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
        fp.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
        fp.write("""\
#endif
""")
| |
from django.shortcuts import render, get_object_or_404
from django.utils.translation import (
ugettext_lazy as _, pgettext_lazy)
from django.utils.text import format_lazy
from django.contrib import messages # noqa
from django.db import transaction
import django.forms as forms
from pieces.models import (
Venue, Study, Piece, PieceTag, AUTOMATIC_PIECE_TAGS,
PieceToStudyAssociation)
from django.contrib.auth.decorators import (
permission_required)
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
import sys
def show_piece(request, id):
    """Render a single Piece, splitting its content into <p> paragraphs.

    Extra (non-column) metadata stored as JSON on the piece is shown as
    a sorted key/value list.
    """
    piece = get_object_or_404(Piece, pk=id)
    paragraphs = piece.content.split("\n")
    # NOTE(review): paragraphs are interpolated into HTML without
    # escaping -- assumes piece.content is trusted; confirm.
    content = "\n".join(
        "<p>%s</p>" % paragraph
        for paragraph in paragraphs)
    from json import loads
    extra_data = loads(piece.extra_data_json)
    assert isinstance(extra_data, dict)
    # Python 2 only: dict.iteritems().
    extra_data = sorted(extra_data.iteritems())
    return render(request, 'pieces/piece.html', {
        "piece": piece,
        "extra_data": extra_data,
        "content": content,
        "may_see_non_coder_tags": request.user.has_perm("may_see_non_coder_tags"),
    })
# {{{ lexis nexis import
class ImportLNForm(forms.Form):
    """Upload form for importing pieces from a LexisNexis HTML export."""
    # Studies the imported pieces are attached to (required).
    studies = forms.ModelMultipleChoiceField(
        queryset=Study.objects, required=True)
    # Optional tags applied to every newly imported piece; automatic
    # tags are excluded from the choices.
    tags = forms.ModelMultipleChoiceField(
        queryset=PieceTag.objects
        .exclude(name__in=AUTOMATIC_PIECE_TAGS)
        .order_by("name"),
        required=False,
        help_text="Select piece tags (if any) to apply to newly "
        "imported pieces.")
    html_file = forms.FileField()
    # Re-import mode: replace the content of existing matching pieces
    # instead of creating new ones (see help_text for details).
    repair_content = forms.BooleanField(required=False,
            help_text="Check this box if a previous import of the same HTML "
            "went wrong (perhaps due to an import issue in Codery). "
            "For each piece to be imported, Codery will find all "
            "existing pieces that match the metadata "
            "of the newly imported piece and replace their content with "
            "the one from the new import. The old piece's metadata stays untouched, "
            "and its ID as well as its association with studies and samples stays "
            "the same. If this box is checked, the 'tags' and 'studies' boxes above "
            "are not considered at all.")

    def __init__(self, *args, **kwargs):
        # Bootstrap horizontal-form rendering via crispy-forms.
        self.helper = FormHelper()
        self.helper.form_class = "form-horizontal"
        self.helper.label_class = "col-lg-2"
        self.helper.field_class = "col-lg-8"
        self.helper.add_input(
            Submit("submit", "Submit"))
        super(ImportLNForm, self).__init__(*args, **kwargs)
@permission_required("pieces.bulk_import")
def import_ln_html(request):
    """Import pieces from a LexisNexis HTML export.

    GET shows the upload form; POST runs the import and renders a
    result page with the accumulated log (including a traceback on
    failure).
    """
    from django.utils.timezone import now
    if request.method == "POST":
        form = ImportLNForm(request.POST, request.FILES)
        if form.is_valid():
            # Local import shadows this view's name inside the function.
            from pieces.lexis import import_ln_html
            was_successful = True
            log_lines = []
            try:
                data = form.cleaned_data
                import_ln_html(
                    log_lines,
                    studies=data["studies"],
                    html_file=request.FILES["html_file"],
                    tags=data["tags"],
                    repair_content=data["repair_content"],
                    create_date=now(),
                    creator=request.user,
                )
                log = "\n".join(log_lines)
            except Exception:
                # Intentionally broad: any import failure is reported on
                # the result page rather than surfacing as a 500.
                was_successful = False
                from traceback import format_exception
                log = "\n".join(log_lines) + "".join(
                    format_exception(*sys.exc_info()))
            return render(request, 'bulk-result.html', {
                "process_description": "Import Result",
                "log": log,
                "status": (
                    "Import successful."
                    if was_successful
                    else "Import failed. See above for error. "
                    "No changes have been made to the database."),
                "was_successful": was_successful,
            })
        # Invalid form falls through and is re-rendered with errors.
    else:
        form = ImportLNForm()  # An unbound form
    return render(request, 'generic-form.html', {
        "form": form,
        "form_description": "Import LexisNexis HTML",
    })
# }}}
# {{{ csv import
class CSVImportForm(forms.Form):
    """Upload form for importing pieces from a CSV file.

    Column indices are 1-based; only the content column is mandatory.
    """
    venue = forms.ModelChoiceField(
        queryset=Venue.objects, required=True)
    studies = forms.ModelMultipleChoiceField(
        queryset=Study.objects, required=True)
    # Optional tags applied to newly imported pieces; automatic tags
    # are excluded from the choices.
    tags = forms.ModelMultipleChoiceField(
        queryset=PieceTag.objects
        .exclude(name__in=AUTOMATIC_PIECE_TAGS)
        .order_by("name"),
        required=False,
        help_text="Select piece tags (if any) to apply to newly "
        "imported pieces.")
    file = forms.FileField(
        label=_("File"),
        help_text="CSV file with header row")
    title_column = forms.IntegerField(
        help_text=_("1-based column index for a title"),
        min_value=1, required=False)
    content_column = forms.IntegerField(
        help_text=_("1-based column index for content"),
        min_value=1)
    url_column = forms.IntegerField(
        help_text=_("1-based column index for a URL"),
        min_value=1, required=False)
    byline_column = forms.IntegerField(
        help_text=_("1-based column index for the byline"),
        min_value=1, required=False)

    def __init__(self, *args, **kwargs):
        super(CSVImportForm, self).__init__(*args, **kwargs)
        # Bootstrap horizontal-form rendering; two submit buttons drive
        # the preview vs. import behavior in the view.
        self.helper = FormHelper()
        self.helper.form_class = "form-horizontal"
        self.helper.label_class = "col-lg-2"
        self.helper.field_class = "col-lg-8"
        self.helper.add_input(Submit("preview", _("Preview")))
        self.helper.add_input(Submit("import", _("Import")))
def smart_truncate(content, length=100, suffix='...'):
    """Truncate *content* to at most *length* characters at a word
    boundary, appending *suffix* when truncation occurs."""
    if len(content) > length:
        head = content[:length]
        content = head.rsplit(' ', 1)[0] + suffix
    return content
def csv_to_pieces(
        log_lines, file_contents,
        venue,
        title_column, content_column, url_column, byline_column,
        creator, create_date):
    """Parse an uploaded CSV file into a list of unsaved Piece instances.

    :arg log_lines: list to which human-readable log messages are appended
    :arg file_contents: uploaded file object (CSV with a header row)
    :arg venue: Venue instance assigned to every piece
    :arg title_column: 1-based column index of the title, or None to
        derive the title from the content
    :arg content_column: 1-based column index of the content (required;
        rows with empty content are skipped with a log message)
    :arg url_column: 1-based column index of the URL, or None; if the
        cell is empty, the first URL found in the content is used
    :arg byline_column: 1-based column index of the byline, or None
    :arg creator: user recorded as creator on each piece
    :arg create_date: timestamp recorded on each piece
    :returns: list of unsaved Piece instances

    Columns not referenced by any *_column argument are stored as JSON
    in piece.extra_data_json, keyed by their header-row name.
    """
    result = []

    csv_data = file_contents.read()

    import re
    # Normalize all newline conventions before handing lines to csv.
    new_line_re = re.compile("\n\r?|\r\n?")
    csv_lines = new_line_re.split(csv_data)

    import csv

    # 0-based indices of explicitly consumed columns; everything else
    # ends up in extra_data.
    used_columns = [
        title_column, content_column, url_column, byline_column]
    used_columns = [col-1 for col in used_columns if col is not None]

    reader = csv.reader(csv_lines)

    header = None

    url_regex = re.compile(
        'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|'
        '(?:%[0-9a-fA-F][0-9a-fA-F]))+')

    def get_idx(row, index):
        # 'index' is 1-based; returns None for columns beyond the row.
        index -= 1
        if index >= len(row):
            return None
        return row[index]

    for i, row in enumerate(reader):
        if header is None:
            # The first row is the header; keep it for extra_data keys.
            header = row
            continue

        # Python 2: cells are byte strings; decode defensively.
        row = [col.decode("utf-8", errors="replace")
                for col in row]

        piece = Piece()
        piece.content = get_idx(row, content_column)
        if not piece.content:
            log_lines.append("Piece %d had no content. Skipping." % (i+1))
            continue

        if title_column is not None:
            piece.title = get_idx(row, title_column)
        else:
            piece.title = smart_truncate(piece.content, 60)

        piece.venue = venue

        if url_column is not None:
            piece.url = get_idx(row, url_column)
            # Fall back to the first URL found in the content.
            # NOTE(review): fallback assumed to apply only when a URL
            # column was requested -- confirm against original intent.
            if not piece.url:
                url_match = url_regex.search(piece.content)
                if url_match:
                    piece.url = url_match.group(0)

        if byline_column is not None:
            # BUG FIX: previously 'row.get_idx(row, byline_column)',
            # which raised AttributeError (lists have no get_idx).
            piece.byline = get_idx(row, byline_column)

        # Stash all unclaimed columns as JSON keyed by header name.
        extra_data = {}
        for icol, (col, header_col) in enumerate(zip(row, header)):
            if icol in used_columns:
                continue
            extra_data[header_col] = col

        from json import dumps
        piece.extra_data_json = dumps(extra_data)

        piece.creator = creator
        piece.create_date = create_date

        result.append(piece)

    return result
@permission_required("pieces.bulk_import")
@transaction.atomic
def import_csv(request):
    """Preview or import pieces from an uploaded CSV file.

    The same form drives two submit buttons: 'preview' only renders the
    parsed pieces, while 'import' additionally saves them, applies the
    chosen tags and links them to the chosen studies. The whole request
    runs inside a single transaction.
    """
    from django.utils.timezone import now
    form_text = ""
    log_lines = []
    now_datetime = now()
    if request.method == "POST":
        form = CSVImportForm(request.POST, request.FILES)
        is_import = "import" in request.POST
        if form.is_valid():
            try:
                pieces = csv_to_pieces(
                    log_lines=log_lines,
                    file_contents=request.FILES["file"],
                    venue=form.cleaned_data["venue"],
                    title_column=form.cleaned_data["title_column"],
                    content_column=form.cleaned_data["content_column"],
                    url_column=form.cleaned_data["url_column"],
                    byline_column=form.cleaned_data["byline_column"],
                    creator=request.user, create_date=now_datetime)
            except Exception as e:
                # BUG FIX: the '{err}' placeholder was previously filled
                # positionally, which made format_lazy raise
                # KeyError: 'err' when the message was rendered.
                messages.add_message(request, messages.ERROR,
                        format_lazy(
                            "{err}: {err_type} {err_str}",
                            err=pgettext_lazy("Start of Error message", "Error"),
                            err_type=type(e).__name__,
                            err_str=str(e)))
            else:
                messages.add_message(request, messages.INFO,
                        _("%(total)d pieces found.")
                        % {'total': len(pieces)})

                from django.template.loader import render_to_string

                if is_import:
                    for piece in pieces:
                        # Save once to obtain a primary key, then again
                        # after assigning the many-to-many tags.
                        piece.save()
                        piece.tags = form.cleaned_data["tags"]
                        piece.save()
                        for study in form.cleaned_data["studies"]:
                            pts = PieceToStudyAssociation()
                            pts.study = study
                            pts.piece = piece
                            pts.create_date = now_datetime
                            pts.creator = request.user
                            pts.save()
                    form_text = render_to_string(
                        "pieces/piece-import-preview.html", {
                            "pieces": pieces,
                            "log_lines": log_lines,
                        })
                    messages.add_message(request, messages.SUCCESS,
                            _("%d pieces imported.") % len(pieces))
                else:
                    # Preview only: show parsed pieces without saving.
                    form_text = render_to_string(
                        "pieces/piece-import-preview.html", {
                            "pieces": pieces,
                            "log_lines": log_lines,
                        })
    else:
        form = CSVImportForm()

    return render(request, 'generic-form.html', {
        "form": form,
        "doc": form_text,
        "form_description": "Import CSV",
    })
# }}}
# vim: foldmethod=marker
| |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import netaddr
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from six import iteritems
from tacker.common import exceptions as n_exc
LOG = logging.getLogger(__name__)
ATTRIBUTES_TO_UPDATE = 'attributes_to_update'
ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'
# Used by range check to indicate no limit for a bound.
UNLIMITED = None
def _verify_dict_keys(expected_keys, target_dict, strict=True):
    """Allows to verify keys in a dictionary.

    :param expected_keys: A list of keys expected to be present.
    :param target_dict: The dictionary which should be verified.
    :param strict: Specifies whether additional keys are allowed to be present.
    :return: None if the keys in the dictionary correspond to the
        specification, otherwise a translated error message string.
    """
    if not isinstance(target_dict, dict):
        msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
                 "with keys: %(expected_keys)s") %
               {'target_dict': target_dict, 'expected_keys': expected_keys})
        return msg

    expected_keys = set(expected_keys)
    provided_keys = set(target_dict.keys())

    # strict: keys must match exactly; otherwise expected keys need only
    # be a subset of the provided ones.
    predicate = expected_keys.__eq__ if strict else expected_keys.issubset

    if not predicate(provided_keys):
        msg = (_("Validation of dictionary's keys failed."
                 "Expected keys: %(expected_keys)s "
                 "Provided keys: %(provided_keys)s") %
               {'expected_keys': expected_keys,
                'provided_keys': provided_keys})
        return msg
def is_attr_set(attribute):
return not (attribute is None or attribute is ATTR_NOT_SPECIFIED)
def _validate_values(data, valid_values=None):
if data not in valid_values:
msg = (_("'%(data)s' is not in %(valid_values)s") %
{'data': data, 'valid_values': valid_values})
LOG.debug(msg)
return msg
def _validate_not_empty_string_or_none(data, max_len=None):
if data is not None:
return _validate_not_empty_string(data, max_len=max_len)
def _validate_not_empty_string(data, max_len=None):
msg = _validate_string(data, max_len=max_len)
if msg:
return msg
if not data.strip():
return _("'%s' Blank strings are not permitted") % data
def _validate_string_or_none(data, max_len=None):
if data is not None:
return _validate_string(data, max_len=max_len)
def _validate_string(data, max_len=None):
    """Return an error message unless *data* is a string no longer than
    *max_len* (when given); None on success."""
    if not isinstance(data, six.string_types):
        msg = _("'%s' is not a valid string") % data
        LOG.debug(msg)
        return msg

    if max_len is not None and len(data) > max_len:
        msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
               {'data': data, 'max_len': max_len})
        LOG.debug(msg)
        return msg
def _validate_boolean(data, valid_values=None):
    """Return an error message unless *data* converts to a boolean via
    convert_to_boolean; None on success."""
    try:
        convert_to_boolean(data)
    except n_exc.InvalidInput:
        msg = _("'%s' is not a valid boolean value") % data
        LOG.debug(msg)
        return msg


def _validate_range(data, valid_values=None):
    """Check that integer value is within a range provided.

    Test is inclusive. Allows either limit to be ignored, to allow
    checking ranges where only the lower or upper limit matter.
    It is expected that the limits provided are valid integers or
    the value None.

    :param data: value to check; anything accepted by int().
    :param valid_values: two-item sequence (min, max); either bound may
        be UNLIMITED (None) to skip that check.
    :return: None on success, otherwise an error message string.
    """
    min_value = valid_values[0]
    max_value = valid_values[1]
    try:
        data = int(data)
    except (ValueError, TypeError):
        msg = _("'%s' is not an integer") % data
        LOG.debug(msg)
        return msg
    if min_value is not UNLIMITED and data < min_value:
        msg = _("'%(data)s' is too small - must be at least "
                "'%(limit)d'") % {'data': data, 'limit': min_value}
        LOG.debug(msg)
        return msg
    if max_value is not UNLIMITED and data > max_value:
        msg = _("'%(data)s' is too large - must be no larger than "
                "'%(limit)d'") % {'data': data, 'limit': max_value}
        LOG.debug(msg)
        return msg
def _validate_no_whitespace(data):
"""Validates that input has no whitespace."""
if len(data.split()) > 1:
msg = _("'%s' contains whitespace") % data
LOG.debug(msg)
raise n_exc.InvalidInput(error_message=msg)
return data
def _validate_mac_address(data, valid_values=None):
    """Return an error message unless *data* is a valid MAC address."""
    valid_mac = False
    try:
        valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
    except Exception:
        pass
    finally:
        # TODO(arosen): The code in this file should be refactored
        # so it catches the correct exceptions. _validate_no_whitespace
        # raises AttributeError if data is None.
        # NOTE(review): returning from 'finally' also suppresses any
        # in-flight exception from the try block.
        if valid_mac is False:
            msg = _("'%s' is not a valid MAC address") % data
            LOG.debug(msg)
            return msg


def _validate_mac_address_or_none(data, valid_values=None):
    """Like _validate_mac_address, but None is accepted."""
    if data is None:
        return
    return _validate_mac_address(data, valid_values)


def _validate_ip_address(data, valid_values=None):
    """Return an error message unless *data* parses as an IP address
    containing no whitespace; None on success."""
    try:
        netaddr.IPAddress(_validate_no_whitespace(data))
    except Exception:
        msg = _("'%s' is not a valid IP address") % data
        LOG.debug(msg)
        return msg
def _validate_ip_pools(data, valid_values=None):
    """Validate that start and end IP addresses are present.

    In addition to this the IP addresses will also be validated

    :param data: list of {'start': ip, 'end': ip} dicts.
    :return: None on success, otherwise an error message string.
    """
    if not isinstance(data, list):
        msg = _("Invalid data format for IP pool: '%s'") % data
        LOG.debug(msg)
        return msg

    expected_keys = ['start', 'end']
    for ip_pool in data:
        # Strict key check: each pool must have exactly start and end.
        msg = _verify_dict_keys(expected_keys, ip_pool)
        if msg:
            LOG.debug(msg)
            return msg
        for k in expected_keys:
            msg = _validate_ip_address(ip_pool[k])
            if msg:
                LOG.debug(msg)
                return msg


def _validate_fixed_ips(data, valid_values=None):
    """Validate a list of fixed-IP dicts (keys 'ip_address' and/or
    'subnet_id'); duplicate IPs are rejected, duplicate subnet_ids are
    allowed. Returns None on success, else an error message."""
    if not isinstance(data, list):
        msg = _("Invalid data format for fixed IP: '%s'") % data
        LOG.debug(msg)
        return msg

    ips = []
    for fixed_ip in data:
        if not isinstance(fixed_ip, dict):
            msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
            LOG.debug(msg)
            return msg
        if 'ip_address' in fixed_ip:
            # Ensure that duplicate entries are not set - just checking IP
            # suffices. Duplicate subnet_id's are legitimate.
            fixed_ip_address = fixed_ip['ip_address']
            if fixed_ip_address in ips:
                msg = _("Duplicate IP address '%s'") % fixed_ip_address
            else:
                msg = _validate_ip_address(fixed_ip_address)
            if msg:
                LOG.debug(msg)
                return msg
            ips.append(fixed_ip_address)
        if 'subnet_id' in fixed_ip:
            msg = _validate_uuid(fixed_ip['subnet_id'])
            if msg:
                LOG.debug(msg)
                return msg
def _validate_nameservers(data, valid_values=None):
    """Validate an iterable of unique nameservers, each either an IP
    address or a hostname; None on success, else an error message."""
    if not hasattr(data, '__iter__'):
        msg = _("Invalid data format for nameserver: '%s'") % data
        LOG.debug(msg)
        return msg

    ips = []
    for ip in data:
        msg = _validate_ip_address(ip)
        if msg:
            # This may be a hostname
            # NOTE(review): HOSTNAME_PATTERN is not defined in this
            # chunk -- confirm it exists at module level.
            msg = _validate_regex(ip, HOSTNAME_PATTERN)
            if msg:
                msg = _("'%s' is not a valid nameserver") % ip
                LOG.debug(msg)
                return msg
        if ip in ips:
            msg = _("Duplicate nameserver '%s'") % ip
            LOG.debug(msg)
            return msg
        ips.append(ip)


def _validate_hostroutes(data, valid_values=None):
    """Validate a duplicate-free list of host routes, each a dict with
    exactly 'destination' (subnet) and 'nexthop' (IP) keys."""
    if not isinstance(data, list):
        msg = _("Invalid data format for hostroute: '%s'") % data
        LOG.debug(msg)
        return msg

    expected_keys = ['destination', 'nexthop']
    hostroutes = []
    for hostroute in data:
        msg = _verify_dict_keys(expected_keys, hostroute)
        if msg:
            LOG.debug(msg)
            return msg
        msg = _validate_subnet(hostroute['destination'])
        if msg:
            LOG.debug(msg)
            return msg
        msg = _validate_ip_address(hostroute['nexthop'])
        if msg:
            LOG.debug(msg)
            return msg
        if hostroute in hostroutes:
            msg = _("Duplicate hostroute '%s'") % hostroute
            LOG.debug(msg)
            return msg
        hostroutes.append(hostroute)
def _validate_ip_address_or_none(data, valid_values=None):
    """Like _validate_ip_address, but None is accepted."""
    if data is None:
        return None
    return _validate_ip_address(data, valid_values)


def _validate_subnet(data, valid_values=None):
    """Return an error message unless *data* is a CIDR-style subnet.

    A bare address without '/<prefix>' is rejected, with the canonical
    cidr form suggested in the message; None on success.
    """
    msg = None
    try:
        net = netaddr.IPNetwork(_validate_no_whitespace(data))
        if '/' not in data:
            msg = _("'%(data)s' isn't a recognized IP subnet cidr,"
                    " '%(cidr)s' is recommended") % {"data": data,
                                                    "cidr": net.cidr}
        else:
            return
    except Exception:
        msg = _("'%s' is not a valid IP subnet") % data
    if msg:
        LOG.debug(msg)
    return msg


def _validate_subnet_list(data, valid_values=None):
    """Validate a duplicate-free list of subnets; None on success."""
    if not isinstance(data, list):
        msg = _("'%s' is not a list") % data
        LOG.debug(msg)
        return msg

    if len(set(data)) != len(data):
        msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
        LOG.debug(msg)
        return msg

    for item in data:
        msg = _validate_subnet(item)
        if msg:
            return msg


def _validate_subnet_or_none(data, valid_values=None):
    """Like _validate_subnet, but None is accepted."""
    if data is None:
        return
    return _validate_subnet(data, valid_values)
def _validate_regex(data, valid_values=None):
try:
if re.match(valid_values, data):
return
except TypeError:
pass
msg = _("'%s' is not a valid input") % data
LOG.debug(msg)
return msg
def _validate_regex_or_none(data, valid_values=None):
if data is None:
return
return _validate_regex(data, valid_values)
def _validate_uuid(data, valid_values=None):
    """Return an error message unless *data* is UUID-like; None on
    success."""
    if not uuidutils.is_uuid_like(data):
        msg = _("'%s' is not a valid UUID") % data
        LOG.debug(msg)
        return msg


def _validate_uuid_or_none(data, valid_values=None):
    """Like _validate_uuid, but None is accepted."""
    if data is not None:
        return _validate_uuid(data)


def _validate_uuid_list(data, valid_values=None):
    """Validate a duplicate-free list of UUIDs; None on success."""
    if not isinstance(data, list):
        msg = _("'%s' is not a list") % data
        LOG.debug(msg)
        return msg
    for item in data:
        msg = _validate_uuid(item)
        if msg:
            LOG.debug(msg)
            return msg
    # NOTE(review): unlike _validate_subnet_list, duplicates are checked
    # after per-item validation.
    if len(set(data)) != len(data):
        msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
        LOG.debug(msg)
        return msg
def _validate_dict_item(key, key_validator, data):
    """Convert and validate a single entry of ``data``.

    ``key_validator`` is the spec for ``key``: it may carry a
    'convert_to' callable and at most one 'type:*' entry naming a
    validator from the module-level ``validators`` map.
    Returns an error message string on failure, else None.
    Note: conversion mutates ``data`` in place before validation.
    """
    # Find conversion function, if any, and apply it
    conv_func = key_validator.get('convert_to')
    if conv_func:
        data[key] = conv_func(data.get(key))
    # Find validator function
    # TODO(salv-orlando): Structure of dict attributes should be improved
    # to avoid iterating over items
    val_func = val_params = None
    for (k, v) in iteritems(key_validator):
        if k.startswith('type:'):
            # ask forgiveness, not permission
            try:
                val_func = validators[k]
            except KeyError:
                return _("Validator '%s' does not exist.") % k
            val_params = v
            break
    # Process validation
    if val_func:
        return val_func(data.get(key), val_params)
def _validate_dict(data, key_specs=None):
    """Validate that ``data`` is a dict conforming to ``key_specs``.

    ``key_specs`` maps key names to per-key specs (see
    _validate_dict_item).  Returns an error message string on the first
    failure, else None.  Values may be converted in place.
    """
    if not isinstance(data, dict):
        msg = _("'%s' is not a dictionary") % data
        LOG.debug(msg)
        return msg
    # Do not perform any further validation, if no constraints are supplied
    if not key_specs:
        return
    # Check whether all required keys are present
    required_keys = [key for key, spec in iteritems(key_specs)
                     if spec.get('required')]
    if required_keys:
        msg = _verify_dict_keys(required_keys, data, False)
        if msg:
            LOG.debug(msg)
            return msg
    # Perform validation and conversion of all values
    # according to the specifications.
    for key, key_validator in [(k, v) for k, v in iteritems(key_specs)
                               if k in data]:
        msg = _validate_dict_item(key, key_validator, data)
        if msg:
            LOG.debug(msg)
            return msg
def _validate_dict_or_none(data, key_specs=None):
if data is not None:
return _validate_dict(data, key_specs)
def _validate_dict_or_empty(data, key_specs=None):
if data != {}:
return _validate_dict(data, key_specs)
def _validate_dict_or_nodata(data, key_specs=None):
if data:
return _validate_dict(data, key_specs)
def _validate_non_negative(data, valid_values=None):
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if data < 0:
msg = _("'%s' should be non-negative") % data
LOG.debug(msg)
return msg
def convert_to_boolean(data):
    """Coerce 'true'/'false'/'1'/'0' strings, bools and 0/1 ints to bool.

    :raises n_exc.InvalidInput: for any other value.
    """
    if isinstance(data, six.string_types):
        lowered = data.lower()
        if lowered in ("true", "1"):
            return True
        if lowered in ("false", "0"):
            return False
    elif isinstance(data, bool):
        return data
    elif isinstance(data, int) and data in (0, 1):
        return bool(data)
    msg = _("'%s' cannot be converted to boolean") % data
    raise n_exc.InvalidInput(error_message=msg)
def convert_to_int(data):
    """Coerce data to int, raising InvalidInput if that is impossible."""
    try:
        return int(data)
    except (ValueError, TypeError):
        raise n_exc.InvalidInput(
            error_message=_("'%s' is not an integer") % data)
def convert_kvp_str_to_list(data):
    """Convert a value of the form 'key=value' to ['key', 'value'].

    Only the first '=' splits, so values may themselves contain '='.
    :raises n_exc.InvalidInput: if any of the strings are malformed
                                (e.g. do not contain a key).
    """
    key_value = [part.strip() for part in data.split('=', 1)]
    if len(key_value) != 2 or not key_value[0]:
        msg = _("'%s' is not of the form <key>=[value]") % data
        raise n_exc.InvalidInput(error_message=msg)
    return key_value
def convert_kvp_list_to_dict(kvp_list):
    """Convert a list of 'key=value' strings to a dict of key -> values.

    Values for a repeated key are collected (deduplicated) into a list.
    :raises n_exc.InvalidInput: if any of the strings are malformed
                                (e.g. do not contain a key).
    """
    if kvp_list == ['True']:
        # No values were provided (i.e. '--flag-name')
        return {}
    collected = {}
    for kvp_str in kvp_list:
        key, value = convert_kvp_str_to_list(kvp_str)
        collected.setdefault(key, set()).add(value)
    return dict((k, list(v)) for k, v in iteritems(collected))
def convert_none_to_empty_list(value):
    """Map None to [] and pass every other value through unchanged."""
    if value is None:
        return []
    return value
def convert_none_to_empty_dict(value):
    """Map None to {} and pass every other value through unchanged."""
    if value is None:
        return {}
    return value
def convert_to_list(data):
    """Wrap data in a list: None -> [], iterables (not strings) -> list."""
    if data is None:
        return []
    if hasattr(data, '__iter__') and not isinstance(data, six.string_types):
        return list(data)
    return [data]
# RFC-952/1123-style hostname: total length <= 254, dot-separated labels of
# 1-63 chars not starting with a digit or '-', ending with an alphabetic TLD.
# NOTE(review): not a raw string -- it relies on \d, \., \- passing through
# unchanged as unrecognized escapes; a raw string would be safer.
HOSTNAME_PATTERN = ("(?=^.{1,254}$)(^(?:(?!\d+\.|-)[a-zA-Z0-9_\-]"
                    "{1,63}(?<!-)\.?)+(?:[a-zA-Z]{2,})$)")
# One hexadecimal digit; building block for the UUID and MAC patterns below.
HEX_ELEM = '[0-9A-Fa-f]'
# Canonical 8-4-4-4-12 UUID layout.
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
                         HEX_ELEM + '{4}', HEX_ELEM + '{4}',
                         HEX_ELEM + '{12}'])
# Note: In order to ensure that the MAC address is unicast the first byte
# must be even.
MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)
# Dictionary that maintains a list of validation functions
# keyed by the 'type:*' strings used in attribute specs
# (see _validate_dict_item).
validators = {'type:dict': _validate_dict,
              'type:dict_or_none': _validate_dict_or_none,
              'type:dict_or_empty': _validate_dict_or_empty,
              'type:dict_or_nodata': _validate_dict_or_nodata,
              'type:fixed_ips': _validate_fixed_ips,
              'type:hostroutes': _validate_hostroutes,
              'type:ip_address': _validate_ip_address,
              'type:ip_address_or_none': _validate_ip_address_or_none,
              'type:ip_pools': _validate_ip_pools,
              'type:mac_address': _validate_mac_address,
              'type:mac_address_or_none': _validate_mac_address_or_none,
              'type:nameservers': _validate_nameservers,
              'type:non_negative': _validate_non_negative,
              'type:range': _validate_range,
              'type:regex': _validate_regex,
              'type:regex_or_none': _validate_regex_or_none,
              'type:string': _validate_string,
              'type:string_or_none': _validate_string_or_none,
              'type:not_empty_string': _validate_not_empty_string,
              'type:not_empty_string_or_none':
              _validate_not_empty_string_or_none,
              'type:subnet': _validate_subnet,
              'type:subnet_list': _validate_subnet_list,
              'type:subnet_or_none': _validate_subnet_or_none,
              'type:uuid': _validate_uuid,
              'type:uuid_or_none': _validate_uuid_or_none,
              'type:uuid_list': _validate_uuid_list,
              'type:values': _validate_values,
              'type:boolean': _validate_boolean}
# Define constants for base resource name
# Note: a default of ATTR_NOT_SPECIFIED indicates that an
# attribute is not required, but will be generated by the plugin
# if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED
# is different from an attribute that has been specified with a value of
# None. For example, if 'gateway_ip' is omitted in a request to
# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
# and the default gateway_ip will be generated.
# However, if gateway_ip is specified as None, this means that
# the subnet does not have a gateway IP.
# The following is a short reference for understanding attribute info:
#    default: default value of the attribute (if missing, the attribute
#    becomes mandatory.
#    allow_post: the attribute can be used on POST requests.
#    allow_put: the attribute can be used on PUT requests.
#    validate: specifies rules for validating data in the attribute.
#    convert_to: transformation to apply to the value before it is returned
#    is_visible: the attribute is returned in GET responses.
#    required_by_policy: the attribute is required by the policy engine and
#    should therefore be filled by the API layer even if not present in
#    request body.
#    enforce_policy: the attribute is actively part of the policy enforcing
#    mechanism, ie: there might be rules which refer to this attribute.
# Identify the attribute used by a resource to reference another resource
# These registries are populated elsewhere by extension/resource modules.
RESOURCE_ATTRIBUTE_MAP = {}
RESOURCE_FOREIGN_KEYS = {}
PLURALS = {'extensions': 'extension'}
| |
from .fields import BitField, Field
from nettest.exceptions import NettestError
import struct
class PacketMeta(type):
    """Metaclass for Packet subclasses.

    Collects the attributes named in the class-level ``fields`` list into
    an ordered ``_fields`` list of (name, descriptor) pairs, validating
    that each descriptor is a Field, Packet or list thereof.  For plain
    Field descriptors, the class attribute is replaced by the field's
    default value so instances start out with defaults.
    """
    def __new__(cls, name, bases, attrs):
        fields = attrs.get('fields')
        if fields is None:
            raise NettestError(_("packet class must have 'fields' field"))
        _fields = []
        for fieldname in attrs['fields']:
            field = attrs.get(fieldname)
            if field is None:
                # Fall back to base classes.  Use a getattr default so a
                # base class lacking the attribute yields the NettestError
                # below instead of leaking an AttributeError (bug fix).
                for baseclass in bases:
                    field = getattr(baseclass, fieldname, None)
                    if field is not None:
                        break
                else:
                    raise NettestError(_("field '%s' doesn't exsists in class %s")%(fieldname, name))
            if not cls.__check_field_type(cls, field):
                raise NettestError(_("field '%s' in class %s should be in type (Field, Packet, list)")%(fieldname, name))
            _fields.append((fieldname, field))
            if isinstance(field, Field):
                # Instances see the default value; the descriptor itself
                # lives on in _fields for dump()/load().
                attrs[fieldname] = field.default_value
        if '_fields' in attrs:
            raise NettestError(_("the name '_fields' is reserved in class %s")%(name))
        attrs['_fields']= _fields
        return super(PacketMeta, cls).__new__(cls, name, bases, attrs)
    @staticmethod
    def __check_field_type(cls, field):
        """Recursively accept Field/Packet instances and lists of them."""
        if not isinstance(field, (Field, Packet, list)):
            return False
        if isinstance(field, (list)):
            for subfield in field:
                if not cls.__check_field_type(cls, subfield):
                    return False
        return True
class BitDumper(object):
    """Accumulates consecutive sub-byte bit fields and packs them.

    Values are pushed most-significant-first; dump() concatenates them
    into a single big-endian integer and packs it into 1, 2, 4 or 8
    bytes (the only supported total widths).
    """
    def __init__(self):
        self.data = []          # pushed field values (ints)
        self.data_len = []      # corresponding widths in bits
        self.data_len_sum = 0   # running total width in bits
    def clear(self):
        """Reset the accumulator so the dumper can be reused."""
        self.data = []
        self.data_len = []
        self.data_len_sum = 0
    def push(self, data, length):
        """Append one value occupying ``length`` bits.

        :raises NettestError: if the value does not fit in ``length`` bits.
        """
        data = int(data)
        # An N-bit field holds values in [0, 2**N - 1]; the original check
        # (data > 2**length) wrongly accepted 2**length, which would
        # overflow into the neighbouring field (bug fix).
        if data < 0 or data >= 2 ** length:
            raise NettestError(_("bit value out of range"))
        self.data.append(data)
        self.data_len.append(length)
        self.data_len_sum += length
    def dump(self):
        """Pack all pushed values into network-order bytes.

        :raises NettestError: if the total width is not a whole number of
            bytes, or is not one of the supported struct sizes (1/2/4/8).
        """
        if self.data_len_sum % 8 != 0:
            raise NettestError(_("incorrect bit field length"))
        value = 0
        remaining = self.data_len_sum
        for field_data, width in zip(self.data, self.data_len):
            value += field_data << (remaining - width)
            remaining -= width
        nbytes = self.data_len_sum // 8  # int division (was float '/')
        fmt = {1: '!B', 2: '!H', 4: '!I', 8: '!Q'}.get(nbytes)
        if fmt is None:
            raise NettestError(_("too long bit field"))
        return struct.pack(fmt, value)
class BitLoader(object):
    """Gathers consecutive bit-field descriptors and decodes them.

    push() records (name, field) pairs; load() decodes the whole group
    from a byte string and assigns each decoded value onto the packet.
    """
    def __init__(self, packet):
        self.fields = []        # (fieldname, field) pairs, in wire order
        self.bit_len_sum = 0    # total width of pushed fields, in bits
        self.packet = packet    # target object receiving decoded values
    def clear(self):
        """Reset so the loader can be reused for the next group."""
        self.fields = []
        self.bit_len_sum = 0
    def push(self, fieldname, field):
        """Record one bit field to be decoded by the next load()."""
        self.fields.append((fieldname, field))
        self.bit_len_sum += field.length
    def load(self, data):
        """Decode all pushed fields from ``data``; return bytes consumed.

        :raises NettestError: if the group is not byte-aligned.
        """
        if self.bit_len_sum % 8 != 0:
            raise NettestError(_("incorrect bit field length"))
        byte_len = int(self.bit_len_sum / 8)
        chunk = data[:byte_len]
        offset = 0
        for name, field in self.fields:
            setattr(self.packet, name, field.from_netbytes(chunk, offset))
            offset += field.length
        return byte_len
class Packet(object, metaclass=PacketMeta):
    '''Base class for wire-format packets.

    Subclasses list their field names in ``fields`` (wire order); the
    PacketMeta metaclass turns that into ``_fields``.  Runs of adjacent
    BitField entries are packed/unpacked together via BitDumper/BitLoader
    and must total a whole number of bytes.
    '''
    # Ordered field names; overridden by subclasses (empty here so the
    # metaclass accepts this base class itself).
    fields=[]
    def __init__(self):
        # Nested Packet fields get a fresh instance per outer instance so
        # state is not shared through the class-level descriptor.
        for field_name, field in self._fields:
            if isinstance(field, Packet):
                setattr(self, field_name, field.__class__())
    def dump(self):
        '''Serialize self to bytes.

        Raises NettestError if any field value is still None.
        '''
        data = b''
        bit_dumper = BitDumper()
        for field_name, field in self._fields:
            field_value = getattr(self, field_name)
            if field_value is None:
                raise NettestError(_("%s is None and haven't default value")%(field_name))
            if isinstance(field, BitField):
                # Defer bit fields; they are flushed as one unit when the
                # run of adjacent BitFields ends.
                bit_dumper.push(field_value, field.length)
                continue
            else:
                if bit_dumper.data_len_sum > 0:
                    data += bit_dumper.dump()
                    bit_dumper.clear()
            if isinstance(field, Packet):
                data += field_value.dump()
                continue
            data += field.to_netbytes(field_value)
        # Flush a trailing run of bit fields.
        if bit_dumper.data_len_sum > 0:
            data += bit_dumper.dump()
        return data
    def load(self, data):
        '''Deserialize bytes into self.

        If successful, return the total data length consumed, else None.
        '''
        loaded_len = 0
        bit_loader = BitLoader(self)
        for field_name, field in self._fields:
            if isinstance(field, BitField):
                # Defer bit fields; decoded together when the run ends.
                bit_loader.push(field_name, field)
                continue
            else:
                if bit_loader.bit_len_sum > 0:
                    loaded_len += bit_loader.load(data[loaded_len:])
                    bit_loader.clear()
            if isinstance(field, Packet):
                # Nested packet consumes a variable amount of data.
                field_value = getattr(self, field_name)
                length = field_value.load(data[loaded_len:])
                if length is None:
                    return None
                loaded_len += length
                continue
            field_data = field.from_netbytes(data[loaded_len:])
            if field_data is None:
                return None
            # NOTE(review): advances by field.length -- assumes fixed-size
            # fields here; confirm for variable-length Field types.
            loaded_len += field.length
            setattr(self, field_name, field_data)
        # Decode a trailing run of bit fields.
        if bit_loader.bit_len_sum > 0:
            loaded_len += bit_loader.load(data[loaded_len:])
        return loaded_len
    def to_printable(self):
        """Return a human-readable multi-line dump of all field values."""
        string = ''
        string += '-'*20+str(self.__class__.__name__)+'-'*20+'\n'
        for field_name, field in self._fields:
            field_value = getattr(self, field_name)
            if field_value is None:
                string += '%s\tNone\n'%(field_name)
            elif isinstance(field, Packet):
                string += '%s\t%s\n'%(field_name, field_value.to_printable())
            else:
                string += '%s\t%s\n'%(field_name, field.to_printable(field_value))
        string += '-'*(40+len(self.__class__.__name__))+'\n'
        return string
    def __eq__(self, other):
        # Field-by-field comparison in declaration order.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3; confirm that is acceptable.
        for field_name in self.fields:
            field_value1 = getattr(self, field_name)
            field_value2 = getattr(other, field_name)
            if field_value1 != field_value2:
                return False
        return True
    @property
    def length(self):
        """Total serialized length in bytes (bit fields summed, then /8)."""
        total_len = 0
        bit_len = 0
        for field_name, field in self._fields:
            if isinstance(field, BitField):
                bit_len += field.length
            elif field.length > 0:
                total_len += field.length
            else:
                # Variable-length field: fall back to the value's own len().
                field_value = getattr(self, field_name)
                total_len += len(field_value)
        total_len += int(bit_len/8)
        return total_len
| |
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from varappx.handle_init import *
# basedir
## Login stuff.
## Care, this stupid Django automatically adds '_id' to foreign key fields,
## e.g. a foreign key named 'variants_db' here corresponds to 'variants_db_id' in the db.
# @login_manager.user_loader
# def load_user(user_id):
# return Users.query.get(int(user_id))
class UsersModel(object):
    """Abstract mixin adding shared bookkeeping columns to all models."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    created_at = db.Column(db.DateTime, nullable=True, default=datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True, default=datetime.now)
    created_by = db.Column(db.String(50), nullable=True)
    updated_by = db.Column(db.String(50), nullable=True)
    is_active = db.Column(db.Boolean, default=1)

    def __init__(self, **kwargs):
        """Assign keyword arguments to columns, rejecting unknown names.

        'password' is special-cased because reading ``Users.password``
        raises AttributeError (write-only property), which makes
        ``hasattr`` return False even though assignment is valid.
        """
        for key, value in kwargs.items():
            if not hasattr(self, key) and key != 'password':
                # Fail fast on unknown fields.  SyntaxError is kept for
                # backward compatibility with existing callers, although
                # TypeError would be more conventional.  (The original
                # also dropped into pdb here -- debug traps removed.)
                raise SyntaxError("unknown field %r for %s"
                                  % (key, type(self).__name__))
            setattr(self, key, value)
class Users(UsersModel, db.Model, UserMixin):
    """Account table; UserMixin provides the Flask-Login interface."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    username = db.Column(db.String(25), unique=True)
    # NOTE(review): the password *hash* is stored in `salt` (see setter
    # below); this `password` column appears unused -- confirm.
    password = db.Column(db.String(255))
    salt = db.Column(db.String(255), default='')
    email = db.Column(db.String(255))
    code = db.Column(db.String(25))
    activation_code = db.Column(db.String(25), nullable=True)
    is_password_reset = db.Column(db.Integer, nullable=True)
    person_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'), nullable=True)
    DbAccess = db.relationship('DbAccess', lazy='dynamic', backref='users')
    @property
    def password(self):
        # Write-only: reading the plaintext password is never allowed.
        raise AttributeError('password is not a readable attribute.')
    @password.setter
    def password(self, password):
        # Hash on assignment; the hash is stored in `salt`.
        self.salt = generate_password_hash(password, method='pbkdf2:sha256', salt_length=8)
    def verify_password(self, password):
        """Check a plaintext password against the stored hash."""
        return check_password_hash(self.salt, password)
    class Meta:
        # Django-era leftover; SQLAlchemy does not read this -- confirm
        # before removing.
        managed = True  # If True, Django will create a table on migration
        db_table = 'users'
    def __repr__(self):
        # Relies on the 'role' backref and on Roles.__repr__ containing ':'.
        return "[User]: <username> %s; <email> %s; <roles> {%s}" % (self.username,self.email,str(self.role).split(':')[1])
class Roles(UsersModel, db.Model):
    """User roles with rank and admin capability flags."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    name = db.Column(db.String(length=255))
    rank = db.Column(db.Integer, nullable=True)
    # Capability flags stored as 0/1 integers rather than booleans.
    can_validate_user = db.Column(db.Integer, default=0)
    can_delete_user = db.Column(db.Integer, default=0)
    # Backref 'role' is what Users.__repr__ reads.
    users = db.relationship(Users, lazy='dynamic', backref='role')
    class Meta:
        # Django-era leftover; not used by SQLAlchemy.
        managed = True
    def __repr__(self):
        return "[Roles]: <name> %s; <rank> %s; <can_validate_user> %s; <can_delete_user> %s" % \
               (self.name,self.rank,self.can_validate_user,self.can_delete_user)
class People(UsersModel, db.Model):
    """Extra data on users (contact and affiliation details)."""
    __tablename__ = 'people'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    firstname = db.Column(db.String(255))
    lastname = db.Column(db.String(255))
    institution = db.Column(db.String(255), nullable=True)
    street = db.Column(db.String(255), nullable=True)
    city = db.Column(db.String(255), nullable=True)
    phone = db.Column(db.String(30), nullable=True)
    is_laboratory = db.Column(db.Integer, nullable=True)
    laboratory = db.Column(db.String(255), nullable=True)
    # Backref 'person' gives Users.person access to this record.
    users = db.relationship(Users, lazy='dynamic', backref='person')
    class Meta:
        # Django-era leftover; not used by SQLAlchemy.
        managed = True
class Bookmarks(UsersModel, db.Model):
    """App states saved by user (a stored query plus descriptions)."""
    __tablename__ = 'bookmarks'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    query = db.Column(db.Text)
    description = db.Column(db.String(255))
    long_description = db.Column(db.Text, default='')
    # Bookmarks attach to a user/database pair, not directly to a user.
    db_access_id = db.Column(db.Integer, db.ForeignKey('db_accesses.id'), nullable=True)
    class Meta:
        # Django-era leftover; not used by SQLAlchemy.
        managed = True
class DbAccess(UsersModel, db.Model):
    """Many-to-many access of users to databases."""
    __tablename__ = 'db_accesses'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
    variants_db_id = db.Column(db.Integer, db.ForeignKey('variants_db.id'), nullable=True)
    bookmark = db.relationship(Bookmarks, lazy='dynamic', backref='DbAccess')
    class Meta:
        # Django-era leftovers; SQLAlchemy enforces neither of these.
        managed = True
        unique_together = ("user", "variants_db")
    def __repr__(self):
        # Relies on the reprs of Users and VariantsDb containing ':'.
        return "[DbAccess]: <users> {%s}; <access_db> {%s}" % \
               (str(self.users).split(':')[1],str(self.variantsdb).split(':')[1])
class VariantsDb(UsersModel, db.Model):
    """Gemini databases (one row per variants database file)."""
    __tablename__ = 'variants_db'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    name = db.Column(db.String(255))
    visible_name = db.Column(db.String(255), nullable=True)
    filename = db.Column(db.String(255), nullable=True)
    location = db.Column(db.Text, nullable=True, default='')
    # Content hash; presumably used to detect changed files -- confirm.
    hash = db.Column(db.String(255), nullable=True)
    description = db.Column(db.String(255), nullable=True, default='')
    size = db.Column(db.BigInteger, nullable=True)
    parent_db_id = db.Column(db.Integer, nullable=True)  # not a ForeignKey because it is only informative
    DbAccess = db.relationship(DbAccess, lazy='dynamic', backref='variantsdb')
    class Meta:
        # Django-era leftovers; SQLAlchemy enforces neither of these.
        managed = True
        unique_together = ("filename", "hash")
    def __repr__(self):
        return "[VariantsDb]: <name> %s; <filename> %s; <description> %s" % (self.name,self.filename,self.description)
class Preferences(UsersModel, db.Model):
    """User preferences, such as columns selection (serialized text)."""
    __tablename__ = 'preferences'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    preferences = db.Column(db.Text, default='')
    description = db.Column(db.Text, default='')
    user = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
    class Meta:
        # Django-era leftover; not used by SQLAlchemy.
        managed = True
class Annotation(UsersModel, db.Model):
    """Versions of databases, programs, gemini etc. per variants DB."""
    __tablename__ = 'annotation'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    source = db.Column(db.String(255), nullable=True)
    source_version = db.Column(db.String(255), nullable=True)
    annotation = db.Column(db.String(255), nullable=True)
    annotation_version = db.Column(db.String(255), nullable=True)
    variants_db = db.Column(db.Integer, db.ForeignKey('variants_db.id'), nullable=True)
    class Meta:
        # Django-era leftover; not used by SQLAlchemy.
        managed = True
class History(UsersModel, db.Model):
    """Record user actions (audit log of requests per session)."""
    __tablename__ = 'history'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    session_start = db.Column(db.DateTime)
    url = db.Column(db.Text)
    query = db.Column(db.Text, default='')
    description = db.Column(db.String(255))
    long_description = db.Column(db.Text, default='')
    ip_address = db.Column(db.String(255))
    user = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
    class Meta:
        # Django-era leftover; not used by SQLAlchemy.
        managed = True
class Bam(UsersModel, db.Model):
    """Relate samples to filenames or keys for the bam server."""
    __tablename__ = 'bam'
    id = db.Column(db.Integer, primary_key=True,autoincrement=True)
    filename = db.Column(db.String(255), nullable=True)
    key = db.Column(db.String(255), nullable=True)
    sample = db.Column(db.String(255), nullable=True)
    variants_db = db.Column(db.Integer, db.ForeignKey('variants_db.id'), nullable=True)
    class Meta:
        # Django-era leftover; not used by SQLAlchemy.
        managed = True
import os
import json

# Directory containing this module; anchors the default fixture path.
base_dir = os.path.abspath(os.path.dirname(__file__))

def loaddata(file_name=base_dir + '/../../varappx/main/resources/init/basic_info.json'):
    """Load model fixtures from a JSON file into the database.

    The file is a list of ``{"model": <class name>, "fields": {...}}``
    records; each record is instantiated and committed individually
    (matching the original per-record commit behaviour).

    :param file_name: path to the JSON fixture file.
    :raises ValueError: if a record names a model class not defined in
        this module.
    """
    with open(file_name) as fh:
        data = json.load(fh)
    for entry in data:
        model_name = entry['model']
        fields = entry['fields']
        # Resolve the class by name instead of exec() -- equivalent for the
        # plain class names used in fixtures, without executing file data.
        # (The original also dropped into pdb on failure -- removed.)
        model_cls = globals().get(model_name)
        if model_cls is None:
            raise ValueError("Unknown model %r in fixture %s"
                             % (model_name, file_name))
        db.session.add(model_cls(**fields))
        db.session.commit()
| |
#!/usr/bin/env python
"""Tests for config_lib classes."""
import io
import ntpath
import os
import stat
from absl import app
from absl.testing import absltest
from absl.testing import flagsaver
from grr_response_core import config
from grr_response_core.lib import config_lib
from grr_response_core.lib import config_parser
from grr_response_core.lib import package
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import type_info
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import temp
from grr.test_lib import test_lib
class YamlConfigTest(absltest.TestCase):
  """Test the Yaml config file support."""
  @flagsaver.flagsaver(disallow_missing_config_definitions=True)
  def testParsing(self):
    """Undefined options fail Initialize; contexts select values."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_list("Section1.test_list", ["a", "b"], "A test integer.")
    conf.DEFINE_integer("Section1.test", 0, "An integer")
    conf.DEFINE_integer("Section1.test2", 0, "An integer")
    # Section2.test is not defined yet, so initialization must fail.
    self.assertRaises(
        config_lib.MissingConfigDefinitionError,
        conf.Initialize,
        parser=config_parser.YamlConfigFileParser,
        data="""
                      Section2.test: 2
                      """)
    conf.DEFINE_string("Section2.test", "", "A string")
    conf.DEFINE_context("Client Context")
    conf.DEFINE_context("Windows Context")
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser,
        data="""
# Configuration options can be written as long hand, dot separated parameters.
Section1.test: 2
Section1.test_list: x,y
Section2.test: 3%(Section1.test)
Client Context:
  Section1.test: 6
  Section1.test2: 1
  Windows Context:
    Section1.test: 10
Windows Context:
  Section1.test: 5
  Section1.test2: 2
""")
    self.assertEqual(conf["Section1.test"], 2)
    # Test interpolation works.
    self.assertEqual(conf["Section2.test"], "32")
    self.assertEqual(conf["Section1.test_list"], ["x", "y"])
    self.assertEqual(
        conf.Get(
            "Section1.test_list", context=["Client Context",
                                           "Windows Context"]), ["x", "y"])
    # Test that contexts affect option selection.
    self.assertEqual(conf.Get("Section1.test", context=["Client Context"]), 6)
    self.assertEqual(conf.Get("Section1.test", context=["Windows Context"]), 5)
    context = ["Client Context", "Windows Context"]
    self.assertEqual(conf.Get("Section1.test", context=context), 10)
    context = ["Windows Context", "Client Context"]
    # Order of the context parameters should not matter.
    self.assertEqual(conf.Get("Section1.test", context=context), 10)
  def testConflictingContexts(self):
    """Test that conflicting contexts are resolved by precedence."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_integer("Section1.test", 0, "An integer")
    conf.DEFINE_context("Client Context")
    conf.DEFINE_context("Platform:Windows")
    conf.DEFINE_context("Extra Context")
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser,
        data="""
Section1.test: 2
Client Context:
  Section1.test: 6
Platform:Windows:
  Section1.test: 10
Extra Context:
  Section1.test: 15
""")
    # Without contexts.
    self.assertEqual(conf.Get("Section1.test"), 2)
    # When running in the client context only.
    self.assertEqual(conf.Get("Section1.test", context=["Client Context"]), 6)
    # Later defined contexts (i.e. with later calls to AddContext()) are
    # stronger than earlier contexts. For example, contexts set the command line
    # --context option are stronger than contexts set by the running binary,
    # since they are added last.
    self.assertEqual(
        conf.Get(
            "Section1.test", context=["Client Context", "Platform:Windows"]),
        10)
    self.assertEqual(
        conf.Get(
            "Section1.test", context=["Platform:Windows", "Client Context"]), 6)
  def testRemoveContext(self):
    """Test that conflicting contexts are resolved by precedence."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_integer("Section1.test", 0, "An integer")
    conf.DEFINE_integer("Section1.test2", 9, "An integer")
    conf.DEFINE_context("Client Context")
    conf.DEFINE_context("Platform:Windows")
    conf.DEFINE_context("Extra Context")
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser,
        data="""
Section1.test: 2
Client Context:
  Section1.test: 6
  Section1.test2: 8
Platform:Windows:
  Section1.test: 10
Extra Context:
  Section1.test: 15
""")
    # Should be defaults, no contexts added
    self.assertEqual(conf.Get("Section1.test"), 2)
    self.assertEqual(conf.Get("Section1.test2"), 9)
    # Now with Client Context
    conf.AddContext("Client Context")
    self.assertEqual(conf.Get("Section1.test"), 6)
    self.assertEqual(conf.Get("Section1.test2"), 8)
    # Should be back to defaults
    conf.RemoveContext("Client Context")
    self.assertEqual(conf.Get("Section1.test"), 2)
    self.assertEqual(conf.Get("Section1.test2"), 9)
    # Now with Windows Context, test2 is still default
    conf.AddContext("Platform:Windows")
    self.assertEqual(conf.Get("Section1.test"), 10)
    self.assertEqual(conf.Get("Section1.test2"), 9)
    # Should be back to defaults
    conf.RemoveContext("Platform:Windows")
    self.assertEqual(conf.Get("Section1.test"), 2)
    self.assertEqual(conf.Get("Section1.test2"), 9)
  def testContextApplied(self):
    """ContextApplied reflects only contexts explicitly added."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_integer("Section1.test", 0, "An integer")
    conf.DEFINE_context("Client Context")
    conf.DEFINE_context("Unused Context")
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser,
        data="""
Client Context:
  Section1.test: 6
""")
    # Should be defaults, no contexts added
    self.assertFalse(conf.ContextApplied("Client Context"))
    self.assertFalse(conf.ContextApplied("Unused Context"))
    conf.AddContext("Client Context")
    self.assertTrue(conf.ContextApplied("Client Context"))
    self.assertFalse(conf.ContextApplied("Unused Context"))
  def testBackslashes(self):
    """Escaped backslashes and literal %() survive YAML + interpolation."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_string("Section1.parameter", "", "A test.")
    conf.DEFINE_string("Section1.parameter2", "", "A test.")
    conf.DEFINE_string("Section1.parameter3", "", "A test.")
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser,
        data=r"""
Section1.parameter: |
   a\\b\\c\\d
Section1.parameter2: |
   %(parameter)\\e
Section1.parameter3: |
   \%(a\\b\\c\\d\)
""")
    self.assertEqual(conf.Get("Section1.parameter"), "a\\b\\c\\d")
    self.assertEqual(conf.Get("Section1.parameter2"), "a\\b\\c\\d\\e")
    self.assertEqual(conf.Get("Section1.parameter3"), "%(a\\b\\c\\d)")
  def testSemanticValueType(self):
    """A semantic value option is parsed into its RDF value type."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_semantic_value(rdfvalue.DurationSeconds, "Section1.foobar",
                               None, "Sample help.")
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser,
        data="""
Section1.foobar: 6d
""")
    value = conf.Get("Section1.foobar")
    self.assertIsInstance(value, rdfvalue.DurationSeconds)
    self.assertEqual(value, rdfvalue.Duration.From(6, rdfvalue.DAYS))
  def testSemanticStructType(self):
    """A semantic struct option is parsed into its RDF struct type."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_semantic_struct(rdf_file_finder.FileFinderArgs,
                                "Section1.foobar", [], "Sample help.")
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser,
        data="""
Section1.foobar:
  paths:
    - "a/b"
    - "b/c"
  pathtype: "TSK"
""")
    values = conf.Get("Section1.foobar")
    self.assertIsInstance(values, rdf_file_finder.FileFinderArgs)
    self.assertEqual(values.paths, ["a/b", "b/c"])
    self.assertEqual(values.pathtype, "TSK")
  def testSemanticEnum(self):
    """A semantic enum option is parsed into an EnumNamedValue."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_semantic_enum(
        enum_container=rdf_paths.PathSpec.PathType,
        name="Foo.Bar",
        default=rdf_paths.PathSpec.PathType.TSK)
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser, data="Foo.Bar: NTFS")
    value = conf.Get("Foo.Bar")
    self.assertIsInstance(value, rdf_structs.EnumNamedValue)
    self.assertEqual(value, "NTFS")
    self.assertEqual(value.id, 5)
  def testSemanticEnum_defaultValue(self):
    """An unset semantic enum option falls back to its default."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_semantic_enum(
        enum_container=rdf_paths.PathSpec.PathType,
        name="Foo.Bar",
        default=rdf_paths.PathSpec.PathType.TSK)
    conf.Initialize(parser=config_parser.YamlConfigFileParser, data="")
    value = conf.Get("Foo.Bar")
    self.assertIsInstance(value, rdf_structs.EnumNamedValue)
    self.assertEqual(value, "TSK")
  def testSemanticEnum_invalidValue(self):
    """An unknown enum name raises at Get() time, not at parse time."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_semantic_enum(
        enum_container=rdf_paths.PathSpec.PathType,
        name="Foo.Bar",
        default=rdf_paths.PathSpec.PathType.TSK)
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser, data="Foo.Bar: Invalid")
    with self.assertRaises(ValueError):
      conf.Get("Foo.Bar")
class ConfigLibTest(test_lib.GRRBaseTest):
"""Tests for config functionality."""
def testInit(self):
"""Testing initialization of a ConfigManager."""
conf = config_lib.GrrConfigManager()
conf.DEFINE_string("MemoryDriver.device_path", "Default Value", "Help")
conf.DEFINE_context("Platform:Windows")
conf.DEFINE_context("Client Context")
conf.DEFINE_context("Platform:Linux")
data = r"""
Platform:Linux:
MemoryDriver.device_path: /dev/pmem
Platform:Windows:
MemoryDriver.device_path: \\\\.\\pmem
"""
conf.Initialize(parser=config_parser.YamlConfigFileParser, data=data)
# Check that the linux client have a different value from the windows
# client.
self.assertEqual(
conf.Get(
"MemoryDriver.device_path",
context=("Client Context", "Platform:Linux")), "/dev/pmem")
self.assertEqual(
conf.Get(
"MemoryDriver.device_path",
context=("Client Context", "Platform:Windows")), r"\\.\pmem")
def testSet(self):
"""Test setting options."""
# Test access methods.
conf = config_lib.GrrConfigManager()
conf.DEFINE_string("NewSection1.new_option1", "Default Value", "Help")
conf.initialized = True
conf.Set("NewSection1.new_option1", "New Value1")
self.assertEqual(conf["NewSection1.new_option1"], "New Value1")
def testSave(self):
"""Save the config and ensure it still works."""
conf = config_lib.GrrConfigManager()
config_file = os.path.join(self.temp_dir, "writeback.yaml")
conf.SetWriteBack(config_file)
conf.DEFINE_string("NewSection1.new_option1", "Default Value", "Help")
conf.Set("NewSection1.new_option1", "New Value1")
conf.Write()
new_conf = config_lib.GrrConfigManager()
new_conf.DEFINE_string("NewSection1.new_option1", "Default Value", "Help")
new_conf.Initialize(filename=config_file)
self.assertEqual(new_conf["NewSection1.new_option1"], "New Value1")
def testQuotes(self):
conf = config_lib.GrrConfigManager()
conf.DEFINE_string(name="foo.bar", default="\"baz\"", help="Bar.")
conf.DEFINE_string(name="foo.quux", default="\"%(foo.bar)\"", help="Quux.")
conf.Initialize(data="")
self.assertEqual(conf["foo.bar"], "\"baz\"")
self.assertEqual(conf["foo.quux"], "\"\"baz\"\"")
def testWritebackQuotes(self):
def Config():
conf = config_lib.GrrConfigManager()
conf.DEFINE_string(name="foo.bar", default="", help="Bar.")
conf.DEFINE_string(name="foo.baz", default="\"%(foo.bar)\"", help="Baz.")
return conf
with temp.AutoTempFilePath(suffix=".yaml") as confpath:
writeback_conf = Config()
writeback_conf.SetWriteBack(confpath)
writeback_conf.Set("foo.bar", "\"quux\"")
writeback_conf.Write()
loaded_conf = Config()
loaded_conf.Initialize(filename=confpath)
self.assertEqual(loaded_conf["foo.bar"], "\"quux\"")
self.assertEqual(loaded_conf["foo.baz"], "\"\"quux\"\"")
def _SetupConfig(self, value):
conf = config_lib.GrrConfigManager()
config_file = os.path.join(self.temp_dir, "config.yaml")
with io.open(config_file, "w") as fd:
fd.write("Section1.option1: %s" % value)
conf.DEFINE_string("Section1.option1", "Default Value", "Help")
conf.Initialize(filename=config_file)
return conf
  def testPersist(self):
    """Persist() copies the effective value into the writeback file."""
    writeback_file = os.path.join(self.temp_dir, "writeback.yaml")
    conf = self._SetupConfig("Value1")
    conf.SetWriteBack(writeback_file)
    self.assertEqual(conf["Section1.option1"], "Value1")
    conf.Persist("Section1.option1")
    conf = self._SetupConfig("Value2")
    # This should give the persisted value back.
    conf.SetWriteBack(writeback_file)
    self.assertEqual(conf["Section1.option1"], "Value1")
    # Now overwrite the writeback from the config ("Value2").
    conf.Persist("Section1.option1")
    conf = self._SetupConfig("Value3")
    conf.SetWriteBack(writeback_file)
    self.assertEqual(conf["Section1.option1"], "Value2")
    # This new config has the same value as the current writeback file.
    conf = self._SetupConfig("Value2")
    conf.SetWriteBack(writeback_file)
    self.assertEqual(conf["Section1.option1"], "Value2")

    def DontCall():
      # Sentinel stub: fails the test if Write() is invoked.
      raise NotImplementedError("Write was called!")

    # If the value in config and writeback are the same, nothing is written.
    with utils.Stubber(conf, "Write", DontCall):
      conf.Persist("Section1.option1")
  def testPersistDoesntOverwriteCustomOptions(self):
    """Persisting an unset option leaves existing writeback values alone."""
    conf = config_lib.GrrConfigManager()
    writeback_file = os.path.join(self.temp_dir, "writeback.yaml")
    conf.SetWriteBack(writeback_file)
    conf.DEFINE_string("Section.option", "Default Value", "Help")
    conf.Set("Section.option", "custom")
    conf.Write()
    new_conf = config_lib.GrrConfigManager()
    new_conf.DEFINE_string("Section.option", "Default Value", "Help")
    new_config_file = os.path.join(self.temp_dir, "config.yaml")
    new_conf.Initialize(filename=new_config_file)
    new_conf.SetWriteBack(writeback_file)
    new_conf.Write()
    # At this point, the writeback file has a custom setting for
    # "Section.option" but new_conf has nothing set.
    with io.open(writeback_file) as fd:
      self.assertEqual(fd.read(), "Section.option: custom\n")
    # Calling persist does not change the custom value.
    new_conf.Persist("Section.option")
    with io.open(writeback_file) as fd:
      self.assertEqual(fd.read(), "Section.option: custom\n")
  def testFileFilters(self):
    """The |file and |optionalfile filters substitute file contents."""
    filename = os.path.join(self.temp_dir, "f.txt")
    content = "testcontent"
    with io.open(filename, "w") as fd:
      fd.write(content)
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_string("Valid.file", "%%(%s|file)" % filename, "test")
    conf.DEFINE_string("Valid.optionalfile", "%%(%s|optionalfile)" % filename,
                       "test")
    conf.DEFINE_string("Invalid.file", "%(notafile|file)", "test")
    conf.DEFINE_string("Invalid.optionalfile", "%(notafile|optionalfile)",
                       "test")
    conf.Initialize(data="")
    self.assertEqual(conf["Valid.file"], content)
    self.assertEqual(conf["Valid.optionalfile"], content)
    # A missing file is an error for |file ...
    with self.assertRaises(config_lib.FilterError):
      conf["Invalid.file"]  # pylint: disable=pointless-statement
    # ... but |optionalfile degrades to the empty string.
    self.assertEqual(conf["Invalid.optionalfile"], "")
  def testErrorDetection(self):
    """Check that invalid config files are detected immediately."""
    test_conf = """
[Section1]
test = val2"""
    conf = config_lib.GrrConfigManager()
    # Define test as an integer.
    conf.DEFINE_integer("Section1.test", 54, "A test integer.")
    conf.Initialize(data=test_conf)
    # This should raise since the config file is incorrect.
    errors = conf.Validate("Section1")
    self.assertIn("Invalid value val2 for Integer",
                  str(errors["Section1.test"]))
  def testCopyConfig(self):
    """Check we can copy a config and use it without affecting the old one."""
    conf = config.CONFIG.CopyConfig()
    conf.initialized = False
    conf.DEFINE_string("NewSection1.new_option1", "Default Value", "Help")
    conf.Set("NewSection1.new_option1", "New Value1")
    conf.initialized = True
    conf.Write()
    self.assertEqual(conf.Get("NewSection1.new_option1"), "New Value1")
    # The global config must be unaffected by changes made to the copy.
    self.assertEqual(config.CONFIG.Get("NewSection1.new_option1", None), None)
  def testKeyConfigOptions(self):
    """Check that keys get read correctly from the config."""
    # Clone a test config object from the global config so it knows about Client
    # options.
    conf = config.CONFIG.MakeNewConfig()
    # NOTE: the PEM continuation lines must stay indented so the INI parser
    # treats them as part of the same multi-line value.
    conf.Initialize(data="""
[Client]
private_key = -----BEGIN RSA PRIVATE KEY-----
  MIIBOwIBAAJBAJTrcBYtenHgT23ZVwYTiMPF+XQi+b9f7idy2eD+ELAUOoBK9A+n
  W+WSavIg3cje+yDqd1VjvSo+LGKC+OQkKcsCAwEAAQJALGVsSxBP2rc2ttb+nK8i
  LMtOrRLoReeBhn00+2CC9Rr+Ui8GJxvmgJ16+DObU9xIPPG73bqDdsOOrmTV8Jo4
  8QIhAMQC2siJr+uuKpGODCM1ItJfG2Uaa9eplYj1pBVuztVPAiEAwn8Lluk7ULX6
  SkLzKnsbahInoni6t7SBd/o6hjNsvMUCIQCcUpZ/9udZdAa5HOtrLNZ/pqAniuHV
  FoeOujFJcpz8GwIgSRVYE4LcSP24aQMzQDk2GetsfT6EWtc29xBNwXO9XkkCIQCl
  7o5SVqKx1wHOj8gV3/8WHJ61MvAQCAX4o/M8cGkTQQ==
  -----END RSA PRIVATE KEY-----
executable_signing_public_key = -----BEGIN PUBLIC KEY-----
  MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALnfFW1FffeKPs5PLUhFOSkNrr9TDCOD
  QAI3WluLh0sW7/ro93eoIZ0FbipnTpzGkPpriONbSOXmxWNTo0b9ma8CAwEAAQ==
  -----END PUBLIC KEY-----
""")
    errors = conf.Validate(["Client"])
    self.assertEqual(errors, {})
    # Key material is parsed into typed RDF crypto objects, not raw strings.
    self.assertIsInstance(conf["Client.executable_signing_public_key"],
                          rdf_crypto.RSAPublicKey)
    self.assertIsInstance(conf["Client.private_key"], rdf_crypto.RSAPrivateKey)
  def testGet(self):
    """Get() handles defaults, explicit default overrides and file values."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_string("Section1.foobar", "test", "A test string.")
    conf.DEFINE_string("Section1.foobaz", None, "An empty default string.")
    conf.DEFINE_string("Section1.foobin", "", "An empty default string.")
    conf.initialized = True
    self.assertEqual(conf.Get("Section1.foobar"), "test")
    # An explicit default= overrides the declared default when nothing is set.
    self.assertEqual(conf.Get("Section1.foobar", default=None), None)
    conf.Initialize(data="""
[Section1]
foobar = X
""")
    self.assertEqual(conf.Get("Section1.foobar", default=None), "X")
    # This not being None is a little surprising, but probably not a big deal
    self.assertEqual(conf.Get("Section1.foobaz"), "")
    self.assertEqual(conf.Get("Section1.foobin"), "")
  def testAddOption(self):
    """Test that we can add options."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_string("Section1.foobar", "test", "A test string.")
    conf.DEFINE_string("Section1.test", "test", "A test string.")
    conf.DEFINE_string("Section1.interpolated", "", "An interpolated string.")
    # This entry is not correct - the default is invalid.
    conf.DEFINE_integer("Section1.broken_int", "string", "A test integer.")
    conf.DEFINE_string("Section1.system", None, "The basic operating system.")
    conf.DEFINE_integer("Section1.test_int", 54, "A test integer.")
    conf.DEFINE_list("Section1.test_list", ["a", "b"], "A test integer.")
    conf.DEFINE_list("Section1.test_list2", ["a", "b"], "A test integer.")
    conf.DEFINE_integer("Section2.test_int", None, "A test integer.")
    conf.DEFINE_string("Section2.interpolated", "", "An interpolated string.")
    conf.DEFINE_integer("Section3.test_int", None, "A test integer.")
    conf.DEFINE_string("Section3.interpolated", "", "An interpolated string.")
    conf.Initialize(data="""
[Section1]
foobar = X
test_list = x,y
[Section2]
test_int = 34
interpolated = %(Section1.foobar)Y
[Section3]
test_int = 1
interpolated = %(%(Section1.foobar)|lower)Y
""")
    # The default value is invalid.
    errors = conf.Validate("Section1")
    self.assertIn("Invalid value string for Integer",
                  str(errors["Section1.broken_int"]))
    # Section not specified:
    self.assertRaises(config_lib.UnknownOption, conf.__getitem__, "a")
    # Test direct access.
    self.assertEqual(conf["Section1.foobar"], "X")
    self.assertEqual(conf["Section1.test_list"], ["x", "y"])
    self.assertEqual(conf["Section1.test_list2"], ["a", "b"])
    # Test default access.
    self.assertEqual(conf["Section1.test"], "test")
    # Test interpolation with full section name.
    self.assertEqual(conf["Section2.interpolated"], "XY")
    # Check that default values are typed.
    self.assertEqual(conf["Section1.test_int"], 54)
    # Test filter functions.
    self.assertEqual(conf["Section3.interpolated"], "xY")
  def testConstants(self):
    """Test that we can not modify constant values during runtime."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_constant_string("Section1.const", "test", "A test string.")
    # We should be able to read this while the config is not initialized.
    self.assertEqual(conf["Section1.const"], "test")
    data = """
[Section1]
const = New string
"""
    # Modifying a constant value in the config file is OK.
    conf.Initialize(data=data)
    # Once the config file is loaded and initialized, modification of constant
    # values is an error.
    self.assertRaises(config_lib.ConstModificationError, conf.Set,
                      "Section1.const", "New string")
    self.assertRaises(config_lib.ConstModificationError, conf.SetRaw,
                      "Section1.const", "New string")
  @flagsaver.flagsaver(disallow_missing_config_definitions=True)
  def testBadConfigRaises(self):
    """Undefined options raise when missing definitions are disallowed."""
    conf = config_lib.GrrConfigManager()
    conf.initialized = False
    data = """
Section1.test: 2
"""
    # This config option isn't defined, so it should raise
    with self.assertRaises(config_lib.MissingConfigDefinitionError):
      conf.Initialize(parser=config_parser.YamlConfigFileParser, data=data)
  def testBadFilterRaises(self):
    """Checks that bad filter directive raise."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_string("Section1.foo6", "%(somefile@somepackage|resource)",
                       "test")
    conf.DEFINE_string("Section1.foo1", "%(Section1.foo6)/bar", "test")
    conf.Initialize(data="")
    with self.assertRaises(config_lib.InterpolationError) as context:
      _ = conf["Section1.foo1"]
    # Make sure the stringified exception explains the full interpolation chain.
    self.assertIn("%(Section1.foo6)/bar", str(context.exception))
  @flagsaver.flagsaver(disallow_missing_config_definitions=True)
  def testConfigOptionsDefined(self):
    """Test that all config options in use are defined."""
    # We need to use the actual config.CONFIG variable since that is where
    # all the variables are already defined.
    conf = config.CONFIG.MakeNewConfig()
    # Check our actual config validates
    configpath = package.ResourcePath("grr-response-core",
                                      "install_data/etc/grr-server.yaml")
    conf.Initialize(filename=configpath)
  def _DefineStringName(self, conf, name):
    """Helper: declare `name` as a string option with an empty default."""
    conf.DEFINE_string(name, "", "A test.")
  def testUnbalancedParenthesis(self):
    """Unbalanced parens are errors unless escaped or inside %{...} literals."""
    conf = config_lib.GrrConfigManager()
    name_list = [
        "Section1.foobar", "Section1.foo", "Section1.foo1", "Section1.foo2",
        "Section1.foo3", "Section1.foo4", "Section1.foo5", "Section1.foo6",
        "Section1.interpolation1", "Section1.interpolation2", "Section1.literal"
    ]
    for name in name_list:
      self._DefineStringName(conf, name)
    conf.Initialize(data=r"""
[Section1]
foobar = X
foo = %(Section1.foobar)
foo1 = %(foo
# Unbalanced parenthesis
foo2 = foo)
# Unbalanced parenthesis is ok if escaped.
foo3 = foo\)
# Or if enclosed in a literal block.
foo6 = %{foo)}
foo4 = %{%(hello)}
foo5 = %{hello
# Literal blocks can also appear inside filter interpolations to prevent
# automatic expansions.
# This pull the environment variable "sectionX"
interpolation1 = %(section%(Section1.foobar)|env)
# But this means literally section%(Section1.foo):
interpolation2 = %(section%{%(Section1.foo)}|env)
literal = %{aff4:/C\.(?P<path>.\{1,16\}?)($|/.*)}
""")
    # Test direct access.
    self.assertEqual(conf["Section1.foo"], "X")
    self.assertRaises(config_lib.ConfigFormatError, conf.__getitem__,
                      "Section1.foo1")
    self.assertRaises(config_lib.ConfigFormatError, conf.__getitem__,
                      "Section1.foo2")
    self.assertEqual(conf["Section1.foo3"], "foo)")
    # Test literal expansion.
    self.assertEqual(conf["Section1.foo4"], "%(hello)")
    self.assertRaises(config_lib.ConfigFormatError, conf.__getitem__,
                      "Section1.foo5")
    self.assertEqual(conf["Section1.foo6"], "foo)")
    # The Env filter forces uppercase on args.
    os.environ["sectionX".upper()] = "1"
    os.environ["section%(Section1.foo)".upper()] = "2"
    self.assertEqual(conf["Section1.interpolation1"], "1")
    self.assertEqual(conf["Section1.interpolation2"], "2")
    # Test that Set() escapes - i.e. reading the value back will return exactly
    # the same as we wrote:
    conf.Set("Section1.foo6", "%(Section1.foo3)")
    self.assertEqual(conf["Section1.foo6"], "%(Section1.foo3)")
    self.assertEqual(conf.GetRaw("Section1.foo6"), r"\%(Section1.foo3\)")
    # OTOH when we write it raw, reading it back will interpolate:
    conf.SetRaw("Section1.foo6", "%(Section1.foo3)")
    self.assertEqual(conf["Section1.foo6"], "foo)")
    # A complex regex which gets literally expanded.
    self.assertEqual(conf["Section1.literal"],
                     r"aff4:/C\.(?P<path>.{1,16}?)($|/.*)")
  def testDataTypes(self):
    """Validation type-checks and coerces float, integer and list options."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_float("Section1.float", 0, "A float")
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser, data="Section1.float: abc")
    errors = conf.Validate("Section1")
    self.assertIn("Invalid value abc for Float", str(errors["Section1.float"]))
    self.assertRaises(config_lib.ConfigFormatError, conf.Get, "Section1.float")
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser, data="Section1.float: 2")
    # Should have no errors now. Validate should normalize the value to a float.
    self.assertEqual(conf.Validate("Section1"), {})
    self.assertEqual(type(conf.Get("Section1.float")), float)
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_integer("Section1.int", 0, "An integer")
    conf.DEFINE_list("Section1.list", default=[], help="A list")
    conf.DEFINE_list("Section1.list2", default=["a", "2"], help="A list")
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser, data="Section1.int: 2.0")
    errors = conf.Validate("Section1")
    # Floats can not be coerced to an int because that will lose data.
    self.assertIn("Invalid value 2.0 for Integer", str(errors["Section1.int"]))
    # A string can be coerced to an int if it makes sense:
    conf.Initialize(
        parser=config_parser.YamlConfigFileParser, data="Section1.int: '2'")
    conf.Validate("Section1")
    self.assertEqual(type(conf.Get("Section1.int")), int)
    self.assertEqual(type(conf.Get("Section1.list")), list)
    self.assertEqual(conf.Get("Section1.list"), [])
    self.assertEqual(type(conf.Get("Section1.list2")), list)
    self.assertEqual(conf.Get("Section1.list2"), ["a", "2"])
  def _GetNewConf(self):
    """Return a fresh manager with the options used by the inclusion tests."""
    conf = config_lib.GrrConfigManager()
    conf.DEFINE_bool("SecondaryFileIncluded", False, "A string")
    conf.DEFINE_bool("TertiaryFileIncluded", False, "A string")
    conf.DEFINE_integer("Section1.int", 0, "An integer")
    conf.DEFINE_context("Client Context")
    return conf
  def _CheckConf(self, conf):
    """Assert that both includes were processed (deepest include wins)."""
    self.assertTrue(conf.Get("SecondaryFileIncluded"))
    self.assertTrue(conf.Get("TertiaryFileIncluded"))
    self.assertEqual(conf.Get("Section1.int"), 3)
  def testConfigFileInclusion(self):
    """Config.includes chains are resolved relative to the including file."""
    one = r"""
Config.includes:
  - 2.yaml
Section1.int: 1
"""
    two = r"""
SecondaryFileIncluded: true
Section1.int: 2
Config.includes:
  - subdir/3.yaml
"""
    three = r"""
TertiaryFileIncluded: true
Section1.int: 3
"""
    with utils.TempDirectory() as temp_dir:
      configone = os.path.join(temp_dir, "1.yaml")
      configtwo = os.path.join(temp_dir, "2.yaml")
      subdir = os.path.join(temp_dir, "subdir")
      os.makedirs(subdir)
      configthree = os.path.join(subdir, "3.yaml")
      with io.open(configone, "w") as fd:
        fd.write(one)
      with io.open(configtwo, "w") as fd:
        fd.write(two)
      with io.open(configthree, "w") as fd:
        fd.write(three)
      # Using filename
      conf = self._GetNewConf()
      conf.Initialize(
          parser=config_parser.YamlConfigFileParser, filename=configone)
      self._CheckConf(conf)
      # Using fd with no fd.name should raise because there is no way to resolve
      # the relative path.
      conf = self._GetNewConf()
      fd = io.BytesIO(one.encode("utf-8"))
      self.assertRaises(
          config_lib.ConfigFileNotFound,
          conf.Initialize,
          parser=config_parser.YamlConfigFileParser,
          fd=fd)
      # Using data
      conf = self._GetNewConf()
      self.assertRaises(
          config_lib.ConfigFileNotFound,
          conf.Initialize,
          parser=config_parser.YamlConfigFileParser,
          data=one)
  def testConfigFileInclusionCanBeTurnedOff(self):
    """process_includes=False ignores Config.includes directives."""
    one = r"""
Config.includes:
  - 2.yaml
Section1.int: 1
"""
    two = r"""
SecondaryFileIncluded: true
Section1.int: 2
"""
    with utils.TempDirectory() as temp_dir:
      configone = os.path.join(temp_dir, "1.yaml")
      configtwo = os.path.join(temp_dir, "2.yaml")
      with io.open(configone, "w") as fd:
        fd.write(one)
      with io.open(configtwo, "w") as fd:
        fd.write(two)
      # Using filename
      conf = self._GetNewConf()
      conf.Initialize(
          parser=config_parser.YamlConfigFileParser,
          filename=configone,
          process_includes=False)
      # The include was skipped, so only values from 1.yaml are present.
      self.assertFalse(conf.Get("SecondaryFileIncluded"))
      self.assertEqual(conf.Get("Section1.int"), 1)
  def testConfigFileIncludeAbsolutePaths(self):
    """Absolute include paths work from data; relative ones need a filename."""
    one = r"""
Section1.int: 1
"""
    with utils.TempDirectory() as temp_dir:
      configone = os.path.join(temp_dir, "1.yaml")
      with io.open(configone, "w") as fd:
        fd.write(one)
      absolute_include = (r"""
Config.includes:
  - %s
Section1.int: 2
""" % configone)
      conf = self._GetNewConf()
      conf.Initialize(
          parser=config_parser.YamlConfigFileParser, data=absolute_include)
      # The included file is loaded last and overrides the inline value.
      self.assertEqual(conf["Section1.int"], 1)
      relative_include = r"""
Config.includes:
  - 1.yaml
Section1.int: 2
"""
      conf = self._GetNewConf()
      # Can not include a relative path from config without a filename.
      self.assertRaises(
          config_lib.ConfigFileNotFound,
          conf.Initialize,
          parser=config_parser.YamlConfigFileParser,
          data=relative_include)
      # If we write it to a file it should work though.
      configtwo = os.path.join(temp_dir, "2.yaml")
      with io.open(configtwo, "w") as fd:
        fd.write(relative_include)
      conf.Initialize(
          parser=config_parser.YamlConfigFileParser, filename=configtwo)
      self.assertEqual(conf["Section1.int"], 1)
  def testConfigFileInclusionWindowsPaths(self):
    """Include resolution also works with Windows (ntpath) path semantics."""
    one = r"""
Config.includes:
  - 2.yaml
Section1.int: 1
"""
    two = r"""
Section1.int: 2
SecondaryFileIncluded: true
"""
    config_path = "C:\\Windows\\System32\\GRR"

    def MockedWindowsOpen(filename, _=None):
      # Fake io.open: serve the two YAML blobs by basename, reject anything
      # outside config_path.
      basename = ntpath.basename(filename)
      dirname = ntpath.dirname(filename)
      # Make sure we only try to open files from this directory.
      if dirname != config_path:
        raise IOError("Tried to open wrong file %s" % filename)
      if basename == "1.yaml":
        return io.BytesIO(one.encode("utf-8"))
      if basename == "2.yaml":
        return io.BytesIO(two.encode("utf-8"))
      raise IOError("File not found %s" % filename)

    # TODO(user): this kind of mocking is a questionable practice at best.
    # We have Windows-specific tests and should use them for this kind of
    # testing.
    #
    # We need to also use the nt path manipulation modules.
    with utils.MultiStubber((io, "open", MockedWindowsOpen),
                            (os, "path", ntpath)):
      conf = self._GetNewConf()
      conf.Initialize(filename=ntpath.join(config_path, "1.yaml"))
      self.assertEqual(conf["Section1.int"], 2)
      self.assertEqual(conf["SecondaryFileIncluded"], True)
  def testConfigFileInclusionWithContext(self):
    """Includes nested under a context only apply when that context is active."""
    one = r"""
Client Context:
  Config.includes:
    - 2.yaml
Section1.int: 1
"""
    two = r"""
Section1.int: 2
SecondaryFileIncluded: true
"""
    with utils.TempDirectory() as temp_dir:
      configone = os.path.join(temp_dir, "1.yaml")
      configtwo = os.path.join(temp_dir, "2.yaml")
      with io.open(configone, "w") as fd:
        fd.write(one)
      with io.open(configtwo, "w") as fd:
        fd.write(two)
      # Without specifying the context the includes are not processed.
      conf = self._GetNewConf()
      conf.Initialize(
          parser=config_parser.YamlConfigFileParser, filename=configone)
      self.assertEqual(conf["Section1.int"], 1)
      # Only one config is loaded.
      self.assertEqual(conf.files, [configone])
      # Now we specify the context.
      conf = self._GetNewConf()
      conf.AddContext("Client Context")
      conf.Initialize(
          parser=config_parser.YamlConfigFileParser, filename=configone)
      # Both config files were loaded. Note that load order is important and
      # well defined.
      self.assertEqual(conf.files, [configone, configtwo])
      self.assertEqual(conf["Section1.int"], 2)
  def testMatchBuildContext(self):
    """MatchBuildContext checks targets listed under the active context."""
    context = """
Test1 Context:
  Client.labels: [Test1]
  ClientBuilder.target_platforms:
    - linux_amd64_deb
    - linux_i386_deb
    - windows_amd64_exe
Test2 Context:
  Client.labels: [Test2]
Test3 Context:
  Client.labels: [Test3]
  ClientBuilder.target_platforms:
    - linux_amd64_deb
    - windows_i386_exe
"""
    conf = config.CONFIG.MakeNewConfig()
    conf.DEFINE_context("Test1 Context")
    conf.DEFINE_context("Test2 Context")
    conf.DEFINE_context("Test3 Context")
    conf.Initialize(parser=config_parser.YamlConfigFileParser, data=context)
    conf.AddContext("Test1 Context")
    # (os, arch, package) -> whether Test1 Context declares that target.
    result_map = [(("linux", "amd64", "deb"), True),
                  (("linux", "i386", "deb"), True),
                  (("windows", "amd64", "exe"), True),
                  (("windows", "i386", "exe"), False)]
    for result in result_map:
      self.assertEqual(conf.MatchBuildContext(*result[0]), result[1])
  def testMatchBuildContextError(self):
    """Raise because the same target was listed twice."""
    context = """
Test1 Context:
  Client.labels: [Test1]
  ClientBuilder.target_platforms:
    - linux_amd64_deb
    - linux_i386_deb
    - linux_amd64_deb
    - windows_amd64_exe
"""
    conf = config.CONFIG.MakeNewConfig()
    conf.DEFINE_context("Test1 Context")
    conf.Initialize(parser=config_parser.YamlConfigFileParser, data=context)
    conf.AddContext("Test1 Context")
    with self.assertRaises(type_info.TypeValueError):
      conf.MatchBuildContext("linux", "amd64", "deb")
  def testNoUnicodeWriting(self):
    """Written YAML must not contain python/unicode type tags."""
    conf = config.CONFIG.MakeNewConfig()
    config_file = os.path.join(self.temp_dir, "writeback.yaml")
    conf.SetWriteBack(config_file)
    conf.DEFINE_string("NewSection1.new_option1", u"Default Value", "Help")
    conf.Set(str("NewSection1.new_option1"), u"New Value1")
    conf.Write()
    data = io.open(config_file).read()
    self.assertNotIn("!!python/unicode", data)
  def testNoUnicodeReading(self):
    """Check that we can parse yaml files with unicode tags."""
    data = """
Client.labels: [Test1]
!!python/unicode ClientBuilder.target_platforms:
  - linux_amd64_deb
"""
    conf = config.CONFIG.MakeNewConfig()
    # Must not raise despite the legacy !!python/unicode tag.
    conf.Initialize(parser=config_parser.YamlConfigFileParser, data=data)
  def testRenameOnWritebackFailure(self):
    """An unparseable writeback file is moved aside to a .bak file."""
    conf = config.CONFIG.MakeNewConfig()
    writeback_file = os.path.join(self.temp_dir, "writeback.yaml")
    with io.open(writeback_file, "w") as f:
      f.write("This is a bad line of yaml{[(\n")
      # Redundant: the context manager closes the file as well.
      f.close()
    self.assertRaises(AttributeError, conf.SetWriteBack, writeback_file)
    self.assertTrue(os.path.isfile(writeback_file + ".bak"))
  def testNoRenameOfReadProtectedFile(self):
    """Don't rename config files we don't have permission to read."""
    writeback_file = os.path.join(self.temp_dir, "writeback.yaml")
    with io.open(writeback_file, mode="w", encoding="utf-8") as f:
      f.write("")
    # Remove all permissions except user write.
    os.chmod(writeback_file, stat.S_IWUSR)
    conf = config.CONFIG.MakeNewConfig()
    conf.SetWriteBack(writeback_file)
    # File is still in the same place
    self.assertTrue(os.path.isfile(writeback_file))
def main(argv):
  """Entry point: delegate to the GRR test runner."""
  test_lib.main(argv)


if __name__ == "__main__":
  app.run(main)
| |
############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Z3 Python interface
#
# Author: Leonardo de Moura (leonardo)
############################################
import sys, io, z3
from z3consts import *
from z3core import *
from ctypes import *
##############################
#
# Configuration
#
##############################
# Z3 operator names to Z3Py
# Maps Z3 operator kinds to their Z3Py spelling (used by the text printer).
_z3_op_to_str = {
    Z3_OP_TRUE : 'True', Z3_OP_FALSE : 'False', Z3_OP_EQ : '==', Z3_OP_DISTINCT : 'Distinct',
    Z3_OP_ITE : 'If', Z3_OP_AND : 'And', Z3_OP_OR : 'Or', Z3_OP_IFF : '==', Z3_OP_XOR : 'Xor',
    Z3_OP_NOT : 'Not', Z3_OP_IMPLIES : 'Implies', Z3_OP_IDIV : '/', Z3_OP_MOD : '%',
    Z3_OP_TO_REAL : 'ToReal', Z3_OP_TO_INT : 'ToInt', Z3_OP_POWER : '**', Z3_OP_IS_INT : 'IsInt',
    Z3_OP_BADD : '+', Z3_OP_BSUB : '-', Z3_OP_BMUL : '*', Z3_OP_BOR : '|', Z3_OP_BAND : '&',
    Z3_OP_BNOT : '~', Z3_OP_BXOR : '^', Z3_OP_BNEG : '-', Z3_OP_BUDIV : 'UDiv', Z3_OP_BSDIV : '/', Z3_OP_BSMOD : '%',
    Z3_OP_BSREM : 'SRem', Z3_OP_BUREM : 'URem', Z3_OP_EXT_ROTATE_LEFT : 'RotateLeft', Z3_OP_EXT_ROTATE_RIGHT : 'RotateRight',
    Z3_OP_SLEQ : '<=', Z3_OP_SLT : '<', Z3_OP_SGEQ : '>=', Z3_OP_SGT : '>',
    Z3_OP_ULEQ : 'ULE', Z3_OP_ULT : 'ULT', Z3_OP_UGEQ : 'UGE', Z3_OP_UGT : 'UGT',
    Z3_OP_SIGN_EXT : 'SignExt', Z3_OP_ZERO_EXT : 'ZeroExt', Z3_OP_REPEAT : 'RepeatBitVec',
    Z3_OP_BASHR : '>>', Z3_OP_BSHL : '<<', Z3_OP_BLSHR : 'LShR',
    Z3_OP_CONCAT : 'Concat', Z3_OP_EXTRACT : 'Extract', Z3_OP_BV2INT : 'BV2Int',
    Z3_OP_ARRAY_MAP : 'Map', Z3_OP_SELECT : 'Select', Z3_OP_STORE : 'Store',
    Z3_OP_CONST_ARRAY : 'K', Z3_OP_ARRAY_EXT : 'Ext',
    Z3_OP_PB_AT_MOST : 'AtMost', Z3_OP_PB_LE : 'PbLe', Z3_OP_PB_GE : 'PbGe'
    }

# List of infix operators
_z3_infix = [
    Z3_OP_EQ, Z3_OP_IFF, Z3_OP_ADD, Z3_OP_SUB, Z3_OP_MUL, Z3_OP_DIV, Z3_OP_IDIV, Z3_OP_MOD, Z3_OP_POWER,
    Z3_OP_LE, Z3_OP_LT, Z3_OP_GE, Z3_OP_GT, Z3_OP_BADD, Z3_OP_BSUB, Z3_OP_BMUL, Z3_OP_BSDIV, Z3_OP_BSMOD, Z3_OP_BOR, Z3_OP_BAND,
    Z3_OP_BXOR, Z3_OP_BSDIV, Z3_OP_SLEQ, Z3_OP_SLT, Z3_OP_SGEQ, Z3_OP_SGT, Z3_OP_BASHR, Z3_OP_BSHL
    ]

# Operators printed prefix with a single argument.
_z3_unary = [ Z3_OP_UMINUS, Z3_OP_BNOT, Z3_OP_BNEG ]

# Precedence
# Lower number = binds tighter.
_z3_precedence = {
    Z3_OP_POWER : 0,
    Z3_OP_UMINUS : 1, Z3_OP_BNEG : 1, Z3_OP_BNOT : 1,
    Z3_OP_MUL : 2, Z3_OP_DIV : 2, Z3_OP_IDIV : 2, Z3_OP_MOD : 2, Z3_OP_BMUL : 2, Z3_OP_BSDIV : 2, Z3_OP_BSMOD : 2,
    Z3_OP_ADD : 3, Z3_OP_SUB : 3, Z3_OP_BADD : 3, Z3_OP_BSUB : 3,
    Z3_OP_BASHR : 4, Z3_OP_BSHL : 4,
    Z3_OP_BAND : 5,
    Z3_OP_BXOR : 6,
    Z3_OP_BOR : 7,
    Z3_OP_LE : 8, Z3_OP_LT : 8, Z3_OP_GE : 8, Z3_OP_GT : 8, Z3_OP_EQ : 8, Z3_OP_SLEQ : 8, Z3_OP_SLT : 8, Z3_OP_SGEQ : 8, Z3_OP_SGT : 8,
    Z3_OP_IFF : 8,
    Z3_OP_FPA_NEG : 1,
    Z3_OP_FPA_MUL : 2, Z3_OP_FPA_DIV : 2, Z3_OP_FPA_REM : 2, Z3_OP_FPA_FMA : 2,
    Z3_OP_FPA_ADD: 3, Z3_OP_FPA_SUB : 3,
    Z3_OP_FPA_LE : 8, Z3_OP_FPA_LT : 8, Z3_OP_FPA_GE : 8, Z3_OP_FPA_GT : 8, Z3_OP_FPA_EQ : 8
    }

# FPA operators
# Verbose (function-call style) names for floating-point operators.
_z3_op_to_fpa_normal_str = {
    Z3_OP_FPA_RM_NEAREST_TIES_TO_EVEN : 'RoundNearestTiesToEven()', Z3_OP_FPA_RM_NEAREST_TIES_TO_AWAY : 'RoundNearestTiesToAway()',
    Z3_OP_FPA_RM_TOWARD_POSITIVE : 'RoundTowardPositive()', Z3_OP_FPA_RM_TOWARD_NEGATIVE : 'RoundTowardNegative()',
    Z3_OP_FPA_RM_TOWARD_ZERO : 'RoundTowardZero()',
    Z3_OP_FPA_PLUS_INF : 'fpPlusInfinity', Z3_OP_FPA_MINUS_INF : 'fpMinusInfinity',
    Z3_OP_FPA_NAN : 'fpNaN', Z3_OP_FPA_PLUS_ZERO : 'fpPZero', Z3_OP_FPA_MINUS_ZERO : 'fpNZero',
    Z3_OP_FPA_ADD : 'fpAdd', Z3_OP_FPA_SUB : 'fpSub', Z3_OP_FPA_NEG : 'fpNeg', Z3_OP_FPA_MUL : 'fpMul',
    Z3_OP_FPA_DIV : 'fpDiv', Z3_OP_FPA_REM : 'fpRem', Z3_OP_FPA_ABS : 'fpAbs',
    Z3_OP_FPA_MIN : 'fpMin', Z3_OP_FPA_MAX : 'fpMax',
    Z3_OP_FPA_FMA : 'fpFMA', Z3_OP_FPA_SQRT : 'fpSqrt', Z3_OP_FPA_ROUND_TO_INTEGRAL : 'fpRoundToIntegral',
    Z3_OP_FPA_EQ : 'fpEQ', Z3_OP_FPA_LT : 'fpLT', Z3_OP_FPA_GT : 'fpGT', Z3_OP_FPA_LE : 'fpLEQ',
    Z3_OP_FPA_GE : 'fpGEQ',
    Z3_OP_FPA_IS_NAN : 'fpIsNaN', Z3_OP_FPA_IS_INF : 'fpIsInf', Z3_OP_FPA_IS_ZERO : 'fpIsZero',
    Z3_OP_FPA_IS_NORMAL : 'fpIsNormal', Z3_OP_FPA_IS_SUBNORMAL : 'fpIsSubnormal',
    Z3_OP_FPA_IS_NEGATIVE : 'fpIsNegative', Z3_OP_FPA_IS_POSITIVE : 'fpIsPositive',
    Z3_OP_FPA_FP : 'fpFP', Z3_OP_FPA_TO_FP : 'fpToFP', Z3_OP_FPA_TO_FP_UNSIGNED: 'fpToFPUnsigned',
    Z3_OP_FPA_TO_UBV : 'fpToUBV', Z3_OP_FPA_TO_SBV : 'fpToSBV', Z3_OP_FPA_TO_REAL: 'fpToReal',
    Z3_OP_FPA_TO_IEEE_BV : 'fpToIEEEBV'
    }

# Compact (mathematical) spellings used when fpa_pretty printing is enabled.
_z3_op_to_fpa_pretty_str = {
    Z3_OP_FPA_RM_NEAREST_TIES_TO_EVEN : 'RNE()', Z3_OP_FPA_RM_NEAREST_TIES_TO_AWAY : 'RNA()',
    Z3_OP_FPA_RM_TOWARD_POSITIVE : 'RTP()', Z3_OP_FPA_RM_TOWARD_NEGATIVE : 'RTN()',
    Z3_OP_FPA_RM_TOWARD_ZERO : 'RTZ()',
    Z3_OP_FPA_PLUS_INF : '+oo', Z3_OP_FPA_MINUS_INF : '-oo',
    Z3_OP_FPA_NAN : 'NaN', Z3_OP_FPA_PLUS_ZERO : '+0.0', Z3_OP_FPA_MINUS_ZERO : '-0.0',
    Z3_OP_FPA_ADD : '+', Z3_OP_FPA_SUB : '-', Z3_OP_FPA_MUL : '*', Z3_OP_FPA_DIV : '/',
    Z3_OP_FPA_REM : '%', Z3_OP_FPA_NEG : '-',
    Z3_OP_FPA_EQ : 'fpEQ', Z3_OP_FPA_LT : '<', Z3_OP_FPA_GT : '>', Z3_OP_FPA_LE : '<=', Z3_OP_FPA_GE : '>='
    }

# FPA operators rendered infix in pretty mode.
_z3_fpa_infix = [
    Z3_OP_FPA_ADD, Z3_OP_FPA_SUB, Z3_OP_FPA_MUL, Z3_OP_FPA_DIV, Z3_OP_FPA_REM,
    Z3_OP_FPA_LT, Z3_OP_FPA_GT, Z3_OP_FPA_LE, Z3_OP_FPA_GE
    ]
def _is_assoc(k):
    """Return True for associative operators whose applications may be flattened."""
    return k in (Z3_OP_BOR, Z3_OP_BXOR, Z3_OP_BAND,
                 Z3_OP_ADD, Z3_OP_BADD, Z3_OP_MUL, Z3_OP_BMUL)
def _is_left_assoc(k):
    """Return True for left-associative operators (associative ones plus subtraction)."""
    return k in (Z3_OP_SUB, Z3_OP_BSUB) or _is_assoc(k)
def _is_html_assoc(k):
    """Return True if k is treated as associative in HTML mode (adds And/Or/Iff)."""
    return k in (Z3_OP_AND, Z3_OP_OR, Z3_OP_IFF) or _is_assoc(k)
def _is_html_left_assoc(k):
    """Return True if k is left-associative in HTML mode."""
    return k in (Z3_OP_SUB, Z3_OP_BSUB) or _is_html_assoc(k)
def _is_add(k):
    """Return True for arithmetic or bit-vector addition."""
    return k in (Z3_OP_ADD, Z3_OP_BADD)
def _is_sub(k):
    """Return True for arithmetic or bit-vector subtraction."""
    return k in (Z3_OP_SUB, Z3_OP_BSUB)
# NOTE: `sys` is already imported at the top of the file; re-imported here so
# this Python-2/3 compatibility shim is self-contained.
import sys
if sys.version < '3':
    import codecs
    def u(x):
        # Python 2: decode escape sequences and return a unicode object.
        return codecs.unicode_escape_decode(x)[0]
else:
    def u(x):
        # Python 3: str is already unicode, return unchanged.
        return x
_z3_infix_compact = [ Z3_OP_MUL, Z3_OP_BMUL, Z3_OP_POWER, Z3_OP_DIV, Z3_OP_IDIV, Z3_OP_MOD, Z3_OP_BSDIV, Z3_OP_BSMOD ]
_ellipses = '...'
_html_ellipses = '…'
# Overwrite some of the operators for HTML
_z3_pre_html_op_to_str = { Z3_OP_EQ : '=', Z3_OP_IFF : '=', Z3_OP_NOT : '¬',
Z3_OP_AND : '∧', Z3_OP_OR : '∨', Z3_OP_IMPLIES : '⇒',
Z3_OP_LT : '<', Z3_OP_GT : '>', Z3_OP_LE : '≤', Z3_OP_GE : '≥',
Z3_OP_MUL : '·',
Z3_OP_SLEQ : '≤', Z3_OP_SLT : '<', Z3_OP_SGEQ : '≥', Z3_OP_SGT : '>',
Z3_OP_ULEQ : '≤<sub>u</sub>', Z3_OP_ULT : '<<sub>u</sub>',
Z3_OP_UGEQ : '≥<sub>u</sub>', Z3_OP_UGT : '><sub>u</sub>',
Z3_OP_BMUL : '·',
Z3_OP_BUDIV : '/<sub>u</sub>', Z3_OP_BUREM : '%<sub>u</sub>',
Z3_OP_BASHR : '>>', Z3_OP_BSHL : '<<',
Z3_OP_BLSHR : '>><sub>u</sub>'
}
# Extra operators that are infix/unary for HTML
_z3_html_infix = [ Z3_OP_AND, Z3_OP_OR, Z3_OP_IMPLIES,
Z3_OP_ULEQ, Z3_OP_ULT, Z3_OP_UGEQ, Z3_OP_UGT, Z3_OP_BUDIV, Z3_OP_BUREM, Z3_OP_BLSHR
]
_z3_html_unary = [ Z3_OP_NOT ]
# Extra Precedence for HTML
# NOTE: the original literal listed Z3_OP_ULEQ/ULT/UGEQ/UGT twice; duplicate
# dict keys are silently overwritten, so the redundant entries are removed.
_z3_pre_html_precedence = { Z3_OP_BUDIV : 2, Z3_OP_BUREM : 2,
                            Z3_OP_BLSHR : 4,
                            Z3_OP_ULEQ : 8, Z3_OP_ULT : 8,
                            Z3_OP_UGEQ : 8, Z3_OP_UGT : 8,
                            Z3_OP_NOT : 1,
                            Z3_OP_AND : 10,
                            Z3_OP_OR : 11,
                            Z3_OP_IMPLIES : 12 }
##############################
#
# End of Configuration
#
##############################
def _support_pp(a):
    """Return True if `a` is something the pretty-printer knows how to render."""
    return isinstance(a, (z3.Z3PPObject, list, tuple))
_infix_map = {}
_unary_map = {}
_infix_compact_map = {}

# Materialize membership maps for O(1) operator-kind lookups.
for _k in _z3_infix:
    _infix_map[_k] = True
for _k in _z3_unary:
    _unary_map[_k] = True
for _k in _z3_infix_compact:
    _infix_compact_map[_k] = True
def _is_infix(k):
    """Return True if operator kind k is printed infix."""
    # No `global` declaration needed for a read-only lookup.
    return _infix_map.get(k, False)
def _is_infix_compact(k):
    """Return True if operator kind k is printed infix without spaces."""
    return _infix_compact_map.get(k, False)
def _is_unary(k):
    """Return True if operator kind k is printed as a prefix unary operator."""
    return _unary_map.get(k, False)
def _op_name(a):
    """Return the Z3Py operator name for a declaration or an expression.

    Accepts either a FuncDeclRef or any expression (whose decl() is used);
    falls back to the declaration's own name when no Z3Py alias exists.
    """
    f = a if isinstance(a, z3.FuncDeclRef) else a.decl()
    n = _z3_op_to_str.get(f.kind())
    # PEP 8: compare against None with `is`, not `==`.
    if n is None:
        return f.name()
    return n
def _get_precedence(k):
    """Printing precedence of k; unknown operators get a very low precedence."""
    return _z3_precedence.get(k, 100000)
_z3_html_op_to_str = {}
# Start from the plain-text table and overlay the HTML-specific spellings.
for _k in _z3_op_to_str:
    _v = _z3_op_to_str[_k]
    _z3_html_op_to_str[_k] = _v
for _k in _z3_pre_html_op_to_str:
    _v = _z3_pre_html_op_to_str[_k]
    _z3_html_op_to_str[_k] = _v

_z3_html_precedence = {}
# Same overlay scheme for precedences.
for _k in _z3_precedence:
    _v = _z3_precedence[_k]
    _z3_html_precedence[_k] = _v
for _k in _z3_pre_html_precedence:
    _v = _z3_pre_html_precedence[_k]
    _z3_html_precedence[_k] = _v

_html_infix_map = {}
_html_unary_map = {}
# HTML mode treats the text infix/unary operators plus the HTML extras.
for _k in _z3_infix:
    _html_infix_map[_k] = True
for _k in _z3_html_infix:
    _html_infix_map[_k] = True
for _k in _z3_unary:
    _html_unary_map[_k] = True
for _k in _z3_html_unary:
    _html_unary_map[_k] = True
def _is_html_infix(k):
    """Return True if operator kind k is printed infix in HTML mode."""
    return _html_infix_map.get(k, False)
def _is_html_unary(k):
    """Return True if operator kind k is printed as prefix unary in HTML mode."""
    return _html_unary_map.get(k, False)
def _html_op_name(a):
    """Return the HTML rendering of the operator name of a declaration/expression.

    Anonymous (integer-numbered) symbols are shown as a subscripted zeta;
    other unknown operators fall back to their plain declaration name.
    """
    f = a if isinstance(a, z3.FuncDeclRef) else a.decl()
    k = f.kind()
    n = _z3_html_op_to_str.get(k)
    # PEP 8: compare against None with `is`, not `==`.
    if n is not None:
        return n
    sym = Z3_get_decl_name(f.ctx_ref(), f.ast)
    if Z3_get_symbol_kind(f.ctx_ref(), sym) == Z3_INT_SYMBOL:
        return "&zeta;<sub>%s</sub>" % Z3_get_symbol_int(f.ctx_ref(), sym)
    else:
        # Sanitize the string
        return f.name()
def _get_html_precedence(k):
    """HTML printing precedence of k; unknown operators get a very low precedence."""
    # The original declared `global _z3_html_predence` -- a misspelling of
    # _z3_html_precedence. The declaration was dead (a read does not need
    # `global`), so it is simply dropped.
    return _z3_html_precedence.get(k, 100000)
class FormatObject:
    """Base class for pretty-printer layout trees.

    The base instance behaves as `nil`: no children, zero width, no line
    breaks. Subclasses override the predicates and traversal hooks.
    """
    def is_compose(self):
        return False
    def is_choice(self):
        return False
    def is_indent(self):
        return False
    def is_string(self):
        return False
    def is_linebreak(self):
        return False
    def is_nil(self):
        return True
    def children(self):
        return []
    def as_tuple(self):
        # Debug representation; None for nil.
        return None
    def space_upto_nl(self):
        # (width consumed before the first newline, newline found?)
        return (0, False)
    def flat(self):
        # One-line version of this layout; nil is already flat.
        return self
class NAryFormatObject(FormatObject):
    """Format object holding a list of child format objects.

    NOTE(review): the constructor's `self.children = fs` shadows the
    `children` method below on every instance, so subclasses access
    `self.children` as a plain list; the method definition is effectively
    dead code. Kept byte-identical for compatibility.
    """
    def __init__(self, fs):
        assert all([isinstance(a, FormatObject) for a in fs])
        self.children = fs
    def children(self):
        return self.children
class ComposeFormatObject(NAryFormatObject):
    """Horizontal composition (concatenation) of child format objects."""
    def is_compose(self):  # fixed: parameter was misspelled `sef`
        return True
    def as_tuple(self):
        return ('compose', [ a.as_tuple() for a in self.children ])
    def space_upto_nl(self):
        # Sum children's widths until the first child that contains a newline.
        r = 0
        for child in self.children:
            s, nl = child.space_upto_nl()
            r = r + s
            if nl:
                return (r, True)
        return (r, False)
    def flat(self):
        return compose([a.flat() for a in self.children ])
class ChoiceFormatObject(NAryFormatObject):
    """A choice between alternative layouts; the first child is preferred."""
    def is_choice(self):  # fixed: parameter was misspelled `sef`
        return True
    def as_tuple(self):
        return ('choice', [ a.as_tuple() for a in self.children ])
    def space_upto_nl(self):
        # The first (preferred) alternative determines the measured width.
        return self.children[0].space_upto_nl()
    def flat(self):
        return self.children[0].flat()
class IndentFormatObject(FormatObject):
    """Wraps a single child document, indenting it by *indent* columns."""

    def __init__(self, indent, child):
        assert isinstance(child, FormatObject)
        self.indent = indent
        self.child = child

    def is_indent(self):
        return True

    def children(self):
        return [self.child]

    def as_tuple(self):
        return ('indent', self.indent, self.child.as_tuple())

    def space_upto_nl(self):
        # Indentation consumes no space until a line break happens.
        return self.child.space_upto_nl()

    def flat(self):
        return indent(self.indent, self.child.flat())
class LineBreakFormatObject(FormatObject):
    """A breakable separator; renders as self.space when flattened away."""

    def __init__(self):
        # Text emitted in place of the break by flat() (a single space by
        # default; seq() clears it for compact separators).
        self.space = ' '

    def is_linebreak(self):
        return True

    def as_tuple(self):
        return '<line-break>'

    def space_upto_nl(self):
        return (0, True)

    def flat(self):
        return to_format(self.space)
class StringFormatObject(FormatObject):
    """Leaf document holding a literal string.

    An optional 'size' attribute can override the display width — used
    for HTML entities whose source length differs from rendered width.
    """

    def __init__(self, string):
        assert isinstance(string, str)
        self.string = string

    def is_string(self):
        return True

    def as_tuple(self):
        return self.string

    def space_upto_nl(self):
        return (getattr(self, 'size', len(self.string)), False)
def fits(f, space_left):
    """Return True when document *f* fits into *space_left* columns,
    measuring only up to its first line break."""
    used, _ = f.space_upto_nl()
    return used <= space_left
def to_format(arg, size=None):
    """Coerce *arg* into a FormatObject.

    FormatObject instances are returned unchanged; anything else is
    stringified into a StringFormatObject.  *size*, when given, overrides
    the rendered width (used for multi-byte HTML entities).
    """
    if isinstance(arg, FormatObject):
        return arg
    r = StringFormatObject(str(arg))
    if size is not None:  # idiom fix: compare with None using 'is'
        r.size = size
    return r
def compose(*args):
    """Concatenate format objects; accepts either varargs or a single
    list/tuple of children."""
    if len(args) == 1 and isinstance(args[0], (list, tuple)):
        args = args[0]
    return ComposeFormatObject(args)
def indent(i, arg):
    """Indent document *arg* by *i* columns."""
    return IndentFormatObject(i, arg)
def group(arg):
    """Prefer the flattened (single-line) layout of *arg*, falling back to
    the original layout when it does not fit."""
    return ChoiceFormatObject([arg.flat(), arg])
def line_break():
    """Create a fresh breakable separator."""
    return LineBreakFormatObject()
def _len(a):
    """Display width of *a*: honours a StringFormatObject's 'size'
    override, otherwise falls back to plain len()."""
    if isinstance(a, StringFormatObject):
        return getattr(a, 'size', len(a.string))
    return len(a)
def seq(args, sep=',', space=True):
    """Interleave *args* with *sep* followed by a breakable separator.

    When *space* is False the (single, shared) separator flattens to the
    empty string instead of a space.
    """
    nl = line_break()
    if not space:
        nl.space = ''
    parts = [args[0]]
    for item in args[1:]:
        parts.append(to_format(sep))
        parts.append(nl)
        parts.append(item)
    return compose(parts)
def seq1(header, args, lp='(', rp=')'):
    """Layout 'header(lp args rp)', aligning wrapped arguments directly
    after 'header(' when the line must break."""
    body = indent(len(lp) + _len(header), seq(args))
    return group(compose(to_format(header), to_format(lp), body, to_format(rp)))
def seq2(header, args, i=4, lp='(', rp=')'):
    """Like seq1 but wrapped arguments are indented by a fixed amount *i*."""
    if len(args) == 0:
        return compose(to_format(header), to_format(lp), to_format(rp))
    head = indent(len(lp), compose(to_format(lp), to_format(header)))
    tail = indent(i, compose(seq(args), to_format(rp)))
    return group(compose(head, tail))
def seq3(args, lp='(', rp=')'):
    """Layout a bracketed comma-separated sequence: 'lp args rp'."""
    if len(args) == 0:
        return compose(to_format(lp), to_format(rp))
    return group(indent(len(lp), compose(to_format(lp), seq(args), to_format(rp))))
class StopPPException(Exception):
    """Raised internally to abort printing once the line budget runs out."""

    def __str__(self):
        return 'pp-interrupted'
class PP:
    """Renders a FormatObject tree to a stream within a width/line budget.

    Choice nodes pick their first alternative when it fits in the space
    remaining on the current line; output is truncated (ellipsis / '...')
    once max_width (when bounded) or max_lines is exceeded.  Rendering
    state (pos, ribbon_pos, line, out) lives on the instance and is reset
    by __call__, so a PP instance is not safe for concurrent use.
    """
    def __init__(self):
        self.max_lines = 200   # stop printing after this many lines
        self.max_width = 60    # target line width
        self.bounded = False   # when True, hard-truncate overlong lines
        self.max_indent = 40   # cap on accumulated indentation
    def pp_string(self, f, indent):
        if not self.bounded or self.pos <= self.max_width:
            sz = _len(f)
            if self.bounded and self.pos + sz > self.max_width:
                # Width budget exhausted: emit an ellipsis instead.
                self.out.write(u(_ellipses))
            else:
                self.pos = self.pos + sz
                self.ribbon_pos = self.ribbon_pos + sz
                self.out.write(u(f.string))
    def pp_compose(self, f, indent):
        for c in f.children:
            self.pp(c, indent)
    def pp_choice(self, f, indent):
        # Prefer the first alternative when it fits on the current line.
        space_left = self.max_width - self.pos
        if space_left > 0 and fits(f.children[0], space_left):
            self.pp(f.children[0], indent)
        else:
            self.pp(f.children[1], indent)
    def pp_line_break(self, f, indent):
        self.pos = indent
        self.ribbon_pos = 0
        self.line = self.line + 1
        if self.line < self.max_lines:
            self.out.write(u('\n'))
            for i in range(indent):
                self.out.write(u(' '))
        else:
            # Line budget exhausted: truncate and unwind via the exception.
            self.out.write(u('\n...'))
            raise StopPPException()
    def pp(self, f, indent):
        # Dispatch on the node's shape; nil nodes print nothing.
        if f.is_string():
            self.pp_string(f, indent)
        elif f.is_indent():
            self.pp(f.child, min(indent + f.indent, self.max_indent))
        elif f.is_compose():
            self.pp_compose(f, indent)
        elif f.is_choice():
            self.pp_choice(f, indent)
        elif f.is_linebreak():
            self.pp_line_break(f, indent)
        else:
            return
    def __call__(self, out, f):
        # Reset rendering state, then print; StopPPException signals
        # normal truncation, not an error.
        try:
            self.pos = 0
            self.ribbon_pos = 0
            self.line = 0
            self.out = out
            self.pp(f, 0)
        except StopPPException:
            return
class Formatter:
    """Translates z3 ASTs (and related objects) into FormatObject trees.

    The entry point is __call__/main; pp_expr dispatches on the kind of a
    term, with depth (max_depth), node-count (max_visited) and arity
    (max_args) budgets that degrade output to an ellipsis rather than
    recursing forever.  Fixes vs. the original: None comparisons use
    'is'/'is not', and the c_int construction in pp_fp_value is written
    consistently.
    """
    def __init__(self):
        global _ellipses
        self.max_depth = 20
        self.max_args = 128
        self.rational_to_decimal = False
        self.precision = 10
        self.ellipses = to_format(_ellipses)
        self.max_visited = 10000
        self.fpa_pretty = True
    def pp_ellipses(self):
        return self.ellipses
    def pp_arrow(self):
        return ' ->'
    def pp_unknown(self):
        return '<unknown>'
    def pp_name(self, a):
        return to_format(_op_name(a))
    def is_infix(self, a):
        return _is_infix(a)
    def is_unary(self, a):
        return _is_unary(a)
    def get_precedence(self, a):
        return _get_precedence(a)
    def is_infix_compact(self, a):
        return _is_infix_compact(a)
    def is_infix_unary(self, a):
        return self.is_infix(a) or self.is_unary(a)
    def add_paren(self, a):
        return compose(to_format('('), indent(1, a), to_format(')'))
    def pp_sort(self, s):
        if isinstance(s, z3.ArraySortRef):
            return seq1('Array', (self.pp_sort(s.domain()), self.pp_sort(s.range())))
        elif isinstance(s, z3.BitVecSortRef):
            return seq1('BitVec', (to_format(s.size()), ))
        elif isinstance(s, z3.FPSortRef):
            return seq1('FPSort', (to_format(s.ebits()), to_format(s.sbits())))
        else:
            return to_format(s.name())
    def pp_const(self, a):
        return self.pp_name(a)
    def pp_int(self, a):
        return to_format(a.as_string())
    def pp_rational(self, a):
        if not self.rational_to_decimal:
            return to_format(a.as_string())
        else:
            return to_format(a.as_decimal(self.precision))
    def pp_algebraic(self, a):
        return to_format(a.as_decimal(self.precision))
    def pp_string(self, a):
        return to_format(a.as_string())
    def pp_bv(self, a):
        return to_format(a.as_string())
    def pp_fd(self, a):
        return to_format(a.as_string())
    def pp_fprm_value(self, a):
        """Pretty-print a floating-point rounding-mode value."""
        z3._z3_assert(z3.is_fprm_value(a), 'expected FPRMNumRef')
        if self.fpa_pretty and (a.decl().kind() in _z3_op_to_fpa_pretty_str):
            return to_format(_z3_op_to_fpa_pretty_str.get(a.decl().kind()))
        else:
            return to_format(_z3_op_to_fpa_normal_str.get(a.decl().kind()))
    def pp_fp_value(self, a):
        """Pretty-print a floating-point numeral (NaN/Inf/zero/ordinary)."""
        z3._z3_assert(isinstance(a, z3.FPNumRef), 'type mismatch')
        if not self.fpa_pretty:
            r = []
            if (a.isNaN()):
                r.append(to_format(_z3_op_to_fpa_normal_str[Z3_OP_FPA_NAN]))
                r.append(to_format('('))
                r.append(to_format(a.sort()))
                r.append(to_format(')'))
                return compose(r)
            elif (a.isInf()):
                if (a.isNegative()):
                    r.append(to_format(_z3_op_to_fpa_normal_str[Z3_OP_FPA_MINUS_INF]))
                else:
                    r.append(to_format(_z3_op_to_fpa_normal_str[Z3_OP_FPA_PLUS_INF]))
                r.append(to_format('('))
                r.append(to_format(a.sort()))
                r.append(to_format(')'))
                return compose(r)
            elif (a.isZero()):
                if (a.isNegative()):
                    return to_format('-zero')
                else:
                    return to_format('+zero')
            else:
                z3._z3_assert(z3.is_fp_value(a), 'expecting FP num ast')
                r = []
                sgn = c_int(0)
                sgnb = Z3_fpa_get_numeral_sign(a.ctx_ref(), a.ast, byref(sgn))
                sig = Z3_fpa_get_numeral_significand_string(a.ctx_ref(), a.ast)
                exp = Z3_fpa_get_numeral_exponent_string(a.ctx_ref(), a.ast)
                r.append(to_format('FPVal('))
                if sgnb and sgn.value != 0:
                    r.append(to_format('-'))
                r.append(to_format(sig))
                r.append(to_format('*(2**'))
                r.append(to_format(exp))
                r.append(to_format(', '))
                r.append(to_format(a.sort()))
                r.append(to_format('))'))
                return compose(r)
        else:
            if (a.isNaN()):
                return to_format(_z3_op_to_fpa_pretty_str[Z3_OP_FPA_NAN])
            elif (a.isInf()):
                if (a.isNegative()):
                    return to_format(_z3_op_to_fpa_pretty_str[Z3_OP_FPA_MINUS_INF])
                else:
                    return to_format(_z3_op_to_fpa_pretty_str[Z3_OP_FPA_PLUS_INF])
            elif (a.isZero()):
                if (a.isNegative()):
                    return to_format(_z3_op_to_fpa_pretty_str[Z3_OP_FPA_MINUS_ZERO])
                else:
                    return to_format(_z3_op_to_fpa_pretty_str[Z3_OP_FPA_PLUS_ZERO])
            else:
                z3._z3_assert(z3.is_fp_value(a), 'expecting FP num ast')
                r = []
                # Consistency fix: was the odd spelling (ctypes.c_int)(0).
                sgn = ctypes.c_int(0)
                sgnb = Z3_fpa_get_numeral_sign(a.ctx_ref(), a.ast, byref(sgn))
                sig = Z3_fpa_get_numeral_significand_string(a.ctx_ref(), a.ast)
                exp = Z3_fpa_get_numeral_exponent_string(a.ctx_ref(), a.ast)
                if sgnb and sgn.value != 0:
                    r.append(to_format('-'))
                r.append(to_format(sig))
                if (exp != '0'):
                    r.append(to_format('*(2**'))
                    r.append(to_format(exp))
                    r.append(to_format(')'))
                return compose(r)
    def pp_fp(self, a, d, xs):
        """Pretty-print a floating-point term (non-numeral)."""
        z3._z3_assert(isinstance(a, z3.FPRef), "type mismatch")
        k = a.decl().kind()
        op = '?'
        if (self.fpa_pretty and k in _z3_op_to_fpa_pretty_str):
            op = _z3_op_to_fpa_pretty_str[k]
        elif k in _z3_op_to_fpa_normal_str:
            op = _z3_op_to_fpa_normal_str[k]
        elif k in _z3_op_to_str:
            op = _z3_op_to_str[k]
        n = a.num_args()
        if self.fpa_pretty:
            # Infix rendering only when the rounding mode is the default;
            # otherwise fall through to the prefix form below.
            if self.is_infix(k) and n >= 3:
                rm = a.arg(0)
                if z3.is_fprm_value(rm) and z3._dflt_rm(a.ctx).eq(rm):
                    arg1 = to_format(self.pp_expr(a.arg(1), d+1, xs))
                    arg2 = to_format(self.pp_expr(a.arg(2), d+1, xs))
                    r = []
                    r.append(arg1)
                    r.append(to_format(' '))
                    r.append(to_format(op))
                    r.append(to_format(' '))
                    r.append(arg2)
                    return compose(r)
            elif k == Z3_OP_FPA_NEG:
                return compose([to_format('-') , to_format(self.pp_expr(a.arg(0), d+1, xs))])
        if k in _z3_op_to_fpa_normal_str:
            op = _z3_op_to_fpa_normal_str[k]
        r = []
        r.append(to_format(op))
        if not z3.is_const(a):
            r.append(to_format('('))
            first = True
            for c in a.children():
                if first:
                    first = False
                else:
                    r.append(to_format(', '))
                r.append(self.pp_expr(c, d+1, xs))
            r.append(to_format(')'))
            return compose(r)
        else:
            return to_format(a.as_string())
    def pp_prefix(self, a, d, xs):
        """Default prefix rendering: name(arg1, arg2, ...)."""
        r = []
        sz = 0
        for child in a.children():
            r.append(self.pp_expr(child, d+1, xs))
            sz = sz + 1
            if sz > self.max_args:
                r.append(self.pp_ellipses())
                break
        return seq1(self.pp_name(a), r)
    def is_assoc(self, k):
        return _is_assoc(k)
    def is_left_assoc(self, k):
        return _is_left_assoc(k)
    def infix_args_core(self, a, d, xs, r):
        """Append the rendered arguments of infix application *a* to *r*,
        flattening associative chains and inserting parentheses as needed."""
        sz = len(r)
        k = a.decl().kind()
        p = self.get_precedence(k)
        first = True
        for child in a.children():
            child_pp = self.pp_expr(child, d+1, xs)
            child_k = None
            if z3.is_app(child):
                child_k = child.decl().kind()
            if k == child_k and (self.is_assoc(k) or (first and self.is_left_assoc(k))):
                self.infix_args_core(child, d, xs, r)
                sz = len(r)
                if sz > self.max_args:
                    return
            elif self.is_infix_unary(child_k):
                child_p = self.get_precedence(child_k)
                if p > child_p or (_is_add(k) and _is_sub(child_k)) or (_is_sub(k) and first and _is_add(child_k)):
                    r.append(child_pp)
                else:
                    r.append(self.add_paren(child_pp))
                sz = sz + 1
            elif z3.is_quantifier(child):
                r.append(self.add_paren(child_pp))
            else:
                r.append(child_pp)
                sz = sz + 1
            if sz > self.max_args:
                r.append(self.pp_ellipses())
                return
            first = False
    def infix_args(self, a, d, xs):
        r = []
        self.infix_args_core(a, d, xs, r)
        return r
    def pp_infix(self, a, d, xs):
        k = a.decl().kind()
        if self.is_infix_compact(k):
            op = self.pp_name(a)
            return group(seq(self.infix_args(a, d, xs), op, False))
        else:
            op = self.pp_name(a)
            sz = _len(op)
            op.string = ' ' + op.string
            op.size = sz + 1
            return group(seq(self.infix_args(a, d, xs), op))
    def pp_unary(self, a, d, xs):
        k = a.decl().kind()
        p = self.get_precedence(k)
        child = a.children()[0]
        child_k = None
        if z3.is_app(child):
            child_k = child.decl().kind()
        child_pp = self.pp_expr(child, d+1, xs)
        if k != child_k and self.is_infix_unary(child_k):
            child_p = self.get_precedence(child_k)
            if p <= child_p:
                child_pp = self.add_paren(child_pp)
        if z3.is_quantifier(child):
            child_pp = self.add_paren(child_pp)
        name = self.pp_name(a)
        return compose(to_format(name), indent(_len(name), child_pp))
    def pp_power_arg(self, arg, d, xs):
        r = self.pp_expr(arg, d+1, xs)
        k = None
        if z3.is_app(arg):
            k = arg.decl().kind()
        if self.is_infix_unary(k) or (z3.is_rational_value(arg) and arg.denominator_as_long() != 1):
            return self.add_paren(r)
        else:
            return r
    def pp_power(self, a, d, xs):
        arg1_pp = self.pp_power_arg(a.arg(0), d+1, xs)
        arg2_pp = self.pp_power_arg(a.arg(1), d+1, xs)
        return group(seq((arg1_pp, arg2_pp), '**', False))
    def pp_neq(self):
        return to_format("!=")
    def pp_distinct(self, a, d, xs):
        if a.num_args() == 2:
            op = self.pp_neq()
            sz = _len(op)
            op.string = ' ' + op.string
            op.size = sz + 1
            return group(seq(self.infix_args(a, d, xs), op))
        else:
            return self.pp_prefix(a, d, xs)
    def pp_select(self, a, d, xs):
        if a.num_args() != 2:
            return self.pp_prefix(a, d, xs)
        else:
            arg1_pp = self.pp_expr(a.arg(0), d+1, xs)
            arg2_pp = self.pp_expr(a.arg(1), d+1, xs)
            return compose(arg1_pp, indent(2, compose(to_format('['), arg2_pp, to_format(']'))))
    def pp_unary_param(self, a, d, xs):
        p = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 0)
        arg = self.pp_expr(a.arg(0), d+1, xs)
        return seq1(self.pp_name(a), [ to_format(p), arg ])
    def pp_extract(self, a, d, xs):
        h = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 0)
        l = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 1)
        arg = self.pp_expr(a.arg(0), d+1, xs)
        return seq1(self.pp_name(a), [ to_format(h), to_format(l), arg ])
    def pp_pattern(self, a, d, xs):
        if a.num_args() == 1:
            return self.pp_expr(a.arg(0), d, xs)
        else:
            return seq1('MultiPattern', [ self.pp_expr(arg, d+1, xs) for arg in a.children() ])
    def pp_map(self, a, d, xs):
        r = []
        sz = 0
        f = z3.get_map_func(a)
        r.append(to_format(f.name()))
        for child in a.children():
            r.append(self.pp_expr(child, d+1, xs))
            sz = sz + 1
            if sz > self.max_args:
                r.append(self.pp_ellipses())
                break
        return seq1(self.pp_name(a), r)
    def pp_K(self, a, d, xs):
        return seq1(self.pp_name(a), [ self.pp_sort(a.domain()), self.pp_expr(a.arg(0), d+1, xs) ])
    def pp_atmost(self, a, d, f, xs):
        k = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 0)
        return seq1(self.pp_name(a), [seq3([ self.pp_expr(ch, d+1, xs) for ch in a.children()]), to_format(k)])
    def pp_pbcmp(self, a, d, f, xs):
        chs = a.children()
        rchs = range(len(chs))
        k = Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, 0)
        ks = [Z3_get_decl_int_parameter(a.ctx_ref(), a.decl().ast, i+1) for i in rchs]
        ls = [ seq3([self.pp_expr(chs[i], d+1,xs), to_format(ks[i])]) for i in rchs]
        return seq1(self.pp_name(a), [seq3(ls), to_format(k)])
    def pp_app(self, a, d, xs):
        """Dispatch on the kind of application *a* (values first)."""
        if z3.is_int_value(a):
            return self.pp_int(a)
        elif z3.is_rational_value(a):
            return self.pp_rational(a)
        elif z3.is_algebraic_value(a):
            return self.pp_algebraic(a)
        elif z3.is_bv_value(a):
            return self.pp_bv(a)
        elif z3.is_finite_domain_value(a):
            return self.pp_fd(a)
        elif z3.is_fprm_value(a):
            return self.pp_fprm_value(a)
        elif z3.is_fp_value(a):
            return self.pp_fp_value(a)
        elif z3.is_fp(a):
            return self.pp_fp(a, d, xs)
        elif z3.is_string_value(a):
            return self.pp_string(a)
        elif z3.is_const(a):
            return self.pp_const(a)
        else:
            f = a.decl()
            k = f.kind()
            if k == Z3_OP_POWER:
                return self.pp_power(a, d, xs)
            elif k == Z3_OP_DISTINCT:
                return self.pp_distinct(a, d, xs)
            elif k == Z3_OP_SELECT:
                return self.pp_select(a, d, xs)
            elif k == Z3_OP_SIGN_EXT or k == Z3_OP_ZERO_EXT or k == Z3_OP_REPEAT:
                return self.pp_unary_param(a, d, xs)
            elif k == Z3_OP_EXTRACT:
                return self.pp_extract(a, d, xs)
            elif k == Z3_OP_ARRAY_MAP:
                return self.pp_map(a, d, xs)
            elif k == Z3_OP_CONST_ARRAY:
                return self.pp_K(a, d, xs)
            elif k == Z3_OP_PB_AT_MOST:
                return self.pp_atmost(a, d, f, xs)
            elif k == Z3_OP_PB_LE:
                return self.pp_pbcmp(a, d, f, xs)
            elif k == Z3_OP_PB_GE:
                return self.pp_pbcmp(a, d, f, xs)
            elif z3.is_pattern(a):
                return self.pp_pattern(a, d, xs)
            elif self.is_infix(k):
                return self.pp_infix(a, d, xs)
            elif self.is_unary(k):
                return self.pp_unary(a, d, xs)
            else:
                return self.pp_prefix(a, d, xs)
    def pp_var(self, a, d, xs):
        # xs is the stack of bound-variable names (innermost last).
        idx = z3.get_var_index(a)
        sz = len(xs)
        if idx >= sz:
            return seq1('Var', (to_format(idx),))
        else:
            return to_format(xs[sz - idx - 1])
    def pp_quantifier(self, a, d, xs):
        ys = [ to_format(a.var_name(i)) for i in range(a.num_vars()) ]
        new_xs = xs + ys
        body_pp = self.pp_expr(a.body(), d+1, new_xs)
        if len(ys) == 1:
            ys_pp = ys[0]
        else:
            ys_pp = seq3(ys, '[', ']')
        if a.is_forall():
            header = 'ForAll'
        else:
            header = 'Exists'
        return seq1(header, (ys_pp, body_pp))
    def pp_expr(self, a, d, xs):
        """Render term *a* at depth *d* with bound-variable names *xs*."""
        self.visited = self.visited + 1
        if d > self.max_depth or self.visited > self.max_visited:
            return self.pp_ellipses()
        if z3.is_app(a):
            return self.pp_app(a, d, xs)
        elif z3.is_quantifier(a):
            return self.pp_quantifier(a, d, xs)
        elif z3.is_var(a):
            return self.pp_var(a, d, xs)
        else:
            return to_format(self.pp_unknown())
    def pp_seq_core(self, f, a, d, xs):
        self.visited = self.visited + 1
        if d > self.max_depth or self.visited > self.max_visited:
            return self.pp_ellipses()
        r = []
        sz = 0
        for elem in a:
            r.append(f(elem, d+1, xs))
            sz = sz + 1
            if sz > self.max_args:
                r.append(self.pp_ellipses())
                break
        return seq3(r, '[', ']')
    def pp_seq(self, a, d, xs):
        return self.pp_seq_core(self.pp_expr, a, d, xs)
    def pp_seq_seq(self, a, d, xs):
        return self.pp_seq_core(self.pp_seq, a, d, xs)
    def pp_model(self, m):
        r = []
        sz = 0
        for d in m:
            i = m[d]
            if isinstance(i, z3.FuncInterp):
                i_pp = self.pp_func_interp(i)
            else:
                i_pp = self.pp_expr(i, 0, [])
            name = self.pp_name(d)
            r.append(compose(name, to_format(' = '), indent(_len(name) + 3, i_pp)))
            sz = sz + 1
            if sz > self.max_args:
                r.append(self.pp_ellipses())
                break
        return seq3(r, '[', ']')
    def pp_func_entry(self, e):
        num = e.num_args()
        if num > 1:
            args = []
            for i in range(num):
                args.append(self.pp_expr(e.arg_value(i), 0, []))
            args_pp = group(seq3(args))
        else:
            args_pp = self.pp_expr(e.arg_value(0), 0, [])
        value_pp = self.pp_expr(e.value(), 0, [])
        return group(seq((args_pp, value_pp), self.pp_arrow()))
    def pp_func_interp(self, f):
        r = []
        sz = 0
        num = f.num_entries()
        for i in range(num):
            r.append(self.pp_func_entry(f.entry(i)))
            sz = sz + 1
            if sz > self.max_args:
                r.append(self.pp_ellipses())
                break
        if sz <= self.max_args:
            else_val = f.else_value()
            if else_val is None:  # idiom fix: compare with None using 'is'
                else_pp = to_format('#unspecified')
            else:
                else_pp = self.pp_expr(else_val, 0, [])
            r.append(group(seq((to_format('else'), else_pp), self.pp_arrow())))
        return seq3(r, '[', ']')
    def pp_list(self, a):
        r = []
        sz = 0
        for elem in a:
            if _support_pp(elem):
                r.append(self.main(elem))
            else:
                r.append(to_format(str(elem)))
            sz = sz + 1
            if sz > self.max_args:
                r.append(self.pp_ellipses())
                break
        if isinstance(a, tuple):
            return seq3(r)
        else:
            return seq3(r, '[', ']')
    def main(self, a):
        """Dispatch on the top-level type of *a*."""
        if z3.is_expr(a):
            return self.pp_expr(a, 0, [])
        elif z3.is_sort(a):
            return self.pp_sort(a)
        elif z3.is_func_decl(a):
            return self.pp_name(a)
        elif isinstance(a, z3.Goal) or isinstance(a, z3.AstVector):
            return self.pp_seq(a, 0, [])
        elif isinstance(a, z3.Solver):
            return self.pp_seq(a.assertions(), 0, [])
        elif isinstance(a, z3.Fixedpoint):
            return a.sexpr()
        elif isinstance(a, z3.Optimize):
            return a.sexpr()
        elif isinstance(a, z3.ApplyResult):
            return self.pp_seq_seq(a, 0, [])
        elif isinstance(a, z3.ModelRef):
            return self.pp_model(a)
        elif isinstance(a, z3.FuncInterp):
            return self.pp_func_interp(a)
        elif isinstance(a, list) or isinstance(a, tuple):
            return self.pp_list(a)
        else:
            return to_format(self.pp_unknown())
    def __call__(self, a):
        self.visited = 0
        return self.main(a)
class HTMLFormatter(Formatter):
    """Formatter variant that emits HTML markup (entities, sub/sup tags).

    Sizes passed to to_format() give the *rendered* width of entities,
    which differs from their source length.
    """
    def __init__(self):
        Formatter.__init__(self)
        global _html_ellipses
        self.ellipses = to_format(_html_ellipses)
    def pp_arrow(self):
        # The entity renders as a single character, hence size 1.
        return to_format(' &rarr;', 1)
    def pp_unknown(self):
        return '<b>unknown</b>'
    def pp_name(self, a):
        r = _html_op_name(a)
        if r[0] == '&' or r[0] == '/' or r[0] == '%':
            # Entity (or single-char operator): rendered width is 1.
            return to_format(r, 1)
        else:
            # Render a trailing '__suffix' in the name as a subscript.
            pos = r.find('__')
            if pos == -1 or pos == 0:
                return to_format(r)
            else:
                sz = len(r)
                if pos + 2 == sz:
                    return to_format(r)
                else:
                    return to_format('%s<sub>%s</sub>' % (r[0:pos], r[pos+2:sz]), sz - 2)
    def is_assoc(self, k):
        return _is_html_assoc(k)
    def is_left_assoc(self, k):
        return _is_html_left_assoc(k)
    def is_infix(self, a):
        return _is_html_infix(a)
    def is_unary(self, a):
        return _is_html_unary(a)
    def get_precedence(self, a):
        return _get_html_precedence(a)
    def pp_neq(self):
        return to_format("&ne;")
    def pp_power(self, a, d, xs):
        # Exponent goes in a <sup>, so the second argument never needs parens.
        arg1_pp = self.pp_power_arg(a.arg(0), d+1, xs)
        arg2_pp = self.pp_expr(a.arg(1), d+1, xs)
        return compose(arg1_pp, to_format('<sup>', 1), arg2_pp, to_format('</sup>', 1))
    def pp_var(self, a, d, xs):
        idx = z3.get_var_index(a)
        sz = len(xs)
        if idx >= sz:
            # 957 is the greek letter nu
            return to_format('&nu;<sub>%s</sub>' % idx, 1)
        else:
            return to_format(xs[sz - idx - 1])
    def pp_quantifier(self, a, d, xs):
        ys = [ to_format(a.var_name(i)) for i in range(a.num_vars()) ]
        new_xs = xs + ys
        body_pp = self.pp_expr(a.body(), d+1, new_xs)
        ys_pp = group(seq(ys))
        if a.is_forall():
            header = '&forall;'
        else:
            header = '&exist;'
        return group(compose(to_format(header, 1),
                             indent(1, compose(ys_pp, to_format(' :'), line_break(), body_pp))))
# Module-level singletons used by obj_to_string() and set_pp_option().
_PP = PP()
_Formatter = Formatter()
def set_pp_option(k, v):
    """Set pretty-printer option *k* to value *v*.

    'html_mode' and 'fpa_pretty' are handled specially; any other name is
    looked up as an attribute first on the PP singleton, then on the
    Formatter singleton.  Returns True when the option was recognized.
    """
    if k == 'html_mode':
        set_html_mode(bool(v))
        return True
    if k == 'fpa_pretty':
        set_fpa_pretty(bool(v))
        return True
    # Idiom fix below: compare with None using 'is not'.
    val = getattr(_PP, k, None)
    if val is not None:
        z3._z3_assert(type(v) == type(val), "Invalid pretty print option value")
        setattr(_PP, k, v)
        return True
    val = getattr(_Formatter, k, None)
    if val is not None:
        z3._z3_assert(type(v) == type(val), "Invalid pretty print option value")
        setattr(_Formatter, k, v)
        return True
    return False
def obj_to_string(a):
    """Render *a* through the module formatter and return the text."""
    buf = io.StringIO()
    _PP(buf, _Formatter(a))
    return buf.getvalue()
# Placeholder for an HTML output stream (not referenced in this section).
_html_out = None
def set_html_mode(flag=True):
    """Switch the module formatter between HTML and plain-text output."""
    global _Formatter
    _Formatter = HTMLFormatter() if flag else Formatter()
def set_fpa_pretty(flag=True):
    """Toggle pretty (unicode/infix) printing of floating-point operators.

    Mutates the shared operator-name table and infix map in place, so the
    setting affects every formatter instance.
    """
    global _Formatter
    global _z3_op_to_str
    _Formatter.fpa_pretty = flag
    if flag:
        for (_k,_v) in _z3_op_to_fpa_pretty_str.items():
            _z3_op_to_str[_k] = _v
        for _k in _z3_fpa_infix:
            _infix_map[_k] = True
    else:
        for (_k,_v) in _z3_op_to_fpa_normal_str.items():
            _z3_op_to_str[_k] = _v
        for _k in _z3_fpa_infix:
            _infix_map[_k] = False
# Pretty floating-point output is on by default.
set_fpa_pretty(True)
def get_fpa_pretty():
    """Return whether pretty floating-point printing is enabled."""
    # Bug fix: the original declared `global Formatter` (the class name,
    # not the `_Formatter` singleton); reading a global needs no
    # declaration at all, so the statement is dropped.
    return _Formatter.fpa_pretty
def in_html_mode():
    """True when the current module formatter renders HTML."""
    return isinstance(_Formatter, HTMLFormatter)
def pp(a):
    """Pretty-print *a* when supported, else fall back to plain print()."""
    if not _support_pp(a):
        print(a)
    else:
        print(obj_to_string(a))
def print_matrix(m):
    """Print matrix *m* (a list/tuple of rows); renders an HTML table in
    HTML mode."""
    z3._z3_assert(isinstance(m, (list, tuple)), "matrix expected")
    if not in_html_mode():
        print(obj_to_string(m))
        return
    print('<table cellpadding="2", cellspacing="0", border="1">')
    for row in m:
        z3._z3_assert(isinstance(row, (list, tuple)), "matrix expected")
        print('<tr>')
        for cell in row:
            print('<td>%s</td>' % cell)
        print('</tr>')
    print('</table>')
def insert_line_breaks(s, width):
    """Insert '<br />' roughly every *width* characters, breaking only at
    spaces (the space itself is replaced by the break)."""
    if len(s) <= width:
        return s
    out = io.StringIO()
    w = 0
    for ch in s:
        if w > width and ch == ' ':
            out.write(u('<br />'))
            w = 0
        else:
            out.write(u(ch))
            w = w + 1
    return out.getvalue()
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2019 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from trac.core import *
from trac.core import ComponentManager
import unittest
class TracErrorTestCase(unittest.TestCase):
    # Exercises the TracError constructor and its text conversion.
    def test_init(self):
        e = TracError("the message", "the title", True)
        self.assertEqual("the message", e.message)
        self.assertEqual("the title", e.title)
        self.assertTrue(e.show_traceback)
    def test_unicode(self):
        e = TracError("the message")
        # NOTE(review): `unicode` is Python 2 only; under Python 3 this
        # raises NameError — confirm the intended interpreter version.
        self.assertEqual("the message", unicode(e))
class ITest(Interface):
    # Minimal extension-point interface used by the test cases below.
    def test():
        """Dummy function."""
class IOtherTest(Interface):
    # Second interface, used to test multi-interface inheritance.
    def other_test():
        """Other dummy function."""
class ComponentTestCase(unittest.TestCase):
    """Tests for the Component/ComponentManager/ExtensionPoint machinery.

    Component classes are registered globally by the ComponentMeta
    metaclass at class-definition time, so setUp/tearDown swap out the
    global registry to isolate each test's locally defined components.
    """
    def setUp(self):
        from trac.core import ComponentMeta
        self.compmgr = ComponentManager()
        # Make sure we have no external components hanging around in the
        # component registry
        self.old_registry = ComponentMeta._registry
        ComponentMeta._registry = {}
    def tearDown(self):
        # Restore the original component registry
        from trac.core import ComponentMeta
        ComponentMeta._registry = self.old_registry
    def test_base_class_not_registered(self):
        """
        Make sure that the Component base class does not appear in the component
        registry.
        """
        from trac.core import ComponentMeta
        self.assertNotIn(Component, ComponentMeta._components)
        self.assertRaises(TracError, self.compmgr.__getitem__, Component)
    def test_abstract_component_not_registered(self):
        """
        Make sure that a Component class marked as abstract does not appear in
        the component registry.
        """
        from trac.core import ComponentMeta
        class AbstractComponent(Component):
            abstract = True
        self.assertNotIn(AbstractComponent, ComponentMeta._components)
        self.assertRaises(TracError, self.compmgr.__getitem__,
                          AbstractComponent)
    def test_unregistered_component(self):
        """
        Make sure the component manager refuses to manage classes not derived
        from `Component`.
        """
        class NoComponent(object):
            pass
        self.assertRaises(TracError, self.compmgr.__getitem__, NoComponent)
    def test_component_registration(self):
        """
        Verify that classes derived from `Component` are managed by the
        component manager.
        """
        class ComponentA(Component):
            pass
        self.assertTrue(self.compmgr[ComponentA])
        self.assertTrue(ComponentA(self.compmgr))
    def test_component_identity(self):
        """
        Make sure instantiating a component multiple times just returns the
        same instance again.
        """
        class ComponentA(Component):
            pass
        c1 = ComponentA(self.compmgr)
        c2 = ComponentA(self.compmgr)
        self.assertIs(c1, c2, 'Expected same component instance')
        c2 = self.compmgr[ComponentA]
        self.assertIs(c1, c2, 'Expected same component instance')
    def test_component_initializer(self):
        """
        Makes sure that a components' `__init__` method gets called.
        """
        class ComponentA(Component):
            def __init__(self):
                self.data = 'test'
        self.assertEqual('test', ComponentA(self.compmgr).data)
        # Components are singletons per manager, so the mutation persists.
        ComponentA(self.compmgr).data = 'newtest'
        self.assertEqual('newtest', ComponentA(self.compmgr).data)
    def test_inherited_component_initializer(self):
        """
        Makes sure that a the `__init__` method of a components' super-class
        gets called if the component doesn't override it.
        """
        class ComponentA(Component):
            def __init__(self):
                self.data = 'foo'
        class ComponentB(ComponentA):
            def __init__(self):
                self.data = 'bar'
        class ComponentC(ComponentB):
            pass
        self.assertEqual('bar', ComponentC(self.compmgr).data)
        ComponentC(self.compmgr).data = 'baz'
        self.assertEqual('baz', ComponentC(self.compmgr).data)
    def test_implements_called_outside_classdef(self):
        """
        Verify that calling implements() outside a class definition raises an
        `AssertionError`.
        """
        try:
            implements()
        except AssertionError:
            pass
        else:
            self.fail('Expected AssertionError')
    def test_implements_multiple(self):
        """
        Verify that a component "implementing" an interface more than once
        (e.g. through inheritance) is not called more than once from an
        extension point.
        """
        log = []
        class Parent(Component):
            abstract = True
            implements(ITest)
        class Child(Parent):
            implements(ITest)
            def test(self):
                log.append("call")
        class Other(Component):
            tests = ExtensionPoint(ITest)
        for test in Other(self.compmgr).tests:
            test.test()
        self.assertEqual(["call"], log)
    def test_attribute_access(self):
        """
        Verify that accessing undefined attributes on components raises an
        `AttributeError`.
        """
        class ComponentA(Component):
            pass
        comp = ComponentA(self.compmgr)
        try:
            comp.foo
            self.fail('Expected AttributeError')
        except AttributeError:
            pass
    def test_nonconforming_extender(self):
        """
        Verify that accessing a method of a declared extension point interface
        raises a normal `AttributeError` if the component does not implement
        the method.
        """
        class ComponentA(Component):
            tests = ExtensionPoint(ITest)
        class ComponentB(Component):
            implements(ITest)
        tests = iter(ComponentA(self.compmgr).tests)
        try:
            next(tests).test()
            self.fail('Expected AttributeError')
        except AttributeError:
            pass
    def test_extension_point_with_no_extension(self):
        """
        Verify that accessing an extension point with no extenders returns an
        empty list.
        """
        class ComponentA(Component):
            tests = ExtensionPoint(ITest)
        tests = iter(ComponentA(self.compmgr).tests)
        self.assertRaises(StopIteration, next, tests)
    def test_extension_point_with_one_extension(self):
        """
        Verify that a single component extending an extension point can be
        accessed through the extension point attribute of the declaring
        component.
        """
        class ComponentA(Component):
            tests = ExtensionPoint(ITest)
        class ComponentB(Component):
            implements(ITest)
            def test(self):
                return 'x'
        tests = iter(ComponentA(self.compmgr).tests)
        self.assertEqual('x', next(tests).test())
        self.assertRaises(StopIteration, next, tests)
    def test_extension_point_with_two_extensions(self):
        """
        Verify that two components extending an extension point can be accessed
        through the extension point attribute of the declaring component.
        """
        class ComponentA(Component):
            tests = ExtensionPoint(ITest)
        class ComponentB(Component):
            implements(ITest)
            def test(self):
                return 'x'
        class ComponentC(Component):
            implements(ITest)
            def test(self):
                return 'y'
        results = [test.test() for test in ComponentA(self.compmgr).tests]
        self.assertEqual(['x', 'y'], sorted(results))
    def test_inherited_extension_point(self):
        """
        Verify that extension points are inherited to sub-classes.
        """
        class BaseComponent(Component):
            tests = ExtensionPoint(ITest)
        class ConcreteComponent(BaseComponent):
            pass
        class ExtendingComponent(Component):
            implements(ITest)
            def test(self):
                return 'x'
        tests = iter(ConcreteComponent(self.compmgr).tests)
        self.assertEqual('x', next(tests).test())
        self.assertRaises(StopIteration, next, tests)
    def test_inherited_implements(self):
        """
        Verify that a component with a super-class implementing an extension
        point interface is also registered as implementing that interface.
        """
        class BaseComponent(Component):
            implements(ITest)
            abstract = True
        class ConcreteComponent(BaseComponent):
            pass
        from trac.core import ComponentMeta
        self.assertIn(ConcreteComponent, ComponentMeta._registry.get(ITest, []))
    def test_inherited_implements_multilevel(self):
        """
        Verify that extension point interfaces are inherited for more than
        one level of inheritance.
        """
        class BaseComponent(Component):
            implements(ITest)
            abstract = True
        class ChildComponent(BaseComponent):
            implements(IOtherTest)
            abstract = True
        class ConcreteComponent(ChildComponent):
            pass
        from trac.core import ComponentMeta
        self.assertIn(ConcreteComponent, ComponentMeta._registry.get(ITest, []))
        self.assertIn(ConcreteComponent, ComponentMeta._registry.get(IOtherTest, []))
    def test_component_manager_component(self):
        """
        Verify that a component manager can itself be a component with its own
        extension points.
        """
        class ManagerComponent(ComponentManager, Component):
            tests = ExtensionPoint(ITest)
            def __init__(self, foo, bar):
                ComponentManager.__init__(self)
                self.foo, self.bar = foo, bar
        class Extender(Component):
            implements(ITest)
            def test(self):
                return 'x'
        mgr = ManagerComponent('Test', 42)
        self.assertEqual(id(mgr), id(mgr[ManagerComponent]))
        tests = iter(mgr.tests)
        self.assertEqual('x', next(tests).test())
        self.assertRaises(StopIteration, next, tests)
    def test_component_manager_component_isolation(self):
        """
        Verify that a component manager that is also a component will only
        be listed in extension points for components instantiated in
        its scope.

        See bh:comment:5:ticket:438 and #11121
        """
        class ManagerComponentA(ComponentManager, Component):
            implements(ITest)
            def test(self):
                pass
        class ManagerComponentB(ManagerComponentA):
            pass
        class Tester(Component):
            tests = ExtensionPoint(ITest)
        mgrA = ManagerComponentA()
        mgrB = ManagerComponentB()
        self.assertEqual([mgrA], Tester(mgrA).tests)
        self.assertEqual([mgrB], Tester(mgrB).tests)
    def test_instantiation_doesnt_enable(self):
        """
        Make sure that a component disabled by the ComponentManager is not
        implicitly enabled by instantiating it directly.
        """
        class DisablingComponentManager(ComponentManager):
            def is_component_enabled(self, cls):
                return False
        class ComponentA(Component):
            pass
        mgr = DisablingComponentManager()
        instance = ComponentA(mgr)
        self.assertIsNone(mgr[ComponentA])
    def test_invalid_argument_raises(self):
        """
        AssertionError is raised when first argument to initializer is not a
        ComponentManager instance.
        """
        class ComponentA(Component):
            pass
        self.assertRaises(AssertionError, Component)
def test_suite():
    """Build the suite of all test cases defined in this module."""
    suite = unittest.TestSuite()
    for case in (TracErrorTestCase, ComponentTestCase):
        suite.addTest(unittest.makeSuite(case))
    return suite
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| |
#!/usr/bin/env python3
''' You know, for security. '''
import io
import os
import abc
import ssl
import sys
import pickle
import getpass
import hashlib
import argparse
import datetime
import configparser
import xmlrpc.client
import xmlrpc.server
import OpenSSL # pip3 install pyOpenSSL
class Role(abc.ABC):
    ''' abstract class implementing features common to all roles '''

    def __init__(self, args):
        ''' just store the parsed program arguments '''
        self._args = args

    @abc.abstractmethod
    def generate(self):
        '''
        method to generate the required cryptographic material
        for a specific role, must be overridden in all subclasses
        '''
        raise NotImplementedError

    @staticmethod
    def generate_keypair():
        ''' generates a new 4096-bit RSA private/public key pair '''
        key = OpenSSL.crypto.PKey()
        key.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
        return key

    def create_csr(self, key):
        ''' creates a certificate signing request '''
        csr = OpenSSL.crypto.X509Req()
        csr.set_pubkey(key)
        subj = csr.get_subject()
        # all X.509 subject fields come from the parsed CLI/config arguments
        subj.C = self._args.country
        subj.ST = self._args.state
        subj.L = self._args.location
        subj.O = self._args.orgname
        subj.OU = self._args.orgunit
        subj.CN = self._args.cname
        subj.emailAddress = self._args.email
        # prove to CA that we own the corresponding private key
        csr.sign(key, "sha512")
        return csr

    @staticmethod
    def confirm_signing(csr):
        ''' asks for confirmation before signing a CSR '''
        # X.509 extensions not implemented because :effort:,
        # so all certificates can be used for all operations
        # (eg. even a client certificate will be able to act as a CA)
        x509_to_human = {
            "CN": "Common Name",
            "C": "Country",
            "ST": "State",
            "L": "Location",
            "OU": "Organizational Unit",
            "O": "Organization",
            "emailAddress": "e-mail address"
        }
        # show the operator every subject component before asking
        for key, value in csr.get_subject().get_components():
            print("%s: %s" % (x509_to_human[key.decode("ascii")],
                              value.decode("ascii")),
                  file=sys.stderr)
        # grr, input() can't write the prompt to stderr
        sys.stderr.write("Really sign the above CSR? [yN] ")
        answer = input()
        if answer not in ["y", "Y", "yes", "YES"]:
            print("Not signing...", file=sys.stderr)
            return False
        return True

    def sign_csr(self, csr, cacert, cakey, serial):
        ''' makes CA sign a CSR '''
        cert = OpenSSL.crypto.X509()
        # copy data from CSR
        cert.set_subject(csr.get_subject())
        cert.set_pubkey(csr.get_pubkey())
        cert.set_version(csr.get_version())
        # add CA data
        cert.set_issuer(cacert.get_subject())
        # validity window: now .. now + sign_for_days, ASN.1 UTCTime format
        notbefore = datetime.datetime.utcnow()
        cert.set_notBefore(bytes(notbefore.strftime(r"%Y%m%d%H%M%SZ"),
                                 "ascii"))
        notafter = notbefore + datetime.timedelta(
            days=self._args.sign_for_days)
        cert.set_notAfter(bytes(notafter.strftime(r"%Y%m%d%H%M%SZ"),
                                "ascii"))
        cert.set_serial_number(serial)
        cert.sign(cakey, "sha512")
        return cert

    @staticmethod
    def _get_password():
        ''' password prompt for private key encryption '''
        return getpass.getpass().encode("utf_8")

    def write_key(self, key, keyfile):
        ''' dumps private key into a file-like object '''
        # private key is stored PEM-encoded, encrypted with AES-256-CBC
        dump = OpenSSL.crypto.dump_privatekey(
            OpenSSL.crypto.FILETYPE_PEM, key,
            cipher="aes-256-cbc", passphrase=self._get_password())
        keyfile.write(dump.decode("ascii"))

    def read_key(self, keyfile):
        ''' loads private key from a file-like object '''
        # the verification done here is enough
        dump = keyfile.read()
        try:
            key = OpenSSL.crypto.load_privatekey(
                OpenSSL.crypto.FILETYPE_PEM, dump,
                passphrase=self._get_password())
        except OpenSSL.crypto.Error:
            # wrong passphrase (or corrupt PEM) -- bail out
            print("Wrong password!", file=sys.stderr)
            sys.exit(1)
        assert key.check(), "Private key corrupt!"
        return key

    @staticmethod
    def write_pub(key, pubfile):
        ''' dumps public key into a file-like object '''
        dump = OpenSSL.crypto.dump_publickey(
            OpenSSL.crypto.FILETYPE_PEM, key)
        pubfile.write(dump.decode("ascii"))

    @staticmethod
    def read_pub(pubfile):
        ''' loads public key from a file-like object '''
        dump = pubfile.read()
        key = OpenSSL.crypto.load_publickey(
            OpenSSL.crypto.FILETYPE_PEM, dump)
        return key

    @staticmethod
    def write_csr(csr, csrfile):
        ''' dumps CSR into a file-like object '''
        dump = OpenSSL.crypto.dump_certificate_request(
            OpenSSL.crypto.FILETYPE_PEM, csr)
        csrfile.write(dump.decode("ascii"))

    @staticmethod
    def read_csr(csrfile):
        ''' loads CSR from a file-like object '''
        # the verification done here is NOT enough, it only checks that
        # the requester posesses the corresponding private key;
        # you need to do:
        # * manual subject validation (see `Role.confirm_signing()`)
        dump = csrfile.read()
        csr = OpenSSL.crypto.load_certificate_request(
            OpenSSL.crypto.FILETYPE_PEM, dump)
        assert csr.verify(csr.get_pubkey()), "Invalid requester signature!"
        return csr

    @staticmethod
    def write_crt(crt, crtfile):
        ''' dumps certificate into a file-like object '''
        dump = OpenSSL.crypto.dump_certificate(
            OpenSSL.crypto.FILETYPE_PEM, crt)
        crtfile.write(dump.decode("ascii"))

    @staticmethod
    def read_crt(crtfile):
        ''' loads certificate from a file-like object '''
        # no verification done here;
        # you need to check that:
        # * notBefore and notAfter times are in the past/future
        # * a trusted CA issued this certificate & the CA signature is valid
        # * the CA has not revoked this certificate (via CRL or OCSP)
        # The `Role.verify_crt()` function does all except the revoked check
        dump = crtfile.read()
        return OpenSSL.crypto.load_certificate(
            OpenSSL.crypto.FILETYPE_PEM, dump)

    @staticmethod
    def verify_crt(crt, cacerts):
        ''' verifies a certificate against a list of CA certs '''
        # CA cert must already be verified!
        assert not crt.has_expired(), "Certificate has expired!"
        store = OpenSSL.crypto.X509Store()
        for cacert in cacerts:
            store.add_cert(cacert)
        storectx = OpenSSL.crypto.X509StoreContext(store, crt)
        try:
            storectx.verify_certificate()
        except OpenSSL.crypto.X509StoreContextError:
            # normalize the library error into this module's assertion style
            raise AssertionError("Certificate not signed by the trusted CA!")

    @staticmethod
    def write_serial(ser, serfile):
        ''' dumps most recent CA serial number into a file-like object '''
        serfile.write("%d\n" % ser)

    @staticmethod
    def read_serial(serfile):
        ''' loads most recent CA serial number from a file-like object '''
        return int(serfile.read())

    @staticmethod
    def write_cat(cas, catfile):
        ''' dumps a list of trusted CAs to a file-like object '''
        # trust store is a pickled list of PEM blobs -- see read_cat()
        dump = pickle.dumps(cas)
        catfile.write(dump)

    @staticmethod
    def read_cat(catfile):
        ''' loads a list of trusted CAs from a file-like object '''
        # remember to check notAfter and CRL/OCSP
        # SECURITY NOTE: pickle.loads() executes arbitrary code if the
        # trust-store file is attacker-controlled; only ever load local
        # .cat files you wrote yourself (a plain PEM list would be safer)
        dump = catfile.read()
        return pickle.loads(dump)
class CertificateAuthority(Role):
    ''' the CA role class '''

    def __init__(self, args):
        ''' copy selfsign_for_days arg into sign_for_days '''
        # for easier CA generate() implementation, otherwise sign_csr()
        # would not be able to determine for how long CA cert should be valid
        super().__init__(args)
        if hasattr(self._args, "selfsign_for_days"):
            setattr(self._args, "sign_for_days", self._args.selfsign_for_days)
        # all CA material lives under <home>/ca.{key,pub,csr,pem,ser}
        self._path = os.path.join(self._args.home, "ca")

    def generate(self):
        ''' generates the whole CA shebang '''
        print("Generating a new keypair...", file=sys.stderr)
        key = self.generate_keypair()
        print("Saving public and encrypted private key...", file=sys.stderr)
        with open("%s.key" % self._path, "w") as keyfile:
            self.write_key(key, keyfile)
        with open("%s.pub" % self._path, "w") as pubfile:
            self.write_pub(key, pubfile)
        print("Creating a Certificate Signing Request.", file=sys.stderr)
        csr = self.create_csr(key)
        with open("%s.csr" % self._path, "w") as csrfile:
            self.write_csr(csr, csrfile)
        print("CSR saved to `%s.csr`, now going to self-sign it."
              % self._path, file=sys.stderr)
        # self-signed: the CSR acts as its own "CA certificate" (issuer ==
        # subject), and the serial counter starts at 1
        serial = 1
        crt = self.sign_csr(csr, csr, key, serial)
        with open("%s.pem" % self._path, "w") as crtfile:
            self.write_crt(crt, crtfile)
        with open("%s.ser" % self._path, "w") as serfile:
            self.write_serial(serial, serfile)
        print("Successfully self-signed the CA cert."
              " You can give the `%s.pem` file to other people to \"trust\"."
              % self._path, file=sys.stderr)

    def sign(self):
        ''' signs a client, server, or even another CA's CSR '''
        # see the note in confirm_signing()
        for csrfile in self._args.csr:
            csr = self.read_csr(csrfile)
            if self.confirm_signing(csr):
                print("Loading the CA private key to make signature...",
                      file=sys.stderr)
                # NOTE: CA cert/key are re-read (and the key passphrase
                # prompted) once per confirmed CSR
                with open("%s.pem" % self._path) as crtfile:
                    cacert = self.read_crt(crtfile)
                with open("%s.key" % self._path) as keyfile:
                    cakey = self.read_key(keyfile)
                with open("%s.ser" % self._path) as serfile:
                    serial = self.read_serial(serfile) + 1
                cert = self.sign_csr(csr, cacert, cakey, serial)
                # persist the incremented serial before handing out the cert
                with open("%s.ser" % self._path, "w") as serfile:
                    self.write_serial(serial, serfile)
                self.write_crt(cert, self._args.cert)
                print("Certificate successfully signed!", file=sys.stderr)
class Client(Role):
    ''' the client role class '''

    def __init__(self, args):
        ''' loads the client's trusted-CA store if one exists '''
        super().__init__(args)
        # all client material lives under <home>/client.{key,pub,csr,pem,cat}
        self._path = os.path.join(self._args.home, "client")
        self.trusted_cas = []
        if os.path.isfile("%s.cat" % self._path):
            with open("%s.cat" % self._path, "rb") as catfile:
                self.trusted_cas = self.read_cat(catfile)

    def generate(self):
        '''
        generates a client private key and CSR
        (give CSR to the CA and then import CA-signed cert with "--client")
        '''
        print("Generating a new keypair...", file=sys.stderr)
        key = self.generate_keypair()
        print("Saving public and encrypted private key...", file=sys.stderr)
        with open("%s.key" % self._path, "w") as keyfile:
            self.write_key(key, keyfile)
        with open("%s.pub" % self._path, "w") as pubfile:
            self.write_pub(key, pubfile)
        print("Creating a Certificate Signing Request.", file=sys.stderr)
        csr = self.create_csr(key)
        with open("%s.csr" % self._path, "w") as csrfile:
            self.write_csr(csr, csrfile)
        print("CSR saved to `%s.csr`, give it to your CA to sign"
              " and then import with 'client import --client'."
              " Don't forget to also 'client import --ca' the CA cert!"
              % self._path, file=sys.stderr)
        # create empty CA trust store
        with open("%s.cat" % self._path, "wb") as catfile:
            self.write_cat([], catfile)

    def cert_import(self):
        '''
        imports a certificate;
        either the client's CA-signed cert or a CA cert
        (determined by self._args.cert_type)
        '''
        if self._args.cert_type == "client":
            print("Importing our CA-signed cert...", file=sys.stderr)
            cert = self.read_crt(self._args.cert)
            # make sure it's mine: compare the cert's pubkey against ours
            with open("%s.pub" % self._path) as pubfile:
                mypub = self.read_pub(pubfile)
            dump1 = OpenSSL.crypto.dump_publickey(
                OpenSSL.crypto.FILETYPE_PEM, mypub)
            dump2 = OpenSSL.crypto.dump_publickey(
                OpenSSL.crypto.FILETYPE_PEM, cert.get_pubkey())
            if dump1 != dump2:
                print("This cert does not have our pubkey on it!",
                      file=sys.stderr)
                # BUGFIX: bail out like Server.cert_import() does; previously
                # execution fell through and saved a certificate that does
                # not belong to this client
                sys.exit(1)
            # is it signed by a trusted CA? or even signed at all?
            # we shall never know...
            #
            # not really a bug, tho -- I wanna be able to import a signed cert
            # without trusting the issuing CA first (or at all)
            # again, :effort:
            #
            # you could (and should) always manually inspect the stuff CA gives
            # you, anyway as you might even become an intermediary CA
            # by "mistake": https://goo.gl/oEQFMe #topkek
            print("Signed cert is valid, saving.", file=sys.stderr)
            with open("%s.pem" % self._path, "w") as crtfile:
                self.write_crt(cert, crtfile)
        elif self._args.cert_type == "ca":
            print("Importing a new trusted CA cert...", file=sys.stderr)
            cacert = self.read_crt(self._args.cert)
            if cacert.has_expired():
                print("The CA cert has expired!", file=sys.stderr)
                sys.exit(1)
            # not much to verify here, so just save it
            print("Saving CA cert to trusted cacert store.", file=sys.stderr)
            self.trusted_cas.append(OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM, cacert))
            with open("%s.cat" % self._path, "wb") as catfile:
                self.write_cat(self.trusted_cas, catfile)
        else:
            raise NotImplementedError

    def _makectx(self):
        ''' returns a TLS context '''
        # pylint: disable=no-member
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        # pylint: enable=no-member
        print("Loading client certificate and private key...", file=sys.stderr)
        ctx.load_cert_chain("%s.pem" % self._path,
                            keyfile="%s.key" % self._path,
                            password=self._get_password)
        # trust exactly the CAs imported into our .cat store
        for cacert in self.trusted_cas:
            ctx.load_verify_locations(cadata=cacert.decode("utf_8"))
        ctx.set_ciphers("HIGH")
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.check_hostname = False  # not very secure, but oh well
        # at least you don't have to mess with /etc/hosts for a simple PoC
        return ctx

    def put(self):
        ''' uploads a file to a server '''
        ctx = self._makectx()
        uri = "https://%s:%s" % (self._args.address, self._args.port)
        server = xmlrpc.client.ServerProxy(uri, context=ctx)
        # each input file is uploaded separately; server replies with its hash
        for inf in self._args.infile:
            result = server.upload(inf.read())
            print("File uploaded successfully! Hash: %s" % result,
                  file=sys.stderr)

    def get(self):
        ''' downloads a file from a server '''
        ctx = self._makectx()
        uri = "https://%s:%s" % (self._args.address, self._args.port)
        server = xmlrpc.client.ServerProxy(uri, context=ctx)
        # filehash is a 1-element list (argparse nargs=1)
        result = server.download(self._args.filehash[0])
        self._args.outfile.write(result)
        print("File downloaded successfully!", file=sys.stderr)
class Server(Role):
    ''' the server role class '''

    def __init__(self, args):
        ''' loads the server's trusted-CA store if one exists '''
        super().__init__(args)
        # all server material lives under <home>/server.{key,pub,csr,pem,cat}
        self._path = os.path.join(self._args.home, "server")
        self.trusted_cas = []
        print("Loading trusted CA certificates...", file=sys.stderr)
        if os.path.isfile("%s.cat" % self._path):
            with open("%s.cat" % self._path, "rb") as catfile:
                self.trusted_cas = self.read_cat(catfile)

    def generate(self):
        '''
        generates a server private key and CSR
        (give CSR to the CA and then import CA-signed cert with "--server")
        '''
        print("Generating a new keypair...", file=sys.stderr)
        key = self.generate_keypair()
        print("Saving public and encrypted private key...", file=sys.stderr)
        with open("%s.key" % self._path, "w") as keyfile:
            self.write_key(key, keyfile)
        with open("%s.pub" % self._path, "w") as pubfile:
            self.write_pub(key, pubfile)
        print("Creating a Certificate Signing Request.", file=sys.stderr)
        csr = self.create_csr(key)
        with open("%s.csr" % self._path, "w") as csrfile:
            self.write_csr(csr, csrfile)
        print("CSR saved to `%s.csr`, give it to your CA to sign"
              " and then import with 'server import --server'."
              " Don't forget to also 'server import --ca' the CA cert!"
              % self._path, file=sys.stderr)
        # create empty CA trust store
        print("Creating an empty CA trust store.", file=sys.stderr)
        with open("%s.cat" % self._path, "wb") as catfile:
            self.write_cat([], catfile)

    def cert_import(self):
        '''
        imports a certificate;
        either a CA cert, client cert or a CA-signed server cert
        (determined by self._args.cert_type)
        '''
        if self._args.cert_type == "server":
            print("Importing our CA-signed cert...", file=sys.stderr)
            cert = self.read_crt(self._args.cert)
            # make sure it's mine: compare the cert's pubkey against ours
            with open("%s.pub" % self._path) as pubfile:
                mypub = self.read_pub(pubfile)
            dump1 = OpenSSL.crypto.dump_publickey(
                OpenSSL.crypto.FILETYPE_PEM, mypub)
            dump2 = OpenSSL.crypto.dump_publickey(
                OpenSSL.crypto.FILETYPE_PEM, cert.get_pubkey())
            if dump1 != dump2:
                print("This cert does not have our pubkey on it!",
                      file=sys.stderr)
                sys.exit(1)
            print("Signed cert is valid, saving.", file=sys.stderr)
            with open("%s.pem" % self._path, "w") as crtfile:
                self.write_crt(cert, crtfile)
        elif self._args.cert_type == "ca":
            print("Importing a new trusted CA cert...", file=sys.stderr)
            cacert = self.read_crt(self._args.cert)
            if cacert.has_expired():
                print("The CA cert has expired!", file=sys.stderr)
                sys.exit(1)
            # not much to verify here, so just save it
            print("Saving CA cert to trusted cacert store.", file=sys.stderr)
            self.trusted_cas.append(OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM, cacert))
            with open("%s.cat" % self._path, "wb") as catfile:
                self.write_cat(self.trusted_cas, catfile)
        else:
            raise NotImplementedError

    def _makectx(self):
        ''' returns a TLS context '''
        # pylint: disable=no-member
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        # pylint: enable=no-member
        print("Loading server certificate and private key...", file=sys.stderr)
        ctx.load_cert_chain("%s.pem" % self._path,
                            keyfile="%s.key" % self._path,
                            password=self._get_password)
        for cacert in self.trusted_cas:
            ctx.load_verify_locations(cadata=cacert.decode("utf_8"))
        ctx.set_ciphers("HIGH")
        # mutual TLS: every client must present a cert signed by a trusted CA
        ctx.verify_mode = ssl.CERT_REQUIRED
        return ctx

    def start(self):
        ''' starts the network server for secure file up-/downloads '''
        if not (self._args.port > 0 and self._args.port < 65536):
            print("Please choose a valid port number.", file=sys.stderr)
            sys.exit(1)
        # create the storage directory if it does not exist
        if not os.path.isdir("%s-storage" % self._path):
            os.mkdir("%s-storage" % self._path)
        ctx = self._makectx()
        # bind_and_activate=False so the listening socket can be wrapped
        # in TLS before it is bound and activated
        server = xmlrpc.server.SimpleXMLRPCServer(
            (self._args.address, self._args.port), bind_and_activate=False)
        server.socket = ctx.wrap_socket(server.socket, server_side=True)
        server.server_bind()
        server.server_activate()
        print("Server listening on %s:%s..."
              % (self._args.address, self._args.port), file=sys.stderr)
        server.register_multicall_functions()
        server.register_function(self.upload, "upload")
        server.register_function(self.download, "download")
        server.serve_forever()

    def upload(self, data):
        ''' handles client uploads -- stores and returns sha1 '''
        # sha1 here is content addressing, not a security boundary
        data = data.encode("utf_8")
        digest = hashlib.sha1(data).hexdigest()
        with open("%s-storage/%s" % (self._path, digest), "wb") as outf:
            outf.write(data)
        return digest

    def download(self, digest):
        ''' handles client downloads -- sends file by sha1 hash '''
        digest = digest.lower()
        # SECURITY FIX: `digest` comes straight off the wire and is used in
        # a filesystem path; without validation a request like
        # "../../../etc/passwd" escapes the storage directory entirely.
        # A sha1 hex digest is exactly 40 hex characters.
        if len(digest) != 40 or any(c not in "0123456789abcdef"
                                    for c in digest):
            raise ValueError("invalid file hash")
        with open("%s-storage/%s" % (self._path, digest)) as inf:
            return inf.read()
class MyConfiguration(object):
    ''' manages the program's configuration file '''

    def __init__(self):
        ''' initializes in-memory config and populates it with defaults '''
        self.cfp = configparser.ConfigParser()
        # per-role options, followed by X.509 subject defaults shared by all
        role_defaults = {
            "ca": {"selfsign_for_days": "3650", "sign_for_days": "365"},
            "client": {"server_address": "localhost", "server_port": "1337"},
            "server": {"listen_address": "localhost", "listen_port": "1337"},
        }
        for section, options in role_defaults.items():
            self.cfp.add_section(section)
            for option, value in options.items():
                self.cfp.set(section, option, value)
            self.cfp.set(section, "x509_country", "CZ")
            self.cfp.set(section, "x509_state", "Jihomoravsky kraj")
            self.cfp.set(section, "x509_location", "Brno")
            self.cfp.set(section, "x509_orgname",
                         "Brno University of Technology")
            self.cfp.set(section, "x509_orgunit",
                         "Faculty of Electrical Engineering and Communication")
            self.cfp.set(section, "x509_cname", "%s.vutbr.cz" % section)
            self.cfp.set(section, "x509_email", "%s@vutbr.cz" % section)

    def write(self, configfile):
        ''' serializes the in-memory config to a file-like object '''
        self.cfp.write(configfile)

    def load(self, configfile):
        ''' reads configuration from a file-like object '''
        self.cfp.read_file(configfile)

    def open(self, path):
        ''' opens & loads config file by path or writes a default one if NX '''
        try:
            cfile = io.open(path)
        except FileNotFoundError:
            # no config yet: persist the defaults, then retry the open
            with io.open(path, "w") as cfile:
                self.write(cfile)
            return self.open(path)
        self.load(cfile)
        return cfile
def main():
    '''
    pretty much just parses commandline arguments & config file
    and then calls the appropriate class methods
    '''
    parser = argparse.ArgumentParser(
        description="A reasonably secure paste bin.")
    # per-user data dir (~/.<prog>) and config file (~/.<prog>rc)
    proghome = os.path.join(os.path.expanduser("~"),
                            ".%s" % parser.prog.split(".")[0])
    progrc = "%src" % proghome
    if not os.path.isdir(proghome):
        os.mkdir(proghome)
    config = MyConfiguration()

    # pylint: disable=too-few-public-methods
    class ConfigAction(argparse.Action):
        ''' (re)loads config from file if --config option passed '''
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, values)
            config.load(values)
    # pylint: enable=too-few-public-methods

    # global options
    parser.add_argument(
        "--home", action="store", default=proghome, metavar="DIRECTORY",
        help="directory for storing certificates; default: %(default)s")
    parser.add_argument(
        "--config", action=ConfigAction, type=argparse.FileType("r"),
        default=config.open(progrc), metavar="FILE",
        help="configuration file; default: %s" % progrc)
    subparsers = parser.add_subparsers(title="Role", dest="role")
    # "ca" subcommand
    parser_ca = subparsers.add_parser("ca", help="certificate authority")
    subparsers_ca = parser_ca.add_subparsers(title="Action", dest="action")
    # "ca generate" subcommand stub, will be filled out later on
    parser_ca_generate = subparsers_ca.add_parser(
        "generate",
        help="generate a new self-signed Certificate Authority certificate")
    # "ca sign" subcommand
    parser_ca_sign = subparsers_ca.add_parser(
        "sign", help="process a CSR (Certificate Signing Request)")
    parser_ca_sign.add_argument(
        "csr", action="store", type=argparse.FileType("r"), nargs=1,
        help="CSR file to process")
    parser_ca_sign.add_argument(
        "cert", action="store", type=argparse.FileType("w"),
        default=sys.stdout, nargs="?",
        help="file to output the signed certificate to; default: -")
    parser_ca_sign.add_argument(
        "--sign-for-days", action="store", type=int, metavar="DAYS",
        default=config.cfp.getint("ca", "sign_for_days"),
        help="how long should the cert be valid; default: %(default)s")
    # "client" subcommand
    parser_client = subparsers.add_parser("client", help="client")
    subparsers_client = parser_client.add_subparsers(
        title="Action", dest="action")
    # "client generate" stub, will be filled out later on
    parser_client_generate = subparsers_client.add_parser(
        "generate", help="generate a new client keypair and CSR")
    # "client import" subcommand
    parser_client_import = subparsers_client.add_parser(
        "import", help="import a certificate")
    parser_client_import.add_argument(
        "cert", action="store", type=argparse.FileType("r"), default=sys.stdin,
        nargs="?", help="certificate file to import; default: -")
    group_certtypes = parser_client_import.add_argument_group(
        "imported certificate type (required)")
    group_certtype = group_certtypes.add_mutually_exclusive_group(
        required=True)
    group_certtype.add_argument(
        "--ca", action="store_const", const="ca", dest="cert_type",
        help="CA certificate")
    group_certtype.add_argument(
        "--my", action="store_const", const="client", dest="cert_type",
        help="CA-signed client certificate")
    # "client put" subcommand
    parser_client_put = subparsers_client.add_parser(
        "put", help="send a file to a server")
    parser_client_put.add_argument(
        "--address", action="store",
        default=config.cfp.get("client", "server_address"),
        help="server to connect to; default: %(default)s")
    parser_client_put.add_argument(
        "--port", action="store", type=int,
        default=config.cfp.getint("client", "server_port"),
        help="server port to connect to; default: %(default)s")
    parser_client_put.add_argument(
        "infile", action="store", type=argparse.FileType("r"),
        default=sys.stdin,
        nargs="*", help="file(s) to upload to the server; default: -")
    # "client get" subcommand
    parser_client_get = subparsers_client.add_parser(
        "get", help="retrieve a file from a server")
    parser_client_get.add_argument(
        "--address", action="store",
        default=config.cfp.get("client", "server_address"),
        help="server to connect to; default: %(default)s")
    parser_client_get.add_argument(
        "--port", action="store", type=int,
        default=config.cfp.getint("client", "server_port"),
        help="server port to connect to; default: %(default)s")
    parser_client_get.add_argument(
        "filehash", action="store", nargs=1,
        help="sha1 hash of the file to download from the server")
    parser_client_get.add_argument(
        "outfile", action="store", type=argparse.FileType("w"),
        default=sys.stdout, nargs="?",
        help="where to save downloaded file; default: -")
    # "server" subcommand
    parser_server = subparsers.add_parser("server", help="server")
    subparsers_server = parser_server.add_subparsers(
        title="Action", dest="action")
    # "server generate" stub, filled out in the common loop below
    parser_server_generate = subparsers_server.add_parser(
        "generate", help="generate a new server keypair and CSR")
    # "server import" subcommand
    parser_server_import = subparsers_server.add_parser(
        "import", help="import a certificate")
    parser_server_import.add_argument(
        "cert", action="store", type=argparse.FileType("r"), default=sys.stdin,
        nargs="?", help="certificate file to import; default: -")
    group_certtypes = parser_server_import.add_argument_group(
        "imported certificate type (required)")
    group_certtype = group_certtypes.add_mutually_exclusive_group(
        required=True)
    group_certtype.add_argument(
        "--ca", action="store_const", const="ca", dest="cert_type",
        help="CA certificate")
    group_certtype.add_argument(
        "--my", action="store_const", const="server", dest="cert_type",
        help="CA-signed server certificate")
    # "server start" subcommand
    parser_server_start = subparsers_server.add_parser(
        "start", help="start accepting client connections")
    parser_server_start.add_argument(
        "--address", action="store",
        default=config.cfp.get("server", "listen_address"),
        help="network address to listen on; default: %(default)s")
    parser_server_start.add_argument(
        "--port", action="store", type=int,
        default=config.cfp.getint("server", "listen_port"),
        help="port to listen on")
    # "generate" subsubcommand options (common to all roles)
    for prsr in [parser_ca_generate, parser_client_generate,
                 parser_server_generate]:
        # prsr.prog looks like "<prog> <role> generate" -> extract the role
        role = prsr.prog.split()[-2]
        group_x509 = prsr.add_argument_group("X.509 attributes")
        group_x509.add_argument(
            "--country", action="store",
            default=config.cfp.get(role, "x509_country"),
            help="subject country (C); default: %(default)s")
        group_x509.add_argument(
            "--state", action="store",
            default=config.cfp.get(role, "x509_state"),
            help="subject state (S); default: %(default)s")
        group_x509.add_argument(
            "--location", action="store",
            default=config.cfp.get(role, "x509_location"),
            help="subject location (L); default: %(default)s")
        group_x509.add_argument(
            "--orgname", action="store",
            default=config.cfp.get(role, "x509_orgname"),
            help="subject organization name (O); default: %(default)s")
        group_x509.add_argument(
            "--orgunit", action="store",
            default=config.cfp.get(role, "x509_orgunit"),
            help="subject organizational unit name (OU); default: %(default)s")
        group_x509.add_argument(
            "--cname", action="store",
            default=config.cfp.get(role, "x509_cname"),
            help="subject common name (CN); default: %(default)s")
        group_x509.add_argument(
            "--email", action="store",
            default=config.cfp.get(role, "x509_email"),
            help="subject e-mail address (emailAddress); default: %(default)s")
        if role == "ca":
            # other roles generate only a CSR, CA self-signs
            prsr.add_argument(
                "--selfsign-for-days", action="store", metavar="DAYS",
                type=int, default=config.cfp.get(role, "selfsign_for_days"),
                help="how long should the cert be valid; default: %(default)s")
    args = parser.parse_args()
    if args.role is None:
        parser.print_usage()
    elif args.action is None:
        # NOTE(review): looking the sub-parser up through locals() is fragile;
        # it silently depends on the "parser_<role>" variable names above
        locals()["parser_%s" % args.role].print_usage()
    else:
        # look up class and method by "role" & "action" arguments and execute
        role = {"ca": CertificateAuthority,
                "client": Client,
                "server": Server}[args.role](args)
        # "import" not a valid python method name, gotta rename it
        method_translator = {"import": "cert_import"}
        getattr(role, method_translator.get(args.action, args.action))()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # exit quietly on Ctrl-C (e.g. while the server is running)
        pass
| |
# Copyright (c) 2007-2008 The PyAMF Project.
# See LICENSE for details.
"""
General gateway tests.
@author: U{Nick Joyce<mailto:nick@boxdesign.co.uk>}
@since: 0.1.0
"""
import unittest, sys
import pyamf
from pyamf import remoting
from pyamf.remoting import gateway, amf0
class TestService(object):
    """Minimal service fixture used throughout the gateway tests."""

    def spam(self):
        """Return a constant marker string."""
        return 'spam'

    def echo(self, x):
        """Return the argument unchanged."""
        return x
class FaultTestCase(unittest.TestCase):
    """Tests for creating, building and encoding remoting error faults."""

    def test_create(self):
        # a bare fault has empty string attributes
        x = remoting.ErrorFault()
        self.assertEquals(x.code, '')
        self.assertEquals(x.details, '')
        self.assertEquals(x.description, '')
        # keyword arguments are stored verbatim
        x = remoting.ErrorFault(code=404, details='Not Found', description='Spam eggs')
        self.assertEquals(x.code, 404)
        self.assertEquals(x.details, 'Not Found')
        self.assertEquals(x.description, 'Spam eggs')

    def test_build(self):
        # build_fault() turns the active exception info into an ErrorFault
        fault = None

        try:
            raise TypeError, "unknown type"
        except TypeError, e:
            fault = amf0.build_fault(*sys.exc_info())

        self.assertTrue(isinstance(fault, remoting.ErrorFault))
        self.assertEquals(fault.level, 'error')
        self.assertEquals(fault.code, 'TypeError')
        # traceback details must not contain escaped newline sequences
        self.assertTrue("\\n" not in fault.details)

    def test_encode(self):
        # round-trip a fault through an AMF0 encoder/decoder sharing a stream
        encoder = pyamf.get_encoder(pyamf.AMF0)
        decoder = pyamf.get_decoder(pyamf.AMF0)
        decoder.stream = encoder.stream

        try:
            raise TypeError, "unknown type"
        except TypeError, e:
            encoder.writeElement(amf0.build_fault(*sys.exc_info()))
            buffer = encoder.stream
            buffer.seek(0, 0)

            fault = decoder.readElement()
            # exc_info() is still valid inside the handler, so we can build
            # a reference fault for comparison
            old_fault = amf0.build_fault(*sys.exc_info())

            self.assertEquals(fault.level, old_fault.level)
            self.assertEquals(fault.type, old_fault.type)
            self.assertEquals(fault.code, old_fault.code)
            self.assertEquals(fault.details, old_fault.details)
            self.assertEquals(fault.description, old_fault.description)

    def test_explicit_code(self):
        # an exception class may override the fault code via _amf_code
        class X(Exception):
            _amf_code = 'Server.UnknownResource'

        try:
            raise X
        except X, e:
            fault = amf0.build_fault(*sys.exc_info())

        self.assertEquals(fault.code, 'Server.UnknownResource')
class ServiceWrapperTestCase(unittest.TestCase):
    """Tests for gateway.ServiceWrapper construction, equality and calls."""

    def test_create(self):
        x = gateway.ServiceWrapper('blah')
        self.assertEquals(x.service, 'blah')

    def test_create_preprocessor(self):
        x = gateway.ServiceWrapper('blah', preprocessor=ord)
        self.assertEquals(x.preprocessor, ord)

    def test_cmp(self):
        x = gateway.ServiceWrapper('blah')
        y = gateway.ServiceWrapper('blah')
        z = gateway.ServiceWrapper('bleh')

        # equality is by wrapped service, not wrapper identity
        self.assertEquals(x, y)
        self.assertNotEquals(y, z)

    def test_call(self):
        def add(x, y):
            # verify the call arguments arrive unchanged
            self.assertEquals(x, 1)
            self.assertEquals(y, 2)

            return x + y

        # a wrapped plain function is invoked as wrapper(method, params)
        x = gateway.ServiceWrapper(add)

        self.assertTrue(callable(x))
        self.assertEquals(x(None, [1, 2]), 3)

        # a non-callable, non-class service exposes no methods at all
        x = gateway.ServiceWrapper('blah')

        self.assertRaises(gateway.UnknownServiceMethodError, x, None, [])

        # a wrapped class dispatches by method name
        x = gateway.ServiceWrapper(TestService)
        self.assertRaises(gateway.UnknownServiceMethodError, x, None, [])

        self.assertEquals(x('spam', []), 'spam')

        self.assertRaises(gateway.UnknownServiceMethodError, x, 'xyx', [])
        # underscore-prefixed methods are private and never callable remotely
        self.assertRaises(gateway.InvalidServiceMethodError, x, '_private', [])

        self.assertEquals(x('echo', [x]), x)
class ServiceRequestTestCase(unittest.TestCase):
    """Tests for gateway.ServiceRequest creation and invocation."""

    def test_create(self):
        sw = gateway.ServiceWrapper(TestService)
        request = remoting.Envelope()

        x = gateway.ServiceRequest(request, sw, None)

        self.assertEquals(x.request, request)
        self.assertEquals(x.service, sw)
        self.assertEquals(x.method, None)

    def test_call(self):
        sw = gateway.ServiceWrapper(TestService)
        request = remoting.Envelope()

        # calling with no method selected must fail
        x = gateway.ServiceRequest(request, sw, None)
        self.assertRaises(gateway.UnknownServiceMethodError, x)

        # a named method is forwarded to the wrapped service
        x = gateway.ServiceRequest(request, sw, 'spam')
        self.assertEquals(x(), 'spam')

        x = gateway.ServiceRequest(request, sw, 'echo')
        self.assertEquals(x(x), x)
class ServiceCollectionTestCase(unittest.TestCase):
    """Tests for gateway.ServiceCollection membership semantics."""

    def test_contains(self):
        x = gateway.ServiceCollection()

        self.assertFalse(TestService in x)
        self.assertFalse('spam.eggs' in x)

        x['spam.eggs'] = gateway.ServiceWrapper(TestService)

        # membership works both by wrapped class and by registered name
        self.assertTrue(TestService in x)
        self.assertTrue('spam.eggs' in x)
class BaseGatewayTestCase(unittest.TestCase):
def test_create(self):
    # no argument and an empty dict both yield an empty service map
    x = gateway.BaseGateway()
    self.assertEquals(x.services, {})

    x = gateway.BaseGateway({})
    self.assertEquals(x.services, {})

    x = gateway.BaseGateway({})
    self.assertEquals(x.services, {})

    x = gateway.BaseGateway({'x': TestService})
    self.assertEquals(x.services, {'x': TestService})

    # services must be supplied as a mapping, not a list
    self.assertRaises(TypeError, gateway.BaseGateway, [])
def test_add_service(self):
    gw = gateway.BaseGateway()
    self.assertEquals(gw.services, {})

    # with no explicit name the service is registered by class name
    gw.addService(TestService)
    self.assertTrue(TestService in gw.services)
    self.assertTrue('TestService' in gw.services)

    del gw.services['TestService']

    # an explicit (dotted) name overrides the default
    gw.addService(TestService, 'spam.eggs')
    self.assertTrue(TestService in gw.services)
    self.assertTrue('spam.eggs' in gw.services)

    del gw.services['spam.eggs']

    class SpamService(object):
        def __str__(self):
            return 'spam'

        def __call__(*args, **kwargs):
            pass

    # a callable instance is registered under its str() representation
    x = SpamService()
    gw.addService(x)
    self.assertTrue(x in gw.services)
    self.assertTrue('spam' in gw.services)

    del gw.services['spam']
    self.assertEquals(gw.services, {})

    # arbitrary non-service objects are rejected
    self.assertRaises(TypeError, gw.addService, 1)

    # modules are accepted as services too (Python 2 `new` module)
    import new
    temp = new.module('temp')
    gw.addService(temp)

    self.assertTrue(temp in gw.services)
    self.assertTrue('temp' in gw.services)

    del gw.services['temp']
    self.assertEquals(gw.services, {})
def test_remove_service(self):
gw = gateway.BaseGateway({'test': TestService})
self.assertTrue('test' in gw.services)
wrapper = gw.services['test']
gw.removeService('test')
self.assertFalse('test' in gw.services)
self.assertFalse(TestService in gw.services)
self.assertFalse(wrapper in gw.services)
self.assertEquals(gw.services, {})
gw = gateway.BaseGateway({'test': TestService})
self.assertTrue(TestService in gw.services)
wrapper = gw.services['test']
gw.removeService(TestService)
self.assertFalse('test' in gw.services)
self.assertFalse(TestService in gw.services)
self.assertFalse(wrapper in gw.services)
self.assertEquals(gw.services, {})
gw = gateway.BaseGateway({'test': TestService})
self.assertTrue(TestService in gw.services)
wrapper = gw.services['test']
gw.removeService(wrapper)
self.assertFalse('test' in gw.services)
self.assertFalse(TestService in gw.services)
self.assertFalse(wrapper in gw.services)
self.assertEquals(gw.services, {})
self.assertRaises(NameError, gw.removeService, 'test')
self.assertRaises(NameError, gw.removeService, TestService)
self.assertRaises(NameError, gw.removeService, wrapper)
def test_service_request(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
message = remoting.Request('spam', [], envelope=envelope)
self.assertRaises(gateway.UnknownServiceError, gw.getServiceRequest,
message, 'spam')
message = remoting.Request('test.spam', [], envelope=envelope)
sr = gw.getServiceRequest(message, 'test.spam')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEquals(sr.request, envelope)
self.assertEquals(sr.service, TestService)
self.assertEquals(sr.method, 'spam')
message = remoting.Request('test')
sr = gw.getServiceRequest(message, 'test')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEquals(sr.request, None)
self.assertEquals(sr.service, TestService)
self.assertEquals(sr.method, None)
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
message = remoting.Request('test')
sr = gw.getServiceRequest(message, 'test')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEquals(sr.request, None)
self.assertEquals(sr.service, TestService)
self.assertEquals(sr.method, None)
# try to access an unknown service
message = remoting.Request('spam')
self.assertRaises(gateway.UnknownServiceError, gw.getServiceRequest,
message, 'spam')
# check x.x calls
message = remoting.Request('test.test')
sr = gw.getServiceRequest(message, 'test.test')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEquals(sr.request, None)
self.assertEquals(sr.service, TestService)
self.assertEquals(sr.method, 'test')
def test_long_service_name(self):
gw = gateway.BaseGateway({'a.c.b.d': TestService})
envelope = remoting.Envelope()
message = remoting.Request('a.c.b.d', [], envelope=envelope)
sr = gw.getServiceRequest(message, 'a.c.b.d.spam')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEquals(sr.request, envelope)
self.assertEquals(sr.service, TestService)
self.assertEquals(sr.method, 'spam')
def test_get_response(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
self.assertRaises(NotImplementedError, gw.getResponse, envelope)
def test_process_request(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
request = remoting.Request('test.spam', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Response))
self.assertEquals(response.status, remoting.STATUS_OK)
self.assertEquals(response.body, 'spam')
# Test a non existant service call
request = remoting.Request('nope', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Message))
self.assertEquals(response.status, remoting.STATUS_ERROR)
self.assertTrue(isinstance(response.body, remoting.ErrorFault))
self.assertEquals(response.body.code, 'Service.ResourceNotFound')
self.assertEquals(response.body.description, 'Unknown service nope')
def test_malformed_credentials_header(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
request = remoting.Request('test.spam', envelope=envelope)
request.headers['Credentials'] = {'spam': 'eggs'}
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Response))
self.assertEquals(response.status, remoting.STATUS_ERROR)
self.assertTrue(isinstance(response.body, remoting.ErrorFault))
self.assertEquals(response.body.code, 'KeyError')
def test_authenticate(self):
gw = gateway.BaseGateway({'test': TestService})
sr = gateway.ServiceRequest(None, gw.services['test'], None)
self.assertTrue(gw.authenticateRequest(sr, None, None))
def auth(u, p):
if u == 'spam' and p == 'eggs':
return True
return False
gw = gateway.BaseGateway({'test': TestService}, authenticator=auth)
self.assertFalse(gw.authenticateRequest(sr, None, None))
self.assertTrue(gw.authenticateRequest(sr, 'spam', 'eggs'))
class QueryBrowserTestCase(unittest.TestCase):
    """Tests for the DescribeService (query browser) request header."""

    def test_request(self):
        """A DescribeService header returns the service description."""
        gw = gateway.BaseGateway()

        def echo(x):
            return x

        gw.addService(echo, 'echo', description='This is a test')
        env = remoting.Envelope()
        req = remoting.Request('echo')
        env['/1'] = req
        # Ask for the service description instead of invoking it.
        req.headers['DescribeService'] = None
        reply = gw.getProcessor(req)(req)
        self.assertEquals(reply.status, remoting.STATUS_OK)
        self.assertEquals(reply.body, 'This is a test')
class AuthenticatorTestCase(unittest.TestCase):
    """Tests that an authenticator is honoured whether attached at
    gateway, service, or decorator level.

    tearDown fails any test in which self._auth was never invoked, so
    every test here must route a request through the authenticator.
    """

    def setUp(self):
        # Flipped to True by self._auth; checked in tearDown.
        self.called = False

    def tearDown(self):
        if self.called is False:
            self.fail("authenticator not called")

    def _auth(self, username, password):
        """Authenticator accepting only fred/wilma; records invocation."""
        self.called = True
        if username == 'fred' and password == 'wilma':
            return True
        return False

    def test_gateway(self):
        """Authenticator supplied on the gateway itself."""
        gw = gateway.BaseGateway(authenticator=self._auth)
        echo = lambda x: x
        gw.addService(echo, 'echo')
        envelope = remoting.Envelope()
        request = remoting.Request('echo', body=['spam'])
        # Credentials travel in an envelope header.
        envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEquals(response.status, remoting.STATUS_OK)
        self.assertEquals(response.body, 'spam')

    def test_service(self):
        """Authenticator supplied per-service via addService."""
        gw = gateway.BaseGateway()
        echo = lambda x: x
        gw.addService(echo, 'echo', authenticator=self._auth)
        envelope = remoting.Envelope()
        request = remoting.Request('echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEquals(response.status, remoting.STATUS_OK)
        self.assertEquals(response.body, 'spam')

    def test_class_decorator(self):
        """gateway.authenticate applied to a class method."""
        class TestService:
            def echo(self, x):
                return x

        TestService.echo = gateway.authenticate(TestService.echo, self._auth)
        gw = gateway.BaseGateway({'test': TestService})
        envelope = remoting.Envelope()
        request = remoting.Request('test.echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEquals(response.status, remoting.STATUS_OK)
        self.assertEquals(response.body, 'spam')

    def test_func_decorator(self):
        """gateway.authenticate applied to a plain function."""
        def echo(x):
            return x

        echo = gateway.authenticate(echo, self._auth)
        gw = gateway.BaseGateway({'echo': echo})
        envelope = remoting.Envelope()
        request = remoting.Request('echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEquals(response.status, remoting.STATUS_OK)
        self.assertEquals(response.body, 'spam')

    def test_expose_request_decorator(self):
        """An expose_request-decorated authenticator also gets the request."""
        def echo(x):
            return x

        def exposed_auth(request, username, password):
            # Delegates to _auth; the extra leading arg is the raw request.
            return self._auth(username, password)

        exposed_auth = gateway.expose_request(exposed_auth)
        echo = gateway.authenticate(echo, exposed_auth)
        gw = gateway.BaseGateway({'echo': echo})
        envelope = remoting.Envelope()
        request = remoting.Request('echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEquals(response.status, remoting.STATUS_OK)
        self.assertEquals(response.body, 'spam')

    def test_expose_request_keyword(self):
        """expose_request=True keyword instead of the decorator."""
        def echo(x):
            return x

        def exposed_auth(request, username, password):
            return self._auth(username, password)

        echo = gateway.authenticate(echo, exposed_auth, expose_request=True)
        gw = gateway.BaseGateway({'echo': echo})
        envelope = remoting.Envelope()
        request = remoting.Request('echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEquals(response.status, remoting.STATUS_OK)
        self.assertEquals(response.body, 'spam')
class ExposeRequestTestCase(unittest.TestCase):
    """Tests for gateway.BaseGateway.mustExposeRequest resolution."""

    def _service_request(self, gw):
        """Build a ServiceRequest for the 'test' service on *gw*."""
        envelope = remoting.Envelope()
        envelope['/1'] = remoting.Request('test')
        return gateway.ServiceRequest(envelope, gw.services['test'], None)

    def test_default(self):
        """By default the raw request is not exposed to services."""
        gw = gateway.BaseGateway()
        gw.addService(lambda x: x, 'test')
        self.assertFalse(gw.mustExposeRequest(self._service_request(gw)))

    def test_gateway(self):
        """expose_request=True on the gateway applies to all services."""
        gw = gateway.BaseGateway(expose_request=True)
        gw.addService(lambda x: x, 'test')
        self.assertTrue(gw.mustExposeRequest(self._service_request(gw)))

    def test_service(self):
        """expose_request=True on an individual service."""
        gw = gateway.BaseGateway()
        gw.addService(lambda x: x, 'test', expose_request=True)
        self.assertTrue(gw.mustExposeRequest(self._service_request(gw)))

    def test_decorator(self):
        """The expose_request decorator marks a plain function in place."""
        def echo(x):
            return x

        gateway.expose_request(echo)
        gw = gateway.BaseGateway()
        gw.addService(echo, 'test')
        self.assertTrue(gw.mustExposeRequest(self._service_request(gw)))
class PreProcessingTestCase(unittest.TestCase):
    """Tests for preprocessor resolution and invocation."""

    def _preproc(self):
        # Inert marker preprocessor; only its identity is asserted on.
        pass

    def test_default(self):
        """No preprocessor configured anywhere -> None."""
        gw = gateway.BaseGateway()
        gw.addService(lambda x: x, 'test')
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
        self.assertEquals(gw.getPreprocessor(service_request), None)

    def test_global(self):
        """Preprocessor set on the gateway applies to every service."""
        gw = gateway.BaseGateway(preprocessor=self._preproc)
        gw.addService(lambda x: x, 'test')
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
        self.assertEquals(gw.getPreprocessor(service_request), self._preproc)

    def test_service(self):
        """Preprocessor attached to an individual service."""
        gw = gateway.BaseGateway()
        gw.addService(lambda x: x, 'test', preprocessor=self._preproc)
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
        self.assertEquals(gw.getPreprocessor(service_request), self._preproc)

    def test_decorator(self):
        """gateway.preprocess attaches a preprocessor to a function."""
        def echo(x):
            return x

        gateway.preprocess(echo, self._preproc)
        gw = gateway.BaseGateway()
        gw.addService(echo, 'test')
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
        self.assertEquals(gw.getPreprocessor(service_request), self._preproc)

    def test_call(self):
        """The preprocessor runs (with the ServiceRequest) before the call."""
        def preproc(sr, *args):
            self.called = True
            # Invoked with the ServiceRequest and no extra positional args.
            self.assertEquals(args, tuple())
            self.assertTrue(isinstance(sr, gateway.ServiceRequest))

        gw = gateway.BaseGateway({'test': TestService}, preprocessor=preproc)
        envelope = remoting.Envelope()
        request = remoting.Request('test.spam', envelope=envelope)
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertTrue(isinstance(response, remoting.Response))
        self.assertEquals(response.status, remoting.STATUS_OK)
        self.assertEquals(response.body, 'spam')
        self.assertTrue(self.called)

    def test_fail(self):
        """An exception inside the preprocessor yields an error response."""
        def preproc(sr, *args):
            raise IndexError

        gw = gateway.BaseGateway({'test': TestService}, preprocessor=preproc)
        envelope = remoting.Envelope()
        request = remoting.Request('test.spam', envelope=envelope)
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertTrue(isinstance(response, remoting.Response))
        self.assertEquals(response.status, remoting.STATUS_ERROR)
def suite():
    """Build the gateway test suite, including optional adapter tests.

    Adapter test modules (wsgi, twisted, django, google) are only added
    when their third-party dependency is importable; missing ones are
    silently skipped.

    :return: the assembled ``unittest.TestSuite``
    """
    suite = unittest.TestSuite()
    # basics first
    suite.addTest(unittest.makeSuite(FaultTestCase))
    suite.addTest(unittest.makeSuite(ServiceWrapperTestCase))
    suite.addTest(unittest.makeSuite(ServiceRequestTestCase))
    suite.addTest(unittest.makeSuite(ServiceCollectionTestCase))
    suite.addTest(unittest.makeSuite(BaseGatewayTestCase))
    suite.addTest(unittest.makeSuite(QueryBrowserTestCase))
    suite.addTest(unittest.makeSuite(AuthenticatorTestCase))
    suite.addTest(unittest.makeSuite(ExposeRequestTestCase))
    suite.addTest(unittest.makeSuite(PreProcessingTestCase))
    try:
        import wsgiref
    except ImportError:
        wsgiref = None
    if wsgiref:
        from pyamf.tests.gateway import test_wsgi
        suite.addTest(test_wsgi.suite())
    try:
        from twisted import web
    except ImportError:
        web = None
    if web:
        from pyamf.tests.gateway import test_twisted
        suite.addTest(test_twisted.suite())
    try:
        import django
    except ImportError:
        django = None
    if django:
        from pyamf.tests.gateway import test_django
        suite.addTest(test_django.suite())
    try:
        from google.appengine.ext import webapp
    except ImportError:
        # The App Engine SDK may not be on sys.path yet; dev_appserver
        # knows the extra paths required, so retry after extending the path.
        # (Was "import dev_appserver, sys" — one import per line per PEP 8.)
        try:
            import dev_appserver
            import sys
            sys.path = dev_appserver.EXTRA_PATHS + sys.path
            from google.appengine.ext import webapp
        except ImportError:
            webapp = None
    if webapp:
        from pyamf.tests.gateway import test_google
        suite.addTest(test_google.suite())
    return suite
if __name__ == '__main__':
    # Allow running this module directly: execute the suite() built above.
    unittest.main(defaultTest='suite')
| |
# voter/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import Voter, VoterAddressManager, VoterDeviceLinkManager
from admin_tools.views import redirect_to_sign_in_page
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import render
from exception.models import handle_record_found_more_than_one_exception, handle_record_not_found_exception, \
handle_record_not_saved_exception
from import_export_facebook.models import FacebookLinkToVoter, FacebookManager
from organization.models import Organization, OrganizationManager, INDIVIDUAL
from position.controllers import merge_duplicate_positions_for_voter
from position.models import PositionEntered, PositionForFriends
from twitter.models import TwitterLinkToOrganization, TwitterLinkToVoter, TwitterUserManager
from voter.models import fetch_voter_id_from_voter_device_link, voter_has_authority, voter_setup
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, get_voter_api_device_id, set_voter_api_device_id, \
positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
def login_complete_view(request):
    """Relink the browser's voter_api_device_id to the just-signed-in voter.

    Reads the voter_api_device_id cookie, finds its VoterDeviceLink, and
    points that link at ``request.user``. Progress/errors are reported via
    Django messages; missing prerequisites redirect to the admin home.

    :param request: Django HttpRequest
    :return: HttpResponseRedirect to the login page (or admin home on
        missing prerequisites)
    """
    try:
        voter_api_device_id = get_voter_api_device_id(request)
        if not positive_value_exists(voter_api_device_id):
            messages.add_message(request, messages.INFO, 'Missing voter_api_device_id.')
            return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))
        voter_object = request.user
        if not voter_object:
            messages.add_message(request, messages.INFO, 'Missing voter.')
            return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))
        # TODO Write the Twitter or Facebook information to the voter table so we can access it via the APIs
        # Currently all of the twitter authentication for Django is in the separate social_auth* tables
        # Relink this voter_api_device_id to this Voter account
        voter_device_manager = VoterDeviceLinkManager()
        voter_device_link_results = voter_device_manager.retrieve_voter_device_link(voter_api_device_id)
        voter_device_link = voter_device_link_results['voter_device_link']
        update_voter_device_link_results = voter_device_manager.update_voter_device_link(
            voter_device_link, voter_object)
        if update_voter_device_link_results['voter_device_link_updated']:
            messages.add_message(request, messages.INFO, 'Voter updated.')
        else:
            messages.add_message(request, messages.INFO, 'Voter could not be relinked.')
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt. Still deliberately best-effort: any failure just
        # reports "not updated" rather than breaking the login flow.
        messages.add_message(request, messages.INFO, 'Voter not updated.')
    return HttpResponseRedirect(reverse('login_we_vote', args=()))
# This is open to anyone, and provides psql to update the database directly
def voter_authenticate_manually_view(request):
    """Render a page letting a developer sign in manually, showing
    ready-made psql statements for toggling this voter's role flags.

    The SQL strings are only *displayed* in the template, never executed
    here. If the browser has no voter_api_device_id cookie yet, a new
    voter/device link is created and the cookie is set on the response.

    :param request: Django HttpRequest
    :return: rendered voter/voter_authenticate_manually.html response
    """
    messages_on_stage = get_messages(request)
    voter_api_device_id = get_voter_api_device_id(request)  # We look in the cookies for voter_api_device_id
    store_new_voter_api_device_id_in_cookie = False
    if not positive_value_exists(voter_api_device_id):
        # Create a voter_device_id and voter in the database if one doesn't exist yet
        results = voter_setup(request)
        voter_api_device_id = results['voter_api_device_id']
        store_new_voter_api_device_id_in_cookie = results['store_new_voter_api_device_id_in_cookie']
    voter_id = fetch_voter_id_from_voter_device_link(voter_api_device_id)
    voter_id = convert_to_int(voter_id)
    voter_on_stage_found = False
    voter_on_stage = Voter()
    try:
        voter_on_stage = Voter.objects.get(id=voter_id)
        voter_on_stage_found = True
    except Voter.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except Voter.DoesNotExist:
        # This is fine, we will display an error
        pass
    if voter_on_stage_found:
        # Raw SQL snippets shown to the operator. voter_id was coerced to an
        # int by convert_to_int above, so the .format interpolation here is
        # not exposed to arbitrary user input.
        set_this_voter_as_admin = "UPDATE voter_voter SET is_admin=True WHERE id={voter_id};".format(voter_id=voter_id)
        unset_this_voter_as_admin = "UPDATE voter_voter SET is_admin=False WHERE id={voter_id};".format(
            voter_id=voter_id)
        set_as_partner_organization = "UPDATE voter_voter SET is_partner_organization=True WHERE id={voter_id};" \
                                      "".format(voter_id=voter_id)
        unset_as_partner_organization = "UPDATE voter_voter SET is_partner_organization=False WHERE id={voter_id};" \
                                        "".format(voter_id=voter_id)
        set_as_political_data_manager = "UPDATE voter_voter SET is_political_data_manager=True WHERE id={voter_id};" \
                                        "".format(voter_id=voter_id)
        unset_as_political_data_manager = "UPDATE voter_voter SET is_political_data_manager=False " \
                                          "WHERE id={voter_id};" \
                                          "".format(voter_id=voter_id)
        set_as_political_data_viewer = "UPDATE voter_voter SET is_political_data_viewer=True WHERE id={voter_id};" \
                                       "".format(voter_id=voter_id)
        unset_as_political_data_viewer = "UPDATE voter_voter SET is_political_data_viewer=False WHERE id={voter_id};" \
                                         "".format(voter_id=voter_id)
        set_as_verified_volunteer = "UPDATE voter_voter SET is_verified_volunteer=True WHERE id={voter_id};" \
                                    "".format(voter_id=voter_id)
        unset_as_verified_volunteer = "UPDATE voter_voter SET is_verified_volunteer=False WHERE id={voter_id};" \
                                      "".format(voter_id=voter_id)
        template_values = {
            'messages_on_stage': messages_on_stage,
            'voter': voter_on_stage,
            'voter_api_device_id': voter_api_device_id,
            # NOTE(review): is_authenticated is called as a method here
            # (Django < 1.10 style) — confirm against the project's Django version.
            'is_authenticated': request.user.is_authenticated(),
            'set_this_voter_as_admin': set_this_voter_as_admin,
            'unset_this_voter_as_admin': unset_this_voter_as_admin,
            'set_as_partner_organization': set_as_partner_organization,
            'unset_as_partner_organization': unset_as_partner_organization,
            'set_as_political_data_manager': set_as_political_data_manager,
            'unset_as_political_data_manager': unset_as_political_data_manager,
            'set_as_political_data_viewer': set_as_political_data_viewer,
            'unset_as_political_data_viewer': unset_as_political_data_viewer,
            'set_as_verified_volunteer': set_as_verified_volunteer,
            'unset_as_verified_volunteer': unset_as_verified_volunteer,
        }
    else:
        template_values = {
            'messages_on_stage': messages_on_stage,
        }
    response = render(request, 'voter/voter_authenticate_manually.html', template_values)
    # We want to store the voter_api_device_id cookie if it is new
    # if positive_value_exists(voter_api_device_id) and positive_value_exists(store_new_voter_api_device_id_in_cookie):
    # DALE 2016-02-15 Always set if we have a voter_api_device_id
    if positive_value_exists(store_new_voter_api_device_id_in_cookie):
        set_voter_api_device_id(request, response, voter_api_device_id)
    return response
# This is open to anyone, and provides psql to update the database directly
def voter_authenticate_manually_process_view(request):
    """Complete a Django sign-in for the voter tied to this browser.

    The voter_api_device_id cookie identifies the Voter row; only voters
    flagged is_admin are actually logged in. Outcomes are reported via
    Django messages and followed by a redirect.
    """
    device_id = get_voter_api_device_id(request)  # We look in the cookies for voter_api_device_id
    voter_id = convert_to_int(fetch_voter_id_from_voter_device_link(device_id))
    voter_signed_in = False
    try:
        voter_on_stage = Voter.objects.get(id=voter_id)
        if not voter_on_stage.is_admin:
            messages.add_message(request, messages.INFO, 'This account does not have Admin access.')
        else:
            # Admin account: complete Django authentication against the
            # standard ModelBackend and mark the session signed in.
            voter_on_stage.backend = 'django.contrib.auth.backends.ModelBackend'
            login(request, voter_on_stage)
            messages.add_message(request, messages.INFO, 'Voter logged in.')
            voter_signed_in = True
    except Voter.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
        messages.add_message(request, messages.ERROR, 'More than one voter found. Voter not logged in.')
    except Voter.DoesNotExist:
        # Expected when the device link points at a deleted/absent voter.
        messages.add_message(request, messages.ERROR, 'Voter not found. Voter not logged in.')
    if voter_signed_in:
        return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))
    return HttpResponseRedirect(reverse('voter:authenticate_manually', args=()))
def _apply_voter_form_fields(voter, first_name, last_name, twitter_handle, email):
    """Copy any supplied (non-False) form fields onto *voter*.

    Fields default to False (not '') in the caller so "absent from the
    form" can be distinguished from "submitted blank".

    :return: True if at least one attribute was changed
    """
    changed = False
    if first_name is not False:
        voter.first_name = first_name
        changed = True
    if last_name is not False:
        voter.last_name = last_name
        changed = True
    if twitter_handle is not False:
        voter.twitter_screen_name = twitter_handle
        changed = True
    if email is not False:
        voter.email = email
        changed = True
    return changed


@login_required
def voter_edit_process_view(request):
    """
    Process the new or edit voter forms
    :param request:
    :return:
    """
    authority_required = {'admin'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    # NOTE: create_twitter_link_to_voter is processed in voter_edit_view
    voter_on_stage = Voter()
    voter_id = convert_to_int(request.POST.get('voter_id', 0))
    first_name = request.POST.get('first_name', False)
    last_name = request.POST.get('last_name', False)
    twitter_handle = request.POST.get('twitter_handle', False)
    email = request.POST.get('email', False)
    password_text = request.POST.get('password_text', False)
    # Check to see if this voter is already being used anywhere
    voter_on_stage_found = False
    try:
        voter_query = Voter.objects.filter(id=voter_id)
        if len(voter_query):
            voter_on_stage = voter_query[0]
            voter_on_stage_found = True
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)
    if voter_on_stage_found:
        try:
            # Update existing voter
            at_least_one_value_changed = _apply_voter_form_fields(
                voter_on_stage, first_name, last_name, twitter_handle, email)
            if password_text is not False:
                voter_on_stage.set_password(password_text)
                at_least_one_value_changed = True
            if at_least_one_value_changed:
                voter_on_stage.save()
            if password_text:
                # Check to see if a login has already been created
                pass
            messages.add_message(request, messages.INFO, 'Voter information updated.')
        except Exception as e:
            handle_record_not_saved_exception(e, logger=logger)
            messages.add_message(request, messages.ERROR, 'Could not save voter.')
    else:
        try:
            # Create new
            voter_on_stage = Voter.objects.create_user(email, email, password_text)
            # Update new voter
            at_least_one_value_changed = _apply_voter_form_fields(
                voter_on_stage, first_name, last_name, twitter_handle, email)
            if at_least_one_value_changed:
                voter_on_stage.save()
            messages.add_message(request, messages.INFO, 'Added new Voter.')
        except Exception as e:
            # BUGFIX: the failure was previously swallowed without logging;
            # record it the same way the update path does.
            handle_record_not_saved_exception(e, logger=logger)
            messages.add_message(request, messages.ERROR, 'Could not save voter.')
    return HttpResponseRedirect(reverse('voter:voter_edit', args=(voter_id,)))
@login_required
def voter_edit_view(request, voter_id=0, voter_we_vote_id=""):
    """
    Admin page for reviewing one voter record: the Facebook/Twitter link
    records attached to it, possible duplicate voters/organizations that share
    the same social-media data, and the voter's public and friends-only
    positions. Several query-string flags trigger one-shot repair actions
    (creating link records, creating an organization, cross-linking or
    merging positions).
    :param request:
    :param voter_id: Local database id of the voter (preferred lookup).
    :param voter_we_vote_id: we_vote_id lookup, used when voter_id is not set.
    :return: Rendered 'voter/voter_edit.html'.
    """
    authority_required = {'admin'} # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    # One-shot repair actions requested via query-string flags
    create_facebook_link_to_voter = request.GET.get('create_facebook_link_to_voter', False)
    create_organization_for_voter = request.GET.get('create_organization_for_voter', False)
    create_twitter_link_to_voter = request.GET.get('create_twitter_link_to_voter', False)
    cross_link_all_voter_positions = request.GET.get('cross_link_all_voter_positions', False)
    merge_duplicate_positions = request.GET.get('merge_duplicate_positions', False)
    voter_id = convert_to_int(voter_id)
    voter_on_stage = Voter()
    voter_on_stage_found = False
    facebook_id_from_link_to_voter = 0
    facebook_id_from_link_to_voter_for_another_voter = False
    twitter_id_from_link_to_voter = 0
    twitter_id_from_link_to_voter_for_another_voter = False
    positions_cross_linked = 0
    positions_not_cross_linked = 0
    status_print_list = ""
    facebook_manager = FacebookManager()
    organization_manager = OrganizationManager()
    twitter_user_manager = TwitterUserManager()
    try:
        if positive_value_exists(voter_id):
            voter_on_stage = Voter.objects.get(id=voter_id)
        elif positive_value_exists(voter_we_vote_id):
            voter_on_stage = Voter.objects.get(we_vote_id=voter_we_vote_id)
        # NOTE(review): the found flag is set even when neither id was
        # provided (leaving a blank Voter) — confirm that is intended
        voter_on_stage_found = True
    except Voter.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except Voter.DoesNotExist:
        # This is fine
        pass
    if voter_on_stage_found:
        # Get FacebookLinkToVoter
        try:
            facebook_link_to_voter = FacebookLinkToVoter.objects.get(
                voter_we_vote_id__iexact=voter_on_stage.we_vote_id)
            if positive_value_exists(facebook_link_to_voter.facebook_user_id):
                facebook_id_from_link_to_voter = facebook_link_to_voter.facebook_user_id
                # Attach to the voter object so the template can display it
                voter_on_stage.facebook_id_from_link_to_voter = facebook_link_to_voter.facebook_user_id
        except FacebookLinkToVoter.DoesNotExist:
            pass
        # Get TwitterLinkToVoter
        try:
            twitter_link_to_voter = TwitterLinkToVoter.objects.get(
                voter_we_vote_id__iexact=voter_on_stage.we_vote_id)
            if positive_value_exists(twitter_link_to_voter.twitter_id):
                twitter_id_from_link_to_voter = twitter_link_to_voter.twitter_id
                voter_on_stage.twitter_id_from_link_to_voter = twitter_link_to_voter.twitter_id
                # We reach out for the twitter_screen_name
                voter_on_stage.twitter_screen_name_from_link_to_voter = \
                    twitter_link_to_voter.fetch_twitter_handle_locally_or_remotely()
        except TwitterLinkToVoter.DoesNotExist:
            pass
        # Get TwitterLinkToOrganization
        # Prefer the twitter_id from the link record; fall back to the one
        # stored directly on the voter record
        try:
            if positive_value_exists(twitter_id_from_link_to_voter):
                twitter_id_to_search = twitter_id_from_link_to_voter
                twitter_link_to_organization_twitter_id_source_text = "FROM TW_LINK_TO_VOTER"
            else:
                twitter_id_to_search = voter_on_stage.twitter_id
                twitter_link_to_organization_twitter_id_source_text = "FROM VOTER RECORD"
            if positive_value_exists(twitter_id_to_search):
                twitter_link_to_organization = TwitterLinkToOrganization.objects.get(
                    twitter_id=twitter_id_to_search)
                if positive_value_exists(twitter_link_to_organization.twitter_id):
                    voter_on_stage.organization_we_vote_id_from_link_to_organization = \
                        twitter_link_to_organization.organization_we_vote_id
                    voter_on_stage.twitter_id_from_link_to_organization = twitter_link_to_organization.twitter_id
                    # We reach out for the twitter_screen_name
                    voter_on_stage.twitter_screen_name_from_link_to_organization = \
                        twitter_link_to_organization.fetch_twitter_handle_locally_or_remotely()
                    voter_on_stage.twitter_link_to_organization_twitter_id_source_text = \
                        twitter_link_to_organization_twitter_id_source_text
        except TwitterLinkToOrganization.DoesNotExist:
            pass
        # ########################################
        # Looks for other voters that have the same Facebook data
        at_least_one_voter_facebook_value_found = False
        voter_facebook_filters = []
        if positive_value_exists(voter_on_stage.facebook_id):
            new_filter = Q(facebook_id=voter_on_stage.facebook_id)
            voter_facebook_filters.append(new_filter)
            at_least_one_voter_facebook_value_found = True
        voter_list_duplicate_facebook_updated = []
        if at_least_one_voter_facebook_value_found:
            voter_list_duplicate_facebook = Voter.objects.all()
            # Add the first query
            final_filters = []
            if len(voter_facebook_filters):
                final_filters = voter_facebook_filters.pop()
                # ...and "OR" the remaining items in the list
                for item in voter_facebook_filters:
                    final_filters |= item
            voter_list_duplicate_facebook = voter_list_duplicate_facebook.filter(final_filters)
            voter_list_duplicate_facebook = voter_list_duplicate_facebook.exclude(id=voter_on_stage.id)
            # Cap the number of potential duplicates examined
            voter_list_duplicate_facebook = voter_list_duplicate_facebook[:100]
            for one_duplicate_voter in voter_list_duplicate_facebook:
                try:
                    facebook_link_to_another_voter = FacebookLinkToVoter.objects.get(
                        voter_we_vote_id__iexact=one_duplicate_voter.we_vote_id)
                    if positive_value_exists(facebook_link_to_another_voter.facebook_user_id):
                        facebook_id_from_link_to_voter_for_another_voter = True
                        one_duplicate_voter.facebook_id_from_link_to_voter = \
                            facebook_link_to_another_voter.facebook_user_id
                except FacebookLinkToVoter.DoesNotExist:
                    pass
                voter_list_duplicate_facebook_updated.append(one_duplicate_voter)
            # NOTE(review): result discarded — appears to be a leftover
            # forced-evaluation call; verify it can be removed
            list(voter_list_duplicate_facebook_updated)
        # ########################################
        # Looks for voters that have the same Twitter data
        at_least_one_voter_twitter_value_found = False
        voter_twitter_filters = []
        if positive_value_exists(voter_on_stage.twitter_id):
            new_filter = Q(twitter_id=voter_on_stage.twitter_id)
            voter_twitter_filters.append(new_filter)
            at_least_one_voter_twitter_value_found = True
        if positive_value_exists(voter_on_stage.twitter_screen_name):
            new_filter = Q(twitter_screen_name__iexact=voter_on_stage.twitter_screen_name)
            voter_twitter_filters.append(new_filter)
            at_least_one_voter_twitter_value_found = True
        voter_list_duplicate_twitter_updated = []
        if at_least_one_voter_twitter_value_found:
            voter_list_duplicate_twitter = Voter.objects.all()
            # Add the first query
            final_filters = []
            if len(voter_twitter_filters):
                final_filters = voter_twitter_filters.pop()
                # ...and "OR" the remaining items in the list
                for item in voter_twitter_filters:
                    final_filters |= item
            voter_list_duplicate_twitter = voter_list_duplicate_twitter.filter(final_filters)
            voter_list_duplicate_twitter = voter_list_duplicate_twitter.exclude(id=voter_on_stage.id)
            # Cap the number of potential duplicates examined
            voter_list_duplicate_twitter = voter_list_duplicate_twitter[:100]
            for one_duplicate_voter in voter_list_duplicate_twitter:
                try:
                    twitter_link_to_another_voter = TwitterLinkToVoter.objects.get(
                        voter_we_vote_id__iexact=one_duplicate_voter.we_vote_id)
                    if positive_value_exists(twitter_link_to_another_voter.twitter_id):
                        twitter_id_from_link_to_voter_for_another_voter = True
                        one_duplicate_voter.twitter_id_from_link_to_voter = twitter_link_to_another_voter.twitter_id
                        # We reach out for the twitter_screen_name
                        one_duplicate_voter.twitter_screen_name_from_link_to_voter = \
                            twitter_link_to_another_voter.fetch_twitter_handle_locally_or_remotely()
                except TwitterLinkToVoter.DoesNotExist:
                    pass
                voter_list_duplicate_twitter_updated.append(one_duplicate_voter)
            # NOTE(review): result discarded — appears to be a leftover
            # forced-evaluation call; verify it can be removed
            list(voter_list_duplicate_twitter_updated)
        # ########################################
        # Looks for orgs that have the same Twitter data
        # (excluding the org connected by linked_organization_we_vote_id)
        org_twitter_filters = []
        at_least_one_twitter_value_found = False
        if positive_value_exists(voter_on_stage.twitter_id):
            new_filter = Q(twitter_user_id=voter_on_stage.twitter_id)
            org_twitter_filters.append(new_filter)
            at_least_one_twitter_value_found = True
        if positive_value_exists(voter_on_stage.twitter_screen_name):
            new_filter = Q(organization_twitter_handle__iexact=voter_on_stage.twitter_screen_name)
            org_twitter_filters.append(new_filter)
            at_least_one_twitter_value_found = True
        organization_list_with_duplicate_twitter_updated = []
        final_filters = []
        if at_least_one_twitter_value_found:
            # Add the first query
            if len(org_twitter_filters):
                final_filters = org_twitter_filters.pop()
                # ...and "OR" the remaining items in the list
                for item in org_twitter_filters:
                    final_filters |= item
            organization_list_with_duplicate_twitter = Organization.objects.all()
            organization_list_with_duplicate_twitter = organization_list_with_duplicate_twitter.filter(final_filters)
            organization_list_with_duplicate_twitter = organization_list_with_duplicate_twitter.exclude(
                we_vote_id=voter_on_stage.linked_organization_we_vote_id)
            for one_duplicate_organization in organization_list_with_duplicate_twitter:
                try:
                    # Annotate each duplicate org with the voter linked to it, if any
                    linked_voter = Voter.objects.get(
                        linked_organization_we_vote_id__iexact=one_duplicate_organization.we_vote_id)
                    one_duplicate_organization.linked_voter = linked_voter
                except Voter.DoesNotExist:
                    pass
                organization_list_with_duplicate_twitter_updated.append(one_duplicate_organization)
        # ####################################
        # Find the voter that has this organization as their linked_organization_we_vote_id
        linked_organization_we_vote_id_list_updated = []
        linked_organization_we_vote_id_list = Organization.objects.all()
        linked_organization_we_vote_id_list = linked_organization_we_vote_id_list.filter(
            we_vote_id__iexact=voter_on_stage.linked_organization_we_vote_id)
        linked_organization_found = False
        for one_linked_organization in linked_organization_we_vote_id_list:
            try:
                linked_voter = Voter.objects.get(
                    linked_organization_we_vote_id__iexact=one_linked_organization.we_vote_id)
                one_linked_organization.linked_voter = linked_voter
                linked_organization_found = True
            except Voter.DoesNotExist:
                linked_organization_found = False
                pass
            linked_organization_we_vote_id_list_updated.append(one_linked_organization)
        # Do some checks on all of the public positions owned by this voter
        # Positions can be attached either by voter_we_vote_id or by the
        # voter's linked organization — match on both
        position_filters = []
        new_filter = Q(voter_we_vote_id__iexact=voter_on_stage.we_vote_id)
        position_filters.append(new_filter)
        if positive_value_exists(voter_on_stage.linked_organization_we_vote_id):
            new_filter = Q(organization_we_vote_id__iexact=voter_on_stage.linked_organization_we_vote_id)
            position_filters.append(new_filter)
        final_position_filters = []
        if len(position_filters):
            final_position_filters = position_filters.pop()
            # ...and "OR" the remaining items in the list
            for item in position_filters:
                final_position_filters |= item
        # PositionEntered
        public_positions_owned_by_this_voter = PositionEntered.objects.all()
        public_positions_owned_by_this_voter = public_positions_owned_by_this_voter.filter(final_position_filters)
        if merge_duplicate_positions:
            public_positions_owned_by_this_voter = \
                merge_duplicate_positions_for_voter(public_positions_owned_by_this_voter)
        # PositionForFriends
        positions_for_friends_owned_by_this_voter = PositionForFriends.objects.all()
        positions_for_friends_owned_by_this_voter = \
            positions_for_friends_owned_by_this_voter.filter(final_position_filters)
        if merge_duplicate_positions:
            positions_for_friends_owned_by_this_voter = \
                merge_duplicate_positions_for_voter(positions_for_friends_owned_by_this_voter)
        # Stamp this voter's id/we_vote_id and linked organization onto every
        # position owned by the voter, counting successes and failures.
        # Skipped when the Twitter link belongs to another voter.
        if cross_link_all_voter_positions and voter_on_stage.linked_organization_we_vote_id \
                and not twitter_id_from_link_to_voter_for_another_voter:
            linked_organization_id = \
                organization_manager.fetch_organization_id(voter_on_stage.linked_organization_we_vote_id)
            if positive_value_exists(linked_organization_id):
                for one_public_position in public_positions_owned_by_this_voter:
                    voter_info_saved = False
                    voter_info_not_saved = False
                    organization_info_saved = False
                    organization_info_not_saved = False
                    # Update the voter information
                    try:
                        one_public_position.voter_id = voter_on_stage.id
                        one_public_position.voter_we_vote_id = voter_on_stage.we_vote_id
                        one_public_position.save()
                        voter_info_saved = True
                    except Exception as e:
                        voter_info_not_saved = True
                    # Update the organization information
                    try:
                        one_public_position.organization_id = linked_organization_id
                        one_public_position.organization_we_vote_id = voter_on_stage.linked_organization_we_vote_id
                        one_public_position.save()
                        organization_info_saved = True
                    except Exception as e:
                        organization_info_not_saved = True
                    if voter_info_saved or organization_info_saved:
                        positions_cross_linked += 1
                    if voter_info_not_saved or organization_info_not_saved:
                        positions_not_cross_linked += 1
                for one_position_for_friends in positions_for_friends_owned_by_this_voter:
                    voter_info_saved = False
                    voter_info_not_saved = False
                    organization_info_saved = False
                    organization_info_not_saved = False
                    # Update the voter information
                    try:
                        one_position_for_friends.voter_id = voter_on_stage.id
                        one_position_for_friends.voter_we_vote_id = voter_on_stage.we_vote_id
                        one_position_for_friends.save()
                        voter_info_saved = True
                    except Exception as e:
                        voter_info_not_saved = True
                    # Update the organization information
                    try:
                        one_position_for_friends.organization_id = linked_organization_id
                        one_position_for_friends.organization_we_vote_id = voter_on_stage.linked_organization_we_vote_id
                        one_position_for_friends.save()
                        organization_info_saved = True
                    except Exception as e:
                        organization_info_not_saved = True
                    if voter_info_saved or organization_info_saved:
                        positions_cross_linked += 1
                    if voter_info_not_saved or organization_info_not_saved:
                        positions_not_cross_linked += 1
        if create_facebook_link_to_voter:
            # Only create if neither this voter nor another voter already owns the link
            if not facebook_id_from_link_to_voter \
                    and not facebook_id_from_link_to_voter_for_another_voter:
                # If here, we want to create a FacebookLinkToVoter
                create_results = facebook_manager.create_facebook_link_to_voter(voter_on_stage.facebook_id,
                                                                                voter_on_stage.we_vote_id)
                messages.add_message(request, messages.INFO, 'FacebookLinkToVoter created:' +
                                     " " + create_results['status'])
                if positive_value_exists(create_results['facebook_link_to_voter_saved']):
                    facebook_link_to_voter = create_results['facebook_link_to_voter']
                    if positive_value_exists(facebook_link_to_voter.facebook_user_id):
                        voter_on_stage.facebook_id_from_link_to_voter = facebook_link_to_voter.facebook_user_id
            else:
                if facebook_id_from_link_to_voter:
                    messages.add_message(request, messages.ERROR, 'FacebookLinkToVoter could not be created: '
                                                                  'There is already a FacebookLinkToVoter for this voter.')
                if facebook_id_from_link_to_voter_for_another_voter:
                    messages.add_message(request, messages.ERROR,
                                         'FacebookLinkToVoter could not be created: '
                                         'There is already a FacebookLinkToVoter for ANOTHER voter.')
        if create_twitter_link_to_voter:
            # Only create if neither this voter nor another voter already owns the link
            if not twitter_id_from_link_to_voter \
                    and not twitter_id_from_link_to_voter_for_another_voter:
                # If here, we want to create a TwitterLinkToVoter
                create_results = twitter_user_manager.create_twitter_link_to_voter(voter_on_stage.twitter_id,
                                                                                   voter_on_stage.we_vote_id)
                messages.add_message(request, messages.INFO, 'TwitterLinkToVoter created:' +
                                     " " + create_results['status'])
                if positive_value_exists(create_results['twitter_link_to_voter_saved']):
                    twitter_link_to_voter = create_results['twitter_link_to_voter']
                    if positive_value_exists(twitter_link_to_voter.twitter_id):
                        voter_on_stage.twitter_id_from_link_to_voter = twitter_link_to_voter.twitter_id
                        # We reach out for the twitter_screen_name
                        voter_on_stage.twitter_screen_name_from_link_to_voter = \
                            twitter_link_to_voter.fetch_twitter_handle_locally_or_remotely()
            else:
                if twitter_id_from_link_to_voter:
                    messages.add_message(request, messages.ERROR, 'TwitterLinkToVoter could not be created: '
                                                                  'There is already a TwitterLinkToVoter for this voter.')
                if twitter_id_from_link_to_voter_for_another_voter:
                    messages.add_message(request, messages.ERROR,
                                         'TwitterLinkToVoter could not be created: '
                                         'There is already a TwitterLinkToVoter for ANOTHER voter.')
        if create_organization_for_voter:
            do_not_create_organization = linked_organization_found
            if do_not_create_organization:
                do_not_create_organization_message = "Organization could not be created. "
                if linked_organization_found:
                    do_not_create_organization_message += "Linked organization found. "
                messages.add_message(request, messages.ERROR, do_not_create_organization_message)
            else:
                # Create an INDIVIDUAL organization from the voter's own data
                organization_name = voter_on_stage.get_full_name()
                organization_website = ""
                organization_twitter_handle = ""
                organization_twitter_id = ""
                organization_email = ""
                organization_facebook = ""
                organization_image = voter_on_stage.voter_photo_url()
                organization_type = INDIVIDUAL
                create_results = organization_manager.create_organization(
                    organization_name, organization_website, organization_twitter_handle,
                    organization_email, organization_facebook, organization_image, organization_twitter_id,
                    organization_type)
                if create_results['organization_created']:
                    organization = create_results['organization']
                    try:
                        voter_on_stage.linked_organization_we_vote_id = organization.we_vote_id
                        voter_on_stage.save()
                        status_print_list += "Organization created.<br />"
                        if twitter_id_from_link_to_voter:
                            results = twitter_user_manager.create_twitter_link_to_organization(
                                twitter_id_from_link_to_voter, organization.we_vote_id)
                            if results['twitter_link_to_organization_saved']:
                                twitter_link_to_organization = results['twitter_link_to_organization']
                    except Exception as e:
                        messages.add_message(request, messages.ERROR,
                                             "Could not update voter.linked_organization_we_vote_id.")
                else:
                    messages.add_message(request, messages.ERROR, "Could not create organization.")
        if positive_value_exists(positions_cross_linked):
            status_print_list += "positions_cross_linked: " + str(positions_cross_linked) + "<br />"
        if positive_value_exists(positions_not_cross_linked):
            status_print_list += "positions_not_cross_linked: " + str(positions_not_cross_linked) + "<br />"
        messages.add_message(request, messages.INFO, status_print_list)
        messages_on_stage = get_messages(request)
        template_values = {
            'messages_on_stage': messages_on_stage,
            'voter_id': voter_on_stage.id,
            'voter': voter_on_stage,
            'voter_list_duplicate_facebook': voter_list_duplicate_facebook_updated,
            'voter_list_duplicate_twitter': voter_list_duplicate_twitter_updated,
            'organization_list_with_duplicate_twitter': organization_list_with_duplicate_twitter_updated,
            'linked_organization_we_vote_id_list': linked_organization_we_vote_id_list_updated,
            'public_positions_owned_by_this_voter': public_positions_owned_by_this_voter,
            'positions_for_friends_owned_by_this_voter': positions_for_friends_owned_by_this_voter,
        }
    else:
        messages_on_stage = get_messages(request)
        template_values = {
            'messages_on_stage': messages_on_stage,
            'voter_id': 0,
        }
    return render(request, 'voter/voter_edit.html', template_values)
@login_required
def voter_change_authority_process_view(request):
    """
    Grant or remove an existing account volunteer or admin rights
    :param request: expects 'voter_id', and optionally 'authority_granted'
        and/or 'authority_removed' (one of the keys below) in the query string
    :return: redirect back to the voter_edit page
    """
    authority_required = {'admin'} # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    voter_on_stage = Voter()
    authority_changed = False
    voter_id = request.GET.get('voter_id', 0)
    voter_id = convert_to_int(voter_id)
    authority_granted = request.GET.get('authority_granted', False)
    authority_removed = request.GET.get('authority_removed', False)
    # Map each authority name used in the URL to the Voter boolean field it
    # controls. One table replaces two duplicated if/elif chains, so adding a
    # new authority only requires one new entry.
    authority_field_by_name = {
        'admin': 'is_admin',
        'partner_organization': 'is_partner_organization',
        'political_data_manager': 'is_political_data_manager',
        'political_data_viewer': 'is_political_data_viewer',
        'verified_volunteer': 'is_verified_volunteer',
    }
    # Check to see if this voter is already being used anywhere
    voter_on_stage_found = False
    try:
        voter_query = Voter.objects.filter(id=voter_id)
        if len(voter_query):
            voter_on_stage = voter_query[0]
            voter_on_stage_found = True
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)
    if voter_on_stage_found:
        try:
            # Grant first, then remove — same order as the original chains,
            # so granting and removing the same authority nets to removed
            if authority_granted in authority_field_by_name:
                setattr(voter_on_stage, authority_field_by_name[authority_granted], True)
                authority_changed = True
            if authority_removed in authority_field_by_name:
                setattr(voter_on_stage, authority_field_by_name[authority_removed], False)
                authority_changed = True
            if authority_changed:
                voter_on_stage.save()
            messages.add_message(request, messages.INFO, 'Voter authority updated.')
        except Exception as e:
            handle_record_not_saved_exception(e, logger=logger)
            messages.add_message(request, messages.ERROR, 'Could not save voter.')
    else:
        messages.add_message(request, messages.ERROR, 'Could not save change to authority.')
    return HttpResponseRedirect(reverse('voter:voter_edit', args=(voter_id,)))
@login_required
def voter_list_view(request):
    """
    Admin page listing voters, optionally narrowed by a free-text search
    across name, id, email and social-media fields.
    :param request:
    :return: rendered 'voter/voter_list.html'
    """
    authority_required = {'admin'} # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    voter_search = request.GET.get('voter_search', '')
    voter_api_device_id = get_voter_api_device_id(request) # We look in the cookies for voter_api_device_id
    voter_id = fetch_voter_id_from_voter_device_link(voter_api_device_id)
    voter_id = convert_to_int(voter_id)
    messages_on_stage = get_messages(request)
    if positive_value_exists(voter_search):
        # OR together a case-insensitive "contains" match on every searchable field
        searchable_fields = [
            'first_name',
            'middle_name',
            'last_name',
            'we_vote_id',
            'email',
            'facebook_email',
            'twitter_screen_name',
            'twitter_name',
            'linked_organization_we_vote_id',
        ]
        combined_filter = None
        for field_name in searchable_fields:
            one_filter = Q(**{field_name + '__icontains': voter_search})
            combined_filter = one_filter if combined_filter is None else combined_filter | one_filter
        voter_list = Voter.objects.all().filter(combined_filter)
    else:
        # No search term: show admins and verified volunteers first
        voter_list = Voter.objects.order_by('-is_admin', '-is_verified_volunteer', 'email', 'twitter_screen_name',
                                            'linked_organization_we_vote_id', 'facebook_email',
                                            'last_name', 'first_name')
    voter_list = voter_list[:200]
    template_values = {
        'messages_on_stage': messages_on_stage,
        'voter_list': voter_list,
        'voter_id_signed_in': voter_id,
        'voter_search': voter_search,
    }
    return render(request, 'voter/voter_list.html', template_values)
@login_required
def voter_summary_view(request, voter_id):
    """
    Admin page showing one voter plus the addresses attached to that voter.
    :param request:
    :param voter_id: local database id of the voter
    :return: rendered 'voter/voter_summary.html'
    """
    authority_required = {'admin'} # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    messages_on_stage = get_messages(request)
    voter_id = convert_to_int(voter_id)
    # Look up the voter; a missing record is not an error on this page
    voter_on_stage = Voter()
    voter_on_stage_found = False
    try:
        voter_on_stage = Voter.objects.get(id=voter_id)
        voter_on_stage_found = True
    except Voter.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except Voter.DoesNotExist:
        # This is fine, create new
        pass
    address_results = VoterAddressManager().retrieve_voter_address_list(voter_id=voter_id)
    voter_address_list = address_results['voter_address_list']
    # Start from the always-present values and add voter data only when found
    template_values = {
        'messages_on_stage': messages_on_stage,
    }
    if voter_on_stage_found:
        template_values['voter'] = voter_on_stage
        template_values['voter_address_list'] = voter_address_list
    return render(request, 'voter/voter_summary.html', template_values)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for a Session-like object that handles threads and recovery.
Based on an original design of Illia Polosukhin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import coordinated_session
from tensorflow.contrib.learn.python.learn import monitored_session
from tensorflow.contrib.learn.python.learn import recoverable_session
from tensorflow.contrib.learn.python.learn import summary_writer_cache
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as training_saver
from tensorflow.python.training import session_manager as sm
from tensorflow.python.training import training_util
# TODO(touts): Share that with the Supervisor.
class Scaffold(object):
  """Structure to create or gather pieces commonly needed to train a model.
  When you build a model for training you usually need ops to initialize
  variables, a `Saver` to checkpoint them, an op to collect summaries for
  the visualizer, and so on.
  Various libraries built on top of the core TensorFlow library take care of
  creating some or all of these pieces and storing them in well known
  collections in the graph. The `Scaffold` class helps pick these pieces from
  the graph collections, creating and adding them to the collections if needed.
  If you call the scaffold constructor without any arguments it will pick
  pieces from the collections, creating default ones if needed. You can pass
  arguments to the constructor to provide your own pieces. Pieces that you
  pass to the constructor are not added to the graph collections.
  The following pieces are directly accessible as attributes of the `Scaffold`
  object:
  * `saver`: A `tf.Saver` object taking care of saving the variables. Picked
    from and stored into the `SAVERS` collection in the graph.
  * `init_op`: An op to run to initialize the variables. Picked from and
    stored into the `INIT_OP` collection in the graph.
  * `ready_op`: An op to verify that the variables are initialized. Picked
    from and stored into the `READY_OP` collection in the graph.
  * `local_init_op`: An op to initialize the local variables. Picked
    from and stored into the `LOCAL_INIT_OP` collection in the graph.
  * `summary_op`: An op to run and merge the summaries in the graph. Picked
    from and stored into the `SUMMARY_OP` collection in the graph.
  * `global_step`: A tensor containing the global step counter. Picked
    from and stored into the `GLOBAL_STEP` collection in the graph.
  You can also pass the following additional pieces to the constructor:
  * `init_feed_dict`: A session feed dictionary that should be used when
    running the init op.
  * `init_fn`: A callable to run after the init op to perform additional
    initializations. The callable will be called as
    `init_fn(scaffold, session)`.
  """
  # TODO(touts): consider adding the output dir and summary writer (cached)?
  # TODO(touts): I do not think we should pass keep_checkpoint_max here.
  # TODO(touts): Add individual static functions for init_op(), etc. that
  # implement the caching logic.
  def __init__(self,
               global_step_tensor=None,
               init_op=None,
               init_feed_dict=None,
               init_fn=None,
               ready_op=None,
               local_init_op=None,
               summary_op=None,
               saver=None,
               keep_checkpoint_max=5):
    """Create a scaffold.
    Args:
      global_step_tensor: Optional tensor to use as the global step counter.
      init_op: Optional op for initializing variables.
      init_feed_dict: Optional session feed dictionary to use when running the
        init_op.
      init_fn: Optional function to use to initialize the model after running
        the init_op. Will be called as `init_fn(scaffold, session)`.
      ready_op: Optional op to verify that the variables are initialized. Must
        return an empty scalar string tensor when the variables are
        initialized, or a non-empty one listing the names of the
        non-initialized variables.
      local_init_op: Optional op to initialize local variables.
      summary_op: Optional op to gather all summaries. Must return a scalar
        string tensor containing a serialized `Summary` proto.
      saver: Optional `tf.Saver` object to use to save and restore variables.
      keep_checkpoint_max: Optional parameter to use to construct a saver if
        none is already there in the graph.
    """
    # NOTE(touts): modifying the init function to be passed the scaffold is a
    # hack to make it easy to find the saver. Is there a better way?
    if init_fn:
      self._init_fn = lambda sess: init_fn(self, sess)
    else:
      self._init_fn = None
    self._global_step_tensor = global_step_tensor
    self._init_op = init_op
    self._ready_op = ready_op
    self._local_init_op = local_init_op
    self._summary_op = summary_op
    self._saver = saver
    self._keep_checkpoint_max = keep_checkpoint_max
    self._init_feed_dict = init_feed_dict
  def finalize(self):
    """Creates operations if needed and finalizes the graph."""
    if self._global_step_tensor is None:
      self._global_step_tensor = contrib_variables.get_or_create_global_step()
    if self._init_op is None:
      self._init_op = Scaffold._get_or_default(
          'init_op', ops.GraphKeys.INIT_OP, variables.initialize_all_variables)
    if self._ready_op is None:
      self._ready_op = Scaffold._get_or_default(
          'ready_op', ops.GraphKeys.READY_OP,
          variables.report_uninitialized_variables)
    if self._local_init_op is None:
      self._local_init_op = Scaffold._get_or_default(
          'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,
          Scaffold._default_local_init_op)
    if self._summary_op is None:
      self._summary_op = Scaffold._get_or_default(
          'summary_op', ops.GraphKeys.SUMMARY_OP,
          logging_ops.merge_all_summaries)
    # pylint: disable=g-long-lambda
    if self._saver is None:
      self._saver = Scaffold._get_or_default(
          'saver',
          ops.GraphKeys.SAVERS,
          lambda: training_saver.Saver(sharded=True,
                                       max_to_keep=self._keep_checkpoint_max))
    # pylint: enable=g-long-lambda
    ops.get_default_graph().finalize()
  @property
  def global_step_tensor(self):
    return self._global_step_tensor
  @property
  def init_fn(self):
    return self._init_fn
  @property
  def init_op(self):
    return self._init_op
  @property
  def ready_op(self):
    return self._ready_op
  @property
  def local_init_op(self):
    return self._local_init_op
  @property
  def summary_op(self):
    return self._summary_op
  @property
  def saver(self):
    return self._saver
  @property
  def init_feed_dict(self):
    return self._init_feed_dict
  @staticmethod
  def _get_or_default(arg_name, collection_key, default_constructor):
    """Get from cache or create a default operation."""
    elements = ops.get_collection(collection_key)
    if elements:
      if len(elements) > 1:
        # Interpolate eagerly with %: the previous logging-style form
        # (message, arg, arg) raised with an unformatted tuple instead of a
        # readable message.
        raise RuntimeError('More than one item in the collection "%s". '
                           'Please indicate which one to use by passing it to '
                           'the tf.Scaffold constructor as: '
                           'tf.Scaffold(%s=item to use)' %
                           (collection_key, arg_name))
      return elements[0]
    op = default_constructor()
    if op is not None:
      ops.add_to_collection(collection_key, op)
    return op
  @staticmethod
  def _default_local_init_op():
    return control_flow_ops.group(variables.initialize_local_variables(),
                                  data_flow_ops.initialize_all_tables())
def _call_monitor_end(monitor, sess):
# TODO(ispir): Remove following check when switch to MonitorV2
if 'session' in inspect.getargspec(monitor.end).args:
monitor.end(session=sess)
else:
monitor.end()
# TODO(ispir): Document this class after interface is finalized.
# mention StopIteration and OutOfRangeError
class SupervisedSession(object):
  """Session-like object that supports recovery and monitors.

  Wraps a TensorFlow session in a `RecoverableSession` so runs survive
  recoverable failures, and dispatches lifecycle callbacks to the given
  monitors.  Usable as a context manager: `OutOfRangeError` and
  `StopIteration` raised inside the `with` block are treated as normal
  end-of-input and suppressed by `__exit__`.
  """

  def __init__(self,
               master,
               is_chief=True,
               checkpoint_dir=None,
               monitors=None,
               scaffold=None,
               config=None):
    # master: session target; is_chief selects between preparing the
    # session (chief) and waiting for a prepared one (worker).
    self._graph = ops.get_default_graph()
    self._master = master
    self._checkpoint_dir = checkpoint_dir
    self._is_chief = is_chief
    self._config = config
    self._monitors = monitors or []
    self._scaffold = scaffold or Scaffold()
    for monitor in self._monitors:
      monitor.begin(max_steps=None)
    # Create the session.
    self._scaffold.finalize()
    self._session_manager = sm.SessionManager(
        local_init_op=self._scaffold.local_init_op,
        ready_op=self._scaffold.ready_op,
        graph=ops.get_default_graph())
    self._sess = recoverable_session.RecoverableSession(self._create_session)
    # Call the begin() method of monitors.
    # NOTE(review): `_tf_sess` is assigned inside `_create_session`; this
    # relies on RecoverableSession calling the factory eagerly — confirm.
    self._init_step = self._tf_sess.run(self._scaffold.global_step_tensor)
    # Write the graph out, note: this uses self._init_step.
    self.write_graph()

  def _create_session(self):
    """Factory for the RecoverableSession.

    Returns:
      A session, initialized or recovered as needed.
    """
    # Chief prepares (initializes or restores) the session; non-chief
    # workers block until the chief has made one available.
    if self._is_chief:
      tf_sess = self._session_manager.prepare_session(
          self._master, saver=self._scaffold.saver,
          checkpoint_dir=self._checkpoint_dir, config=self._config,
          init_op=self._scaffold.init_op,
          init_feed_dict=self._scaffold.init_feed_dict,
          init_fn=self._scaffold.init_fn)
    else:
      tf_sess = self._session_manager.wait_for_session(
          self._master, config=self._config)
    # Keep the tf_sess for quick runs of global step when needed.
    self._tf_sess = tf_sess
    # We don't want coordinator to suppress any exception.
    coord = coordinator.Coordinator(clean_stop_exception_types=[])
    coordinated_threads_to_join = queue_runner.start_queue_runners(sess=tf_sess,
                                                                   coord=coord)
    # Layering: monitored (dispatches monitor callbacks) inside
    # coordinated (joins queue-runner threads on close).
    return coordinated_session.CoordinatedSession(
        monitored_session.MonitoredSession(tf_sess, self._monitors,
                                           self._scaffold.global_step_tensor),
        coord, coordinated_threads_to_join)

  @property
  def scaffold(self):
    """The `Scaffold` used by this supervised session."""
    return self._scaffold

  @property
  def session(self):
    """The underlying raw TensorFlow session."""
    return self._tf_sess

  def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
    """Run ops in the supervised session.

    This method is completely compatible with the `tf.Session.run()` method.

    Args:
      fetches: Same as `tf.Session.run()`.
      feed_dict: Same as `tf.Session.run()`.
      options: Same as `tf.Session.run()`.
      run_metadata: Same as `tf.Session.run()`.

    Returns:
      Same as `tf.Session.run()`.
    """
    return self._sess.run(fetches, feed_dict=feed_dict, options=options,
                          run_metadata=run_metadata)

  def should_stop(self):
    """Return True when training should stop (or the session is closed)."""
    if self._sess:
      return self._sess.should_stop()
    return True

  def close(self):
    """Close the session, running monitor `end` callbacks first."""
    self._close_internal()

  def _close_internal(self, exception_type=None):
    # Monitors only get their end() callback on a clean shutdown; the
    # session itself is always closed and the handles cleared.
    try:
      if not exception_type:
        for monitor in self._monitors:
          _call_monitor_end(monitor, self._tf_sess)
    finally:
      self._sess.close()
      self._sess = None
      self._tf_sess = None

  def _is_closed(self):
    """Return True if the supervised session is closed.  For tests only.

    Returns:
      A boolean.
    """
    return self._tf_sess is None

  def __enter__(self):
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    # End-of-input exceptions are expected and treated as a clean stop.
    if exception_type in [errors.OutOfRangeError, StopIteration]:
      # TODO(ispir): log error if Coordinator hasn't done already.
      exception_type = None
    self._close_internal(exception_type)
    # __exit__ should return True to suppress an exception.
    return exception_type is None

  def write_graph(self):
    """Saves current graph."""
    # Only the chief writes, and only when a checkpoint dir is configured.
    if self._checkpoint_dir is not None and self._is_chief:
      summary_writer = summary_writer_cache.SummaryWriterCache.get(
          self._checkpoint_dir)
      training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                                self._checkpoint_dir, 'graph.pbtxt')
      summary_writer.add_graph(self._graph)
      summary_writer.add_session_log(SessionLog(status=SessionLog.START),
                                     self._init_step)
| |
import json
import time
from datetime import timedelta
import os
from autobahn.wamp import ApplicationError, TransportLost
from mock import mock, call
from pyfakefs.fake_filesystem_unittest import Patcher
from mdstudio.api.exception import CallException
from mdstudio.deferred.chainable import test_chainable
from mdstudio.logging.impl.session_observer import SessionLogObserver
from mdstudio.logging.log_type import LogType
from mdstudio.unittest.db import DBTestCase
from mdstudio.utc import from_utc_string, now, to_utc_string, timestamp
# Python 2 compatibility.
try:
    # `TimeoutError` is a builtin on Python 3; merely referencing it
    # raises NameError on Python 2, where socket.timeout is the closest
    # equivalent.
    TimeoutError
except NameError:
    import socket
    TimeoutError = socket.timeout
class SessionObserverTests(DBTestCase):
    """Unit tests for `SessionLogObserver` (singleton log collector).

    Each test resets the singleton and drives the observer with a
    MagicMock session whose str() is '"MagicMock"' and whose component
    root path is '/', so the recovery file resolves to /logs/recovery.json.
    """

    def setUp(self):
        # Reset the singleton so every test gets a fresh observer.
        SessionLogObserver._instance = None
        self.session = mock.MagicMock()
        self.session.__str__ = mock.MagicMock(return_value='"MagicMock"')
        self.session.component_root_path = mock.MagicMock(return_value='/')
        self.observer = SessionLogObserver(self.session)
        # Prevent the real background flusher from running during tests.
        self.observer.flusher = mock.MagicMock()

    def tearDown(self):
        SessionLogObserver._instance = None

    def test_construction(self):
        # A fresh observer has no bound session, one startup log entry,
        # and is not yet flushing.
        self.assertEqual(self.observer.session, None)
        self.assertEqual(self.observer.sessions, [])
        self.assertEqual(self.observer.log_type, LogType.User)
        self.assertLessEqual(now() - from_utc_string(self.observer.logs[0]['time']), timedelta(seconds=1))
        del self.observer.logs[0]['time']
        self.assertEqual(self.observer.logs, [{
            'level': 'info',
            'source': 'mdstudio.logging.impl.session_observer.SessionLogObserver',
            'message': 'Collecting logs on session "MagicMock"'
        }])
        self.assertEqual(self.observer.flushing, False)
        self.assertEqual(os.path.abspath(self.observer.recovery_file(self.session)), os.path.abspath('/logs/recovery.json'))

    def test_call(self):
        # Twisted-style event with log_format: the format is expanded
        # using the other event keys.
        t = now()
        self.observer({
            'log_format': 'hello {str}',
            'log_namespace': 'test namespace',
            'log_level': LogType.Group,
            'log_time': timestamp(t),
            'str': 'test'
        })
        self.assertLessEqual(t - from_utc_string(self.observer.logs[1]['time']), timedelta(seconds=1))
        del self.observer.logs[1]['time']
        self.assertEqual(self.observer.logs[1], {
            'level': 'Group',
            'source': 'test namespace',
            'message': 'hello test'
        })

    def test_call2(self):
        # Event carrying a pre-rendered 'message' instead of log_format.
        t = now()
        self.observer({
            'message': 'hello test',
            'log_namespace': 'test namespace',
            'log_level': LogType.Group,
            'log_time': timestamp(t),
        })
        self.assertLessEqual(t - from_utc_string(self.observer.logs[1]['time']), timedelta(seconds=1))
        del self.observer.logs[1]['time']
        self.assertEqual(self.observer.logs[1], {
            'level': 'Group',
            'source': 'test namespace',
            'message': 'hello test'
        })

    def test_call3(self):
        # Empty messages are dropped: only the startup entry remains.
        self.observer({
            'message': '',
            'log_namespace': 'test namespace',
            'log_level': LogType.Group,
            'log_time': time.mktime(now().timetuple()),
        })
        self.assertEqual(len(self.observer.logs), 1)

    @test_chainable
    def test_store_recovery(self):
        # Pending logs are moved to the on-disk recovery file.
        with Patcher() as patcher:
            self.observer.session = self.session
            patcher.fs.MakeDirectory('/logs')
            self.assertLessEqual(now() - from_utc_string(self.observer.logs[0]['time']), timedelta(seconds=1))
            del self.observer.logs[0]['time']
            self.assertEqual(self.observer.logs, [{
                'level': 'info',
                'source': 'mdstudio.logging.impl.session_observer.SessionLogObserver',
                'message': 'Collecting logs on session "MagicMock"'
            }])
            yield self.observer.store_recovery()
            self.assertEqual(self.observer.logs, [])
            with open(self.observer.recovery_file(self.session)) as f:
                self.assertEqual(json.load(f), [{
                    'level': 'info',
                    'source': 'mdstudio.logging.impl.session_observer.SessionLogObserver',
                    'message': 'Collecting logs on session "MagicMock"'
                }])

    @test_chainable
    def test_store_recovery2(self):
        # Nothing to store -> no recovery file is written.
        with Patcher() as patcher:
            self.observer.session = self.session
            patcher.fs.MakeDirectory('/logs')
            self.observer.logs = []
            yield self.observer.store_recovery()
            self.assertEqual(self.observer.logs, [])
            self.assertFalse(os.path.isfile(self.observer.recovery_file(self.session)))

    @test_chainable
    def test_flush_logs(self):
        # Successful flush forwards the logs and clears the buffer.
        self.observer.session = self.session
        self.session.flush_logs = mock.MagicMock()
        del self.observer.logs[0]['time']
        yield self.observer.flush_logs()
        self.assertEqual(self.observer.logs, [])
        self.session.flush_logs.assert_called_once_with([{
            'level': 'info',
            'source': 'mdstudio.logging.impl.session_observer.SessionLogObserver',
            'message': 'Collecting logs on session "MagicMock"'
        }])

    @test_chainable
    def test_flush_logs2(self):
        # TimeoutError: logs are kept and the observer backs off (3s, 1s).
        def raise_(ex):
            raise ex
        self.observer.session = self.session
        self.observer.sleep = mock.MagicMock()
        self.observer.log = mock.MagicMock()
        self.session.flush_logs = mock.MagicMock(wraps=lambda ex: raise_(TimeoutError))
        del self.observer.logs[0]['time']
        yield self.observer.flush_logs()
        self.assertEqual(self.observer.logs, [{
            'level': 'info',
            'message': 'Collecting logs on session "MagicMock"',
            'source': 'mdstudio.logging.impl.session_observer.SessionLogObserver'
        }])
        self.observer.sleep.assert_has_calls([
            call(3),
            call(1)
        ])

    @test_chainable
    def test_flush_logs3(self):
        # ApplicationError: logs kept, single 1s sleep.
        def raise_(ex):
            raise ex
        self.observer.session = self.session
        self.observer.sleep = mock.MagicMock()
        self.observer.log = mock.MagicMock()
        self.session.flush_logs = mock.MagicMock(wraps=lambda ex: raise_(ApplicationError))
        del self.observer.logs[0]['time']
        yield self.observer.flush_logs()
        self.assertEqual(self.observer.logs, [{
            'level': 'info',
            'message': 'Collecting logs on session "MagicMock"',
            'source': 'mdstudio.logging.impl.session_observer.SessionLogObserver'
        }])
        self.observer.sleep.assert_has_calls([
            call(1)
        ])

    @test_chainable
    def test_flush_logs4(self):
        # TransportLost: logs kept, two 1s sleeps.
        def raise_(ex):
            raise ex
        self.observer.session = self.session
        self.observer.sleep = mock.MagicMock()
        self.observer.log = mock.MagicMock()
        self.session.flush_logs = mock.MagicMock(wraps=lambda ex: raise_(TransportLost))
        del self.observer.logs[0]['time']
        yield self.observer.flush_logs()
        self.assertEqual(self.observer.logs, [{
            'level': 'info',
            'message': 'Collecting logs on session "MagicMock"',
            'source': 'mdstudio.logging.impl.session_observer.SessionLogObserver'
        }])
        self.observer.sleep.assert_has_calls([
            call(1),
            call(1)
        ])

    @test_chainable
    def test_flush_logs5(self):
        # CallException: logs kept, two 1s sleeps.
        def raise_(ex):
            raise ex
        self.observer.session = self.session
        self.observer.sleep = mock.MagicMock()
        self.observer.log = mock.MagicMock()
        self.session.flush_logs = mock.MagicMock(wraps=lambda ex: raise_(CallException))
        del self.observer.logs[0]['time']
        yield self.observer.flush_logs()
        self.assertEqual(self.observer.logs, [{
            'level': 'info',
            'message': 'Collecting logs on session "MagicMock"',
            'source': 'mdstudio.logging.impl.session_observer.SessionLogObserver'
        }])
        self.observer.sleep.assert_has_calls([
            call(1),
            call(1)
        ])

    @test_chainable
    def test_flush_logs6(self):
        # Unexpected exception: logs kept and the error is logged.
        def raise_(ex):
            raise ex()
        self.observer.session = self.session
        self.observer.sleep = mock.MagicMock()
        self.observer.log = mock.MagicMock()
        self.observer.log.error = mock.MagicMock()
        self.session.flush_logs = mock.MagicMock(wraps=lambda ex: raise_(Exception))
        del self.observer.logs[0]['time']
        yield self.observer.flush_logs()
        self.assertEqual(self.observer.logs, [{
            'level': 'info',
            'message': 'Collecting logs on session "MagicMock"',
            'source': 'mdstudio.logging.impl.session_observer.SessionLogObserver'
        }])
        self.observer.log.error.assert_called_once()

    @test_chainable
    def test_flush_logs7(self):
        # No logs buffered: flush is a no-op even if flush_logs would fail.
        def raise_(ex):
            raise ex()
        self.observer.session = self.session
        self.observer.sleep = mock.MagicMock()
        self.observer.log = mock.MagicMock()
        self.observer.log.error = mock.MagicMock()
        self.session.flush_logs = mock.MagicMock(wraps=lambda ex: raise_(Exception))
        self.observer.logs = []
        yield self.observer.flush_logs()
        self.assertEqual(self.observer.logs, [])

    @test_chainable
    def test_flush_logs8(self):
        # Fewer than 10 buffered entries -> throttled with a 1s sleep.
        self.observer.session = self.session
        self.observer.sleep = mock.MagicMock()
        self.observer.log = mock.MagicMock()
        self.observer.log.error = mock.MagicMock()
        self.observer.logs = [0] * 9
        yield self.observer.flush_logs()
        self.observer.sleep.assert_has_calls([
            call(1)
        ])

    @test_chainable
    def test_flush_logs9(self):
        # 10 or more buffered entries -> flush immediately, no sleep.
        self.observer.session = self.session
        self.observer.sleep = mock.MagicMock()
        self.observer.log = mock.MagicMock()
        self.observer.log.error = mock.MagicMock()
        self.observer.logs = [0] * 10
        yield self.observer.flush_logs()
        self.observer.sleep.assert_not_called()

    @test_chainable
    def test_start_flushing(self):
        # start_flushing binds the session; a later flush forwards the
        # startup log entry and leaves no recovery file behind.
        self.observer.session = self.session
        self.observer.sleep = mock.MagicMock()
        yield self.observer.start_flushing(self.session)
        self.assertLessEqual(now() - from_utc_string(self.observer.logs[0]['time']), timedelta(seconds=1))
        del self.observer.logs[0]['time']
        yield self.observer.flush_logs()
        self.assertFalse(os.path.isfile(self.observer.recovery_file(self.session)))
        self.assertEqual(self.observer.logs, [])
        self.session.flush_logs.assert_called_once_with([{
            'level': 'info',
            'source': 'mdstudio.logging.impl.session_observer.SessionLogObserver',
            'message': 'Collecting logs on session "MagicMock"'
        }])
        self.assertEqual(self.observer.session, self.session)

    @test_chainable
    def test_start_flushing2(self):
        # Valid recovery file on disk: its entries replace the buffer and
        # are flushed, then the recovery file is removed.
        with Patcher() as patcher:
            patcher.fs.CreateFile(self.observer.recovery_file(self.session), contents=json.dumps([{'est': 'error'}]))
            self.observer.session = self.session
            self.observer.sleep = mock.MagicMock()
            yield self.observer.start_flushing(self.session)
            self.assertEqual(self.observer.logs, [{'est': 'error'}])
            yield self.observer.flush_logs()
            self.assertFalse(os.path.isfile(self.observer.recovery_file(self.session)))
            self.session.flush_logs.assert_called_once_with([{'est': 'error'}])
            self.assertEqual(self.observer.session, self.session)

    @test_chainable
    def test_start_flushing3(self):
        # Corrupt (non-JSON) recovery file is ignored, not loaded verbatim.
        with Patcher() as patcher:
            patcher.fs.CreateFile(self.observer.recovery_file(self.session), contents='sdfwef')
            self.observer.session = self.session
            self.observer.sleep = mock.MagicMock()
            yield self.observer.start_flushing(self.session)
            self.assertNotEqual(self.observer.logs, 'sdfwef')

    @test_chainable
    def test_start_flushing4(self):
        # Already flushing: session is only appended to the session list.
        with Patcher() as _:
            self.observer.session = self.session
            self.observer.sleep = mock.MagicMock()
            self.observer.flushing = True
            yield self.observer.start_flushing(self.session)
            self.assertNotEqual(self.observer.logs, 'sdfwef')
            self.assertEqual(self.observer.sessions, [self.session])

    def test_pause_flushing(self):
        # Pausing when not flushing is a harmless no-op.
        self.observer.pause_flushing(self.session)

    def test_pause_flushing2(self):
        # Pausing removes the session from the active session list.
        self.observer.flushing = True
        self.observer.sessions = [self.session]
        self.observer.pause_flushing(self.session)
        self.assertEqual(self.observer.sessions, [])

    def test_pause_flushing3(self):
        # Same, also with the session bound as the current one.
        self.observer.flushing = True
        self.observer.session = self.session
        self.observer.sessions = [self.session]
        self.observer.pause_flushing(self.session)
        self.assertEqual(self.observer.sessions, [])
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import errno
import os
import socket
import ssl
import sys
import time
import eventlet
import eventlet.wsgi
import greenlet
from oslo.config import cfg
from paste import deploy
import routes.middleware
import webob.dec
import webob.exc
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import utils
# Configuration options controlling how the WSGI server socket is
# created and, optionally, wrapped with SSL.
socket_opts = [
    cfg.IntOpt('backlog',
               default=4096,
               help="Number of backlog requests to configure the socket with"),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help="Sets the value of TCP_KEEPIDLE in seconds for each "
                    "server socket. Not supported on OS X."),
    cfg.StrOpt('ssl_ca_file',
               default=None,
               help="CA certificate file to use to verify "
                    "connecting clients"),
    cfg.StrOpt('ssl_cert_file',
               default=None,
               help="Certificate file to use when starting "
                    "the server securely"),
    cfg.StrOpt('ssl_key_file',
               default=None,
               help="Private key file to use when starting "
                    "the server securely"),
]

CONF = cfg.CONF
CONF.register_opts(socket_opts)

LOG = logging.getLogger(__name__)
class Server(object):
    """Server class to manage a WSGI server, serving a WSGI application."""

    # Default maximum number of concurrent eventlet greenthreads.
    default_pool_size = 1000

    def __init__(self, name, app, host=None, port=None, pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to serve the application (0 = ephemeral).
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :returns: None
        """
        self.name = name
        self.app = app
        self._host = host or "0.0.0.0"
        self._port = port or 0
        self._server = None
        self._socket = None
        self._protocol = protocol
        self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
        self._logger = logging.getLogger("eventlet.wsgi.server")
        # eventlet.wsgi expects a file-like log object.
        self._wsgi_logger = logging.WritableLogger(self._logger)

    def _get_socket(self, host, port, backlog):
        """Bind and return the listening socket, retrying for up to 30s.

        Wraps the socket with SSL when ssl_cert_file/ssl_key_file are
        configured; requires clients' certs when ssl_ca_file is set.

        :raises RuntimeError: if SSL files are missing/inconsistent or the
                              address cannot be bound within 30 seconds.
        """
        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0],
                                      bind_addr[1],
                                      socket.AF_UNSPEC,
                                      socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            # Fall back to IPv4 when address resolution fails.
            family = socket.AF_INET

        cert_file = CONF.ssl_cert_file
        key_file = CONF.ssl_key_file
        ca_file = CONF.ssl_ca_file
        use_ssl = cert_file or key_file

        # Validate the SSL configuration before attempting to bind.
        if cert_file and not os.path.exists(cert_file):
            raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

        if ca_file and not os.path.exists(ca_file):
            raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

        if key_file and not os.path.exists(key_file):
            raise RuntimeError(_("Unable to find key_file : %s") % key_file)

        if use_ssl and (not cert_file or not key_file):
            raise RuntimeError(_("When running server in SSL mode, you must "
                                 "specify both a cert_file and key_file "
                                 "option value in your configuration file"))

        def wrap_ssl(sock):
            # Server-side SSL; client cert verification only when a CA
            # bundle is configured.
            ssl_kwargs = {
                'server_side': True,
                'certfile': cert_file,
                'keyfile': key_file,
                'cert_reqs': ssl.CERT_NONE,
            }
            if CONF.ssl_ca_file:
                ssl_kwargs['ca_certs'] = ca_file
                ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
            return ssl.wrap_socket(sock, **ssl_kwargs)

        # Retry binding for up to 30 seconds in case the address is
        # still held by a previous process (EADDRINUSE).
        sock = None
        retry_until = time.time() + 30
        while not sock and time.time() < retry_until:
            try:
                sock = eventlet.listen(bind_addr,
                                       backlog=backlog,
                                       family=family)
                if use_ssl:
                    sock = wrap_ssl(sock)
            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
                                 "after trying for 30 seconds") %
                               {'host': host, 'port': port})
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPIDLE,
                            CONF.tcp_keepidle)

        return sock

    def _start(self):
        """Run the blocking eventlet WSGI server.

        :returns: None
        """
        eventlet.wsgi.server(self._socket,
                             self.app,
                             protocol=self._protocol,
                             custom_pool=self._pool,
                             log=self._wsgi_logger)

    def start(self, backlog=128):
        """Start serving a WSGI application.

        :param backlog: Maximum number of queued connections.
        :returns: None
        :raises: cinder.exception.InvalidInput
        """
        if backlog < 1:
            # NOTE(review): the check accepts backlog == 1 although the
            # message says "more than 1" -- confirm the intended minimum.
            raise exception.InvalidInput(
                reason='The backlog must be more than 1')

        self._socket = self._get_socket(self._host,
                                        self._port,
                                        backlog=backlog)
        self._server = eventlet.spawn(self._start)
        # Re-read host/port from the socket: port 0 binds an ephemeral port.
        (self._host, self._port) = self._socket.getsockname()[0:2]
        LOG.info(_("Started %(name)s on %(_host)s:%(_port)s") % self.__dict__)

    @property
    def host(self):
        # Actual bound address (updated by start()).
        return self._host

    @property
    def port(self):
        # Actual bound port (updated by start()).
        return self._port

    def stop(self):
        """Stop this server.

        This is not a very nice action, as currently the method by which a
        server is stopped is by killing its eventlet.

        :returns: None
        """
        LOG.info(_("Stopping WSGI server."))
        self._server.kill()

    def wait(self):
        """Block, until the server has stopped.

        Waits on the server's eventlet to finish, then returns.

        :returns: None
        """
        try:
            self._server.wait()
        except greenlet.GreenletExit:
            # Raised by stop()'s kill(); this is the normal shutdown path.
            LOG.info(_("WSGI server has stopped."))
class Request(webob.Request):
    """webob.Request subclass; extension point for cinder-specific helpers."""
    pass
class Application(object):
    """Base wrapper for a WSGI application.

    Subclasses must provide ``__call__``; this base class only supplies
    the paste.deploy app-factory plumbing.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """paste.deploy app factory.

        Keys under the ``[app:APPNAME]`` section of the paste config
        arrive here as ``local_config`` and are forwarded to ``__init__``
        as keyword arguments.  For example::

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = cinder.api.fancy_api:Wadl.factory

        results in ``fancy_api.Wadl(latest_version='1.3')``.  Subclasses
        can override this, but the kwarg forwarding usually suffices.
        """
        app = cls(**local_config)
        return app

    def __call__(self, environ, start_response):
        r"""Entry point subclasses must implement.

        A typical implementation uses the webob decorator::

            @webob.dec.wsgify(RequestClass=Request)
            def __call__(self, req):
                ...

        and may return, among others: a plain string body, a
        ``webob.exc`` HTTP exception, a ``webob.Response``, another WSGI
        app to delegate to, or ``req.get_response(other_app)``.  Setting
        ``req.response`` and returning ``None`` also works.  See the
        webob ``dec`` module documentation for the full list.
        """
        raise NotImplementedError(_('You must implement __call__'))
class Middleware(Application):
    """Base WSGI middleware.

    A middleware wraps another application and, by default, simply
    delegates to it.  Override ``process_request`` /
    ``process_response`` (or ``__call__``) to customize behavior.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """paste.deploy filter factory.

        Keys under the ``[filter:NAME]`` paste section arrive as
        ``local_config`` and are forwarded to ``__init__``.  E.g.::

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = cinder.api.analytics:Analytics.factory

        yields ``analytics.Analytics(app_from_paste,
        redis_host='127.0.0.1')``.
        """
        def _filter(app):
            return cls(app, **local_config)
        return _filter

    def __init__(self, application):
        # The next application down the WSGI stack.
        self.application = application

    def process_request(self, req):
        """Hook called for each request.

        Return ``None`` to continue down the stack, or a response to
        short-circuit processing.
        """
        return None

    def process_response(self, response):
        """Hook to post-process the downstream response."""
        return response

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        short_circuit = self.process_request(req)
        if short_circuit:
            return short_circuit
        return self.process_response(req.get_response(self.application))
class Debug(Middleware):
    """Helper class for debugging a WSGI application.

    Can be inserted into any WSGI application chain to get information
    about the request and response.

    NOTE: this class uses Python 2 ``print`` statements and
    ``dict.iteritems``; it is not Python 3 compatible.
    """

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        # Dump the request environ, delegate, then dump response headers.
        print ('*' * 40) + ' REQUEST ENVIRON'
        for key, value in req.environ.items():
            print key, '=', value
        print
        resp = req.get_response(self.application)
        print ('*' * 40) + ' RESPONSE HEADERS'
        for (key, value) in resp.headers.iteritems():
            print key, '=', value
        print
        # Wrap the body iterator so it is echoed as it is consumed.
        resp.app_iter = self.print_generator(resp.app_iter)
        return resp

    @staticmethod
    def print_generator(app_iter):
        """Iterator that prints the contents of a wrapper string."""
        print ('*' * 40) + ' BODY'
        for part in app_iter:
            # Echo each chunk while still yielding it downstream.
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Every route registered on ``mapper`` must carry a 'controller'
        (a WSGI app); typically an 'action' is also set so the
        controller can pick the right method.  Examples::

            mapper = routes.Mapper()
            sc = ServerController()

            # one explicit route
            mapper.connect(None, '/svrlist', controller=sc, action='list')

            # implicit CRUD actions
            mapper.resource('server', 'servers', controller=sc)

            # hand a sub-path to an arbitrary WSGI app
            mapper.connect(None, '/v1.0/{path_info:.*}',
                           controller=BlogApp())
        """
        self.map = mapper
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Hand the request to the routes middleware (404 on no match)."""
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Forward to the controller matched by RoutesMiddleware.

        RoutesMiddleware has already matched the request and stored the
        result in ``req.environ``; return 404 when nothing matched,
        otherwise delegate to the matched controller app.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            return webob.exc.HTTPNotFound()
        return match['controller']
class Loader(object):
    """Load WSGI applications from paste deploy configurations."""

    def __init__(self, config_path=None):
        """Locate the paste config file.

        :param config_path: Full or relative path to the paste config;
                            falls back to ``CONF.api_paste_config``.
        :returns: None
        """
        self.config_path = utils.find_config(
            config_path or CONF.api_paste_config)

    def load_app(self, name):
        """Return the paste URLMap wrapped WSGI application.

        :param name: Name of the application to load.
        :returns: Paste URLMap object wrapping the requested application.
        :raises: `cinder.exception.PasteAppNotFound`
        """
        try:
            return deploy.loadapp("config:%s" % self.config_path, name=name)
        except LookupError as err:
            LOG.error(err)
            raise exception.PasteAppNotFound(name=name, path=self.config_path)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
# Optional response hook: (pipeline response, deserialized body, headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
# Validation is performed by the service; skip client-side checks.
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing all tokens of a container registry.

    Remaining keyword arguments are forwarded to :class:`HttpRequest`.
    """
    api_version = "2021-12-01-preview"
    accept = "application/json"

    # Serialize (and validate) the path parameters, then fill the template.
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    }
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens'),
        **path_args)

    # Query string: only the API version.
    query_params = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: advertise the accepted content type.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_params,
        headers=headers,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    token_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request fetching a single registry token.

    Remaining keyword arguments are forwarded to :class:`HttpRequest`.
    """
    api_version = "2021-12-01-preview"
    accept = "application/json"

    # Serialize (and validate) the path parameters, then fill the template.
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "tokenName": _SERIALIZER.url("token_name", token_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
    }
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}'),
        **path_args)

    # Query string: only the API version.
    query_params = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: advertise the accepted content type.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_params,
        headers=headers,
        **kwargs
    )
def build_create_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    token_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial HTTP PUT request for the token-create long-running operation."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-12-01-preview"
    accept = "application/json"

    # Resolve the URL template and substitute serialized/validated path parameters.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}')
    _path = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "tokenName": _SERIALIZER.url("token_name", token_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string and headers start from any caller-supplied values.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    token_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial HTTP DELETE request for the token-delete long-running operation."""
    api_version = "2021-12-01-preview"
    accept = "application/json"

    # Resolve the URL template and substitute serialized/validated path parameters.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}')
    _path = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "tokenName": _SERIALIZER.url("token_name", token_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string and headers start from any caller-supplied values.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    token_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial HTTP PATCH request for the token-update long-running operation."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-12-01-preview"
    accept = "application/json"

    # Resolve the URL template and substitute serialized/validated path parameters.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}')
    _path = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "tokenName": _SERIALIZER.url("token_name", token_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9-]*$'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string and headers start from any caller-supplied values.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
class TokensOperations(object):
    """TokensOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerregistry.v2021_12_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send requests; serializer/deserializer map
        # between wire format and model classes.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        registry_name: str,
        **kwargs: Any
    ) -> Iterable["_models.TokenListResult"]:
        """Lists all the tokens for the specified container registry.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TokenListResult or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerregistry.v2021_12_01_preview.models.TokenListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TokenListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation's URL template; follow-up pages
            # reuse the service-provided next link as the template.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Paging next-links are always fetched with GET regardless of
                # the builder's default.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Pull the page items and the link to the following page out of a
            # deserialized TokenListResult.
            deserialized = self._deserialize("TokenListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Send the request for one page and surface service errors as
            # HttpResponseError with ARM formatting.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens'}  # type: ignore

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        registry_name: str,
        token_name: str,
        **kwargs: Any
    ) -> "_models.Token":
        """Gets the properties of the specified token.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param token_name: The name of the token.
        :type token_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Token, or the result of cls(response)
        :rtype: ~azure.mgmt.containerregistry.v2021_12_01_preview.models.Token
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Token"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            token_name=token_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Token', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}'}  # type: ignore

    def _create_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        token_name: str,
        token_create_parameters: "_models.Token",
        **kwargs: Any
    ) -> "_models.Token":
        # Sends the initial PUT for the create LRO; begin_create() polls the
        # result. Both 200 and 201 carry a Token body.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Token"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(token_create_parameters, 'Token')
        request = build_create_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            token_name=token_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('Token', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('Token', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}'}  # type: ignore

    @distributed_trace
    def begin_create(
        self,
        resource_group_name: str,
        registry_name: str,
        token_name: str,
        token_create_parameters: "_models.Token",
        **kwargs: Any
    ) -> LROPoller["_models.Token"]:
        """Creates a token for a container registry with the specified parameters.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param token_name: The name of the token.
        :type token_name: str
        :param token_create_parameters: The parameters for creating a token.
        :type token_create_parameters: ~azure.mgmt.containerregistry.v2021_12_01_preview.models.Token
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Token or the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Token]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Token"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick off the LRO. cls is overridden so we keep
            # the raw pipeline response for the poller.
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                token_name=token_name,
                token_create_parameters=token_create_parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into a Token (or cls result).
            response = pipeline_response.http_response
            deserialized = self._deserialize('Token', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}'}  # type: ignore

    def _delete_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        token_name: str,
        **kwargs: Any
    ) -> None:
        # Sends the initial DELETE for the delete LRO; begin_delete() polls.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            token_name=token_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}'}  # type: ignore

    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        registry_name: str,
        token_name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes a token from a container registry.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param token_name: The name of the token.
        :type token_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick off the LRO, keeping the raw response.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                token_name=token_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Delete yields no body; only the optional cls hook sees the response.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}'}  # type: ignore

    def _update_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        token_name: str,
        token_update_parameters: "_models.TokenUpdateParameters",
        **kwargs: Any
    ) -> "_models.Token":
        # Sends the initial PATCH for the update LRO; begin_update() polls.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Token"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(token_update_parameters, 'TokenUpdateParameters')
        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            token_name=token_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('Token', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('Token', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}'}  # type: ignore

    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        registry_name: str,
        token_name: str,
        token_update_parameters: "_models.TokenUpdateParameters",
        **kwargs: Any
    ) -> LROPoller["_models.Token"]:
        """Updates a token with the specified parameters.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param token_name: The name of the token.
        :type token_name: str
        :param token_update_parameters: The parameters for updating a token.
        :type token_update_parameters:
         ~azure.mgmt.containerregistry.v2021_12_01_preview.models.TokenUpdateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Token or the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Token]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Token"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick off the LRO, keeping the raw response.
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                token_name=token_name,
                token_update_parameters=token_update_parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into a Token (or cls result).
            response = pipeline_response.http_response
            deserialized = self._deserialize('Token', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tokens/{tokenName}'}  # type: ignore
# ----- file boundary: end of generated Azure SDK chunk; panorama capture script follows -----
#!/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 17 10:24:49 2014
:author: chuong nguyen, chuong.nguyen@anu.edu.au
:author: Gareth Dunstone, gareth.dunstone@anu.edu.au
"""
import sys
import os, json
from datetime import datetime
import shutil
import tarfile
import logging, logging.config
import numpy as np
import time
import csv
import yaml
import tempfile
import re
from libs.IPCamera import IPCamera
from libs.PanTilt import PanTilt
from PIL import Image
import datetime
try:
logging.config.fileConfig("logging.ini")
except:
pass
try:
import telegraf
except Exception as e:
print(str(e))
logging.getLogger("paramiko").setLevel(logging.WARNING)
def sec2human(seconds) -> str:
    """
    formats a timedelta object into semi-fuzzy human readable time periods.

    :param seconds: seconds to format into a time period
    :type seconds: int or float
    :return: human readable string (e.g. "1 minute, 30 seconds")
    :rtype: str
    """
    periods = [
        ('year', 60 * 60 * 24 * 365),
        ('month', 60 * 60 * 24 * 30),
        ('day', 60 * 60 * 24),
        ('hour', 60 * 60),
        ('minute', 60),
        ('second', 1)
    ]
    strings = []
    for period_name, period_seconds in periods:
        # BUG FIX: use >= so exact multiples roll up correctly — previously
        # 60 s rendered as "60 seconds" and 1 s rendered as "" (empty).
        if seconds >= period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            period_value = int(period_value)  # avoid "1.0 minutes" for float input
            fmt_st = "{val} {name}" if period_value == 1 else "{val} {name}s"
            strings.append(fmt_st.format(val=period_value, name=period_name))
    # Zero (or sub-second) input previously produced an empty string.
    return ", ".join(strings) if strings else "0 seconds"
# Duration strings like "1hr", "90m", "1hr30m10s"; every component is optional.
regex = re.compile(r'((?P<hours>\d+?)hr)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?')


def parse_time(time_str):
    """Parse a duration string such as "1hr30m" into a ``datetime.timedelta``.

    :param time_str: duration string, e.g. "1hr", "30m", "1hr15m10s"
    :return: the parsed timedelta, or None if the string is not a valid duration

    BUG FIX: every regex group is optional, so ``regex.match`` succeeded on ANY
    string (zero-length match) and invalid input silently became timedelta(0).
    Use fullmatch and require at least one matched component instead.
    """
    parts = regex.fullmatch(time_str)
    if not parts:
        return None
    time_params = {
        name: int(value)
        for name, value in parts.groupdict().items()
        if value
    }
    if not time_params:
        return None
    return datetime.timedelta(**time_params)
class Panorama(object):
"""
Panorama class.
Provides the calibration and creation of tiled panoramas with a configuration file.
"""
accuracy = 3
def __init__(self, config=None, config_filename=None):
if not config:
config = dict()
if config_filename:
config = yaml.load(open(config_filename).read())
config = config.copy()
self.use_focus_at_center = config.get("use_focus_at_center", True)
e = os.environ.get("USE_FOCUS_AT_CENTER", None)
self.use_focus_at_center = e if e is not None else self.use_focus_at_center
self.name = config.get("name", "DEFAULT_PANO_NAME")
e = os.environ.get("NAME", None)
self.name = e if e is not None else self.name
self.logger = logging.getLogger(self.name)
self._output_dir = config.get("output_dir", "/data")
self.output_dir = self._output_dir
start_time_string = str(config.get('starttime', "0000"))
e = os.environ.get("START_TIME", None)
start_time_string = e if e is not None else start_time_string
end_time_string = str(config.get('stoptime', "2359"))
e = os.environ.get("STOP_TIME", None)
end_time_string = e if e is not None else end_time_string
start_time_string = start_time_string.replace(":", "")
end_time_string = end_time_string.replace(":", "")
start_time_string = start_time_string[:4]
end_time_string = end_time_string[:4]
assert end_time_string.isdigit(), "Non numerical start time, {}".format(str(end_time_string))
self.begin_capture = datetime.datetime.strptime(start_time_string, "%H%M").time()
assert end_time_string.isdigit(), "Non numerical start time, {}".format(str(end_time_string))
self.end_capture = datetime.datetime.strptime(end_time_string, "%H%M").time()
interval = config.get("interval", "1hr")
e = os.environ.get("INTERVAL", None)
interval = e if e is not None else interval
self.interval = parse_time(interval)
camera = None
ptz = None
try:
while not camera:
camera_config = config.get("camera")
if not camera_config:
raise ValueError("No 'camera' section found in config file.")
camera = IPCamera(self.name, config=camera_config)
except Exception as e:
self.logger.error("Couldnt initialise Camera: " + str(e))
time.sleep(30)
camera = None
self._camera = camera
if self._camera:
fov = config.get("camera_fov", None)
e = os.environ.get("CAMERA_FOV", None)
fov = e if e is not None else fov
if type(fov) is str:
fov = re.split("[\W+\|,|x|x|:]", fov)
fov = [float(x) for x in fov]
if fov is not None:
self._camera.hfov, self._camera.vfov = fov
self.logger.debug("Camera initialised")
while not ptz:
try:
ptz_config = config.get("ptz")
if not ptz_config:
raise ValueError("No 'ptz' section found in config file.")
ptz = PanTilt(config=ptz_config)
self.logger.debug("ptz initialised")
except Exception as e:
self.logger.error("Couldnt initialise PTZ: " + str(e))
time.sleep(30)
ptz = None
self._pantilt = ptz
self._zoom_position = config.get('ptz', {}).get('zoom', 800)
self._image_overlap = float(config.get("overlap", 50))
e = os.environ.get("OVERLAP", None)
self._image_overlap = e if e is not None else self._image_overlap
self._image_overlap /= 100
self._seconds_per_image = 5
# this is vital to create the output folder
self._csv_log = None
self._recovery_filepath = os.path.join("/persist", ".gv_recover_{}.json".format(self.name))
self._recovery_file = dict(image_index=0)
try:
if os.path.exists(self._recovery_filepath):
with open(self._recovery_filepath, "r") as file:
self._recovery_file = json.loads(file.read())
except:
with open(self._recovery_filepath, "w+") as f:
f.write("{}")
f.seek(0)
self._recovery_file = json.loads(f.read())
first_corner = config.get("first_corner", [100, 20])
e = os.environ.get("FIRST_CORNER", None)
first_corner = e if e is not None else first_corner
if type(first_corner) is str:
first_corner = re.split("[\W+\|,|x|x|:]", first_corner)
second_corner = config.get("second_corner", [300, -20])
e = os.environ.get("SECOND_CORNER", None)
second_corner = e if e is not None else second_corner
if type(second_corner) is str:
second_corner = re.split("[\W+\|,|x|x|:]", second_corner)
assert type(first_corner) in (list, tuple), "first corner must be a list or tuple"
assert type(second_corner) in (list, tuple), "second corner must be a list or tuple"
assert len(first_corner) == 2, "first corner must be of length 2"
assert len(second_corner) == 2, "second corner must be of length 2"
self._pan_range = sorted([first_corner[0], second_corner[0]])
self._tilt_range = sorted([first_corner[1], second_corner[1]])
self._pan_step = self._tilt_step = None
self._pan_pos_list = self._tilt_pos_list = list()
scan_order_unparsed = config.get("scan_order", "0")
e = os.environ.get("SCAN_ORDER", None)
scan_order_unparsed = e if e is not None else scan_order_unparsed
self._scan_order_translation = {
'cols,right': 0,
'cols,left': 1,
'rows,down': 2,
'rows,up': 3,
"0": 0,
"1": 1,
"2": 2,
"3": 3,
0: 0,
1: 1,
2: 2,
3: 3
}
self._scan_order_translation_r = {
0: 'cols,right',
1: 'cols,left',
2: 'rows,down',
3: 'rows,up'
}
self._scan_order = self._scan_order_translation.get(str(scan_order_unparsed).lower().replace(" ", ""), 0)
self.logger.info(self.summary)
try:
telegraf_client = telegraf.TelegrafClient(host="telegraf", port=8092)
metric = {
"num_rows": len(self._tilt_pos_list),
"num_cols": len(self._pan_pos_list),
"recovery_index": int(self._recovery_file.get("image_index", 0)),
"hfov": self.camera.hfov,
"vfov": self.camera.vfov
}
telegraf_client.metric("gigavision", metric, tags={'name': self.name})
except:
pass
    def set_current_as_first_corner(self):
        """
        Record the pan/tilt unit's current position as the panorama's first corner.

        This and :func:`set_current_as_second_corner`, both internally call enumerate positions.

        Assigns through the ``first_corner`` property setter, which re-derives
        the pan/tilt ranges.
        """
        self.first_corner = self._pantilt.position
    def set_current_as_second_corner(self):
        """
        Record the pan/tilt unit's current position as the panorama's second corner.

        See :func:`set_current_as_first_corner`.
        """
        self.second_corner = self._pantilt.position
def enumerate_positions(self):
"""
Uses the currrent image overlap, camera fov and corners to calculate a "grid" of pan and tilt positions.
Also sets the internal enumeration of pan/tilt positions.
"""
self.logger.debug("Enumerating positions")
self._pan_step = (1 - self._image_overlap) * self._camera.hfov
self._tilt_step = (1 - self._image_overlap) * self._camera.vfov
pan_start = self._pan_range[0]
pan_stop = self._pan_range[1]
tilt_start = self._tilt_range[0]
tilt_stop = self._tilt_range[1]
if self._scan_order == 1:
# cols left
pan_start, pan_stop = pan_stop, pan_start
self._pan_step *= -1
elif self._scan_order == 3:
# rows up
tilt_start, tilt_stop = tilt_stop, tilt_start
self._tilt_step *= -1
self._pan_pos_list = np.arange(pan_start, pan_stop, self._pan_step)
self._tilt_pos_list = np.arange(tilt_start, tilt_stop, self._tilt_step)
self.logger.debug("pan {}-{}".format(pan_start, pan_stop))
self.logger.debug("tilt {}-{}".format(tilt_start, tilt_stop))
@property
def summary(self) -> str:
"""
returns a human readable summary of the panorama parameters.
These include pan step, camera fov etc.
:return: information about the panorama
:rtype: str
"""
self.enumerate_positions()
max_num_images = len(self._pan_pos_list) * len(self._tilt_pos_list)
last_image_index = int(self._recovery_file.get("image_index", 0))
s = "\n"
s += "----- PANO SUMMARY -----\n"
s += "This panorama has {}(H) x {}(V) = {} images\n".format(
len(self._pan_pos_list), len(self._tilt_pos_list), max_num_images)
s += "Camera fov {0:.2f}|{1:.2f}\n".format(self.camera.hfov, self.camera.vfov)
minutes, seconds = divmod(self._seconds_per_image * (max_num_images - last_image_index), 60)
if last_image_index > 0:
s += "RECOVERY AT {}\n".format(last_image_index)
s += "This will complete in approx {0:.2f} min {1:.2f} sec\n".format(minutes, seconds)
s += "pan step = {0:.3f} deg, tilt step = {1:.3f} deg\n".format(self._pan_step, self._tilt_step)
s += "------------------------\n"
return s
@property
def camera(self) -> IPCamera:
    """Camera used to capture the panorama images."""
    return self._camera

@camera.setter
def camera(self, value: IPCamera):
    """Replace the capture camera."""
    self._camera = value
@property
def pantilt(self) -> PanTilt:
    """Pan/tilt head that positions the camera."""
    return self._pantilt

@pantilt.setter
def pantilt(self, value: PanTilt):
    """Replace the pan/tilt head."""
    self._pantilt = value
@property
def image_overlap(self):
    """Fraction of overlap between adjacent images (0..1)."""
    return self._image_overlap

@image_overlap.setter
def image_overlap(self, value):
    """Set the fraction of overlap between adjacent images."""
    self._image_overlap = value
@property
def scan_order(self) -> str:
    """Human-readable scan order (defaults to "cols,right" if unmapped)."""
    return self._scan_order_translation_r.get(self._scan_order, "cols,right")

@scan_order.setter
def scan_order(self, value: str):
    """Set the scan order from a human-readable string (case/space insensitive)."""
    key = str(value).lower().replace(" ", "")
    self._scan_order = self._scan_order_translation.get(key, 0)
@property
def output_dir(self) -> str:
    """Directory the panorama images are written to."""
    return self._output_dir

@output_dir.setter
def output_dir(self, value: str):
    """
    Set and, if necessary, create the output directory.

    :param value: path of the output directory
    :raises AssertionError: if value is not a string
    """
    # isinstance instead of an exact type() check so str subclasses pass too
    assert isinstance(value, str), "Set the output folder to a string"
    # exist_ok avoids the race between an isdir() check and makedirs()
    os.makedirs(value, exist_ok=True)
    self._output_dir = value
@property
def panorama_fov(self) -> tuple:
    """
    Total field of view of the panorama.

    :return: (pan range, tilt range)
    :rtype: tuple
    """
    return self._pan_range, self._tilt_range

@panorama_fov.setter
def panorama_fov(self, value: tuple):
    """
    Set the pan and tilt ranges of the panorama from a fov and centre.

    :param value: 4-tuple of (pan_fov, tilt_fov, pan_centre, tilt_centre)
    :type value: tuple
    """
    try:
        pan_fov, tilt_fov, pan_centre, tilt_centre = value
    except ValueError:
        raise ValueError("You must pass an iterable with the PanFov, TiltFov, PanCentre, TiltCentre")
    half_pan = pan_fov / 2
    half_tilt = tilt_fov / 2
    self._pan_range = [pan_centre - half_pan, pan_centre + half_pan]
    self._tilt_range = [tilt_centre - half_tilt, tilt_centre + half_tilt]
    self.enumerate_positions()
@property
def first_corner(self) -> tuple:
    """
    Starting corner of the panorama.

    :return: first corner as (pan, tilt)
    :rtype: tuple[float, float]
    """
    return self._pan_range[0], self._tilt_range[1]

@first_corner.setter
def first_corner(self, value):
    """
    Set the starting corner and re-enumerate the capture positions.

    :param value: 2-element (pan, tilt) sequence
    :raises AssertionError: if value is not a 2-element list/tuple
    """
    # isinstance instead of exact type() so list/tuple subclasses pass too
    assert isinstance(value, (list, tuple)), "must be a list or tuple"
    assert len(value) == 2, "must have 2 elements"
    self._pan_range[0], self._tilt_range[1] = value
    self.enumerate_positions()
@property
def center(self) -> tuple:
    """
    Midpoint between the two panorama corners.

    :return: centre position as (pan, tilt)
    :rtype: tuple[float, float]
    """
    corner_a = np.array(self.first_corner)
    corner_b = np.array(self.second_corner)
    return tuple((corner_a + corner_b) / 2)
@property
def second_corner(self):
    """
    Finishing corner of the panorama.

    :return: second corner as (pan, tilt)
    :rtype: tuple[float, float]
    """
    return self._pan_range[1], self._tilt_range[0]

@second_corner.setter
def second_corner(self, value):
    """
    Set the finishing corner and re-enumerate the capture positions.

    :param value: 2-element (pan, tilt) sequence
    :raises AssertionError: if value is not a 2-element list/tuple
    """
    # isinstance instead of exact type() so list/tuple subclasses pass too
    assert isinstance(value, (list, tuple)), "must be a list or tuple"
    assert len(value) == 2, "must have 2 elements"
    self._pan_range[1], self._tilt_range[0] = value
    self.enumerate_positions()
def _init_csv_log(self, path: str):
self._csv_log = path
if not os.path.exists(self._csv_log):
with open(self._csv_log, 'w') as file:
file.write("image_index,pan_deg,tilt_deg,zoom_pos,focus_pos\n")
def load_csv_log(self) -> dict:
    """
    Load the csv log into a dict of columns so a capture can resume and
    the log can keep being appended to.

    :return: dict mapping column name -> list of parsed values
    """
    if not os.path.isfile(self._csv_log):
        self._init_csv_log(self._csv_log)
    columns = {"image_index": [], "pan_deg": [], "tilt_deg": [],
               "zoom_pos": [], "focus_pos": []}
    with open(self._csv_log) as fh:
        for row in csv.DictReader(fh):
            columns["image_index"].append(int(row["image_index"]))
            columns["pan_deg"].append(float(row["pan_deg"]))
            columns["tilt_deg"].append(float(row["tilt_deg"]))
            columns["zoom_pos"].append(int(row["zoom_pos"]))
            focus = row["focus_pos"]
            if focus == "None":
                # recorded value missing: fall back to the camera's current focus
                columns["focus_pos"].append(self._camera.focus_position)
            else:
                columns["focus_pos"].append(int(float(focus)))
    return columns
def write_csv_log(self, image_index, pan_pos, tilt_pos):
    """
    Append one row (index, pan, tilt, zoom, focus) to the csv log.

    Silently does nothing when the log path is unset or the file is missing.

    :param image_index: current index to be written
    :param pan_pos: the current pan position
    :param tilt_pos: the current tilt position.
    """
    if not (self._csv_log and os.path.isfile(self._csv_log)):
        return
    with open(self._csv_log, 'a') as fh:
        fh.write("{},{},{},{},{}\n".format(
            image_index, pan_pos, tilt_pos,
            self._pantilt.zoom_position,
            self._camera.focus_position))
def write_to_recovery_file(self, index, started_time):
    """
    Persist the current panorama progress to the json recovery file.

    :param index: the current index into the panorama.
    :param started_time: the time the panorama was started (formatted string).
    """
    self._recovery_file['image_index'] = index
    payload = {
        "cols": len(self._pan_pos_list),
        "rows": len(self._tilt_pos_list),
        "image_index": index,
        "sec_per_image": self._seconds_per_image,
        "started_time": started_time,
    }
    with open(self._recovery_filepath, 'w') as fh:
        fh.write(json.dumps(payload))
def take_panorama(self):
    """
    takes a panorama using the current values stored in this :class:`Panorama` object.

    Workflow: optionally autofocus at the panorama centre, resume from the
    recovery file if a recent run was interrupted, then visit every pan/tilt
    grid position, capturing (with retries) at each, while appending to the
    csv log and pushing best-effort metrics to telegraf.
    """
    ts_fmt = "%Y_%m_%d_%H_%M_00"
    tar_fmt = "%Y_%m_%d_%H"
    last_image_captured = 0
    now = datetime.datetime.now()
    if self.use_focus_at_center:
        # focus once at the centre of the pano, then lock focus for all images
        self.logger.debug("Moving to center to focus...")
        self._pantilt.position = np.mean(self._pan_range), np.mean(self._tilt_range)
        self._pantilt.zoom_position = self._zoom_position
        time.sleep(1)
        self._camera.focus()
        time.sleep(1)
        self._camera.focus_mode = "off"
    last_started = self._recovery_file.get('started_time', None)
    if last_started:
        last_started = datetime.datetime.strptime(last_started, ts_fmt)
        if int(now.timestamp()) - int(last_started.timestamp()) < self.interval.total_seconds():
            # interrupted run within the current interval: reuse its
            # timestamp so resumed files land in the same directory/names
            now = last_started
        else:
            self.logger.warning("Recovery exists, but its now too late. Starting from beginning.")
            self.write_to_recovery_file(0, now.strftime(ts_fmt))
    this_dir = os.path.join(self._output_dir, now.strftime("%Y/%Y_%m/%Y_%m_%d/%Y_%m_%d_%H"))
    os.makedirs(this_dir, exist_ok=True)
    self._csv_log = os.path.join(os.getcwd(), now.strftime("{name}-" + ts_fmt + ".csv").format(name=self.name))
    cfg = self.load_csv_log()
    focus_list = cfg.get('focus_pos', [])
    start_time = time.time()
    # this is just here in case you want to update overview.jpg
    # im1 = cv2.resize(self.camera.capture_image(), None, fx=0.1, fy=0.1)
    # overview = np.zeros((im1.shape[0]*len(self._tilt_pos_list),
    #                      im1.shape[1]*len(self._pan_pos_list),
    #                      3), np.uint8)
    # cv2.imwrite("overview.jpg", overview)
    try:
        # metrics are best-effort; capture continues without telegraf
        telegraf_client = telegraf.TelegrafClient(host="telegraf", port=8092)
    except:
        pass
    # reverse it because we should start from top and go down
    tilt_pos_list = list(reversed(self._tilt_pos_list))
    pan_pos_list = self._pan_pos_list
    if self.scan_order == 1:
        # cols left
        pan_pos_list = self._pan_pos_list
        tilt_pos_list = list(reversed(self._tilt_pos_list))
    elif self.scan_order == 3:
        # rows up
        tilt_pos_list = self._tilt_pos_list
        pan_pos_list = list(reversed(self._pan_pos_list))
    recovery_index = self._recovery_file.get('image_index', 0)
    rolling = []
    preview_width = int(os.environ.get("PREVIEW_WIDTH", 320))
    # keep the preview aspect ratio of the camera's full image size
    preview_height = int(preview_width * self.camera._image_size[1] / self.camera._image_size[0])
    if os.environ.get("OVERVIEW", None) is not None:
        # stitch small previews into one overview image as the pano progresses
        overview_width = len(pan_pos_list)*preview_width
        overview_height = len(tilt_pos_list)*preview_height
        overview_fn = os.path.join(this_dir, "{}_overview_{}.jpg".format(self.name, now.strftime(ts_fmt)))
        overview = None
        try:
            # reuse an existing overview (resume), unless its size no longer matches
            overview = Image.open(overview_fn)
            if not (overview.size[0] == overview_width and overview.size[1] == overview_height):
                overview = Image.new('RGB', (overview_width, overview_height))
        except:
            overview = Image.new('RGB', (overview_width, overview_height))
    with tempfile.TemporaryDirectory(prefix=self.name) as spool:
        def cap(_pan_pos: float, _tilt_pos: float, _image_index: int, lcap: int, _i: int, _j: int) -> int:
            """
            captures an image for the position _pan_pos,_tilt_pos with the image index _image_index

            :param _pan_pos: the pan position to take an image
            :param _tilt_pos: the tilt position to take an image
            :param _image_index: index of the current image. used to write the image filename.
            :param lcap: running count of successful captures (used for timing)
            :param _i: grid row index (overview paste offset)
            :param _j: grid column index (overview paste offset)
            :return: updated count of successful captures
            """
            this_img_capture_s = time.time()
            self._pantilt.position = _pan_pos, _tilt_pos
            self.write_csv_log(_image_index, _pan_pos, _tilt_pos)
            self.write_to_recovery_file(_image_index, now.strftime(ts_fmt))
            # retry the capture up to 15 times before giving up on this cell
            for _ in range(0, 15):
                filename = os.path.join(spool,
                                        now.strftime("{name}_" + ts_fmt + "_{index:04}").format(name=self.name,
                                                                                                index=_image_index + 1))
                try:
                    output_filenames = list(self._camera.capture_image(filename=filename))
                    # output_filenames = self._camera.capture_monkey(filename=filename)
                    if type(output_filenames) is list and len(output_filenames):
                        metric = dict()
                        try:
                            t = time.time()
                            image = self.camera._image.resize((preview_width,preview_height), Image.NEAREST)
                            yoff = _i * preview_height
                            xoff = _j * preview_width
                            if os.environ.get("OVERVIEW", None) is not None:
                                overview.paste(image, (xoff, yoff))
                                overview.save(overview_fn)
                            metric['overview_resize_s'] = time.time()-t
                            telegraf_client.metric("gigavision", metric, tags={"name": self.name})
                        except Exception as e:
                            # NOTE(review): when OVERVIEW is unset, overview_fn is
                            # undefined here — confirm intended env configuration
                            self.logger.error("couldnt write overview to {}".format(overview_fn))
                            self.logger.error(str(e))
                        # move finished files out of the spool into the output dir
                        for f in output_filenames:
                            shutil.move(f, os.path.join(this_dir, os.path.basename(f)))
                        self.logger.info("wrote image {}/{}".format(_image_index + 1,
                                                                    (len(self._pan_pos_list) * len(
                                                                        self._tilt_pos_list))))
                        lcap += 1
                        # update time per image
                        current_time = time.time()
                        self._seconds_per_image = (current_time - start_time) / lcap
                        self.logger.info("Seconds per image {0:.2f}s".format(self._seconds_per_image))
                        metric['image_capture_s'] = time.time()-this_img_capture_s
                        telegraf_client.metric("gigavision", metric, tags={"name": self.name})
                        return lcap
                except Exception as e:
                    self.logger.error("Bad things happened: {}".format(str(e)))
            else:
                # for/else: all 15 attempts completed without a successful return
                self.logger.error("failed capturing!")
            return lcap

        if self._scan_order >= 2:
            # row-major scan (rows down / rows up)
            for i, tilt_pos in enumerate(tilt_pos_list):
                for j, pan_pos in enumerate(pan_pos_list):
                    image_index = i * len(pan_pos_list) + j
                    if image_index < recovery_index:
                        # already captured before the interruption; skip
                        continue
                    t = time.time()
                    last_image_captured = cap(pan_pos, tilt_pos, image_index, last_image_captured, i, j)
                    rolling.append(time.time()-t)
                    try:
                        metric = {'timing_avg_s': sum(rolling)/len(rolling), 'rolling_index': len(rolling)}
                        telegraf_client.metric("gigavision", metric, tags={"name": self.name})
                    except:
                        pass
        else:
            # column-major scan (cols right / cols left)
            for j, pan_pos in enumerate(pan_pos_list):
                for i, tilt_pos in enumerate(tilt_pos_list):
                    image_index = j * (len(tilt_pos_list)) + i
                    if image_index < recovery_index:
                        continue
                    t = time.time()
                    last_image_captured = cap(pan_pos, tilt_pos, image_index, last_image_captured, i, j)
                    rolling.append(time.time() - t)
                    try:
                        metric = {'timing_avg_s': sum(rolling) / len(rolling), 'rolling_index': len(rolling)}
                        telegraf_client.metric("gigavision", metric, tags={"name": self.name})
                    except:
                        pass
    try:
        metric = { 'timing_avg_s': sum(rolling) / len(rolling),
                   'total_images': len(rolling),
                   "num_cols": len(self._pan_pos_list),
                   "num_rows": len(self._tilt_pos_list)}
        telegraf_client.metric("gigavision", metric, tags={"name": self.name})
    except:
        pass
    try:
        # archive the csv log alongside the images
        shutil.move(self._csv_log, os.path.join(this_dir, now.strftime("{name}-" + ts_fmt + ".csv").format(name=self.name)))
    except Exception as e:
        self.logger.error("Couldnt move csv log.")
        # self.logger.error(e)
    # pano is complete: clear the recovery state
    os.remove(self._recovery_filepath)
    self._recovery_file['image_index'] = 0
    t = time.time() - start_time
    self.logger.info("Panorama complete in {}".format(sec2human(t)))
    try:
        telegraf_client.metric("gigavision", {'timing_total_s': t}, tags={"name": self.name})
    except:
        pass
def calibrate_and_run(self):
    """
    calibrates, and takes a panorama.

    Runs a field-of-view calibration, stores the per-zoom fov lists on the
    camera, re-enumerates the grid positions, then captures the panorama.
    """
    # park the head at the middle of the pan range, level tilt, before calibrating
    self._pantilt.position = np.mean(self._pantilt.pan_range), 0
    fovlists = self.calibrate_fov_list(zoom_list=list(range(2)), increment=2)
    self.logger.info("Calibration complete")
    self.logger.info(Panorama.format_calibration(fovlists, "Calibration results: "))
    h, v = fovlists
    try:
        self._camera.zoom_list = list(range(2))
        self._camera.vfov_list = v
        self._camera.hfov_list = h
        # use the mean fov across the calibrated zoom positions
        self._camera.hfov = np.mean(h)
        self._camera.vfov = np.mean(v)
        self.enumerate_positions()
        self.logger.info(self.summary)
    except Exception as e:
        # calibration application is best-effort; still attempt the pano
        self.logger.error(str(e))
    self.take_panorama()
def run_from_config(self):
    """Log the panorama summary, then capture the panorama."""
    self.logger.info(self.summary)
    self.take_panorama()
@staticmethod
def time2seconds(t: datetime.datetime) -> int:
    """
    converts a datetime to an integer of seconds since epoch

    Falls back to seconds-since-midnight on interpreters where
    datetime.timestamp() does not exist (it was added in python 3.3).

    :param t: datetime to convert
    :return: seconds since 1970-01-01 00:00
    :rtype: int
    """
    try:
        return int(t.timestamp())
    except AttributeError:
        # timestamp() only implemented in python3.3+;
        # narrow except so real errors are not silently swallowed.
        return t.hour * 60 * 60 + t.minute * 60 + t.second
def time_to_capture(self):
    """
    Decide whether an image capture should start now.

    False outside the configured capture window (which may wrap around
    midnight) or when the current time is not on an interval boundary;
    True otherwise.

    :return: whether we should start capturing images now or not
    :rtype: bool
    """
    now = datetime.datetime.now()
    now_time = now.time()
    if self.begin_capture < self.end_capture:
        # simple window contained within a single day
        in_window = self.begin_capture <= now_time <= self.end_capture
    else:
        # window wraps around midnight
        in_window = not (self.end_capture <= now_time <= self.begin_capture)
    if not in_window:
        return False
    # only fire when we land (close to) an interval boundary
    return self.time2seconds(now) % self.interval.total_seconds() < Panorama.accuracy
@property
def next_pano(self):
    """
    Number of seconds until the next panorama is due.

    :return: time until the next panorama (in seconds)
    :rtype: int
    """
    candidate = self.time2seconds(datetime.datetime.now())
    reference = self.time2seconds(datetime.datetime.now())
    interval_s = self.interval.total_seconds()
    # walk forward one second at a time until we hit an interval boundary
    while True:
        candidate += 1
        if candidate % interval_s < Panorama.accuracy:
            break
    return candidate - reference
def run_loop(self):
    """
    Poll once per second forever, capturing a panorama whenever the
    configured capture window and interval conditions are met.
    """
    while True:
        if self.time_to_capture():
            self.logger.info(self.summary)
            self.take_panorama()
            self.logger.info("Next pano in {}".format(sec2human(self.next_pano)))
        time.sleep(1)
if __name__ == "__main__":
    # last argv element is the (optional) yaml config file path
    config_file = sys.argv[-1]
    config = dict()
    # parenthesised: the original `A or B and C` skipped the existence
    # check for ".yaml" files and tried to open missing paths
    if config_file.endswith((".yaml", ".yml")) and os.path.exists(config_file):
        with open(config_file) as config_fh:
            # safe_load: never execute arbitrary python tags from a config file
            config = yaml.safe_load(config_fh.read())
    # camera/pantilt hardware may not be reachable yet: retry until it is
    while True:
        try:
            pano = Panorama(config=config)
            break
        except Exception:
            time.sleep(10)
    pano.take_panorama()
    pano.logger.info("Next pano in {}".format(sec2human(pano.next_pano)))
    pano.run_loop()
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
from pandas.compat import PY37
from pandas import (Index, MultiIndex, CategoricalIndex,
DataFrame, Categorical, Series, qcut)
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas.util.testing as tm
def cartesian_product_for_groupers(result, args, names):
    """Reindex ``result`` to the full cartesian product of the groupers,
    preserving the Categorical nature of each categorical grouper."""

    def as_full_categorical(grouper):
        # expand a categorical grouper to one entry per category
        if isinstance(grouper, (CategoricalIndex, Categorical)):
            categories = grouper.categories
            grouper = Categorical.from_codes(np.arange(len(categories)),
                                             categories=categories,
                                             ordered=grouper.ordered)
        return grouper

    full_index = pd.MultiIndex.from_product(
        [as_full_categorical(a) for a in args], names=names)
    return result.reindex(full_index).sort_index()
def test_apply_use_categorical_name(df):
    # grouping by a qcut Categorical must carry the source column name
    # through to the resulting index
    cats = qcut(df.C, 4)

    def get_stats(group):
        # summary statistics for one group, in a fixed key order
        return {stat: getattr(group, stat)()
                for stat in ('min', 'max', 'count', 'mean')}

    result = df.groupby(cats, observed=False).D.apply(get_stats)
    assert result.index.names[0] == 'C'
def test_basic():
    """Basic groupby-on-Categorical behaviour with observed=False:
    aggregation, transform, filter and apply must keep unobserved
    categories in the result index."""
    cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
                       categories=["a", "b", "c", "d"], ordered=True)
    data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
    # unobserved category 'd' shows up as NaN
    exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True)
    expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index)
    result = data.groupby("b", observed=False).mean()
    tm.assert_frame_equal(result, expected)
    cat1 = Categorical(["a", "a", "b", "b"],
                       categories=["a", "b", "z"], ordered=True)
    cat2 = Categorical(["c", "d", "c", "d"],
                       categories=["c", "d", "y"], ordered=True)
    df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
    # single grouper
    gb = df.groupby("A", observed=False)
    exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True)
    expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)})
    result = gb.sum()
    tm.assert_frame_equal(result, expected)
    # GH 8623
    x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
                   [1, 'John P. Doe']],
                  columns=['person_id', 'person_name'])
    x['person_name'] = Categorical(x.person_name)
    g = x.groupby(['person_id'], observed=False)
    result = g.transform(lambda x: x)
    tm.assert_frame_equal(result, x[['person_name']])
    result = x.drop_duplicates('person_name')
    expected = x.iloc[[0, 1]]
    tm.assert_frame_equal(result, expected)

    def f(x):
        # first unique row per group
        return x.drop_duplicates('person_name').iloc[0]

    result = g.apply(f)
    expected = x.iloc[[0, 1]].copy()
    expected.index = Index([1, 2], name='person_id')
    expected['person_name'] = expected['person_name'].astype('object')
    tm.assert_frame_equal(result, expected)
    # GH 9921
    # Monotonic
    df = DataFrame({"a": [5, 15, 25]})
    c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
    # each row is its own group, so transform(sum) is the identity
    result = df.a.groupby(c, observed=False).transform(sum)
    tm.assert_series_equal(result, df['a'])
    tm.assert_series_equal(
        df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)),
        df['a'])
    tm.assert_frame_equal(
        df.groupby(c, observed=False).transform(sum),
        df[['a']])
    tm.assert_frame_equal(
        df.groupby(c, observed=False).transform(lambda xs: np.max(xs)),
        df[['a']])
    # Filter
    tm.assert_series_equal(
        df.a.groupby(c, observed=False).filter(np.all),
        df['a'])
    tm.assert_frame_equal(
        df.groupby(c, observed=False).filter(np.all),
        df)
    # Non-monotonic
    df = DataFrame({"a": [5, 15, 25, -5]})
    c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
    result = df.a.groupby(c, observed=False).transform(sum)
    tm.assert_series_equal(result, df['a'])
    tm.assert_series_equal(
        df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)),
        df['a'])
    tm.assert_frame_equal(
        df.groupby(c, observed=False).transform(sum),
        df[['a']])
    tm.assert_frame_equal(
        df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)),
        df[['a']])
    # GH 9603
    df = DataFrame({'a': [1, 0, 0, 0]})
    c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd')))
    result = df.groupby(c, observed=False).apply(len)
    exp_index = CategoricalIndex(
        c.values.categories, ordered=c.values.ordered)
    expected = Series([1, 0, 0, 0], index=exp_index)
    expected.index.name = 'a'
    tm.assert_series_equal(result, expected)
    # more basic
    levels = ['foo', 'bar', 'baz', 'qux']
    codes = np.random.randint(0, 4, size=100)
    cats = Categorical.from_codes(codes, levels, ordered=True)
    data = DataFrame(np.random.randn(100, 4))
    # grouping by the Categorical matches grouping by its ndarray values
    result = data.groupby(cats, observed=False).mean()
    expected = data.groupby(np.asarray(cats), observed=False).mean()
    exp_idx = CategoricalIndex(levels, categories=cats.categories,
                               ordered=True)
    expected = expected.reindex(exp_idx)
    assert_frame_equal(result, expected)
    grouped = data.groupby(cats, observed=False)
    desc_result = grouped.describe()
    idx = cats.codes.argsort()
    ord_labels = np.asarray(cats).take(idx)
    ord_data = data.take(idx)
    exp_cats = Categorical(ord_labels, ordered=True,
                           categories=['foo', 'bar', 'baz', 'qux'])
    expected = ord_data.groupby(
        exp_cats, sort=False, observed=False).describe()
    assert_frame_equal(desc_result, expected)
    # GH 10460
    expc = Categorical.from_codes(np.arange(4).repeat(8),
                                  levels, ordered=True)
    exp = CategoricalIndex(expc)
    tm.assert_index_equal((desc_result.stack().index
                           .get_level_values(0)), exp)
    exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
                 '75%', 'max'] * 4)
    tm.assert_index_equal((desc_result.stack().index
                           .get_level_values(1)), exp)
def test_level_get_group(observed):
    # GH15155: get_group on a categorical index level
    frame = DataFrame(data=np.arange(2, 22, 2),
                      index=MultiIndex(
                          levels=[pd.CategoricalIndex(["a", "b"]), range(10)],
                          labels=[[0] * 5 + [1] * 5, range(10)],
                          names=["Index1", "Index2"]))
    grouped = frame.groupby(level=["Index1"], observed=observed)
    # GH15166: expected should equal frame.loc[["a"]]
    expected = DataFrame(data=np.arange(2, 12, 2),
                         index=pd.MultiIndex(levels=[pd.CategoricalIndex(
                             ["a", "b"]), range(5)],
                             labels=[[0] * 5, range(5)],
                             names=["Index1", "Index2"]))
    result = grouped.get_group('a')
    assert_frame_equal(result, expected)
@pytest.mark.xfail(PY37, reason="flaky on 3.7, xref gh-21636")
@pytest.mark.parametrize('ordered', [True, False])
def test_apply(ordered):
    # GH 10138: with observed=True, apply/agg/mean index only observed
    # combinations; apply of a scalar keeps the observed MultiIndex too
    dense = Categorical(list('abc'), ordered=ordered)
    # 'b' is in the categories but not in the data
    missing = Categorical(
        list('aaa'), categories=['a', 'b'], ordered=ordered)
    frame = DataFrame({'missing': missing,
                       'dense': dense,
                       'values': np.arange(len(dense))})
    grouped = frame.groupby(['missing', 'dense'], observed=True)
    # missing category 'b' should still exist in the output index
    obs_index = MultiIndex.from_arrays(
        [missing, dense], names=['missing', 'dense'])
    expected = DataFrame([0, 1, 2.],
                         index=obs_index,
                         columns=['values'])
    result = grouped.apply(lambda x: np.mean(x))
    assert_frame_equal(result, expected)
    # we coerce back to ints
    expected = expected.astype('int')
    result = grouped.mean()
    assert_frame_equal(result, expected)
    result = grouped.agg(np.mean)
    assert_frame_equal(result, expected)
    # but for transform we should still get back the original index
    expected = Series(1, index=obs_index)
    result = grouped.apply(lambda x: 1)
    assert_series_equal(result, expected)
def test_observed(observed):
    """With multiple groupers, observed=True must not re-expand the output
    space of the grouper; observed=False yields the cartesian product of
    all categories."""
    # gh-14942 (implement)
    # gh-10132 (back-compat)
    # gh-8138 (back-compat)
    # gh-8869
    cat1 = Categorical(["a", "a", "b", "b"],
                       categories=["a", "b", "z"], ordered=True)
    cat2 = Categorical(["c", "d", "c", "d"],
                       categories=["c", "d", "y"], ordered=True)
    df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
    df['C'] = ['foo', 'bar'] * 2
    # multiple groupers with a non-cat
    gb = df.groupby(['A', 'B', 'C'], observed=observed)
    exp_index = pd.MultiIndex.from_arrays(
        [cat1, cat2, ['foo', 'bar'] * 2],
        names=['A', 'B', 'C'])
    expected = DataFrame({'values': Series(
        [1, 2, 3, 4], index=exp_index)}).sort_index()
    result = gb.sum()
    if not observed:
        expected = cartesian_product_for_groupers(
            expected,
            [cat1, cat2, ['foo', 'bar']],
            list('ABC'))
    tm.assert_frame_equal(result, expected)
    gb = df.groupby(['A', 'B'], observed=observed)
    exp_index = pd.MultiIndex.from_arrays(
        [cat1, cat2],
        names=['A', 'B'])
    expected = DataFrame({'values': [1, 2, 3, 4]},
                         index=exp_index)
    result = gb.sum()
    if not observed:
        expected = cartesian_product_for_groupers(
            expected,
            [cat1, cat2],
            list('AB'))
    tm.assert_frame_equal(result, expected)
    # https://github.com/pandas-dev/pandas/issues/8138
    d = {'cat':
         pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
                        ordered=True),
         'ints': [1, 1, 2, 2],
         'val': [10, 20, 30, 40]}
    df = pd.DataFrame(d)
    # Grouping on a single column
    groups_single_key = df.groupby("cat", observed=observed)
    result = groups_single_key.mean()
    exp_index = pd.CategoricalIndex(list('ab'), name="cat",
                                    categories=list('abc'),
                                    ordered=True)
    expected = DataFrame({"ints": [1.5, 1.5], "val": [20., 30]},
                         index=exp_index)
    if not observed:
        # unobserved category 'c' is reinstated as an all-NaN row
        index = pd.CategoricalIndex(list('abc'), name="cat",
                                    categories=list('abc'),
                                    ordered=True)
        expected = expected.reindex(index)
    tm.assert_frame_equal(result, expected)
    # Grouping on two columns
    groups_double_key = df.groupby(["cat", "ints"], observed=observed)
    result = groups_double_key.agg('mean')
    expected = DataFrame(
        {"val": [10, 30, 20, 40],
         "cat": pd.Categorical(['a', 'a', 'b', 'b'],
                               categories=['a', 'b', 'c'],
                               ordered=True),
         "ints": [1, 2, 1, 2]}).set_index(["cat", "ints"])
    if not observed:
        expected = cartesian_product_for_groupers(
            expected,
            [df.cat.values, [1, 2]],
            ['cat', 'ints'])
    tm.assert_frame_equal(result, expected)
    # GH 10132
    # get_group works with tuple keys regardless of observed
    for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]:
        c, i = key
        result = groups_double_key.get_group(key)
        expected = df[(df.cat == c) & (df.ints == i)]
        assert_frame_equal(result, expected)
    # gh-8869
    # with as_index
    d = {'foo': [10, 8, 4, 8, 4, 1, 1], 'bar': [10, 20, 30, 40, 50, 60, 70],
         'baz': ['d', 'c', 'e', 'a', 'a', 'd', 'c']}
    df = pd.DataFrame(d)
    cat = pd.cut(df['foo'], np.linspace(0, 10, 3))
    df['range'] = cat
    groups = df.groupby(['range', 'baz'], as_index=False, observed=observed)
    result = groups.agg('mean')
    groups2 = df.groupby(['range', 'baz'], as_index=True, observed=observed)
    expected = groups2.agg('mean').reset_index()
    tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
    # grouping by a cut() Categorical together with a plain column
    d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}
    frame = pd.DataFrame(d)
    values = pd.cut(frame['C1'], [1, 2, 3, 6])
    values.name = "cat"
    grouped = frame.groupby([values, 'C2'], observed=observed)
    idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]],
                                 names=["cat", "C2"])
    expected = DataFrame({"C1": [3, 3, 4, 5],
                          "C3": [10, 100, 200, 34]}, index=idx)
    if not observed:
        expected = cartesian_product_for_groupers(
            expected,
            [values.values, [1, 2, 3, 4]],
            ['cat', 'C2'])
    result = grouped.agg('mean')
    tm.assert_frame_equal(result, expected)
def test_observed_perf():
    # gh-14942: with observed=True the result must not expand to the
    # (non-performant) cartesian product of all groupers
    frame = DataFrame({
        'cat': np.random.randint(0, 255, size=30000),
        'int_id': np.random.randint(0, 255, size=30000),
        'other_id': np.random.randint(0, 10000, size=30000),
        'foo': 0})
    frame['cat'] = frame.cat.astype(str).astype('category')
    result = frame.groupby(
        ['cat', 'int_id', 'other_id'], observed=True).count()
    # each index level only contains the values actually observed
    for level, col in enumerate(['cat', 'int_id', 'other_id']):
        assert result.index.levels[level].nunique() == frame[col].nunique()
def test_observed_groups(observed):
    # gh-20583: .groups contains unobserved categories only when
    # observed=False
    cat = pd.Categorical(['a', 'c', 'a'], categories=['a', 'b', 'c'])
    frame = pd.DataFrame({'cat': cat, 'vals': [1, 2, 3]})
    grouped = frame.groupby('cat', observed=observed)
    result = grouped.groups
    expected = {'a': Index([0, 2], dtype='int64'),
                'c': Index([1], dtype='int64')}
    if not observed:
        expected['b'] = Index([], dtype='int64')
    tm.assert_dict_equal(result, expected)
def test_datetime():
    # GH9049: ensure backward compatibility
    # grouping by a Categorical of datetimes behaves like grouping by the
    # underlying values, with the datetime categories restored on the index
    levels = pd.date_range('2014-01-01', periods=4)
    codes = np.random.randint(0, 4, size=100)
    cats = Categorical.from_codes(codes, levels, ordered=True)
    data = DataFrame(np.random.randn(100, 4))
    result = data.groupby(cats, observed=False).mean()
    expected = data.groupby(np.asarray(cats), observed=False).mean()
    expected = expected.reindex(levels)
    expected.index = CategoricalIndex(expected.index,
                                      categories=expected.index,
                                      ordered=True)
    assert_frame_equal(result, expected)
    grouped = data.groupby(cats, observed=False)
    desc_result = grouped.describe()
    # describe() over the categorical matches describe() over the data
    # reordered by the category codes
    idx = cats.codes.argsort()
    ord_labels = cats.take_nd(idx)
    ord_data = data.take(idx)
    expected = ord_data.groupby(ord_labels, observed=False).describe()
    assert_frame_equal(desc_result, expected)
    tm.assert_index_equal(desc_result.index, expected.index)
    tm.assert_index_equal(
        desc_result.index.get_level_values(0),
        expected.index.get_level_values(0))
    # GH 10460
    expc = Categorical.from_codes(
        np.arange(4).repeat(8), levels, ordered=True)
    exp = CategoricalIndex(expc)
    tm.assert_index_equal((desc_result.stack().index
                           .get_level_values(0)), exp)
    exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
                 '75%', 'max'] * 4)
    tm.assert_index_equal((desc_result.stack().index
                           .get_level_values(1)), exp)
def test_categorical_index():
    # grouping on a categorical index and on a categorical column must
    # both produce a CategoricalIndex result
    rng = np.random.RandomState(12345)
    levels = ['foo', 'bar', 'baz', 'qux']
    codes = rng.randint(0, 4, size=20)
    cats = Categorical.from_codes(codes, levels, ordered=True)
    frame = DataFrame(
        np.repeat(
            np.arange(20), 4).reshape(-1, 4), columns=list('abcd'))
    frame['cats'] = cats
    exp_index = CategoricalIndex(
        Categorical.from_codes(
            [0, 1, 2, 3], levels, ordered=True), name='cats')
    expected = frame[list('abcd')].groupby(cats.codes, observed=False).sum()
    expected.index = exp_index
    # with a cat index
    result = frame.set_index('cats').groupby(level=0, observed=False).sum()
    assert_frame_equal(result, expected)
    # with a cat column, should produce a cat index
    result = frame.groupby('cats', observed=False).sum()
    assert_frame_equal(result, expected)
def test_describe_categorical_columns():
    # GH 11558: describe() keeps CategoricalIndex columns categorical
    cats = pd.CategoricalIndex(['qux', 'foo', 'baz', 'bar'],
                               categories=['foo', 'bar', 'baz', 'qux'],
                               ordered=True)
    frame = DataFrame(np.random.randn(20, 4), columns=cats)
    stacked = frame.groupby([1, 2, 3, 4] * 5).describe().stack()
    tm.assert_index_equal(stacked.columns, cats)
    tm.assert_categorical_equal(stacked.columns.values, cats.values)
def test_unstack_categorical():
    # GH11558 (example is taken from the original issue)
    frame = pd.DataFrame({'a': range(10),
                          'medium': ['A', 'B'] * 5,
                          'artist': list('XYXXY') * 2})
    frame['medium'] = frame['medium'].astype('category')
    gcat = frame.groupby(
        ['artist', 'medium'], observed=False)['a'].count().unstack()
    # unstacked categorical level becomes categorical columns
    described = gcat.describe()
    exp_columns = pd.CategoricalIndex(['A', 'B'], ordered=False,
                                      name='medium')
    tm.assert_index_equal(described.columns, exp_columns)
    tm.assert_categorical_equal(described.columns.values,
                                exp_columns.values)
    summed = gcat['A'] + gcat['B']
    expected = pd.Series([6, 4], index=pd.Index(['X', 'Y'], name='artist'))
    tm.assert_series_equal(summed, expected)
def test_bins_unequal_len():
    # GH3011: grouping by bins whose length differs from the series
    # must raise
    series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
    bins = pd.cut(series.dropna().values, 4)

    # len(bins) != len(series) here; the context-manager form of
    # pytest.raises replaces the deprecated callable form
    with pytest.raises(ValueError):
        series.groupby(bins).mean()
def test_as_index():
    # GH13204: as_index=False with a categorical grouper keeps the
    # grouper as a regular (categorical) column in the result
    df = DataFrame({'cat': Categorical([1, 2, 2], [1, 2, 3]),
                    'A': [10, 11, 11],
                    'B': [101, 102, 103]})
    result = df.groupby(['cat', 'A'], as_index=False, observed=True).sum()
    expected = DataFrame(
        {'cat': Categorical([1, 2], categories=df.cat.cat.categories),
         'A': [10, 11],
         'B': [101, 205]},
        columns=['cat', 'A', 'B'])
    tm.assert_frame_equal(result, expected)
    # function grouper
    f = lambda r: df.loc[r, 'A']
    result = df.groupby(['cat', f], as_index=False, observed=True).sum()
    expected = DataFrame(
        {'cat': Categorical([1, 2], categories=df.cat.cat.categories),
         'A': [10, 22],
         'B': [101, 205]},
        columns=['cat', 'A', 'B'])
    tm.assert_frame_equal(result, expected)
    # another not in-axis grouper (conflicting names in index)
    s = Series(['a', 'b', 'b'], name='cat')
    result = df.groupby(['cat', s], as_index=False, observed=True).sum()
    tm.assert_frame_equal(result, expected)
    # is original index dropped?
    group_columns = ['cat', 'A']
    expected = DataFrame(
        {'cat': Categorical([1, 2], categories=df.cat.cat.categories),
         'A': [10, 11],
         'B': [101, 205]},
        columns=['cat', 'A', 'B'])
    for name in [None, 'X', 'B', 'cat']:
        df.index = Index(list("abc"), name=name)
        # a name that is both a column and the index name is ambiguous:
        # pandas warns but must still resolve to the column
        if name in group_columns and name in df.index.names:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = df.groupby(
                    group_columns, as_index=False, observed=True).sum()
        else:
            result = df.groupby(
                group_columns, as_index=False, observed=True).sum()
        tm.assert_frame_equal(result, expected)
def test_preserve_categories():
    # GH-13179: groupby must keep the grouper's categories in the result
    categories = list('abc')
    # ordered=True: the index is in category order for both sort options
    frame = DataFrame({'A': pd.Categorical(list('ba'),
                                           categories=categories,
                                           ordered=True)})
    index = pd.CategoricalIndex(categories, categories, ordered=True)
    for sort in (True, False):
        tm.assert_index_equal(
            frame.groupby('A', sort=sort, observed=False).first().index,
            index)
    # ordered=False: sort=False keeps order of appearance instead
    frame = DataFrame({'A': pd.Categorical(list('ba'),
                                           categories=categories,
                                           ordered=False)})
    sort_index = pd.CategoricalIndex(categories, categories, ordered=False)
    nosort_index = pd.CategoricalIndex(list('bac'), list('bac'),
                                       ordered=False)
    tm.assert_index_equal(
        frame.groupby('A', sort=True, observed=False).first().index,
        sort_index)
    tm.assert_index_equal(
        frame.groupby('A', sort=False, observed=False).first().index,
        nosort_index)
def test_preserve_categorical_dtype():
    # GH13743, GH13854
    # The categorical grouper column must keep its dtype (categories and
    # orderedness) in the aggregated result, for both as_index settings.
    df = DataFrame({'A': [1, 2, 1, 1, 2],
                    'B': [10, 16, 22, 28, 34],
                    'C1': Categorical(list("abaab"),
                                      categories=list("bac"),
                                      ordered=False),
                    'C2': Categorical(list("abaab"),
                                      categories=list("bac"),
                                      ordered=True)})
    # single grouper
    exp_full = DataFrame({'A': [2.0, 1.0, np.nan],
                          'B': [25.0, 20.0, np.nan],
                          'C1': Categorical(list("bac"),
                                            categories=list("bac"),
                                            ordered=False),
                          'C2': Categorical(list("bac"),
                                            categories=list("bac"),
                                            ordered=True)})
    for col in ('C1', 'C2'):
        flat = df.groupby(by=col, as_index=False, observed=False).mean()
        via_index = df.groupby(
            by=col, as_index=True, observed=False).mean().reset_index()
        expected = exp_full.reindex(columns=flat.columns)
        tm.assert_frame_equal(flat, expected)
        tm.assert_frame_equal(via_index, expected)
def test_categorical_no_compress():
    # Grouping by a Categorical must not drop unused categories: the
    # result index carries the full category set of the grouper.
    values = Series(np.random.randn(9))

    codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
    cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
    result = values.groupby(cats, observed=False).mean()
    expected = values.groupby(codes, observed=False).mean()
    expected.index = CategoricalIndex(expected.index,
                                      categories=cats.categories,
                                      ordered=cats.ordered)
    assert_series_equal(result, expected)

    # An unused category (2) must still appear in the result index.
    codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
    cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
    result = values.groupby(cats, observed=False).mean()
    expected = values.groupby(
        codes, observed=False).mean().reindex(cats.categories)
    expected.index = CategoricalIndex(expected.index,
                                      categories=cats.categories,
                                      ordered=cats.ordered)
    assert_series_equal(result, expected)

    cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
                       categories=["a", "b", "c", "d"], ordered=True)
    frame = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
    grouped = frame.groupby("b", observed=False).mean()
    tm.assert_numpy_array_equal(grouped["a"].values,
                                np.array([1, 2, 4, np.nan]))
def test_sort():
    # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8
    # Counting per bucket must come back ordered by category so that a
    # bar plot of the result has a sorted x axis, e.g.
    # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
    df = DataFrame({'value': np.random.randint(0, 10000, 100)})
    labels = ["{0} - {1}".format(lo, lo + 499)
              for lo in range(0, 10000, 500)]
    df = df.sort_values(by=['value'], ascending=True)
    df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
                               right=False,
                               labels=Categorical(labels, labels))
    res = df.groupby(['value_group'], observed=False)['value_group'].count()
    # re-sorting by the numeric lower bound must be a no-op
    exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
    exp.index = CategoricalIndex(exp.index, name=exp.index.name)
    tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame([['(7.5, 10]', 10, 10],
['(7.5, 10]', 8, 20],
['(2.5, 5]', 5, 30],
['(5, 7.5]', 6, 40],
['(2.5, 5]', 4, 50],
['(0, 2.5]', 1, 60],
['(5, 7.5]', 7, 70]], columns=['range', 'foo', 'bar'])
df['range'] = Categorical(df['range'], ordered=True)
index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',
'(7.5, 10]'], name='range', ordered=True)
expected_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
columns=['foo', 'bar'], index=index)
col = 'range'
result_sort = df.groupby(col, sort=True, observed=False).first()
assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
assert_frame_equal(result_sort, expected_sort)
df['range'] = Categorical(df['range'], ordered=False)
index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',
'(7.5, 10]'], name='range')
expected_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
columns=['foo', 'bar'], index=index)
index = CategoricalIndex(['(7.5, 10]', '(2.5, 5]', '(5, 7.5]',
'(0, 2.5]'],
categories=['(7.5, 10]', '(2.5, 5]',
'(5, 7.5]', '(0, 2.5]'],
name='range')
expected_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
index=index, columns=['foo', 'bar'])
col = 'range'
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
# use same data as test_groupby_sort_categorical, which category is
# corresponding to datetime.month
df = DataFrame({'dt': [datetime(2011, 7, 1), datetime(2011, 7, 1),
datetime(2011, 2, 1), datetime(2011, 5, 1),
datetime(2011, 2, 1), datetime(2011, 1, 1),
datetime(2011, 5, 1)],
'foo': [10, 8, 5, 6, 4, 1, 7],
'bar': [10, 20, 30, 40, 50, 60, 70]},
columns=['dt', 'foo', 'bar'])
# ordered=True
df['dt'] = Categorical(df['dt'], ordered=True)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt', ordered=True)
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt', ordered=True)
col = 'dt'
assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first())
# ordered = False
df['dt'] = Categorical(df['dt'], ordered=False)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt')
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt')
col = 'dt'
assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first())
assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first())
def test_empty_sum():
    # https://github.com/pandas-dev/pandas/issues/18678
    # Sum over an empty (unobserved) category: 0 unless min_count demands
    # more values than the group holds, in which case NaN.
    df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
                                           categories=['a', 'b', 'c']),
                       'B': [1, 2, 1]})
    idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A')
    grouped = df.groupby("A", observed=False).B

    # 0 by default
    tm.assert_series_equal(grouped.sum(),
                           pd.Series([3, 1, 0], idx, name='B'))
    # min_count=0
    tm.assert_series_equal(grouped.sum(min_count=0),
                           pd.Series([3, 1, 0], idx, name='B'))
    # min_count=1
    tm.assert_series_equal(grouped.sum(min_count=1),
                           pd.Series([3, 1, np.nan], idx, name='B'))
    # min_count>1
    tm.assert_series_equal(grouped.sum(min_count=2),
                           pd.Series([3, np.nan, np.nan], idx, name='B'))
def test_empty_prod():
    # https://github.com/pandas-dev/pandas/issues/18678
    # Product over an empty (unobserved) category: 1 unless min_count
    # demands at least one value, in which case NaN.
    df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
                                           categories=['a', 'b', 'c']),
                       'B': [1, 2, 1]})
    idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A')
    grouped = df.groupby("A", observed=False).B

    # 1 by default
    tm.assert_series_equal(grouped.prod(),
                           pd.Series([2, 1, 1], idx, name='B'))
    # min_count=0
    tm.assert_series_equal(grouped.prod(min_count=0),
                           pd.Series([2, 1, 1], idx, name='B'))
    # min_count=1
    tm.assert_series_equal(grouped.prod(min_count=1),
                           pd.Series([2, 1, np.nan], idx, name='B'))
def test_groupby_multiindex_categorical_datetime():
    # https://github.com/pandas-dev/pandas/issues/21390
    # Grouping by two categoricals (one datetime-valued) must produce the
    # full cartesian-product MultiIndex, with NaN for unobserved combos.
    # Use the long-form "1min" frequency alias ("T" is deprecated in
    # modern pandas), and pass observed=False explicitly like the sibling
    # tests do, since the default flips in pandas 3.0.
    df = pd.DataFrame({
        'key1': pd.Categorical(list('abcbabcba')),
        'key2': pd.Categorical(
            list(pd.date_range('2018-06-01 00', freq='1min',
                               periods=3)) * 3),
        'values': np.arange(9),
    })
    result = df.groupby(['key1', 'key2'], observed=False).mean()

    idx = pd.MultiIndex.from_product(
        [pd.Categorical(['a', 'b', 'c']),
         pd.Categorical(pd.date_range('2018-06-01 00', freq='1min',
                                      periods=3))],
        names=['key1', 'key2'])
    expected = pd.DataFrame(
        {'values': [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
    assert_frame_equal(result, expected)
| |
#!/usr/bin/env python
# Jay Smith
# jay.smith@fireeye.com
#
########################################################################
# Copyright 2012 Mandiant
# Copyright 2014 FireEye
#
# Mandiant licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
########################################################################
# Traverse a directory, trying to find all exports for all valid PE
# executable files. Computes common shellcode hashes and stores them
# to a sqlite database file for later use, such as in IDA Pro.
import os
import sys
import time
import zlib
import ctypes
import os.path
import sqlite3
# pefile is required to parse PE export tables; bail out early with an
# installation hint if it is missing. Uses "except ... as" and print()
# so the guard itself is valid on both Python 2 and Python 3 (the
# original "except ImportError, err" form is a SyntaxError on Python 3).
try:
    import pefile
except ImportError as err:
    print("Error while importing pefile module: %s" % str(err))
    print("Please make sure it is installed: http://code.google.com/p/pefile/")
    sys.exit(1)
#This is a list of interesting dll's to use if not traversing a directory
# (common Windows system DLLs whose exports shellcode typically resolves)
INTERESTING_DLLS = [
    'kernel32.dll', 'comctl32.dll', 'advapi32.dll', 'comdlg32.dll',
    'gdi32.dll', 'msvcrt.dll', 'netapi32.dll', 'ntdll.dll',
    'ntoskrnl.exe', 'oleaut32.dll', 'psapi.dll', 'shell32.dll',
    'shlwapi.dll', 'srsvc.dll', 'urlmon.dll', 'user32.dll',
    'winhttp.dll', 'wininet.dll', 'ws2_32.dll', 'wship6.dll',
    'advpack.dll', 'ole32.dll', 'rstrtmgr.dll', 'iphlpapi.dll',
]

# Module-level verbosity flag (read elsewhere in this script)
VERBOSE=False

############################################################
# SQL queries
############################################################

# Check whether a table of the given name already exists (sqlite_master
# holds one row per schema object).
sql_testTableExists='''
SELECT name
FROM sqlite_master
WHERE name=?;
'''

# Schema: symbol_hashes rows reference source_libs (lib_key) and
# hash_types (hash_type); two indexes cover hash lookups with and
# without a known hash type.
sql_create_tables='''
create table symbol_hashes (
hash_key integer primary key,
hash_val integer,
hash_type integer,
lib_key integer,
symbol_name varchar(256)
);
create table source_libs (
lib_key integer primary key,
lib_name varchar(256)
);
create table hash_types (
hash_type integer primary key,
hash_size integer,
hash_name varchar(256),
hash_code text
);
--Index just the hash vals for when we don't know the hash type
create index idx_hash_val on symbol_hashes (hash_val);
--Index with hash_type prefix for when we know the type we're
-- looking for
create index idx_hash_type_hash_val on symbol_hashes (hash_type, hash_val);
'''

# Register a hash algorithm (bit size, name, pseudocode description).
sql_add_hash_type='''
insert into hash_types (
hash_size,
hash_name,
hash_code
) values (?,?,?);
'''

# Look up a hash algorithm's key by name only.
sql_get_hash_type='''
select
hash_type
from hash_types
where hash_name=?;
'''

# Look up a hash algorithm's key by name and bit size.
sql_get_hash_type_hash_size='''
select
hash_type
from hash_types
where hash_name=? and hash_size=?;
'''

# Register a source library (DLL) by name.
sql_add_source_lib='''
insert into source_libs (
lib_name
) values (?);
'''

# Store one computed (hash value, algorithm, library, symbol) row.
sql_add_symbol_hash='''
insert into symbol_hashes (
hash_val,
hash_type,
lib_key,
symbol_name
) values (?,?,?,?);
'''

# Reverse lookup: all rows matching a hash value (any algorithm).
sql_lookup_hash_value='''
select
hash_key,
hash_val,
hash_type,
source_lib,
symbol_name
from symbol_hashes
where hash_val=?;
'''

# Reverse lookup restricted to a single algorithm.
sql_lookup_hash_value_hash_type='''
select
hash_key,
hash_val,
hash_type,
source_lib,
symbol_name
from symbol_hashes
where hash_val=? and hash_type=?;
'''

# Look up a source library's key by name (used to avoid duplicates).
sql_find_source_lib_by_name='''
select
lib_key
from source_libs
where lib_name=?;
'''

# Exact-duplicate check before inserting a symbol hash row.
sql_find_symbol_hash_type_lib_symbol='''
select
hash_key
from symbol_hashes
where hash_val=? and hash_type=? and lib_key=? and symbol_name=?;
'''

############################################################
# Start of functions to implement operator primitives
############################################################

# All-ones masks for each supported emulated register width.
ROTATE_BITMASK = {
    8 : 0xff,
    16 : 0xffff,
    32 : 0xffffffff,
    64 : 0xffffffffffffffff,
}
def rcr(inVal, numShifts, cb, dataSize=32):
    '''rotate carry right instruction emulation.

    Rotates ``inVal`` right by ``numShifts`` through an extra carry bit
    ``cb``, over a ``dataSize``-bit register (8/16/32/64).

    Returns a ``(value, carry_out)`` tuple. The original implementation
    returned a bare int when ``numShifts == 0``, which broke callers that
    tuple-unpack the result (``ax, c_out = rcr(...)``); a zero shift now
    consistently returns the (masked) value and the unchanged carry.

    Raises ValueError for an unsupported dataSize or shift count.
    '''
    if (dataSize != 8) and (dataSize != 16) and (dataSize != 32) and (dataSize != 64):
        raise ValueError('Bad dataSize')
    bitMask = (1 << dataSize) - 1
    #make sure carry in bit is only 0 or 1
    cb = cb & 1
    if numShifts == 0:
        return (bitMask & inVal, cb)
    if (numShifts < 0) or (numShifts > dataSize):
        raise ValueError('Bad numShifts')
    # treat the operand as a (dataSize+1)-bit value with the carry on top,
    # then rotate that wide value right
    inVal = inVal | (cb << dataSize)
    x = (dataSize - numShifts) + 1
    res = (inVal >> numShifts) | (inVal << x)
    return (bitMask & res, 1 & (res >> dataSize))
def ror(inVal, numShifts, dataSize=32):
    '''rotate right instruction emulation.

    Rotates ``inVal`` right by ``numShifts`` within a ``dataSize``-bit
    register (8/16/32/64) and masks the result to that width. A zero
    shift returns the value unchanged (and unmasked), matching the
    original behaviour. Raises ValueError for bad arguments.
    '''
    if numShifts == 0:
        return inVal
    if (numShifts < 0) or (numShifts > dataSize):
        raise ValueError('Bad numShifts')
    if (dataSize != 8) and (dataSize != 16) and (dataSize != 32) and (dataSize != 64):
        raise ValueError('Bad dataSize')
    bitMask = (1 << dataSize) - 1
    return bitMask & ((inVal >> numShifts) | (inVal << (dataSize-numShifts)))

def rol(inVal, numShifts, dataSize=32):
    '''rotate left instruction emulation.

    Mirror of :func:`ror`; the unused ``currVal`` local of the original
    implementation has been removed. Raises ValueError for bad arguments.
    '''
    if numShifts == 0:
        return inVal
    if (numShifts < 0) or (numShifts > dataSize):
        raise ValueError('Bad numShifts')
    if (dataSize != 8) and (dataSize != 16) and (dataSize != 32) and (dataSize != 64):
        raise ValueError('Bad dataSize')
    bitMask = (1 << dataSize) - 1
    return bitMask & ((inVal << numShifts) | (inVal >> (dataSize-numShifts)))
############################################################
# Start of hash implementations
############################################################
def poisonIvyHash(inStr,fName):
    """String hash used by the Poison Ivy RAT.

    Two 16-bit accumulators (cx, dx) are mixed with each input byte via an
    8-round rotate-through-carry loop, then both are complemented and
    concatenated into one 32-bit value (dx in the high word, cx low).
    """
    #need a null at the end of the string
    if inStr[-1] != '\x00':
        inStr = inStr + '\x00'
    cx = 0xffff
    dx = 0xffff
    for b1 in inStr:
        bx = 0
        ax = ord(b1) ^ (cx & 0xff)
        # shift the cx:dx byte pipeline right by 8 and load the per-byte
        # round counter (8) into dx's high byte
        cx = ((cx>>8)&0xff) | ((dx&0xff)<<8)
        dx = ((dx>>8)&0xff) | 0x800
        while (dx & 0xff00) != 0:
            c_in = bx & 1
            bx = bx >> 1
            ax, c_out = rcr(ax, 1, c_in, 16)
            if c_out != 0:
                # feedback constants 0xedb8:0x8320 (cf. CRC-32's 0xedb88320)
                ax = ax ^ 0x8320
                bx = bx ^ 0xedb8
            # decrement the round counter kept in dx's high byte
            dx = (dx&0xff) | (((((dx>>8)&0xff)-1)&0xff)<<8)
        cx = cx ^ ax
        dx = dx ^ bx
    dx = 0xffff & ~dx
    cx = 0xffff & ~cx
    return 0xffffffff & ((dx<<16) | cx)
pseudocode_poisonIvyHash = '''Too hard to explain.\nString hash function from POISON IVY RAT.\nSee code for information'''
def rol3XorEax(inString,fName):
    """Hash: OR each byte into eax, fold eax into ecx with ROL3 and +1,
    shifting eax left a byte per round. Returns ecx."""
    if inString is None:
        return 0
    eax = 0
    ecx = 0
    for ch in inString:
        eax |= ord(ch)
        ecx = rol(ecx ^ eax, 0x3, 32) + 1
        eax = (eax << 8) & 0xffffffff
    return ecx
pseudocode_rol3XorEax = '''eax := 0;
ecx := 0;
for c in input_string {
eax := eax | c ;
ecx := ecx ^ eax;
ecx := ROL(ecx, 0x3);
ecx : ecx + 1;
eax := 0xffffffff & (eax << 8);
};
return ecx;
'''
def rol7AddHash32(inString,fName):
    """32-bit hash: rotate accumulator left 7, then add each byte."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc, 0x7, 32) + ord(ch)
    return acc
pseudocode_rol7AddHash32 = '''acc := 0;
for c in input_string {
acc := ROL(acc, 7):
acc := acc + c;
}
'''
def rol5AddHash32(inString,fName):
    """32-bit hash: rotate accumulator left 5, then add each byte."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc, 0x5, 32) + ord(ch)
    return acc
pseudocode_rol5AddHash32 = '''acc := 0;
for c in input_string {
acc := ROL(acc, 5):
acc := acc + c;
}
'''
def addRor4WithNullHash32(inString,fName):
    """Add each byte (incl. a trailing NUL) into the low byte only,
    then rotate the accumulator right 4."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString + "\x00":
        low = (acc + ord(ch)) & 0xff
        acc = ror((acc & 0xffffff00) + low, 0x4, 32)
    return acc
pseudocode_addRor4WithNullHash32 = '''acc := 0;
for c in input_string_with_trailing_NULL {
acc := (acc & 0xffffff00) + ((acc + c) & 0xff);
acc := ROR(acc, 4):
}
'''
def ror7AddHash32(inString,fName):
    """32-bit hash: rotate accumulator right 7, then add each byte."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = ror(acc, 0x7, 32) + ord(ch)
    return acc
pseudocode_ror7AddHash32 = '''acc := 0;
for c in input_string {
acc := ROR(acc, 7):
acc := acc + c;
}
'''
def ror9AddHash32(inString,fName):
    """32-bit hash: rotate accumulator right 9, then add each byte."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = ror(acc, 0x9, 32) + ord(ch)
    return acc
pseudocode_ror9AddHash32 = '''acc := 0;
for c in input_string {
acc := ROR(acc, 9);
acc := acc + c;
}
'''
def ror11AddHash32(inString,fName):
    """32-bit hash: rotate accumulator right 11, then add each byte."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = ror(acc, 0xb, 32) + ord(ch)
    return acc
pseudocode_ror11AddHash32 = '''acc := 0;
for c in input_string {
acc := ROR(acc, 11);
acc := acc + c;
}
'''
def ror13AddHash32(inString,fName):
    """32-bit hash: rotate accumulator right 13, then add each byte
    (the classic Metasploit/shellcode export hash)."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = ror(acc, 0xd, 32) + ord(ch)
    return acc
pseudocode_ror13AddHash32 = '''acc := 0;
for c in input_string {
acc := ROR(acc, 13);
acc := acc + c;
}
'''
def ror13AddWithNullHash32(inString,fName):
    """ror13-add hash including the string's trailing NUL byte."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString + "\x00":
        acc = ror(acc, 0xd, 32) + ord(ch)
    return acc
pseudocode_ror13AddWithNullHash32 = '''acc := 0;
for c in input_string_with_trailing_NULL {
acc := ROR(acc, 13);
acc := acc + c;
}
'''
def ror13AddHash32Sub1(inString,fName):
    '''Same as ror13AddHash32, but subtract 1 afterwards'''
    return ror13AddHash32(inString,fName) - 1
# NOTE(review): the assignment below reuses the name pseudocode_ror13AddHash32
# (already defined next to the plain ror13AddHash32 above) instead of a new
# pseudocode_ror13AddHash32Sub1, so from here on the non-Sub1 variable holds
# the "- 1" description. Any table that pairs ror13AddHash32 with
# pseudocode_ror13AddHash32 will therefore show the wrong pseudocode.
# Renaming it here alone would just move the mismatch to the Sub1 entry;
# fix together with the referencing table.
pseudocode_ror13AddHash32 = '''acc := 0;
for c in input_string {
acc := ROR(acc, 13);
acc := acc + c;
}
acc := acc - 1;
'''
def shl7Shr19XorHash32(inString,fName):
    """Hash: ROL-7-style mix ((acc<<7)|(acc>>25), masked) XORed with each
    byte XOR 0xf4."""
    val = 0
    for ch in inString:
        rotated = ((val << 7) | (val >> 0x19)) & 0xffffffff
        val = rotated ^ (0xff & (ord(ch) ^ 0xf4))
    return val
pseudocode_shl7Shr19XorHash32 = '''acc := 0;
for c in input_string {
t0 = (acc << 7);
t1 = (acc >> 0x19);
t2 = t0 | t1;
acc = t2 ^ c ^ 0xf4;
}
'''
def sll1AddHash32(inString,fName):
    """Hash: add each byte OR'd with 0x60, then shift left 1, mod 2**32."""
    if inString is None:
        return 0
    val = 0
    for ch in inString:
        val = ((val + ((ord(ch) | 0x60) & 0xff)) << 1) & 0xffffffff
    return val
pseudocode_sll1AddHash32 = '''acc := 0;
for c in input_string {
acc = acc + (c | 0x60);
acc = acc << 1;
}
'''
def crc32(inString,fName):
    """Standard CRC-32 of the symbol name, masked to unsigned 32 bits.

    ``zlib.crc32`` requires a bytes-like object on Python 3; str input is
    encoded latin-1 so every character maps to the identical single byte
    that Python 2's byte-string input produced. Bytes pass through
    unchanged (on Python 2, ``str`` is ``bytes``, so behaviour is
    unaffected).
    """
    data = inString if isinstance(inString, bytes) else inString.encode('latin-1')
    return 0xffffffff & (zlib.crc32(data))
def ror13AddHash32AddDll(inString,fName):
    """ror13-add hash of the symbol plus a ror13-add hash of the
    uppercased DLL name, with three extra rotations folded into the DLL
    hash and one extra rotation of the symbol hash before combining."""
    dllHash = 0
    for ch in fName:
        dllHash = ror(dllHash, 0xd, 32)
        b = ord(ch)
        if b >= 97:          # uppercase lowercase letters
            b -= 32
        dllHash = int(dllHash) + b
    for _ in range(3):
        dllHash = ror(dllHash, 0xd, 32)
    if inString is None:
        return 0
    val = 0
    for ch in inString:
        val = ror(val, 0xd, 32) + ord(ch)
    val = ror(val, 0xd, 32) + dllHash
    # fold a single possible 32-bit overflow back into range
    if val >= 4294967296:
        val -= 4294967296
    return val
pseudocode_ror13AddHash32AddDll = '''acc := 0;
for c in input_string {
acc := ROR(acc, 13);
acc := acc + c;
}
acc := acc + ror13add(DllName);
'''
def mult21AddHash32(inString,fName):
    """Polynomial hash: acc = acc * 0x21 + byte, mod 2**32."""
    acc = 0
    for ch in inString:
        acc = (acc * 0x21 + ord(ch)) & 0xffffffff
    return acc
pseudocode_hashMult21 = '''acc := 0;
for c in input_string {
acc := acc * 0x21;
acc := acc + c;
}
'''
def add1505Shl5Hash32(inString,fName):
    """djb2-style hash, seed 0x1505: val = val*33 + byte, mod 2**32
    (val + (val<<5) == val*33)."""
    val = 0x1505
    for ch in inString:
        val = (val * 33 + ord(ch)) & 0xFFFFFFFF
    return val
pseudocode_add1505Shl5Hash32 = '''val := 0x1505;
for c in input_string {
val := val + (val << 5);
val := val + c;
}
'''
def rol7XorHash32(inString,fName):
    """32-bit hash: rotate accumulator left 7, then XOR each byte."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc, 0x7, 32) ^ (0xff & ord(ch))
    return acc
pseudocode_rol7XorHash32 = '''acc := 0;
for c in input_string {
acc := ROL(acc, 7):
acc := acc ^ c;
}
'''
def rol7AddXor2Hash32(inString,fName):
    """32-bit hash: rotate accumulator left 7, then add (byte XOR 2)."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc, 0x7, 32) + (ord(ch) ^ 2)
    return acc
pseudocode_rol7AddXor2Hash32 = '''acc := 0;
for c in input_string {
acc := ROL(acc, 7):
acc := acc + (c ^ 2);
}
'''
def dualaccModFFF1Hash(inString,fName):
    """Adler-32-like checksum: two accumulators mod 0xFFF1, combined as
    (hi << 16) | lo."""
    if inString is None:
        return 0
    hi, lo = 0, 1
    for ch in inString:
        lo = (lo + ord(ch)) % 0x0FFF1
        hi = (hi + lo) % 0x0FFF1
    return (hi << 0x10) | lo
pseudocode_dualaccModFFF1Hash = '''
acc_1 := 0
acc_2 := 0
for c in input_string {
acc_2 = (acc_2 + c) % 0x0FFF1
acc_1 = (acc_1 + acc2) % 0x0FFF1
}
return (acc_1 << 0x10) | acc2
'''
def hash_Carbanak(inString,fName):
    """PJW/ELF-style string hash as used by Carbanak: shift in 4 bits per
    byte, folding overflow from the top nibble back down."""
    acc = 0
    for ch in inString:
        acc = (acc << 4) + ord(ch)
        high = acc & 0xF0000000
        if high:
            acc = ((high >> 24) ^ acc) & 0x0FFFFFFF
    return acc
pseudocode_hash_Carbanak = '''
acc_1 = 0
for c in input_string:
acc_1 = (acc_1 << 4) + c
if (acc_1 & 0xF0000000):
acc_1 = (((acc_1 & 0xF0000000) >> 24) ^ acc_1) & 0x0FFFFFFF
return acc_1
'''
def hash_ror13AddUpperDllnameHash32(inString,fName):
    """ror13-add hash of the symbol plus a ror13-add hash of the
    uppercased DLL name, each masked to 32 bits per step."""
    if inString is None:
        return 0
    dllAcc = 0
    for ch in fName:
        dllAcc = ror(dllAcc, 0xd, 32)
        b = ord(ch)
        if b >= 0x61:        # uppercase lowercase letters
            b -= 0x20
        dllAcc = (dllAcc + b) & 0xffffffff
    symAcc = 0
    for ch in inString:
        symAcc = (ror(symAcc, 0xd, 32) + ord(ch)) & 0xffffffff
    return 0xffffffff & (dllAcc + symAcc)
pseudocode_hash_ror13AddUpperDllnameHash32 = '''
acc := 0
dllhash := 0
for i in dllname {
dllhash := ROR(acc, 13);
dllhash := dllhash + toupper(c);
}
for i in input_string {
acc := ROR(acc, 13);
acc := acc + toupper(c);
}
return acc + dllhash
'''
# as seen in Neutrino Bot launcher
def fnv1Xor67f(inString,fName):
    """32-bit FNV-1a hash of the string, XORed with 0x67f."""
    acc = 0x811c9dc5
    for ch in inString:
        acc = ((acc ^ ord(ch)) * 0x1000193) & 0xffffffff
    return acc ^ 0x67f
pseudocode_fnv1Xor67f = '''
acc = 0x811c9dc5
for c in inString:
acc = (0x1000193 * (ord(c) ^ acc)) & 0xffffffff
return acc ^ 0x67f
return acc
'''
def ror13AddHash32DllSimple(inString,fName):
    """ror13-add hash of the symbol plus a ror13-add hash of the
    uppercased DLL name (no extra rotations), masked to 32 bits."""
    dllAcc = 0
    for ch in fName:
        dllAcc = ror(dllAcc, 0xd, 32)
        b = ord(ch)
        if b >= 97:          # uppercase lowercase letters
            b -= 32
        dllAcc = int(dllAcc) + b
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = ror(acc, 0xd, 32) + ord(ch)
    return (acc + dllAcc) & 0xFFFFFFFF
pseudocode_ror13AddHash32DllSimple = '''acc := 0;
for c in input_string {
acc := ROR(acc, 13);
acc := acc + c;
}
acc := acc + ror13add(dll_name);
'''
def imul83hAdd(inString,fName):
    """Polynomial hash: acc = acc * 0x83 + byte, mod 2**32."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = (acc * 131 + ord(ch)) & 0xFFFFFFFF
    return acc
pseudocode_imul83hAdd = '''acc := 0;
for c in input_string {
acc := acc * 83h:
acc := acc + c;
}
'''
def ror13AddHash32Sub20h(inString,fName):
    """ror13-add hash over the uppercased input string (bytes >= 'a'
    have 0x20 subtracted before adding)."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = ror(acc, 0xd, 32)
        b = ord(ch)
        if b >= 97:
            b -= 32
        acc = int(acc) + b
    return acc
pseudocode_ror13AddHash32Sub20h = '''acc := 0;
for c in input_string {
acc := ROR(acc, 13);
if (c > 0x61)
c = c - 0x20;
acc := acc + c;
}
'''
def rol3XorHash32(inString,fName):
    """32-bit hash: rotate accumulator left 3, then XOR each byte."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc, 0x3, 32) ^ ord(ch)
    return acc
pseudocode_rol3XorHash32 = '''acc := 0;
for c in input_string {
acc := ROL(acc, 3):
acc := acc ^ c;
}
'''
def chAddRol8Hash32(inString,fName):
    """Hash: XOR the byte into bits 8-15, ROL 8, then XOR in the
    accumulator's own bits 8-15 (the "CH" byte).

    The original extracted that byte by formatting the accumulator as
    "%08x" and slicing chars [4:6]; that is exactly (acc >> 8) & 0xff.
    """
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc ^ (ord(ch) << 8), 0x8, 32)
        acc ^= (acc >> 8) & 0xff
    return acc
pseudocode_chAddRol8Hash32 = '''acc := 0;
for c in input_string {
acc := ch ^ c
acc := ROL(acc, 8):
acc := cl ^ ch;
}
'''
def xorShr8Hash32(inString,fName):
    """Hash seeded 0xFFFFFFFF: acc = low32((c ^ acc) * acc) ^ (acc >> 8).

    The original extracted the product's low 32 bits by formatting it as
    "%16x" and slicing chars [8:16]; since the product of two 32-bit
    values always fits in 16 hex digits, that slice is exactly
    (product & 0xffffffff).
    """
    if inString is None:
        return 0
    acc = 0xFFFFFFFF
    for ch in inString:
        mixed = (ord(ch) ^ acc) * acc
        acc = (mixed & 0xffffffff) ^ (acc >> 8)
    return acc
pseudocode_xorShr8Hash32 = '''acc := 0;
for c in input_string {
acc = (acc >> 8) ^ acc * (acc ^ c);
}
'''
def addRor13Hash32(inString,fName):
    """32-bit hash: add each byte first, then rotate right 13."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = ror(acc + ord(ch), 0xd, 32)
    return acc
pseudocode_addRor13Hash32 = '''acc := 0;
for c in input_string {
acc := acc + c;
acc := ROR(acc, 13);
}
'''
def addRor13HashOncemore32(inString,fName):
    """Like addRor13Hash32, with one extra ROR 13 after the loop."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = ror(acc + ord(ch), 0xd, 32)
    return ror(acc, 0xd, 32)
pseudocode_addRor13HashOncemore32 = '''acc := 0;
for c in input_string {
acc := acc + c;
acc := ROR(acc, 13);
}
acc := ROR(acc, 13);
'''
def addRol5HashOncemore32(inString,fName):
    """Add each byte then ROL 5, with one extra ROL 5 after the loop."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc + ord(ch), 0x5, 32)
    return rol(acc, 0x5, 32)
pseudocode_addRol5HashOncemore32 = '''acc := 0;
for c in input_string {
acc := acc + c;
acc := ROL(acc, 5);
}
acc := ROL(acc, 5);
'''
def or21hXorRor11Hash32(inString,fName):
    """XOR (byte | 0x21) into the accumulator, then rotate by 11.

    NOTE(review): despite the "Ror11" name and pseudocode, the code
    rotates LEFT (rol), matching the original implementation; preserved.
    """
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc ^ (ord(ch) | 0x21), 0xb, 32)
    return acc
pseudocode_or21hXorRor11Hash32 = '''acc := 0;
for c in input_string {
chr_or := chr | 21h;
acc := acc ^ chr_or;
acc := ROR(acc, 11);
}
'''
def or23hXorRor17Hash32(inString,fName):
    """XOR (byte | 0x23) into the accumulator, then rotate by 17.

    NOTE(review): as above, the code rotates LEFT (rol) despite the
    "Ror17" name; preserved.
    """
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc ^ (ord(ch) | 0x23), 0x11, 32)
    return acc
pseudocode_or23hXorRor17Hash32 = '''acc := 0;
for c in input_string {
chr_or := chr | 23h;
acc := acc ^ chr_or;
acc := ROR(acc, 17);
}
'''
def rol9AddHash32(inString,fName):
    """32-bit hash: rotate accumulator left 9, then add each byte."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc, 0x9, 32) + ord(ch)
    return acc
pseudocode_rol9AddHash32 = '''acc := 0;
for c in input_string {
acc := ROL(acc, 9):
acc := acc + c;
}
'''
def rol9XorHash32(inString,fName):
    """32-bit hash: rotate accumulator left 9, then XOR each byte."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc, 0x9, 32) ^ ord(ch)
    return acc
pseudocode_rol9XorHash32 = '''acc := 0;
for c in input_string {
acc := ROL(acc, 9):
acc := acc ^ c;
}
'''
def xorRol9Hash32(inString,fName):
    """32-bit hash: XOR each byte first, then rotate left 9."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc ^ ord(ch), 0x9, 32)
    return acc
pseudocode_xorRol9Hash32 = '''acc := 0;
for c in input_string {
acc := acc ^ c;
acc := ROL(acc, 9):
}
'''
def shl7Shr19AddHash32(inString,fName):
    """Hash: ROL-7-style mix ((acc<<7)|(acc>>25), masked) plus each byte."""
    val = 0
    for ch in inString:
        rotated = ((val << 7) | (val >> 0x19)) & 0xffffffff
        val = rotated + (0xff & ord(ch))
    return val
pseudocode_shl7Shr19AddHash32 = '''acc := 0;
for c in input_string {
t0 = (acc << 7);
t1 = (acc >> 0x19);
t2 = t0 | t1;
acc = t2 + c;
}
'''
def playWith0xe8677835Hash(inString,fName):
    """CRC-32-like LSB-first bit hash using the reversed polynomial
    0xe8677835, seeded and finalized with 0xFFFFFFFF."""
    acc = 0xFFFFFFFF
    for ch in inString:
        acc ^= ord(ch)
        for _ in range(8):
            if acc & 0x1:
                acc ^= 0xe8677835
            acc >>= 1
    return acc ^ 0xFFFFFFFF
pseudocode_playWith0xe8677835Hash = '''
TBC
'''
def rol5XorHash32(inString,fName):
    """32-bit hash: rotate left 5, then XOR in (byte | 0x20)."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        acc = rol(acc, 0x5, 32) ^ (ord(ch) | 0x20)
    return acc
pseudocode_rol5XorHash32 = '''acc := 0;
for c in input_string {
acc := ROL(acc, 5):
acc := acc ^ c;
}
'''
def shl7SubHash32DoublePulser(inString,fName):
    """Hash: acc = acc*127 + byte (via (acc<<7) - acc) mod 2**32, with one
    extra *127 pass after the loop (DoublePulsar)."""
    acc = 0
    for ch in inString:
        acc = ((acc << 7) - acc) & 0xffffffff
        acc = acc + (0xff & ord(ch))
    return ((acc << 7) - acc) & 0xffffffff
pseudocode_shl7SubHash32DoublePulser = '''acc := 0;
for c in input_string {
t0 = (acc << 7);
t2 = t0 - t1;
acc = t2 + c;
}
'''
def imul21hAddHash32(inString,fName):
    """djb2-variant, seed 0x1505: acc = acc*0x21 + (byte & 0xFFFFFFDF),
    mod 2**32 (the & 0xDF mask uppercases ASCII letters)."""
    if inString is None:
        return 0
    val = 0x1505
    for ch in inString:
        val = ((val * 0x21) + (ord(ch) & 0xFFFFFFDF)) & 0xFFFFFFFF
    return val
pseudocode_imul21hAddHash32 = '''acc := 0x1505;
for c in input_string {
acc := acc * 21h;
acc := acc + (c & 0xFFFFFFDF);
}
acc := SHL(acc, 7) - acc
'''
def crc32bzip2lower(inString,fName):
    """CRC-32/BZIP2 (MSB-first, polynomial 0x04C11DB7) of the lowercased
    string.

    Fixes the Python-2-only ``xrange`` (NameError on Python 3) by using
    ``range``, and caches the 256-entry table on the function object so it
    is built once instead of on every call.
    """
    table = getattr(crc32bzip2lower, '_table', None)
    if table is None:
        table = []
        for i in range(256):
            v = i << 24
            for _ in range(8):
                if (v & 0x80000000) == 0:
                    v = (2 * v) & 0xffffffff
                else:
                    v = ((2 * v) ^ 0x4C11DB7) & 0xffffffff
            table.append(v)
        crc32bzip2lower._table = table
    result = 0xffffffff
    for c in inString:
        result = (table[ord(c.lower()) ^ ((result >> 24) & 0xff)] ^
                  (result << 8)) & 0xffffffff
    return (result ^ 0xffffffff) & 0xffffffff
def shr2Shl5XorHash32(inString,fName):
    """Hash seeded 0x4e67c6a7; a leading "Nt"/"Zw" syscall prefix is
    stripped first. Per byte: acc ^= (c + (acc>>2) + (acc<<5)) mod 2**32."""
    acc = 0x4e67c6a7
    if inString.startswith(("Nt", "Zw")):
        inString = inString[2:]
    for ch in inString:
        acc ^= (ord(ch) + (acc >> 2) + (acc << 5)) & 0xffffffff
    return acc
pseudocode_shr2Shl5XorHash32 = '''acc := 0x4e67c6a7;
if input_string.startswith("Nt") or input_string.startswith("Zw") {
input_string += 2;
}
for c in input_string {
t0 := (acc >> 2);
t1 := (acc << 5);
acc := acc ^ (c + t0 + t1);
}
'''
def rol8Xor0xB0D4D06Hash32(inString,fName):
    """Smoke Loader ("Smork_bot") hash: XOR (byte & 0xDF) in, ROL 8, add
    (byte & 0xDF); final value XORed with 0xB0D4D06 and masked."""
    if inString is None:
        return 0
    acc = 0
    for ch in inString:
        folded = ord(ch) & 0xDF     # uppercases ASCII letters
        acc = rol(acc ^ folded, 0x8, 32) + folded
    return (acc ^ 0xB0D4D06) & 0xffffffff
pseudocode_rol8Xor0xB0D4D06Hash32 = '''acc := 0;
for c in input_string {
acc := ROL(acc, 8):
acc := acc ^ c ^ 0xB0D4D06;
}
Smork_bot
'''
def crc32Xor0xca9d4d4e(inString,fName):
    """Standard CRC-32 of the name XORed with 0xca9d4d4e.

    ``zlib.crc32`` requires bytes on Python 3; str input is encoded
    latin-1 (byte-preserving), bytes pass through unchanged so Python 2
    behaviour is unaffected.
    """
    data = inString if isinstance(inString, bytes) else inString.encode('latin-1')
    return (0xffffffff & (zlib.crc32(data))) ^ 0xca9d4d4e
def adler32_666(inString,fName):
    """Adler-32 (seed 666) of the uppercased name, masked to 32 bits.

    ``zlib.adler32`` requires bytes on Python 3; str input is encoded
    latin-1 (byte-preserving) after uppercasing, bytes pass through
    unchanged so Python 2 behaviour is unaffected.
    """
    upper = inString.upper()
    data = upper if isinstance(upper, bytes) else upper.encode('latin-1')
    return zlib.adler32(data, 666) & 0xffffffff
def shift0x82F63B78(inString,fName):
    """Per-character 5-step LSB-first shift hash using the CRC-32C
    reversed polynomial 0x82F63B78 over the case-folded (| 0x20) byte;
    the final value is XORed with 0xBC.

    NOTE(review): the inner feedback-bit selections in steps 2-4 use a
    bare 0x78 rather than 0x82F63B78 -- presumably a quirk faithfully
    transcribed from the original malware; preserved as-is, confirm
    against a reference sample before "fixing".
    """
    val = 0
    for i in inString:
        # step 1: fold the case-folded byte in, one polynomial step
        v1 = ((((ord(i) | 0x20) ^ val) >> 1) ^ (0x82F63B78 * (((ord(i) | 0x20) ^ val) & 1))) & 0xffffffff
        # steps 2-4: two chained polynomial steps each (note the 0x78)
        v2 = ((((v1 >> 1) ^ (0x82F63B78 * (v1 & 1))) >> 1) ^ (0x82F63B78 * (((v1 >> 1) ^ (0x78 * (v1 & 1))) & 1))) & 0xffffffff
        v3 = ((((v2 >> 1) ^ (0x82F63B78 * (v2 & 1))) >> 1) ^ (0x82F63B78 * (((v2 >> 1) ^ (0x78 * (v2 & 1))) & 1))) & 0xffffffff
        v4 = ((((v3 >> 1) ^ (0x82F63B78 * (v3 & 1))) >> 1) ^ (0x82F63B78 * (((v3 >> 1) ^ (0x78 * (v3 & 1))) & 1))) & 0xffffffff
        # step 5: final polynomial step for this character
        val = ((v4 >> 1) ^ (0x82F63B78 * (v4 & 1))) & 0xffffffff
    return val ^ 0xBC
pseudocode_contiApiHashing = '''Too hard to explain.\nAPI string hash function from Conti ransomware.\nSee code for information'''
def contiApiHashing(inString, fName):
    """MurmurHash2-style API hash (seed 0xFF889912, multiplier
    0x5BD1E995) over the lowercased input, as used by Conti ransomware.

    The string is first copied byte-by-byte into API_buffer (8 bytes at a
    time plus a remainder pass), lowercased, then consumed 4 bytes at a
    time as little-endian words, with a 1-3 byte tail mixed in separately.
    """
    # copy the string into API_buffer in blocks of 8
    API_buffer = []
    i = len(inString) >> 3
    count = 0
    while i != 0:
        for index in range(0, 8):
            API_buffer.append(inString[index + count])
        count += 8
        i -= 1
    # copy the remaining (len % 8) characters
    if len(inString) & 7 != 0:
        v8 = len(inString) & 7
        while v8 != 0:
            API_buffer.append(inString[count])
            count += 1
            v8 -= 1
    hash_val = 0
    # lowercase and convert every buffered character to its byte value
    for i in range(0, len(API_buffer)):
        API_buffer[i] = ord(API_buffer[i].lower())
    v15 = 0xFF889912          # hash seed / running body accumulator
    string_length_2 = len(inString)
    API_buffer_count = 0
    # body: mix one little-endian 32-bit word per round
    if len(inString) >= 4:
        count = string_length_2 >> 2
        # string_length_2 becomes len % 4 (the tail size)
        string_length_2 = (string_length_2 - 4 *
                           (string_length_2 >> 2)) & 0xFFFFFFFF
        while True:
            temp_buffer_val = API_buffer[API_buffer_count +
                                         3] << 24 | API_buffer[API_buffer_count +
                                                               2] << 16 | API_buffer[API_buffer_count +
                                                                                     1] << 8 | API_buffer[API_buffer_count]
            temp = (0x5BD1E995 * temp_buffer_val) & 0xFFFFFFFF
            API_buffer_count += 4
            v15 = ((0x5BD1E995 * (temp ^
                                  (temp >> 0x18))) & 0xFFFFFFFF) ^ ((0x5BD1E995 * v15) & 0xFFFFFFFF)
            count -= 1
            if count == 0:
                break
    # tail: XOR the remaining 1-3 bytes into hash_val (none if len % 4 == 0)
    v18 = string_length_2 - 1
    v19 = v18 - 1
    if v18 == 0:
        hash_val ^= API_buffer[API_buffer_count]
    elif v19 == 0:
        hash_val ^= API_buffer[API_buffer_count + 1] << 8
        hash_val ^= API_buffer[API_buffer_count]
    elif v19 == 1:
        hash_val ^= API_buffer[API_buffer_count + 2] << 16
        hash_val ^= API_buffer[API_buffer_count + 1] << 8
        hash_val ^= API_buffer[API_buffer_count]
    # finalization: fold tail, body accumulator and length together
    v20 = (0x5BD1E995 * hash_val) & 0xFFFFFFFF
    edi = (0x5BD1E995 * len(inString)) & 0xFFFFFFFF
    eax = v20 >> 0x18
    eax ^= v20
    ecx = (0x5BD1E995 * eax) & 0xFFFFFFFF
    eax = (0x5BD1E995 * v15) & 0xFFFFFFFF
    ecx ^= eax
    eax = edi
    eax >>= 0x18
    eax ^= edi
    edx = (0x5BD1E995 * ecx) & 0xFFFFFFFF
    eax = (0x5BD1E995 * eax) & 0xFFFFFFFF
    edx ^= eax
    eax = edx
    eax >>= 0xD
    eax ^= edx
    ecx = (0x5BD1E995 * eax) & 0xFFFFFFFF
    eax = ecx
    eax >>= 0xF
    eax ^= ecx
    return eax
def fnv1(inString, fName):
    """32-bit FNV hash of inString (xor-then-multiply variant).

    fName is unused; it is kept so all hash functions share the same
    (string, filename) call signature.
    """
    acc = 0x811c9dc5  # FNV 32-bit offset basis
    for ch in inString:
        acc = ((acc ^ ord(ch)) * 0x1000193) & 0xffffffff
    return acc
pseudocode_fnv1 = '''
acc = 0x811c9dc5
for c in inString:
acc = (0x1000193 * (ord(c) ^ acc)) & 0xffffffff
return acc
'''
############################################################
# The list of tuples of (supported hash name, hash size, pseudo_code)
# Each entry names a module-level hash function, gives its output width
# in bits, and the human-readable pseudo-code stored with it in the DB.
HASH_TYPES = [
    ('ror7AddHash32', 32, pseudocode_ror7AddHash32),
    ('ror9AddHash32', 32, pseudocode_ror9AddHash32),
    ('ror11AddHash32', 32, pseudocode_ror11AddHash32),
    ('ror13AddHash32', 32, pseudocode_ror13AddHash32),
    ('ror13AddWithNullHash32', 32, pseudocode_ror13AddWithNullHash32),
    ('ror13AddHash32AddDll', 32, pseudocode_ror13AddHash32AddDll),
    ('ror13AddHash32DllSimple', 32, pseudocode_ror13AddHash32DllSimple),
    ('ror13AddHash32Sub20h', 32, pseudocode_ror13AddHash32Sub20h),
    ('ror13AddHash32Sub1', 32, pseudocode_ror13AddHash32),
    ('addRor4WithNullHash32', 32, pseudocode_addRor4WithNullHash32),
    ('addRor13Hash32', 32, pseudocode_addRor13Hash32),
    ('addRor13HashOncemore32', 32, pseudocode_addRor13HashOncemore32),
    ('rol3XorEax', 32, pseudocode_rol3XorEax),
    ('rol3XorHash32', 32, pseudocode_rol3XorHash32),
    ('rol5AddHash32', 32, pseudocode_rol5AddHash32),
    ('addRol5HashOncemore32', 32, pseudocode_addRol5HashOncemore32),
    ('rol7AddHash32', 32, pseudocode_rol7AddHash32),
    ('rol7AddXor2Hash32', 32, pseudocode_rol7AddXor2Hash32),
    ('rol7XorHash32', 32, pseudocode_rol7XorHash32),
    ('rol5XorHash32', 32, pseudocode_rol5XorHash32),
    ('rol8Xor0xB0D4D06Hash32', 32, pseudocode_rol8Xor0xB0D4D06Hash32),
    ('chAddRol8Hash32', 32, pseudocode_chAddRol8Hash32),
    ('rol9AddHash32', 32, pseudocode_rol9AddHash32),
    ('rol9XorHash32', 32, pseudocode_rol9XorHash32),
    ('xorRol9Hash32', 32, pseudocode_xorRol9Hash32),
    ('shl7Shr19XorHash32', 32, pseudocode_shl7Shr19XorHash32),
    ('shl7Shr19AddHash32', 32, pseudocode_shl7Shr19AddHash32),
    ('shl7SubHash32DoublePulser', 32, pseudocode_shl7SubHash32DoublePulser),
    ('sll1AddHash32', 32, pseudocode_sll1AddHash32),
    ('shr2Shl5XorHash32', 32, pseudocode_shr2Shl5XorHash32),
    ('xorShr8Hash32', 32, pseudocode_xorShr8Hash32),
    ('imul83hAdd', 32, pseudocode_imul83hAdd),
    ('imul21hAddHash32', 32, pseudocode_imul21hAddHash32),
    ('or21hXorRor11Hash32', 32, pseudocode_or21hXorRor11Hash32),
    ('or23hXorRor17Hash32', 32, pseudocode_or23hXorRor17Hash32),
    ('playWith0xe8677835Hash', 32, pseudocode_playWith0xe8677835Hash),
    ('poisonIvyHash', 32, pseudocode_poisonIvyHash),
    ('crc32', 32, 'Standard crc32'),
    ('crc32Xor0xca9d4d4e', 32, 'crc32 ^ 0xCA9D4D4E'),
    ('crc32bzip2lower', 32, 'crc32 bzip2 and str lower'),
    ('mult21AddHash32', 32, pseudocode_hashMult21),
    ('add1505Shl5Hash32', 32, pseudocode_add1505Shl5Hash32),
    ('dualaccModFFF1Hash', 32, pseudocode_dualaccModFFF1Hash),
    ('hash_Carbanak', 32, pseudocode_hash_Carbanak),
    ('hash_ror13AddUpperDllnameHash32', 32, pseudocode_hash_ror13AddUpperDllnameHash32),
    ('fnv1Xor67f', 32, pseudocode_fnv1Xor67f),
    ('adler32_666', 32, 'Adler32 with starting value 666'),
    ('shift0x82F63B78', 32, 'like crc32c'),
    ('contiApiHashing', 32, pseudocode_contiApiHashing),
    ('fnv1', 32, pseudocode_fnv1)
]
############################################################
# Database creator
############################################################
class ShellcodeDbCreator(object):
    """Builds the shellcode-hash sqlite database.

    Walks a directory of PE files and stores every export symbol name,
    hashed with each algorithm registered in HASH_TYPES, into the
    symbol_hashes table.  (Python 2 code.)
    """
    def __init__(self, dbPath, dirName):
        # dbPath: sqlite database file to create/update.
        # dirName: directory containing the DLL/EXE files to process.
        self.dbPath = dbPath
        self.dirName = dirName
        self.conn = sqlite3.connect(dbPath)
        self.initDb()
        self.initHashesDict()
    def close(self):
        # Release the sqlite connection; the object is unusable afterwards.
        self.conn.close()
        self.conn = None
    def run(self):
        #process all the files in the given directory
        self.processDir(self.dirName)
    def initDb(self):
        """Create the schema if missing and register all hash types."""
        #check for tables, create if not present
        if not self.checkForTable('symbol_hashes'):
            cur = self.conn.executescript(sql_create_tables)
            self.conn.commit()
        #add the known hashtypes
        for hashName, hashSize, hashCode in HASH_TYPES:
            self.addHashType(hashName, hashSize, hashCode)
    def initHashesDict(self):
        #The hashes dict will store tuple (hashtype_key, dyn method),
        # indexed by name. used to iterate over when processing export names.
        self.hashes = {}
        for hashName, hashSize, hashCode in HASH_TYPES:
            try:
                # Resolve the module-level function with the same name as
                # the HASH_TYPES entry.
                meth = globals()[hashName]
                hashType = self.getHashTypeByName(hashName)
                self.hashes[hashName] = (hashType, meth)
            except AttributeError, err:
                # NOTE(review): globals()[missing] raises KeyError, not
                # AttributeError, so a missing function would propagate
                # instead of being reported here -- confirm intended.
                print "Could not find method %s" % hashName
    def processDir(self, dirName):
        """Hash the export symbols of every PE file directly in dirName."""
        for fName in os.listdir(dirName):
            filePath = os.path.join(dirName, fName)
            if not os.path.isfile(filePath):
                #print "Could not find file: %s. Skipping" % fName
                continue
            try:
                peFile = pefile.PE(filePath)
                if ((not hasattr(peFile, "DIRECTORY_ENTRY_EXPORT")) or (peFile.DIRECTORY_ENTRY_EXPORT is None)):
                    if VERBOSE:
                        print "No exports: %s" % filePath
                else:
                    #add the library to the lib table
                    print "Processing file %s" % filePath
                    time1 = time.time()
                    libKey = self.addSourceLib(fName)
                    symCount = 0
                    # Hash every named export with every registered hash.
                    for sym in peFile.DIRECTORY_ENTRY_EXPORT.symbols:
                        if sym.name is not None:
                            symCount += 1
                            for hashName in self.hashes.keys():
                                hashType, hashMeth = self.hashes[hashName]
                                #print "Trying to hash: %s:%s" % (hashName, sym.name)
                                symHash = hashMeth(sym.name,fName)
                                #print " Done hashing: %08x:%s" % (symHash, sym.name)
                                if symHash is not None:
                                    self.addSymbolHash(symHash, hashType, libKey, sym.name)
                    #commit outstanding transaction
                    self.conn.commit()
                    time2 = time.time()
                    timeDiff = time2 - time1
                    print "Processed %d export symbols in %.02f seconds: %s" % (symCount, timeDiff, filePath)
            except pefile.PEFormatError, err:
                if VERBOSE:
                    print "Skipping non-PE file %s: %s" % (filePath, str(err))
            except Exception, err:
                if VERBOSE:
                    print "Skipping %s: %s" % (filePath, str(err))
                # NOTE(review): unexpected errors are re-raised after the
                # optional message, aborting the whole directory walk.
                raise
    def addHashType(self, hashName, hashSize, code):
        """Insert a hash type row if absent.

        Returns the new row id, or None (implicitly) when the
        (name, size) pair is already registered.
        """
        #check if the hashname already exists
        cur = self.conn.execute(sql_get_hash_type_hash_size, (hashName, hashSize))
        retList = cur.fetchall()
        if len(retList) > 0:
            return
        cur = self.conn.execute(sql_add_hash_type, (hashSize, hashName, code))
        self.conn.commit()
        if cur is None:
            raise RuntimeError("Cursor is None following hash type insert")
        if cur.lastrowid is None:
            raise RuntimeError("lastrowid is None following hash type insert")
        return cur.lastrowid
    def getHashTypeByName(self, hashName):
        '''
        Returns None if the hashName is not found, else returns
        the integer hash type key for the requested hash
        '''
        cur = self.conn.execute(sql_get_hash_type, (hashName, ))
        retList = cur.fetchall()
        if len(retList) == 0:
            return None
        elif len(retList) > 1:
            print "ERROR: database in odd state. Multiple entries for hash name: %s" % hashName
        #always return first entry, even on error
        return retList[0][0]
    def getSourceLibByName(self, libName):
        '''
        Returns None if the libName is not found, else returns
        the integer key for the requested souce lib.
        '''
        cur = self.conn.execute(sql_find_source_lib_by_name, (libName, ))
        retList = cur.fetchall()
        if len(retList) == 0:
            return None
        elif len(retList) > 1:
            print "ERROR: database in odd state. Multiple entries for source lib: %s" % libName
        #always return first entry, even on error
        return retList[0][0]
    def addSourceLib(self, libName):
        '''
        Adds the given source lib to the db (if not already present) & returns the lib key.
        '''
        #lookup the library, insert if it doesn't exist
        libKey = self.getSourceLibByName(libName)
        if libKey is None:
            cur = self.conn.execute(sql_add_source_lib, (libName, ))
            self.conn.commit()
            if cur is None:
                raise RuntimeError("Cursor is None following source lib insert")
            if cur.lastrowid is None:
                raise RuntimeError("lastrowid is None following source lib insert")
            return cur.lastrowid
        else:
            return libKey
    def addSymbolHash(self, hashVal, hashType, libKey, symbolName):
        '''Insert one (hash, type, lib, symbol) row unless already present.

        Note: requires explicit commit afterwards by caller.  Hash values
        are stored as signed 64-bit via ctypes.c_int64 for sqlite.
        '''
        #determine if tuple (hashVal, hashType, libKey, symbolName) already exists or not
        #print "Trying to add symbol: %s %s, %s %s, %s %s, %s %s" % (
        #    type(hashVal), str(hashVal),
        #    type(hashType), str(hashType),
        #    type(libKey), str(libKey),
        #    type(symbolName), str(symbolName))
        cur = self.conn.execute(sql_find_symbol_hash_type_lib_symbol,
            (ctypes.c_int64(hashVal).value, hashType, libKey, symbolName)
        )
        retList = cur.fetchall()
        if len(retList) == 0:
            #insert it now
            cur = self.conn.execute(sql_add_symbol_hash,
                (ctypes.c_int64(hashVal).value, hashType, libKey, symbolName)
            )
            if cur is None:
                raise RuntimeError("Cursor is None following symbol hash insert")
            if cur.lastrowid is None:
                raise RuntimeError("lastrowid is None following symbol hash insert")
            return cur.lastrowid
        else:
            #print "Skipping duplicate hash: %08x %08x %08x %s" % (hashVal, hashType, libKey, symbolName)
            pass
    def checkForTable(self, tableName):
        '''
        Returns True if the given table name already exists, else returns False.
        '''
        cur = self.conn.execute(sql_testTableExists, (tableName,))
        row = cur.fetchone()
        if row is None:
            #raise UnpreparedDatabaseException("Missing database table: %s" % tableName)
            return False
        return True
if __name__ == '__main__':
    # Usage: python <script> <db_path> <dll_dir>
    if len(sys.argv) != 3:
        print "python %s <db_path> <dll_dir>" % sys.argv[0]
        sys.exit(1)
    dbPath = sys.argv[1]
    walkPath = sys.argv[2]
    # Build/extend the hash DB from every PE file in walkPath.
    hasher = ShellcodeDbCreator(dbPath, walkPath)
    hasher.run()
    hasher.close()
    print "Done with symbol name hashing"
| |
from __future__ import division
from bisect import bisect_left
from stat_helper import a12s as a12rank
import bins
import csv
__author__ = "Jianfeng Chen"
__copyright__ = "Copyright (C) 2016 Jianfeng Chen"
__license__ = "MIT"
__version__ = "2.0"
__email__ = "jchen37@ncsu.edu"
def str2num(s):
    """Best-effort numeric conversion: try int, then float; anything
    unconvertible is returned untouched.  Numbers pass straight through."""
    if type(s) in (float, int):
        return s
    for cast in (int, float):
        try:
            return cast(s)
        except ValueError:
            continue
    return s
def median(l):
    """
    Return the upper median of list l (element at index len//2 after
    sorting).  l WILL NOT be changed.
    :param l:
    :return:
    >>> median([4,2,2,2,1,1,1,1,1,1,1,1,1])
    1
    """
    ordered = sorted(l)
    return ordered[len(ordered) // 2]
def binrange(data_list, enough=None, cohen=0.2, maxBins=16, minBin=4, trivial=1.05):
    """
    Compute bin boundaries for data_list via bins.bins.
    :return: list of bin edges {a,b,c,d,e} meaning [a,b] (b,c] (c,d] (d,e]
    """
    divided = bins.bins(t=data_list, enough=enough, cohen=cohen,
                        maxBins=maxBins, minBin=minBin, trivial=trivial)
    # First edge is the low end of the first bin; the rest are bin uppers.
    return [divided[0].lo] + [r.up for r in divided]
def apply_bin_range(datalist, enough=None, cohen=0.2, maxBins=16, minBin=4, trivial=1.05):
    """Map every value in datalist to the index of the bin it falls in
    (edges computed by binrange); empty input is returned unchanged."""
    if len(datalist) == 0:
        return datalist
    cuts = binrange(datalist, enough, cohen, maxBins, minBin, trivial)
    return [bisect_left(cuts, value) for value in datalist]
def attr_norm(all_elements):
    """
    Build a (normalize, denormalize) pair of closures for one attribute.
    :param all_elements: all the elements for one attribute (a bare value
        is treated as a one-element list)
    :return: two functions: the first maps a raw value to [0, 1]; the
        second maps a normalized value back into the attribute's range.
    e.g.
        loc = [100,200,100,300]
        norm_loc, denorm_loc = attr_norm(loc)
        map(norm_loc, loc)    # [0.0, 0.5, 0.0, 1.0]
    """
    if type(all_elements) is not list:
        all_elements = [all_elements]
    top = max(all_elements)
    bottom = min(all_elements)

    def norm(element):
        # Degenerate range collapses everything to 1.
        return (element - bottom) / (top - bottom) if top != bottom else 1

    def denorm(element):
        s = element * (top - bottom) + bottom if top != bottom else bottom
        if bottom <= s <= top:
            return s
        # Out-of-range values are reflected, then clamped into range.
        if bottom < s:
            s = 2 * bottom - s
        else:
            s = 2 * top - s
        return max(min(s, top), bottom)

    return norm, denorm
def euclidean_dist(x, y):
    """
    Euclidean distance between x and y.
    :param x: instance x -- a list or a bare number
    :param y: instance y -- a list or a bare number
    :return: the distance as a float
    """
    if type(x) is not list:
        x = [x]
    if type(y) is not list:
        y = [y]
    assert len(x) == len(y), "the dimension of two parameters must be the same"
    total = 0
    for a, b in zip(x, y):
        total += (a - b) ** 2
    return total ** 0.5
def normalize_cols_for_table(table):
    """
    Normalize a row-major table column by column into [0, 1]
    using attr_norm; returns the table in row-major order again.
    :param table: list of rows
    :return:
    """
    normed_cols = []
    for column in zip(*table):
        scale, _ = attr_norm(list(column))
        normed_cols.append(map(scale, column))
    return map(list, zip(*normed_cols))
def del_col_in_table(list_of_list, col_index):
    """
    Delete one column or multiple columns in the table (list of list).
    :param list_of_list: data table (row-major)
    :param col_index: index of the col; a single number or a list; may be
        negative (counted from the right)
    :return: new pruned table as a list of row lists

    Fix: the original mutated the caller's col_index list in place while
    normalising negative indices; this version leaves both arguments
    untouched.
    """
    if type(col_index) is not list:
        col_index = [col_index]
    width = len(list_of_list[0])
    # Normalise negative column indices without mutating the caller's list.
    drop = set(i + width if i < 0 else i for i in col_index)
    kept_cols = [col for i, col in enumerate(zip(*list_of_list))
                 if i not in drop]
    return [list(row) for row in zip(*kept_cols)]
def load_csv(folder, file_name, has_header=True):
    """
    Load folder/file_name.csv.
    :param folder:
    :param file_name: name without the .csv suffix
    :param has_header:
    :return: (header, rows) when has_header, else rows
    """
    folder = (folder if folder.endswith('/') else folder + '/').replace('//', '/')
    with open(folder + file_name + '.csv', 'r') as db:
        reader = csv.reader(db)
        header = next(reader) if has_header else None
        content = [line for line in reader]
    if has_header:
        return header, content
    return content
def write_csv(folder, file_name, content, header=None):
    """Write rows (and an optional header row) to folder/file_name.csv,
    replacing any existing file."""
    with open(folder + '/' + file_name + '.csv', 'w') as f:
        writer = csv.writer(f)
        if header is not None:
            writer.writerow(header)
        writer.writerows(content)
def append_csv_row(folder, file_name, row):
    """Append a single row to folder/file_name.csv (created if missing)."""
    with open('%s/%s.csv' % (folder, file_name), 'a') as f:
        csv.writer(f).writerow(row)
def log_v(variable, value):
    """Print a "name: value" debug line; non-strings are str()-rendered."""
    rendered = value if type(value) is str else str(value)
    print(variable + ": " + rendered)
def make_it_list(single_object_or_a_list):
    """Wrap a bare object in a list; pass lists through unchanged."""
    if type(single_object_or_a_list) is list:
        return single_object_or_a_list
    return [single_object_or_a_list]
def a12s(rxs,rev=False,enough=0.75):
    """Rank multiple treatments with the Vargha-Delaney A12 effect size.

    Given m measures of X and n measures of Y, A12 estimates the
    probability that algorithm X yields higher values than Y:

        A12 = #(X > Y)/mn + 0.5*#(X = Y)/mn

    Vargha and Delaney call the difference between two populations
    big/medium/small when A12 exceeds 0.71/0.64/0.56 (this code
    parameterizes that magic number via `enough`, default 0.75, so you
    can use the standard values if you want to).

    While A12 compares two treatments, this helper handles many:
    populations are sorted by their mean, and sample[i+1] gets
    rank(sample[i]) + 1 whenever a12 reports the two populations
    different.

    Each population in `rxs` is a list whose first item is a label and
    whose remaining items are (possibly unsorted) numbers, e.g.:

        rxs = [["x1", 0.34, 0.49, 0.51, 0.60],
               ["x2", 0.9, 0.7, 0.8, 0.60],
               ["x3", 0.15, 0.25, 0.4, 0.35],
               ["x4", 0.6, 0.7, 0.8, 0.90],
               ["x5", 0.1, 0.2, 0.3, 0.40]]
        for rx in a12s(rxs, rev=False, enough=0.75): print rx

    Delegates to stat_helper.a12s (imported as a12rank).
    """
    return a12rank(rxs, rev, enough)
| |
""" Implementation of all the benchmark functions used in the 2010 GECCO workshop BBOB
(Black-Box Optimization Benchmarking).
Note: f_opt is fixed to 0 for all.
"""
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.rl.environments.functions.unimodal import * #@UnusedWildImport
from pybrain.rl.environments.functions.transformations import BBOBTransformationFunction
from pybrain.rl.environments.functions.multimodal import * #@UnusedWildImport
# --- separable ---
def bbob_f1(dim):
    """F1: sphere."""
    return BBOBTransformationFunction(SphereFunction(dim))


def bbob_f2(dim):
    """F2: ellipsoid, oscillated."""
    base = ElliFunction(dim)
    return BBOBTransformationFunction(base, oscillate=True)


def bbob_f3(dim):
    """F3: Rastrigin, oscillated and asymmetric."""
    base = RastriginFunction(dim)
    return BBOBTransformationFunction(base, oscillate=True, asymmetry=0.2)


def bbob_f4(dim):
    """F4: Buche-Rastrigin, oscillated and boundary-penalized."""
    base = BucheRastriginFunction(dim)
    return BBOBTransformationFunction(base, oscillate=True, penalized=100)


def bbob_f5(dim):
    """F5: bounded linear slope, untranslated."""
    return BBOBTransformationFunction(BoundedLinear(dim), translate=False)
# --- moderate conditioning ---
def bbob_f6(dim):
    """F6: attractive sector, rotated, untranslated."""
    cfg = dict(rotate=True, translate=False)
    return BBOBTransformationFunction(AttractiveSectorFunction(dim), **cfg)


def bbob_f7(dim):
    """F7: step ellipsoid, conditioning 10, penalized, rotated."""
    cfg = dict(conditioning=10, penalized=1, rotate=True)
    return BBOBTransformationFunction(StepElliFunction(dim), **cfg)


def bbob_f8(dim):
    """F8: Rosenbrock."""
    return BBOBTransformationFunction(RosenbrockFunction(dim))


def bbob_f9(dim):
    """F9: rotated Rosenbrock."""
    return BBOBTransformationFunction(RosenbrockFunction(dim), rotate=True)
# --- unimodal, high conditioning ---
def bbob_f10(dim):
    """F10: ellipsoid, oscillated and rotated."""
    return BBOBTransformationFunction(ElliFunction(dim),
                                      oscillate=True, rotate=True)


def bbob_f11(dim):
    """F11: tablet, oscillated and rotated."""
    return BBOBTransformationFunction(TabletFunction(dim),
                                      oscillate=True, rotate=True)


def bbob_f12(dim):
    """F12: cigar, asymmetry 0.5, rotated."""
    return BBOBTransformationFunction(CigarFunction(dim),
                                      asymmetry=0.5, rotate=True)


def bbob_f13(dim):
    """F13: sharp ridge, conditioning 10, rotated."""
    return BBOBTransformationFunction(SharpRFunctionBis(dim),
                                      conditioning=10, rotate=True)


def bbob_f14(dim):
    """F14: different powers (a=4), rotated."""
    return BBOBTransformationFunction(DiffPowFunction(dim, a=4), rotate=True)
# --- multi-modal with global structure ---
def bbob_f15(dim):
    """F15: Rastrigin -- conditioned, oscillated, asymmetric, rotated."""
    cfg = dict(conditioning=10, oscillate=True, asymmetry=0.2, rotate=True)
    return BBOBTransformationFunction(RastriginFunction(dim), **cfg)


def bbob_f16(dim):
    """F16: Weierstrass (kmax=11), conditioning 0.01, oscillated, rotated."""
    cfg = dict(conditioning=0.01, oscillate=True, rotate=True)
    return BBOBTransformationFunction(WeierstrassFunction(dim, kmax=11), **cfg)


def bbob_f17(dim):
    """F17: Schaffers F7, moderately conditioned."""
    cfg = dict(conditioning=10, asymmetry=0.5, penalized=10, rotate=True)
    return BBOBTransformationFunction(SchaffersF7Function(dim), **cfg)


def bbob_f18(dim):
    """F18: Schaffers F7, ill-conditioned variant (conditioning 1000)."""
    cfg = dict(conditioning=1000, asymmetry=0.5, penalized=10, rotate=True)
    return BBOBTransformationFunction(SchaffersF7Function(dim), **cfg)


def bbob_f19(dim):
    """F19: composite Griewank-Rosenbrock, rotated."""
    return BBOBTransformationFunction(GriewankRosenbrockFunction(dim),
                                      rotate=True)
# --- multi-modal with weak global structure ---
def bbob_f20(dim):
    """F20: Schwefel variant, untranslated."""
    return BBOBTransformationFunction(Schwefel20Function(dim), translate=False)


def bbob_f21(dim):
    """F21: Gallagher Gaussian 101-me peaks, untranslated."""
    return BBOBTransformationFunction(GallagherGauss101MeFunction(dim),
                                      translate=False)


def bbob_f22(dim):
    """F22: Gallagher Gaussian 21-hi peaks, untranslated."""
    return BBOBTransformationFunction(GallagherGauss21HiFunction(dim),
                                      translate=False)


def bbob_f23(dim):
    """F23: Katsuura, rotated, conditioning 100."""
    return BBOBTransformationFunction(KatsuuraFunction(dim),
                                      rotate=True, conditioning=100)


def bbob_f24(dim):
    """F24: Lunacek bi-Rastrigin, untranslated."""
    return BBOBTransformationFunction(LunacekBiRastriginFunction(dim),
                                      translate=False)
# all of them
# The 24 noiseless BBOB-2010 benchmark constructors, in benchmark order.
bbob_collection = [bbob_f1, bbob_f2, bbob_f3, bbob_f4,
                   bbob_f5,
                   bbob_f6, bbob_f7, bbob_f8, bbob_f9, bbob_f10,
                   bbob_f11, bbob_f12, bbob_f13, bbob_f14, bbob_f15,
                   bbob_f16,
                   bbob_f17,
                   bbob_f18,
                   bbob_f19, bbob_f20,
                   bbob_f21, bbob_f22, bbob_f23, bbob_f24]
#moderate noise
def bbob_f101(dim):
    """F101: sphere with moderate Gaussian noise."""
    return BBOBTransformationFunction(SphereFunction(dim),
                                      gnoise=0.01, penalized=1)


def bbob_f102(dim):
    """F102: sphere with moderate uniform noise."""
    return BBOBTransformationFunction(SphereFunction(dim),
                                      unoise=0.01, penalized=1)


def bbob_f103(dim):
    """F103: sphere with moderate Cauchy noise."""
    return BBOBTransformationFunction(SphereFunction(dim),
                                      cnoise=(0.01, 0.05), penalized=1)


def bbob_f104(dim):
    """F104: Rosenbrock with moderate Gaussian noise."""
    return BBOBTransformationFunction(RosenbrockFunction(dim),
                                      gnoise=0.01, penalized=1)


def bbob_f105(dim):
    """F105: Rosenbrock with moderate uniform noise."""
    return BBOBTransformationFunction(RosenbrockFunction(dim),
                                      unoise=0.01, penalized=1)


def bbob_f106(dim):
    """F106: Rosenbrock with moderate Cauchy noise."""
    return BBOBTransformationFunction(RosenbrockFunction(dim),
                                      cnoise=(0.01, 0.05), penalized=1)
# severe noise
def bbob_f107(dim):
    """F107: sphere with severe Gaussian noise."""
    return BBOBTransformationFunction(SphereFunction(dim),
                                      gnoise=1, penalized=1)


def bbob_f108(dim):
    """F108: sphere with severe uniform noise."""
    return BBOBTransformationFunction(SphereFunction(dim),
                                      unoise=1, penalized=1)


def bbob_f109(dim):
    """F109: sphere with severe Cauchy noise."""
    return BBOBTransformationFunction(SphereFunction(dim),
                                      cnoise=(1, 0.2), penalized=1)


def bbob_f110(dim):
    """F110: Rosenbrock with severe Gaussian noise."""
    return BBOBTransformationFunction(RosenbrockFunction(dim),
                                      gnoise=1, penalized=1)


def bbob_f111(dim):
    """F111: Rosenbrock with severe uniform noise."""
    return BBOBTransformationFunction(RosenbrockFunction(dim),
                                      unoise=1, penalized=1)


def bbob_f112(dim):
    """F112: Rosenbrock with severe Cauchy noise."""
    return BBOBTransformationFunction(RosenbrockFunction(dim),
                                      cnoise=(1, 0.2), penalized=1)


def bbob_f113(dim):
    """F113: step ellipsoid with severe Gaussian noise."""
    cfg = dict(conditioning=10, penalized=1, rotate=True, gnoise=1)
    return BBOBTransformationFunction(StepElliFunction(dim), **cfg)


def bbob_f114(dim):
    """F114: step ellipsoid with severe uniform noise."""
    cfg = dict(conditioning=10, penalized=1, rotate=True, unoise=1)
    return BBOBTransformationFunction(StepElliFunction(dim), **cfg)


def bbob_f115(dim):
    """F115: step ellipsoid with severe Cauchy noise."""
    cfg = dict(conditioning=10, penalized=1, rotate=True, cnoise=(1, 0.2))
    return BBOBTransformationFunction(StepElliFunction(dim), **cfg)


def bbob_f116(dim):
    """F116: ellipsoid (a=100) with severe Gaussian noise."""
    cfg = dict(oscillate=True, penalized=1, rotate=True, gnoise=1)
    return BBOBTransformationFunction(ElliFunction(dim, a=100), **cfg)


def bbob_f117(dim):
    """F117: ellipsoid (a=100) with severe uniform noise."""
    cfg = dict(oscillate=True, penalized=1, rotate=True, unoise=1)
    return BBOBTransformationFunction(ElliFunction(dim, a=100), **cfg)


def bbob_f118(dim):
    """F118: ellipsoid (a=100) with severe Cauchy noise."""
    cfg = dict(oscillate=True, penalized=1, rotate=True, cnoise=(1, 0.2))
    return BBOBTransformationFunction(ElliFunction(dim, a=100), **cfg)


def bbob_f119(dim):
    """F119: different powers with severe Gaussian noise."""
    cfg = dict(penalized=1, rotate=True, gnoise=1)
    return BBOBTransformationFunction(DiffPowFunction(dim), **cfg)


def bbob_f120(dim):
    """F120: different powers with severe uniform noise."""
    cfg = dict(penalized=1, rotate=True, unoise=1)
    return BBOBTransformationFunction(DiffPowFunction(dim), **cfg)


def bbob_f121(dim):
    """F121: different powers with severe Cauchy noise."""
    cfg = dict(penalized=1, rotate=True, cnoise=(1, 0.2))
    return BBOBTransformationFunction(DiffPowFunction(dim), **cfg)
# multi-modal with severe noise
def bbob_f122(dim):
    """F122: Schaffers F7 with severe Gaussian noise."""
    cfg = dict(conditioning=10, asymmetry=0.5, penalized=1, rotate=True,
               gnoise=1)
    return BBOBTransformationFunction(SchaffersF7Function(dim), **cfg)


def bbob_f123(dim):
    """F123: Schaffers F7 with severe uniform noise."""
    cfg = dict(conditioning=10, asymmetry=0.5, penalized=1, rotate=True,
               unoise=1)
    return BBOBTransformationFunction(SchaffersF7Function(dim), **cfg)


def bbob_f124(dim):
    """F124: Schaffers F7 with severe Cauchy noise."""
    cfg = dict(conditioning=10, asymmetry=0.5, penalized=1, rotate=True,
               cnoise=(1, 0.2))
    return BBOBTransformationFunction(SchaffersF7Function(dim), **cfg)


def bbob_f125(dim):
    """F125: Griewank-Rosenbrock with severe Gaussian noise."""
    cfg = dict(penalized=1, rotate=True, gnoise=1)
    return BBOBTransformationFunction(GriewankRosenbrockFunction(dim), **cfg)


def bbob_f126(dim):
    """F126: Griewank-Rosenbrock with severe uniform noise."""
    cfg = dict(penalized=1, rotate=True, unoise=1)
    return BBOBTransformationFunction(GriewankRosenbrockFunction(dim), **cfg)


def bbob_f127(dim):
    """F127: Griewank-Rosenbrock with severe Cauchy noise."""
    cfg = dict(penalized=1, rotate=True, cnoise=(1, 0.2))
    return BBOBTransformationFunction(GriewankRosenbrockFunction(dim), **cfg)


def bbob_f128(dim):
    """F128: Gallagher 101-me peaks with severe Gaussian noise."""
    cfg = dict(translate=False, penalized=1, gnoise=1)
    return BBOBTransformationFunction(GallagherGauss101MeFunction(dim), **cfg)


def bbob_f129(dim):
    """F129: Gallagher 101-me peaks with severe uniform noise."""
    cfg = dict(translate=False, penalized=1, unoise=1)
    return BBOBTransformationFunction(GallagherGauss101MeFunction(dim), **cfg)


def bbob_f130(dim):
    """F130: Gallagher 101-me peaks with severe Cauchy noise."""
    cfg = dict(translate=False, penalized=1, cnoise=(1, 0.2))
    return BBOBTransformationFunction(GallagherGauss101MeFunction(dim), **cfg)
# The 30 noisy BBOB-2010 benchmark constructors (f101-f130), in order.
bbob_noise_collection = [bbob_f101, bbob_f102, bbob_f103,
                         bbob_f104, bbob_f105, bbob_f106,
                         bbob_f107, bbob_f108, bbob_f109,
                         bbob_f110, bbob_f111, bbob_f112,
                         bbob_f113, bbob_f114, bbob_f115,
                         bbob_f116, bbob_f117, bbob_f118,
                         bbob_f119, bbob_f120, bbob_f121,
                         bbob_f122, bbob_f123, bbob_f124,
                         bbob_f125, bbob_f126, bbob_f127,
                         bbob_f128, bbob_f129, bbob_f130
                         ]
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1 import security_group_rules as nova_rules
from novaclient.v1_1.servers import REBOOT_HARD
from horizon.api.base import *
LOG = logging.getLogger(__name__)
# API static values
INSTANCE_ACTIVE_STATE = 'ACTIVE'  # nova server status string for a running instance
VOLUME_STATE_AVAILABLE = "available"  # volume status when not attached to a server
class Flavor(APIResourceWrapper):
    """Simple wrapper around novaclient.flavors.Flavor"""
    _attrs = ['disk', 'id', 'links', 'name', 'ram', 'vcpus']
class FloatingIp(APIResourceWrapper):
    """Simple wrapper around a novaclient floating IP"""
    _attrs = ['ip', 'fixed_ip', 'instance_id', 'id', 'pool']
class FloatingIpPool(APIResourceWrapper):
    """Simple wrapper around a novaclient floating IP pool"""
    _attrs = ['name']
class KeyPair(APIResourceWrapper):
    """Simple wrapper around novaclient.keypairs.Keypair"""
    _attrs = ['fingerprint', 'name', 'private_key']
class VirtualInterface(APIResourceWrapper):
    # Wrapper for a nova virtual interface (id + MAC address).
    _attrs = ['id', 'mac_address']
class Volume(APIResourceWrapper):
    """Nova Volume representation"""
    _attrs = ['id', 'status', 'displayName', 'size', 'volumeType', 'createdAt',
              'attachments', 'displayDescription']
class VNCConsole(APIDictWrapper):
    """Simple wrapper around the VNC console dict (url + type)"""
    _attrs = ['url', 'type']
class Quota(object):
    """A single named limit taken from a tenant quota set."""

    def __init__(self, name, limit):
        self.name = name
        self.limit = limit

    def __repr__(self):
        return "<Quota: (" + str(self.name) + ", " + str(self.limit) + ")>"
class QuotaSet(object):
    """Flattens a quota-set API resource into a list of Quota items, and
    mirrors each limit as an attribute on the instance."""

    def __init__(self, apiresource):
        self.items = []
        for key in apiresource._info.keys():
            if key == 'id':
                continue  # the resource id is not a quota value
            limit = int(apiresource._info[key])
            self.items.append(Quota(key, limit))
            setattr(self, key, limit)
class Server(APIResourceWrapper):
    """Simple wrapper around novaclient.server.Server

    Preserves the request info so image name can later be retrieved
    """
    _attrs = ['addresses', 'attrs', 'hostId', 'id', 'image', 'links',
              'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
              'image_name', 'VirtualInterfaces', 'flavor', 'key_name',
              'OS-EXT-STS:power_state', 'OS-EXT-STS:task_state']
    def __init__(self, apiresource, request):
        super(Server, self).__init__(apiresource)
        # Keep the originating request so properties/methods below can
        # issue further API calls on the caller's behalf.
        self.request = request
    @property
    def image_name(self):
        """Name of this server's image, resolved via glance; returns
        "(not found)" when glance no longer knows the image id."""
        # Imported here rather than at module level -- presumably to
        # avoid a circular import; TODO confirm.
        from glance.common import exception as glance_exceptions
        from horizon.api import glance
        try:
            image = glance.image_get_meta(self.request, self.image['id'])
            return image.name
        except glance_exceptions.NotFound:
            return "(not found)"
    def reboot(self, hardness=REBOOT_HARD):
        # Reboot this instance; defaults to a hard reboot.
        novaclient(self.request).servers.reboot(self.id, hardness)
class Usage(APIResourceWrapper):
    """Simple wrapper around contrib/simple_usage.py"""
    _attrs = ['start', 'server_usages', 'stop', 'tenant_id',
              'total_local_gb_usage', 'total_memory_mb_usage',
              'total_vcpus_usage', 'total_hours']

    def get_summary(self):
        """Aggregate usage figures for display as a single dict."""
        return {
            'instances': self.total_active_instances,
            'memory_mb': self.memory_mb,
            'vcpus': getattr(self, "total_vcpus_usage", 0),
            'vcpu_hours': self.vcpu_hours,
            'local_gb': self.local_gb,
            'disk_gb_hours': self.disk_gb_hours,
        }

    def _active(self):
        # Usages without an end time belong to still-running instances.
        return [s for s in self.server_usages if s['ended_at'] == None]

    @property
    def total_active_instances(self):
        return len(self._active())

    @property
    def vcpus(self):
        return sum(s['vcpus'] for s in self._active())

    @property
    def vcpu_hours(self):
        return getattr(self, "total_hours", 0)

    @property
    def local_gb(self):
        return sum(s['local_gb'] for s in self._active())

    @property
    def memory_mb(self):
        return sum(s['memory_mb'] for s in self._active())

    @property
    def disk_gb_hours(self):
        return getattr(self, "total_local_gb_usage", 0)
class SecurityGroup(APIResourceWrapper):
    """Simple wrapper around novaclient.security_groups.SecurityGroup"""
    _attrs = ['id', 'name', 'description', 'tenant_id']
    @property
    def rules(self):
        """ Wraps transmitted rule info in the novaclient rule class. """
        # Lazily built and cached on first access in self._rules.
        if not hasattr(self, "_rules"):
            # NOTE(review): the manager class itself (not an instance) is
            # passed to SecurityGroupRule -- confirm novaclient only
            # stores it.
            manager = nova_rules.SecurityGroupRuleManager
            self._rules = [nova_rules.SecurityGroupRule(manager, rule) for \
                           rule in self._apiresource.rules]
        return self._rules
    @rules.setter
    def rules(self, value):
        # Allows callers to pre-populate / replace the cached rule list.
        self._rules = value
class SecurityGroupRule(APIResourceWrapper):
    """ Simple wrapper for individual rules in a SecurityGroup. """
    _attrs = ['id', 'ip_protocol', 'from_port', 'to_port', 'ip_range']

    def __unicode__(self):
        # Rendered as e.g. "ALLOW 80:80 from 0.0.0.0/0".
        return 'ALLOW %(from)s:%(to)s from %(cidr)s' % {
            'from': self.from_port,
            'to': self.to_port,
            'cidr': self.ip_range['cidr'],
        }
def novaclient(request):
    """Build a nova API client authenticated with the request's token."""
    token = request.user.token
    LOG.debug('novaclient connection created using token "%s" and url "%s"' %
              (token, url_for(request, 'compute')))
    conn = nova_client.Client(request.user.username,
                              token,
                              project_id=request.user.tenant_id,
                              auth_url=url_for(request, 'compute'))
    # Re-use the already-issued token instead of re-authenticating.
    conn.client.auth_token = token
    conn.client.management_url = url_for(request, 'compute')
    return conn
def server_vnc_console(request, instance_id, type='novnc'):
    """Fetch a VNC console for an instance; `type` selects the console kind."""
    console = novaclient(request).servers.get_vnc_console(instance_id, type)
    return VNCConsole(console['console'])


def flavor_create(request, name, memory, vcpu, disk, flavor_id):
    """Create a flavor; numeric fields are coerced to int."""
    created = novaclient(request).flavors.create(
        name, int(memory), int(vcpu), int(disk), flavor_id)
    return Flavor(created)


def flavor_delete(request, flavor_id):
    """Delete the given flavor."""
    novaclient(request).flavors.delete(flavor_id)


def flavor_get(request, flavor_id):
    """Look up a single flavor by id."""
    return Flavor(novaclient(request).flavors.get(flavor_id))


def flavor_list(request):
    """List all flavors."""
    return [Flavor(flavor) for flavor in novaclient(request).flavors.list()]
def tenant_floating_ip_list(request):
    """
    Fetches a list of all floating ips.
    """
    ips = novaclient(request).floating_ips.list()
    return [FloatingIp(ip) for ip in ips]


def floating_ip_pools_list(request):
    """
    Fetches a list of all floating ip pools.
    """
    pools = novaclient(request).floating_ip_pools.list()
    return [FloatingIpPool(pool) for pool in pools]


def tenant_floating_ip_get(request, floating_ip_id):
    """
    Fetches a floating ip.
    """
    return novaclient(request).floating_ips.get(floating_ip_id)


def tenant_floating_ip_allocate(request, pool=None):
    """
    Allocates a floating ip to tenant, optionally from a named pool.
    """
    return novaclient(request).floating_ips.create(pool=pool)


def tenant_floating_ip_release(request, floating_ip_id):
    """
    Releases floating ip from the pool of a tenant.
    """
    return novaclient(request).floating_ips.delete(floating_ip_id)
def snapshot_create(request, instance_id, name):
    """Snapshot an instance into a named image."""
    return novaclient(request).servers.create_image(instance_id, name)


def keypair_create(request, name):
    """Generate a new keypair under the given name."""
    return KeyPair(novaclient(request).keypairs.create(name))


def keypair_import(request, name, public_key):
    """Register an existing public key as a named keypair."""
    return KeyPair(novaclient(request).keypairs.create(name, public_key))


def keypair_delete(request, keypair_id):
    """Delete a keypair."""
    novaclient(request).keypairs.delete(keypair_id)


def keypair_list(request):
    """List the tenant's keypairs."""
    return [KeyPair(keypair) for keypair in novaclient(request).keypairs.list()]
def server_create(request, name, image, flavor, key_name, user_data,
                  security_groups, block_device_mapping, instance_count=1):
    """Boot instance_count servers and wrap the result."""
    created = novaclient(request).servers.create(
        name, image, flavor, userdata=user_data,
        security_groups=security_groups,
        key_name=key_name, block_device_mapping=block_device_mapping,
        min_count=instance_count)
    return Server(created, request)


def server_delete(request, instance):
    """Terminate an instance."""
    novaclient(request).servers.delete(instance)


def server_get(request, instance_id):
    """Fetch one instance, wrapped together with the originating request."""
    return Server(novaclient(request).servers.get(instance_id), request)
def server_list(request, search_opts=None, all_tenants=False):
    """List servers visible to the request.

    :param search_opts: optional dict of nova search options; the
        caller's dict is NOT modified (the original implementation
        mutated it in place when adding scoping keys).
    :param all_tenants: when True, list across all tenants; otherwise
        restrict results to the request's tenant.
    """
    # Copy so the scoping keys below never leak back into the caller's dict.
    opts = {} if search_opts is None else dict(search_opts)
    if all_tenants:
        opts['all_tenants'] = True
    else:
        opts['project_id'] = request.user.tenant_id
    return [Server(s, request)
            for s in novaclient(request).servers.list(True, opts)]
def server_console_output(request, instance_id, tail_length=None):
    """Gets console output of an instance"""
    return novaclient(request).servers.get_console_output(instance_id,
                                                          length=tail_length)


def server_pause(request, instance_id):
    """Pause a running instance."""
    novaclient(request).servers.pause(instance_id)


def server_unpause(request, instance_id):
    """Resume a paused instance."""
    novaclient(request).servers.unpause(instance_id)


def server_suspend(request, instance_id):
    """Suspend an instance to disk."""
    novaclient(request).servers.suspend(instance_id)


def server_resume(request, instance_id):
    """Resume a suspended instance."""
    novaclient(request).servers.resume(instance_id)


def server_reboot(request,
                  instance_id,
                  hardness=REBOOT_HARD):
    """Reboot an instance; hardness defaults to a hard reboot."""
    server_get(request, instance_id).reboot(hardness)


def server_update(request, instance_id, name):
    """Rename an instance."""
    return novaclient(request).servers.update(instance_id, name=name)
def server_add_floating_ip(request, server, address):
    """
    Associate floating IP *address* with *server*'s fixed IP.
    """
    nova_server = novaclient(request).servers.get(server)
    floating_ip = novaclient(request).floating_ips.get(address)
    return novaclient(request).servers.add_floating_ip(nova_server,
                                                       floating_ip)
def server_remove_floating_ip(request, server, address):
    """
    Dissociate floating IP *address* from the server holding its fixed IP.
    """
    floating_ip = novaclient(request).floating_ips.get(address)
    nova_server = novaclient(request).servers.get(floating_ip.instance_id)
    return novaclient(request).servers.remove_floating_ip(nova_server,
                                                          floating_ip)
def tenant_quota_get(request, tenant_id):
    """Return the quota set for *tenant_id*, wrapped in QuotaSet."""
    quotas = novaclient(request).quotas.get(tenant_id)
    return QuotaSet(quotas)
def tenant_quota_update(request, tenant_id, **kwargs):
    """Update quota values for *tenant_id* from keyword arguments."""
    novaclient(request).quotas.update(tenant_id, **kwargs)
def tenant_quota_defaults(request, tenant_id):
    """Return the default quota set for *tenant_id*, wrapped in QuotaSet."""
    defaults = novaclient(request).quotas.defaults(tenant_id)
    return QuotaSet(defaults)
def usage_get(request, tenant_id, start, end):
    """Return usage for one tenant over [start, end], wrapped in Usage."""
    usage = novaclient(request).usage.get(tenant_id, start, end)
    return Usage(usage)
def usage_list(request, start, end):
    """Return detailed usage over [start, end] for every tenant."""
    usages = novaclient(request).usage.list(start, end, True)
    return [Usage(usage) for usage in usages]
def security_group_list(request):
    """Return all security groups, wrapped in SecurityGroup."""
    groups = novaclient(request).security_groups.list()
    return [SecurityGroup(group) for group in groups]
def security_group_get(request, security_group_id):
    """Fetch a single security group by id."""
    group = novaclient(request).security_groups.get(security_group_id)
    return SecurityGroup(group)
def security_group_create(request, name, description):
    """Create a security group with the given name and description."""
    group = novaclient(request).security_groups.create(name, description)
    return SecurityGroup(group)
def security_group_delete(request, security_group_id):
    """Delete the security group identified by *security_group_id*."""
    novaclient(request).security_groups.delete(security_group_id)
def security_group_rule_create(request, parent_group_id, ip_protocol=None,
                               from_port=None, to_port=None, cidr=None,
                               group_id=None):
    """Add a rule to the security group *parent_group_id*."""
    rule = novaclient(request).security_group_rules.create(
        parent_group_id, ip_protocol, from_port, to_port, cidr, group_id)
    return SecurityGroupRule(rule)
def security_group_rule_delete(request, security_group_rule_id):
    """Delete a single security group rule."""
    nova = novaclient(request)
    nova.security_group_rules.delete(security_group_rule_id)
def virtual_interfaces_list(request, instance_id):
    """Return the raw virtual interfaces attached to an instance."""
    nova = novaclient(request)
    return nova.virtual_interfaces.list(instance_id)
def volume_list(request):
    """Return every volume, wrapped in Volume."""
    volumes = novaclient(request).volumes.list()
    return [Volume(volume) for volume in volumes]
def volume_get(request, volume_id):
    """Fetch one volume by id, wrapped in Volume."""
    volume = novaclient(request).volumes.get(volume_id)
    return Volume(volume)
def volume_instance_list(request, instance_id):
    """Return the raw volumes attached to an instance."""
    nova = novaclient(request)
    return nova.volumes.get_server_volumes(instance_id)
def volume_create(request, size, name, description):
    """Create a volume of *size* GB with a display name/description."""
    volume = novaclient(request).volumes.create(
        size, display_name=name, display_description=description)
    return Volume(volume)
def volume_delete(request, volume_id):
    """Delete the volume identified by *volume_id*."""
    nova = novaclient(request)
    nova.volumes.delete(volume_id)
def volume_attach(request, volume_id, instance_id, device):
    """Attach *volume_id* to *instance_id* at *device* (e.g. /dev/vdb)."""
    nova = novaclient(request)
    nova.volumes.create_server_volume(instance_id, volume_id, device)
def volume_detach(request, instance_id, attachment_id):
    """Detach attachment *attachment_id* from *instance_id*."""
    nova = novaclient(request)
    nova.volumes.delete_server_volume(instance_id, attachment_id)
def volume_snapshot_list(request):
    """Return all raw volume snapshots."""
    nova = novaclient(request)
    return nova.volume_snapshots.list()
def volume_snapshot_create(request, volume_id, name, description):
    """Snapshot *volume_id* with a display name/description."""
    nova = novaclient(request)
    return nova.volume_snapshots.create(
        volume_id, display_name=name, display_description=description)
def volume_snapshot_delete(request, snapshot_id):
    """Delete a single volume snapshot."""
    nova = novaclient(request)
    nova.volume_snapshots.delete(snapshot_id)
| |
# pylint: disable=too-many-lines
"""
All code related to running system commands.
Command: Class to run arbitrary system commands.
Archive: Used to fetch a source archive.
Git: Used to fetch a git repository.
Hg: Used to fetch a mercurial repository.
"""
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod, abstractproperty
import atexit
import functools
import glob
import inspect
import logging
import os
import shlex
import shutil
import signal
import subprocess
import sys
from tempfile import NamedTemporaryFile as TempFile
import threading
import time
import hashlib
import tarfile
# pylint: disable=import-error
try:
import urllib2 as ulib
except ImportError: # pragma: no cover
import urllib.request as ulib # pylint: disable=no-name-in-module
# pylint: enable=import-error
import zipfile
import pakit.conf
from pakit.exc import (
PakitError, PakitCmdError, PakitCmdTimeout, PakitLinkError
)
# Maps an archive mimetype (as reported by `file --mime-type`, see
# get_extract_func) to the name of the module-level function able to
# extract that kind of archive.
EXT_FUNCS = {
    'application/x-7z-compressed': 'extract_7z',
    'application/x-rar': 'extract_rar',
    'application/gzip': 'extract_tar_gz',
    'application/x-gzip': 'extract_tar_gz',
    'application/x-bzip2': 'extract_tar_gz',
    'application/x-tar': 'extract_tar_gz',
    'application/x-xz': 'extract_tar_xz',
    'application/zip': 'extract_zip',
}
@atexit.register
def cmd_cleanup():
    """
    Cleans up any command stdout files left over.

    Registered to run at interpreter exit; uses ignore_errors so a
    missing or already-removed TMP_DIR cannot raise during shutdown
    (the bare rmtree raised OSError when the directory was absent).
    """
    shutil.rmtree(pakit.conf.TMP_DIR, ignore_errors=True)
def check_connectivity():
    """
    Return True if and only if github.com can be reached.

    Best-effort probe of the pakit repository URL with a 2 second
    timeout; any URLError is treated as "offline".
    """
    connected = True
    try:
        ulib.urlopen('https://github.com/starcraftman/pakit', timeout=2)
    except ulib.URLError:
        connected = False
    return connected
def user_input(msg):
    """
    Prompt the user and return what they typed.

    Works on python 2 (raw_input) and python 3 (input).

    Args:
        msg: The message to print to user.

    Returns:
        Whatever the user typed.
    """
    if sys.version_info < (3, 0):
        prompt = raw_input  # pylint: disable=undefined-variable
    else:  # pragma: no cover
        prompt = input  # pylint: disable=bad-builtin
    return prompt(msg)
def wrap_extract(extract_func):
    """
    A decorator that handles some boiler plate between
    extract functions.
    Condition: extract_func must extract the folder with source
    into the tmp_dir. Rest is handled automatically.
    """
    @functools.wraps(extract_func)
    def inner(filename, target):
        """
        Inner part of decorator.

        Extracts into a scratch dir under TMP_DIR, moves the single
        extracted root folder to *target*, then removes the scratch dir.
        """
        # scratch dir named after the archive to avoid collisions
        tmp_dir = os.path.join(pakit.conf.TMP_DIR, os.path.basename(filename))
        extract_func(filename, tmp_dir)
        # assumes the archive holds exactly one top-level entry -- TODO confirm
        extracted = glob.glob(os.path.join(tmp_dir, '*'))[0]
        shutil.move(extracted, target)
        os.rmdir(tmp_dir)
    return inner
@wrap_extract
def extract_7z(filename, tmp_dir):
    """
    Extracts a 7z archive into *tmp_dir* using the `7z` binary.

    Raises:
        PakitCmdError: The `7z` command is missing or failed.
    """
    try:
        Command('7z x -o{tmp} {file}'.format(file=filename,
                                             tmp=tmp_dir)).wait()
    except (OSError, PakitCmdError):
        raise PakitCmdError('Need `7z` to extract: ' + filename)
    try:
        # drop the dir only if extraction left it empty
        os.rmdir(tmp_dir)
    except OSError:
        pass
@wrap_extract
def extract_rar(filename, tmp_dir):
    """
    Extracts a rar archive into *tmp_dir*.

    Tries the `rar` command first, then falls back to `unrar`,
    stopping at the first one that succeeds. (The original loop had no
    break and only skipped the second attempt by accident, because the
    makedirs call raised once tmp_dir already held extracted files.)

    Raises:
        PakitCmdError: Neither `rar` nor `unrar` could extract it.
    """
    success = False
    cmd_str = 'rar x {file} {tmp}'.format(file=filename, tmp=tmp_dir)
    for cmd in [cmd_str, 'un' + cmd_str]:
        try:
            os.makedirs(tmp_dir)
            Command(cmd).wait()
            success = True
        except (OSError, PakitCmdError):
            pass
        finally:
            try:
                # drop the dir only if extraction left it empty
                os.rmdir(tmp_dir)
            except OSError:
                pass
        if success:
            # don't run the fallback once a command has worked
            break
    if not success:
        raise PakitCmdError('Need `rar` or `unrar` command to extract: ' +
                            filename)
@wrap_extract
def extract_tar_gz(filename, tmp_dir):
    """
    Extracts a tar.gz archive to a temp dir.

    tarfile autodetects the compression (gz/bz2/plain) from the file,
    which is why several mimetypes map here in EXT_FUNCS.
    """
    tarf = tarfile.open(filename)
    tarf.extractall(tmp_dir)
@wrap_extract
def extract_tar_xz(filename, tmp_dir):
    """
    Extracts a tar.xz archive to a temp dir.

    Decompresses with `xz --keep` (leaving the original archive), then
    untars the intermediate '<name>.tar' into *tmp_dir*.

    Raises:
        PakitCmdError: The `xz` or `tar` command is missing or failed.
    """
    # derive the intermediate '<name>.tar' path xz will produce
    tar_file = filename.split('.')
    tar_file = tar_file[0:-2] if 'tar' in tar_file else tar_file[0:-1]
    tar_file = os.path.join(os.path.dirname(filename),
                            '.'.join(tar_file + ['tar']))
    try:
        os.makedirs(tmp_dir)
    except OSError:  # pragma: no cover
        pass
    try:
        Command('xz --keep --decompress ' + filename).wait()
        Command('tar -C {0} -xf {1}'.format(tmp_dir, tar_file)).wait()
    except (OSError, PakitCmdError):
        raise PakitCmdError('Need commands `xz` and `tar` to extract: ' +
                            filename)
    finally:
        # always remove the intermediate tar; keep the original archive
        os.remove(tar_file)
        try:
            # drop the dir only if extraction left it empty
            os.rmdir(tmp_dir)
        except OSError:
            pass
@wrap_extract
def extract_zip(filename, tmp_dir):
    """
    Extracts a zip archive into *tmp_dir*.
    """
    # NOTE(review): the ZipFile handle is never closed explicitly
    zipf = zipfile.ZipFile(filename)
    zipf.extractall(tmp_dir)
def get_extract_func(arc_path):
    """
    Check mimetype of archive to select extraction method.

    Args:
        arc_path: The absolute path to an archive.

    Returns:
        The function of the form extract(filename, target).

    Raises:
        PakitError: Could not determine function from mimetype.
    """
    cmd = Command('file --mime-type ' + arc_path)
    cmd.wait()
    # `file` prints "<path>: <mimetype>"; the second word is the type
    mtype = cmd.output()[0].split()[1]
    if mtype not in EXT_FUNCS.keys():
        raise PakitError('Unsupported Archive: mimetype ' + mtype)
    # EXT_FUNCS stores function names; resolve on this module
    return getattr(sys.modules[__name__], EXT_FUNCS[mtype])
def hash_archive(archive, hash_alg='sha256'):
    """
    Hash an archive file, reading it in fixed-size chunks.

    Args:
        archive: Path to an archive.
        hash_alg: Hashing algorithm to use, available algorithms
            are in hashlib.algorithms

    Returns:
        The hex based hash of the archive, using hash_alg.
    """
    hasher = hashlib.new(hash_alg)
    chunk_size = 1024 ** 2
    with open(archive, 'rb') as fin:
        # iter() with a b'' sentinel stops cleanly at end of file
        for chunk in iter(lambda: fin.read(chunk_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
def common_suffix(path1, path2):
    """
    Return the largest path suffix shared by both paths.

    Args:
        path1: The first path.
        path2: The second path.

    Returns:
        The shared trailing components joined with os.path.sep,
        or '' when the paths share no trailing components.
    """
    shorter = path1.split(os.path.sep)
    longer = path2.split(os.path.sep)
    if len(longer) < len(shorter):
        shorter, longer = longer, shorter
    matched = []
    # compare from the tail inwards until a component differs
    while shorter and shorter[-1] == longer[-1]:
        matched.insert(0, shorter.pop())
        longer.pop()
    return os.path.sep.join(matched)
def walk_and_link(src, dst):
    """
    Mirror the tree under *src* into *dst* by symlinking every file.

    Args:
        src: The source path with the files to link.
        dst: The destination path where links should be made.

    Raises:
        PakitLinkError: When anything goes wrong linking.
    """
    for dirpath, _, filenames in os.walk(src, followlinks=True, topdown=True):
        target_dir = dirpath.replace(src, dst)
        link_all_files(dirpath, target_dir, filenames)
def walk_and_unlink(src, dst):
    """
    Remove from *dst* every link mirroring a file under *src*.

    Walks bottom-up so emptied directories can be pruned as it goes,
    then guarantees *dst* itself still exists afterwards.

    Args:
        src: The source path with the files to link.
        dst: The destination path where links should be removed.
    """
    walker = os.walk(src, followlinks=True, topdown=False)
    for dirpath, _, filenames in walker:
        unlink_all_files(dirpath, dirpath.replace(src, dst), filenames)
    try:
        os.makedirs(dst)
    except OSError:
        pass  # already present
def walk_and_unlink_all(link_root, build_root):
    """
    Remove every symlink under *link_root* that points into *build_root*.

    Walks the tree bottom up, cleans up empty folders, then recreates
    *link_root* so it always exists on return.

    Args:
        link_root: All links are located below this folder.
        build_root: The path where all installations are. Any symlink
            pakit makes will have this as a prefix of the target path.
    """
    walker = os.walk(link_root, followlinks=True, topdown=False)
    for dirpath, _, filenames in walker:
        # keep only files whose resolved target lives under build_root
        pakit_links = [
            fname for fname in filenames
            if os.path.realpath(os.path.join(dirpath, fname)).find(build_root) == 0
        ]
        unlink_all_files(dirpath, dirpath, pakit_links)
    try:
        os.makedirs(link_root)
    except OSError:
        pass  # already present
def link_all_files(src, dst, filenames):
    """
    Symlink each named file from *src* into *dst*, creating *dst* as needed.

    Args:
        src: The directory where the source files exist.
        dst: The directory where the links should be made.
        filenames: A list of filenames in src.

    Raises:
        PakitLinkError: A link could not be created.
    """
    try:
        os.makedirs(dst)
    except OSError:
        pass  # The folder already existed
    for fname in filenames:
        source = os.path.join(src, fname)
        dest = os.path.join(dst, fname)
        try:
            os.symlink(source, dest)
        except OSError:
            msg = 'Could not symlink {0} -> {1}'.format(source, dest)
            logging.error(msg)
            raise PakitLinkError(msg)
def unlink_all_files(_, dst, filenames):
    """
    Remove each named file from *dst*, then *dst* itself if left empty.

    Args:
        _: Unused (kept for signature symmetry with link_all_files).
        dst: The directory where the links should be removed.
        filenames: A list of filenames to remove from dst.
    """
    for fname in filenames:
        try:
            os.remove(os.path.join(dst, fname))
        except OSError:
            pass  # The link was not there
    try:
        os.rmdir(dst)
    except OSError:
        pass  # Folder probably had files left.
def link_man_pages(link_dir):
    """
    Silently link pakit's bundled man pages into *link_dir*.

    Pages come from the package's 'extra' folder and are linked
    under <link_dir>/share/man/man1.
    """
    pages_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                             'extra')
    man1_dir = os.path.join(link_dir, 'share', 'man', 'man1')
    try:
        os.makedirs(man1_dir)
    except OSError:
        pass  # already present
    for page in glob.glob(os.path.join(pages_dir, '*.1')):
        try:
            os.symlink(page, page.replace(pages_dir, man1_dir))
        except OSError:  # pragma: no cover
            pass
def unlink_man_pages(link_dir):
    """
    Remove pakit's man page links from *link_dir* and prune empty dirs.

    On return *link_dir* itself is guaranteed to exist.
    """
    pages_dir = os.path.join(os.path.dirname(__file__), 'extra')
    man1_dir = os.path.join(link_dir, 'share', 'man', 'man1')
    for page in glob.glob(os.path.join(pages_dir, '*.1')):
        try:
            os.remove(page.replace(pages_dir, man1_dir))
        except OSError:  # pragma: no cover
            pass
    # prune any directories emptied by the removals, bottom up
    for dirpath, _, _ in os.walk(link_dir, topdown=False):
        try:
            os.rmdir(dirpath)
        except OSError:  # pragma: no cover
            pass
    try:
        os.makedirs(link_dir)
    except OSError:  # pragma: no cover
        pass
def vcs_factory(uri, **kwargs):
    """
    Given a uri, match it with the right VersionRepo subclass.

    Args:
        uri: The version control URI.

    Returns:
        The instantiated VersionRepo subclass. Any kwargs, are
        passed along to the constructor of the subclass.

    Raises:
        PakitError: The URI is not supported.
    """
    subclasses = []
    # discover every VersionRepo subclass defined in this module
    for _, obj in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(obj) and issubclass(obj, VersionRepo):
            subclasses.append(obj)
    subclasses.remove(VersionRepo)
    for cls in subclasses:
        if cls.valid_uri(uri):
            return cls(uri, **kwargs)
    # fixed typo in the original message ('Unssupported')
    raise PakitError('Unsupported URI: ' + uri)
def write_config(config_file):
    """
    Writes the DEFAULT config to the config file.
    Overwrites the file if present.

    Args:
        config_file: Path the config should be written to.

    Raises:
        PakitError: File exists and is a directory.
        PakitError: File could not be written to.
    """
    try:
        os.remove(config_file)
    except OSError:
        if os.path.isdir(config_file):
            raise PakitError('Config path is a directory.')
    try:
        config = pakit.conf.Config(config_file)
        config.reset()
        config.write()
    except (IOError, OSError):
        # use config_file here: `config` is unbound when the Config
        # constructor itself raised, which made the original handler
        # die with a NameError instead of reporting the failure
        raise PakitError('Failed to write to ' + config_file)
class Fetchable(object):
    """
    Establishes an abstract interface for fetching source code.

    Subclasses are destined for Recipe.repos to be used to retrieve source
    from the wild.

    Attributes:
        target: The folder the source code should end up in.
        uri: The location of the source code.
    """
    # NOTE(review): py2-style ABC declaration; has no effect on
    # python 3 -- confirm intended interpreter support
    __metaclass__ = ABCMeta
    def __init__(self, uri, target):
        self.target = target
        self.uri = uri
    @abstractmethod
    def __enter__(self):
        """
        Guarantees that source is available at target
        """
        raise NotImplementedError
    @abstractmethod
    def __exit__(self, exc_type, exc_value, exc_tb):
        """
        Handles errors as needed
        """
        raise NotImplementedError
    @abstractproperty
    def ready(self):
        """
        True iff the source code is available at target
        """
        raise NotImplementedError
    @abstractproperty
    def src_hash(self):
        """
        A hash that identifies the source snapshot
        """
        raise NotImplementedError
    def clean(self):
        """
        Purges the source tree from the system
        """
        # shells out so arbitrarily deep trees are removed
        Command('rm -rf ' + self.target).wait()
    @abstractmethod
    def download(self):
        """
        Retrieves code from the remote, may require additional steps
        """
        raise NotImplementedError
class Dummy(Fetchable):
    """
    Creates the target directory when invoked.

    This is a dummy repository, useful for testing and when a recipe
    does NOT rely on a source repository or archive.
    """
    def __init__(self, uri=None, **kwargs):
        """
        Constructor for a Dummy repository.
        Target must be specified before entering context.

        Args:
            uri: Default None, serves no purpose.

        Kwargs:
            target: Path that will be created on the system.
        """
        super(Dummy, self).__init__(uri, kwargs.get('target', None))
    def __str__(self):
        return 'DummyTask: No source code to fetch.'
    def __enter__(self):
        """
        Guarantees that source is available at target
        """
        try:
            # wipe any leftovers, then create a fresh empty folder
            self.clean()
            os.makedirs(self.target)
        except OSError:
            raise PakitError('Could not create folder: ' + self.target)
    def __exit__(self, exc_type, exc_value, exc_tb):
        """
        Handles errors as needed
        """
        pass
    @property
    def ready(self):
        """
        True iff the source code is available at target.

        For Dummy that means the target exists and is still empty.
        """
        return os.path.isdir(self.target) and len(os.listdir(self.target)) == 0
    @property
    def src_hash(self):
        """
        A hash that identifies the source snapshot
        """
        return 'dummy_hash'
    def download(self):
        """
        Retrieves code from the remote, may require additional steps.

        Dummy has nothing to download by design.
        """
        raise NotImplementedError
class Archive(Fetchable):
    """
    Retrieve an archive from a remote URI and extract it to target.

    Supports any extension that has an extract function in this module
    of the form `extract_ext`. For example, if given a zip will use the
    extract_zip function.

    Attributes:
        actual_hash: The actual sha256 hash of the archive.
        filename: The filename of the archive.
        src_hash: The expected sha256 hash of the archive.
        target: The folder the source code should end up in.
        uri: The location of the source code.
    """
    def __init__(self, uri, **kwargs):
        """
        Constructor for Archive. *uri* and *hash* are required.

        Args:
            uri: The URI to retrieve the archive from.

        Kwargs:
            filename: The filename to use, else a tempfile will be used.
            hash: The sha256 hash of the archive.
            target: Path on system to extract to.
        """
        super(Archive, self).__init__(uri, kwargs.get('target', None))
        self.__src_hash = kwargs.get('hash', '')
        self.filename = kwargs.get('filename')
        if self.filename is None:
            # keep a handle on the object so the temp file persists
            self.__tfile = TempFile(mode='wb', delete=False,
                                    dir=pakit.conf.TMP_DIR,
                                    prefix='arc')
            self.filename = self.__tfile.name
    def __enter__(self):
        """
        Guarantees that source is available at target
        """
        if self.ready:
            return
        logging.info('Downloading %s', self.arc_file)
        self.download()
        logging.info('Extracting %s to %s', self.arc_file, self.target)
        get_extract_func(self.arc_file)(self.arc_file, self.target)
        # record the expected hash so `ready` can verify this extraction
        with open(os.path.join(self.target, '.archive'), 'wb') as fout:
            fout.write(self.src_hash.encode())
        os.remove(self.arc_file)
    def __exit__(self, exc_type, exc_value, exc_tb):
        """
        Handles errors as needed
        """
        self.clean()
    def __str__(self):
        return '{name}: {uri}'.format(name=self.__class__.__name__,
                                      uri=self.uri)
    @property
    def arc_file(self):
        """
        The path to the downloaded archive.
        """
        target = self.target
        # normalize a leading './' so dirname works on the real path
        if target.find('./') == 0:
            target = target.replace('./', '')
        return os.path.join(os.path.dirname(target), self.filename)
    @property
    def ready(self):
        """
        True iff the source code is available at target
        """
        try:
            # compare hash recorded at extraction time to the expected one
            with open(os.path.join(self.target, '.archive'), 'rb') as fin:
                file_hash = fin.readlines()[0].decode()
            return file_hash == self.src_hash
        except IOError:
            return False
    @property
    def src_hash(self):
        """
        The expected hash of the archive.
        """
        return self.__src_hash
    def actual_hash(self):
        """
        The actual hash of the downloaded archive file.

        Downloads temporarily if the archive is not on disk, and
        removes it again afterwards in that case.
        """
        arc_clean = False
        if not os.path.exists(self.arc_file):
            self.download()
            arc_clean = True
        hash_str = hash_archive(self.arc_file)
        if arc_clean:
            os.remove(self.arc_file)
        return hash_str
    def clean(self):
        """
        Guarantee no trace of archive file or source target.
        """
        try:
            os.remove(self.arc_file)
        except OSError:
            pass
        super(Archive, self).clean()
    def download(self):
        """
        Retrieves the archive from the remote URI.
        If the URI is a local file, simply copy it.

        Raises:
            PakitError: Downloaded archive's hash did not match src_hash.
        """
        if not os.path.isfile(self.uri):
            resp = ulib.urlopen(self.uri, timeout=30)
            with open(self.arc_file, 'wb') as fout:
                fout.write(resp.read())
        elif self.uri != self.arc_file:
            shutil.copy(self.uri, self.arc_file)
        # verify integrity immediately; remove bad downloads
        arc_hash = self.actual_hash()
        if arc_hash != self.src_hash:
            self.clean()
            raise PakitError('Hash mismatch on archive.\n Expected: {exp}'
                             '\n Actual: {act}'.format(exp=self.src_hash,
                                                       act=arc_hash))
class VersionRepo(Fetchable):
    """
    Base class for all version control support.

    When a 'tag' is set, check out a specific revision of the repository.
    When a 'branch' is set, checkout out the latest commit on the branch of
    the repository.
    These two options are mutually exclusive.

    Attributes:
        branch: A branch to checkout during clone.
        src_hash: The hash of the current commit.
        tag: A tag to checkout during clone.
        target: The folder the source code should end up in.
        uri: The location of the source code.
    """
    def __init__(self, uri, **kwargs):
        # __tag stores either a tag or a branch name; the on_branch
        # flag disambiguates which one it currently holds
        super(VersionRepo, self).__init__(uri, kwargs.get('target', None))
        tag = kwargs.get('tag', None)
        if tag is not None:
            self.__tag = tag
            self.on_branch = False
        else:
            self.__tag = kwargs.get('branch', None)
            self.on_branch = True
    def __enter__(self):
        """
        Guarantees that the repo is downloaded and on the right commit.
        """
        if not self.ready:
            self.clean()
            self.download()
        else:
            self.checkout()
            # only a branch can move; tags are fixed
            if self.on_branch:
                self.update()
    def __exit__(self, exc_type, exc_value, exc_tb):
        """
        Handles errors as needed
        """
        self.reset()
    def __str__(self):
        if self.on_branch:
            tag = 'HEAD' if self.tag is None else self.tag
            tag = 'branch: ' + tag
        else:
            tag = 'tag: ' + self.tag
        return '{name}: {tag}, uri: {uri}'.format(
            name=self.__class__.__name__, uri=self.uri, tag=tag)
    @property
    def branch(self):
        """
        A branch of the repository.
        """
        return self.__tag
    @branch.setter
    def branch(self, new_branch):
        """
        Set the branch to checkout from the repository.
        """
        self.on_branch = True
        self.__tag = new_branch
    @property
    def tag(self):
        """
        A revision or tag of the repository.
        """
        return self.__tag
    @tag.setter
    def tag(self, new_tag):
        """
        Set the tag to checkout from the repository.
        """
        self.on_branch = False
        self.__tag = new_tag
    @abstractproperty
    def ready(self):
        """
        Returns true iff the repository is available and the
        right tag or branch is checked out.
        """
        raise NotImplementedError
    @abstractproperty
    def src_hash(self):
        """
        The hash of the current commit.
        """
        raise NotImplementedError
    @staticmethod
    def valid_uri(uri):
        """
        Validate that the supplied uri is handled by this class.

        Returns:
            True if the URI is valid for this class, else False.
        """
        raise NotImplementedError
    @abstractmethod
    def checkout(self):
        """
        Equivalent to git checkout for the version system.
        """
        raise NotImplementedError
    @abstractmethod
    def download(self):
        """
        Download the repository to the target.
        """
        raise NotImplementedError
    @abstractmethod
    def reset(self):
        """
        Clears away all build files from repo.
        """
        raise NotImplementedError
    @abstractmethod
    def update(self):
        """
        Fetches latest commit when branch is set.
        """
        raise NotImplementedError
class Git(VersionRepo):
    """
    Fetch a git repository from the given URI.

    When a 'tag' is set, check out a specific revision of the repository.
    When a 'branch' is set, checkout out the latest commit on the branch of
    the repository.
    If neither provided, will checkout 'master' branch.
    These two options are mutually exclusive.

    Attributes:
        branch: A branch to checkout during clone.
        src_hash: The hash of the current commit.
        tag: A tag to checkout during clone.
        target: The folder the source code should end up in.
        uri: The location of the source code.
    """
    def __init__(self, uri, **kwargs):
        """
        Constructor for a git repository.
        By default checks out the default branch.
        The *branch* and *tag* kwargs are mutually exclusive.

        Args:
            uri: The URI that hosts the repository.

        Kwargs:
            branch: A branch to checkout and track.
            tag: Any fixed tag like a revision or tagged commit.
            target: Path on system to clone to.
        """
        super(Git, self).__init__(uri, **kwargs)
        # no tag and no explicit branch: track master
        if self.on_branch and kwargs.get('tag') is None:
            self.branch = 'master'
    @property
    def ready(self):
        """
        Returns true iff the repository is available and
        the right tag or branch is checked out.
        """
        if not os.path.exists(os.path.join(self.target, '.git')):
            return False
        # confirm the existing clone actually points at our uri
        cmd = Command('git remote show origin', self.target)
        cmd.wait()
        return self.uri in cmd.output()[1]
    @property
    def src_hash(self):
        """
        Return the current hash of the repository.
        """
        with self:
            cmd = Command('git rev-parse HEAD', self.target)
            cmd.wait()
            return cmd.output()[0]
    @staticmethod
    def valid_uri(uri):
        """
        Validate that the supplied uri is handled by this class.

        Returns:
            True if the URI is valid for this class, else False.
        """
        try:
            cmd = Command('git ls-remote ' + uri)
            cmd.wait()
            return cmd.rcode == 0
        except PakitError:
            return False
    def checkout(self):
        """
        Checkout the right tag or branch.
        """
        Command('git checkout ' + self.tag, self.target).wait()
    def download(self):
        """
        Download the repository to the target.
        """
        tag = '' if self.tag is None else '-b ' + self.tag
        cmd = Command('git clone --recursive {tag} {uri} {target}'.format(
            tag=tag, uri=self.uri, target=self.target))
        cmd.wait()
    def reset(self):
        """
        Clears away all build files from repo.
        """
        Command('git clean -f', self.target).wait()
    def update(self):
        """
        Fetches latest commit when branch is set.
        """
        # fetch the remote branch into a temporary local ref, then
        # fast-forward onto it so no merge commits are created
        cmd = Command('git fetch origin +{0}:new{0}'.format(self.branch),
                      self.target)
        cmd.wait()
        cmd = Command('git merge --ff-only new' + self.branch, self.target)
        cmd.wait()
class Hg(VersionRepo):
    """
    Fetch a mercurial repository from the given URI.

    When a 'tag' is set, check out a specific revision of the repository.
    When a 'branch' is set, checkout out the latest commit on the branch of
    the repository.
    If neither provided, will checkout 'default' branch.
    These two options are mutually exclusive.

    Attributes:
        branch: A branch to checkout during clone.
        src_hash: The hash of the current commit.
        tag: A tag to checkout during clone.
        target: The folder the source code should end up in.
        uri: The location of the source code.
    """
    def __init__(self, uri, **kwargs):
        """
        Constructor for a mercurial repository.
        By default checks out the default branch.
        The *branch* and *tag* kwargs are mutually exclusive.

        Args:
            uri: The URI that hosts the repository.

        Kwargs:
            branch: A branch to checkout and track.
            tag: Any fixed tag like a revision or tagged commit.
            target: Path on system to clone to.
        """
        super(Hg, self).__init__(uri, **kwargs)
        # no tag and no explicit branch: track 'default'
        if self.on_branch and kwargs.get('tag') is None:
            self.branch = 'default'
    @property
    def ready(self):
        """
        Returns true iff the repository is available and the
        right tag or branch is checked out.
        """
        if not os.path.exists(os.path.join(self.target, '.hg')):
            return False
        found = False
        # the clone's hgrc records the remote; look for our uri in it
        with open(os.path.join(self.target, '.hg', 'hgrc')) as fin:
            for line in fin:
                if self.uri in line:
                    found = True
                    break
        return found
    @property
    def src_hash(self):
        """
        Return the current hash of the repository.
        """
        with self:
            cmd = Command('hg identify', self.target)
            cmd.wait()
            return cmd.output()[0].split()[0]
    @staticmethod
    def valid_uri(uri):
        """
        Validate that the supplied uri is handled by this class.

        Returns:
            True if the URI is valid for this class, else False.
        """
        try:
            cmd = Command('hg identify ' + uri)
            cmd.wait()
            return cmd.rcode == 0
        except PakitError:
            return False
    def checkout(self):
        """
        Checkout the right tag or branch.
        """
        Command('hg update ' + self.tag, self.target).wait()
    def download(self):
        """
        Download the repository to the target.
        """
        tag = '' if self.tag is None else '-u ' + self.tag
        cmd = Command('hg clone {tag} {uri} {target}'.format(
            tag=tag, uri=self.uri, target=self.target))
        cmd.wait()
    def reset(self):
        """
        Clears away all build files from repo.
        """
        # hg has no `clean` builtin: list untracked files and remove them
        cmd = Command('hg status -un', self.target)
        cmd.wait()
        for path in cmd.output():
            os.remove(os.path.join(self.target, path))
    def update(self):
        """
        Fetches latest commit when branch is set.
        """
        cmd = Command('hg pull -b ' + self.branch, self.target)
        cmd.wait()
        cmd = Command('hg update', self.target)
        cmd.wait()
class Command(object):
    """
    Execute a command on the host system.

    Once the constructor returns, the command is running.
    At that point, either wait for it to complete or go about your business.
    The process and all children will be part of the same process group,
    this allows for easy termination via signals.

    Attributes:
        alive: True only if the command is still running.
        rcode: When the command finishes, is the return code.
    """
    def __init__(self, cmd, cmd_dir=None, prev_cmd=None, env=None):
        """
        Run a command on the system.

        Note: Don't use '|' or '&', instead execute commands
        one after another & supply prev_cmd.

        Args:
            cmd: A string that you would type into the shell.
                If shlex.split would not correctly split the line
                then pass a list.
            cmd_dir: Change to this directory before executing.
            env: A dictionary of environment variables to change.
                For instance, env={'HOME': '/tmp'} would change
                HOME variable for the duration of the Command.
            prev_cmd: Read the stdout of this command for stdin.

        Raises:
            PakitCmdError: The command could not find command on system
                or the cmd_dir did not exist during subprocess execution.
        """
        super(Command, self).__init__()
        if isinstance(cmd, list):
            self._cmd = cmd
        else:
            self._cmd = shlex.split(cmd)
        # relative './prog' runs as-is; everything else goes through
        # /usr/bin/env so normal PATH lookup applies
        if self._cmd[0].find('./') != 0:
            self._cmd.insert(0, '/usr/bin/env')
        self._cmd_dir = cmd_dir
        stdin = None
        if prev_cmd:
            # chain: read the previous command's captured stdout file
            stdin = open(prev_cmd.stdout.name, 'r')
        if env:
            # overlay caller's vars onto a copy of the current environment
            to_update = env
            env = os.environ.copy()
            env.update(to_update)
        logging.debug('CMD START: %s', self)
        try:
            self.stdout = TempFile(mode='wb', delete=False,
                                   dir=pakit.conf.TMP_DIR,
                                   prefix='cmd', suffix='.log')
            # os.setsid puts the child in its own process group so
            # terminate() can signal it and all of its children at once
            self._proc = subprocess.Popen(
                self._cmd, cwd=self._cmd_dir, env=env, preexec_fn=os.setsid,
                stdin=stdin, stdout=self.stdout, stderr=subprocess.STDOUT
            )
        except OSError as exc:
            if cmd_dir and not os.path.exists(cmd_dir):
                raise PakitCmdError('Command directory does not exist: ' +
                                    self._cmd_dir)
            else:
                raise PakitCmdError('General OSError:\n' + str(exc))
    def __del__(self):
        """
        When the command object is garbage collected:
            - Terminate processes if still running.
            - Write the entire output of the command to the log.
        """
        try:
            if self.alive:
                self.terminate()  # pragma: no cover
            self.stdout.close()
            prefix = '\n '
            msg = prefix + prefix.join(self.output())
            logging.debug("CMD LOG: %s%s", self, msg)
        except AttributeError:
            # __init__ raised before stdout/_proc were assigned
            logging.error('Could not execute command: ' + str(self))
        except (IOError, OSError) as exc:
            logging.error(exc)
    def __str__(self):
        return 'Command: {0}, {1}'.format(self._cmd, self._cmd_dir)
    @property
    def alive(self):
        """
        The command is still running.
        """
        return self._proc.poll() is None
    @property
    def rcode(self):
        """
        The return code of the command.
        """
        return self._proc.returncode
    def output(self, last_n=0):
        """
        The output of the run command.

        Args:
            last_n: Return last n lines from output, default all output.

        Returns:
            A list of lines from the output of the command.
        """
        if self._proc is None or not os.path.exists(self.stdout.name):
            return []  # pragma: no cover
        # TODO: Handle encoding better?
        with open(self.stdout.name, 'rb') as out:
            lines = [line.strip().decode('utf-8', 'ignore')
                     for line in out.readlines()]
        # note: last_n=0 slices [-0:] which is the whole list
        return lines[-last_n:]
    def terminate(self):
        """
        Terminates the subprocess running the command and all
        children spawned by the command.
        On return, they are all dead.
        """
        if self.alive:
            # signal the whole process group created in __init__
            os.killpg(self._proc.pid, signal.SIGTERM)
            self._proc.wait()
    def wait(self, timeout=None):
        """
        Block here until the command is done.

        Args:
            timeout: If no stdout for this interval
                terminate the command and raise error.

        Raises:
            PakitCmdTimeout: When stdout stops getting output for max_time.
            PakitCmdError: When return code is not 0.
        """
        if not timeout:
            timeout = pakit.conf.CONFIG.get('pakit.command.timeout')
        thrd = threading.Thread(target=(lambda proc: proc.wait()),
                                args=(self._proc,))
        thrd.start()
        thread_not_started = True
        while thread_not_started:
            try:
                thrd.join(0.1)
                thread_not_started = False
            except RuntimeError:  # pragma: no cover
                pass
        # watchdog loop: time out when stdout stops growing, using the
        # log file's mtime as the liveness signal
        while self._proc.poll() is None:
            thrd.join(0.5)
            interval = time.time() - os.path.getmtime(self.stdout.name)
            if interval > timeout:
                self.terminate()
                raise PakitCmdTimeout('\n'.join(self.output(10)))
        if self.rcode != 0:
            raise PakitCmdError('\n'.join(self.output(10)))
| |
# {% set dataset_is_draft = data.get('state', 'draft').startswith('draft') or data.get('state', 'none') == 'none' %}
# {% set dataset_has_organization = data.owner_org or data.group_id %}
from logging import getLogger
import ckan.plugins as p
import formencode.validators as v
import ckan.new_authz as auth
import copy
from ckan.logic.action.create import user_create as core_user_create, package_create
from ckan.logic.action.update import package_update
from ckan.logic.action.get import user_show, package_show
import ckan.lib.dictization.model_dictize as model_dictize
import ckan.lib.helpers as h
import helpers as meta_helper
import os
log = getLogger(__name__)
#excluded title, description, tags and last update as they're part of the default ckan dataset metadata
# Extra metadata fields every dataset must supply; each entry pairs a
# field id with the formencode validators that bound its value.
required_metadata = (
    {'id': 'language', 'validators': [v.String(max=100)]},
    {'id': 'data_type', 'validators': [v.String(max=100)]},
    {'id': 'access_information', 'validators': [v.String(max=500)]}, # use_constraints
    {'id': 'status', 'validators': [v.String(max=100)]},
    {'id': 'variable_description', 'validators': [v.String(max=100)]},
)
#optional metadata
expanded_metadata = (
{'id': 'spatial', 'validators': [v.String(max=500)]},
{'id': 'temporal', 'validators': [v.String(max=300)]},
{'id': 'purpose', 'validators': [v.String(max=100)]},
{'id': 'research_focus', 'validators': [v.String(max=50)]},
{'id': 'sub_name', 'validators': [v.String(max=100)]},
{'id': 'sub_email', 'validators': [v.String(max=100)]},
{'id': 'license_id', 'validators': [v.String(max=50)]},
{'id': 'version', 'validators': [v.String(max=50)]},
{'id': 'feature_types', 'validators': [v.String(max=100)]},
{'id': 'north_extent', 'validators': [v.String(max=100)]},
{'id': 'south_extent', 'validators': [v.String(max=100)]},
{'id': 'east_extent', 'validators': [v.String(max=100)]},
{'id': 'west_extent', 'validators': [v.String(max=100)]},
{'id': 'horz_coord_system', 'validators': [v.String(max=100)]},
{'id': 'vert_coord_system', 'validators': [v.String(max=100)]},
{'id': 'update_frequency', 'validators': []},
{'id': 'study_area', 'validators': [v.String(max=100)]},
{'id': 'data_processing_method', 'validators': [v.String(max=500)]},
{'id': 'data_collection_method', 'validators': [v.String(max=500)]},
{'id': 'citation', 'validators': [v.String(max=500)]},
{'id': 'required_software', 'validators': [v.String(max=100)]},
)
# needed for repeatable data elements
def creator_schema():
    """Form schema for one repeatable 'creator' entry; only 'name' is
    required, every other field may be omitted."""
    optional = p.toolkit.get_validator('ignore_missing')
    required = p.toolkit.get_validator('not_empty')
    optional_fields = ('email', 'phone', 'address', 'organization',
                       'is_a_group', 'delete')
    schema = {field: [optional, convert_to_extras_custom]
              for field in optional_fields}
    schema['name'] = [required, convert_to_extras_custom]
    return schema
# needed for repeatable data elements
def contributor_schema():
    """Form schema for one repeatable 'contributor' entry; only 'name' is
    required, every other field may be omitted."""
    optional = p.toolkit.get_validator('ignore_missing')
    required = p.toolkit.get_validator('not_empty')
    schema = {field: [optional, convert_to_extras_custom]
              for field in ('email', 'phone', 'address', 'organization',
                            'delete')}
    schema['name'] = [required, convert_to_extras_custom]
    return schema
# needed for repeatable data elements
def variable_schema():
    """Form schema for one repeatable 'variable' entry; all fields are
    optional."""
    optional = p.toolkit.get_validator('ignore_missing')
    return {field: [optional, convert_to_extras_custom]
            for field in ('name', 'unit', 'delete')}
# needed for repeatable data elements
def convert_to_extras_custom(key, data, errors, context):
    """Validator that copies ``data[key]`` into the dataset's extras list.

    The flattened tuple key (e.g. ``('creators', 0, 'name')``) is serialized
    to a colon-joined string (``'creators:0:name'``) so repeatable elements
    survive the round trip through CKAN's extras storage.
    """
    extras_key = ('extras',)
    extras = data.get(extras_key, [])
    if not extras:
        # First field converted: install the (shared) extras list.
        data[extras_key] = extras
    serialized_key = ':'.join(str(part) for part in key)
    extras.append({'key': serialized_key, 'value': data[key]})
# needed for repeatable data elements
def convert_from_extras(key, data, errors, context):
    """Validator that rebuilds repeatable elements from the extras list.

    Scans the flattened ``data`` dict for extras entries whose serialized
    key (e.g. ``'creators:0:name'``) belongs to the repeatable element named
    by ``key[-1]``, moves them back onto real tuple keys such as
    ``('creators', 0, 'name')``, and drops any repeatable rows whose
    ``delete`` flag is '1'.

    Args:
        key: flattened schema key, e.g. ``('creators',)``.
        data: flattened data dict; mutated in place.
        errors: standard CKAN validator argument (unused).
        context: standard CKAN validator argument (unused).
    """
    # Fixed: removed a leftover debug `print` statement and switched
    # iteritems() -> items() (same behavior, Python 3 compatible).
    def remove_from_extras(data, index_list):
        # Delete every ('extras', i, ...) entry whose index was consumed.
        to_remove = []
        for data_key, data_value in data.items():
            if data_key[0] == 'extras' and data_key[1] in index_list:
                to_remove.append(data_key)
        for item in to_remove:
            del data[item]

    indexList = []  # indexes of extras entries to be removed
    new_data = {}   # data recovered from extras for the given key
    for data_key, data_value in data.items():
        if data_key[0] == 'extras' and data_key[-1] == 'key':
            # Extract the key components separated by ':'
            keyList = data_value.split(':')
            # Convert the repeat-index component to an integer
            if len(keyList) > 1:
                keyList[1] = int(keyList[1])
            # Construct the key for the stored value(s)
            newKey = tuple(keyList)
            if key[-1] == newKey[0]:
                # Retrieve data from extras and add it to new_data so it can
                # be added to the data dictionary.
                new_data[newKey] = data[('extras', data_key[1], 'value')]
                # Add the data index in extras to the list of items to be removed.
                indexList.append(data_key[1])
    # Remove all data from extras with the given index
    remove_from_extras(data, indexList)
    # Remove previous data stored under the given key
    del data[key]

    # Get rid of all repeatable elements marked as deleted (delete == '1').
    deleteIndex_creators = []
    deleteIndex_contributors = []
    deleteIndex_variables = []
    for data_key, data_value in new_data.items():
        # If this is a deleted record then add it to the deleted list to be
        # removed from data later.
        if 'delete' in data_key and data_value == '1':
            if 'creators' == data_key[0]:
                deleteIndex_creators.append(data_key[1])
            elif 'contributors' == data_key[0]:
                deleteIndex_contributors.append(data_key[1])
            elif 'variables' == data_key[0]:
                deleteIndex_variables.append(data_key[1])
    deleted = []
    for data_key, data_value in new_data.items():
        if len(data_key) > 1:
            if data_key[0] == 'creators' and data_key[1] in deleteIndex_creators:
                deleted.append(data_key)
            elif data_key[0] == 'contributors' and data_key[1] in deleteIndex_contributors:
                deleted.append(data_key)
            elif data_key[0] == 'variables' and data_key[1] in deleteIndex_variables:
                deleted.append(data_key)
    for item in deleted:
        del new_data[item]
    # Add data extracted from extras to the data dictionary
    for data_key, data_value in new_data.items():
        data[data_key] = data_value
# TODO: the following method is not used
def _process_deleted_repeatables(data_dict, deleted_indexes, repeatable_name):
    """Remove all deleted repeatable elements from the data_dict, re-indexing
    the survivors down into the freed slots.

    Args:
        data_dict: dict with flattened tuple keys from which keys are removed.
        deleted_indexes: list of indexes for the specified repeatable element.
        repeatable_name: name of the repeatable element (e.g. 'creators').

    NOTE(review): unused and unexercised; the re-indexing logic mutates
    deleted_indexes while relying on min() of it — behavior for multiple
    deletions is unverified.
    """
    last_used_deleted_index = 0
    index_to_adjust = 0
    keys_to_delete = []
    new_data_to_add = []
    if len(deleted_indexes) > 0:
        for data_key, data_value in data_dict.iteritems():
            if len(data_key) > 1 and data_key[0] == repeatable_name:
                # Only entries above the smallest deleted slot need to shift.
                if data_key[1] > min(deleted_indexes) and data_key[1] != 0:
                    if index_to_adjust != 0 and index_to_adjust != data_key[1]:
                        # Moved on to the next element: consume the slot used.
                        del deleted_indexes[last_used_deleted_index]
                    if index_to_adjust == data_key[1]:
                        new_data_key = (data_key[0], min(deleted_indexes), data_key[2])
                        new_data_to_add.append({new_data_key: data_dict[data_key]})
                        keys_to_delete.append(data_key)
                    else:
                        index_to_adjust = data_key[1]
                        last_used_deleted_index = min(deleted_indexes)
                        new_data_key = (data_key[0], last_used_deleted_index, data_key[2])
                        new_data_to_add.append({new_data_key: data_dict[data_key]})
                        keys_to_delete.append(data_key)
    for key in keys_to_delete:
        del data_dict[key]
    for dict_item in new_data_to_add:
        # Each dict_item holds exactly one re-indexed key/value pair.
        for key, value in dict_item.iteritems():
            data_dict[key] = value
            break
# add validator 'not_empty' for all required metadata fields
def get_req_metadata_for_create():
    """Deep-copy required_metadata with 'not_empty' appended to every
    field's validator chain (used when building the create schema)."""
    required = p.toolkit.get_validator('not_empty')
    metadata_copy = copy.deepcopy(required_metadata)
    for field in metadata_copy:
        field['validators'].append(required)
    return metadata_copy
# adds validator 'ignore_missing' for the required metadata fields
def get_req_metadata_for_show_update():
    """Deep-copy required_metadata with 'ignore_missing' appended to every
    field's validator chain (used for the show/update schemas, so legacy
    datasets missing a field don't fail validation)."""
    relaxed = p.toolkit.get_validator('ignore_missing')
    metadata_copy = copy.deepcopy(required_metadata)
    for field in metadata_copy:
        field['validators'].append(relaxed)
    return metadata_copy
# Optional fields are never required: append 'ignore_missing' to each
# expanded_metadata validator chain (mutates the module-level dicts in
# place, once, at import time).
for meta in expanded_metadata:
    meta['validators'].append(p.toolkit.get_validator('ignore_missing'))

# Per-field schema fragments merged into CKAN's package schemas below:
# on create/update every custom field is stored into extras; on show it is
# converted back out of extras before the field's own validators run.
schema_updates_for_create = [{meta['id']: meta['validators']+[p.toolkit.get_converter('convert_to_extras')]}
                             for meta in (get_req_metadata_for_create() + expanded_metadata)]
schema_updates_for_show = [{meta['id']: [p.toolkit.get_converter('convert_from_extras')] + meta['validators']}
                           for meta in (get_req_metadata_for_show_update() + expanded_metadata)]
class MetadataPlugin(p.SingletonPlugin, p.toolkit.DefaultDatasetForm):
    '''This plugin adds fields for the metadata (known as the Common Core) defined at
    https://github.com/project-open-data/project-open-data.github.io/blob/master/schema.md

    Fixes applied in review:
    - ``__create_vocabulary`` was missing ``@classmethod`` although it takes
      ``cls`` and is invoked as ``cls.__create_vocabulary(...)`` from the
      classmethod ``__get_tags`` (its siblings ``__update_vocabulary`` and
      ``__add_tag_to_vocabulary`` already had the decorator).
    - ``after_search`` removed datasets from the results list while
      iterating that same list, which skips the element following each
      removal; it now iterates a copy.
    '''
    p.implements(p.ITemplateHelpers)
    p.implements(p.IConfigurer)
    p.implements(p.IDatasetForm)
    p.implements(p.IActions)
    p.implements(p.IPackageController, inherit=True)

    p.toolkit.add_resource('public', 'metadata_resources')

    # template helper function
    @classmethod
    def check_if_user_owns_dataset(cls, package_id, username):
        # Delegates ownership check to the extension's helpers module.
        return meta_helper.is_user_owns_package(package_id, username)

    # template helper function
    @classmethod
    def get_pylons_context_obj(cls):
        """
        This one will allow us to access the c object in a snippet template
        """
        return p.toolkit.c

    # template helper function
    @classmethod
    def has_user_group_or_org_admin_role(cls, group_id, user_name):
        """
        Checks if the given user has admin role for the specified group/org
        """
        return auth.has_user_permission_for_group_or_org(group_id, user_name, 'admin')

    # template helper function
    @classmethod
    def load_data_into_dict(cls, data_dict):
        '''
        a jinja2 template helper function.
        'extras' contains a list of dicts corresponding to the extras used to store arbitrary key value pairs in CKAN.
        This function moves each entry in 'extras' that is a common core metadata into 'custom_meta'

        Example:
        {'hi':'there', 'extras':[{'key': 'publisher', 'value':'USGS'}]}
        becomes
        {'hi':'there', 'custom_meta':{'publisher':'USGS'}, 'extras':[]}
        '''
        new_dict = copy.deepcopy(data_dict)
        common_metadata = [x['id'] for x in required_metadata+expanded_metadata]
        # needed for repeatable metadata
        # the original data_dict will have the creator set of data keys as follows:
        #   creators:0:name   # for creator#1
        #   creators:0:email
        #   creators:0:phone
        #   creators:0:address
        #   creators:0:organization
        #   creators:0:is_a_group
        #   creators:1:name   # for creator#2
        #   ... and so on
        #
        # In the generated new_dict we want the set of creator data as follows:
        # new_dict['custom_meta']['creators'] = [
        #   {'name': name1, 'email': email1, 'phone': phone1, 'address': address1, 'organization': org1},
        #   {'name': name2, 'email': email2, 'phone': phone2, 'address': address2, 'organization': org2},
        #   ...
        # ]
        # the same logic applies to every other repeatable element in the schema
        try:
            new_dict['custom_meta']
        except KeyError:
            new_dict['custom_meta'] = {}

        repeatable_elements = ['creators', 'contributors', 'variables']
        for element in repeatable_elements:
            new_dict['custom_meta'][element] = []
        reduced_extras = []
        sub_name = ''
        sub_email = ''
        try:
            for extra in new_dict['extras']:
                if extra['key'] in common_metadata:
                    new_dict['custom_meta'][extra['key']] = extra['value']
                    # grab the submitter name and email to set default first creator name and email
                    if extra['key'] == 'sub_name':
                        sub_name = extra['value']
                    if extra['key'] == 'sub_email':
                        sub_email = extra['value']
                else:
                    # check if the key matches the any of the repeatable metadata element
                    data_key_parts = extra['key'].split(':')
                    if data_key_parts[0] not in repeatable_elements or len(data_key_parts) != 3:
                        reduced_extras.append(extra)
            # repeatable element key shorting is necessary so that a key like 'creators:10:name"
            # does not come before a key like 'creators:2:name'
            sorted_creator_keys = cls._get_sorted_repeatable_element_keys(new_dict['extras'], 'creators')
            sorted_contributor_keys = cls._get_sorted_repeatable_element_keys(new_dict['extras'], 'contributors')
            sorted_variable_keys = cls._get_sorted_repeatable_element_keys(new_dict['extras'], 'variables')
            cls._load_repeatable_elements_to_dict(new_dict, sorted_creator_keys, new_dict['extras'])
            cls._load_repeatable_elements_to_dict(new_dict, sorted_contributor_keys, new_dict['extras'])
            cls._load_repeatable_elements_to_dict(new_dict, sorted_variable_keys, new_dict['extras'])
            # add the default creator if no creator exists at this point
            set_default_creator(new_dict, sub_name, sub_email)
            new_dict['extras'] = reduced_extras
        except KeyError as ex:
            log.debug('''Expected key ['%s'] not found, attempting to move common core keys to subdictionary''',
                      ex.message)
            # this can happen when a form fails validation, as all the data will now be as key,value pairs,
            # not under extras, so we'll move them to the expected point again to fill in the values
            # e.g.
            # { 'foo':'bar','publisher':'somename'} becomes {'foo':'bar', 'custom_meta':{'publisher':'somename'}}
            keys_to_remove = []
            log.debug('common core metadata: {0}'.format(common_metadata))
            for key, value in new_dict.iteritems():
                # TODO remove debug
                log.debug('checking key: {0}'.format(key))
                if key in common_metadata:
                    # TODO remove debug
                    log.debug('adding key: {0}'.format(key))
                    new_dict['custom_meta'][key] = value
                    keys_to_remove.append(key)
                    # grab the submitter name and email to set default first creator name and email
                    if key == 'sub_name':
                        sub_name = value
                    if key == 'sub_email':
                        sub_email = value
                else:
                    # check if the key matches any of the repeatable metadata element
                    if key in repeatable_elements:
                        for repeat_item in value:
                            new_dict['custom_meta'][key].append(repeat_item)
            for key in keys_to_remove:
                del new_dict[key]
            # add the default creator if no creator exists at this point
            set_default_creator(new_dict, sub_name, sub_email)
        # remove any repeatable elements marked as deleted from the dict
        for element in repeatable_elements:
            valid_repeatables = [c for c in new_dict['custom_meta'][element] if c['delete'] != '1']
            new_dict['custom_meta'][element] = valid_repeatables
        return new_dict

    @classmethod
    def _load_repeatable_elements_to_dict(cls, dict_to_load_to, repeatable_element_keys, extra_data):
        # Fold sorted serialized keys ('creators:0:name', ...) into
        # dict_to_load_to['custom_meta'][element] as a list of dicts.
        for element_key in repeatable_element_keys:
            data_key_parts = element_key.split(':')
            element_dataset_index = int(data_key_parts[1])
            if element_dataset_index == len(dict_to_load_to['custom_meta'][data_key_parts[0]]):
                # First field seen for this index: start a new element dict.
                element = {data_key_parts[2]: cls._get_extra_value(extra_data, element_key)}
                dict_to_load_to['custom_meta'][data_key_parts[0]].append(element)
            else:
                dict_to_load_to['custom_meta'][data_key_parts[0]][element_dataset_index][data_key_parts[2]] = \
                    cls._get_extra_value(extra_data, element_key)
        return dict_to_load_to

    @classmethod
    def _get_sorted_repeatable_element_keys(cls, extra_data, element_name):
        # Sort numerically by the middle (index) component so that
        # 'creators:10:name' sorts after 'creators:2:name'.
        def get_key(item):
            # if the item is 'creators:0:name'
            # after the split we will have parts = ['creators', 0, 'name']
            parts = item.split(":")
            return int(parts[1])
        element_key_list = []
        for extra in extra_data:
            data_key_parts = extra['key'].split(':')
            if data_key_parts[0] == element_name and len(data_key_parts) == 3:
                element_key_list.append(extra['key'])
        return sorted(element_key_list, key=get_key)

    @classmethod
    def _get_extra_value(cls, extra_dict, key):
        # Linear scan of the extras list for the given serialized key.
        for extra in extra_dict:
            if extra['key'] == key:
                return extra['value']
        return None

    @classmethod
    def __create_vocabulary(cls, name, *values):
        '''Create vocab and tags, if they don't exist already.

        name: the name or unique id of the vocabulary e.g. 'flower_colors'
        values: the values that the vocabulary can take on e.g. ('blue', 'orange', 'purple', 'white', 'yellow)
        '''
        user = p.toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
        context = {'user': user['name']}
        log.debug("Creating vocab '{0}'".format(name))
        data = {'name': name}
        vocab = p.toolkit.get_action('vocabulary_create')(context, data)
        log.debug('Vocab created: {0}'.format(vocab))
        for tag in values:
            log.debug(
                "Adding tag {0} to vocab {1}'".format(tag, name))
            data = {'name': tag, 'vocabulary_id': vocab['id']}
            p.toolkit.get_action('tag_create')(context, data)
        return vocab

    @classmethod
    def __update_vocabulary(cls, name, *values):
        # Re-save the vocabulary under the same name and append new tags.
        user = p.toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
        context = {'user': user['name']}
        log.debug("Updating vocab '{0}'".format(name))
        data = {'id': name}
        vocab = p.toolkit.get_action('vocabulary_show')(context, data)
        data = {'name': name, 'id': vocab['id']}
        vocab = p.toolkit.get_action('vocabulary_update')(context, data)
        log.debug('Vocab updated: {0}'.format(vocab))
        for tag in values:
            log.debug(
                "Adding tag {0} to vocab {1}'".format(tag, name))
            data = {'name': tag, 'vocabulary_id': vocab['id']}
            p.toolkit.get_action('tag_create')(context, data)
        return vocab

    @classmethod
    def __add_tag_to_vocabulary(cls, name, *values):
        # Append tags to an existing vocabulary, then re-read it so the
        # returned vocab includes the new tags.
        user = p.toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
        context = {'user': user['name']}
        log.debug("Updating vocab '{0}'".format(name))
        data = {'id': name}
        vocab = p.toolkit.get_action('vocabulary_show')(context, data)
        log.debug('Vocab updated: {0}'.format(vocab))
        for tag in values:
            log.debug(
                "Adding tag {0} to vocab {1}'".format(tag, name))
            data = {'name': tag, 'vocabulary_id': vocab['id']}
            p.toolkit.get_action('tag_create')(context, data)
        data = {'id': name}
        vocab = p.toolkit.get_action('vocabulary_show')(context, data)
        return vocab

    # template helper function
    @classmethod
    def get_research_focus(cls):
        '''
        Jinja2 template helper function, gets the vocabulary for research focus
        '''
        # NOTE: any time you want to include new tag for the vocabulary term 'research_focus' add the tag name
        # to the following list (research_focus_tags). Nothing else need to be changed
        research_focus_tags = [u'RFA1', u'RFA2', u'RFA3', u'other', u'CI', u'EOD']
        vocab_name = 'research_focus'
        research_focus = cls.__get_tags(vocab_name, research_focus_tags)
        return research_focus

    # template helper function
    @classmethod
    def get_update_frequency(cls):
        '''
        Jinja2 template helper function, gets the vocabulary for update_frequency
        '''
        # NOTE: any time you want to include new tag for the vocabulary term 'update_frequency' add the tag name
        # to the following list. Nothing else need to be changed
        update_frequency_tags = ['none', 'real time', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'other']
        vocab_name = 'update_frequency'
        update_frequency = cls.__get_tags(vocab_name, update_frequency_tags)
        return update_frequency

    # template helper function
    @classmethod
    def get_study_area(cls):
        '''
        Jinja2 template helper function, gets the vocabulary for access levels
        '''
        # NOTE: any time you want to include new tag for the vocabulary term 'study_area' add the tag name
        # to the following list (study_area_tags). Nothing else need to be changed
        study_area_tags = [u'other', u'WRMA-Wasatch Range Metropolitan Area', u'Logan River Watershed',
                           u'Red Butte Creek Watershed', u'Provo River Watershed', u'Multiple Watersheds']
        vocab_name = 'study_area'
        study_area = cls.__get_tags(vocab_name, study_area_tags)
        return study_area

    # template helper function
    @classmethod
    def get_types(cls):
        '''
        Jinja2 template helper function, gets the vocabulary for type
        '''
        # NOTE: any time you want to include new tag for the vocabulary term 'type' add the tag name
        # to the following list (type_tags). Nothing else need to be changed
        type_tags = ['collection', 'dataset', 'image', 'interactive resource', 'model', 'service', 'software', 'text']
        vocab_name = 'type'
        types = cls.__get_tags(vocab_name, type_tags)
        return types

    # template helper function
    @classmethod
    def get_status(cls):
        '''
        Jinja2 template helper function, gets the vocabulary for status
        '''
        # NOTE: any time you want to include new tag for the vocabulary term 'status' add the tag name
        # to the following list (status_tags). Nothing else need to be changed
        status_tags = [u'complete', u'ongoing', u'planned', u'unknown']
        vocab_name = 'status'
        status = cls.__get_tags(vocab_name, status_tags)
        return status

    @classmethod
    def __get_tags(cls, vocab_name, tags):
        # Return the display names of all tags in the vocabulary, creating
        # the vocabulary (or any missing tags) on first use.
        user = p.toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
        context = {'user': user['name']}
        try:
            data = {'id': vocab_name}  # we can use the id or name for id param
            vocab = p.toolkit.get_action('vocabulary_show')(context, data)
            existing_tags = [tag['display_name'] for tag in vocab['tags']]
            # check if we need to create additional tags for this vocabulary term
            tags_to_add = [tag_name for tag_name in tags if tag_name not in existing_tags]
            if len(tags_to_add) > 0:
                vocab = cls.__add_tag_to_vocabulary(vocab_name, *tags_to_add)
        except:
            # Deliberately broad: any failure of vocabulary_show is treated
            # as "vocabulary does not exist yet".
            log.debug("vocabulary_show failed, meaning the vocabulary for %s doesn't exist", vocab_name)
            vocab = cls.__create_vocabulary(vocab_name, *tags)
        new_tags = [x['display_name'] for x in vocab['tags']]
        log.debug("vocab tags: %s" % new_tags)
        return new_tags

    # See ckan.plugins.interfaces.IDatasetForm
    def is_fallback(self):
        # Return False so that we use the CKAN's default for
        # /dataset/new and /dataset/edit
        return False

    # See ckan.plugins.interfaces.IDatasetForm
    def package_types(self):
        # This plugin doesn't handle any special package types, it just
        # registers itself as the default (above).
        return ['dataset']

    def package_form(self):
        return super(MetadataPlugin, self).package_form()

    # See ckan.plugins.interfaces.IDatasetForm
    def update_config(self, config):
        # Instruct CKAN to look in the ```templates``` directory for customized templates and snippets
        p.toolkit.add_template_directory(config, 'templates')
        # add the extension's public dir path so that
        # ckan can find any resources used from this path
        # get the current dir path (here) for this plugin
        here = os.path.dirname(__file__)
        rootdir = os.path.dirname(os.path.dirname(here))
        our_public_dir = os.path.join(rootdir, 'ckanext', 'Metadata', 'public')
        config['extra_public_paths'] = ','.join([our_public_dir, config.get('extra_public_paths', '')])

    # See ckan.plugins.interfaces.IDatasetForm
    def _modify_package_schema(self, schema):
        # Shared create/update schema customization.
        not_empty = p.toolkit.get_validator('not_empty')
        tag_string_convert = p.toolkit.get_validator('tag_string_convert')
        for update in schema_updates_for_create:
            schema.update(update)
        # update the ckan's tag_string element making it required - which would force the user to enter
        # at least on keyword (tag item)
        schema.update({'tag_string': [not_empty, tag_string_convert]})
        schema['resources']['name'][0] = not_empty
        schema.update({'creators': creator_schema()})  # needed for repeatable elements
        schema.update({'contributors': contributor_schema()})  # needed for repeatable elements
        schema.update({'variables': variable_schema()})  # needed for repeatable elements
        return schema

    # See ckan.plugins.interfaces.IDatasetForm
    def create_package_schema(self):
        log.debug('create_package_schema')
        schema = super(MetadataPlugin, self).create_package_schema()
        schema = self._modify_package_schema(schema)
        return schema

    # See ckan.plugins.interfaces.IDatasetForm
    def update_package_schema(self):
        schema = super(MetadataPlugin, self).update_package_schema()
        schema = self._modify_package_schema(schema)
        return schema

    # See ckan.plugins.interfaces.IDatasetForm
    def show_package_schema(self):
        schema = super(MetadataPlugin, self).show_package_schema()
        ignore_missing = p.toolkit.get_validator('ignore_missing')
        # Don't show vocab tags mixed in with normal 'free' tags
        # (e.g. on dataset pages, or on the search page)
        schema['tags']['__extras'].append(p.toolkit.get_converter('free_tags_only'))
        for update in schema_updates_for_show:
            schema.update(update)
        schema.update({'creators': [convert_from_extras, ignore_missing]})  # needed for repeatable elements
        schema.update({'contributors': [convert_from_extras, ignore_missing]})  # needed for repeatable elements
        schema.update({'variables': [convert_from_extras, ignore_missing]})  # needed for repeatable elements
        return schema

    # Method below allows functions and other methods to be called from the Jinja template using the h variable
    def get_helpers(self):
        return {'get_research_focus': self.get_research_focus,
                'required_metadata': required_metadata,
                'load_data_into_dict': self.load_data_into_dict,
                'check_if_dataset_using_older_schema': check_if_dataset_using_older_schema,
                'study_area': self.get_study_area,
                'get_status': self.get_status,
                'get_types': self.get_types,
                'update_frequency': self.get_update_frequency,
                'check_if_user_owns_dataset': self.check_if_user_owns_dataset,
                'get_pylons_context_obj': self.get_pylons_context_obj,
                'has_user_group_or_org_admin_role': self.has_user_group_or_org_admin_role}

    # See ckan.plugins.interfaces.IActions
    def get_actions(self):
        log.debug('get_actions() called')
        return {
            'package_create': pkg_create,
            'package_update': pkg_update,
            'user_create': user_create_local,
            'user_show': show_user,
            'package_show': show_package
        }

    # implements IPackageController
    def before_search(self, search_params):
        '''
        Extensions will receive a dictionary with the query parameters,
        and should return a modified (or not) version of it.

        search_params will include an `extras` dictionary with all values
        from fields starting with `ext_`, so extensions can receive user
        input from specific fields.
        '''
        # when listing datasets outside an organization, get only the
        # public (private:false) datasets
        if 'owner_org' not in search_params['q']:
            if len(search_params['q']) > 0:
                search_params['q'] += ' private:false'
            else:
                search_params['q'] = 'private:false'
        return search_params

    # implements IPackageController
    def after_search(self, search_results, search_params):
        '''
        Extensions will receive the search results, as well as the search
        parameters, and should return a modified (or not) object with the
        same structure:
            {'count': '', 'results': '', 'facets': ''}

        Note that count and facets may need to be adjusted if the extension
        changed the results for some reason.

        search_params will include an `extras` dictionary with all values
        from fields starting with `ext_`, so extensions can receive user
        input from specific fields.
        '''
        if 'owner_org' in search_params['q']:
            datasets = search_results['results']
            # Iterate a copy: private datasets are removed from the live
            # results list below, and removing while iterating the same
            # list would skip the following element.
            for dataset in datasets[:]:
                if dataset['private'] and not (meta_helper.is_user_owns_package(dataset['id'], p.toolkit.c.user) or
                                               self.has_user_group_or_org_admin_role(dataset['owner_org'],
                                                                                     p.toolkit.c.user)):
                    # update dataset count for dataset groups
                    for group in dataset['groups']:
                        for item in search_results['search_facets']['groups']['items']:
                            if item.get('name') == group['name']:
                                item['count'] -= 1
                                break
                    # update dataset count for the organization
                    search_results['search_facets']['organization']['items'][0]['count'] -= 1
                    # update dataset counts by license
                    for item in search_results['search_facets']['license_id']['items']:
                        if item.get('name') == dataset['license_id']:
                            item['count'] -= 1
                            break
                    # update dataset count by each tag
                    for tag in dataset['tags']:
                        for item in search_results['search_facets']['tags']['items']:
                            if item.get('name') == tag['name']:
                                item['count'] -= 1
                                break
                    # update resource format counts
                    updated_formats = []
                    for resource in dataset['resources']:
                        res_format = resource['format']
                        if res_format in updated_formats:
                            continue
                        search_results['facets']['res_format'][res_format] -= 1
                        for item in search_results['search_facets']['res_format']['items']:
                            if item.get('name') == res_format:
                                item['count'] -= 1
                                updated_formats.append(res_format)
                                break
                    # remove the dataset since the user does not have access to it
                    search_results['results'].remove(dataset)
            # remove any facet items that has a count of zero or less
            facet_items_to_delete = {}
            facets = search_results['search_facets']
            for key, value in facets.iteritems():
                for facet_item in value['items']:
                    if facet_item['count'] <= 0:
                        if key not in facet_items_to_delete:
                            facet_items_to_delete[key] = {'items': []}
                            facet_items_to_delete[key]['items'].append(facet_item)
                        else:
                            facet_items_to_delete[key]['items'].append(facet_item)
            for facet_key in facet_items_to_delete:
                for item_to_delete in facet_items_to_delete[facet_key]['items']:
                    search_results['search_facets'][facet_key]['items'].remove(item_to_delete)
        return search_results

    # implements IPackageController
    def after_show(self, context, pkg_dict):
        '''
        Extensions will receive the validated data dict after the package
        is ready for display (Note that the read method will return a
        package domain object, which may not include all fields).
        '''
        # We are cheating the system here by putting dummy data for the resources
        # so that we can create dataset without resources. Then in our own local pkg_update()
        # we are deleting this dummy data
        if pkg_dict['state'] == 'draft' or pkg_dict['state'] == 'draft-complete':
            if len(pkg_dict['resources']) == 0:
                pkg_dict['resources'] = [{'dummy_resource': '****'}]
def user_create_local(context, data_dict):
    """Wrap CKAN's core user_create: after the account is created, add the
    new user to the 'iutah' organization with the 'editor' role."""
    log.debug('my very own user_create() called')
    new_user = core_user_create(context, data_dict)
    membership = {
        'id': 'iutah',
        'username': new_user['name'],
        'role': 'editor'
    }
    context['ignore_auth'] = True
    # 'default' is CKAN' default sysadmin account username that can be used for adding a user to an organization
    context['user'] = 'default'
    p.toolkit.get_action('organization_member_create')(context, membership)
    return new_user
def pkg_update(context, data_dict):
    """Local replacement for CKAN's package_update action.

    Carries forward extras from the stored package, fills in submitter /
    author / citation defaults, strips the dummy resource inserted by
    after_show() for draft datasets, preserves ownership/privacy, prunes
    deleted repeatable elements, and finally delegates to the core
    package_update.
    """
    log.debug('my very own package_update() called')
    # turning context 'validate' key on/off to allow schema changes to work with existing dataset
    context['validate'] = False
    origpkg = p.toolkit.get_action('package_show')(context, data_dict)
    context['validate'] = True
    # this is needed when adding a resource to an existing dataset:
    # copy every stored extra the caller did not supply
    if context.get('save', None) is None:
        for extra in origpkg['extras']:
            if data_dict.get(extra['key'], None) is None:
                data_dict[extra['key']] = extra['value']
    # get name of the author to use in citation
    author = origpkg.get('author', None)
    # get the name of the submitter to use in citation if author is not available
    sub_name = None
    sub_email = None
    submitter_dict = [extra for extra in origpkg['extras'] if extra['key'] == 'sub_name' or extra['key'] == 'sub_email']
    for extra in submitter_dict:
        if extra['key'] == 'sub_name':
            sub_name = extra['value']
        if extra['key'] == 'sub_email':
            sub_email = extra['value']
    if not sub_name:
        # No stored submitter: fall back to the acting user's profile.
        context['return_minimal'] = True
        # turning context 'validate' key on/off to allow schema changes to work with existing dataset
        context['validate'] = False
        user = p.toolkit.get_action('user_show')(context, {'id': context['user']})
        context['validate'] = True
        data_dict['sub_name'] = user['fullname']
        data_dict['sub_email'] = user['email']
    else:
        data_dict['sub_name'] = sub_name
        data_dict['sub_email'] = sub_email
    # TODO: may be we do not need the original CKAN author information
    if not author:
        data_dict['author'] = data_dict['sub_name']
        data_dict['author_email'] = data_dict['sub_email']
        data_dict['version'] = u'1.0'
        data_dict['license_id'] = u'cc-by'
    # citation year comes from the package creation timestamp (ISO date)
    dateval = origpkg['metadata_created']
    year = dateval.split("-")[0]
    if origpkg['state'] != 'active':
        if data_dict.get('author', None):
            data_dict['citation'] = createcitation(context, data_dict, year)
        else:
            data_dict['citation'] = u''
    else:
        data_dict['citation'] = createcitation(context, data_dict, year)
    context['validate'] = False
    # This was added to allow creation metadata only dataset (dataset without resources)
    # Here we are deleting our dummy resource if it exists
    if origpkg['state'] == 'draft' or origpkg['state'] == 'draft-complete':
        if data_dict.get('resources', None):
            if len(data_dict['resources']) > 0:
                dummy_resource = data_dict['resources'][0]
                if dummy_resource.get('dummy_resource', None):
                    del data_dict['resources'][0]
        elif origpkg.get('resources', None):
            if len(origpkg['resources']) > 0:
                dummy_resource = origpkg['resources'][0]
                if dummy_resource.get('dummy_resource', None):
                    del origpkg['resources'][0]
    iutahorg = p.toolkit.get_action('organization_show')(context, {'id': 'iutah'})
    # keep ownership/privacy stable unless the caller moved the dataset
    if not data_dict.get('owner_org', None):
        data_dict['owner_org'] = origpkg['owner_org']
        data_dict['private'] = origpkg['private']
    else:
        if data_dict['owner_org'] == iutahorg['id']:
            data_dict['private'] = origpkg['private']
    # remove if there any deleted repeatable elements from the data_dict
    _remove_deleted_repeatable_elements(data_dict, 'creators')
    _remove_deleted_repeatable_elements(data_dict, 'contributors')
    _remove_deleted_repeatable_elements(data_dict, 'variables')
    # remove any invalid variables
    _remove_invalid_variables(data_dict)
    # add tag names to the tag_string element if 'tag_string' is missing from the data_dict
    # needed to make the entry of one tag (keyword) as required
    if not 'tag_string' in data_dict.keys():
        tags = ','.join(tag['name'] for tag in data_dict['tags'])
        data_dict['tag_string'] = tags
    return package_update(context, data_dict)
def show_user(context, data_dict):
    """Wrap CKAN's user_show, skipping validation on plain reads.

    Disabling validation avoids "missing value" errors for users created
    before a schema change; validation stays on while a save is in
    progress (``context['save']`` is set).
    """
    saving = context.get('save')
    if not saving:
        context['validate'] = False
    return user_show(context, data_dict)
def show_package(context, data_dict):
    """Wrap CKAN's package_show, relaxing validation for legacy datasets.

    Datasets created before a schema change would otherwise raise
    "missing value" errors; validation is switched off for view/edit
    style calls, and for resource reads against datasets whose extras
    still follow the older schema.
    """
    relaxed_flags = ('for_view', 'for_edit', 'pending', 'allow_partial_update')
    if any(context.get(flag) for flag in relaxed_flags):
        context['validate'] = False
    if context.get('resource'):
        model = context['model']
        pkg = model.Package.get(data_dict['id'])
        data_dict = model_dictize.package_dictize(pkg, context)
        if check_if_dataset_using_older_schema(data_dict['extras']):
            context['validate'] = False
    return package_show(context, data_dict)
def check_if_dataset_using_older_schema(dataset_extras):
    """Return True when a dataset's extras predate the current custom schema.

    A dataset is considered "old" if any required/expanded metadata key
    is absent from its extras, or if any required repeatable element
    (currently only 'creators') has no ``name:index:field`` entry.
    Datasets with no extras at all are treated as current.
    """
    if not dataset_extras:
        return False
    expected_keys = [field['id'] for field in required_metadata + expanded_metadata]
    # only take into account the required repeatable elements
    required_repeatables = ['creators']
    present_keys = set(extra['key'] for extra in dataset_extras)
    # repeatable extras are keyed as "element:index:field" (three parts)
    repeat_prefixes = set(
        extra['key'].split(':')[0]
        for extra in dataset_extras
        if len(extra['key'].split(':')) == 3
    )
    if any(key not in present_keys for key in expected_keys):
        return True
    if any(name not in repeat_prefixes for name in required_repeatables):
        return True
    return False
def createcitation(context, data_dict, year):
    """Build the recommended citation string for a dataset.

    Joins the non-deleted creators (formatted "Last, F." for people, or
    the raw name for groups), the publication year, title, version and
    the dataset's canonical URL.

    :param context: CKAN action context; 'validate' is switched off while
        the citation is assembled and set back to True afterwards.
    :param data_dict: dataset dict ('name', 'title', 'creators', 'version')
    :param year: publication year used in the citation
    :returns: formatted citation string

    Fixes: creators missing the 'is_a_group' key used to be dropped from
    the citation entirely (they are now treated as people); dict access
    uses .get() instead of a bare except / direct indexing so legacy
    records without 'delete'/'version' keys cannot crash the citation.
    """
    url = h.url_for(controller='package', action='read', id=data_dict['name'], qualified=True)
    # turning context 'validate' key on/off to allow schema changes to work with existing dataset
    context['validate'] = False
    creators = data_dict.get('creators', None)
    citation_authors = ''
    if creators:
        for creator in creators:
            # skip creators flagged for deletion
            if creator.get('delete') == '1':
                continue
            # a group's name is used verbatim; a person's name is split
            if creator.get('is_a_group') == '1':
                citation_authors += creator['name'] + ", "
            else:
                name_parts = creator['name'].split(" ")
                if len(name_parts) > 1:  # first name and last name present
                    citation_authors += "{last_name}, {first_initial}.".format(
                        last_name=name_parts[-1],
                        first_initial=name_parts[0][0]) + ", "
                elif len(name_parts) > 0:  # single name: use it as the last name
                    citation_authors += "{last_name}.".format(last_name=name_parts[-1]) + ", "
        # get rid of the last comma followed by a space (last 2 chars)
        citation_authors = citation_authors[:-2]
    # 'version' may be absent on very old datasets; fall back to 0
    version = data_dict.get('version', 0)
    citation = '{creator} ({year}), {title}, {version}, iUTAH Modeling & Data Federation, ' \
               '{url}'.format(creator=citation_authors, year=year, title=data_dict['title'],
                              version=version, url=url)
    context['validate'] = True
    return citation
def pkg_create(context, data_dict):
    """Create a dataset with iUTAH defaults filled in.

    Stamps the submitting user's name/e-mail, blanks the creator contact
    fields, forces version 1.0 and the cc-by licence, marks datasets of
    the 'iutah' organization private, strips deleted repeatable elements
    and blank variables, then delegates to CKAN's package_create.
    """
    log.debug('my very own package_create() called')
    # 'return_minimal' will only get the user information and not any dataset associated with the user
    # without return_minimal' the context object will change and point to some other dataset
    # which will get overwritten
    context['return_minimal'] = True
    user = p.toolkit.get_action('user_show')(context, {'id': context['user']})
    data_dict['sub_name'] = user['fullname']
    data_dict['sub_email'] = user['email']
    data_dict['creator_organization'] = ''
    data_dict['creator_address'] = ''
    data_dict['creator_phone'] = ''
    data_dict['version'] = u'1.0'
    data_dict['license_id'] = u'cc-by'
    data_dict['citation'] = u''
    # datasets owned by the iutah organization default to private
    iutahorg = p.toolkit.get_action('organization_show')(context, {'id': 'iutah'})
    if data_dict['owner_org'] == iutahorg['id']:
        data_dict['private'] = True
    # remove if there any deleted repeatable elements from the data_dict
    _remove_deleted_repeatable_elements(data_dict, 'creators')
    _remove_deleted_repeatable_elements(data_dict, 'contributors')
    _remove_deleted_repeatable_elements(data_dict, 'variables')
    # remove any invalid variables
    _remove_invalid_variables(data_dict)
    p.toolkit.check_access('package_create',context, data_dict)
    pkg = package_create(context, data_dict)
    return pkg
def set_default_creator(data_dict, sub_name, sub_email):
    """Seed the creators list with the submitter when no creator was entered.

    Mutates ``data_dict['custom_meta']['creators']`` in place; does
    nothing when at least one creator already exists.
    """
    creators = data_dict['custom_meta']['creators']
    if not creators:
        creators.append({
            'name': sub_name,
            'email': sub_email,
            'phone': '',
            'address': '',
            'organization': '',
            'delete': '0',
            'is_a_group': '0',
        })
def _remove_deleted_repeatable_elements(data_dict, element_name):
if element_name in data_dict:
deleted_contributors = [c for c in data_dict[element_name] if c['delete'] == '1']
for contributor in deleted_contributors:
data_dict[element_name].remove(contributor)
def _remove_invalid_variables(data_dict):
if 'variables' in data_dict:
for variable in data_dict['variables']:
if len(variable['name'].strip()) == 0 and len(variable['unit'].strip()) == 0:
data_dict['variables'].remove(variable)
| |
import random
import sys
"""
Author: Benjamin Kovach
Representation: Permutation of the set {0..N-1}. Each element indicates the row
of the queen at index i (i corresponds to the queen's column).
Parenthood Selection: Tournament selection twice with three combatants.
Mutation: Swap two elements of an individual's vector
Crossover: Order crossover
Survival Selection: Replace the N/3 worst individuals with N/3 new ones
Example run at bottom.
"""
# order crossover
def crossover(i1, i2):
    """Order crossover (OX) for permutation individuals.

    Copies a random slice [r1:r2) from each parent into the matching
    child, then fills the remaining slots -- starting just after the
    slice and wrapping around -- with the missing genes in the order
    they appear in the other parent.  Returns two children; both are
    permutations of the parents' gene set.

    Fix: the original indexed the result of ``filter``, which is only a
    list on Python 2; list comprehensions work on both versions.
    """
    n = len(i1)
    r1 = random.randrange(n - 1)
    r2 = random.randrange(r1, n)
    child1 = [None] * n
    child2 = [None] * n
    child1[r1:r2] = i1[r1:r2]
    child2[r1:r2] = i2[r1:r2]
    # genes missing from each child, in the other parent's order
    rest1 = [e for e in i2 if e not in i1[r1:r2]]
    rest2 = [e for e in i1 if e not in i2[r1:r2]]
    for i, gene in enumerate(rest1):
        child1[(r2 + i) % n] = gene
    for i, gene in enumerate(rest2):
        child2[(r2 + i) % n] = gene
    return child1, child2
# tournament selection
def select(xs, n=3):
    """Tournament selection with ``n`` combatants.

    Samples n individuals from the population (with replacement) and
    returns ``(unfitness, individual)`` for the least-unfit one; ties go
    to the combatant sampled first.

    Fixes: ``fit < mini or mini is None`` compared against ``None``
    before checking for it, which raises TypeError on Python 3 (the
    ``is None`` test now comes first); xrange replaced with range.
    """
    combatants = [xs[random.randrange(len(xs))] for _ in range(n)]
    best_fit = None
    best_ind = None
    for ind in combatants:
        fit = unfitness(ind)
        if best_fit is None or fit < best_fit:
            best_fit = fit
            best_ind = ind
    return (best_fit, best_ind)
def unfitness(queens):
    """Count pairs of queens that attack each other diagonally.

    ``queens[i]`` is the row of the queen in column ``i``.  Because an
    individual is a permutation, no two queens share a row or column, so
    only diagonal conflicts need counting.  0 means a valid solution.
    """
    collisions = 0
    for col, row in enumerate(queens):
        # compare against every queen to the right; `offset` is the
        # column distance, so a diagonal attack means the row differs
        # by exactly that offset
        for offset, other_row in enumerate(queens[col + 1:], start=1):
            if other_row in (row + offset, row - offset):
                collisions += 1
    return collisions
def collision(p, q, n):
    """Return True if board squares p and q share a diagonal.

    Walks offsets in [-n, n) along both diagonals through p and checks
    whether q is hit.  Note that offset 0 makes any point "collide"
    with itself.
    """
    for offset in range(-n, n):
        on_main_diag = (p[0] + offset, p[1] + offset) == q
        on_anti_diag = (p[0] + offset, p[1] - offset) == q
        if on_main_diag or on_anti_diag:
            return True
    return False
def generate_child(pop, mutate_prob=0.1):
    """Produce one child via tournament selection and order crossover.

    Two parents are chosen by tournament, crossed over, and the fitter
    (lower-unfitness) child is kept.  A child already present in the
    population is rejected and generation is retried, preserving
    diversity; with probability ``mutate_prob`` the child is mutated.

    Bug fix: the branches were inverted and returned the LESS fit child,
    contradicting the original comment ("return the child with greater
    fitness" -- lower unfitness is better here).
    """
    _fit1, parent1 = select(pop)
    _fit2, parent2 = select(pop)
    c1, c2 = crossover(parent1, parent2)
    # keep the child with fewer collisions (ties go to c1, as before)
    child = c1 if unfitness(c1) <= unfitness(c2) else c2
    if child in pop:
        # duplicate of an existing individual: try again
        return generate_child(pop, mutate_prob)
    if random.random() < mutate_prob:
        return mutate(child)
    return child
# mutation: swap two randomly chosen positions
def mutate(ind):
    """Swap two randomly chosen positions of the individual, in place.

    Returns the (mutated) individual.  Bug fix: a stray ``return ind``
    at the top of the original made the swap dead code, so mutation
    never actually happened.
    """
    i1 = random.randrange(len(ind))
    i2 = random.randrange(len(ind))
    ind[i1], ind[i2] = ind[i2], ind[i1]
    return ind
def individual(n):
    """Return one candidate solution: a random permutation of 0..n-1."""
    rows = range(n)
    return random.sample(rows, n)
def population(n, ind_size):
    """Create n random individuals of length ind_size.

    Fix: xrange -> range for Python 3 compatibility.
    """
    return [individual(ind_size) for _ in range(n)]
def unfitnesses(pop):
    """Return the unfitness score of every individual, as a list.

    Fix: materialized with a list comprehension so the result is a real
    list on Python 3 too (``map`` returned a list only on Python 2).
    """
    return [unfitness(ind) for ind in pop]
def sort_pop(pop):
    """Return (unfitness, individual) pairs sorted best (lowest) first."""
    scored = zip(unfitnesses(pop), pop)
    # sort on the score only, so individuals are never compared
    return sorted(scored, key=lambda pair: pair[0])
def replace_worst(pop, n=1, mut_prob=0.1):
    """Generational step: drop the n worst individuals, breed n new ones.

    Returns ``(best_unfitness, new_population)``; the new population is
    the surviving individuals (best first) followed by the children.
    NOTE: n must be >= 1 -- ``[:-0]`` would discard everyone.

    Fixes for Python 3: ``map`` no longer returns a list, so the old
    code could not ``append`` children to it; xrange replaced by range.
    """
    keepers = sort_pop(pop)[:-n]
    survivors = [ind for _fit, ind in keepers]
    for _ in range(n):
        survivors.append(generate_child(pop, mut_prob))
    return keepers[0][0], survivors
def show(vs, n):
    """Print the board: 'Q' where a queen sits, '-' elsewhere, one row per line.

    ``vs[i]`` is interpreted as the (column, value) pair (i, vs[i]).

    Fixes for Python 3: ``print ""`` was Python 2 statement syntax, and
    ``zip`` now returns a one-shot iterator, so the queen coordinates
    are materialized as a set before the repeated membership tests.
    """
    queens = set(enumerate(vs))
    for x in range(n):
        for y in range(n):
            if (x, y) in queens:
                sys.stdout.write('Q')
            else:
                sys.stdout.write('-')
        print("")
def nqueens(n):
    """Run the genetic algorithm for the n-queens problem.

    Population size n, up to n*1000 generations, mutation probability
    1/n, replacing n//3 individuals per generation.  Prints and shows a
    solution when one is found, otherwise reports failure.

    Fixes for Python 3: xrange -> range, and ``n / 3`` (float division
    on Python 3) -> ``n // 3`` for the replacement count.
    """
    POPSIZE = n
    GENERATIONS = n * 1000
    MUTPROB = 1.0 / n
    REPLACEMENTS = n // 3
    pop = population(POPSIZE, n)
    soln = False
    g = 0
    for g in range(GENERATIONS):
        mini, pop = replace_worst(pop, n=REPLACEMENTS, mut_prob=MUTPROB)
        if mini == 0:
            soln = True
            print("Solution for n={0}".format(n))
            # replace_worst keeps survivors sorted best-first, so pop[0]
            # is the zero-collision individual
            show(pop[0], n)
            break
    if not soln:
        print("Solution for n = {0} not found in {1} generations.".format(n, g))
# Run the GA for board sizes 4..19 (xrange -> range for Python 3;
# guarded so importing this module does not kick off the whole run).
if __name__ == "__main__":
    for i in range(4, 20):
        nqueens(i)
"""
Solution for n=4
-Q--
---Q
Q---
--Q-
Solution for n=5
----Q
--Q--
Q----
---Q-
-Q---
Solution for n=6
-Q----
---Q--
-----Q
Q-----
--Q---
----Q-
Solution for n=7
-----Q-
---Q---
-Q-----
------Q
----Q--
--Q----
Q------
Solution for n=8
--Q-----
----Q---
-Q------
-------Q
-----Q--
---Q----
------Q-
Q-------
Solution for n = 9 not found in 8999 generations.
Solution for n=10
----Q-----
-------Q--
---Q------
------Q---
---------Q
--Q-------
Q---------
-----Q----
--------Q-
-Q--------
Solution for n = 11 not found in 10999 generations.
Solution for n=12
----Q-------
--------Q---
-Q----------
---------Q--
--Q---------
Q-----------
-------Q----
---Q--------
-----------Q
------Q-----
----------Q-
-----Q------
Solution for n = 13 not found in 12999 generations.
Solution for n=14
-----Q--------
--Q-----------
-----------Q--
---Q----------
--------Q-----
-------------Q
-Q------------
----------Q---
-------Q------
Q-------------
------------Q-
---------Q----
----Q---------
------Q-------
Solution for n=15
----------Q----
-------------Q-
----Q----------
------------Q--
-------Q-------
Q--------------
--Q------------
------Q--------
-Q-------------
--------------Q
--------Q------
-----------Q---
---------Q-----
---Q-----------
-----Q---------
Solution for n = 16 not found in 15999 generations.
Solution for n = 17 not found in 16999 generations.
Solution for n=18
------------Q-----
----------Q-------
-----------------Q
-------Q----------
-Q----------------
---Q--------------
--------Q---------
------Q-----------
---------------Q--
Q-----------------
-----------Q------
----------------Q-
----Q-------------
--Q---------------
--------------Q---
-----Q------------
---------Q--------
-------------Q----
Solution for n = 19 not found in 18999 generations.
"""
| |
import os
import tempfile
from io import StringIO
from wsgiref.util import FileWrapper
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from .forms import MediaActionForm
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Album, Answer, Article, BarAccount, Book,
Bookmark, Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice,
City, Collector, Color, Color2, ComplexSortedPerson, CoverLetter,
CustomArticle, CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
GenRelReference, Grommet, ImplicitlyGeneratedPK, Ingredient,
InlineReference, InlineReferer, Inquisition, Language, Link,
MainPrepopulated, ModelWithStringPrimaryKey, NotReferenced, OldSubscriber,
OtherStory, Paper, Parent, ParentWithDependentChildren, ParentWithUUIDPK,
Person, Persona, Picture, Pizza, Plot, PlotDetails, PlotProxy,
PluggableSearchPerson, Podcast, Post, PrePopulatedPost,
PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo, Question, Recipe,
Recommendation, Recommender, ReferencedByGenRel, ReferencedByInline,
ReferencedByParent, RelatedPrepopulated, RelatedWithUUIDPKModel, Report,
Reservation, Restaurant, RowLevelChangePermissionModel, Section,
ShortMessage, Simple, Sketch, State, Story, StumpJoke, Subscriber,
SuperVillain, Telegram, Thing, Topping, UnchangeableObject,
UndeletableObject, UnorderedObject, UserMessenger, Villain, Vodcast,
Whatsit, Widget, Worker, WorkHour,
)
def callable_year(dt_value):
    """Return the ``year`` attribute of a date-like value, or None.

    Used as a list_display callable; values without a ``year`` attribute
    (e.g. None) are tolerated rather than raising.
    """
    return getattr(dt_value, 'year', None)
# allow the admin to sort this callable column by the 'date' field
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
    """Tabular inline for Articles on the Section change form.

    Exercises prepopulated_fields and collapse/wide fieldset classes
    inside an inline.
    """
    model = Article
    fk_name = 'section'
    prepopulated_fields = {
        'title': ('content',)
    }
    fieldsets = (
        ('Some fields', {
            'classes': ('collapse',),
            'fields': ('title', 'content')
        }),
        ('Some other fields', {
            'classes': ('wide',),
            'fields': ('date', 'section')
        })
    )
class ChapterInline(admin.TabularInline):
    """Plain tabular inline for Chapter, all defaults."""
    model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
    """Exercises list_filter paths spanning several relation hops."""
    list_filter = ('chap',
                   'chap__title',
                   'chap__book',
                   'chap__book__name',
                   'chap__book__promo',
                   'chap__book__promo__name',)
class ArticleAdmin(admin.ModelAdmin):
    """Admin mixing field names, module/admin callables and a lambda in
    list_display, plus hooks that send e-mail on save/delete so tests
    can observe them via the outbox.
    """
    list_display = (
        'content', 'date', callable_year, 'model_year', 'modeladmin_year',
        'model_year_reversed', 'section', lambda obj: obj.title,
    )
    list_editable = ('section',)
    list_filter = ('date', 'section')
    view_on_site = False
    fieldsets = (
        ('Some fields', {
            'classes': ('collapse',),
            'fields': ('title', 'content')
        }),
        ('Some other fields', {
            'classes': ('wide',),
            'fields': ('date', 'section', 'sub_section')
        })
    )
    def changelist_view(self, request):
        # Inject an extra template variable so tests can assert that
        # extra_context reaches the change list template.
        return super(ArticleAdmin, self).changelist_view(
            request, extra_context={
                'extra_var': 'Hello!'
            }
        )
    def modeladmin_year(self, obj):
        # ModelAdmin-level list_display callable, sortable by 'date'.
        return obj.date.year
    modeladmin_year.admin_order_field = 'date'
    modeladmin_year.short_description = None
    def delete_model(self, request, obj):
        # Observable side effect: notify by e-mail before deleting.
        EmailMessage(
            'Greetings from a deleted object',
            'I hereby inform you that some user deleted me',
            'from@example.com',
            ['to@example.com']
        ).send()
        return super(ArticleAdmin, self).delete_model(request, obj)
    def save_model(self, request, obj, form, change=True):
        # Observable side effect: notify by e-mail before saving.
        EmailMessage(
            'Greetings from a created object',
            'I hereby inform you that some user created me',
            'from@example.com',
            ['to@example.com']
        ).send()
        return super(ArticleAdmin, self).save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
    """Admin that hides its module from the admin index entirely."""
    def has_module_permission(self, request):
        # Returning False removes the model from the index page and
        # denies access to the app's module views.
        return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
    def has_change_permission(self, request, obj=None):
        """Only allow changing objects with even id number."""
        is_staff = request.user.is_staff
        has_object = obj is not None
        # short-circuits exactly like the original chained expression:
        # obj.id is only touched when an object was actually given
        return is_staff and has_object and (obj.id % 2 == 0)
class CustomArticleAdmin(admin.ModelAdmin):
    """
    Tests various hooks for using custom templates and contexts.
    """
    change_list_template = 'custom_admin/change_list.html'
    change_form_template = 'custom_admin/change_form.html'
    add_form_template = 'custom_admin/add_form.html'
    object_history_template = 'custom_admin/object_history.html'
    delete_confirmation_template = 'custom_admin/delete_confirmation.html'
    delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
    popup_response_template = 'custom_admin/popup_response.html'
    def changelist_view(self, request):
        # Inject an extra template variable so tests can assert that
        # extra_context reaches the custom change list template.
        return super(CustomArticleAdmin, self).changelist_view(
            request, extra_context={
                'extra_var': 'Hello!'
            }
        )
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color__warm', 'color__value', 'pub_date',)
class InquisitionAdmin(admin.ModelAdmin):
    """Admin whose list_display method name shadows a reverse accessor."""
    list_display = ('leader', 'country', 'expected', 'sketch')
    def sketch(self, obj):
        # A method with the same name as a reverse accessor.
        return 'list-display-sketch'
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
    """Changelist formset that vetoes one specific (person, alive) combo."""
    def clean(self):
        # Reject marking Grace Hopper as alive; used to test
        # formset-level validation errors on the editable changelist.
        for person_dict in self.cleaned_data:
            person = person_dict.get('id')
            alive = person_dict.get('alive')
            if person and alive and person.name == "Grace Hopper":
                raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
    """Editable changelist admin with a validating formset and custom ordering."""
    list_display = ('name', 'gender', 'alive')
    list_editable = ('gender', 'alive')
    list_filter = ('gender',)
    search_fields = ('^name',)
    save_as = True
    def get_changelist_formset(self, request, **kwargs):
        # Swap in the formset that forbids reviving Grace Hopper.
        return super(PersonAdmin, self).get_changelist_formset(request, formset=BasePersonModelFormSet, **kwargs)
    def get_queryset(self, request):
        # Order by a field that isn't in list display, to be able to test
        # whether ordering is preserved.
        return super(PersonAdmin, self).get_queryset(request).order_by('age')
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
    """Admin with a method-based action and a custom action form (media)."""
    actions = ['mail_admin']
    action_form = MediaActionForm
    def mail_admin(self, request, selected):
        # Admin action: send a canned notification e-mail (observable
        # via the test outbox).
        EmailMessage(
            'Greetings from a ModelAdmin action',
            'This is the test email from an admin action',
            'from@example.com',
            ['to@example.com']
        ).send()
def external_mail(modeladmin, request, selected):
    """Standalone admin action: send a canned notification e-mail."""
    EmailMessage(
        'Greetings from a function action',
        'This is the test email from a function action',
        'from@example.com',
        ['to@example.com']
    ).send()
# title shown in the admin action dropdown
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
    """Standalone admin action that redirects instead of rendering."""
    from django.http import HttpResponseRedirect
    return HttpResponseRedirect('/some-where-else/')
# title shown in the admin action dropdown
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
    """Standalone admin action that streams a small in-memory file."""
    buf = StringIO('This is the content of the file')
    return StreamingHttpResponse(FileWrapper(buf))
# title shown in the admin action dropdown
download.short_description = 'Download subscription'
def no_perm(modeladmin, request, selected):
    """Standalone admin action that always responds 403 Forbidden."""
    return HttpResponse(content='No permission to perform this action',
                        status=403)
# title shown in the admin action dropdown
no_perm.short_description = 'No permission to run'
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail, download, no_perm]
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
    """Admin with an editable changelist plus post-save child name fix-up."""
    model = Parent
    inlines = [ChildInline]
    save_as = True
    list_display = ('id', 'name',)
    list_display_links = ('id',)
    list_editable = ('name',)
    def save_related(self, request, form, formsets, change):
        super(ParentAdmin, self).save_related(request, form, formsets, change)
        # Propagate the parent's surname to children saved with only a
        # first name.  NOTE(review): assumes the parent's name is exactly
        # two whitespace-separated words -- any other shape makes the
        # unpacking below raise; confirm against the test fixtures.
        first_name, last_name = form.instance.name.split()
        for child in form.instance.child_set.all():
            if len(child.name.split()) < 2:
                child.name = child.name + ' ' + last_name
            child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super(EmptyModelAdmin, self).get_queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
show_full_result_count = False
search_fields = ('=titletranslation__text', '=the_recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline", "readonly_link_content")
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
class SubPostInline(admin.TabularInline):
    """Inline whose prepopulated/readonly fields depend on object state."""
    model = PrePopulatedSubPost
    prepopulated_fields = {
        'subslug': ('subtitle',)
    }
    def get_readonly_fields(self, request, obj=None):
        # once published, the slug is frozen
        if obj and obj.published:
            return ('subslug',)
        return self.readonly_fields
    def get_prepopulated_fields(self, request, obj=None):
        # a field cannot be both prepopulated and read-only
        if obj and obj.published:
            return {}
        return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
    """Admin whose prepopulated/readonly fields depend on object state."""
    list_display = ['title', 'slug']
    prepopulated_fields = {
        'slug': ('title',)
    }
    inlines = [SubPostInline]
    def get_readonly_fields(self, request, obj=None):
        # once published, the slug is frozen
        if obj and obj.published:
            return ('slug',)
        return self.readonly_fields
    def get_prepopulated_fields(self, request, obj=None):
        # a field cannot be both prepopulated and read-only
        if obj and obj.published:
            return {}
        return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
    """Admin exercising every flavour of read-only field: model fields,
    admin methods, a lambda, and values containing newlines / safe HTML.
    """
    list_display = ['title', 'public']
    readonly_fields = (
        'posted', 'awesomeness_level', 'coolness', 'value',
        'multiline', 'multiline_html', lambda obj: "foo",
        'readonly_content',
    )
    inlines = [
        LinkInline
    ]
    def coolness(self, instance):
        # read-only value computed from the instance's pk
        if instance.pk:
            return "%d amount of cool." % instance.pk
        else:
            return "Unknown coolness."
    def value(self, instance):
        # constant read-only value with a custom column label
        return 1000
    value.short_description = 'Value in $US'
    def multiline(self, instance):
        # read-only value with embedded newlines (escaping test)
        return "Multiline\ntest\nstring"
    def multiline_html(self, instance):
        # read-only value marked safe, so the <br> tags must render
        return mark_safe("Multiline<br>\nhtml<br>\ncontent")
class FieldOverridePostForm(forms.ModelForm):
    """ModelForm overriding a field's help text and label via Meta."""
    # NOTE(review): 'model' is a plain class attribute here, not
    # Meta.model; the admin supplies the model itself, so this attribute
    # appears unused -- confirm before relying on it.
    model = FieldOverridePost
    class Meta:
        help_texts = {
            'posted': 'Overridden help text for the date',
        }
        labels = {
            'public': 'Overridden public label',
        }
class FieldOverridePostAdmin(PostAdmin):
form = FieldOverridePostForm
class CustomChangeList(ChangeList):
def get_queryset(self, request):
return self.root_queryset.order_by('pk').filter(pk=9999) # Doesn't exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class ToppingAdmin(admin.ModelAdmin):
readonly_fields = ('pizzas',)
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display = ('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing CoverLetter
instances. Note that the CoverLetter model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(CoverLetterAdmin, self).get_queryset(request).defer('date_written')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Paper
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(PaperAdmin, self).get_queryset(request).only('title')
class ShortMessageAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing ShortMessage
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(ShortMessageAdmin, self).get_queryset(request).defer('timestamp')
class TelegramAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Telegram
instances. Note that the Telegram model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(TelegramAdmin, self).get_queryset(request).only('title')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content', )
form = StoryForm
ordering = ['-id']
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content', )
ordering = ['-id']
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return format_html('<span style="color: #ff00ff;">{}</span>', obj.name)
colored_name.admin_order_field = 'name'
class PluggableSearchPersonAdmin(admin.ModelAdmin):
    """Admin extending search so an integer term also matches exact age."""
    list_display = ('name', 'age')
    search_fields = ('name',)
    def get_search_results(self, request, queryset, search_term):
        queryset, use_distinct = super(PluggableSearchPersonAdmin, self).get_search_results(
            request, queryset, search_term
        )
        # if the term parses as an integer, union in exact age matches
        try:
            search_term_as_int = int(search_term)
        except ValueError:
            pass
        else:
            queryset |= self.model.objects.filter(age=search_term_as_int)
        return queryset, use_distinct
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
return obj.order
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
    """Admin that replaces (rather than extends) the default URL patterns."""
    def extra(self, request):
        # minimal view backing the single URL exposed below
        return HttpResponse()
    def get_urls(self):
        # Corner case: Don't call parent implementation
        return [
            url(r'^extra/$',
                self.extra,
                name='cable_extra'),
        ]
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2', 'slug3'))
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
'slug3': ['name'],
}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['id', 'name']
list_display_links = ['id']
list_editable = ['name']
list_per_page = 2
class UndeletableObjectAdmin(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs['extra_context'] = {'show_delete': False}
return super(UndeletableObjectAdmin, self).change_view(*args, **kwargs)
class UnchangeableObjectAdmin(admin.ModelAdmin):
    """Admin with the change view removed from its URL patterns."""
    def get_urls(self):
        # Disable change_view, but leave other urls untouched
        urlpatterns = super(UnchangeableObjectAdmin, self).get_urls()
        return [p for p in urlpatterns if p.name and not p.name.endswith("_change")]
def callable_on_unknown(obj):
    """list_display callable that reads a (typically missing) attribute.

    Raises AttributeError for ordinary objects, exercising the admin's
    handling of broken list_display callables.
    """
    return getattr(obj, 'unknown')
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
list_display = [callable_on_unknown, ]
class CustomManagerAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return FilteredManager.objects
class MessageTestingAdmin(admin.ModelAdmin):
    """One admin action per message level, for testing self.message_user."""
    actions = ["message_debug", "message_info", "message_success",
               "message_warning", "message_error", "message_extra_tags"]
    def message_debug(self, request, selected):
        self.message_user(request, "Test debug", level="debug")
    def message_info(self, request, selected):
        self.message_user(request, "Test info", level="info")
    def message_success(self, request, selected):
        self.message_user(request, "Test success", level="success")
    def message_warning(self, request, selected):
        self.message_user(request, "Test warning", level="warning")
    def message_error(self, request, selected):
        self.message_user(request, "Test error", level="error")
    def message_extra_tags(self, request, selected):
        self.message_user(request, "Test tags", extra_tags="extra_tag")
class ChoiceList(admin.ModelAdmin):
list_display = ['choice']
readonly_fields = ['choice']
fields = ['choice']
class DependentChildAdminForm(forms.ModelForm):
    """
    Issue #20522
    Form to test child dependency on parent object's validation
    """
    def clean(self):
        # NOTE(review): assumes 'parent' survives field validation --
        # if it didn't, parent is None and the attribute access below
        # raises AttributeError; confirm the field is required upstream.
        parent = self.cleaned_data.get('parent')
        if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'):
            raise ValidationError("Children must share a family name with their parents " +
                                  "in this contrived test case")
        return super(DependentChildAdminForm, self).clean()
class DependentChildInline(admin.TabularInline):
model = DependentChild
form = DependentChildAdminForm
class ParentWithDependentChildrenAdmin(admin.ModelAdmin):
inlines = [DependentChildInline]
# Tests for ticket 11277 ----------------------------------
class FormWithoutHiddenField(forms.ModelForm):
first = forms.CharField()
second = forms.CharField()
class FormWithoutVisibleField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField(widget=forms.HiddenInput)
class FormWithVisibleAndHiddenField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField()
class EmptyModelVisibleAdmin(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(None, {
'fields': (('first', 'second'),),
}),
)
class EmptyModelHiddenAdmin(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class EmptyModelMixinAdmin(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class CityInlineAdmin(admin.TabularInline):
model = City
view_on_site = False
class StateAdmin(admin.ModelAdmin):
inlines = [CityInlineAdmin]
class RestaurantInlineAdmin(admin.TabularInline):
model = Restaurant
view_on_site = True
class CityAdmin(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
class WorkerAdmin(admin.ModelAdmin):
def view_on_site(self, obj):
return '/worker/%s/%s/' % (obj.surname, obj.name)
class WorkerInlineAdmin(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return '/worker_inline/%s/%s/' % (obj.surname, obj.name)
class RestaurantAdmin(admin.ModelAdmin):
inlines = [WorkerInlineAdmin]
view_on_site = False
def get_changeform_initial_data(self, request):
return {'name': 'overridden_value'}
class FunkyTagAdmin(admin.ModelAdmin):
list_display = ('name', 'content_object')
class InlineReferenceInline(admin.TabularInline):
    # Plain tabular inline for InlineReference rows.
    model = InlineReference
class InlineRefererAdmin(admin.ModelAdmin):
    # Admin exposing InlineReference rows inline.
    inlines = [InlineReferenceInline]
class PlotReadonlyAdmin(admin.ModelAdmin):
    # Exposes the related plotdetails as a read-only field.
    readonly_fields = ('plotdetails',)
class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin):
    # Asserts the get_formsets_with_inlines() contract: obj must be None
    # during an add view and non-None during a change view.
    fields = ['name']

    def add_view(self, request, *args, **kwargs):
        # Flag consumed by get_formsets_with_inlines() below.
        request.is_add_view = True
        return super(GetFormsetsArgumentCheckingAdmin, self).add_view(request, *args, **kwargs)

    def change_view(self, request, *args, **kwargs):
        request.is_add_view = False
        return super(GetFormsetsArgumentCheckingAdmin, self).change_view(request, *args, **kwargs)

    def get_formsets_with_inlines(self, request, obj=None):
        # Raise loudly if the view type and the obj argument disagree.
        if request.is_add_view and obj is not None:
            raise Exception("'obj' passed to get_formsets_with_inlines wasn't None during add_view")
        if not request.is_add_view and obj is None:
            raise Exception("'obj' passed to get_formsets_with_inlines was None during change_view")
        return super(GetFormsetsArgumentCheckingAdmin, self).get_formsets_with_inlines(request, obj)
# ---------------------------------------------------------------------------
# Primary test AdminSite and all model registrations used by the test suite.
# ---------------------------------------------------------------------------
site = admin.AdminSite(name="admin")
site.site_url = '/my-site-url/'
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(Section, save_as=True, inlines=[ArticleInline], readonly_fields=['name_property'])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(FieldOverridePost, FieldOverridePostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(PlotProxy, PlotReadonlyAdmin)
site.register(Bookmark)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
site.register(UnchangeableObject, UnchangeableObjectAdmin)
site.register(State, StateAdmin)
site.register(City, CityAdmin)
site.register(Restaurant, RestaurantAdmin)
site.register(Worker, WorkerAdmin)
site.register(FunkyTag, FunkyTagAdmin)
site.register(ReferencedByParent)
site.register(ChildOfReferer)
site.register(ReferencedByInline)
site.register(InlineReferer, InlineRefererAdmin)
site.register(ReferencedByGenRel)
site.register(GenRelReference)
site.register(ParentWithUUIDPK)
site.register(RelatedWithUUIDPKModel)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
#     related ForeignKey object registered in admin
#     related ForeignKey object not registered in admin
#     related OneToOne object registered in admin
#     related OneToOne object not registered in admin
# when deleting Book, so as to exercise all four paths through
# contrib.admin.utils's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping, ToppingAdmin)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer, date_hierarchy='question__posted')
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(FilteredManager, CustomManagerAdmin)
site.register(PluggableSearchPerson, PluggableSearchPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)
site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin)
site.register(EmptyModelHidden, EmptyModelHiddenAdmin)
site.register(EmptyModelVisible, EmptyModelVisibleAdmin)
site.register(EmptyModelMixin, EmptyModelMixinAdmin)
site.register(StumpJoke)
site.register(Recipe)
site.register(Ingredient)
site.register(NotReferenced)
site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin)
site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin)
# Register core models we need in our tests
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
# Used to test URL namespaces
site2 = admin.AdminSite(name="namespaced_admin")
site2.register(User, UserAdmin)
site2.register(Group, GroupAdmin)
site2.register(ParentWithUUIDPK)
site2.register(
    RelatedWithUUIDPKModel,
    list_display=['pk', 'parent'],
    list_editable=['parent'],
    raw_id_fields=['parent'],
)
site2.register(Person, save_as_continue=False)
# A third site exercising an alternate Article admin.
site7 = admin.AdminSite(name="admin7")
site7.register(Article, ArticleAdmin2)
site7.register(Section)
| |
"""
Comprehensive command line completer support
Provides extended tab-completions support for
- std items: keywords, attributes
- import statements
- array indicies
- dict keys
It is based on the code from rlcompleter, then pimped out.
"""
import os
import sys
import re
import keyword
import pkgutil
import builtins
import __main__
__all__ = ["Completer", "ReadlineCompleter",
"EditlineCompleter", "global_line_editor"]
def _debug(tag, *args):
"""Debugging utility.
Args:
tag: debug tag to enable/disable visibility
args: va-args of what to print
Returns:
Nothing
"""
do_debug = False
if not do_debug:
return
monitor_tags = [
#'complete(0)',
#'complete(match)',
#'attr_matches'
#'global_matches',
#'dict_matches',
#'array_matches'
#'LastExpr(0)',
#'LastExpr(1)',
#'LastExpr(2)',
#'LastExpr(3)',
#'LastExpr(4)',
#'LastExpr(5)',
#'LastExpr(6)',
#'LastExpr(7)',
#'LastExpr(8)'
]
if tag in monitor_tags:
print(os.linesep + "DBG["+tag+"]:", *args)
#
# Maintain a global for the lineditor
#
_gle_data = None  # module-level singleton, accessed via global_line_editor()
def global_line_editor(gle=None):
    """Get or set the global lineeditor instance.

    Args:
        gle: (optional) new global lineeditor instance to install

    Returns:
        The current global lineeditor instance.
    """
    global _gle_data
    if gle is None:
        return _gle_data
    _gle_data = gle
    return gle
class Completer(object):
    """General tab-completion support for underlying terminal infrastructure.

    Provides extended tab-completion mechanisms in a way which is available
    to both 'readline' and 'editline'. The goal is to coalesce the completion
    functionality into a single place and have it independent of one specific
    terminal library's interface.

    Args:
        subeditor: An instance of editline or readline to implement
            the basic terminal interface.
        namespace: (optional): The namespace to use for completion.
            If unspecified, the default namespace where
            completions are performed is __main__
            (technically, __main__.__dict__).
    """

    allow_eval_of_calls = False
    """(bool) - Flag to allow evals of function calls within
    leading expressions.  Careful when this is True!
    """

    # matches single AND double quoted 'strings' in text. It will do so even
    # to support any escaped single and double quotes
    _str_dq_re = re.compile(r'''
        (?<!\\)     # not preceded by a backslash
        ("|\')      # an opening single or double quote
        .*?         # lazily match any characters
        (?<!\\)     # not preceded by a backslash
        \1          # the matching closing quote
        ''', re.VERBOSE)

    # identify an import statement most generally for speed. Should actually
    # compare if two .startswith() statements is faster -- but extra care
    # would be needed to isolate the '\b' manually
    _check_import_stmt_re = re.compile(r'^(import|from)\b')

    # completely define an re to match any import statement form (excluding
    # the 'as' clause).
    _import_stmt_re = re.compile(r'''
        ^
        (
            from\s+(\w+)(\.\w+)*\s+import\s+(\w+)(\.\w+)*
          | import\s+(\w+)(\.\w+)*
        )
        ''', re.VERBOSE)

    def __init__(self, subeditor: object, namespace: dict = None) -> None:
        if namespace and not isinstance(namespace, dict):
            raise TypeError('namespace must be a dictionary')

        # Don't bind to namespace quite yet, but flag whether the user wants a
        # specific namespace or to use __main__.__dict__. This will allow us
        # to bind to __main__.__dict__ at completion time, not now.
        self.namespace = namespace
        if not self.namespace:
            self.namespace = __main__.__dict__

        # configure the subeditor
        self.subeditor = subeditor

        # holds the matches of the sub-statement being matched
        self.matches = []

    def complete(self, text: str) -> list:
        """Command completion routine.

        Args:
            text: The current (full) command-line, containing probably
                incomplete syntax.

        Returns:
            A list of strings (possibly empty) of the possible matches which
            would provide valid syntax.
        """
        # handle import statements
        if self._check_import_stmt_re.match(text.strip()):
            return self._import_matches(text.strip())

        # chop up the line
        pretext, expr2c, token, pad, mtext = self._extract_parts(text)
        _debug('complete(0)',
               'pt=|{0}| ex=|{1}| tk=|{2}| pad=|{3}| mt=|{4}|'
               .format(pretext, expr2c, token, pad, mtext))

        # assume basic completion (no closing syntax needed)
        close_token = ''

        # manage the text component needing completion properly
        if token.startswith('['):

            # extract the 'parent' object
            obj = self._eval_to_object(expr2c)
            if obj is None:
                return []     # no object implies no completions

            # what sort of thing is this
            flavour = Completer._estimate_flavour(obj)

            # handle it based on the flavour
            if flavour == 'dict-ish':  # could have incomplete syntax for dict

                # verify we have *correct* dictionary syntax
                if len(token) < 2:
                    token = token + "'"     # user tabbed at only the [
                    close_token = "']"
                elif 4 >= len(token) > 2:
                    # some sort of use with triple quotes? Legal, but odd.
                    close_token = token[1] + ']'
                else:
                    close_token = token[1] + ']'

                # figure out what keys are needed..
                self.matches = self._dict_matches(obj, mtext)

                # make sure the 'pad' is in between the [ and the '
                if pad != '':
                    token = token[0] + pad + token[1]
                    pad = ''

            # check for list-ish objs and anything call with [ that
            # has __getitem__ is fair
            elif flavour == 'list-ish':
                self.matches = self._array_matches(obj, mtext)
                close_token = ']'

            elif flavour == 'set-ish':
                # sets do not support indexing; auto-correct the buffer:
                # automatically delete the index-data and the [ in the buffer
                self.subeditor.delete_text(len(token) + len(mtext))

                # replace it with '.'
                self.subeditor.insert_text('.')

                # invalid syntax... auto-correct
                token = ''
                close_token = ''
                pretext = ''

                # this effectively drops into the "object-attribute" case
                self.matches = self._attr_matches(expr2c + '.')
                expr2c = ''   # rub this out so the full-line match is clean

            else:
                # hmm. something wonky...
                pass

        elif "." in expr2c:
            # attribute access on some object
            self.matches = self._attr_matches(expr2c)
            expr2c = ''   # rub this out so the full-line match is clean

        elif expr2c == '':
            # here, there is no expression, but unterminated text
            self.matches = self._global_matches(mtext)

        else:
            # plain (partial) identifier at global scope
            self.matches = self._global_matches(expr2c)
            expr2c = ''   # rub this out so the full-line match is clean

        # remember to re-attach the leading text...
        matches = []
        for match in self.matches:
            _debug('complete(match)', pretext + expr2c + token + pad + match + close_token)
            matches.append(pretext + expr2c + token + pad + match + close_token)

        # done
        return matches

    @classmethod
    def _estimate_flavour(cls, obj: object) -> str:
        """Determine a general behaviour of the object.

        Given the object, the goal is to figure out if it is more like a
        list, set, dictionary or something else.

        Args:
            cls: class reference
            obj: An object reference to check

        Returns:
            A string value, one of:
               'list-ish', 'dict-ish', 'set-ish' or 'unknown'
        """
        # easy ones first
        if isinstance(obj, dict):
            return 'dict-ish'
        if isinstance(obj, (list, tuple, range)):
            return 'list-ish'
        if isinstance(obj, (set, frozenset)):
            return 'set-ish'

        # could be either an array or a dict, depends on
        # what key-type it likes...
        if hasattr(obj, '__getitem__'):

            # start with dictionary key... (a KeyError means it accepted a
            # string key, so it behaves like a mapping)
            try:
                _ = obj['__Zz_Really-Unlykely-KeY_3.14159']
            except KeyError:
                return 'dict-ish'
            except TypeError:
                # appears not to be a dictionary
                pass

            # ok, now try it as a list; an IndexError means integer
            # indexing is supported
            try:
                _ = obj[2305843009213693951]  # Mersenne Prime #9  (2**61 -1)
            except IndexError:
                return 'list-ish'
            except TypeError:
                # dunno, sets do this, but they would get
                # filtered by not having __getitem__
                return 'unknown'

        # ?!?!
        return 'unknown'

    @classmethod
    def _entity_postfix(cls, val, word: str) -> str:
        """Append relevant syntax string to indicate `val`'s type.

        Args:
            cls: class reference
            val: an object who's syntax is defined in `word`
            word: the text which evaluates to `val`

        Returns:
            Pass back `word`, potentially appended with appropriate syntax
            in order to be recognizable as a certain type.
        """
        flavour = cls._estimate_flavour(val)
        if isinstance(val, (str, int, float, bytes, bool, complex)):
            pass
        elif flavour == 'dict-ish':   # isinstance(val, dict):
            word = word + "['"
        elif flavour == 'list-ish':   # isinstance(val, (list, tuple, range, bytearray)):
            word = word + "["
        elif flavour == 'set-ish':    # isinstance(val, (set, frozenset)):
            pass   # acts basically like an object or attr, no indexing
        elif callable(val):
            word = word + "("
        elif hasattr(val, '__class__'):
            word = word + "."
        return word

    @classmethod
    def _expr_has_call(cls, text: str) -> bool:
        '''Inspect text and determine if it contains `call` syntax.

        Args:
            cls: class reference
            text: a python expression (in code form)

        Returns:
            True if the expression would evaluate a callable
            False if not
        '''
        # NOTE(review): this is a heuristic on parenthesis counts, not a
        # parse; parens inside strings would be miscounted.
        opens = text.count('(')
        closes = text.count(')')
        if closes > 0 and opens >= closes:
            return True
        return False

    def _eval_to_object(self, expr: str) -> object:
        """Convert the text in the argument into a python object.

        This is, generally, a dangerous thing to do as it is more or less
        evaluating totally unsafe code.  Alas...

        Args:
            expr: python syntax describing which object as source code

        Returns:
            The runtime object found by `eval` of the `expr` argument or None.

        Notes:
            This routine is affected by the `allow_eval_of_calls` flag. By
            default, it *will not* eval source code which would enact an
            (arbitrary) callable. The flag can be altered to change this
            behaviour and let circumstances fall in the lap of the user.
        """
        # need to check if there is a "call" in the expression
        if Completer._expr_has_call(expr):
            if not self.allow_eval_of_calls:
                _debug('_eval_to_object', "Blocking call eval")
                return None

        # I'm not a fan of the blind 'eval', but am not sure of a better
        # way to do this
        try:
            # eval against a copy so the eval cannot rebind namespace names
            ns = {}
            ns.update(self.namespace)
            pobj = eval(expr, ns)
        except Exception:
            return None
        return pobj

    def _import_matches(self, text: str) -> list:
        """Compute matches when text appears to have an import statement.

        Args:
            text: python code for the import statement

        Returns:
            Names of all packages and modules available which match.

        Notes:
            This only does packages and modules... not submodules or other
            symbols. (It does not "import" or "parse" the module.) It will
            complete os, sys or ctypes.util because they are dirs/files. It
            won't do
                 import os.pa<tab>
            which *could* complete to 'os.path'; os.path is a definition
            within os.py.
        """
        pretext = text
        if ' ' in text:
            pretext = text[:text.rindex(' ') + 1]

        textparts = text.split()
        modulepath = ''
        matches = []

        # filter out 'as' situations
        if 'as' in textparts:
            self.matches = []
            return []

        # collect base package in 'from' cases
        if len(textparts) > 2:
            modulepath = textparts[1]

        # handle (import|from) stuff cases
        partial = textparts[len(textparts) - 1]
        if modulepath != '':
            partial = modulepath + '.' + partial

        if '.' not in partial:
            for modname in sys.builtin_module_names:
                if modname.startswith(partial):
                    #print("   builtin: " + modname)
                    matches.append(modname)

        #for importer, modname, ispkg in pkgutil.walk_packages(
        for _, modname, _ in pkgutil.walk_packages(
                path=None, onerror=lambda x: None):
            if modname.startswith(partial):
                #print("   check: " + modname)
                if modulepath != '':
                    matches.append(modname[len(modulepath) + 1:])
                else:
                    matches.append(modname)

        # save for later
        self.matches = matches

        # create the full line
        return [pretext + x for x in matches]

    @classmethod
    def _dict_matches(cls, dobj: dict, text: str) -> list:
        """Identify the possible completion keys within a dictionary.

        Args:
            dobj: the dictionary whose keys are to be checked
            text: some or no text forming the start of the key to be matched

        Returns:
            All keys which match the prefix given.
        """
        _debug('dict_matches', text)

        # provide all keys if no estimation
        if text == '':
            results = [k for k in dobj.keys()]
            return results

        # for valid data, match any keys...
        # NOTE(review): assumes all keys are strings (k.startswith);
        # non-string keys would raise AttributeError here — verify callers.
        results = [k for k in dobj.keys() if k.startswith(text)]
        return results

    @classmethod
    def _array_matches(cls, aobj: list, text: str) -> list:
        """Identify the possible completion indices within a list.

        Args:
            aobj: the list whose keys are to be checked
            text: some or no text forming the start of the index to be matched

        Returns:
            All indices which match the prefix given. Strings of integers, not
            integers themselves are returned.
        """
        _debug('array_matches', text)

        # no hints means put out all options... could be a long list
        if text is None or text == '':
            return [str(x) for x in range(len(aobj))]

        # implicit info:  an array of ZERO length has no completions...
        return [str(x) for x in range(len(aobj)) if str(x).startswith(text)]

    def _global_matches(self, text: str) -> list:
        """Compute matches within the global namespace.

        Args:
            text: initial characters of the global entity name to match

        Returns:
            All keywords, built-in functions and names currently
            defined in self.namespace that match the given prefix text.
        """
        _debug('global_matches', text)
        matches = []
        seen = {"__builtins__"}
        textn = len(text)

        # language keywords first, with trailing syntax sugar
        for word in keyword.kwlist:
            if word[:textn] == text:
                seen.add(word)
                if word in ['finally', 'try']:
                    word = word + ':'
                elif word not in ['False', 'None', 'True', 'break',
                                  'continue', 'pass', 'else']:
                    word = word + ' '
                matches.append(word)

        # then names from the configured namespace and builtins
        for nspace in [self.namespace, builtins.__dict__]:
            if not nspace:
                continue
            for word, val in nspace.items():
                if word[:textn] == text and word not in seen:
                    seen.add(word)
                    matches.append(Completer._entity_postfix(val, word))
        return matches

    def _attr_matches(self, text: str) -> list:
        """Compute matching attributes to an object.

        Args:
            text: expression, containing a '.' and some or no characters of
                the attribute desired

        Returns:
            All attribute names (as strings) which are found within the
            parent object.

        Notes:
            Assuming the text is of the form NAME.NAME....[NAME], and is
            evaluable in self.namespace, it will be evaluated and its attributes
            (as revealed by dir()) are used as possible completions. (For class
            instances, class members are also considered.)

        Warnings:
            This can still invoke arbitrary C code, if an object
            with a __getattr__ hook is evaluated.
        """
        # bust apart the trailing item from the base object
        (expr, attr) = text.rsplit('.', 1)
        _debug('attr_matches', 'expr={0}  attr={1}'.format(expr, attr))

        # try to convert it to a parent object
        pobj = self._eval_to_object(expr)

        # did not resolve to an object
        if pobj is None:
            return []

        # get the content of the object, except __builtins__
        words = set(dir(pobj))
        words.discard("__builtins__")

        if hasattr(pobj, '__class__'):
            words.add('__class__')
            words.update(self._get_class_members(pobj.__class__))

        matches = []
        attrn = len(attr)
        # hide private/dunder names unless the prefix asks for them
        if attr == '':
            noprefix = '_'
        elif attr == '_':
            noprefix = '__'
        else:
            noprefix = None

        # loop widens the privacy filter until something matches
        while True:
            for word in words:
                if (word[:attrn] == attr and
                        not (noprefix and word[:attrn + 1] == noprefix)):
                    match = "%s.%s" % (expr, word)
                    try:
                        val = getattr(pobj, word)
                    except Exception:
                        pass  # Include even if attribute not set
                    else:
                        match = Completer._entity_postfix(val, match)
                    matches.append(match)
            if matches or not noprefix:
                break
            if noprefix == '_':
                noprefix = '__'
            else:
                noprefix = None

        matches.sort()
        return matches

    @classmethod
    def _get_class_members(cls, item: object) -> list:
        """Thoroughly inspect a given instance to find *all* members.

        Args:
            cls: class reference
            item: the object or base-class to inspect

        Returns:
            A list of the attributes found in the object and all of its
            base classes.
        """
        ret = dir(item)
        if hasattr(item, '__bases__'):
            for base in item.__bases__:
                ret = ret + cls._get_class_members(base)
        return ret

    @classmethod
    def _last_expr(cls, text: str) -> (str, str):
        """Separate the last-expression and the pre-text.

        Args:
            cls: reference to the Completer class
            text: a full line of python code

        Returns:
            A tuple of the pretext and the last (possibly incomplete)
            expression.
        """
        nesting = 0
        # scan backwards, skipping over balanced bracket groups
        for index, char in enumerate(reversed(text)):
            if char in ")]}":
                nesting += 1
            elif nesting:
                if char in "([{":
                    nesting -= 1
            elif char in ' \t\n`~!@#$%^&*-=+\\|;:,<>[](){}/?':
                return text[:-index], text[-index:]
        return '', text

    @classmethod
    def _string_sub(cls, text: str) -> (str, dict):
        """Substitute any python string syntax with a simple token.

        Args:
            cls: class reference
            text: a line of python code

        Returns:
            2-Tuple of the original string with string-syntax items replaced
            plus a mapping of the inserted (unique) tokens to the original
            strings.

        Notes:
            Supports all stringifiers in Python: single and double quotes
            along with their triple counterparts.
        """
        rtv = ''
        idx = 0
        done = False
        cache = {}
        while not done:

            # do a basic search to find anything
            mrv = cls._str_dq_re.search(text)
            if mrv is None:
                # we're done, pass back the safe-str and stats
                return text, cache

            # we've got a match

            # create a substitution token
            sstr = '___PyEl_{:d}'.format(idx)

            # remember which token is supposed to be which string
            cache[sstr] = mrv.group()

            # switch it out
            rtv = cls._str_dq_re.sub(sstr, text, 1)
            if rtv == text:
                break
            text = rtv
            idx += 1

        # no luck
        # NOTE(review): this path discards the accumulated cache ({} instead
        # of `cache`) — looks unintended; verify before relying on it.
        return text, {}

    def _extract_parts(self, text: str) -> (str, str, str, str, str):
        """Parse a line of python code (... without the parser).

        Args:
            text: line of python source code

        Returns:
            5-Tuple
               pretext          - any leading code not involved
               expr-to-complete - the expression which will be completed
               lookup_token     - possible lookup token ([ or [' or [")
               padding          - possible whitespace between token and str
               unterminated_str - the fragment of code to be expanded

        Notes:
            Just about any of the tuple entries can be empty. This routine
            is more like a semi-parser/semi-tokenizer. It is the parent
            level code which will have to sort out the *meaning* of each
            entry as to how the overall cmd is structured.
        """
        # replace all quoted strings with simpler tokens. This avoid
        # confusing later stages of the parsing by finding python tokens
        # embedded in data-strings.
        pretext, cache = self._string_sub(text[:])
        #_debug("   ", pretext)
        #_debug("   ", cache)

        # check if there are any quotes left...
        # if so, then there is an un-terminated str
        unterm_str = None
        if "'" in pretext:
            idx = pretext.index("'")
            unterm_str = pretext[idx:]
            pretext = pretext[:idx]
        elif '"' in pretext:
            idx = pretext.index('"')
            unterm_str = pretext[idx:]
            pretext = pretext[:idx]
        #_debug('LastExpr(0)',
        #       'pt >{0}<  uts >{1}<'.format(pretext, unterm_str))

        # declare this and assume there is none
        lookup_tok = None

        # handle possible whitespace at the end of the string
        pt_rstr = pretext.rstrip()         # ws is stuck on pretext
        padding = pretext[len(pt_rstr):]   # separate the pad chars
        pretext = pt_rstr                  # move forward with clean pretext
        #_debug('LastExpr(1)',
        #       'pt >{0}<  pad >{1}<  uts >{2}<'.format(pretext,
        #                                               padding, unterm_str))

        # figure out the last expression
        pretext, expr2c = self._last_expr(pretext)
        #_debug('LastExpr(2)', 'pt >{0}<  expr2c >{1}<'.format(pretext, expr2c))

        # handle possible whitespace between the expr or [ and the match-text
        if padding == '':
            pt_rstr = pretext.rstrip()         # ws is stuck on pretext
            padding = pretext[len(pt_rstr):]   # separate the pad chars
            pretext = pt_rstr                  # move forward with clean pretext
        #_debug('LastExpr(3)',
        #       'pt >{0}<  pad >{1}<  expr2c >{2}<'.format(pretext,
        #                                                  padding, expr2c))

        # check expr2c to see if it looks like a number.
        # (Probably could expand it to support more number formats...
        if unterm_str is None and expr2c.isnumeric():
            unterm_str = expr2c
            pretext, expr2c = self._last_expr(pretext)
            #_debug('LastExpr(4)',
            #       'pt >{0}<  expr2c >{1}<'.format(pretext, expr2c))

        # is the ending part now an array or dictionary lookup?
        if expr2c.endswith('['):
            _debug('LastExpr(5)', "Array or Dictionary ending")
            lookup_tok = '['
            if pretext == '':
                pretext, expr2c = self._last_expr(expr2c[:-len(lookup_tok)])
                #_debug('LastExpr(6)',
                #       'pt >{0}<  expr2c >{1}<'.format(pretext, expr2c))

            # shift the start string char to the bracket
            if unterm_str is not None:
                if unterm_str[0] in "'\"":
                    lookup_tok += unterm_str[0]
                    unterm_str = unterm_str[1:]

        # handle primitive cases where there is just a global function call
        if pretext == '' and expr2c.endswith('('):
            pretext = expr2c
            expr2c = ''

        #_debug('LastExpr(7)',
        #       'base-expr: |{0}  lookup: |{1}|'.format(expr2c, lookup_tok))

        # recheck pretext and expr2c to replace the cache-string-token(s)
        for key, value in cache.items():
            if key in expr2c:
                expr2c = expr2c.replace(key, value)
            if key in pretext:
                pretext = pretext.replace(key, value)
        _debug('LastExpr(8)', "Final Base Expression: " + expr2c)

        # tidy up the Nones...
        if unterm_str is None:
            unterm_str = ''
        if lookup_tok is None:
            lookup_tok = ''

        # done
        return pretext, expr2c, lookup_tok, padding, unterm_str
class ReadlineCompleter(Completer):
    """Readline support for extended completer.

    Args:
        namespace: (optional): The namespace to use for completion.
            If unspecified, the default namespace where
            completions are performed is __main__
            (technically, __main__.__dict__).
    """

    def __init__(self, namespace=None):
        try:
            import readline
            import atexit
            # BUGFIX: Completer.__init__ takes (subeditor, namespace); the
            # arguments were previously passed swapped, so the readline
            # module landed in `namespace` and construction always raised
            # TypeError. The readline module itself acts as the subeditor.
            super().__init__(readline, namespace)
            readline.set_completer(self.rl_complete)

            # Release references early at shutdown (the readline module's
            # contents are quasi-immortal, and the completer function holds a
            # reference to globals).
            atexit.register(lambda: readline.set_completer(None))
        except ImportError:
            # No readline available: run without a subeditor attached
            # (previously `namespace` was mis-passed as the subeditor here).
            super().__init__(None, namespace)

    def rl_complete(self, text: str, state: int) -> str:
        """Return the next possible completion for 'text'.

        This is called successively with state == 0, 1, 2, ... until it
        returns None. The completion should begin with 'text'.

        Args:
            text: python code line
            state: index of the match to return

        Returns:
            The state'th match string, or None when exhausted.

        Notes:
            Backward support for readline. This has not been tested
            particularly thoroughly.
        """
        # a blank line: insert a literal tab instead of completing
        if not text.strip():
            if state == 0:
                if self.subeditor:
                    self.subeditor.insert_text('\t')
                    self.subeditor.redisplay()
                    return ''
                return '\t'
            return None

        # compute the full match set once, on the first call
        if state == 0:
            super().complete(text)

        try:
            return self.matches[state]
        except IndexError:
            return None
class EditlineCompleter(Completer):
    """Completion support customized for editline.

    Editline (and libedit) use a cleaner interface than readline so it is
    separated out here to keep what little delta from the common base
    separated.

    Args:
        subeditor: An instance of editline or readline to implement
            the basic terminal interface.
        namespace: (optional): The namespace to use for completion.
            If unspecified, the default namespace where
            completions are performed is __main__
            (technically, __main__.__dict__).
    """

    def __init__(self, subeditor, namespace: dict = None):
        # this *may* cause an ImportError. Let it propagate...
        import editline

        # make sure the user is using it correctly
        if not isinstance(subeditor, editline.editline.EditLine):
            raise ValueError("must have subeditor of type EditLine")

        # proceed with the creation...
        super().__init__(subeditor, namespace)

        # adjust the editor for clarity: keep the stock match-display
        # routine around, then install our override
        self._default_display_matches = self.subeditor.display_matches
        self.subeditor.display_matches = self.display_matches

        # hook it up
        self.subeditor.completer = self.complete

    def display_matches(self, matches: list):
        """Display relevant information for each match value.

        When editline is used, it will naturally show "whole line matches"
        which are annoying. This 'override' uses the cached statement matches
        to create better lists of stuff.

        Args:
            matches: the list of matches which contain the full-text matches
        """
        # show the cached sub-statement matches, not the full-line ones
        self.subeditor._display_matches(self.matches)
| |
import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')

# demo hyper-parameters; tiny sample counts keep the run short
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1

np.random.seed(1337)  # fixed seed for reproducibility

# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape to (n, channels, height, width) and scale pixels into [0, 1]
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
    """Accumulates per-subplot frame data plus one title per animation step."""

    def __init__(self, n_plots=16):
        self._n_frames = 0
        # one independent frame list per subplot slot
        self._framedata = [[] for _ in range(n_plots)]
        self._titles = []

    def add_frame(self, i, frame):
        # Append the next frame for subplot slot i.
        self._framedata[i].append(frame)

    def set_title(self, title):
        # Record the title shown for the next animation step.
        self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
    # Animates one image per subplot, stepping all subplots in lockstep
    # through the frames stored in a Frames instance.

    def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
        self.n_plots = grid[0] * grid[1]
        self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
        for axis in self.axes:
            # hide tick marks on every subplot
            axis.get_xaxis().set_ticks([])
            axis.get_yaxis().set_ticks([])
        self.frames = frames
        # seed each subplot with its first stored frame
        self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
        self.title = fig.suptitle('')
        super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)

    def _draw_frame(self, j):
        # push frame j of every subplot into its image artist
        for i in range(self.n_plots):
            self.imgs[i].set_data(self.frames._framedata[i][j])
        if len(self.frames._titles) > j:
            self.title.set_text(self.frames._titles[j])
        self._drawn_artists = self.imgs

    def new_frame_seq(self):
        # one animation step per frame stored for subplot 0
        return iter(range(len(self.frames._framedata[0])))

    def _init_draw(self):
        # blank all images before the first frame is drawn
        for img in self.imgs:
            img.set_data([[]])
def combine_imgs(imgs, grid=(1,1)):
    """Tile a stack of equally-sized 2D images into one image on a grid.

    Args:
        imgs: array of shape (n_imgs, img_h, img_w); images are placed
            row-major onto the grid.
        grid: (rows, cols); rows * cols must equal n_imgs.

    Returns:
        2D array of shape (rows * img_h, cols * img_w).

    Raises:
        ValueError: if the number of images does not fill the grid exactly.
    """
    n_imgs, img_h, img_w = imgs.shape
    if n_imgs != grid[0] * grid[1]:
        # BUGFIX: previously raised a bare ValueError() with no message
        raise ValueError(
            'number of images (%d) does not match grid %dx%d'
            % (n_imgs, grid[0], grid[1]))
    combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
    for i in range(grid[0]):
        for j in range(grid[1]):
            # BUGFIX: row-major index is i * cols + j (cols == grid[1]);
            # the old grid[0] * i + j mis-tiled any rectangular grid with
            # more than one row (all grids in this script were unaffected).
            combined[img_h*i:img_h*(i+1), img_w*j:img_w*(j+1)] = imgs[grid[1] * i + j]
    return combined
class DrawActivations(Callback):
    # Keras callback that snapshots layer activations during training and
    # animates them once training finishes.

    def __init__(self, figsize):
        self.figsize = plt.figure(figsize=figsize) if False else None  # placeholder
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
# Convnet under visualization: two conv/pool/dropout stages followed by a
# dense head.  The layer indices probed by DrawActivations (1, 5, 10)
# refer to this exact layer ordering.
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model while DrawActivations records and finally animates the
# layer activations.
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337)  # for reproducibility
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)[:max_train_samples]
X_test = X_test.reshape(10000, 784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
# scale pixel values into [0, 1]
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks=[checkpointer])
if not os.path.isfile(f):
    raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
warnings.filterwarnings('error')
try:
    passed = False
    # Fitting without validation data should make ModelCheckpoint issue a
    # warning, which filterwarnings('error') promotes to an exception.
    # BUG FIX: catch only Warning (was a bare `except:`), so a genuine
    # error raised by model.fit() fails the test instead of being
    # mistaken for the expected warning.
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks=[checkpointer])
except Warning:
    passed = True
if not passed:
    raise Exception("Modelcheckpoint tests did not pass")
print("Test model checkpointer with pattern")
filename = "model_weights.{epoch:04d}.hdf5"
f = os.path.join(path, filename)
nb_epoch = 3
checkpointer = cbks.ModelCheckpoint(f)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, callbacks=[checkpointer])
# One weight file per epoch must exist when the filepath contains a pattern.
for i in range(nb_epoch):
    if not os.path.isfile(f.format(epoch=i)):
        raise Exception("Model weights were not saved separately for each epoch")
print("Tests passed")
| |
from models import Achievement, Category, Trophy, CollectionAchievement, Progress, ProgressAchievement, Task, TaskAchievement, TaskProgress
from django.contrib import admin
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.db import models
# Admin configuration for Category rows: list and search on the category
# name and its parent category.
class CategoryAdmin(admin.ModelAdmin):
    search_fields = ('name', 'parent_category')
    list_display = ['name', 'parent_category']
# ModelForm for validating, if an user has reached the achievement
class AchievementAdminForm(forms.ModelForm):
    class Meta:
        model = Achievement
    def clean(self):
        """Reject user assignments the users have not actually earned.

        Dispatches on the concrete subtype of this achievement
        (Progress-, Task- or CollectionAchievement, probed via
        objects.get in nested try/except blocks) and verifies the
        matching progress records for every selected user.
        """
        users = self.cleaned_data.get('users')
        progress = Progress.objects.filter(progress_achievement__id = self.instance.id)
        taskprogress = TaskProgress.objects.filter(task_achievement__id = self.instance.id)
        task_accomplished_user = []
        progress_accomplished_user = []
        # check, if achievement already exists
        if self.instance.id:
            # check, if achievement has any users
            if users:
                # check, if achievement is one of the sub types
                try:
                    progressachievement = ProgressAchievement.objects.get(id = self.instance.id)
                except:
                    try:
                        taskachievement = TaskAchievement.objects.get(id = self.instance.id)
                    except:
                        try:
                            collectionachievement = CollectionAchievement.objects.get(id = self.instance.id)
                        except:
                            # if achievement is not one of them, it can be saved, because there are no requirements, which have to be checked
                            return self.cleaned_data
                        else:
                            # check, if user in CollectionAchievement has accomplished all achievements, which are required in the CollectionAchievement
                            for achievement in collectionachievement.achievements.all():
                                for user in users:
                                    if not user in achievement.users.all():
                                        raise ValidationError('This User has not earned this achievement yet')
                            return self.cleaned_data
                    else:
                        # check, if there is any TaskProgress for this TaskAchievement
                        if not taskprogress:
                            raise ValidationError('This User has not earned this achievement yet')
                        else:
                            for pro in taskprogress:
                                if pro.user in users:
                                    # check, if user has accomplished all required tasks
                                    if not pro.completed_tasks.count() == taskachievement.tasks.count():
                                        raise ValidationError('This User has not earned this achievement yet')
                                    else:
                                        # check, if users contains only 1 entry
                                        # if not, the user of the accomplished achievement will be saved in an array
                                        if not users.count() == 1:
                                            task_accomplished_user.append(pro.user)
                                        else:
                                            return self.cleaned_data
                                else:
                                    # check, if TaskProgress contains only 1 entry
                                    if taskprogress.count() == 1:
                                        raise ValidationError('This User has not earned this achievement yet')
                            # check, if amount of entries in array, which contains the user of the accomplished achievements,
                            # is the same as the amount of entries of users list
                            if not len(task_accomplished_user) == users.count():
                                raise ValidationError('This User has not earned this achievement yet')
                            else:
                                return self.cleaned_data
                else:
                    # check, if there is any Progress for this ProgressAchievement
                    if not progress:
                        raise ValidationError('This User has not earned this achievement yet')
                    else:
                        for pro in progress:
                            if pro.user in users:
                                # check, if user has accomplished the required amount
                                if not pro.achieved_amount == progressachievement.required_amount:
                                    raise ValidationError('This User has not earned this achievement yet')
                                else:
                                    # check, if users contains only 1 entry
                                    # if not, the user of the accomplished achievement will be saved in an array
                                    if not users.count() == 1:
                                        progress_accomplished_user.append(pro.user)
                                    else:
                                        return self.cleaned_data
                            else:
                                # check, if TaskProgress contains only 1 entry
                                if progress.count() == 1:
                                    raise ValidationError('This User has not earned this achievement yet')
                        # check, if amount of entries in array, which contains the user of the accomplished achievements,
                        # is the same as the amount of entries of users list
                        if not len(progress_accomplished_user) == users.count():
                            raise ValidationError('This User has not earned this achievement yet')
                        else:
                            return self.cleaned_data
            else:
                return self.cleaned_data
        else:
            return self.cleaned_data
# Admin configuration for Achievement: plugs in the validating
# AchievementAdminForm and renders ManyToManyFields with the
# FilteredSelectMultiple widget.
class AchievementAdmin(admin.ModelAdmin):
    form = AchievementAdminForm
    search_fields = ('name', 'category')
    list_display = ['name', 'description', 'category']
    formfield_overrides = {
        models.ManyToManyField: {'widget': FilteredSelectMultiple("user", False)},
    }
# Admin configuration for Progress rows: achievement, reached amount, user.
class ProgressAdmin(admin.ModelAdmin):
    list_display = ['progress_achievement', 'achieved_amount', 'user']
# ModelForm for validating, if an user has reached the ProgressAchievement
class ProgressAchievementAdminForm(forms.ModelForm):
    class Meta:
        model = ProgressAchievement
    def clean(self):
        """Reject user assignments unless every selected user has a
        Progress record that reached required_amount."""
        users = self.cleaned_data.get('users')
        required_amount = self.cleaned_data.get('required_amount')
        progress = Progress.objects.filter(progress_achievement__id = self.instance.id)
        accomplished_user = []
        if self.instance.id:
            if users:
                # check, if there is any Progress for this ProgressAchievement
                if not progress:
                    raise ValidationError('This User has not earned this achievement yet')
                else:
                    for pro in progress:
                        if pro.user in users:
                            # check, if user has accomplished the required amount
                            if not pro.achieved_amount == required_amount:
                                raise ValidationError('This User has not earned this achievement yet')
                            else:
                                # check, if users contains only 1 entry
                                # if not, the user of the accomplished achievement will be saved in an array
                                if not users.count() == 1:
                                    accomplished_user.append(pro.user)
                                else:
                                    return self.cleaned_data
                        else:
                            # check, if TaskProgress contains only 1 entry
                            if progress.count() == 1:
                                raise ValidationError('This User has not earned this achievement yet')
                    # check, if amount of entries in array, which contains the user of the accomplished achievements,
                    # is the same as the amount of entries of users list
                    if not len(accomplished_user) == users.count():
                        raise ValidationError('This User has not earned this achievement yet')
                    else:
                        return self.cleaned_data
            else:
                return self.cleaned_data
        # if ProgressAchievement is new, it cannot be accomplished yet
        elif users:
            raise ValidationError('You can not add user for this achievement yet')
        else:
            return self.cleaned_data
# Admin configuration for ProgressAchievement: plugs in the validating
# ProgressAchievementAdminForm and renders ManyToManyFields with the
# FilteredSelectMultiple widget.
class ProgressAchievementAdmin(admin.ModelAdmin):
    form = ProgressAchievementAdminForm
    search_fields = ('name', 'category')
    list_display = ['name', 'description', 'category']
    formfield_overrides = {
        models.ManyToManyField: {'widget': FilteredSelectMultiple("user", False)},
    }
# ModelForm for validating, if an user has reached the TaskAchievement
class TaskAchievementAdminForm(forms.ModelForm):
    class Meta:
        model = TaskAchievement
    def clean(self):
        """Reject user assignments unless every selected user completed
        all tasks required by this TaskAchievement."""
        users = self.cleaned_data.get('users')
        tasks = self.cleaned_data.get('tasks')
        progress = TaskProgress.objects.filter(task_achievement__id = self.instance.id)
        accomplished_user = []
        if self.instance.id:
            if users:
                # check, if there is any TaskProgress for this TaskAchievement
                if not progress:
                    raise ValidationError('This User has not earned this achievement yet')
                else:
                    for pro in progress:
                        if pro.user in users:
                            # check, if user has accomplished all required tasks
                            if not pro.completed_tasks.count() == tasks.count():
                                raise ValidationError('This User has not earned this achievement yet')
                            else:
                                # check, if users contains only 1 entry
                                # if not, the user of the accomplished achievement will be saved in an array
                                if not users.count() == 1:
                                    accomplished_user.append(pro.user)
                                else:
                                    return self.cleaned_data
                        else:
                            # check, if TaskProgress contains only 1 entry
                            if progress.count() == 1:
                                raise ValidationError('This User has not earned this achievement yet')
                    # check, if amount of entries in array, which contains the user of the accomplished achievements,
                    # is the same as the amount of entries of users list
                    if not len(accomplished_user) == users.count():
                        raise ValidationError('This User has not earned this achievement yet')
                    else:
                        return self.cleaned_data
            else:
                return self.cleaned_data
        # if TaskAchievement is new, it cannot be accomplished yet
        elif users:
            raise ValidationError('You can not add user for this achievement yet')
        else:
            return self.cleaned_data
# set display and search field for TaskAchievement table
# include TaskAchievementAdminForm
# set ManyToManyField users to FilteredSelectMultiple
class TaskAchievementAdmin(admin.ModelAdmin):
    """Admin for TaskAchievement.

    BUG FIX: formfield_overrides was assigned twice (once labelled
    "tasks", once "users"); the second class attribute silently replaced
    the first, so only the "users" widget was ever in effect.  Keep a
    single assignment equal to the previously effective value.
    NOTE: formfield_overrides is keyed by field *class*, so it cannot
    configure the 'tasks' and 'users' ManyToManyFields differently --
    override formfield_for_manytomany() if per-field widgets are needed.
    """
    form = TaskAchievementAdminForm
    list_display = ['name', 'description', 'category']
    search_fields = ('name', 'category')
    formfield_overrides = {
        models.ManyToManyField: {'widget': FilteredSelectMultiple("users", False)}
    }
# ModelForm that validates a CollectionAchievement before saving: every
# selected user must already have earned each achievement in the collection.
class CollectionAchievementAdminForm(forms.ModelForm):
    class Meta:
        model = CollectionAchievement
    def clean(self):
        """Reject the form when a selected user is missing one of the
        collected achievements; otherwise pass cleaned_data through."""
        users = self.cleaned_data.get('users')
        achievements = self.cleaned_data.get('achievements')
        # Nothing to validate when no users are being assigned.
        if not users:
            return self.cleaned_data
        for achievement in achievements:
            earned_by = achievement.users.all()
            for user in users:
                if user not in earned_by:
                    raise ValidationError('This User has not earned this achievement yet')
        return self.cleaned_data
# Admin configuration for CollectionAchievement: plugs in the validating
# CollectionAchievementAdminForm and renders ManyToManyFields with the
# FilteredSelectMultiple widget.
class CollectionAchievementAdmin(admin.ModelAdmin):
    form = CollectionAchievementAdminForm
    search_fields = ('name', 'category')
    list_display = ['name', 'description', 'category']
    formfield_overrides = {
        models.ManyToManyField: {'widget': FilteredSelectMultiple("achievements", False)},
    }
# Admin configuration for Task rows: show name and description.
class TaskAdmin(admin.ModelAdmin):
    list_display = ['name', 'description']
# Admin configuration for TaskProgress rows; ManyToManyFields are
# rendered with the FilteredSelectMultiple widget.
class TaskProgressAdmin(admin.ModelAdmin):
    formfield_overrides = {
        models.ManyToManyField: {'widget': FilteredSelectMultiple("tasks", False)},
    }
    list_display = ['task_achievement', 'user']
# Admin configuration for Trophy rows: show achievement and position.
class TrophyAdmin(admin.ModelAdmin):
    list_display = ['achievement', 'position']
# Hook every achievement model up to the Django admin site with its
# customised ModelAdmin class.
admin.site.register(Achievement, AchievementAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(ProgressAchievement, ProgressAchievementAdmin)
admin.site.register(Progress, ProgressAdmin)
admin.site.register(TaskAchievement, TaskAchievementAdmin)
admin.site.register(Task, TaskAdmin)
admin.site.register(TaskProgress, TaskProgressAdmin)
admin.site.register(Trophy, TrophyAdmin)
admin.site.register(CollectionAchievement, CollectionAchievementAdmin)
| |
from __future__ import unicode_literals
from django import forms
from django.contrib.admin import BooleanFieldListFilter, SimpleListFilter
from django.contrib.admin.options import VERTICAL, ModelAdmin, TabularInline
from django.contrib.admin.sites import AdminSite
from django.core.checks import Error
from django.forms.models import BaseModelFormSet
from django.test import SimpleTestCase
from .models import Band, ValidationTestInlineModel, ValidationTestModel
class CheckTestCase(SimpleTestCase):
    """Assertion helpers around ModelAdmin.check() used by all check tests."""
    def assertIsInvalid(self, model_admin, model, msg, id=None, hint=None, invalid_obj=None):
        # By default the admin class itself is the expected error object.
        invalid_obj = invalid_obj or model_admin
        admin_obj = model_admin(model, AdminSite())
        self.assertEqual(admin_obj.check(), [Error(msg, hint=hint, obj=invalid_obj, id=id)])
    def assertIsInvalidRegexp(self, model_admin, model, msg, id=None, hint=None, invalid_obj=None):
        """
        Same as assertIsInvalid but treats the given msg as a regexp.
        """
        invalid_obj = invalid_obj or model_admin
        admin_obj = model_admin(model, AdminSite())
        errors = admin_obj.check()
        self.assertEqual(len(errors), 1)
        error = errors[0]
        self.assertEqual(error.hint, hint)
        self.assertEqual(error.obj, invalid_obj)
        self.assertEqual(error.id, id)
        self.assertRegex(error.msg, msg)
    def assertIsValid(self, model_admin, model):
        # A valid configuration must produce no check errors at all.
        admin_obj = model_admin(model, AdminSite())
        self.assertEqual(admin_obj.check(), [])
class RawIdCheckTests(CheckTestCase):
    """Checks for the 'raw_id_fields' option (admin.E001-E003)."""
    def test_not_iterable(self):
        class TestModelAdmin(ModelAdmin):
            raw_id_fields = 10
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'raw_id_fields' must be a list or tuple.",
            'admin.E001'
        )
    def test_missing_field(self):
        class TestModelAdmin(ModelAdmin):
            raw_id_fields = ('non_existent_field',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'raw_id_fields[0]' refers to 'non_existent_field', "
            "which is not an attribute of 'modeladmin.ValidationTestModel'.",
            'admin.E002'
        )
    def test_invalid_field_type(self):
        class TestModelAdmin(ModelAdmin):
            raw_id_fields = ('name',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'raw_id_fields[0]' must be a foreign key or a "
            "many-to-many field.",
            'admin.E003'
        )
    def test_valid_case(self):
        class TestModelAdmin(ModelAdmin):
            raw_id_fields = ('users',)
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
class FieldsetsCheckTests(CheckTestCase):
    """Checks for the 'fieldsets' option (admin.E005, E007-E012)."""
    def test_valid_case(self):
        class TestModelAdmin(ModelAdmin):
            fieldsets = (('General', {'fields': ('name',)}),)
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
    def test_not_iterable(self):
        class TestModelAdmin(ModelAdmin):
            fieldsets = 10
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'fieldsets' must be a list or tuple.",
            'admin.E007'
        )
    def test_non_iterable_item(self):
        class TestModelAdmin(ModelAdmin):
            fieldsets = ({},)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'fieldsets[0]' must be a list or tuple.",
            'admin.E008'
        )
    def test_item_not_a_pair(self):
        class TestModelAdmin(ModelAdmin):
            fieldsets = ((),)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'fieldsets[0]' must be of length 2.",
            'admin.E009'
        )
    def test_second_element_of_item_not_a_dict(self):
        class TestModelAdmin(ModelAdmin):
            fieldsets = (('General', ()),)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'fieldsets[0][1]' must be a dictionary.",
            'admin.E010'
        )
    def test_missing_fields_key(self):
        class TestModelAdmin(ModelAdmin):
            fieldsets = (('General', {}),)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'fieldsets[0][1]' must contain the key 'fields'.",
            'admin.E011'
        )
        # A fieldset with a 'fields' key is valid again.
        class TestModelAdmin(ModelAdmin):
            fieldsets = (('General', {'fields': ('name',)}),)
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
    def test_specified_both_fields_and_fieldsets(self):
        class TestModelAdmin(ModelAdmin):
            fieldsets = (('General', {'fields': ('name',)}),)
            fields = ['name']
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "Both 'fieldsets' and 'fields' are specified.",
            'admin.E005'
        )
    def test_duplicate_fields(self):
        class TestModelAdmin(ModelAdmin):
            fieldsets = [(None, {'fields': ['name', 'name']})]
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "There are duplicate field(s) in 'fieldsets[0][1]'.",
            'admin.E012'
        )
    def test_fieldsets_with_custom_form_validation(self):
        class BandAdmin(ModelAdmin):
            fieldsets = (('Band', {'fields': ('name',)}),)
        self.assertIsValid(BandAdmin, Band)
class FieldsCheckTests(CheckTestCase):
    """Checks for the 'fields' option (admin.E004, E006), incl. inlines."""
    def test_duplicate_fields_in_fields(self):
        class TestModelAdmin(ModelAdmin):
            fields = ['name', 'name']
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'fields' contains duplicate field(s).",
            'admin.E006'
        )
    def test_inline(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            fields = 10
        class TestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        # The error must be attributed to the inline, not the ModelAdmin.
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'fields' must be a list or tuple.",
            'admin.E004',
            invalid_obj=ValidationTestInline
        )
class FormCheckTests(CheckTestCase):
    """Checks for the 'form' option (admin.E016)."""
    def test_invalid_type(self):
        class FakeForm(object):
            pass
        class TestModelAdmin(ModelAdmin):
            form = FakeForm
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'form' must inherit from 'BaseModelForm'.",
            'admin.E016'
        )
    def test_fieldsets_with_custom_form_validation(self):
        class BandAdmin(ModelAdmin):
            fieldsets = (('Band', {'fields': ('name',)}),)
        self.assertIsValid(BandAdmin, Band)
    def test_valid_case(self):
        # Extra form fields declared on the custom form may appear in
        # fieldsets without raising a check error.
        class AdminBandForm(forms.ModelForm):
            delete = forms.BooleanField()
        class BandAdmin(ModelAdmin):
            form = AdminBandForm
            fieldsets = (
                ('Band', {
                    'fields': ('name', 'bio', 'sign_date', 'delete')
                }),
            )
        self.assertIsValid(BandAdmin, Band)
class FilterVerticalCheckTests(CheckTestCase):
    """Checks for the 'filter_vertical' option (admin.E017, E019, E020)."""
    def test_not_iterable(self):
        class TestModelAdmin(ModelAdmin):
            filter_vertical = 10
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'filter_vertical' must be a list or tuple.",
            'admin.E017'
        )
    def test_missing_field(self):
        class TestModelAdmin(ModelAdmin):
            filter_vertical = ('non_existent_field',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'filter_vertical[0]' refers to 'non_existent_field', "
            "which is not an attribute of 'modeladmin.ValidationTestModel'.",
            'admin.E019'
        )
    def test_invalid_field_type(self):
        class TestModelAdmin(ModelAdmin):
            filter_vertical = ('name',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'filter_vertical[0]' must be a many-to-many field.",
            'admin.E020'
        )
    def test_valid_case(self):
        class TestModelAdmin(ModelAdmin):
            filter_vertical = ('users',)
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
class FilterHorizontalCheckTests(CheckTestCase):
    """Checks for the 'filter_horizontal' option (admin.E018-E020)."""
    def test_not_iterable(self):
        class TestModelAdmin(ModelAdmin):
            filter_horizontal = 10
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'filter_horizontal' must be a list or tuple.",
            'admin.E018'
        )
    def test_missing_field(self):
        class TestModelAdmin(ModelAdmin):
            filter_horizontal = ('non_existent_field',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'filter_horizontal[0]' refers to 'non_existent_field', "
            "which is not an attribute of 'modeladmin.ValidationTestModel'.",
            'admin.E019'
        )
    def test_invalid_field_type(self):
        class TestModelAdmin(ModelAdmin):
            filter_horizontal = ('name',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'filter_horizontal[0]' must be a many-to-many field.",
            'admin.E020'
        )
    def test_valid_case(self):
        class TestModelAdmin(ModelAdmin):
            filter_horizontal = ('users',)
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
class RadioFieldsCheckTests(CheckTestCase):
    """Checks for the 'radio_fields' option (admin.E021-E024)."""
    def test_not_dictionary(self):
        class TestModelAdmin(ModelAdmin):
            radio_fields = ()
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'radio_fields' must be a dictionary.",
            'admin.E021'
        )
    def test_missing_field(self):
        class TestModelAdmin(ModelAdmin):
            radio_fields = {'non_existent_field': VERTICAL}
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'radio_fields' refers to 'non_existent_field', "
            "which is not an attribute of 'modeladmin.ValidationTestModel'.",
            'admin.E022'
        )
    def test_invalid_field_type(self):
        class TestModelAdmin(ModelAdmin):
            radio_fields = {'name': VERTICAL}
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'radio_fields' refers to 'name', which is not an instance "
            "of ForeignKey, and does not have a 'choices' definition.",
            'admin.E023'
        )
    def test_invalid_value(self):
        class TestModelAdmin(ModelAdmin):
            radio_fields = {'state': None}
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or admin.VERTICAL.",
            'admin.E024'
        )
    def test_valid_case(self):
        class TestModelAdmin(ModelAdmin):
            radio_fields = {'state': VERTICAL}
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
class PrepopulatedFieldsCheckTests(CheckTestCase):
    """Checks for the 'prepopulated_fields' option (admin.E026-E028, E030)."""
    def test_not_dictionary(self):
        class TestModelAdmin(ModelAdmin):
            prepopulated_fields = ()
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'prepopulated_fields' must be a dictionary.",
            'admin.E026'
        )
    def test_missing_field(self):
        class TestModelAdmin(ModelAdmin):
            prepopulated_fields = {'non_existent_field': ('slug',)}
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'prepopulated_fields' refers to 'non_existent_field', "
            "which is not an attribute of 'modeladmin.ValidationTestModel'.",
            'admin.E027'
        )
    def test_missing_field_again(self):
        # The *source* field list is validated too, not just the keys.
        class TestModelAdmin(ModelAdmin):
            prepopulated_fields = {'slug': ('non_existent_field',)}
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'prepopulated_fields[\"slug\"][0]' refers to 'non_existent_field', "
            "which is not an attribute of 'modeladmin.ValidationTestModel'.",
            'admin.E030'
        )
    def test_invalid_field_type(self):
        class TestModelAdmin(ModelAdmin):
            prepopulated_fields = {'users': ('name',)}
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'prepopulated_fields' refers to 'users', which must not be "
            "a DateTimeField, a ForeignKey, a OneToOneField, or a ManyToManyField.",
            'admin.E028'
        )
    def test_valid_case(self):
        class TestModelAdmin(ModelAdmin):
            prepopulated_fields = {'slug': ('name',)}
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
    def test_one_to_one_field(self):
        class TestModelAdmin(ModelAdmin):
            prepopulated_fields = {'best_friend': ('name',)}
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'prepopulated_fields' refers to 'best_friend', which must not be "
            "a DateTimeField, a ForeignKey, a OneToOneField, or a ManyToManyField.",
            'admin.E028'
        )
class ListDisplayTests(CheckTestCase):
    """Checks for the 'list_display' option (admin.E107-E109)."""
    def test_not_iterable(self):
        class TestModelAdmin(ModelAdmin):
            list_display = 10
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_display' must be a list or tuple.",
            'admin.E107'
        )
    def test_missing_field(self):
        class TestModelAdmin(ModelAdmin):
            list_display = ('non_existent_field',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_display[0]' refers to 'non_existent_field', "
            "which is not a callable, an attribute of 'TestModelAdmin', "
            "or an attribute or method on 'modeladmin.ValidationTestModel'.",
            'admin.E108'
        )
    def test_invalid_field_type(self):
        class TestModelAdmin(ModelAdmin):
            list_display = ('users',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_display[0]' must not be a ManyToManyField.",
            'admin.E109'
        )
    def test_valid_case(self):
        # Model fields, admin methods and plain callables are all allowed.
        def a_callable(obj):
            pass
        class TestModelAdmin(ModelAdmin):
            def a_method(self, obj):
                pass
            list_display = ('name', 'decade_published_in', 'a_method', a_callable)
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
class ListDisplayLinksCheckTests(CheckTestCase):
    """Checks for the 'list_display_links' option (admin.E110, E111)."""
    def test_not_iterable(self):
        class TestModelAdmin(ModelAdmin):
            list_display_links = 10
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_display_links' must be a list, a tuple, or None.",
            'admin.E110'
        )
    def test_missing_field(self):
        class TestModelAdmin(ModelAdmin):
            list_display_links = ('non_existent_field',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel, (
                "The value of 'list_display_links[0]' refers to "
                "'non_existent_field', which is not defined in 'list_display'."
            ), 'admin.E111'
        )
    def test_missing_in_list_display(self):
        class TestModelAdmin(ModelAdmin):
            list_display_links = ('name',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_display_links[0]' refers to 'name', which is not defined in 'list_display'.",
            'admin.E111'
        )
    def test_valid_case(self):
        def a_callable(obj):
            pass
        class TestModelAdmin(ModelAdmin):
            def a_method(self, obj):
                pass
            list_display = ('name', 'decade_published_in', 'a_method', a_callable)
            list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
    def test_None_is_valid_case(self):
        class TestModelAdmin(ModelAdmin):
            list_display_links = None
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
    def test_list_display_links_check_skipped_if_get_list_display_overridden(self):
        """
        list_display_links check is skipped if get_list_display() is overridden.
        """
        class TestModelAdmin(ModelAdmin):
            list_display_links = ['name', 'subtitle']
            def get_list_display(self, request):
                pass
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
    def test_list_display_link_checked_for_list_tuple_if_get_list_display_overridden(self):
        """
        list_display_links is checked for list/tuple/None even if
        get_list_display() is overridden.
        """
        class TestModelAdmin(ModelAdmin):
            list_display_links = 'non-list/tuple'
            def get_list_display(self, request):
                pass
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_display_links' must be a list, a tuple, or None.",
            'admin.E110'
        )
class ListFilterTests(CheckTestCase):
    """Checks for the 'list_filter' option (admin.E112-E116)."""
    def test_list_filter_validation(self):
        class TestModelAdmin(ModelAdmin):
            list_filter = 10
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_filter' must be a list or tuple.",
            'admin.E112'
        )
    def test_missing_field(self):
        class TestModelAdmin(ModelAdmin):
            list_filter = ('non_existent_field',)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_filter[0]' refers to 'non_existent_field', "
            "which does not refer to a Field.",
            'admin.E116'
        )
    def test_not_filter(self):
        class RandomClass(object):
            pass
        class TestModelAdmin(ModelAdmin):
            list_filter = (RandomClass,)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_filter[0]' must inherit from 'ListFilter'.",
            'admin.E113')
    def test_not_filter_again(self):
        # The second element of a (field, filter) pair must be a
        # FieldListFilter subclass.
        class RandomClass(object):
            pass
        class TestModelAdmin(ModelAdmin):
            list_filter = (('is_active', RandomClass),)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
            'admin.E115'
        )
    def test_not_filter_again_again(self):
        # A SimpleListFilter is also rejected in the pair position.
        class AwesomeFilter(SimpleListFilter):
            def get_title(self):
                return 'awesomeness'
            def get_choices(self, request):
                return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
            def get_queryset(self, cl, qs):
                return qs
        class TestModelAdmin(ModelAdmin):
            list_filter = (('is_active', AwesomeFilter),)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
            'admin.E115'
        )
    def test_not_associated_with_field_name(self):
        class TestModelAdmin(ModelAdmin):
            list_filter = (BooleanFieldListFilter,)
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_filter[0]' must not inherit from 'FieldListFilter'.",
            'admin.E114'
        )
    def test_valid_case(self):
        class AwesomeFilter(SimpleListFilter):
            def get_title(self):
                return 'awesomeness'
            def get_choices(self, request):
                return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
            def get_queryset(self, cl, qs):
                return qs
        class TestModelAdmin(ModelAdmin):
            list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
class ListPerPageCheckTests(CheckTestCase):
    """Checks for the 'list_per_page' option (admin.E118)."""
    def test_not_integer(self):
        class TestModelAdmin(ModelAdmin):
            list_per_page = 'hello'
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_per_page' must be an integer.",
            'admin.E118'
        )
    def test_valid_case(self):
        class TestModelAdmin(ModelAdmin):
            list_per_page = 100
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
class ListMaxShowAllCheckTests(CheckTestCase):
    """Checks for the 'list_max_show_all' option (admin.E119)."""
    def test_not_integer(self):
        class TestModelAdmin(ModelAdmin):
            list_max_show_all = 'hello'
        self.assertIsInvalid(
            TestModelAdmin, ValidationTestModel,
            "The value of 'list_max_show_all' must be an integer.",
            'admin.E119'
        )
    def test_valid_case(self):
        class TestModelAdmin(ModelAdmin):
            list_max_show_all = 200
        self.assertIsValid(TestModelAdmin, ValidationTestModel)
class SearchFieldsCheckTests(CheckTestCase):
    """Checks for the ModelAdmin.search_fields option (admin.E126)."""

    def test_not_iterable(self):
        class Admin(ModelAdmin):
            search_fields = 10

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'search_fields' must be a list or tuple.",
            'admin.E126'
        )
class DateHierarchyCheckTests(CheckTestCase):
    """Checks for the ModelAdmin.date_hierarchy option (admin.E127/E128),
    including related-field ('__') lookups."""

    def test_missing_field(self):
        class Admin(ModelAdmin):
            date_hierarchy = 'non_existent_field'

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'date_hierarchy' refers to 'non_existent_field', "
            "which does not refer to a Field.",
            'admin.E127'
        )

    def test_invalid_field_type(self):
        class Admin(ModelAdmin):
            date_hierarchy = 'name'

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'date_hierarchy' must be a DateField or DateTimeField.",
            'admin.E128'
        )

    def test_valid_case(self):
        class Admin(ModelAdmin):
            date_hierarchy = 'pub_date'

        self.assertIsValid(Admin, ValidationTestModel)

    def test_related_valid_case(self):
        class Admin(ModelAdmin):
            date_hierarchy = 'band__sign_date'

        self.assertIsValid(Admin, ValidationTestModel)

    def test_related_invalid_field_type(self):
        class Admin(ModelAdmin):
            date_hierarchy = 'band__name'

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'date_hierarchy' must be a DateField or DateTimeField.",
            'admin.E128'
        )
class OrderingCheckTests(CheckTestCase):
    """Checks for the ModelAdmin.ordering option (admin.E031-E033)."""

    def test_not_iterable(self):
        # A non-sequence value is rejected outright.
        class Admin(ModelAdmin):
            ordering = 10

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'ordering' must be a list or tuple.",
            'admin.E031'
        )

        # Each entry must resolve to an attribute of the model.
        class FieldAdmin(ModelAdmin):
            ordering = ('non_existent_field',)

        self.assertIsInvalid(
            FieldAdmin,
            ValidationTestModel,
            "The value of 'ordering[0]' refers to 'non_existent_field', "
            "which is not an attribute of 'modeladmin.ValidationTestModel'.",
            'admin.E033'
        )

    def test_random_marker_not_alone(self):
        class Admin(ModelAdmin):
            ordering = ('?', 'name')

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'ordering' has the random ordering marker '?', but contains "
            "other fields as well.",
            'admin.E032',
            hint='Either remove the "?", or remove the other fields.'
        )

    def test_valid_random_marker_case(self):
        class Admin(ModelAdmin):
            ordering = ('?',)

        self.assertIsValid(Admin, ValidationTestModel)

    def test_valid_complex_case(self):
        class Admin(ModelAdmin):
            ordering = ('band__name',)

        self.assertIsValid(Admin, ValidationTestModel)

    def test_valid_case(self):
        class Admin(ModelAdmin):
            ordering = ('name', 'pk')

        self.assertIsValid(Admin, ValidationTestModel)
class ListSelectRelatedCheckTests(CheckTestCase):
    """Checks for the ModelAdmin.list_select_related option (admin.E117)."""

    def test_invalid_type(self):
        class Admin(ModelAdmin):
            list_select_related = 1

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'list_select_related' must be a boolean, tuple or list.",
            'admin.E117'
        )

    def test_valid_case(self):
        class Admin(ModelAdmin):
            list_select_related = False

        self.assertIsValid(Admin, ValidationTestModel)
class SaveAsCheckTests(CheckTestCase):
    """Checks for the ModelAdmin.save_as option (admin.E101)."""

    def test_not_boolean(self):
        class Admin(ModelAdmin):
            save_as = 1

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'save_as' must be a boolean.",
            'admin.E101'
        )

    def test_valid_case(self):
        class Admin(ModelAdmin):
            save_as = True

        self.assertIsValid(Admin, ValidationTestModel)
class SaveOnTopCheckTests(CheckTestCase):
    """Checks for the ModelAdmin.save_on_top option (admin.E102)."""

    def test_not_boolean(self):
        class Admin(ModelAdmin):
            save_on_top = 1

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'save_on_top' must be a boolean.",
            'admin.E102'
        )

    def test_valid_case(self):
        class Admin(ModelAdmin):
            save_on_top = True

        self.assertIsValid(Admin, ValidationTestModel)
class InlinesCheckTests(CheckTestCase):
    """Checks for the ModelAdmin.inlines option (admin.E103-E106)."""

    def test_not_iterable(self):
        class Admin(ModelAdmin):
            inlines = 10

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'inlines' must be a list or tuple.",
            'admin.E103'
        )

    def test_not_model_admin(self):
        # NOTE: the class name is matched by the error regexp below, so the
        # fixture class keeps the 'ValidationTestInline' name.
        class ValidationTestInline(object):
            pass

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsInvalidRegexp(
            Admin,
            ValidationTestModel,
            r"'.*\.ValidationTestInline' must inherit from 'InlineModelAdmin'\.",
            'admin.E104'
        )

    def test_missing_model_field(self):
        class ValidationTestInline(TabularInline):
            pass

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsInvalidRegexp(
            Admin,
            ValidationTestModel,
            r"'.*\.ValidationTestInline' must have a 'model' attribute\.",
            'admin.E105'
        )

    def test_invalid_model_type(self):
        class SomethingBad(object):
            pass

        class ValidationTestInline(TabularInline):
            model = SomethingBad

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsInvalidRegexp(
            Admin,
            ValidationTestModel,
            r"The value of '.*\.ValidationTestInline.model' must be a Model\.",
            'admin.E106'
        )

    def test_valid_case(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsValid(Admin, ValidationTestModel)
class FkNameCheckTests(CheckTestCase):
    """Checks for the InlineModelAdmin.fk_name option (admin.E202)."""

    def test_missing_field(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            fk_name = 'non_existent_field'

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "'modeladmin.ValidationTestInlineModel' has no field named 'non_existent_field'.",
            'admin.E202',
            invalid_obj=ValidationTestInline
        )

    def test_valid_case(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            fk_name = 'parent'

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsValid(Admin, ValidationTestModel)
class ExtraCheckTests(CheckTestCase):
    """Checks for the InlineModelAdmin.extra option (admin.E203)."""

    def test_not_integer(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            extra = 'hello'

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'extra' must be an integer.",
            'admin.E203',
            invalid_obj=ValidationTestInline
        )

    def test_valid_case(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            extra = 2

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsValid(Admin, ValidationTestModel)
class MaxNumCheckTests(CheckTestCase):
    """Checks for the InlineModelAdmin.max_num option (admin.E204)."""

    def test_not_integer(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            max_num = 'hello'

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'max_num' must be an integer.",
            'admin.E204',
            invalid_obj=ValidationTestInline
        )

    def test_valid_case(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            max_num = 2

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsValid(Admin, ValidationTestModel)
class MinNumCheckTests(CheckTestCase):
    """Checks for the InlineModelAdmin.min_num option (admin.E205)."""

    def test_not_integer(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            min_num = 'hello'

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'min_num' must be an integer.",
            'admin.E205',
            invalid_obj=ValidationTestInline
        )

    def test_valid_case(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            min_num = 2

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsValid(Admin, ValidationTestModel)
class FormsetCheckTests(CheckTestCase):
    """Checks for the InlineModelAdmin.formset option (admin.E206)."""

    def test_invalid_type(self):
        class FakeFormSet(object):
            pass

        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            formset = FakeFormSet

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsInvalid(
            Admin,
            ValidationTestModel,
            "The value of 'formset' must inherit from 'BaseModelFormSet'.",
            'admin.E206',
            invalid_obj=ValidationTestInline
        )

    def test_valid_case(self):
        class RealModelFormSet(BaseModelFormSet):
            pass

        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            formset = RealModelFormSet

        class Admin(ModelAdmin):
            inlines = [ValidationTestInline]

        self.assertIsValid(Admin, ValidationTestModel)
class ListDisplayEditableTests(CheckTestCase):
    """Interaction checks between list_display, list_editable and
    list_display_links (admin.E124)."""

    def test_list_display_links_is_none(self):
        """
        Identical list_display and list_editable values are allowed when
        list_display_links is explicitly None.
        """
        class ProductAdmin(ModelAdmin):
            list_display = ['name', 'slug', 'pub_date']
            list_editable = list_display
            list_display_links = None

        self.assertIsValid(ProductAdmin, ValidationTestModel)

    def test_list_display_first_item_same_as_list_editable_first_item(self):
        """
        Sharing the first item between list_display and list_editable is
        valid when list_display_links points elsewhere.
        """
        class ProductAdmin(ModelAdmin):
            list_display = ['name', 'slug', 'pub_date']
            list_editable = ['name', 'slug']
            list_display_links = ['pub_date']

        self.assertIsValid(ProductAdmin, ValidationTestModel)

    def test_list_display_first_item_in_list_editable(self):
        """
        The first list_display item may appear anywhere in list_editable
        as long as list_display_links is defined.
        """
        class ProductAdmin(ModelAdmin):
            list_display = ['name', 'slug', 'pub_date']
            list_editable = ['slug', 'name']
            list_display_links = ['pub_date']

        self.assertIsValid(ProductAdmin, ValidationTestModel)

    def test_list_display_first_item_same_as_list_editable_no_list_display_links(self):
        """
        Without list_display_links, the first list_display item cannot
        also lead list_editable.
        """
        class ProductAdmin(ModelAdmin):
            list_display = ['name']
            list_editable = ['name']

        self.assertIsInvalid(
            ProductAdmin,
            ValidationTestModel,
            "The value of 'list_editable[0]' refers to the first field "
            "in 'list_display' ('name'), which cannot be used unless "
            "'list_display_links' is set.",
            id='admin.E124',
        )

    def test_list_display_first_item_in_list_editable_no_list_display_links(self):
        """
        Without list_display_links, the first list_display item cannot
        appear anywhere in list_editable.
        """
        class ProductAdmin(ModelAdmin):
            list_display = ['name', 'slug', 'pub_date']
            list_editable = ['slug', 'name']

        self.assertIsInvalid(
            ProductAdmin,
            ValidationTestModel,
            "The value of 'list_editable[1]' refers to the first field "
            "in 'list_display' ('name'), which cannot be used unless "
            "'list_display_links' is set.",
            id='admin.E124',
        )
| |
# Copyright (c) 2019 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from kmip.core import enums
from kmip.core import exceptions
from kmip.core import objects
from kmip.core import primitives
from kmip.core import secrets
from kmip.core import utils
from kmip.core.messages import payloads
class TestRegisterRequestPayload(testtools.TestCase):
def setUp(self):
    """
    Build the fixtures shared by the tests below: a DER-encoded X.509
    certificate and several KMIP-encoded Register request payload
    streams (a complete encoding, a KMIP 2.0 variant, and encodings
    with individual fields deliberately omitted).
    """
    super(TestRegisterRequestPayload, self).setUp()

    # Raw DER bytes of the sample X.509 certificate from the KMIP 1.1
    # testing document; reused inside every payload encoding below.
    self.certificate_value = (
        b'\x30\x82\x03\x12\x30\x82\x01\xFA\xA0\x03\x02\x01\x02\x02\x01\x01'
        b'\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30'
        b'\x3B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0D'
        b'\x30\x0B\x06\x03\x55\x04\x0A\x13\x04\x54\x45\x53\x54\x31\x0E\x30'
        b'\x0C\x06\x03\x55\x04\x0B\x13\x05\x4F\x41\x53\x49\x53\x31\x0D\x30'
        b'\x0B\x06\x03\x55\x04\x03\x13\x04\x4B\x4D\x49\x50\x30\x1E\x17\x0D'
        b'\x31\x30\x31\x31\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x17\x0D\x32'
        b'\x30\x31\x31\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x30\x3B\x31\x0B'
        b'\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0D\x30\x0B\x06'
        b'\x03\x55\x04\x0A\x13\x04\x54\x45\x53\x54\x31\x0E\x30\x0C\x06\x03'
        b'\x55\x04\x0B\x13\x05\x4F\x41\x53\x49\x53\x31\x0D\x30\x0B\x06\x03'
        b'\x55\x04\x03\x13\x04\x4B\x4D\x49\x50\x30\x82\x01\x22\x30\x0D\x06'
        b'\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F'
        b'\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xAB\x7F\x16\x1C\x00\x42'
        b'\x49\x6C\xCD\x6C\x6D\x4D\xAD\xB9\x19\x97\x34\x35\x35\x77\x76\x00'
        b'\x3A\xCF\x54\xB7\xAF\x1E\x44\x0A\xFB\x80\xB6\x4A\x87\x55\xF8\x00'
        b'\x2C\xFE\xBA\x6B\x18\x45\x40\xA2\xD6\x60\x86\xD7\x46\x48\x34\x6D'
        b'\x75\xB8\xD7\x18\x12\xB2\x05\x38\x7C\x0F\x65\x83\xBC\x4D\x7D\xC7'
        b'\xEC\x11\x4F\x3B\x17\x6B\x79\x57\xC4\x22\xE7\xD0\x3F\xC6\x26\x7F'
        b'\xA2\xA6\xF8\x9B\x9B\xEE\x9E\x60\xA1\xD7\xC2\xD8\x33\xE5\xA5\xF4'
        b'\xBB\x0B\x14\x34\xF4\xE7\x95\xA4\x11\x00\xF8\xAA\x21\x49\x00\xDF'
        b'\x8B\x65\x08\x9F\x98\x13\x5B\x1C\x67\xB7\x01\x67\x5A\xBD\xBC\x7D'
        b'\x57\x21\xAA\xC9\xD1\x4A\x7F\x08\x1F\xCE\xC8\x0B\x64\xE8\xA0\xEC'
        b'\xC8\x29\x53\x53\xC7\x95\x32\x8A\xBF\x70\xE1\xB4\x2E\x7B\xB8\xB7'
        b'\xF4\xE8\xAC\x8C\x81\x0C\xDB\x66\xE3\xD2\x11\x26\xEB\xA8\xDA\x7D'
        b'\x0C\xA3\x41\x42\xCB\x76\xF9\x1F\x01\x3D\xA8\x09\xE9\xC1\xB7\xAE'
        b'\x64\xC5\x41\x30\xFB\xC2\x1D\x80\xE9\xC2\xCB\x06\xC5\xC8\xD7\xCC'
        b'\xE8\x94\x6A\x9A\xC9\x9B\x1C\x28\x15\xC3\x61\x2A\x29\xA8\x2D\x73'
        b'\xA1\xF9\x93\x74\xFE\x30\xE5\x49\x51\x66\x2A\x6E\xDA\x29\xC6\xFC'
        b'\x41\x13\x35\xD5\xDC\x74\x26\xB0\xF6\x05\x02\x03\x01\x00\x01\xA3'
        b'\x21\x30\x1F\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x04\xE5'
        b'\x7B\xD2\xC4\x31\xB2\xE8\x16\xE1\x80\xA1\x98\x23\xFA\xC8\x58\x27'
        b'\x3F\x6B\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05'
        b'\x00\x03\x82\x01\x01\x00\xA8\x76\xAD\xBC\x6C\x8E\x0F\xF0\x17\x21'
        b'\x6E\x19\x5F\xEA\x76\xBF\xF6\x1A\x56\x7C\x9A\x13\xDC\x50\xD1\x3F'
        b'\xEC\x12\xA4\x27\x3C\x44\x15\x47\xCF\xAB\xCB\x5D\x61\xD9\x91\xE9'
        b'\x66\x31\x9D\xF7\x2C\x0D\x41\xBA\x82\x6A\x45\x11\x2F\xF2\x60\x89'
        b'\xA2\x34\x4F\x4D\x71\xCF\x7C\x92\x1B\x4B\xDF\xAE\xF1\x60\x0D\x1B'
        b'\xAA\xA1\x53\x36\x05\x7E\x01\x4B\x8B\x49\x6D\x4F\xAE\x9E\x8A\x6C'
        b'\x1D\xA9\xAE\xB6\xCB\xC9\x60\xCB\xF2\xFA\xE7\x7F\x58\x7E\xC4\xBB'
        b'\x28\x20\x45\x33\x88\x45\xB8\x8D\xD9\xAE\xEA\x53\xE4\x82\xA3\x6E'
        b'\x73\x4E\x4F\x5F\x03\xB9\xD0\xDF\xC4\xCA\xFC\x6B\xB3\x4E\xA9\x05'
        b'\x3E\x52\xBD\x60\x9E\xE0\x1E\x86\xD9\xB0\x9F\xB5\x11\x20\xC1\x98'
        b'\x34\xA9\x97\xB0\x9C\xE0\x8D\x79\xE8\x13\x11\x76\x2F\x97\x4B\xB1'
        b'\xC8\xC0\x91\x86\xC4\xD7\x89\x33\xE0\xDB\x38\xE9\x05\x08\x48\x77'
        b'\xE1\x47\xC7\x8A\xF5\x2F\xAE\x07\x19\x2F\xF1\x66\xD1\x9F\xA9\x4A'
        b'\x11\xCC\x11\xB2\x7E\xD0\x50\xF7\xA2\x7F\xAE\x13\xB2\x05\xA5\x74'
        b'\xC4\xEE\x00\xAA\x8B\xD6\x5D\x0D\x70\x57\xC9\x85\xC8\x39\xEF\x33'
        b'\x6A\x44\x1E\xD5\x3A\x53\xC6\xB6\xB6\x96\xF1\xBD\xEB\x5F\x7E\xA8'
        b'\x11\xEB\xB2\x5A\x7F\x86'
    )

    # Encoding obtained from the KMIP 1.1 testing document, Section 13.2.2.
    # Modified to exclude the Link attribute.
    #
    # TODO (ph) Add the Link attribute back in once Links are supported.
    #
    # This encoding matches the following set of values:
    # Request Payload
    #     Object Type - Certificate
    #     Template Attribute
    #         Attribute
    #             Attribute Name - Cryptographic Usage Mask
    #             Attribute Value - Sign | Verify
    #     Certificate
    #         Certificate Type - X.509
    #         Certificate Value -
    #             0x30820312308201FAA003020102020101300D06092A864886F70D01
    #             01050500303B310B3009060355040613025553310D300B060355040A
    #             130454455354310E300C060355040B13054F41534953310D300B0603
    #             55040313044B4D4950301E170D3130313130313233353935395A170D
    #             3230313130313233353935395A303B310B3009060355040613025553
    #             310D300B060355040A130454455354310E300C060355040B13054F41
    #             534953310D300B060355040313044B4D495030820122300D06092A86
    #             4886F70D01010105000382010F003082010A0282010100AB7F161C00
    #             42496CCD6C6D4DADB919973435357776003ACF54B7AF1E440AFB80B6
    #             4A8755F8002CFEBA6B184540A2D66086D74648346D75B8D71812B205
    #             387C0F6583BC4D7DC7EC114F3B176B7957C422E7D03FC6267FA2A6F8
    #             9B9BEE9E60A1D7C2D833E5A5F4BB0B1434F4E795A41100F8AA214900
    #             DF8B65089F98135B1C67B701675ABDBC7D5721AAC9D14A7F081FCEC8
    #             0B64E8A0ECC8295353C795328ABF70E1B42E7BB8B7F4E8AC8C810CDB
    #             66E3D21126EBA8DA7D0CA34142CB76F91F013DA809E9C1B7AE64C541
    #             30FBC21D80E9C2CB06C5C8D7CCE8946A9AC99B1C2815C3612A29A82D
    #             73A1F99374FE30E54951662A6EDA29C6FC411335D5DC7426B0F60502
    #             03010001A321301F301D0603551D0E0416041404E57BD2C431B2E816
    #             E180A19823FAC858273F6B300D06092A864886F70D01010505000382
    #             010100A876ADBC6C8E0FF017216E195FEA76BFF61A567C9A13DC50D1
    #             3FEC12A4273C441547CFABCB5D61D991E966319DF72C0D41BA826A45
    #             112FF26089A2344F4D71CF7C921B4BDFAEF1600D1BAAA15336057E01
    #             4B8B496D4FAE9E8A6C1DA9AEB6CBC960CBF2FAE77F587EC4BB282045
    #             338845B88DD9AEEA53E482A36E734E4F5F03B9D0DFC4CAFC6BB34EA9
    #             053E52BD609EE01E86D9B09FB51120C19834A997B09CE08D79E81311
    #             762F974BB1C8C09186C4D78933E0DB38E905084877E147C78AF52FAE
    #             07192FF166D19FA94A11CC11B27ED050F7A27FAE13B205A574C4EE00
    #             AA8BD65D0D7057C985C839EF336A441ED53A53C6B6B696F1BDEB5F7E
    #             A811EBB25A7F86
    self.full_encoding = utils.BytearrayStream(
        b'\x42\x00\x79\x01\x00\x00\x03\x88'
        b'\x42\x00\x57\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        b'\x42\x00\x91\x01\x00\x00\x00\x38'
        b'\x42\x00\x08\x01\x00\x00\x00\x30'
        b'\x42\x00\x0A\x07\x00\x00\x00\x18'
        b'\x43\x72\x79\x70\x74\x6F\x67\x72\x61\x70\x68\x69\x63\x20\x55\x73'
        b'\x61\x67\x65\x20\x4D\x61\x73\x6B'
        b'\x42\x00\x0B\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
        b'\x42\x00\x13\x01\x00\x00\x03\x30'
        b'\x42\x00\x1D\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        b'\x42\x00\x1E\x08\x00\x00\x03\x16' + self.certificate_value +
        b'\x00\x00'
    )

    # Encoding obtained from the KMIP 1.1 testing document, Section 13.2.2.
    # Modified to exclude the Link attribute. Manually converted into the
    # KMIP 2.0 format.
    #
    # TODO (ph) Add the Link attribute back in once Links are supported.
    #
    # This encoding matches the following set of values:
    # Request Payload
    #     Object Type - Certificate
    #     Attributes
    #         Cryptographic Usage Mask - Sign | Verify
    #     Certificate
    #         Certificate Type - X.509
    #         Certificate Value - See comment for the full encoding.
    #     Protection Storage Masks
    #         Protection Storage Mask - Software | Hardware
    self.full_encoding_with_attributes = utils.BytearrayStream(
        b'\x42\x00\x79\x01\x00\x00\x03\x78'
        b'\x42\x00\x57\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        b'\x42\x01\x25\x01\x00\x00\x00\x10'
        b'\x42\x00\x2C\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
        b'\x42\x00\x13\x01\x00\x00\x03\x30'
        b'\x42\x00\x1D\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        b'\x42\x00\x1E\x08\x00\x00\x03\x16' + self.certificate_value +
        b'\x00\x00'
        b'\x42\x01\x5F\x01\x00\x00\x00\x10'
        b'\x42\x01\x5E\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
    )

    # Encoding obtained from the KMIP 1.1 testing document, Section 13.2.2.
    # Modified to exclude the Link attribute.
    #
    # TODO (ph) Add the Link attribute back in once Links are supported.
    #
    # This encoding matches the following set of values:
    # Request Payload
    #     Template Attribute
    #         Attribute
    #             Attribute Name - Cryptographic Usage Mask
    #             Attribute Value - Sign | Verify
    #     Certificate
    #         Certificate Type - X.509
    #         Certificate Value - See comment for the full encoding.
    self.no_object_type_encoding = utils.BytearrayStream(
        b'\x42\x00\x79\x01\x00\x00\x03\x78'
        b'\x42\x00\x91\x01\x00\x00\x00\x38'
        b'\x42\x00\x08\x01\x00\x00\x00\x30'
        b'\x42\x00\x0A\x07\x00\x00\x00\x18'
        b'\x43\x72\x79\x70\x74\x6F\x67\x72\x61\x70\x68\x69\x63\x20\x55\x73'
        b'\x61\x67\x65\x20\x4D\x61\x73\x6B'
        b'\x42\x00\x0B\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
        b'\x42\x00\x13\x01\x00\x00\x03\x30'
        b'\x42\x00\x1D\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        b'\x42\x00\x1E\x08\x00\x00\x03\x16' + self.certificate_value +
        b'\x00\x00'
    )

    # Encoding obtained from the KMIP 1.1 testing document, Section 13.2.2.
    #
    # This encoding matches the following set of values:
    # Request Payload
    #     Object Type - Certificate
    #     Certificate
    #         Certificate Type - X.509
    #         Certificate Value - See comment for the full encoding.
    self.no_template_attribute_encoding = utils.BytearrayStream(
        b'\x42\x00\x79\x01\x00\x00\x03\x48'
        b'\x42\x00\x57\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        b'\x42\x00\x13\x01\x00\x00\x03\x30'
        b'\x42\x00\x1D\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        b'\x42\x00\x1E\x08\x00\x00\x03\x16' + self.certificate_value +
        b'\x00\x00'
    )

    # Encoding obtained from the KMIP 1.1 testing document, Section 13.2.2.
    # Modified to exclude the Link attribute.
    #
    # TODO (ph) Add the Link attribute back in once Links are supported.
    #
    # This encoding matches the following set of values:
    # Request Payload
    #     Object Type - Certificate
    #     Template Attribute
    #         Attribute
    #             Attribute Name - Cryptographic Usage Mask
    #             Attribute Value - Sign | Verify
    self.no_managed_object_encoding = utils.BytearrayStream(
        b'\x42\x00\x79\x01\x00\x00\x00\x50'
        b'\x42\x00\x57\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        b'\x42\x00\x91\x01\x00\x00\x00\x38'
        b'\x42\x00\x08\x01\x00\x00\x00\x30'
        b'\x42\x00\x0A\x07\x00\x00\x00\x18'
        b'\x43\x72\x79\x70\x74\x6F\x67\x72\x61\x70\x68\x69\x63\x20\x55\x73'
        b'\x61\x67\x65\x20\x4D\x61\x73\x6B'
        b'\x42\x00\x0B\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
    )
def tearDown(self):
    """Delegate cleanup to the testtools base class; no local state to free."""
    super(TestRegisterRequestPayload, self).tearDown()
def test_invalid_object_type(self):
    """
    Verify that both the constructor and the attribute setter reject a
    non-ObjectType value with a TypeError.
    """
    # Rejected at construction time.
    self.assertRaisesRegex(
        TypeError,
        "Object type must be an ObjectType enumeration.",
        payloads.RegisterRequestPayload,
        object_type='invalid'
    )

    # Rejected on attribute assignment.
    self.assertRaisesRegex(
        TypeError,
        "Object type must be an ObjectType enumeration.",
        setattr,
        payloads.RegisterRequestPayload(),
        'object_type',
        'invalid'
    )
def test_invalid_template_attribute(self):
    """
    Verify that both the constructor and the attribute setter reject a
    non-TemplateAttribute value with a TypeError.
    """
    # Rejected at construction time.
    self.assertRaisesRegex(
        TypeError,
        "Template attribute must be a TemplateAttribute structure.",
        payloads.RegisterRequestPayload,
        template_attribute='invalid'
    )

    # Rejected on attribute assignment.
    self.assertRaisesRegex(
        TypeError,
        "Template attribute must be a TemplateAttribute structure.",
        setattr,
        payloads.RegisterRequestPayload(),
        'template_attribute',
        'invalid'
    )
def test_invalid_managed_object(self):
    """
    Verify that both the constructor and the attribute setter reject an
    unsupported managed object value with a TypeError.
    """
    # Rejected at construction time.
    self.assertRaisesRegex(
        TypeError,
        "Managed object must be a supported managed object structure.",
        payloads.RegisterRequestPayload,
        managed_object='invalid'
    )

    # Rejected on attribute assignment.
    self.assertRaisesRegex(
        TypeError,
        "Managed object must be a supported managed object structure.",
        setattr,
        payloads.RegisterRequestPayload(),
        'managed_object',
        'invalid'
    )
def test_invalid_protection_storage_masks(self):
    """
    Verify that the protection storage masks field rejects both plain
    invalid values and ProtectionStorageMasks structures carrying the
    wrong tag, via the constructor and via attribute assignment.
    """
    # Plain invalid value, constructor.
    self.assertRaisesRegex(
        TypeError,
        "The protection storage masks must be a ProtectionStorageMasks "
        "structure.",
        payloads.RegisterRequestPayload,
        protection_storage_masks="invalid"
    )

    # Correct structure but wrong tag, constructor.
    self.assertRaisesRegex(
        TypeError,
        "The protection storage masks must be a ProtectionStorageMasks "
        "structure with a ProtectionStorageMasks tag.",
        payloads.RegisterRequestPayload,
        protection_storage_masks=objects.ProtectionStorageMasks(
            tag=enums.Tags.COMMON_PROTECTION_STORAGE_MASKS
        )
    )

    # Plain invalid value, attribute assignment.
    self.assertRaisesRegex(
        TypeError,
        "The protection storage masks must be a ProtectionStorageMasks "
        "structure.",
        setattr,
        payloads.RegisterRequestPayload(),
        "protection_storage_masks",
        "invalid"
    )

    # Correct structure but wrong tag, attribute assignment.
    self.assertRaisesRegex(
        TypeError,
        "The protection storage masks must be a ProtectionStorageMasks "
        "structure with a ProtectionStorageMasks tag.",
        setattr,
        payloads.RegisterRequestPayload(),
        "protection_storage_masks",
        objects.ProtectionStorageMasks(
            tag=enums.Tags.COMMON_PROTECTION_STORAGE_MASKS
        )
    )
def test_read(self):
    """
    Test that a Register request payload can be read from a data stream.
    """
    payload = payloads.RegisterRequestPayload()

    # All fields start out unset.
    self.assertIsNone(payload.object_type)
    self.assertIsNone(payload.template_attribute)
    self.assertIsNone(payload.managed_object)
    self.assertIsNone(payload.protection_storage_masks)

    payload.read(self.full_encoding)

    expected_attribute = objects.Attribute(
        attribute_name=objects.Attribute.AttributeName(
            "Cryptographic Usage Mask"
        ),
        attribute_value=primitives.Integer(
            enums.CryptographicUsageMask.SIGN.value |
            enums.CryptographicUsageMask.VERIFY.value,
            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
        )
    )
    expected_certificate = secrets.Certificate(
        certificate_type=enums.CertificateType.X_509,
        certificate_value=self.certificate_value
    )

    self.assertEqual(enums.ObjectType.CERTIFICATE, payload.object_type)
    self.assertEqual(
        objects.TemplateAttribute(attributes=[expected_attribute]),
        payload.template_attribute
    )
    self.assertEqual(expected_certificate, payload.managed_object)
    # KMIP 1.x encodings carry no protection storage masks.
    self.assertIsNone(payload.protection_storage_masks)
def test_read_kmip_2_0(self):
    """
    Test that a Register request payload can be read from a data stream
    encoded with the KMIP 2.0 format.
    """
    payload = payloads.RegisterRequestPayload()

    # All fields start out unset.
    self.assertIsNone(payload.object_type)
    self.assertIsNone(payload.template_attribute)
    self.assertIsNone(payload.managed_object)
    self.assertIsNone(payload.protection_storage_masks)

    payload.read(
        self.full_encoding_with_attributes,
        kmip_version=enums.KMIPVersion.KMIP_2_0
    )

    expected_attribute = objects.Attribute(
        attribute_name=objects.Attribute.AttributeName(
            "Cryptographic Usage Mask"
        ),
        attribute_value=primitives.Integer(
            enums.CryptographicUsageMask.SIGN.value |
            enums.CryptographicUsageMask.VERIFY.value,
            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
        )
    )
    expected_certificate = secrets.Certificate(
        certificate_type=enums.CertificateType.X_509,
        certificate_value=self.certificate_value
    )

    self.assertEqual(enums.ObjectType.CERTIFICATE, payload.object_type)
    self.assertEqual(
        objects.TemplateAttribute(attributes=[expected_attribute]),
        payload.template_attribute
    )
    self.assertEqual(expected_certificate, payload.managed_object)
    # Software | Hardware == 3 in the fixture encoding.
    self.assertEqual(
        objects.ProtectionStorageMasks(protection_storage_masks=[3]),
        payload.protection_storage_masks
    )
def test_read_missing_object_type(self):
    """
    Decoding an encoding with no object type must raise
    InvalidKmipEncoding.
    """
    payload = payloads.RegisterRequestPayload()

    self.assertRaisesRegex(
        exceptions.InvalidKmipEncoding,
        "The Register request payload encoding is missing the object "
        "type.",
        payload.read,
        self.no_object_type_encoding
    )
def test_read_missing_template_attribute(self):
    """
    Decoding an encoding with no template attribute must raise
    InvalidKmipEncoding.
    """
    payload = payloads.RegisterRequestPayload()

    self.assertRaisesRegex(
        exceptions.InvalidKmipEncoding,
        "The Register request payload encoding is missing the template "
        "attribute.",
        payload.read,
        self.no_template_attribute_encoding
    )
def test_read_missing_attributes(self):
    """
    Decoding a KMIP 2.0 encoding with no attributes structure must raise
    InvalidKmipEncoding.
    """
    payload = payloads.RegisterRequestPayload()

    self.assertRaisesRegex(
        exceptions.InvalidKmipEncoding,
        "The Register request payload encoding is missing the attributes "
        "structure.",
        payload.read,
        self.no_template_attribute_encoding,
        kmip_version=enums.KMIPVersion.KMIP_2_0
    )
def test_read_missing_managed_object(self):
    """
    Decoding an encoding with no managed object must raise
    InvalidKmipEncoding.
    """
    payload = payloads.RegisterRequestPayload()

    self.assertRaisesRegex(
        exceptions.InvalidKmipEncoding,
        "The Register request payload encoding is missing the managed "
        "object.",
        payload.read,
        self.no_managed_object_encoding
    )
def test_write(self):
    """
    Test that a Register request payload can be written to a data stream.
    """
    usage_mask_attribute = objects.Attribute(
        attribute_name=objects.Attribute.AttributeName(
            "Cryptographic Usage Mask"
        ),
        attribute_value=primitives.Integer(
            enums.CryptographicUsageMask.SIGN.value |
            enums.CryptographicUsageMask.VERIFY.value,
            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
        )
    )
    payload = payloads.RegisterRequestPayload(
        object_type=enums.ObjectType.CERTIFICATE,
        template_attribute=objects.TemplateAttribute(
            attributes=[usage_mask_attribute]
        ),
        managed_object=secrets.Certificate(
            certificate_type=enums.CertificateType.X_509,
            certificate_value=self.certificate_value
        )
    )

    buffer = utils.BytearrayStream()
    payload.write(buffer)

    # Byte-for-byte match against the reference encoding.
    self.assertEqual(len(self.full_encoding), len(buffer))
    self.assertEqual(str(self.full_encoding), str(buffer))
def test_write_kmip_2_0(self):
    """
    Test that a Register request payload can be written to a data stream
    encoded with the KMIP 2.0 format.
    """
    usage_mask_attribute = objects.Attribute(
        attribute_name=objects.Attribute.AttributeName(
            "Cryptographic Usage Mask"
        ),
        attribute_value=primitives.Integer(
            enums.CryptographicUsageMask.SIGN.value |
            enums.CryptographicUsageMask.VERIFY.value,
            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
        )
    )
    storage_masks = objects.ProtectionStorageMasks(
        protection_storage_masks=[
            (
                enums.ProtectionStorageMask.SOFTWARE.value |
                enums.ProtectionStorageMask.HARDWARE.value
            )
        ]
    )
    payload = payloads.RegisterRequestPayload(
        object_type=enums.ObjectType.CERTIFICATE,
        template_attribute=objects.TemplateAttribute(
            attributes=[usage_mask_attribute]
        ),
        managed_object=secrets.Certificate(
            certificate_type=enums.CertificateType.X_509,
            certificate_value=self.certificate_value
        ),
        protection_storage_masks=storage_masks
    )

    buffer = utils.BytearrayStream()
    payload.write(buffer, kmip_version=enums.KMIPVersion.KMIP_2_0)

    # Byte-for-byte match against the KMIP 2.0 reference encoding.
    self.assertEqual(len(self.full_encoding_with_attributes), len(buffer))
    self.assertEqual(str(self.full_encoding_with_attributes), str(buffer))
def test_write_missing_object_type(self):
    """
    Encoding a payload whose object type is unset must raise
    InvalidField.
    """
    usage_mask_attribute = objects.Attribute(
        attribute_name=objects.Attribute.AttributeName(
            "Cryptographic Usage Mask"
        ),
        attribute_value=primitives.Integer(
            enums.CryptographicUsageMask.SIGN.value |
            enums.CryptographicUsageMask.VERIFY.value,
            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
        )
    )
    payload = payloads.RegisterRequestPayload(
        template_attribute=objects.TemplateAttribute(
            attributes=[usage_mask_attribute]
        ),
        managed_object=secrets.Certificate(
            certificate_type=enums.CertificateType.X_509,
            certificate_value=self.certificate_value
        )
    )

    self.assertRaisesRegex(
        exceptions.InvalidField,
        "The Register request payload is missing the object type field.",
        payload.write,
        utils.BytearrayStream()
    )
def test_write_missing_template_attribute(self):
    """
    Encoding a payload whose template attribute is unset must raise
    InvalidField.
    """
    payload = payloads.RegisterRequestPayload(
        object_type=enums.ObjectType.CERTIFICATE,
        managed_object=secrets.Certificate(
            certificate_type=enums.CertificateType.X_509,
            certificate_value=self.certificate_value
        )
    )

    self.assertRaisesRegex(
        exceptions.InvalidField,
        "The Register request payload is missing the template attribute "
        "field.",
        payload.write,
        utils.BytearrayStream()
    )
def test_write_missing_attributes(self):
"""
Test that an InvalidField error is raised during the encoding of a
Register request payload when the payload is missing the attributes
structure.
"""
payload = payloads.RegisterRequestPayload(
object_type=enums.ObjectType.CERTIFICATE,
managed_object=secrets.Certificate(
certificate_type=enums.CertificateType.X_509,
certificate_value=self.certificate_value
)
)
args = (utils.BytearrayStream(), )
kwargs = {"kmip_version": enums.KMIPVersion.KMIP_2_0}
self.assertRaisesRegex(
exceptions.InvalidField,
"The Register request payload is missing the template attribute "
"field.",
payload.write,
*args,
**kwargs
)
def test_write_missing_managed_object(self):
"""
Test that an InvalidField error is raised during the encoding of a
Register request payload when the payload is missing the managed
object.
"""
payload = payloads.RegisterRequestPayload(
object_type=enums.ObjectType.SECRET_DATA,
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
"Cryptographic Usage Mask"
),
attribute_value=primitives.Integer(
enums.CryptographicUsageMask.VERIFY.value,
tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
)
)
]
)
)
args = (utils.BytearrayStream(), )
self.assertRaisesRegex(
exceptions.InvalidField,
"The Register request payload is missing the managed object "
"field.",
payload.write,
*args
)
    def test_repr(self):
        """
        Test that repr can be applied to a Register request payload structure.
        """
        payload = payloads.RegisterRequestPayload(
            object_type=enums.ObjectType.SECRET_DATA,
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "Cryptographic Usage Mask"
                        ),
                        attribute_value=primitives.Integer(
                            enums.CryptographicUsageMask.VERIFY.value,
                            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
                        )
                    )
                ]
            ),
            managed_object=secrets.SecretData(
                secret_data_type=primitives.Enumeration(
                    enums.SecretDataType,
                    value=enums.SecretDataType.PASSWORD,
                    tag=enums.Tags.SECRET_DATA_TYPE
                ),
                key_block=objects.KeyBlock(
                    key_format_type=objects.KeyFormatType(
                        enums.KeyFormatType.OPAQUE
                    ),
                    key_value=objects.KeyValue(
                        key_material=objects.KeyMaterial(
                            (
                                # Raw bytes for "SecretPassword".
                                b'\x53\x65\x63\x72\x65\x74\x50\x61\x73\x73\x77'
                                b'\x6F\x72\x64'
                            )
                        )
                    )
                )
            ),
            protection_storage_masks=objects.ProtectionStorageMasks(
                protection_storage_masks=[
                    (
                        enums.ProtectionStorageMask.SOFTWARE.value |
                        enums.ProtectionStorageMask.HARDWARE.value
                    )
                ]
            )
        )
        # Nested structures render generically as "Struct()"; the combined
        # SOFTWARE | HARDWARE mask renders as the integer 3.
        self.assertEqual(
            "RegisterRequestPayload("
            "object_type=ObjectType.SECRET_DATA, "
            "template_attribute=Struct(), "
            "managed_object=Struct(), "
            "protection_storage_masks=ProtectionStorageMasks("
            "protection_storage_masks=[3]))",
            repr(payload)
        )
    def test_str(self):
        """
        Test that str can be applied to a Register request payload structure.
        """
        payload = payloads.RegisterRequestPayload(
            object_type=enums.ObjectType.SECRET_DATA,
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "Cryptographic Usage Mask"
                        ),
                        attribute_value=primitives.Integer(
                            enums.CryptographicUsageMask.VERIFY.value,
                            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
                        )
                    )
                ]
            ),
            managed_object=secrets.SecretData(
                secret_data_type=primitives.Enumeration(
                    enums.SecretDataType,
                    value=enums.SecretDataType.PASSWORD,
                    tag=enums.Tags.SECRET_DATA_TYPE
                ),
                key_block=objects.KeyBlock(
                    key_format_type=objects.KeyFormatType(
                        enums.KeyFormatType.OPAQUE
                    ),
                    key_value=objects.KeyValue(
                        key_material=objects.KeyMaterial(
                            (
                                # Raw bytes for "SecretPassword".
                                b'\x53\x65\x63\x72\x65\x74\x50\x61\x73\x73\x77'
                                b'\x6F\x72\x64'
                            )
                        )
                    )
                )
            ),
            protection_storage_masks=objects.ProtectionStorageMasks(
                protection_storage_masks=[
                    (
                        enums.ProtectionStorageMask.SOFTWARE.value |
                        enums.ProtectionStorageMask.HARDWARE.value
                    )
                ]
            )
        )
        # The str form is a JSON-like rendering; nested structures appear
        # generically as Struct().
        self.assertEqual(
            '{'
            '"object_type": ObjectType.SECRET_DATA, '
            '"template_attribute": Struct(), '
            '"managed_object": Struct(), '
            '"protection_storage_masks": {"protection_storage_masks": [3]}'
            '}',
            str(payload)
        )
    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        Register request payloads with the same data.
        """
        # Two empty payloads should compare equal in both directions.
        a = payloads.RegisterRequestPayload()
        b = payloads.RegisterRequestPayload()
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        # Two fully populated, identical payloads should also compare equal.
        a = payloads.RegisterRequestPayload(
            object_type=enums.ObjectType.CERTIFICATE,
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "Cryptographic Usage Mask"
                        ),
                        attribute_value=primitives.Integer(
                            enums.CryptographicUsageMask.SIGN.value |
                            enums.CryptographicUsageMask.VERIFY.value,
                            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
                        )
                    )
                ]
            ),
            managed_object=secrets.Certificate(
                certificate_type=enums.CertificateType.X_509,
                certificate_value=self.certificate_value
            ),
            protection_storage_masks=objects.ProtectionStorageMasks(
                protection_storage_masks=[
                    (
                        enums.ProtectionStorageMask.SOFTWARE.value |
                        enums.ProtectionStorageMask.HARDWARE.value
                    )
                ]
            )
        )
        b = payloads.RegisterRequestPayload(
            object_type=enums.ObjectType.CERTIFICATE,
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "Cryptographic Usage Mask"
                        ),
                        attribute_value=primitives.Integer(
                            enums.CryptographicUsageMask.SIGN.value |
                            enums.CryptographicUsageMask.VERIFY.value,
                            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
                        )
                    )
                ]
            ),
            managed_object=secrets.Certificate(
                certificate_type=enums.CertificateType.X_509,
                certificate_value=self.certificate_value
            ),
            protection_storage_masks=objects.ProtectionStorageMasks(
                protection_storage_masks=[
                    (
                        enums.ProtectionStorageMask.SOFTWARE.value |
                        enums.ProtectionStorageMask.HARDWARE.value
                    )
                ]
            )
        )
        self.assertTrue(a == b)
        self.assertTrue(b == a)
def test_equal_on_not_equal_object_type(self):
"""
Test that the equality operator returns False when comparing two
Register request payloads with different object types.
"""
a = payloads.RegisterRequestPayload(
object_type=enums.ObjectType.SYMMETRIC_KEY
)
b = payloads.RegisterRequestPayload(
object_type=enums.ObjectType.SECRET_DATA
)
self.assertFalse(a == b)
self.assertFalse(b == a)
    def test_equal_on_not_equal_template_attribute(self):
        """
        Test that the equality operator returns False when comparing two
        Register request payloads with different template attributes.
        """
        # The payloads differ only in the usage mask value (VERIFY vs SIGN).
        a = payloads.RegisterRequestPayload(
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "Cryptographic Usage Mask"
                        ),
                        attribute_value=primitives.Integer(
                            value=enums.CryptographicUsageMask.VERIFY.value,
                            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
                        )
                    )
                ]
            )
        )
        b = payloads.RegisterRequestPayload(
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "Cryptographic Usage Mask"
                        ),
                        attribute_value=primitives.Integer(
                            value=enums.CryptographicUsageMask.SIGN.value,
                            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
                        )
                    )
                ]
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
def test_equal_on_not_equal_managed_object(self):
"""
Test that the equality operator returns False when comparing two
Register request payloads with different managed objects.
"""
a = payloads.RegisterRequestPayload(
managed_object=secrets.Certificate(
certificate_type=enums.CertificateType.X_509,
certificate_value=self.certificate_value
)
)
b = payloads.RegisterRequestPayload(
managed_object=secrets.Certificate(
certificate_type=enums.CertificateType.PGP,
certificate_value=self.certificate_value
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
    def test_equal_on_not_equal_protection_storage_masks(self):
        """
        Test that the equality operator returns False when comparing two
        Register request payloads with different protection storage masks.
        """
        # SOFTWARE | HARDWARE vs ON_SYSTEM | OFF_SYSTEM mask combinations.
        a = payloads.RegisterRequestPayload(
            protection_storage_masks=objects.ProtectionStorageMasks(
                protection_storage_masks=[
                    (
                        enums.ProtectionStorageMask.SOFTWARE.value |
                        enums.ProtectionStorageMask.HARDWARE.value
                    )
                ]
            )
        )
        b = payloads.RegisterRequestPayload(
            protection_storage_masks=objects.ProtectionStorageMasks(
                protection_storage_masks=[
                    (
                        enums.ProtectionStorageMask.ON_SYSTEM.value |
                        enums.ProtectionStorageMask.OFF_SYSTEM.value
                    )
                ]
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
Register request payloads with different types.
"""
a = payloads.RegisterRequestPayload()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
    def test_not_equal_on_equal(self):
        """
        Test that the inequality operator returns False when comparing two
        Register request payloads with the same data.
        """
        # Two empty payloads should never compare as unequal.
        a = payloads.RegisterRequestPayload()
        b = payloads.RegisterRequestPayload()
        self.assertFalse(a != b)
        self.assertFalse(b != a)
        # Two fully populated, identical payloads should also never compare
        # as unequal.
        a = payloads.RegisterRequestPayload(
            object_type=enums.ObjectType.CERTIFICATE,
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "Cryptographic Usage Mask"
                        ),
                        attribute_value=primitives.Integer(
                            enums.CryptographicUsageMask.SIGN.value |
                            enums.CryptographicUsageMask.VERIFY.value,
                            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
                        )
                    )
                ]
            ),
            managed_object=secrets.Certificate(
                certificate_type=enums.CertificateType.X_509,
                certificate_value=self.certificate_value
            ),
            protection_storage_masks=objects.ProtectionStorageMasks(
                protection_storage_masks=[
                    (
                        enums.ProtectionStorageMask.SOFTWARE.value |
                        enums.ProtectionStorageMask.HARDWARE.value
                    )
                ]
            )
        )
        b = payloads.RegisterRequestPayload(
            object_type=enums.ObjectType.CERTIFICATE,
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "Cryptographic Usage Mask"
                        ),
                        attribute_value=primitives.Integer(
                            enums.CryptographicUsageMask.SIGN.value |
                            enums.CryptographicUsageMask.VERIFY.value,
                            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
                        )
                    )
                ]
            ),
            managed_object=secrets.Certificate(
                certificate_type=enums.CertificateType.X_509,
                certificate_value=self.certificate_value
            ),
            protection_storage_masks=objects.ProtectionStorageMasks(
                protection_storage_masks=[
                    (
                        enums.ProtectionStorageMask.SOFTWARE.value |
                        enums.ProtectionStorageMask.HARDWARE.value
                    )
                ]
            )
        )
        self.assertFalse(a != b)
        self.assertFalse(b != a)
def test_not_equal_on_not_equal_object_type(self):
"""
Test that the inequality operator returns True when comparing two
Register request payloads with different object types.
"""
a = payloads.RegisterRequestPayload(
object_type=enums.ObjectType.SYMMETRIC_KEY
)
b = payloads.RegisterRequestPayload(
object_type=enums.ObjectType.SECRET_DATA
)
self.assertTrue(a != b)
self.assertTrue(b != a)
    def test_not_equal_on_not_equal_template_attribute(self):
        """
        Test that the inequality operator returns True when comparing two
        Register request payloads with different template attributes.
        """
        # The payloads differ only in the usage mask value (VERIFY vs SIGN).
        a = payloads.RegisterRequestPayload(
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "Cryptographic Usage Mask"
                        ),
                        attribute_value=primitives.Integer(
                            value=enums.CryptographicUsageMask.VERIFY.value,
                            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
                        )
                    )
                ]
            )
        )
        b = payloads.RegisterRequestPayload(
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "Cryptographic Usage Mask"
                        ),
                        attribute_value=primitives.Integer(
                            value=enums.CryptographicUsageMask.SIGN.value,
                            tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK
                        )
                    )
                ]
            )
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)
def test_not_equal_on_not_equal_managed_object(self):
"""
Test that the inequality operator returns True when comparing two
Register request payloads with different managed objects.
"""
a = payloads.RegisterRequestPayload(
managed_object=secrets.Certificate(
certificate_type=enums.CertificateType.X_509,
certificate_value=self.certificate_value
)
)
b = payloads.RegisterRequestPayload(
managed_object=secrets.Certificate(
certificate_type=enums.CertificateType.PGP,
certificate_value=self.certificate_value
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
    def test_not_equal_on_not_equal_protection_storage_masks(self):
        """
        Test that the inequality operator returns True when comparing two
        Register request payloads with different protection storage masks.
        """
        # SOFTWARE | HARDWARE vs ON_SYSTEM | OFF_SYSTEM mask combinations.
        a = payloads.RegisterRequestPayload(
            protection_storage_masks=objects.ProtectionStorageMasks(
                protection_storage_masks=[
                    (
                        enums.ProtectionStorageMask.SOFTWARE.value |
                        enums.ProtectionStorageMask.HARDWARE.value
                    )
                ]
            )
        )
        b = payloads.RegisterRequestPayload(
            protection_storage_masks=objects.ProtectionStorageMasks(
                protection_storage_masks=[
                    (
                        enums.ProtectionStorageMask.ON_SYSTEM.value |
                        enums.ProtectionStorageMask.OFF_SYSTEM.value
                    )
                ]
            )
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
"""
Test that the inequality operator returns True when comparing two
Register request payloads with different types.
"""
a = payloads.RegisterRequestPayload()
b = 'invalid'
self.assertTrue(a != b)
self.assertTrue(b != a)
class TestRegisterResponsePayload(testtools.TestCase):
    """
    Test suite for the Register response payload.

    Exercises decoding/encoding against fixed KMIP byte encodings, input
    validation, repr/str rendering, and the equality/inequality operators.
    """
    def setUp(self):
        super(TestRegisterResponsePayload, self).setUp()
        # Encoding obtained from the KMIP 1.1 testing document, Section 13.2.2.
        # Modified to include the template attribute.
        #
        # This encoding matches the following set of values:
        # Response Payload
        #     Unique Identifier - 7091d0bf-548a-4d4a-93a6-6dd71cf75221
        #     Template Attribute
        #         Attribute
        #             Attribute Name - State
        #             Attribute Value - Pre-active
        self.full_encoding = utils.BytearrayStream(
            b'\x42\x00\x7C\x01\x00\x00\x00\x60'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x37\x30\x39\x31\x64\x30\x62\x66\x2D\x35\x34\x38\x61\x2D\x34\x64'
            b'\x34\x61\x2D\x39\x33\x61\x36\x2D\x36\x64\x64\x37\x31\x63\x66\x37'
            b'\x35\x32\x32\x31\x00\x00\x00\x00'
            b'\x42\x00\x91\x01\x00\x00\x00\x28'
            b'\x42\x00\x08\x01\x00\x00\x00\x20'
            b'\x42\x00\x0A\x07\x00\x00\x00\x05'
            b'\x53\x74\x61\x74\x65\x00\x00\x00'
            b'\x42\x00\x0B\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        )
        # Encoding obtained from the KMIP 1.1 testing document, Section 13.2.2.
        # Modified to include the template attribute.
        #
        # This encoding matches the following set of values:
        # Response Payload
        #     Template Attribute
        #         Attribute
        #             Attribute Name - State
        #             Attribute Value - Pre-active
        self.no_unique_identifier_encoding = utils.BytearrayStream(
            b'\x42\x00\x7C\x01\x00\x00\x00\x30'
            b'\x42\x00\x91\x01\x00\x00\x00\x28'
            b'\x42\x00\x08\x01\x00\x00\x00\x20'
            b'\x42\x00\x0A\x07\x00\x00\x00\x05'
            b'\x53\x74\x61\x74\x65\x00\x00\x00'
            b'\x42\x00\x0B\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        )
        # Encoding obtained from the KMIP 1.1 testing document, Section 13.2.2.
        #
        # This encoding matches the following set of values:
        # Response Payload
        #     Unique Identifier - 7091d0bf-548a-4d4a-93a6-6dd71cf75221
        self.no_template_attribute_encoding = utils.BytearrayStream(
            b'\x42\x00\x7C\x01\x00\x00\x00\x30'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x37\x30\x39\x31\x64\x30\x62\x66\x2D\x35\x34\x38\x61\x2D\x34\x64'
            b'\x34\x61\x2D\x39\x33\x61\x36\x2D\x36\x64\x64\x37\x31\x63\x66\x37'
            b'\x35\x32\x32\x31\x00\x00\x00\x00'
        )
    def test_invalid_unique_identifier(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the unique identifier of a Register response payload.
        """
        # Validation must trigger both via the constructor and via setattr.
        kwargs = {'unique_identifier': 0}
        self.assertRaisesRegex(
            TypeError,
            "Unique identifier must be a string.",
            payloads.RegisterResponsePayload,
            **kwargs
        )
        args = (payloads.RegisterResponsePayload(), 'unique_identifier', 0)
        self.assertRaisesRegex(
            TypeError,
            "Unique identifier must be a string.",
            setattr,
            *args
        )
    def test_invalid_template_attribute(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the template attribute of a Register response payload.
        """
        # Validation must trigger both via the constructor and via setattr.
        kwargs = {'template_attribute': 'invalid'}
        self.assertRaisesRegex(
            TypeError,
            "Template attribute must be a TemplateAttribute structure.",
            payloads.RegisterResponsePayload,
            **kwargs
        )
        args = (
            payloads.RegisterResponsePayload(),
            'template_attribute',
            'invalid'
        )
        self.assertRaisesRegex(
            TypeError,
            "Template attribute must be a TemplateAttribute structure.",
            setattr,
            *args
        )
    def test_read(self):
        """
        Test that a Register response payload can be read from a data stream.
        """
        payload = payloads.RegisterResponsePayload()
        self.assertIsNone(payload.unique_identifier)
        self.assertIsNone(payload.template_attribute)
        payload.read(self.full_encoding)
        self.assertEqual(
            "7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            payload.unique_identifier
        )
        self.assertEqual(
            objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            ),
            payload.template_attribute
        )
    def test_read_kmip_2_0(self):
        """
        Test that a Register response payload can be read from a data stream
        encoded with the KMIP 2.0 format.
        """
        # KMIP 2.0 drops the template attribute from the response payload.
        payload = payloads.RegisterResponsePayload()
        self.assertIsNone(payload.unique_identifier)
        self.assertIsNone(payload.template_attribute)
        payload.read(
            self.no_template_attribute_encoding,
            kmip_version=enums.KMIPVersion.KMIP_2_0
        )
        self.assertEqual(
            "7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            payload.unique_identifier
        )
        self.assertIsNone(payload.template_attribute)
    def test_read_missing_unique_identifier(self):
        """
        Test that an InvalidKmipEncoding error is raised during the decoding
        of a Register response payload when the unique identifier is missing
        from the encoding.
        """
        payload = payloads.RegisterResponsePayload()
        self.assertIsNone(payload.unique_identifier)
        self.assertIsNone(payload.template_attribute)
        args = (self.no_unique_identifier_encoding, )
        self.assertRaisesRegex(
            exceptions.InvalidKmipEncoding,
            "The Register response payload encoding is missing the unique "
            "identifier.",
            payload.read,
            *args
        )
    def test_read_missing_template_attribute(self):
        """
        Test that a Register response payload can be read from a data stream
        even when missing the template attribute.
        """
        # The template attribute is optional; decoding must succeed without
        # it and leave the field unset.
        payload = payloads.RegisterResponsePayload()
        self.assertEqual(None, payload.unique_identifier)
        self.assertEqual(None, payload.template_attribute)
        payload.read(self.no_template_attribute_encoding)
        self.assertEqual(
            "7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            payload.unique_identifier
        )
        self.assertIsNone(payload.template_attribute)
    def test_write(self):
        """
        Test that a Register response payload can be written to a data stream.
        """
        payload = payloads.RegisterResponsePayload(
            unique_identifier="7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        stream = utils.BytearrayStream()
        payload.write(stream)
        self.assertEqual(len(self.full_encoding), len(stream))
        self.assertEqual(str(self.full_encoding), str(stream))
    def test_write_kmip_2_0(self):
        """
        Test that a Register response payload can be written to a data stream
        encoded with the KMIP 2.0 format.
        """
        # Under KMIP 2.0 the template attribute is not encoded, so the
        # output must match the attribute-free encoding.
        payload = payloads.RegisterResponsePayload(
            unique_identifier="7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        stream = utils.BytearrayStream()
        payload.write(stream, kmip_version=enums.KMIPVersion.KMIP_2_0)
        self.assertEqual(len(self.no_template_attribute_encoding), len(stream))
        self.assertEqual(str(self.no_template_attribute_encoding), str(stream))
    def test_write_missing_unique_identifier(self):
        """
        Test that an InvalidField error is raised during the encoding of a
        Register response payload when the payload is missing the unique
        identifier.
        """
        payload = payloads.RegisterResponsePayload(
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        stream = utils.BytearrayStream()
        args = (stream, )
        self.assertRaisesRegex(
            exceptions.InvalidField,
            "The Register response payload is missing the unique identifier "
            "field.",
            payload.write,
            *args
        )
    def test_write_missing_template_attribute(self):
        """
        Test that a Register response payload can be written to a data stream
        even when missing the template attribute.
        """
        payload = payloads.RegisterResponsePayload(
            unique_identifier="7091d0bf-548a-4d4a-93a6-6dd71cf75221"
        )
        stream = utils.BytearrayStream()
        payload.write(stream)
        self.assertEqual(len(self.no_template_attribute_encoding), len(stream))
        self.assertEqual(str(self.no_template_attribute_encoding), str(stream))
    def test_repr(self):
        """
        Test that repr can be applied to a Register response payload structure.
        """
        payload = payloads.RegisterResponsePayload(
            unique_identifier="7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        # The template attribute renders generically as Struct().
        self.assertEqual(
            "RegisterResponsePayload("
            "unique_identifier='7091d0bf-548a-4d4a-93a6-6dd71cf75221', "
            "template_attribute=Struct())",
            repr(payload)
        )
    def test_str(self):
        """
        Test that str can be applied to a Register response payload structure.
        """
        payload = payloads.RegisterResponsePayload(
            unique_identifier="7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        self.assertEqual(
            '{'
            '"unique_identifier": "7091d0bf-548a-4d4a-93a6-6dd71cf75221", '
            '"template_attribute": Struct()'
            '}',
            str(payload)
        )
    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        Register response payloads with the same data.
        """
        # Empty payloads, then fully populated identical payloads.
        a = payloads.RegisterResponsePayload()
        b = payloads.RegisterResponsePayload()
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        a = payloads.RegisterResponsePayload(
            unique_identifier="7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        b = payloads.RegisterResponsePayload(
            unique_identifier="7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        self.assertTrue(a == b)
        self.assertTrue(b == a)
    def test_equal_on_not_equal_unique_identifier(self):
        """
        Test that the equality operator returns False when comparing two
        Register response payloads with different unique identifiers.
        """
        a = payloads.RegisterResponsePayload(unique_identifier="a")
        b = payloads.RegisterResponsePayload(unique_identifier="b")
        self.assertFalse(a == b)
        self.assertFalse(b == a)
    def test_equal_on_not_equal_template_attribute(self):
        """
        Test that the equality operator returns False when comparing two
        Register response payloads with different template attributes.
        """
        # The payloads differ only in the state value (PRE_ACTIVE vs ACTIVE).
        a = payloads.RegisterResponsePayload(
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        b = payloads.RegisterResponsePayload(
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
    def test_equal_on_type_mismatch(self):
        """
        Test that the equality operator returns False when comparing two
        Register response payloads with different types.
        """
        a = payloads.RegisterResponsePayload()
        b = "invalid"
        self.assertFalse(a == b)
        self.assertFalse(b == a)
    def test_not_equal_on_equal(self):
        """
        Test that the inequality operator returns False when comparing two
        Register response payloads with the same data.
        """
        # Empty payloads, then fully populated identical payloads.
        a = payloads.RegisterResponsePayload()
        b = payloads.RegisterResponsePayload()
        self.assertFalse(a != b)
        self.assertFalse(b != a)
        a = payloads.RegisterResponsePayload(
            unique_identifier="7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        b = payloads.RegisterResponsePayload(
            unique_identifier="7091d0bf-548a-4d4a-93a6-6dd71cf75221",
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        self.assertFalse(a != b)
        self.assertFalse(b != a)
    def test_not_equal_on_not_equal_unique_identifier(self):
        """
        Test that the inequality operator returns True when comparing two
        Register response payloads with different unique identifiers.
        """
        a = payloads.RegisterResponsePayload(unique_identifier="a")
        b = payloads.RegisterResponsePayload(unique_identifier="b")
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def test_not_equal_on_not_equal_template_attribute(self):
        """
        Test that the inequality operator returns True when comparing two
        Register response payloads with different template attributes.
        """
        # The payloads differ only in the state value (PRE_ACTIVE vs ACTIVE).
        a = payloads.RegisterResponsePayload(
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.PRE_ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        b = payloads.RegisterResponsePayload(
            template_attribute=objects.TemplateAttribute(
                attributes=[
                    objects.Attribute(
                        attribute_name=objects.Attribute.AttributeName(
                            "State"
                        ),
                        attribute_value=primitives.Enumeration(
                            enums.State,
                            value=enums.State.ACTIVE,
                            tag=enums.Tags.STATE
                        )
                    )
                ]
            )
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def test_not_equal_on_type_mismatch(self):
        """
        Test that the inequality operator returns True when comparing two
        Register response payloads with different types.
        """
        a = payloads.RegisterResponsePayload()
        b = "invalid"
        self.assertTrue(a != b)
        self.assertTrue(b != a)
| |
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver common utilities for HP MSA Storage array
"""
import base64
import uuid
from oslo_config import cfg
from cinder import exception
from cinder.i18n import _LE
from cinder.openstack.common import log as logging
from cinder.volume.drivers.san.hp import hp_msa_client as msa
LOG = logging.getLogger(__name__)
# Driver-specific configuration options. Only the target VDisk name is
# configurable; SAN credentials come from the generic san_* options.
hpmsa_opt = [
    cfg.StrOpt('msa_vdisk',
               default='OpenStack',
               help="The VDisk to use for volume creation."),
]
CONF = cfg.CONF
# Register the options with the global config object at import time so the
# service recognizes msa_vdisk in its configuration files.
CONF.register_opts(hpmsa_opt)
class HPMSACommon(object):
"""Class that contains common code for MSA drivers.
Version history:
0.1 - First release
0.2 - Added Logging Markers
"""
VERSION = "0.2"
stats = {}
def __init__(self, config):
self.config = config
self.client = msa.HPMSAClient(self.config.san_ip,
self.config.san_login,
self.config.san_password)
self.vdisk = self.config.msa_vdisk
    def get_version(self):
        """Return the driver version string."""
        return self.VERSION
    def do_setup(self, context):
        """Validate the backend at service startup.

        Logs in, verifies that the configured VDisk exists, then logs out.
        The context argument is unused but required by the driver API.
        """
        self.client_login()
        self._validate_vdisks()
        self.client_logout()
def client_login(self):
LOG.debug("Connecting to MSA")
try:
self.client.login()
except msa.HPMSAConnectionError as ex:
msg = (_LE("Failed to connect to MSA Array (%(host)s): %(err)s") %
{'host': self.config.san_ip, 'err': ex})
LOG.error(msg)
raise exception.HPMSAConnectionError(reason=msg)
except msa.HPMSAAuthenticationError:
msg = _LE("Failed to log on MSA Array (invalid login?)")
LOG.error(msg)
raise exception.HPMSAConnectionError(reason=msg)
    def _validate_vdisks(self):
        # Fail early if the configured VDisk does not exist on the array.
        # The session is closed before raising so we don't leak a login.
        if not self.client.vdisk_exists(self.vdisk):
            self.client_logout()
            raise exception.HPMSAInvalidVDisk(vdisk=self.vdisk)
    def client_logout(self):
        # Close the array session opened by client_login().
        self.client.logout()
        LOG.debug("Disconnected from MSA Array")
def _get_vol_name(self, volume_id):
volume_name = self._encode_name(volume_id)
return "v%s" % volume_name
def _get_snap_name(self, snapshot_id):
snapshot_name = self._encode_name(snapshot_id)
return "s%s" % snapshot_name
def _encode_name(self, name):
"""Get converted MSA volume name.
Converts the openstack volume id from
ecffc30f-98cb-4cf5-85ee-d7309cc17cd2
to
7P_DD5jLTPWF7tcwnMF80g
We convert the 128 bits of the uuid into a 24character long
base64 encoded string. This still exceeds the limit of 20 characters
so we truncate the name later.
"""
uuid_str = name.replace("-", "")
vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
vol_encoded = base64.b64encode(vol_uuid.bytes)
vol_encoded = vol_encoded.replace('=', '')
# + is not a valid character for MSA
vol_encoded = vol_encoded.replace('+', '.')
# since we use http URLs to send paramters, '/' is not an acceptable
# parameter
vol_encoded = vol_encoded.replace('/', '_')
# NOTE(gpocentek): we limit the size to 20 characters since the array
# doesn't support more than that for now. Duplicates should happen very
# rarely.
# We return 19 chars here because the _get_{vol,snap}_name functions
# prepend a character
return vol_encoded[:19]
def check_flags(self, options, required_flags):
for flag in required_flags:
if not getattr(options, flag, None):
msg = _LE('%s configuration option is not set') % flag
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def create_volume(self, volume):
volume_id = self._get_vol_name(volume['id'])
LOG.debug("Create Volume (%(display_name)s: %(name)s %(id)s)" %
{'display_name': volume['display_name'],
'name': volume['name'], 'id': volume_id})
# use base64 to encode the volume name (UUID is too long for MSA)
volume_name = self._get_vol_name(volume['id'])
volume_size = "%dGB" % volume['size']
try:
metadata = self.client.create_volume(self.config.msa_vdisk,
volume_name,
volume_size)
except msa.HPMSARequestError as ex:
LOG.error(ex)
raise exception.Invalid(ex)
return metadata
def _assert_enough_space_for_copy(self, volume_size):
"""The MSA creates a snap pool before trying to copy the volume.
The pool is 5.27GB or 20% of the volume size, whichever is larger.
Verify that we have enough space for the pool and then copy
"""
pool_size = max(volume_size * 0.2, 5.27)
required_size = pool_size + volume_size
if required_size > self.stats['free_capacity_gb']:
raise exception.HPMSANotEnoughSpace(vdisk=self.vdisk)
def _assert_source_detached(self, volume):
"""The MSA requires a volume to be detached to clone it.
Make sure that the volume is not in use when trying to copy it.
"""
if volume['status'] != "available" or \
volume['attach_status'] == "attached":
msg = _LE("Volume must be detached to perform a clone operation.")
LOG.error(msg)
raise exception.VolumeAttached(volume_id=volume['id'])
def create_cloned_volume(self, volume, src_vref):
self.get_volume_stats(True)
self._assert_enough_space_for_copy(volume['size'])
self._assert_source_detached(src_vref)
LOG.debug("Cloning Volume %(source_id)s (%(dest_id)s)" %
{'source_id': volume['source_volid'],
'dest_id': volume['id']})
orig_name = self._get_vol_name(volume['source_volid'])
dest_name = self._get_vol_name(volume['id'])
try:
self.client.copy_volume(orig_name, dest_name,
self.config.msa_vdisk)
except msa.HPMSARequestError as ex:
LOG.error(ex)
raise exception.Invalid(ex)
return None
def create_volume_from_snapshot(self, volume, snapshot):
self.get_volume_stats(True)
self._assert_enough_space_for_copy(volume['size'])
LOG.debug("Creating Volume from snapshot %(source_id)s "
"(%(dest_id)s)" %
{'source_id': snapshot['id'], 'dest_id': volume['id']})
orig_name = self._get_snap_name(snapshot['id'])
dest_name = self._get_vol_name(volume['id'])
try:
self.client.copy_volume(orig_name, dest_name,
self.config.msa_vdisk)
except msa.HPMSARequestError as ex:
LOG.error(ex)
raise exception.Invalid(ex)
return None
def delete_volume(self, volume):
LOG.debug("Deleting Volume (%s)" % volume['id'])
volume_name = self._get_vol_name(volume['id'])
try:
self.client.delete_volume(volume_name)
except msa.HPMSARequestError as ex:
LOG.error(ex)
# if the volume wasn't found, ignore the error
if 'The volume was not found on this system.' in ex:
return
raise exception.Invalid(ex)
    def get_volume_stats(self, refresh):
        """Return cached backend stats, refreshing them first if requested.

        :param refresh: when truthy, query the array via
            _update_volume_stats() before returning
        :returns: dict of driver/capacity stats (see _update_volume_stats)
        """
        if refresh:
            self._update_volume_stats()
        return self.stats
def _update_volume_stats(self):
# storage_protocol and volume_backend_name are
# set in the child classes
stats = {'driver_version': self.VERSION,
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'storage_protocol': None,
'total_capacity_gb': 'unknown',
'QoS_support': False,
'vendor_name': 'Hewlett-Packard',
'volume_backend_name': None}
try:
vdisk_stats = self.client.vdisk_stats(self.config.msa_vdisk)
stats.update(vdisk_stats)
except msa.HPMSARequestError:
err = (_LE("Unable to get stats for VDisk (%s)")
% self.config.msa_vdisk)
LOG.error(err)
raise exception.Invalid(reason=err)
self.stats = stats
def _assert_connector_ok(self, connector):
if not connector['wwpns']:
msg = _LE("Connector doesn't provide wwpns")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def map_volume(self, volume, connector):
self._assert_connector_ok(connector)
volume_name = self._get_vol_name(volume['id'])
try:
data = self.client.map_volume(volume_name, connector['wwpns'])
return data
except msa.HPMSARequestError as ex:
LOG.error(ex)
raise exception.Invalid(ex)
def unmap_volume(self, volume, connector):
self._assert_connector_ok(connector)
volume_name = self._get_vol_name(volume['id'])
try:
self.client.unmap_volume(volume_name, connector['wwpns'])
except msa.HPMSARequestError as ex:
LOG.error(ex)
raise exception.Invalid(ex)
    def get_active_fc_target_ports(self):
        """Return the array's active Fibre Channel target ports.

        Thin delegate to the MSA client; no local processing.
        """
        return self.client.get_active_fc_target_ports()
def create_snapshot(self, snapshot):
LOG.debug("Creating Snapshot from %(volume_id)s (%(snap_id)s)" %
{'volume_id': snapshot['volume_id'],
'snap_id': snapshot['id']})
snap_name = self._get_snap_name(snapshot['id'])
vol_name = self._get_vol_name(snapshot['volume_id'])
try:
self.client.create_snapshot(vol_name, snap_name)
except msa.HPMSARequestError as ex:
LOG.error(ex)
raise exception.Invalid(ex)
def delete_snapshot(self, snapshot):
snap_name = self._get_snap_name(snapshot['id'])
LOG.debug("Deleting Snapshot (%s)" % snapshot['id'])
try:
self.client.delete_snapshot(snap_name)
except msa.HPMSARequestError as ex:
LOG.error(ex)
# if the volume wasn't found, ignore the error
if 'The volume was not found on this system.' in ex:
return
raise exception.Invalid(ex)
def extend_volume(self, volume, new_size):
volume_name = self._get_vol_name(volume['id'])
old_size = volume['size']
growth_size = int(new_size) - old_size
LOG.debug("Extending Volume %(volume_name)s from %(old_size)s to "
"%(new_size)s, by %(growth_size)s GB." %
{'volume_name': volume_name, 'old_size': old_size,
'new_size': new_size, 'growth_size': growth_size})
try:
self.client.extend_volume(volume_name, "%dGB" % growth_size)
except msa.HPMSARequestError as ex:
LOG.error(ex)
raise exception.Invalid(ex)
| |
# -*- coding: utf-8 -*-
"""Tests for function unzip() from zipfile module."""
import tempfile
import pytest
from cookiecutter import zipfile
from cookiecutter.exceptions import InvalidZipRepository
def mock_download():
    """Fake download: stream the fixture zip in 1 KiB chunks."""
    with open('tests/files/fake-repo-tmpl.zip', 'rb') as zf:
        # iter() with a b'' sentinel stops at EOF, exactly like the
        # read/check/read loop it replaces.
        for chunk in iter(lambda: zf.read(1024), b''):
            yield chunk
def test_unzip_local_file(mocker, tmpdir):
    """Local file reference can be unzipped."""
    mock_prompt_and_delete = mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    dest_dir = str(tmpdir.mkdir('clone'))
    output_dir = zipfile.unzip(
        'tests/files/fake-repo-tmpl.zip', is_url=False, clone_to_dir=dest_dir
    )
    assert output_dir.startswith(tempfile.gettempdir())
    assert not mock_prompt_and_delete.called
def test_unzip_protected_local_file_environment_password(mocker, tmpdir):
    """In `unzip()`, the environment can be used to provide a repo password."""
    mock_prompt_and_delete = mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    dest_dir = str(tmpdir.mkdir('clone'))
    output_dir = zipfile.unzip(
        'tests/files/protected-fake-repo-tmpl.zip',
        is_url=False,
        clone_to_dir=dest_dir,
        password='sekrit',
    )
    assert output_dir.startswith(tempfile.gettempdir())
    assert not mock_prompt_and_delete.called
def test_unzip_protected_local_file_bad_environment_password(mocker, tmpdir):
    """In `unzip()`, an error occurs if the environment has a bad password."""
    mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    dest_dir = str(tmpdir.mkdir('clone'))
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'tests/files/protected-fake-repo-tmpl.zip',
            is_url=False,
            clone_to_dir=dest_dir,
            password='not-the-right-password',
        )
def test_unzip_protected_local_file_user_password_with_noinput(mocker, tmpdir):
    """Can't unpack a password-protected repo in no_input mode."""
    mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    dest_dir = str(tmpdir.mkdir('clone'))
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'tests/files/protected-fake-repo-tmpl.zip',
            is_url=False,
            clone_to_dir=dest_dir,
            no_input=True,
        )
def test_unzip_protected_local_file_user_password(mocker, tmpdir):
    """A password-protected local file reference can be unzipped."""
    mock_prompt_and_delete = mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    # Simulate the user typing the correct password at the prompt.
    mocker.patch(
        'cookiecutter.zipfile.read_repo_password',
        return_value='sekrit',
    )
    dest_dir = str(tmpdir.mkdir('clone'))
    output_dir = zipfile.unzip(
        'tests/files/protected-fake-repo-tmpl.zip',
        is_url=False,
        clone_to_dir=dest_dir,
    )
    assert output_dir.startswith(tempfile.gettempdir())
    assert not mock_prompt_and_delete.called
def test_unzip_protected_local_file_user_bad_password(mocker, tmpdir):
    """Error in `unzip()`, if user can't provide a valid password."""
    mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    # Simulate the user typing a wrong password at the prompt.
    mocker.patch(
        'cookiecutter.zipfile.read_repo_password',
        return_value='not-the-right-password',
    )
    dest_dir = str(tmpdir.mkdir('clone'))
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'tests/files/protected-fake-repo-tmpl.zip',
            is_url=False,
            clone_to_dir=dest_dir,
        )
def test_empty_zip_file(mocker, tmpdir):
    """In `unzip()`, an empty file raises an error."""
    mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    dest_dir = str(tmpdir.mkdir('clone'))
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'tests/files/empty.zip', is_url=False, clone_to_dir=dest_dir
        )
def test_non_repo_zip_file(mocker, tmpdir):
    """In `unzip()`, a repository must have a top level directory."""
    mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    dest_dir = str(tmpdir.mkdir('clone'))
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'tests/files/not-a-repo.zip', is_url=False, clone_to_dir=dest_dir
        )
def test_bad_zip_file(mocker, tmpdir):
    """In `unzip()`, a corrupted zip file raises an error."""
    mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    dest_dir = str(tmpdir.mkdir('clone'))
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'tests/files/bad-zip-file.zip', is_url=False, clone_to_dir=dest_dir
        )
def test_unzip_url(mocker, tmpdir):
    """In `unzip()`, a url will be downloaded and unzipped."""
    mock_prompt_and_delete = mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    # Replace the HTTP layer with a canned streaming response.
    fake_response = mocker.MagicMock()
    fake_response.iter_content.return_value = mock_download()
    mocker.patch(
        'cookiecutter.zipfile.requests.get',
        autospec=True,
        return_value=fake_response,
    )
    dest_dir = str(tmpdir.mkdir('clone'))
    output_dir = zipfile.unzip(
        'https://example.com/path/to/fake-repo-tmpl.zip',
        is_url=True,
        clone_to_dir=dest_dir,
    )
    assert output_dir.startswith(tempfile.gettempdir())
    assert not mock_prompt_and_delete.called
def test_unzip_url_existing_cache(mocker, tmpdir):
    """Url should be downloaded and unzipped, old zip file will be removed."""
    mock_prompt_and_delete = mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        return_value=True,
    )
    # Replace the HTTP layer with a canned streaming response.
    fake_response = mocker.MagicMock()
    fake_response.iter_content.return_value = mock_download()
    mocker.patch(
        'cookiecutter.zipfile.requests.get',
        autospec=True,
        return_value=fake_response,
    )
    dest_dir = tmpdir.mkdir('clone')
    # Simulate a previously downloaded copy of the same archive.
    dest_dir.join('fake-repo-tmpl.zip').write('This is an existing zipfile')
    output_dir = zipfile.unzip(
        'https://example.com/path/to/fake-repo-tmpl.zip',
        is_url=True,
        clone_to_dir=str(dest_dir),
    )
    assert output_dir.startswith(tempfile.gettempdir())
    assert mock_prompt_and_delete.call_count == 1
def test_unzip_url_existing_cache_no_input(mocker, tmpdir):
    """If no_input is provided, the existing file should be removed."""
    # Replace the HTTP layer with a canned streaming response.
    fake_response = mocker.MagicMock()
    fake_response.iter_content.return_value = mock_download()
    mocker.patch(
        'cookiecutter.zipfile.requests.get',
        autospec=True,
        return_value=fake_response,
    )
    dest_dir = tmpdir.mkdir('clone')
    # Simulate a previously downloaded copy of the same archive.
    dest_dir.join('fake-repo-tmpl.zip').write('This is an existing zipfile')
    output_dir = zipfile.unzip(
        'https://example.com/path/to/fake-repo-tmpl.zip',
        is_url=True,
        clone_to_dir=str(dest_dir),
        no_input=True,
    )
    assert output_dir.startswith(tempfile.gettempdir())
def test_unzip_should_abort_if_no_redownload(mocker, tmpdir):
    """Should exit without cloning anything If no redownload."""
    mocker.patch(
        'cookiecutter.zipfile.prompt_and_delete',
        autospec=True,
        side_effect=SystemExit,
    )
    mock_requests_get = mocker.patch(
        'cookiecutter.zipfile.requests.get',
        autospec=True,
    )
    dest_dir = tmpdir.mkdir('clone')
    # Simulate a previously downloaded copy of the same archive.
    dest_dir.join('fake-repo-tmpl.zip').write('This is an existing zipfile')
    zipfile_url = 'https://example.com/path/to/fake-repo-tmpl.zip'
    with pytest.raises(SystemExit):
        zipfile.unzip(zipfile_url, is_url=True, clone_to_dir=str(dest_dir))
    assert not mock_requests_get.called
| |
"""The Z-Wave JS integration."""
from __future__ import annotations
import asyncio
from collections import defaultdict
from typing import Callable
from async_timeout import timeout
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.exceptions import BaseZwaveJSServerError, InvalidServerVersion
from zwave_js_server.model.node import Node as ZwaveNode
from zwave_js_server.model.notification import (
EntryControlNotification,
NotificationNotification,
)
from zwave_js_server.model.value import Value, ValueNotification
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_DEVICE_ID,
ATTR_DOMAIN,
ATTR_ENTITY_ID,
CONF_URL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry, entity_registry
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .addon import AddonError, AddonManager, AddonState, get_addon_manager
from .api import async_register_api
from .const import (
ATTR_COMMAND_CLASS,
ATTR_COMMAND_CLASS_NAME,
ATTR_DATA_TYPE,
ATTR_ENDPOINT,
ATTR_EVENT,
ATTR_EVENT_DATA,
ATTR_EVENT_LABEL,
ATTR_EVENT_TYPE,
ATTR_HOME_ID,
ATTR_LABEL,
ATTR_NODE_ID,
ATTR_PARAMETERS,
ATTR_PROPERTY,
ATTR_PROPERTY_KEY,
ATTR_PROPERTY_KEY_NAME,
ATTR_PROPERTY_NAME,
ATTR_TYPE,
ATTR_VALUE,
ATTR_VALUE_RAW,
CONF_ADDON_DEVICE,
CONF_ADDON_NETWORK_KEY,
CONF_DATA_COLLECTION_OPTED_IN,
CONF_INTEGRATION_CREATED_ADDON,
CONF_NETWORK_KEY,
CONF_USB_PATH,
CONF_USE_ADDON,
DATA_CLIENT,
DATA_PLATFORM_SETUP,
DATA_UNSUBSCRIBE,
DOMAIN,
EVENT_DEVICE_ADDED_TO_REGISTRY,
LOGGER,
ZWAVE_JS_NOTIFICATION_EVENT,
ZWAVE_JS_VALUE_NOTIFICATION_EVENT,
ZWAVE_JS_VALUE_UPDATED_EVENT,
)
from .discovery import ZwaveDiscoveryInfo, async_discover_values
from .helpers import async_enable_statistics, get_device_id, get_unique_id
from .migrate import async_migrate_discovered_value
from .services import ZWaveServices
CONNECT_TIMEOUT = 10
DATA_CLIENT_LISTEN_TASK = "client_listen_task"
DATA_START_PLATFORM_TASK = "start_platform_task"
DATA_CONNECT_FAILED_LOGGED = "connect_failed_logged"
DATA_INVALID_SERVER_VERSION_LOGGED = "invalid_server_version_logged"
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
    """Set up the Z-Wave JS component.

    Only initializes the per-domain storage dict; the actual server
    connection is made per config entry in async_setup_entry.
    """
    hass.data[DOMAIN] = {}
    return True
@callback
def register_node_in_dev_reg(
    hass: HomeAssistant,
    entry: ConfigEntry,
    dev_reg: device_registry.DeviceRegistry,
    client: ZwaveClient,
    node: ZwaveNode,
) -> device_registry.DeviceEntry:
    """Register (or update) a Z-Wave node in the device registry."""
    device_info = {
        "config_entry_id": entry.entry_id,
        "identifiers": {get_device_id(client, node)},
        "sw_version": node.firmware_version,
        "name": node.name or node.device_config.description or f"Node {node.node_id}",
        "model": node.device_config.label,
        "manufacturer": node.device_config.manufacturer,
    }
    # Only suggest an area when the node actually reports a location.
    if node.location:
        device_info["suggested_area"] = node.location
    device = dev_reg.async_get_or_create(**device_info)
    async_dispatcher_send(hass, EVENT_DEVICE_ADDED_TO_REGISTRY, device)
    return device
async def async_setup_entry(  # noqa: C901
    hass: HomeAssistant, entry: ConfigEntry
) -> bool:
    """Set up Z-Wave JS from a config entry.

    Connects to the Z-Wave JS server (optionally via the supervisor
    add-on), registers event callbacks, and schedules platform setup and
    node discovery in a background task.

    :raises ConfigEntryNotReady: if the server cannot be reached or
        reports an incompatible version.
    """
    use_addon = entry.data.get(CONF_USE_ADDON)
    if use_addon:
        await async_ensure_addon_running(hass, entry)
    client = ZwaveClient(entry.data[CONF_URL], async_get_clientsession(hass))
    dev_reg = device_registry.async_get(hass)
    ent_reg = entity_registry.async_get(hass)
    # Per-entry mutable state shared by all the closures defined below.
    entry_hass_data: dict = hass.data[DOMAIN].setdefault(entry.entry_id, {})
    unsubscribe_callbacks: list[Callable] = []
    entry_hass_data[DATA_CLIENT] = client
    entry_hass_data[DATA_UNSUBSCRIBE] = unsubscribe_callbacks
    entry_hass_data[DATA_PLATFORM_SETUP] = {}
    # device.id -> platform -> set of unique_ids already migrated/registered
    registered_unique_ids: dict[str, dict[str, set[str]]] = defaultdict(dict)
    async def async_on_node_ready(node: ZwaveNode) -> None:
        """Handle node ready event."""
        LOGGER.debug("Processing node %s", node)
        platform_setup_tasks = entry_hass_data[DATA_PLATFORM_SETUP]
        # register (or update) node in device registry
        device = register_node_in_dev_reg(hass, entry, dev_reg, client, node)
        # We only want to create the defaultdict once, even on reinterviews
        if device.id not in registered_unique_ids:
            registered_unique_ids[device.id] = defaultdict(set)
        value_updates_disc_info = []
        # run discovery on all node values and create/update entities
        for disc_info in async_discover_values(node):
            platform = disc_info.platform
            # This migration logic was added in 2021.3 to handle a breaking change to
            # the value_id format. Some time in the future, this call (as well as the
            # helper functions) can be removed.
            async_migrate_discovered_value(
                hass,
                ent_reg,
                registered_unique_ids[device.id][platform],
                device,
                client,
                disc_info,
            )
            # Lazily forward entry setup for each platform the first time a
            # value for that platform is discovered.
            if platform not in platform_setup_tasks:
                platform_setup_tasks[platform] = hass.async_create_task(
                    hass.config_entries.async_forward_entry_setup(entry, platform)
                )
            await platform_setup_tasks[platform]
            LOGGER.debug("Discovered entity: %s", disc_info)
            async_dispatcher_send(
                hass, f"{DOMAIN}_{entry.entry_id}_add_{platform}", disc_info
            )
            # Capture discovery info for values we want to watch for updates
            if disc_info.assumed_state:
                value_updates_disc_info.append(disc_info)
        # We need to set up the sensor platform if it hasn't already been setup in
        # order to create the node status sensor
        if SENSOR_DOMAIN not in platform_setup_tasks:
            platform_setup_tasks[SENSOR_DOMAIN] = hass.async_create_task(
                hass.config_entries.async_forward_entry_setup(entry, SENSOR_DOMAIN)
            )
        await platform_setup_tasks[SENSOR_DOMAIN]
        # Create a node status sensor for each device
        async_dispatcher_send(
            hass, f"{DOMAIN}_{entry.entry_id}_add_node_status_sensor", node
        )
        # add listener for value updated events if necessary
        if value_updates_disc_info:
            unsubscribe_callbacks.append(
                node.on(
                    "value updated",
                    lambda event: async_on_value_updated(
                        value_updates_disc_info, event["value"]
                    ),
                )
            )
        # add listener for stateless node value notification events
        unsubscribe_callbacks.append(
            node.on(
                "value notification",
                lambda event: async_on_value_notification(event["value_notification"]),
            )
        )
        # add listener for stateless node notification events
        unsubscribe_callbacks.append(
            node.on(
                "notification",
                lambda event: async_on_notification(event["notification"]),
            )
        )
    async def async_on_node_added(node: ZwaveNode) -> None:
        """Handle node added event."""
        # we only want to run discovery when the node has reached ready state,
        # otherwise we'll have all kinds of missing info issues.
        if node.ready:
            await async_on_node_ready(node)
            return
        # if node is not yet ready, register one-time callback for ready state
        LOGGER.debug("Node added: %s - waiting for it to become ready", node.node_id)
        node.once(
            "ready",
            lambda event: hass.async_create_task(async_on_node_ready(event["node"])),
        )
        # we do submit the node to device registry so user has
        # some visual feedback that something is (in the process of) being added
        register_node_in_dev_reg(hass, entry, dev_reg, client, node)
    @callback
    def async_on_node_removed(node: ZwaveNode) -> None:
        """Handle node removed event."""
        # grab device in device registry attached to this node
        dev_id = get_device_id(client, node)
        device = dev_reg.async_get_device({dev_id})
        # note: removal of entity registry entry is handled by core
        dev_reg.async_remove_device(device.id)  # type: ignore
        registered_unique_ids.pop(device.id, None)  # type: ignore
    @callback
    def async_on_value_notification(notification: ValueNotification) -> None:
        """Relay stateless value notification events from Z-Wave nodes to hass."""
        device = dev_reg.async_get_device({get_device_id(client, notification.node)})
        raw_value = value = notification.value
        # Translate the raw value to a human-readable state label if one
        # exists in the value's metadata.
        if notification.metadata.states:
            value = notification.metadata.states.get(str(value), value)
        hass.bus.async_fire(
            ZWAVE_JS_VALUE_NOTIFICATION_EVENT,
            {
                ATTR_DOMAIN: DOMAIN,
                ATTR_NODE_ID: notification.node.node_id,
                ATTR_HOME_ID: client.driver.controller.home_id,
                ATTR_ENDPOINT: notification.endpoint,
                ATTR_DEVICE_ID: device.id,  # type: ignore
                ATTR_COMMAND_CLASS: notification.command_class,
                ATTR_COMMAND_CLASS_NAME: notification.command_class_name,
                ATTR_LABEL: notification.metadata.label,
                ATTR_PROPERTY: notification.property_,
                ATTR_PROPERTY_NAME: notification.property_name,
                ATTR_PROPERTY_KEY: notification.property_key,
                ATTR_PROPERTY_KEY_NAME: notification.property_key_name,
                ATTR_VALUE: value,
                ATTR_VALUE_RAW: raw_value,
            },
        )
    @callback
    def async_on_notification(
        notification: EntryControlNotification | NotificationNotification,
    ) -> None:
        """Relay stateless notification events from Z-Wave nodes to hass."""
        device = dev_reg.async_get_device({get_device_id(client, notification.node)})
        event_data = {
            ATTR_DOMAIN: DOMAIN,
            ATTR_NODE_ID: notification.node.node_id,
            ATTR_HOME_ID: client.driver.controller.home_id,
            ATTR_DEVICE_ID: device.id,  # type: ignore
            ATTR_COMMAND_CLASS: notification.command_class,
        }
        # The two notification types carry different payloads.
        if isinstance(notification, EntryControlNotification):
            event_data.update(
                {
                    ATTR_COMMAND_CLASS_NAME: "Entry Control",
                    ATTR_EVENT_TYPE: notification.event_type,
                    ATTR_DATA_TYPE: notification.data_type,
                    ATTR_EVENT_DATA: notification.event_data,
                }
            )
        else:
            event_data.update(
                {
                    ATTR_COMMAND_CLASS_NAME: "Notification",
                    ATTR_LABEL: notification.label,
                    ATTR_TYPE: notification.type_,
                    ATTR_EVENT: notification.event,
                    ATTR_EVENT_LABEL: notification.event_label,
                    ATTR_PARAMETERS: notification.parameters,
                }
            )
        hass.bus.async_fire(ZWAVE_JS_NOTIFICATION_EVENT, event_data)
    @callback
    def async_on_value_updated(
        value_updates_disc_info: list[ZwaveDiscoveryInfo], value: Value
    ) -> None:
        """Fire value updated event."""
        # Get the discovery info for the value that was updated. If we can't
        # find the discovery info, we don't need to fire an event
        try:
            disc_info = next(
                disc_info
                for disc_info in value_updates_disc_info
                if disc_info.primary_value.value_id == value.value_id
            )
        except StopIteration:
            return
        device = dev_reg.async_get_device({get_device_id(client, value.node)})
        unique_id = get_unique_id(
            client.driver.controller.home_id, disc_info.primary_value.value_id
        )
        entity_id = ent_reg.async_get_entity_id(disc_info.platform, DOMAIN, unique_id)
        raw_value = value_ = value.value
        # NOTE(review): the states lookup keys on str(value) — the Value
        # object — not str(value.value); confirm this is intentional.
        if value.metadata.states:
            value_ = value.metadata.states.get(str(value), value_)
        hass.bus.async_fire(
            ZWAVE_JS_VALUE_UPDATED_EVENT,
            {
                ATTR_NODE_ID: value.node.node_id,
                ATTR_HOME_ID: client.driver.controller.home_id,
                ATTR_DEVICE_ID: device.id,  # type: ignore
                ATTR_ENTITY_ID: entity_id,
                ATTR_COMMAND_CLASS: value.command_class,
                ATTR_COMMAND_CLASS_NAME: value.command_class_name,
                ATTR_ENDPOINT: value.endpoint,
                ATTR_PROPERTY: value.property_,
                ATTR_PROPERTY_NAME: value.property_name,
                ATTR_PROPERTY_KEY: value.property_key,
                ATTR_PROPERTY_KEY_NAME: value.property_key_name,
                ATTR_VALUE: value_,
                ATTR_VALUE_RAW: raw_value,
            },
        )
    # connect and throw error if connection failed
    try:
        async with timeout(CONNECT_TIMEOUT):
            await client.connect()
    except InvalidServerVersion as err:
        # Log each failure reason only once per entry to avoid log spam
        # across repeated ConfigEntryNotReady retries.
        if not entry_hass_data.get(DATA_INVALID_SERVER_VERSION_LOGGED):
            LOGGER.error("Invalid server version: %s", err)
            entry_hass_data[DATA_INVALID_SERVER_VERSION_LOGGED] = True
        if use_addon:
            async_ensure_addon_updated(hass)
        raise ConfigEntryNotReady from err
    except (asyncio.TimeoutError, BaseZwaveJSServerError) as err:
        if not entry_hass_data.get(DATA_CONNECT_FAILED_LOGGED):
            LOGGER.error("Failed to connect: %s", err)
            entry_hass_data[DATA_CONNECT_FAILED_LOGGED] = True
        raise ConfigEntryNotReady from err
    else:
        LOGGER.info("Connected to Zwave JS Server")
        entry_hass_data[DATA_CONNECT_FAILED_LOGGED] = False
        entry_hass_data[DATA_INVALID_SERVER_VERSION_LOGGED] = False
    services = ZWaveServices(hass, ent_reg, dev_reg)
    services.async_register()
    # Set up websocket API
    async_register_api(hass)
    async def start_platforms() -> None:
        """Start platforms and perform discovery."""
        driver_ready = asyncio.Event()
        async def handle_ha_shutdown(event: Event) -> None:
            """Handle HA shutdown."""
            await disconnect_client(hass, entry, client, listen_task, platform_task)
        listen_task = asyncio.create_task(
            client_listen(hass, entry, client, driver_ready)
        )
        entry_hass_data[DATA_CLIENT_LISTEN_TASK] = listen_task
        unsubscribe_callbacks.append(
            hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, handle_ha_shutdown)
        )
        try:
            await driver_ready.wait()
        except asyncio.CancelledError:
            LOGGER.debug("Cancelling start platforms")
            return
        LOGGER.info("Connection to Zwave JS Server initialized")
        # If opt in preference hasn't been specified yet, we do nothing, otherwise
        # we apply the preference
        if opted_in := entry.data.get(CONF_DATA_COLLECTION_OPTED_IN):
            await async_enable_statistics(client)
        elif opted_in is False:
            await client.driver.async_disable_statistics()
        # Check for nodes that no longer exist and remove them
        stored_devices = device_registry.async_entries_for_config_entry(
            dev_reg, entry.entry_id
        )
        known_devices = [
            dev_reg.async_get_device({get_device_id(client, node)})
            for node in client.driver.controller.nodes.values()
        ]
        # Devices that are in the device registry that are not known by the controller can be removed
        for device in stored_devices:
            if device not in known_devices:
                dev_reg.async_remove_device(device.id)
        # run discovery on all ready nodes
        await asyncio.gather(
            *[
                async_on_node_added(node)
                for node in client.driver.controller.nodes.values()
            ]
        )
        # listen for new nodes being added to the mesh
        unsubscribe_callbacks.append(
            client.driver.controller.on(
                "node added",
                lambda event: hass.async_create_task(
                    async_on_node_added(event["node"])
                ),
            )
        )
        # listen for nodes being removed from the mesh
        # NOTE: This will not remove nodes that were removed when HA was not running
        unsubscribe_callbacks.append(
            client.driver.controller.on(
                "node removed", lambda event: async_on_node_removed(event["node"])
            )
        )
    platform_task = hass.async_create_task(start_platforms())
    entry_hass_data[DATA_START_PLATFORM_TASK] = platform_task
    return True
async def client_listen(
    hass: HomeAssistant,
    entry: ConfigEntry,
    client: ZwaveClient,
    driver_ready: asyncio.Event,
) -> None:
    """Listen with the client.

    Runs until the connection drops or the task is cancelled. On any
    non-cancellation exit the config entry is reloaded so a fresh driver
    state can be acquired on reconnect.
    """
    should_reload = True
    try:
        await client.listen(driver_ready)
    except asyncio.CancelledError:
        # Cancellation means HA is shutting down / unloading: don't reload.
        should_reload = False
    except BaseZwaveJSServerError as err:
        LOGGER.error("Failed to listen: %s", err)
    except Exception as err:  # pylint: disable=broad-except
        # We need to guard against unknown exceptions to not crash this task.
        LOGGER.exception("Unexpected exception: %s", err)
    # The entry needs to be reloaded since a new driver state
    # will be acquired on reconnect.
    # All model instances will be replaced when the new state is acquired.
    if should_reload:
        LOGGER.info("Disconnected from server. Reloading integration")
        hass.async_create_task(hass.config_entries.async_reload(entry.entry_id))
async def disconnect_client(
    hass: HomeAssistant,
    entry: ConfigEntry,
    client: ZwaveClient,
    listen_task: asyncio.Task,
    platform_task: asyncio.Task,
) -> None:
    """Cancel background tasks for the entry and disconnect the client."""
    entry_data = hass.data[DOMAIN].get(entry.entry_id, {})
    setup_tasks = entry_data.get(DATA_PLATFORM_SETUP, {}).values()
    all_tasks = [listen_task, platform_task, *setup_tasks]
    for task in all_tasks:
        task.cancel()
    # Wait for all tasks to finish unwinding before tearing down the socket.
    await asyncio.gather(*all_tasks)
    if client.connected:
        await client.disconnect()
        LOGGER.info("Disconnected from Zwave JS Server")
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    info = hass.data[DOMAIN].pop(entry.entry_id)
    for unsubscribe in info[DATA_UNSUBSCRIBE]:
        unsubscribe()
    pending = []
    for platform, setup_task in info[DATA_PLATFORM_SETUP].items():
        if not setup_task.done():
            # Platform setup still in flight: cancel it and await the task.
            setup_task.cancel()
            pending.append(setup_task)
        else:
            # Platform finished setting up: unload it properly.
            pending.append(
                hass.config_entries.async_forward_entry_unload(entry, platform)
            )
    unload_ok = all(await asyncio.gather(*pending))
    if DATA_CLIENT_LISTEN_TASK in info:
        await disconnect_client(
            hass,
            entry,
            info[DATA_CLIENT],
            info[DATA_CLIENT_LISTEN_TASK],
            platform_task=info[DATA_START_PLATFORM_TASK],
        )
    if entry.data.get(CONF_USE_ADDON) and entry.disabled_by:
        addon_manager: AddonManager = get_addon_manager(hass)
        LOGGER.debug("Stopping Z-Wave JS add-on")
        try:
            await addon_manager.async_stop_addon()
        except AddonError as err:
            LOGGER.error("Failed to stop the Z-Wave JS add-on: %s", err)
            return False
    return unload_ok
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Remove a config entry.

    If this integration installed the add-on, stop it, snapshot it and
    uninstall it; abort at the first add-on error.
    """
    if not entry.data.get(CONF_INTEGRATION_CREATED_ADDON):
        return
    addon_manager: AddonManager = get_addon_manager(hass)
    for step in (
        addon_manager.async_stop_addon,
        addon_manager.async_create_snapshot,
        addon_manager.async_uninstall_addon,
    ):
        try:
            await step()
        except AddonError as err:
            LOGGER.error(err)
            return
async def async_ensure_addon_running(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Ensure that Z-Wave JS add-on is installed and running.

    :raises ConfigEntryNotReady: while install/start is in progress or if
        the add-on info cannot be fetched.
    """
    addon_manager: AddonManager = get_addon_manager(hass)
    if addon_manager.task_in_progress():
        raise ConfigEntryNotReady
    try:
        addon_info = await addon_manager.async_get_addon_info()
    except AddonError as err:
        LOGGER.error(err)
        raise ConfigEntryNotReady from err
    usb_path: str = entry.data[CONF_USB_PATH]
    network_key: str = entry.data[CONF_NETWORK_KEY]
    if addon_info.state == AddonState.NOT_INSTALLED:
        addon_manager.async_schedule_install_setup_addon(
            usb_path, network_key, catch_error=True
        )
        raise ConfigEntryNotReady
    if addon_info.state == AddonState.NOT_RUNNING:
        addon_manager.async_schedule_setup_addon(
            usb_path, network_key, catch_error=True
        )
        raise ConfigEntryNotReady
    # Sync the config entry with the add-on's actual device and network key.
    addon_device = addon_info.options[CONF_ADDON_DEVICE]
    addon_network_key = addon_info.options[CONF_ADDON_NETWORK_KEY]
    updates = {}
    if usb_path != addon_device:
        updates[CONF_USB_PATH] = addon_device
    if network_key != addon_network_key:
        updates[CONF_NETWORK_KEY] = addon_network_key
    if updates:
        hass.config_entries.async_update_entry(entry, data={**entry.data, **updates})
@callback
def async_ensure_addon_updated(hass: HomeAssistant) -> None:
    """Ensure that Z-Wave JS add-on is updated and running.

    :raises ConfigEntryNotReady: if an add-on management task is already
        in progress; otherwise the update is scheduled asynchronously and
        errors are handled by the manager (catch_error=True).
    """
    addon_manager: AddonManager = get_addon_manager(hass)
    if addon_manager.task_in_progress():
        raise ConfigEntryNotReady
    addon_manager.async_schedule_update_addon(catch_error=True)
| |
"""This module contains a collection of unit tests which
validate the ..request_handler module.
"""
import base64
import httplib
import json
import re
import uuid
import mock
import tor_async_util
import tornado
import tornado.netutil
import tornado.testing
import tornado.web
from ..async_actions import AsyncEndToEndContainerRunner # noqa
import ecs
from ..request_handlers import HealthRequestHandler
from ..request_handlers import NoOpRequestHandler
from ..request_handlers import TasksRequestHandler
from ..request_handlers import VersionRequestHandler
class Patcher(object):
    """An abstract base class for all patcher context managers.

    Wraps a mock patcher object: entering the context starts the patch
    and exiting stops it.
    """

    def __init__(self, patcher):
        super(Patcher, self).__init__()
        self._patcher = patcher

    def __enter__(self):
        self._patcher.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._patcher.stop()
class AsyncHTTPClientPatcher(Patcher):
    """This context manager provides an easy way to install a
    patch allowing the caller to determine the behavior of
    tornado.httpclient.AsyncHTTPClient.fetch().
    """

    def __init__(self, response):
        # Any fetch() performed while the patch is active immediately
        # invokes the callback with the canned response.
        def fetch_patch(ahc, request, callback):
            callback(response)
        Patcher.__init__(self, mock.patch(
            'tornado.httpclient.AsyncHTTPClient.fetch',
            fetch_patch))
class AsyncEndToEndContainerRunnerPatcher(Patcher):
    """This context manager provides an easy way to install a
    patch allowing the caller to determine the behavior of
    AsyncEndToEndContainerRunner.create().
    """

    def __init__(self,
                 is_ok,
                 is_image_found=None,
                 exit_code=None,
                 stdout=None,
                 stderr=None):
        # create() invokes the callback immediately with the canned outcome.
        def create_patch(acr, callback):
            callback(is_ok, is_image_found, exit_code, stdout, stderr, acr)
        target = __name__ + '.AsyncEndToEndContainerRunner.create'
        Patcher.__init__(self, mock.patch(target, create_patch))
class WriteAndVerifyPatcher(Patcher):
    """This context manager provides an easy way to install a
    patch allowing the caller to determine the behavior of
    tor_async_util's write_and_verify().
    """

    def __init__(self, is_ok):
        # write_and_verify() simply reports the canned success/failure flag.
        def write_and_verify_patch(request_handler, body, schema):
            return is_ok
        target = 'tor_async_util.RequestHandler.write_and_verify'
        Patcher.__init__(self, mock.patch(target, write_and_verify_patch))
class AsyncRequestHandlerTestCase(tornado.testing.AsyncHTTPTestCase):
    """Base class with assertion helpers shared by the request handler
    test cases below.
    """

    def assertDebugDetail(self, response, expected_value):
        """Assert a debug failure detail HTTP header appears in
        ``response`` with a value equal to ``expected_value``."""
        value = response.headers.get(
            tor_async_util.debug_details_header_name,
            None)
        self.assertIsNotNone(value)
        self.assertTrue(value.startswith("0x"))
        self.assertEqual(int(value, 16), expected_value)

    def assertNoDebugDetail(self, response):
        """Assert *no* debug failure detail HTTP header appears
        in ``response``."""
        value = response.headers.get(
            tor_async_util.debug_details_header_name,
            None)
        self.assertIsNone(value)

    def assertJsonDocumentResponse(self, response, expected_body):
        """Assert ``response`` has a JSON content type and that its
        body parses to ``expected_body``."""
        content_type = response.headers.get('Content-Type', None)
        self.assertIsNotNone(content_type)
        # Raw string so the regex escapes (\s) are not mangled by Python
        # string-literal escaping (invalid escape sequences are a
        # SyntaxError on modern Python); accepts "utf8" or "utf-8".
        json_utf8_content_type_reg_ex = re.compile(
            r'^\s*application/json(;\s+charset\=utf-{0,1}8){0,1}\s*$',
            re.IGNORECASE)
        self.assertIsNotNone(json_utf8_content_type_reg_ex.match(content_type))
        self.assertEqual(json.loads(response.body), expected_body)

    def assertEmptyJsonDocumentResponse(self, response):
        """Assert ``response``'s body is the empty JSON document ``{}``."""
        self.assertJsonDocumentResponse(response, {})
class TasksRequestHandlerTestCase(AsyncRequestHandlerTestCase):
    """Unit tests for TasksRequestHandler"""

    def get_app(self):
        # Application routing only the handler under test.
        handlers = [
            (
                TasksRequestHandler.url_spec,
                TasksRequestHandler
            ),
        ]
        return tornado.web.Application(handlers=handlers)

    def test_post_bad_request_body(self):
        # An empty JSON document fails request-body validation and is
        # rejected with 400 plus the matching debug-detail header.
        headers = {
            'Content-Type': 'application/json; charset=utf-8',
        }
        body = {
        }
        response = self.fetch(
            '/v1.1/tasks',
            method='POST',
            headers=headers,
            body=json.dumps(body))
        self.assertEqual(response.code, httplib.BAD_REQUEST)
        self.assertDebugDetail(
            response,
            TasksRequestHandler.PDD_BAD_REQUEST_BODY)
        self.assertEmptyJsonDocumentResponse(response)

    def test_container_runner_error(self):
        # A container-runner failure (is_ok=False) surfaces as a 500
        # with the "error creating raw crawl" debug detail.
        with AsyncEndToEndContainerRunnerPatcher(is_ok=False):
            headers = {
                'Content-Type': 'application/json; charset=utf-8',
            }
            body = {
                'docker_image': 'ubuntu:latest',
                'cmd': [
                    'echo',
                    'hello world!!!',
                ],
            }
            response = self.fetch(
                '/v1.1/tasks',
                method='POST',
                headers=headers,
                body=json.dumps(body))
            self.assertEqual(response.code, httplib.INTERNAL_SERVER_ERROR)
            self.assertDebugDetail(
                response,
                TasksRequestHandler.PDD_ERROR_CREATING_RAW_CRAWL)
            self.assertEmptyJsonDocumentResponse(response)

    def test_image_not_found(self):
        # A run against an unknown docker image surfaces as a 404.
        with AsyncEndToEndContainerRunnerPatcher(is_ok=True, is_image_found=False):
            headers = {
                'Content-Type': 'application/json; charset=utf-8',
            }
            body = {
                'docker_image': 'ubuntu:latest',
                'cmd': [
                    'echo',
                    'hello world!!!',
                ],
            }
            response = self.fetch(
                '/v1.1/tasks',
                method='POST',
                headers=headers,
                body=json.dumps(body))
            self.assertEqual(response.code, httplib.NOT_FOUND)
            self.assertDebugDetail(
                response,
                TasksRequestHandler.PDD_IMAGE_NOT_FOUND)
            self.assertEmptyJsonDocumentResponse(response)

    def test_response_body_error(self):
        # When the handler's own response body fails schema
        # verification, the client sees a 500 with the "bad response
        # body" debug detail.
        exit_code = 45
        stdout = uuid.uuid4().hex
        stderr = uuid.uuid4().hex
        with AsyncEndToEndContainerRunnerPatcher(is_ok=True,
                                                 is_image_found=True,
                                                 exit_code=exit_code,
                                                 stdout=stdout,
                                                 stderr=stderr):
            with WriteAndVerifyPatcher(is_ok=False):
                headers = {
                    'Content-Type': 'application/json; charset=utf-8',
                }
                body = {
                    'docker_image': 'ubuntu:latest',
                    'cmd': [
                        'echo',
                        'hello world!!!',
                    ],
                }
                response = self.fetch(
                    '/v1.1/tasks',
                    method='POST',
                    headers=headers,
                    body=json.dumps(body))
                self.assertEqual(response.code, httplib.INTERNAL_SERVER_ERROR)
                self.assertDebugDetail(response, TasksRequestHandler.PDD_BAD_RESPONSE_BODY)
                self.assertEmptyJsonDocumentResponse(response)

    def test_happy_path(self):
        # A successful run returns 201 with the exit code and the
        # base64-encoded stdout/stderr of the container.
        exit_code = 45
        stdout = uuid.uuid4().hex
        stderr = uuid.uuid4().hex
        with AsyncEndToEndContainerRunnerPatcher(is_ok=True,
                                                 is_image_found=True,
                                                 exit_code=exit_code,
                                                 stdout=stdout,
                                                 stderr=stderr):
            headers = {
                'Content-Type': 'application/json; charset=utf-8',
            }
            body = {
                'docker_image': 'ubuntu:latest',
                'cmd': [
                    'echo',
                    'hello world!!!',
                ],
            }
            response = self.fetch(
                '/v1.1/tasks',
                method='POST',
                headers=headers,
                body=json.dumps(body))
            self.assertEqual(response.code, httplib.CREATED)
            self.assertNoDebugDetail(response)
            expected_body = {
                'exitCode': exit_code,
                'stdout': base64.b64encode(stdout),
                'stderr': base64.b64encode(stderr),
            }
            self.assertJsonDocumentResponse(response, expected_body)
class VersionRequestHandlerTestCase(AsyncRequestHandlerTestCase):
    """Unit tests for VersionRequestHandler"""

    def get_app(self):
        # Application routing only the handler under test.
        handlers = [
            (
                VersionRequestHandler.url_spec,
                VersionRequestHandler
            ),
        ]
        return tornado.web.Application(handlers=handlers)

    def test_happy_path(self):
        # GET /_version returns 200, a location header echoing the
        # request URL, the package version and a self link.
        response = self.fetch('/v1.1/_version', method='GET')
        self.assertEqual(response.code, httplib.OK)
        self.assertNoDebugDetail(response)
        self.assertEqual(
            response.headers['location'],
            response.effective_url)
        expected_response_body = {
            'version': ecs.__version__,
            'links': {
                'self': {
                    'href': response.effective_url,
                },
            },
        }
        self.assertJsonDocumentResponse(response, expected_response_body)
class NoOpRequestHandlerTestCase(AsyncRequestHandlerTestCase):
    """Unit tests for NoOpRequestHandler"""

    def get_app(self):
        # Application routing only the handler under test.
        handlers = [
            (
                NoOpRequestHandler.url_spec,
                NoOpRequestHandler
            ),
        ]
        return tornado.web.Application(handlers=handlers)

    def test_happy_path(self):
        # GET /_noop returns 200, a location header echoing the request
        # URL and a body containing only a self link.
        response = self.fetch('/v1.1/_noop', method='GET')
        self.assertEqual(response.code, httplib.OK)
        self.assertNoDebugDetail(response)
        self.assertEqual(
            response.headers['location'],
            response.effective_url)
        expected_response_body = {
            'links': {
                'self': {
                    'href': response.effective_url,
                },
            },
        }
        self.assertJsonDocumentResponse(response, expected_response_body)
class HealthRequestHandlerTestCase(AsyncRequestHandlerTestCase):
    """Unit tests for HealthRequestHandler"""

    def get_app(self):
        # Application routing only the handler under test.
        handlers = [
            (
                HealthRequestHandler.url_spec,
                HealthRequestHandler
            ),
        ]
        return tornado.web.Application(handlers=handlers)

    def test_happy_path(self):
        # GET /_health returns 200 with a green status and a self link.
        response = self.fetch('/v1.1/_health', method='GET')
        self.assertEqual(response.code, httplib.OK)
        self.assertNoDebugDetail(response)
        self.assertEqual(
            response.headers['location'],
            response.effective_url)
        expected_response_body = {
            'status': 'green',
            'links': {
                'self': {
                    'href': response.effective_url,
                },
            },
        }
        self.assertJsonDocumentResponse(response, expected_response_body)
| |
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api as dash_api
from openstack_dashboard.contrib.trove import api
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
class SetInstanceDetailsAction(workflows.Action):
    """First launch-workflow step: instance name, flavor, volume size
    and datastore type/version.
    """

    name = forms.CharField(max_length=80, label=_("Instance Name"))
    flavor = forms.ChoiceField(label=_("Flavor"),
                               help_text=_("Size of image to launch."))
    volume = forms.IntegerField(label=_("Volume Size"),
                                min_value=0,
                                initial=1,
                                help_text=_("Size of the volume in GB."))
    datastore = forms.ChoiceField(label=_("Datastore"),
                                  help_text=_(
                                      "Type and version of datastore."))

    class Meta(object):
        name = _("Details")
        help_text_template = "project/databases/_launch_details_help.html"

    def clean(self):
        # The "choose one" placeholder is not a real datastore selection.
        if self.data.get("datastore", None) == "select_datastore_type_version":
            msg = _("You must select a datastore type and version.")
            self._errors["datastore"] = self.error_class([msg])
        return self.cleaned_data

    @memoized.memoized_method
    def flavors(self, request):
        """Return the trove flavor list; redirects to the index on error."""
        try:
            return api.trove.flavor_list(request)
        except Exception:
            LOG.exception("Exception while obtaining flavors list")
            redirect = reverse("horizon:project:databases:index")
            exceptions.handle(request,
                              _('Unable to obtain flavors.'),
                              redirect=redirect)

    def populate_flavor_choices(self, request, context):
        flavors = self.flavors(request)
        if flavors:
            return instance_utils.sort_flavor_list(request, flavors)
        return []

    @memoized.memoized_method
    def datastores(self, request):
        """Return the datastore list; an empty list on error."""
        try:
            return api.trove.datastore_list(request)
        except Exception:
            LOG.exception("Exception while obtaining datastores list")
            # Return an empty list (the original assigned a dead
            # attribute and implicitly returned None) so callers can
            # safely iterate the result.
            return []

    @memoized.memoized_method
    def datastore_versions(self, request, datastore):
        """Return the versions of ``datastore``; an empty list on error."""
        try:
            return api.trove.datastore_version_list(request, datastore)
        except Exception:
            LOG.exception("Exception while obtaining datastore version list")
            # Return an empty list (not None) so len()/truth tests in
            # populate_datastore_choices() cannot raise TypeError.
            return []

    def populate_datastore_choices(self, request, context):
        # Build (datastore, ((value, label), ...)) option groups where
        # value encodes "<datastore>,<version>".  A placeholder entry is
        # prepended whenever the selection is ambiguous: any datastore
        # with two or more versions, or more than one single-version
        # datastore.
        choices = ()
        set_initial = False
        datastores = self.datastores(request)
        if datastores is not None:
            num_datastores_with_one_version = 0
            for ds in datastores:
                versions = self.datastore_versions(request, ds.name)
                if not set_initial:
                    if len(versions) >= 2:
                        set_initial = True
                    elif len(versions) == 1:
                        num_datastores_with_one_version += 1
                        if num_datastores_with_one_version > 1:
                            set_initial = True
                if versions:
                    # only add to choices if datastore has at least one version
                    version_choices = ()
                    for v in versions:
                        version_choices = (version_choices +
                                           ((ds.name + ',' + v.name, v.name),))
                    datastore_choices = (ds.name, version_choices)
                    choices = choices + (datastore_choices,)
            if set_initial:
                # prepend choice to force user to choose
                initial = (('select_datastore_type_version',
                            _('Select datastore type and version')))
                choices = (initial,) + choices
        return choices
# Permission lists pulled from the Django settings; the combined list
# gates visibility of the "Initialize Databases" workflow step below.
TROVE_ADD_USER_PERMS = getattr(settings, 'TROVE_ADD_USER_PERMS', [])
TROVE_ADD_DATABASE_PERMS = getattr(settings, 'TROVE_ADD_DATABASE_PERMS', [])
TROVE_ADD_PERMS = TROVE_ADD_USER_PERMS + TROVE_ADD_DATABASE_PERMS
class SetInstanceDetails(workflows.Step):
    """Workflow step wiring SetInstanceDetailsAction into LaunchInstance."""
    action_class = SetInstanceDetailsAction
    contributes = ("name", "volume", "flavor", "datastore")
class SetNetworkAction(workflows.Action):
    """Workflow step action for choosing the instance's networks."""

    network = forms.MultipleChoiceField(label=_("Networks"),
                                        widget=forms.CheckboxSelectMultiple(),
                                        error_messages={
                                            'required': _(
                                                "At least one network must"
                                                " be specified.")},
                                        help_text=_("Launch instance with"
                                                    " these networks"))

    def __init__(self, request, *args, **kwargs):
        super(SetNetworkAction, self).__init__(request, *args, **kwargs)
        choices = self.fields["network"].choices
        # With exactly one network available, pre-select it for the user.
        if len(choices) == 1:
            self.fields['network'].initial = [choices[0][0]]

    class Meta(object):
        name = _("Networking")
        permissions = ('openstack.services.network',)
        help_text = _("Select networks for your instance.")

    def populate_network_choices(self, request, context):
        """Return (id, name) pairs for the tenant's neutron networks."""
        try:
            tenant_id = self.request.user.tenant_id
            tenant_networks = dash_api.neutron.network_list_for_tenant(
                request, tenant_id)
            choices = [(net.id, net.name_or_id) for net in tenant_networks]
        except Exception:
            choices = []
            exceptions.handle(request,
                              _('Unable to retrieve networks.'))
        return choices
class SetNetwork(workflows.Step):
    """Workflow step wiring SetNetworkAction into LaunchInstance."""

    action_class = SetNetworkAction
    template_name = "project/databases/_launch_networks.html"
    contributes = ("network_id",)

    def contribute(self, data, context):
        if data:
            posted = self.workflow.request.POST.getlist("network")
            # When no networks are explicitly specified the posted list
            # contains an empty-string placeholder, so drop it.
            selected = [net for net in posted if net != '']
            if selected:
                context['network_id'] = selected
        return context
class AddDatabasesAction(workflows.Action):
    """Initialize the database with users/databases. This tab will honor
    the settings which should be a list of permissions required:

    * TROVE_ADD_USER_PERMS = []
    * TROVE_ADD_DATABASE_PERMS = []
    """

    databases = forms.CharField(label=_('Initial Databases'),
                                required=False,
                                help_text=_('Comma separated list of '
                                            'databases to create'))
    user = forms.CharField(label=_('Initial Admin User'),
                           required=False,
                           help_text=_("Initial admin user to add"))
    password = forms.CharField(widget=forms.PasswordInput(),
                               label=_("Password"),
                               required=False)
    host = forms.CharField(label=_("Allowed Host (optional)"),
                           required=False,
                           help_text=_("Host or IP that the user is allowed "
                                       "to connect through."))

    class Meta(object):
        name = _("Initialize Databases")
        permissions = TROVE_ADD_PERMS
        help_text_template = "project/databases/_launch_initialize_help.html"

    def clean(self):
        """A user entry is only valid with a password and >=1 database."""
        data = super(AddDatabasesAction, self).clean()
        if not data.get('user'):
            # No initial user requested; nothing further to validate.
            return data
        if not data.get('password'):
            self._errors["password"] = self.error_class(
                [_('You must specify a password if you create a user.')])
        if not data.get('databases'):
            self._errors["databases"] = self.error_class(
                [_('You must specify at least one database if '
                   'you create a user.')])
        return data
class InitializeDatabase(workflows.Step):
    """Workflow step wiring AddDatabasesAction into LaunchInstance."""
    action_class = AddDatabasesAction
    # Tuple with uniform quoting for consistency with the other steps'
    # ``contributes`` declarations.
    contributes = ("databases", "user", "password", "host")
class AdvancedAction(workflows.Action):
    """Optional workflow step: seed the new instance from a completed
    backup, or replicate from an existing (master) instance.
    """

    # "switchable"/"switched" widget attrs drive Horizon's client-side
    # show/hide of the backup vs. master fields.
    initial_state = forms.ChoiceField(
        label=_('Source for Initial State'),
        required=False,
        help_text=_("Choose initial state."),
        choices=[
            ('', _('None')),
            ('backup', _('Restore from Backup')),
            ('master', _('Replicate from Instance'))],
        widget=forms.Select(attrs={
            'class': 'switchable',
            'data-slug': 'initial_state'
        }))
    backup = forms.ChoiceField(
        label=_('Backup Name'),
        required=False,
        help_text=_('Select a backup to restore'),
        widget=forms.Select(attrs={
            'class': 'switched',
            'data-switch-on': 'initial_state',
            'data-initial_state-backup': _('Backup Name')
        }))
    master = forms.ChoiceField(
        label=_('Master Instance Name'),
        required=False,
        help_text=_('Select a master instance'),
        widget=forms.Select(attrs={
            'class': 'switched',
            'data-switch-on': 'initial_state',
            'data-initial_state-master': _('Master Instance Name')
        }))

    class Meta(object):
        name = _("Advanced")
        help_text_template = "project/databases/_launch_advanced_help.html"

    def populate_backup_choices(self, request, context):
        # Only completed backups are restorable.
        try:
            backups = api.trove.backup_list(request)
            choices = [(b.id, b.name) for b in backups
                       if b.status == 'COMPLETED']
        except Exception:
            choices = []
        if choices:
            choices.insert(0, ("", _("Select backup")))
        else:
            choices.insert(0, ("", _("No backups available")))
        return choices

    def populate_master_choices(self, request, context):
        # Only ACTIVE instances can act as replication masters.
        try:
            instances = api.trove.instance_list(request)
            choices = [(i.id, i.name) for i in
                       instances if i.status == 'ACTIVE']
        except Exception:
            choices = []
        if choices:
            choices.insert(0, ("", _("Select instance")))
        else:
            choices.insert(0, ("", _("No instances available")))
        return choices

    def clean(self):
        # Verify the selected source actually exists, and null out the
        # field belonging to the source that was NOT selected so only
        # one of backup/master reaches the workflow context.
        cleaned_data = super(AdvancedAction, self).clean()
        initial_state = cleaned_data.get("initial_state")
        if initial_state == 'backup':
            backup = self.cleaned_data['backup']
            if backup:
                try:
                    bkup = api.trove.backup_get(self.request, backup)
                    self.cleaned_data['backup'] = bkup.id
                except Exception:
                    raise forms.ValidationError(_("Unable to find backup!"))
            else:
                raise forms.ValidationError(_("A backup must be selected!"))
            cleaned_data['master'] = None
        elif initial_state == 'master':
            master = self.cleaned_data['master']
            if master:
                try:
                    api.trove.instance_get(self.request, master)
                except Exception:
                    raise forms.ValidationError(
                        _("Unable to find master instance!"))
            else:
                raise forms.ValidationError(
                    _("A master instance must be selected!"))
            cleaned_data['backup'] = None
        else:
            cleaned_data['master'] = None
            cleaned_data['backup'] = None
        return cleaned_data
class Advanced(workflows.Step):
    """Workflow step wiring AdvancedAction into LaunchInstance."""
    action_class = AdvancedAction
    contributes = ['backup', 'master']
class LaunchInstance(workflows.Workflow):
    """Workflow that collects all launch parameters from its steps and
    creates the database instance via the trove API.
    """

    slug = "launch_instance"
    name = _("Launch Instance")
    finalize_button_name = _("Launch")
    success_message = _('Launched %(count)s named "%(name)s".')
    failure_message = _('Unable to launch %(count)s named "%(name)s".')
    success_url = "horizon:project:databases:index"
    default_steps = (SetInstanceDetails,
                     SetNetwork,
                     InitializeDatabase,
                     Advanced)

    def __init__(self, request=None, context_seed=None, entry_point=None,
                 *args, **kwargs):
        super(LaunchInstance, self).__init__(request, context_seed,
                                             entry_point, *args, **kwargs)
        # Honor the deployment's password autocomplete policy.
        self.attrs['autocomplete'] = (
            settings.HORIZON_CONFIG.get('password_autocomplete'))

    def format_status_message(self, message):
        name = self.context.get('name', 'unknown instance')
        return message % {"count": _("instance"), "name": name}

    def _get_databases(self, context):
        """Returns the initial databases for this instance."""
        databases = None
        if context.get('databases'):
            dbs = context['databases']
            databases = [{'name': d.strip()} for d in dbs.split(',')]
        return databases

    def _get_users(self, context):
        """Returns the initial admin user (with its databases), if any."""
        users = None
        if context.get('user'):
            user = {
                'name': context['user'],
                'password': context['password'],
                'databases': self._get_databases(context),
            }
            if context['host']:
                user['host'] = context['host']
            users = [user]
        return users

    def _get_backup(self, context):
        """Returns the restore point dict when a backup was selected."""
        backup = None
        if context.get('backup'):
            backup = {'backupRef': context['backup']}
        return backup

    def _get_nics(self, context):
        """Returns nova-style NIC dicts for the selected networks."""
        netids = context.get('network_id', None)
        if netids:
            return [{"net-id": netid, "v4-fixed-ip": ""}
                    for netid in netids]
        else:
            return None

    def handle(self, request, context):
        try:
            # The datastore choice is encoded as "<datastore>,<version>"
            # (see SetInstanceDetailsAction.populate_datastore_choices).
            # Split once with maxsplit=1 — instead of splitting twice and
            # taking [0]/[1] — so a comma in the version name cannot
            # truncate it, and use the ``context`` argument consistently.
            datastore, datastore_version = context['datastore'].split(',', 1)
            LOG.info("Launching database instance with parameters "
                     "{name=%s, volume=%s, flavor=%s, "
                     "datastore=%s, datastore_version=%s, "
                     "dbs=%s, users=%s, "
                     "backups=%s, nics=%s, replica_of=%s}",
                     context['name'], context['volume'], context['flavor'],
                     datastore, datastore_version,
                     self._get_databases(context), self._get_users(context),
                     self._get_backup(context), self._get_nics(context),
                     context.get('master'))
            api.trove.instance_create(request,
                                      context['name'],
                                      context['volume'],
                                      context['flavor'],
                                      datastore=datastore,
                                      datastore_version=datastore_version,
                                      databases=self._get_databases(context),
                                      users=self._get_users(context),
                                      restore_point=self._get_backup(context),
                                      nics=self._get_nics(context),
                                      replica_of=context.get('master'))
            return True
        except Exception:
            exceptions.handle(request)
            return False
| |
# sys
import re
from htmlentitydefs import name2codepoint as n2cp
from datetime import datetime
# twisted
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol, task
from twisted.python import log
# pinder
import pinder
# BeautifulSoup
from BeautifulSoup import BeautifulSoup
# config
from settings import *
class CampfireBot(object):
    """The Campfire part of the IRC <-> Campfire bridge."""

    def __init__(self, subdomain, room, email, password):
        self.host = "http://%s.campfirenow.com" % subdomain
        self.subdomain = subdomain
        self.email = email
        # Log in and join the room right away; the bridge is useless
        # until both have succeeded.
        self.client = pinder.Campfire(subdomain)
        self.client.login(email, password)
        self.room = self.client.find_room_by_name(room)
        self.room.join()

    def __str__(self):
        return "<%s: %s as %s>" % (self.host, self.room, self.email)

    def __getattr__(self, name):
        # Delegate anything not defined here (speak, ping, messages,
        # id, ...) to the underlying pinder room object.
        return getattr(self.room, name)

    def logout(self):
        self.room.leave()
        self.client.logout()

    def todays_transcript_url(self):
        """URL of today's transcript page for the bridged room."""
        date_fragment = datetime.now().strftime('%Y/%m/%d')
        return self.host + '/room/%s/transcript/%s' % (self.id, date_fragment)
# message filters
class MessageFilter(object):
    """Base class for message filters; subclasses transform messages."""

    def __init__(self, message):
        self.message = message

    @classmethod
    def filter_message(cls, message):
        # Thread the message through every direct subclass's filter,
        # feeding each filter the previous one's output.
        for filter_cls in cls.__subclasses__():
            message = filter_cls(message).filter()
        return message

    def filter(self):
        # Default behavior: pass the message through unchanged.
        return self.message
class IRCMessageFilter(MessageFilter):
    """Marker base class for filters applied to IRC -> Campfire text."""
    pass
class TwitterFilter(IRCMessageFilter):
    """Rewrite twitter links as twictur.es image links."""

    def filter(self):
        if 'twitter.com/' in self.message:
            match = re.search(r'(\d+)', self.message)
            # Guard: a twitter URL with no numeric status id would
            # otherwise crash on ``None.group(0)``.
            if match:
                self.message = 'http://twictur.es/i/%s.gif' % match.group(0)
        return self.message
class CampfireMessageFilter(MessageFilter):
    """Base class for filters applied to Campfire -> IRC messages.

    ``message`` is a dict with (at least) 'person' and 'message' keys;
    the HTML body is parsed into a BeautifulSoup tree for subclasses.
    """
    def __init__(self, message):
        self.message = message
        # Campfire bodies arrive as escaped HTML; unescape, then parse.
        self.soup = BeautifulSoup(message['message'].decode('unicode_escape'))
class ActionFilter(CampfireMessageFilter):
    """Format Campfire lines for IRC: leave join/leave notices alone,
    strip *action* markup, and prefix ordinary lines with the speaker.
    """

    def filter(self):
        text = self.message['message']
        if re.search(r'has (entered|left) the room', text):
            # Presence notices pass through untouched and unprefixed.
            pass
        elif re.search(r'^\*(.+)\*$', text):
            # "*does something*" becomes a bare action line.
            self.message['message'] = text.replace('*', '')
        else:
            self.message['person'] = self.message['person'] + ':'
        return self.message
class PasteFilter(CampfireMessageFilter):
    """Replace pasted code blocks with a link to the paste page."""

    def filter(self):
        if self.soup.find('pre'):
            paste_path = self.soup.find('a')['href']
            # hax: campfire paste hrefs are relative, so prefix the host.
            host = "http://%s.campfirenow.com" % CAMPFIRE_SUBDOMAIN
            self.message['message'] = host + paste_path
        return self.message
class ImageFilter(CampfireMessageFilter):
    """Replace inline images with their URL; twictures get the full view."""

    def filter(self):
        image = self.soup.find('img')
        if image:
            image_url = str(image['src'])
            if "twictur.es" in image_url:
                image_url = self.twicture_url(image_url)
            self.message['message'] = image_url
        return self.message

    def twicture_url(self, image):
        # /i/<id>.gif (thumbnail image) -> /r/<id> (full twicture page)
        return image.replace('/i/', '/r/').replace('.gif', '')
class LinkFilter(CampfireMessageFilter):
    """Collapse messages that are a single hyperlink to the bare URL."""

    def filter(self):
        anchor = self.soup.find('a')
        if anchor and len(self.soup.findAll(True)) == 1:
            # The anchor is the only tag in the body: emit just its href.
            self.message['message'] = str(anchor['href'])
        return self.message
class IRCBot(irc.IRCClient):
    """The IRC part of the IRC <-> Campfire bridge."""

    nickname = BOT_NAME

    # twisted callbacks
    def connectionMade(self):
        irc.IRCClient.connectionMade(self)
        # Join the Campfire room and start polling it every 5 seconds.
        self.campfire = CampfireBot(self.factory.subdomain, self.factory.room,
                                    self.factory.email, self.factory.password)
        self.channel = '#%s' % self.factory.channel
        self.lc = task.LoopingCall(self.new_messages_from_campfire)
        self.lc.start(5, False)

    def connectionLost(self, reason):
        irc.IRCClient.connectionLost(self, reason)
        self.campfire.logout()

    def new_messages_from_campfire(self):
        """Relay any new Campfire messages to the IRC channel."""
        # Imported here because the module never imports socket at the
        # top; without this the except clause below raised NameError
        # instead of handling the timeout.
        import socket
        self.campfire.ping()
        try:
            for message in self.campfire.messages():
                message = CampfireMessageFilter.filter_message(message)
                msg = "%s %s" % (message['person'], message['message'])
                msg = self.decode_htmlentities(msg.decode('unicode_escape'))
                self.speak(msg)
        except socket.timeout:
            # Campfire's poll timed out; try again on the next tick.
            pass

    # irc callbacks
    def signedOn(self):
        self.join(self.channel)
        self.commands = IRCCommands(campfire=self.campfire, irc=self)

    def joined(self, channel):
        # Announce which Campfire room is bridged and link its transcript.
        self.speak("Room '%s' in %s: %s" %
                   (self.factory.room, self.factory.subdomain,
                    self.campfire.todays_transcript_url()))

    def irc_PING(self, prefix, params):
        irc.IRCClient.irc_PING(self, prefix, params)
        # Keep the Campfire session alive while the IRC link is idle.
        self.campfire.ping()

    def action(self, user, channel, data):
        # Forward "/me" actions from the blessed user as *action* text.
        user = user.split('!')[0]
        action = '*' + data + '*'
        if user == BLESSED_USER:
            self.campfire.speak(action)
        self.log(channel, user, action)

    def privmsg(self, user, channel, msg):
        # Either dispatch a bot command or relay the line to Campfire;
        # only the blessed user's messages cross the bridge.
        user = user.split('!')[0]
        self.log(channel, user, msg)
        if user == BLESSED_USER:
            if self.iscommand(msg):
                parts = msg.split(' ')
                command = parts[1]
                args = parts[2:]
                out = self.commands._send(command, args)
                self.speak(out)
            else:
                out = IRCMessageFilter.filter_message(msg)
                self.campfire.speak(out)

    def iscommand(self, msg):
        """A command is any line addressed to the bot by name."""
        return BOT_NAME in msg.split(' ')[0]

    # other bot methods
    def speak(self, message):
        self.msg(self.channel, str(message))
        self.log(self.channel, self.nickname, message)

    def log(self, channel, user, msg):
        # Parenthesized single expression: identical output under
        # Python 2's print statement and Python 3's print function.
        print("%s <%s> %s" % (channel, user, msg))

    def __str__(self):
        return "<%s: %s as %s>" % (IRC_SERVER, self.channel, self.nickname)

    def decode_htmlentities(self, string):
        """
        Decode HTML entities-hex, decimal, or named-in a string
        @see http://snippets.dzone.com/posts/show/4569
        @see http://github.com/sku/python-twitter-ircbot/blob/321d94e0e40d0acc92f5bf57d126b57369da70de/html_decode.py
        """
        def substitute_entity(match):
            ent = match.group(3)
            if match.group(1) == "#":
                # decoding by number
                if match.group(2) == '':
                    # number is in decimal
                    return unichr(int(ent))
                elif match.group(2) == 'x':
                    # number is in hex
                    return unichr(int('0x'+ent, 16))
            else:
                # they were using a name
                cp = n2cp.get(ent)
                if cp:
                    return unichr(cp)
                else:
                    return match.group()

        entity_re = re.compile(r'&(#?)(x?)(\w+);')
        return entity_re.subn(substitute_entity, string)[0]
class IRCBotFactory(protocol.ClientFactory):
    """
    A factory for IRCBot.

    A new protocol instance will be created each time we connect to the server.
    """

    protocol = IRCBot

    def __init__(self):
        # All bridge configuration comes from settings (star-imported
        # at the top of the module).
        self.channel = IRC_CHANNEL
        self.subdomain = CAMPFIRE_SUBDOMAIN
        self.room = CAMPFIRE_ROOM
        self.email = CAMPFIRE_EMAIL
        self.password = CAMPFIRE_PASSWORD

    def clientConnectionLost(self, connector, reason):
        """Reconnect to server on disconnect."""
        connector.connect()

    def clientConnectionFailed(self, connector, reason):
        # Give up entirely when the initial connection cannot be made.
        print "connection failed:", reason
        reactor.stop()
class IRCCommands(object):
    """
    Commands the IRC bot responds to.

    Each method is a command, passed all subsequent words.

    e.g.
      <defunkt> bot: help
    calls: bot.help([])
      <defunkt> bot: guest on
    calls: bot.guest(['on'])

    Returning a non-empty string replies to the channel.
    """

    def __init__(self, campfire, irc):
        self.campfire = campfire
        self.irc = irc

    def _send(self, command, args):
        """Dispatch method. Not a command.

        Unknown commands and failing commands reply with an empty
        string instead of crashing the bot.
        """
        try:
            method = getattr(self, command)
            return method(args)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            return ''

    def help(self, args):
        """List the commands this object knows how to run."""
        methods = dir(self)
        methods.remove('_send')
        # Keep only public bound methods; attributes like ``campfire``
        # and ``irc`` fail the type check.
        methods = [x for x in methods if not '__' in x and type(getattr(self, x)) == type(self._send)]
        return "I know these commands: " + ', '.join(methods)

    def users(self, args):
        """List the people currently in the Campfire room."""
        return ', '.join(self.campfire.users())

    def transcript(self, args):
        """Link to today's Campfire transcript."""
        return self.campfire.todays_transcript_url()
if __name__ == '__main__':
    # Start the IRC side of the bridge; the Campfire side is brought up
    # in IRCBot.connectionMade once the IRC connection succeeds.
    f = IRCBotFactory()
    reactor.connectTCP(IRC_SERVER, IRC_PORT, f)
    reactor.run()
| |
#!/usr/bin/env python2
# This program takes OpenMC statepoint binary files and creates a variety of
# outputs from them which should provide the user with an idea of the
# convergence behavior of all the tallies and filters defined by the user in
# tallies.xml. The program can directly plot the value and errors of each
# tally, filter, score combination; it can save these plots to a file; and
# it can also save the data used in these plots to a CSV file for importing in
# to other plotting packages such as Excel, gnuplot, MathGL, or Veusz.
# To use the program, run this program from the working directory of the openMC
# problem to analyze.
# The USER OPTIONS block below provides four options for the user to set:
# fileType, printxs, showImg, and savetoCSV. See the options block for more
# information.
from math import sqrt, pow
from glob import glob
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
from statepoint import StatePoint
##################################### USER OPTIONS

# Set filetype (the file extension desired, without the period.)
# Options are backend dependent, but most backends support png, pdf, ps, eps
# and svg. Write "none" if no saved files are desired.
fileType = "none"

# Set if cross-sections or reaction rates are desired printxs = True means X/S
printxs = False

# Set if the figures should be displayed to screen or not (True means show)
showImg = False

# Save to CSV for use in more advanced plotting programs like GNUPlot, MathGL
savetoCSV = True

##################################### END USER OPTIONS

## Find if tallies.xml exists.
#if glob('./tallies.xml') != None:
#    # It exists
#    tallyData = talliesXML('tallies.xml')
#else:
#    # It does not exist.
#    tallyData = None

# Discover every statepoint file in this directory and order the list
# by batch number so the convergence data is processed chronologically.
begin = 13  # index just past the "./statepoint." prefix
fileNums = sorted(
    int(name[begin:name.find(".binary")])
    for name in glob('./statepoint.*.binary'))
files = ["./statepoint." + str(num) + ".binary" for num in fileNums]

# Pre-size the per-batch result containers (filled in below).
mean = [None] * len(files)
uncert = [None] * len(files)
scoreType = [None] * len(files)
active_batches = [None] * len(files)
# Read every statepoint file (one per dumped batch) and accumulate results
# into the nested lists indexed as
#   mean/uncert/scoreType[i_batch][i_tally][i_filter][i_nuclide][i_score]
# NOTE: the sp._get_int()/_get_double() calls consume values sequentially
# from the binary stream, so the statement order below is critical.
for i_batch in range(len(files)):
    # Get filename
    batch_filename = files[i_batch]
    # Create StatePoint object (parses the file header up to the tally data)
    sp = StatePoint(batch_filename)
    # Read number of realizations for global tallies
    sp.n_realizations = sp._get_int()[0]
    # Read global tallies as (value, sum-of-squares) pairs
    n_global_tallies = sp._get_int()[0]
    sp.global_tallies = np.array(sp._get_double(2*n_global_tallies))
    sp.global_tallies.shape = (n_global_tallies, 2)
    # Flag indicating if tallies are present
    tallies_present = sp._get_int()[0]
    # Check if tallies are present
    if not tallies_present:
        raise Exception("No tally data in state point!")
    # Increase the dimensionality of our main variables (2nd dim: tally)
    mean[i_batch] = [None for x in range(len(sp.tallies))]
    uncert[i_batch] = [None for x in range(len(sp.tallies))]
    scoreType[i_batch] = [None for x in range(len(sp.tallies))]
    # Loop over all tallies
    for i_tally, t in enumerate(sp.tallies):
        # Calculate t-value for 95% two-sided CI
        n = t.n_realizations
        t_value = scipy.stats.t.ppf(0.975, n - 1)
        # Store the batch count (number of active realizations)
        active_batches[i_batch] = n
        # Resize the 2nd dimension
        mean[i_batch][i_tally] = [None for x in range(t.total_filter_bins)]
        uncert[i_batch][i_tally] = [None for x in range(t.total_filter_bins)]
        scoreType[i_batch][i_tally] = [None for x in range(t.total_filter_bins)]
        for i_filter in range(t.total_filter_bins):
            # Resize the 3rd dimension
            mean[i_batch][i_tally][i_filter] = [None for x in range(t.n_nuclides)]
            uncert[i_batch][i_tally][i_filter] = [None for x in range(t.n_nuclides)]
            scoreType[i_batch][i_tally][i_filter] = [None for x in range(t.n_nuclides)]
            # NOTE(review): looks like leftover debug output; consider removing
            print(t.total_filter_bins,t.n_nuclides)
            for i_nuclide in range(t.n_nuclides):
                mean[i_batch][i_tally][i_filter][i_nuclide] = \
                    [None for x in range(t.n_scores)]
                uncert[i_batch][i_tally][i_filter][i_nuclide] = \
                    [None for x in range(t.n_scores)]
                scoreType[i_batch][i_tally][i_filter][i_nuclide] = \
                    [None for x in range(t.n_scores)]
                for i_score in range(t.n_scores):
                    scoreType[i_batch][i_tally][i_filter][i_nuclide][i_score] = \
                        t.scores[i_score]
                    # s = accumulated sum, s2 = accumulated sum of squares,
                    # read in order from the stream
                    s, s2 = sp._get_double(2)
                    s /= n
                    mean[i_batch][i_tally][i_filter][i_nuclide][i_score] = s
                    if s != 0.0:
                        # Relative 95% CI half-width: t * std. dev. of the
                        # mean, divided by the mean itself
                        relative_error = t_value*sqrt((s2/n - s*s)/(n-1))/s
                    else:
                        relative_error = 0.0
                    uncert[i_batch][i_tally][i_filter][i_nuclide][i_score] = relative_error
# Reorder the data lists into an order more conducive for plotting; the
# final indexing is: [tally][filter][nuclide][score][batch]
#
# Save the user's boolean option before the name `printxs` is rebound to a
# per-tally list below.  The original code tested `if printxs:` AFTER the
# rebind; a non-empty list is always truthy, so xs conversion was applied
# for every tally containing a flux score regardless of the option set in
# the USER OPTIONS block.
user_printxs = printxs
meanPlot = [None for x in range(len(mean[0]))]  # Set to the number of tallies
uncertPlot = [None for x in range(len(mean[0]))]  # Set to the number of tallies
absUncertPlot = [None for x in range(len(mean[0]))]  # Set to number of tallies
filterLabel = [None for x in range(len(mean[0]))]  # Set to the number of tallies
fluxLoc = [None for x in range(len(mean[0]))]  # Per-tally index of the 'flux' score (-1 if absent)
printxs = [False for x in range(len(mean[0]))]  # Per-tally xs-conversion flag
# Get and set the correct sizes for the rest of the dimensions
for i_tally in range(len(meanPlot)):
    # Set 2nd (filter) dimension
    meanPlot[i_tally] = [None for x in range(len(mean[0][i_tally]))]
    uncertPlot[i_tally] = [None for x in range(len(mean[0][i_tally]))]
    absUncertPlot[i_tally] = [None for x in range(len(mean[0][i_tally]))]
    filterLabel[i_tally] = [None for x in range(len(mean[0][i_tally]))]
    # Initialize flux location so it will be -1 if not found
    fluxLoc[i_tally] = -1
    for i_filter in range(len(meanPlot[i_tally])):
        # Set 3rd (nuclide) dimension
        meanPlot[i_tally][i_filter] = \
            [None for x in range(len(mean[0][i_tally][i_filter]))]
        uncertPlot[i_tally][i_filter] = \
            [None for x in range(len(mean[0][i_tally][i_filter]))]
        absUncertPlot[i_tally][i_filter] = \
            [None for x in range(len(mean[0][i_tally][i_filter]))]
        filterLabel[i_tally][i_filter] = \
            [None for x in range(len(mean[0][i_tally][i_filter]))]
        for i_nuclide in range(len(meanPlot[i_tally][i_filter])):
            # Set 4th (score) dimension
            meanPlot[i_tally][i_filter][i_nuclide] = \
                [None for x in range(len(mean[0][i_tally][i_filter][i_nuclide]))]
            uncertPlot[i_tally][i_filter][i_nuclide] = \
                [None for x in range(len(mean[0][i_tally][i_filter][i_nuclide]))]
            absUncertPlot[i_tally][i_filter][i_nuclide] = \
                [None for x in range(len(mean[0][i_tally][i_filter][i_nuclide]))]
            for i_score in range(len(meanPlot[i_tally][i_filter][i_nuclide])):
                # Set 5th (batch) dimension
                meanPlot[i_tally][i_filter][i_nuclide][i_score] = \
                    [None for x in range(len(mean))]
                uncertPlot[i_tally][i_filter][i_nuclide][i_score] = \
                    [None for x in range(len(mean))]
                absUncertPlot[i_tally][i_filter][i_nuclide][i_score] = \
                    [None for x in range(len(mean))]
                # Get filterLabel (this should be moved to its own function)
                #??? How to do?
                # Record where the flux score lives; all batches and tallies
                # share the same score ordering, hence the 0 indices in the
                # 1st, 3rd, and 4th dimensions.
                if scoreType[0][i_tally][0][0][i_score] == 'flux':
                    fluxLoc[i_tally] = i_score
# Set the per-tally flags according to the user's printxs option
if user_printxs:
    for i_tally in range(len(fluxLoc)):
        if fluxLoc[i_tally] != -1:
            printxs[i_tally] = True
# Now rearrange the data as suitable, and perform xs conversion if necessary
for i_batch in range(len(mean)):
    for i_tally in range(len(mean[i_batch])):
        for i_filter in range(len(mean[i_batch][i_tally])):
            for i_nuclide in range(len(mean[i_batch][i_tally][i_filter])):
                for i_score in range(len(mean[i_batch][i_tally][i_filter][i_nuclide])):
                    # Convert reaction rates to cross-sections for every
                    # score except 'flux' and 'current' when requested
                    if (printxs[i_tally] and \
                        ((scoreType[0][i_tally][i_filter][i_nuclide][i_score] != 'flux') and \
                        (scoreType[0][i_tally][i_filter][i_nuclide][i_score] != 'current'))):
                        # Perform rate to xs conversion
                        # mean is mean/fluxmean
                        meanPlot[i_tally][i_filter][i_nuclide][i_score][i_batch] = \
                            mean[i_batch][i_tally][i_filter][i_nuclide][i_score] / \
                            mean[i_batch][i_tally][i_filter][i_nuclide][fluxLoc[i_tally]]
                        # Update the relative uncertainty via error propagation
                        # (relative errors of a quotient add in quadrature)
                        uncertPlot[i_tally][i_filter][i_nuclide][i_score][i_batch] = \
                            sqrt(pow(uncert[i_batch][i_tally][i_filter][i_nuclide][i_score],2) \
                            + pow(uncert[i_batch][i_tally][i_filter][i_nuclide][fluxLoc[i_tally]],2))
                    else:
                        # Do not perform rate to xs conversion
                        meanPlot[i_tally][i_filter][i_nuclide][i_score][i_batch] = \
                            mean[i_batch][i_tally][i_filter][i_nuclide][i_score]
                        uncertPlot[i_tally][i_filter][i_nuclide][i_score][i_batch] = \
                            uncert[i_batch][i_tally][i_filter][i_nuclide][i_score]
                    # Both have the same absolute uncertainty calculation:
                    # absolute = relative error * (unconverted) mean
                    absUncertPlot[i_tally][i_filter][i_nuclide][i_score][i_batch] = \
                        uncert[i_batch][i_tally][i_filter][i_nuclide][i_score] * \
                        mean[i_batch][i_tally][i_filter][i_nuclide][i_score]
# Set plotting constants
xLabel = "Batches"
xLabel = xLabel.title() # not necessary for now, but is left in to handle if
# the previous line changes
# Begin plotting: one mean-with-error-bars figure and one relative-error
# figure per (tally, filter, nuclide, score) combination
for i_tally in range(len(meanPlot)):
    # Set tally string (placeholder until I put tally labels in statePoint)
    tallyStr = "Tally " + str(i_tally + 1)
    for i_filter in range(len(meanPlot[i_tally])):
        # Set filter string
        filterStr = "Filter " + str(i_filter + 1)
        for i_nuclide in range(len(meanPlot[i_tally][i_filter])):
            nuclideStr = "Nuclide " + str(i_nuclide + 1)
            for i_score in range(len(meanPlot[i_tally][i_filter][i_nuclide])):
                # Set score string
                # NOTE(review): i_batch is left over from the statepoint
                # reading loop (its final value); this works only because
                # score names are identical across batches — confirm.
                scoreStr = scoreType[i_batch][i_tally][i_filter][i_nuclide][i_score]
                scoreStr = scoreStr.title()
                if (printxs[i_tally] and ((scoreStr != 'Flux') and \
                    (scoreStr != 'Current'))):
                    scoreStr = scoreStr + "-XS"
                # set Title
                title = "Convergence of " + scoreStr + " in " + tallyStr + " for "\
                    + filterStr + " and " + nuclideStr
                # set yLabel
                yLabel = scoreStr
                yLabel = yLabel.title()
                # Set saving filename
                fileName = "tally_" + str(i_tally + 1) + "_" + scoreStr + \
                    "_filter_" + str(i_filter+1) + "_nuclide_" + str(i_nuclide+1) \
                    + "." + fileType
                REfileName = "tally_" + str(i_tally + 1) + "_" + scoreStr + \
                    "RE_filter_" + str(i_filter+1) + "_nuclide_" + str(i_nuclide+1) \
                    + "." + fileType
                # Plot mean with absolute error bars
                # (aa is matplotlib's alias for 'antialiased')
                plt.errorbar(active_batches, \
                    meanPlot[i_tally][i_filter][i_nuclide][i_score][:], \
                    absUncertPlot[i_tally][i_filter][i_nuclide][i_score][:],fmt='o-',aa=True)
                plt.xlabel(xLabel)
                plt.ylabel(yLabel)
                plt.title(title)
                if (fileType != 'none'):
                    plt.savefig(fileName)
                if showImg:
                    plt.show()
                # Clear the figure so the next score starts fresh
                plt.clf()
                # Plot relative uncertainty
                plt.plot(active_batches, \
                    uncertPlot[i_tally][i_filter][i_nuclide][i_score][:],'o-',aa=True)
                plt.xlabel(xLabel)
                plt.ylabel("Relative Error of " + yLabel)
                plt.title("Relative Error of " + title)
                if (fileType != 'none'):
                    plt.savefig(REfileName)
                if showImg:
                    plt.show()
                plt.clf()
if savetoCSV:
    # This block loops through each tally, and for each tally:
    #   Creates a new file
    #   Writes the scores and filters for that tally in csv format.
    # The columns are: batches, then for each filter: all the scores with
    # their absolute and relative uncertainties.
    # The rows are the data points per batch.
    for i_tally in range(len(meanPlot)):
        # Set tally string (placeholder until tally labels exist in statePoint)
        tallyStr = "Tally " + str(i_tally + 1)
        CSV_filename = "./tally" + str(i_tally+1) + ".csv"
        # Use a with-block so the file is flushed and closed even on error
        # (the original code never closed the handle).
        with open(CSV_filename, 'w') as f:
            # Build and write the header line
            lineText = "Batches"
            for i_filter in range(len(meanPlot[i_tally])):
                # Set filter string
                filterStr = "Filter " + str(i_filter + 1)
                for i_nuclide in range(len(meanPlot[i_tally][i_filter])):
                    nuclideStr = "Nuclide " + str(i_nuclide + 1)
                    for i_score in range(len(meanPlot[i_tally][i_filter][i_nuclide])):
                        # Score names are identical for every batch, so index
                        # batch 0 rather than relying on a leftover loop
                        # variable (the original used a stale i_batch).
                        scoreStr = scoreType[0][i_tally][i_filter][i_nuclide][i_score]
                        scoreStr = scoreStr.title()
                        if (printxs[i_tally] and ((scoreStr != 'Flux') and \
                            (scoreStr != 'Current'))):
                            scoreStr = scoreStr + "-XS"
                        # set header
                        headerText = scoreStr + " for " + filterStr + " for " + nuclideStr
                        lineText = lineText + "," + headerText + \
                            ",Abs Unc of " + headerText + \
                            ",Rel Unc of " + headerText
            f.write(lineText + "\n")
            # Write the data lines, each row is a different batch
            for i_batch in range(len(meanPlot[i_tally][0][0][0])):
                lineText = repr(active_batches[i_batch])
                for i_filter in range(len(meanPlot[i_tally])):
                    for i_nuclide in range(len(meanPlot[i_tally][i_filter])):
                        for i_score in range(len(meanPlot[i_tally][i_filter][i_nuclide])):
                            fieldText = \
                                repr(meanPlot[i_tally][i_filter][i_nuclide][i_score][i_batch]) + \
                                "," + \
                                repr(absUncertPlot[i_tally][i_filter][i_nuclide][i_score][i_batch]) +\
                                "," + \
                                repr(uncertPlot[i_tally][i_filter][i_nuclide][i_score][i_batch])
                            lineText = lineText + "," + fieldText
                f.write(lineText + "\n")
| |
import os
import json
import uuid
import shutil
import asyncio
import hashlib
from waterbutler.core import utils
from waterbutler.core import signing
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.osfstorage import settings
from waterbutler.providers.osfstorage.tasks import backup
from waterbutler.providers.osfstorage.tasks import parity
from waterbutler.providers.osfstorage.metadata import OsfStorageFileMetadata
from waterbutler.providers.osfstorage.metadata import OsfStorageFolderMetadata
from waterbutler.providers.osfstorage.metadata import OsfStorageRevisionMetadata
QUERY_METHODS = ('GET', 'DELETE')
class OSFStorageProvider(provider.BaseProvider):
    """Provider for OSF Storage.

    Delegates blob storage to an inner provider chosen by
    ``settings['storage']`` and records file metadata with the OSF through
    HMAC-signed callback requests.
    """

    __version__ = '0.0.1'
    NAME = 'osfstorage'

    def __init__(self, auth, credentials, settings):
        super().__init__(auth, credentials, settings)
        self.nid = settings['nid']
        self.root_id = settings['rootId']
        self.BASE_URL = settings['baseUrl']
        self.provider_name = settings['storage'].get('provider')
        self.parity_settings = settings.get('parity')
        self.parity_credentials = credentials.get('parity')
        self.archive_settings = settings.get('archive')
        self.archive_credentials = credentials.get('archive')

    @asyncio.coroutine
    def validate_path(self, path, **kwargs):
        """Resolve a string *path* into a WaterButlerPath with OSF ids.

        Looks the id up via the OSF 'lineage' endpoint; a 404 yields a path
        with no identifier (i.e. a not-yet-existing file/folder).
        """
        if path == '/':
            return WaterButlerPath('/', _ids=[self.root_id], folder=True)
        try:
            path, name = path.strip('/').split('/')
        except ValueError:
            path, name = path, None
        resp = yield from self.make_signed_request(
            'GET',
            self.build_url(path, 'lineage'),
            expects=(200, 404)
        )
        if resp.status == 404:
            return WaterButlerPath(path, _ids=(self.root_id, None), folder=path.endswith('/'))
        data = yield from resp.json()
        # lineage is returned leaf-first; reverse to build root-to-leaf names/ids
        names, ids = zip(*[(x['name'], x['id']) for x in reversed(data['data'])])
        if name is not None:
            ids += (None, )
            names += (name, )
        return WaterButlerPath('/'.join(names), _ids=ids, folder='folder' == data['data'][0]['kind'])

    def revalidate_path(self, base, path, folder=False):
        """Resolve child *path* of *base*, reusing ids from a metadata listing
        when the child already exists."""
        assert base.is_dir
        try:
            data = next(
                x for x in
                (yield from self.metadata(base))
                if x.name == path and
                x.kind == ('folder' if folder else 'file')
            )
            return base.child(data.name, _id=data.path.strip('/'), folder=folder)
        except StopIteration:
            # Child not present yet; return an id-less path
            return base.child(path, folder=folder)

    def make_provider(self, settings):
        """Requests on different files may need to use different providers,
        instances, e.g. when different files lives in different containers
        within a provider. This helper creates a single-use provider instance
        that optionally overrides the settings.

        :param dict settings: Overridden settings

        NOTE(review): the *settings* argument is currently ignored — the
        instance is always built from ``self.credentials``/``self.settings``;
        confirm whether per-call overrides were ever intended.
        """
        return utils.make_provider(
            self.provider_name,
            self.auth,
            self.credentials['storage'],
            self.settings['storage'],
        )

    def can_intra_copy(self, other, path=None):
        """Intra-copy is supported only between two osfstorage providers."""
        return isinstance(other, self.__class__)

    def can_intra_move(self, other, path=None):
        """Intra-move is supported only between two osfstorage providers."""
        return isinstance(other, self.__class__)

    def intra_move(self, dest_provider, src_path, dest_path):
        """Move a file/folder within osfstorage via the OSF 'move' hook.

        Returns (metadata, created) where created is True on a 201 response.
        """
        resp = yield from self.make_signed_request(
            'POST',
            self.build_url('hooks', 'move'),
            data=json.dumps({
                'user': self.auth['id'],
                'source': src_path.identifier,
                'destination': {
                    'name': dest_path.name,
                    'node': dest_provider.nid,
                    'parent': dest_path.parent.identifier
                }
            }),
            headers={'Content-Type': 'application/json'},
            expects=(200, 201)
        )
        data = yield from resp.json()
        if data['kind'] == 'file':
            return OsfStorageFileMetadata(data, str(dest_path)), resp.status == 201
        return OsfStorageFolderMetadata(data, str(dest_path)), resp.status == 201

    def intra_copy(self, dest_provider, src_path, dest_path):
        """Copy a file/folder within osfstorage via the OSF 'copy' hook.

        Returns (metadata, created) where created is True on a 201 response.
        """
        resp = yield from self.make_signed_request(
            'POST',
            self.build_url('hooks', 'copy'),
            data=json.dumps({
                'user': self.auth['id'],
                'source': src_path.identifier,
                'destination': {
                    'name': dest_path.name,
                    'node': dest_provider.nid,
                    'parent': dest_path.parent.identifier
                }
            }),
            headers={'Content-Type': 'application/json'},
            expects=(200, 201)
        )
        data = yield from resp.json()
        if data['kind'] == 'file':
            return OsfStorageFileMetadata(data, str(dest_path)), resp.status == 201
        return OsfStorageFolderMetadata(data, str(dest_path)), resp.status == 201

    @asyncio.coroutine
    def make_signed_request(self, method, url, data=None, params=None, ttl=100, **kwargs):
        """Issue an HMAC-signed request to the OSF.

        For query-style methods (GET/DELETE) the signature is folded into the
        query params; otherwise into the JSON body.  *data*, when given, must
        be a JSON-encoded string.
        """
        signer = signing.Signer(settings.HMAC_SECRET, settings.HMAC_ALGORITHM)
        if method.upper() in QUERY_METHODS:
            signed = signing.sign_data(signer, params or {}, ttl=ttl)
            params = signed
        else:
            # BUGFIX: the original called json.loads(data or {}) — when data
            # is None/empty that passes a dict to json.loads, which raises
            # TypeError (json.loads accepts only str/bytes).
            signed = signing.sign_data(signer, json.loads(data) if data else {}, ttl=ttl)
            data = json.dumps(signed)
        # Ensure url ends with a /
        if not url.endswith('/'):
            if '?' not in url:
                url += '/'
            elif url[url.rfind('?') - 1] != '/':
                url = url.replace('?', '/?')
        return (yield from self.make_request(method, url, data=data, params=params, **kwargs))

    @asyncio.coroutine
    def download(self, path, version=None, mode=None, **kwargs):
        """Download a file by resolving it to the inner storage provider.

        :raises NotFoundError: when *path* has no identifier
        :raises DownloadError: when the OSF lookup fails
        """
        if not path.identifier:
            raise exceptions.NotFoundError(str(path))
        # osf storage metadata will return a virtual path within the provider
        resp = yield from self.make_signed_request(
            'GET',
            self.build_url(path.identifier, 'download', version=version, mode=mode),
            expects=(200, ),
            throws=exceptions.DownloadError,
        )
        data = yield from resp.json()
        provider = self.make_provider(data['settings'])
        name = data['data'].pop('name')
        data['data']['path'] = yield from provider.validate_path('/' + data['data']['path'])
        download_kwargs = {}
        download_kwargs.update(kwargs)
        download_kwargs.update(data['data'])
        download_kwargs['displayName'] = kwargs.get('displayName', name)
        return (yield from provider.download(**download_kwargs))

    @asyncio.coroutine
    def upload(self, stream, path, **kwargs):
        """Upload *stream* to *path*.

        Writes to a pending location first, renames to a content-addressed
        (sha256) name, registers the new version with the OSF, then kicks off
        parity/backup tasks when enabled.  Returns (metadata, created).
        """
        self._create_paths()
        pending_name = str(uuid.uuid4())
        provider = self.make_provider(self.settings)
        local_pending_path = os.path.join(settings.FILE_PATH_PENDING, pending_name)
        remote_pending_path = yield from provider.validate_path('/' + pending_name)
        # Hash the stream as it passes through, and tee a local copy to disk
        stream.add_writer('md5', streams.HashStreamWriter(hashlib.md5))
        stream.add_writer('sha1', streams.HashStreamWriter(hashlib.sha1))
        stream.add_writer('sha256', streams.HashStreamWriter(hashlib.sha256))
        with open(local_pending_path, 'wb') as file_pointer:
            stream.add_writer('file', file_pointer)
            yield from provider.upload(stream, remote_pending_path, check_created=False, fetch_metadata=False, **kwargs)
        # The file is stored remotely under its sha256 (content addressing);
        # if it already exists the pending copy is simply deleted.
        complete_name = stream.writers['sha256'].hexdigest
        local_complete_path = os.path.join(settings.FILE_PATH_COMPLETE, complete_name)
        remote_complete_path = yield from provider.validate_path('/' + complete_name)
        try:
            metadata = yield from provider.metadata(remote_complete_path)
        except exceptions.MetadataError as e:
            if e.code != 404:
                # NOTE(review): this re-raise reaches the finally-block with
                # `metadata` unbound, which masks the original error with a
                # NameError — confirm intended error handling.
                raise
            metadata, _ = yield from provider.move(provider, remote_pending_path, remote_complete_path)
        else:
            yield from provider.delete(remote_pending_path)
        finally:
            metadata = metadata.serialized()
        # Due to cross volume movement in unix we leverage shutil.move which properly handles this case.
        # http://bytes.com/topic/python/answers/41652-errno-18-invalid-cross-device-link-using-os-rename#post157964
        shutil.move(local_pending_path, local_complete_path)
        response = yield from self.make_signed_request(
            'POST',
            self.build_url(path.parent.identifier, 'children'),
            expects=(200, 201),
            data=json.dumps({
                'name': path.name,
                'user': self.auth['id'],
                'settings': self.settings['storage'],
                'metadata': metadata,
                'hashes': {
                    'md5': stream.writers['md5'].hexdigest,
                    'sha1': stream.writers['sha1'].hexdigest,
                    'sha256': stream.writers['sha256'].hexdigest,
                },
                'worker': {
                    'host': os.uname()[1],
                    # TODO: Include additional information
                    'address': None,
                    'version': self.__version__,
                },
            }),
            headers={'Content-Type': 'application/json'},
        )
        created = response.status == 201
        data = yield from response.json()
        if settings.RUN_TASKS:
            parity.main(
                local_complete_path,
                self.parity_credentials,
                self.parity_settings,
            )
            backup.main(
                local_complete_path,
                data['version'],
                self.build_url('hooks', 'metadata') + '/',
                self.archive_credentials,
                self.archive_settings,
            )
        name = path.name
        metadata.update({
            'name': name,
            'path': data['data']['path'],
            'version': data['data']['version'],
            'downloads': data['data']['downloads']
        })
        return OsfStorageFileMetadata(metadata, str(path)), created

    @asyncio.coroutine
    def delete(self, path, **kwargs):
        """Delete *path* via the OSF.

        :raises NotFoundError: when *path* has no identifier
        """
        if path.identifier is None:
            raise exceptions.NotFoundError(str(path))
        yield from self.make_signed_request(
            'DELETE',
            self.build_url(path.identifier),
            params={'user': self.auth['id']},
            expects=(200, )
        )

    @asyncio.coroutine
    def metadata(self, path, **kwargs):
        """Return metadata for a file, or a listing for a folder.

        :raises MetadataError: (404) when *path* has no identifier
        """
        if path.identifier is None:
            raise exceptions.MetadataError('{} not found'.format(str(path)), code=404)
        if not path.is_dir:
            return (yield from self._item_metadata(path))
        return (yield from self._children_metadata(path))

    @asyncio.coroutine
    def revisions(self, path, view_only=None, **kwargs):
        """Return the list of revisions of the file at *path*.

        :raises MetadataError: (404) when *path* has no identifier
        """
        if path.identifier is None:
            raise exceptions.MetadataError('File not found', code=404)
        resp = yield from self.make_signed_request(
            'GET',
            self.build_url(path.identifier, 'revisions', view_only=view_only),
            expects=(200, )
        )
        return [
            OsfStorageRevisionMetadata(item)
            for item in (yield from resp.json())['revisions']
        ]

    @asyncio.coroutine
    def create_folder(self, path, **kwargs):
        """Create the folder at *path* and return its metadata."""
        resp = yield from self.make_signed_request(
            'POST',
            self.build_url(path.parent.identifier, 'children'),
            data=json.dumps({
                'kind': 'folder',
                'name': path.name,
                'user': self.auth['id'],
            }),
            headers={'Content-Type': 'application/json'},
            expects=(201, )
        )
        return OsfStorageFolderMetadata(
            (yield from resp.json())['data'],
            str(path)
        )

    @asyncio.coroutine
    def _item_metadata(self, path):
        """Fetch metadata for a single file/folder."""
        resp = yield from self.make_signed_request(
            'GET',
            self.build_url(path.identifier),
            expects=(200, )
        )
        return OsfStorageFileMetadata((yield from resp.json()), str(path))

    @asyncio.coroutine
    def _children_metadata(self, path):
        """Fetch metadata for every child of the folder at *path*."""
        resp = yield from self.make_signed_request(
            'GET',
            self.build_url(path.identifier, 'children'),
            expects=(200, )
        )
        resp_json = yield from resp.json()
        ret = []
        for item in resp_json:
            if item['kind'] == 'folder':
                ret.append(OsfStorageFolderMetadata(item, str(path.child(item['name']))))
            else:
                ret.append(OsfStorageFileMetadata(item, str(path.child(item['name']))))
        return ret

    def _create_paths(self):
        """Ensure the local pending/complete staging directories exist."""
        try:
            os.mkdir(settings.FILE_PATH_PENDING)
        except FileExistsError:
            pass
        try:
            os.mkdir(settings.FILE_PATH_COMPLETE)
        except FileExistsError:
            pass
        return True
| |
from collections import defaultdict, OrderedDict
import math
import os
import cPickle
import copy
import numpy as np
import pysal as ps
from pysal.weights.util import get_ids
from analysis import NetworkG, NetworkK, NetworkF
import util
__all__ = ["Network", "PointPattern", "NetworkG", "NetworkK", "NetworkF" ]
class Network:
"""
Spatially constrained network representation and analytical functionality.
Parameters
-----------
in_shp : string
A topoligically correct input shapefile
Attributes
----------
in_shp : string
input shapefile name
adjacencylist : list
of lists storing node adjacency
nodes : dict
key are tuple of node coords and value is the node ID
edge_lengths : dict
key is a tuple of sorted node IDs representing an edge
value is the length
pointpatterns : dict
key is a string name of the pattern
value is a point pattern class instance
node_coords : dict
key is th node ID and value are the (x,y) coordinates
inverse to nodes
edges : list
of edges, where each edge is a sorted tuple of node IDs
node_list : list
node IDs
alldistances : dict
key is the node ID
value is a tuple with two elements, first is a list of the
shortest path distances, second is a dict with
the key being the id of the destination node and the value
is a list of the shortest path.
Examples
--------
Instantiate an instance of a network
>>> ntw = network.Network(ps.examples.get_path('geodanet/streets.shp'))
Snap point observations to the network with attribute information
>>> ntw.snapobservations(ps.examples.get_path('geodanet/crimes.shp'), 'crimes', attribute=True)
And without attribute information
>>> ntw.snapobservations(ps.examples.get_path('geodanet/schools.shp'), 'schools', attribute=False)
"""
    def __init__(self, in_shp=None):
        """Build the network from *in_shp* when given; otherwise create an
        empty instance (attributes are only set when a shapefile is supplied).

        Parameters
        ----------
        in_shp : string
            Path to a topologically correct polyline shapefile.
        """
        if in_shp:
            self.in_shp = in_shp
            self.adjacencylist = defaultdict(list)
            self.nodes = {}
            self.edge_lengths = {}
            self.edges = []
            self.pointpatterns = {}
            self._extractnetwork()
            # Inverse of self.nodes: node id -> (x, y); must run after
            # _extractnetwork has populated self.nodes.
            self.node_coords = dict((value, key) for key, value in self.nodes.iteritems())
            #This is a spatial representation of the network.
            self.edges = sorted(self.edges)
            #Extract the graph
            self.extractgraph()
            self.node_list = sorted(self.nodes.values())
def _extractnetwork(self):
"""
Used internally, to extract a network from a polyline shapefile
"""
nodecount = 0
shps = ps.open(self.in_shp)
for shp in shps:
vertices = shp.vertices
for i, v in enumerate(vertices[:-1]):
try:
vid = self.nodes[v]
except:
self.nodes[v] = vid = nodecount
nodecount += 1
try:
nvid = self.nodes[vertices[i+1]]
except:
self.nodes[vertices[i+1]] = nvid = nodecount
nodecount += 1
self.adjacencylist[vid].append(nvid)
self.adjacencylist[nvid].append(vid)
#Sort the edges so that mono-directional keys can be stored.
edgenodes = sorted([vid, nvid])
edge = tuple(edgenodes)
self.edges.append(edge)
length = util.compute_length(v, vertices[i+1])
self.edge_lengths[edge] = length
    def extractgraph(self):
        """
        Using the existing network representation, create a graph based representation,
        by removing all nodes with neighbor incidence of two. That is, we assume these
        nodes are bridges between nodes with higher incidence.

        Populates self.graphedges, self.graph_lengths and self.graph_to_edges
        (a mapping from each contracted network edge to its graph edge).
        """
        self.graphedges = []
        self.edge_to_graph = {}
        self.graph_lengths = {}
        #Find all nodes with cardinality 2 (candidates for contraction)
        segment_nodes = []
        for k, v in self.adjacencylist.iteritems():
            #len(v) == 1 #cul-de-sac
            #len(v) == 2 #bridge segment
            #len(v) > 2 #intersection
            if len(v) == 2:
                segment_nodes.append(k)
        #Start with a copy of the spatial representation and iteratively
        # remove edges deemed to be segments
        self.graphedges = copy.deepcopy(self.edges)
        self.graph_lengths = copy.deepcopy(self.edge_lengths)
        self.graph_to_edges = {} #Mapping all the edges contained within a single graph represented edge
        # Grow each degree-2 node into a maximal chain ("bridge") of
        # connected degree-2 nodes; segment_nodes shrinks as nodes are
        # absorbed, so each node lands in exactly one bridge.
        bridges = []
        for s in segment_nodes:
            bridge = [s]
            neighbors = self._yieldneighbor(s, segment_nodes, bridge)
            while neighbors:
                cnode = neighbors.pop()
                segment_nodes.remove(cnode)
                bridge.append(cnode)
                newneighbors = self._yieldneighbor(cnode, segment_nodes, bridge)
                neighbors += newneighbors
            bridges.append(bridge)
        # Replace each bridge with a single graph edge joining its two
        # non-bridge endpoints, accumulating the contracted length.
        for bridge in bridges:
            if len(bridge) == 1:
                # Single degree-2 node: splice its two incident edges
                n = self.adjacencylist[bridge[0]]
                newedge = tuple(sorted([n[0], n[1]]))
                #Identify the edges to be removed
                e1 = tuple(sorted([bridge[0], n[0]]))
                e2 = tuple(sorted([bridge[0], n[1]]))
                #Remove from the graph
                self.graphedges.remove(e1)
                self.graphedges.remove(e2)
                #Remove from the edge lengths
                length_e1 = self.edge_lengths[e1]
                length_e2 = self.edge_lengths[e2]
                self.graph_lengths.pop(e1, None)
                self.graph_lengths.pop(e2, None)
                self.graph_lengths[newedge] = length_e1 + length_e2
                #Update the pointers
                self.graph_to_edges[e1] = newedge
                self.graph_to_edges[e2] = newedge
            else:
                # Multi-node chain: find the two boundary nodes (startend)
                # and every internal edge (redundant), then contract.
                cumulative_length = 0
                startend = {}
                redundant = set([])
                for b in bridge:
                    for n in self.adjacencylist[b]:
                        if n not in bridge:
                            startend[b] = n
                        else:
                            redundant.add(tuple(sorted([b,n])))
                newedge = tuple(sorted(startend.values()))
                for k, v in startend.iteritems():
                    redundant.add(tuple(sorted([k,v])))
                for r in redundant:
                    self.graphedges.remove(r)
                    cumulative_length += self.edge_lengths[r]
                    self.graph_lengths.pop(r, None)
                    self.graph_to_edges[r] = newedge
                self.graph_lengths[newedge] = cumulative_length
                self.graphedges.append(newedge)
        self.graphedges = sorted(self.graphedges)
def _yieldneighbor(self, node, segment_nodes, bridge):
"""
Used internally, this method traverses a bridge segement
to find the source and destination nodes.
"""
n = []
for i in self.adjacencylist[node]:
if i in segment_nodes and i not in bridge:
n.append(i)
return n
def contiguityweights(self, graph=True, weightings=None):
"""
Create a contiguity based W object
Parameters
----------
graph : boolean
{True, False } controls whether the W is generated using the spatial
representation or the graph representation
weightings : dict
of lists of weightings for each edge
Returns
-------
: W
A PySAL W Object representing the binary adjacency of the network
Examples
--------
>>> w = ntw.contiguityweights(graph=False)
Using the W object, access to ESDA functionality is provided. First,
a vector of attributes is created for all edges with observations.
>>> w = ntw.contiguityweights(graph=False)
>>> edges = w.neighbors.keys()
>>> y = np.zeros(len(edges))
>>> for i, e in enumerate(edges):
>>> if e in counts.keys():
>>> y[i] = counts[e]
Next, a standard call ot Moran is made and the result placed into `res`
>>> res = ps.esda.moran.Moran(y, ntw.w, permutations=99)
"""
neighbors = {}
neighbors = OrderedDict()
if graph:
edges = self.graphedges
else:
edges = self.edges
if weightings:
weights = {}
else:
weights = None
for key in edges:
neighbors[key] = []
if weightings:
weights[key] = []
for neigh in edges:
if key == neigh:
continue
if key[0] == neigh[0] or key[0] == neigh[1] or key[1] == neigh[0] or key[1] == neigh[1]:
neighbors[key].append(neigh)
if weightings:
weights[key].append(weightings[neigh])
#TODO: Add a break condition - everything is sorted, so we know when we have stepped beyond a possible neighbor.
#if key[1] > neigh[1]: #NOT THIS
#break
return ps.weights.W(neighbors, weights=weights)
def distancebandweights(self, threshold):
"""
Create distance based weights
Parameters
----------
threshold : float
Distance threshold value
"""
try:
hasattr(self.alldistances)
except:
self.node_distance_matrix()
neighbor_query = np.where(self.distancematrix < threshold)
neighbors = defaultdict(list)
for i, n in enumerate(neighbor_query[0]):
neigh = neighbor_query[1][i]
if n != neigh:
neighbors[n].append(neighbor_query[1][i])
return ps.weights.W(neighbors)
    def snapobservations(self, shapefile, name, idvariable=None, attribute=None):
        """
        Snap a point pattern shapefile to this network object. The point pattern
        is then stored in the network.pointpatterns['key'] attribute of the
        network object.

        Parameters
        ----------
        shapefile : str
            The PATH to the shapefile
        name : str
            Name to be assigned to the point dataset
        idvariable : str
            Column name to be used as ID variable
        attribute : bool
            Defines whether attributes should be extracted

        Returns
        -------
        None; results are stored on self.pointpatterns[name].
        """
        self.pointpatterns[name] = PointPattern(shapefile, idvariable=idvariable, attribute=attribute)
        self._snap_to_edge(self.pointpatterns[name])
def compute_distance_to_nodes(self, x, y, edge):
"""
Given an observation on a network edge, return the distance to the two
nodes that bound that end.
Parameters
----------
x : float
x-coordinate of the snapped point
y : float
y-coordiante of the snapped point
edge : tuple
(node0, node1) representation of the network edge
Returns
-------
d1 : float
the distance to node0, always the node with the lesser id
d2 : float
the distance to node1, always the node with the greater id
"""
d1 = util.compute_length((x,y), self.node_coords[edge[0]])
d2 = util.compute_length((x,y), self.node_coords[edge[1]])
return d1, d2
    def _snap_to_edge(self, pointpattern):
        """
        Used internally to snap point observations to network edges.

        For each point, edges are tried in order of increasing perpendicular
        distance; the point is snapped either to its orthogonal projection on
        the nearest edge that contains it, or to the closer endpoint of that
        edge.

        Parameters
        -----------
        pointpattern : obj
            PySAL Point Pattern Object

        Returns
        -------
        None. Sets on *pointpattern*:
        obs_to_edge : dict
            with edge as key and {point id: snapped coords} as value
        dist_to_node : dict
            with point id as key and {node id: distance} as value
        obs_to_node / snapped_coordinates : derived lookups
        """
        obs_to_edge = {}
        dist_to_node = {}
        pointpattern.snapped_coordinates = {}
        for pt_index, point in pointpattern.points.iteritems():
            x0 = point['coordinates'][0]
            y0 = point['coordinates'][1]
            d = {}
            vectors = {}
            c = 0
            #Components of this for loop can be pre computed and cached, like denom to distance =
            for edge in self.edges:
                # Point-to-line distance from the observation to the infinite
                # line through this edge's endpoints; k is the signed
                # projection factor reused below (Okabe method).
                xi = self.node_coords[edge[0]][0]
                yi = self.node_coords[edge[0]][1]
                xi1 = self.node_coords[edge[1]][0]
                yi1 = self.node_coords[edge[1]][1]
                num = ((yi1 - yi)*(x0-xi)-(xi1-xi)*(y0-yi))
                denom = ((yi1-yi)**2 + (xi1-xi)**2)
                k = num / float(denom)
                distance = abs(num) / math.sqrt(((yi1-yi)**2 + (xi1-xi)**2))
                vectors[c] = (xi, xi1, yi, yi1,k,edge)
                d[distance] = c
                c += 1
            # Candidate edges ordered by increasing perpendicular distance.
            # NOTE(review): the distance is used as a dict key, so two edges
            # at exactly the same distance collapse to one entry — confirm.
            min_dist = SortedEdges(sorted(d.items()))
            for dist, vector_id in min_dist.iteritems():
                value = vectors[vector_id]
                xi = value[0]
                xi1 = value[1]
                yi = value[2]
                yi1 = value[3]
                k = value[4]
                edge = value[5]
                #Okabe Method: orthogonal projection of the point on the line
                x = x0 - k * (yi1 - yi)
                y = y0 + k * (xi1 - xi)
                #Compute the distance from the new point to the nodes
                d1, d2 = self.compute_distance_to_nodes(x, y, edge)
                # NOTE(review): `and` binds tighter than `or`, so this parses
                # as: xi<=x<=xi1 or (xi1<=x<=xi and yi<=y<=yi1) or yi1<=y<=yi
                # — probably intended as (x within x-span) and (y within
                # y-span); confirm before relying on boundary behavior.
                if xi <= x <= xi1 or xi1 <= x <= xi and yi <= y <= yi1 or yi1 <=y <= yi:
                    #print "{} intersections edge {} at {}".format(pt_index, edge, (x,y))
                    #We are assuming undirected - this should never be true.
                    if edge not in obs_to_edge.keys():
                        obs_to_edge[edge] = {pt_index: (x,y)}
                    else:
                        obs_to_edge[edge][pt_index] = (x,y)
                    dist_to_node[pt_index] = {edge[0]:d1, edge[1]:d2}
                    pointpattern.snapped_coordinates[pt_index] = (x,y)
                    break
                else:
                    #either pi or pi+1 are the nearest point on that edge.
                    #If this point is closer than the next distance, we can break, the
                    # observation intersects the node with the shorter
                    # distance.
                    pi = (xi, yi)
                    pi1 = (xi1, yi1)
                    p0 = (x0,y0)
                    #Maybe this call to ps.cg should go as well - as per the call in the class above
                    dist_pi = ps.cg.standalone.get_points_dist(p0, pi)
                    dist_pi1 = ps.cg.standalone.get_points_dist(p0, pi1)
                    if dist_pi < dist_pi1:
                        node_dist = dist_pi
                        (x,y) = pi
                    else:
                        node_dist = dist_pi1
                        (x,y) = pi1
                    d1, d2 = self.compute_distance_to_nodes(x, y, edge)
                    # Snap to the endpoint only if no remaining edge could be
                    # closer than this endpoint distance.
                    if node_dist < min_dist.next_key(dist):
                        if edge not in obs_to_edge.keys():
                            obs_to_edge[edge] = {pt_index: (x, y)}
                        else:
                            obs_to_edge[edge][pt_index] = (x, y)
                        dist_to_node[pt_index] = {edge[0]:d1, edge[1]:d2}
                        pointpattern.snapped_coordinates[pt_index] = (x,y)
                        break
        # Derived lookup: node id -> point ids snapped to an incident edge
        obs_to_node = defaultdict(list)
        for k, v in obs_to_edge.iteritems():
            keys = v.keys()
            obs_to_node[k[0]] = keys
            obs_to_node[k[1]] = keys
        pointpattern.obs_to_edge = obs_to_edge
        pointpattern.dist_to_node = dist_to_node
        pointpattern.obs_to_node = obs_to_node
def count_per_edge(self, obs_on_network, graph=True):
"""
Compute the counts per edge.
Parameters
----------
obs_on_network : dict
of observations on the network
{(edge): {pt_id: (coords)}} or {edge: [(coord), (coord), (coord)]}
Returns
-------
counts: dict {(edge):count}
Example
-------
Note that this passes the obs_to_edge attribute of a point pattern
snapped to the network.
>>> counts = ntw.count_per_edge(ntw.pointpatterns['crimes'].obs_to_edge,
graph=False)
"""
counts = {}
if graph:
for key, observations in obs_on_network.iteritems():
cnt = len(observations)
if key in self.graph_to_edges.keys():
key = self.graph_to_edges[key]
try:
counts[key] += cnt
except:
counts[key] = cnt
else:
for key in obs_on_network.iterkeys():
counts[key] = len(obs_on_network[key])
return counts
def _newpoint_coords(self, edge, distance):
"""
Used internally to compute new point coordinates during snapping
"""
x1 = self.node_coords[edge[0]][0]
y1 = self.node_coords[edge[0]][1]
x2 = self.node_coords[edge[1]][0]
y2 = self.node_coords[edge[1]][1]
m = (y2 - y1) / (x2 - x1)
if x1 > x2:
x0 = x1 - distance / math.sqrt(1 + m**2)
elif x1 < x2:
x0 = x1 + distance / math.sqrt(1 + m**2)
y0 = m * (x0 - x1) + y1
return x0, y0
def simulate_observations(self, count, distribution='uniform'):
"""
Generate a simulated point pattern on the network.
Parameters
----------
count : integer
number of points to create or mean of the distribution
if not 'uniform'
distribution : string
{'uniform', 'poisson'} distribution of random points
Returns
-------
random_pts : dict
key is the edge tuple
value is a list of new point coordinates
Example
-------
>>> npts = ntw.pointpatterns['crimes'].npoints
>>> sim = ntw.simulate_observations(npts)
>>> sim
<network.SimulatedPointPattern instance at 0x1133d8710>
"""
simpts = SimulatedPointPattern()
#Cumulative Network Length
edges = []
lengths = np.zeros(len(self.edge_lengths))
for i, key in enumerate(self.edge_lengths.iterkeys()):
edges.append(key)
lengths[i] = self.edge_lengths[key]
stops = np.cumsum(lengths)
totallength = stops[-1]
if distribution is 'uniform':
nrandompts = np.random.uniform(0, totallength, size=(count,))
elif distribution is 'poisson':
nrandompts = np.random.uniform(0, totallength, size=(np.random.poisson(count),))
for i, r in enumerate(nrandompts):
idx = np.where(r < stops)[0][0]
assignment_edge = edges[idx]
distance_from_start = stops[idx] - r
#Populate the coordinates dict
x0, y0 = self._newpoint_coords(assignment_edge, distance_from_start)
simpts.snapped_coordinates[i] = (x0, y0)
simpts.obs_to_node[assignment_edge[0]].append(i)
simpts.obs_to_node[assignment_edge[1]].append(i)
#Populate the distance to node
simpts.dist_to_node[i] = {assignment_edge[0] : distance_from_start,
assignment_edge[1] : self.edge_lengths[edges[idx]] - distance_from_start}
simpts.points = simpts.snapped_coordinates
simpts.npoints = len(simpts.points)
return simpts
def enum_links_node(self, v0):
"""
Returns the edges (links) around node
Parameters
-----------
v0 : int
node id
Returns
-------
links : list
list of tuple edge adjacent to the node
"""
links = []
neighbornodes = self.adjacencylist[v0]
for n in neighbornodes:
links.append(tuple(sorted([n, v0])))
return links
def node_distance_matrix(self):
self.alldistances = {}
nnodes = len(self.node_list)
self.distancematrix = np.empty((nnodes, nnodes))
for node in self.node_list:
distance, pred = util.dijkstra(self, self.edge_lengths, node, n=float('inf'))
pred = np.array(pred)
tree = util.generatetree(pred)
self.alldistances[node] = (distance, tree)
self.distancematrix[node] = distance
    def allneighbordistances(self, sourcepattern, destpattern=None):
        """
        Compute either all distances between i and j in a single point pattern
        or all distances between each i from a source pattern and all j
        from a destination pattern.

        Parameters
        ----------
        sourcepattern : object
            A point pattern snapped to the network.  NOTE(review): the code
            reads ``.points`` directly, so this is the PointPattern object
            itself, not a key as the original docs stated.
        destpattern : object
            (Optional) A second point pattern snapped to the network.

        Returns
        -------
        nearest : array (n,n)
            An array of shape (n,n) storing distances between all points;
            the diagonal is filled with np.nan.
        """
        # Lazily build the all-pairs node distance matrix on first use.
        if not hasattr(self,'alldistances'):
            self.node_distance_matrix()
        src_indices = sourcepattern.points.keys()
        nsource_pts = len(src_indices)
        dist_to_node = sourcepattern.dist_to_node
        if destpattern == None:
            destpattern = sourcepattern
        dest_indices = destpattern.points.keys()
        ndest_pts = len(dest_indices)
        # Mutable work list of destination ids; shrinks as pairs are done.
        searchpts = copy.deepcopy(dest_indices)
        nearest = np.empty((nsource_pts, ndest_pts))
        nearest[:] = np.inf
        # For each point, the two end nodes of the edge it is snapped to.
        searchnodes = {}
        for s in searchpts:
            e1, e2 = dist_to_node[s].keys()
            searchnodes[s] = (e1, e2)
        for p1 in src_indices:
            #Get the source nodes and dist to source nodes
            source1, source2 = searchnodes[p1]
            set1 = set(searchnodes[p1])
            # distance from node1 to p, distance from node2 to p
            # NOTE(review): assumes .keys() and .values() of the same dict
            # iterate in the same order so (source1, sdist1) stay paired —
            # holds within a single run.
            sdist1, sdist2 = dist_to_node[p1].values()
            # Distances are symmetric: visit each unordered pair once.
            searchpts.remove(p1)
            for p2 in searchpts:
                dest1, dest2 = searchnodes[p2]
                set2 = set(searchnodes[p2])
                if set1 == set2: #same edge
                    # Both points on one edge: the network distance is the
                    # straight-line distance between snapped coordinates.
                    x1,y1 = sourcepattern.snapped_coordinates[p1]
                    x2,y2 = destpattern.snapped_coordinates[p2]
                    xd = x1-x2
                    yd = y1-y2
                    nearest[p1,p2] = np.sqrt(xd*xd + yd*yd)
                    nearest[p2,p1] = nearest[p1,p2]
                else:
                    ddist1, ddist2 = dist_to_node[p2].values()
                    # Node-to-node shortest path lengths between the four
                    # edge-endpoint combinations.
                    d11 = self.alldistances[source1][0][dest1]
                    d21 = self.alldistances[source2][0][dest1]
                    d12 = self.alldistances[source1][0][dest2]
                    d22 = self.alldistances[source2][0][dest2]
                    # find shortest distance from path passing through each of two origin nodes
                    # to first destination node
                    sd_1 = d11 + sdist1
                    sd_21 = d21 + sdist2
                    if sd_1 > sd_21:
                        sd_1 = sd_21
                    # now add point to node one distance on destination edge
                    len_1 = sd_1 + ddist1
                    # repeat but now for paths entering at second node of second edge
                    sd_2 = d12 + sdist1
                    sd_22 = d22 + sdist2
                    # NOTE(review): b is computed but never used.
                    b = 0
                    if sd_2 > sd_22:
                        sd_2 = sd_22
                        b = 1
                    len_2 = sd_2 + ddist2
                    # now find shortest length path between the point 1 on edge 1 and
                    # point 2 on edge 2, and assign symmetrically
                    sp_12 = len_1
                    if len_1 > len_2:
                        sp_12 = len_2
                    nearest[p1, p2] = sp_12
                    nearest[p2, p1] = sp_12
        # A point is not its own neighbor.
        np.fill_diagonal(nearest, np.nan)
        return nearest
def nearestneighbordistances(self, sourcepattern, destpattern=None):
"""
Compute the interpattern nearest neighbor distances or the intrapattern
nearest neight distances between a source pattern and a destination pattern.
Parameters
----------
sourcepattern str The key of a point pattern snapped to the network.
destpattern str (Optional) The key of a point pattern snapped to the network.
Returns
-------
nearest ndarray (n,2) With column[:,0] containing the id of the nearest
neighbor and column [:,1] containing the distance.
"""
if not sourcepattern in self.pointpatterns.keys():
raise KeyError("Available point patterns are {}".format(self.pointpatterns.keys()))
if not hasattr(self,'alldistances'):
self.node_distance_matrix()
pt_indices = self.pointpatterns[sourcepattern].points.keys()
dist_to_node = self.pointpatterns[sourcepattern].dist_to_node
nearest = np.zeros((len(pt_indices), 2), dtype=np.float32)
nearest[:,1] = np.inf
if destpattern == None:
destpattern = sourcepattern
searchpts = copy.deepcopy(pt_indices)
searchnodes = {}
for s in searchpts:
e1, e2 = dist_to_node[s].keys()
searchnodes[s] = (e1, e2)
for p1 in pt_indices:
#Get the source nodes and dist to source nodes
source1, source2 = searchnodes[p1]
sdist1, sdist2 = dist_to_node[p1].values()
searchpts.remove(p1)
for p2 in searchpts:
dest1, dest2 = searchnodes[p2]
ddist1, ddist2 = dist_to_node[p2].values()
source1_to_dest1 = sdist1 + self.alldistances[source1][0][dest1] + ddist1
source1_to_dest2 = sdist1 + self.alldistances[source1][0][dest2] + ddist2
source2_to_dest1 = sdist2 + self.alldistances[source2][0][dest1] + ddist1
source2_to_dest2 = sdist2 + self.alldistances[source2][0][dest2] + ddist2
if source1_to_dest1 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source1_to_dest1
if source1_to_dest1 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source1_to_dest1
if source1_to_dest2 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source1_to_dest2
if source1_to_dest1 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source1_to_dest2
if source2_to_dest1 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source2_to_dest1
if source2_to_dest1 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source2_to_dest1
if source2_to_dest2 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source2_to_dest2
if source2_to_dest2 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source2_to_dest2
return nearest
def NetworkF(self, pointpattern, nsteps=10, permutations=99,
threshold=0.2, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained F-Function
Parameters
----------
pointpattern : object
A PySAL point pattern object
nsteps : int
The number of steps at which the count of the nearest
neighbors is computed
permutations : int
The number of permutations to perform (default 99)
threshold : float
The level at which significance is computed. 0.5 would be 97.5% and 2.5%
distribution : str
The distirbution from which random points are sampled: uniform or poisson
lowerbound : float
The lower bound at which the G-function is computed. (default 0)
upperbound : float
The upper bound at which the G-function is computed.
Defaults to the maximum pbserved nearest neighbor distance.
Returns
-------
NetworkF : object
A network F class instance
"""
return NetworkF(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
def NetworkG(self, pointpattern, nsteps=10, permutations=99,
threshold=0.5, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained G-Function
Parameters
----------
pointpattern : object
A PySAL point pattern object
nsteps : int
The number of steps at which the count of the nearest
neighbors is computed
permutations : int
The number of permutations to perform (default 99)
threshold : float
The level at which significance is computed. 0.5 would be 97.5% and 2.5%
distribution : str
The distirbution from which random points are sampled: uniform or poisson
lowerbound : float
The lower bound at which the G-function is computed. (default 0)
upperbound : float
The upper bound at which the G-function is computed.
Defaults to the maximum pbserved nearest neighbor distance.
Returns
-------
NetworkG : object
A network G class object
"""
return NetworkG(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
def NetworkK(self, pointpattern, nsteps=10, permutations=99,
threshold=0.5, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained G-Function
Parameters
----------
pointpattern : object
A PySAL point pattern object
nsteps : int
The number of steps at which the count of the nearest
neighbors is computed
permutations : int
The number of permutations to perform (default 99)
threshold : float
The level at which significance is computed. 0.5 would be 97.5% and 2.5%
distribution : str
The distirbution from which random points are sampled: uniform or poisson
lowerbound : float
The lower bound at which the G-function is computed. (default 0)
upperbound : float
The upper bound at which the G-function is computed.
Defaults to the maximum pbserved nearest neighbor distance.
Returns
-------
NetworkK : object
A network K class object
"""
return NetworkK(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
    def segment_edges(self, distance):
        """
        Segment all of the edges in the network at either
        a fixed distance or a fixed number of segments.

        Parameters
        -----------
        distance : float
            The distance at which edges are split

        Returns
        -------
        sn : object
            PySAL Network Object (a new, segmented copy; ``self`` is left
            untouched)

        Example
        -------
        >>> n200 = ntw.segment_edges(200.0)
        """
        # Build the result on a deep copy so the original network survives.
        sn = Network()
        sn.adjacencylist = copy.deepcopy(self.adjacencylist)
        sn.edge_lengths = copy.deepcopy(self.edge_lengths)
        sn.edges = set(copy.deepcopy(self.edges))
        sn.node_coords = copy.deepcopy(self.node_coords)
        sn.node_list = copy.deepcopy(self.node_list)
        sn.nodes = copy.deepcopy(self.nodes)
        sn.pointpatterns = copy.deepcopy(self.pointpatterns)
        sn.in_shp = self.in_shp
        # New split nodes get ids above the current maximum node id.
        current_node_id = max(self.nodes.values())
        # Edges created/removed during the loop; applied after iteration
        # because sn.edges is being iterated below.
        newedges = set()
        removeedges = set()
        for e in sn.edges:
            length = sn.edge_lengths[e]
            interval = distance
            totallength = 0
            # NOTE(review): startnode is assigned but never used.
            currentstart = startnode = e[0]
            endnode = e[1]
            #If the edge will be segmented, remove the
            # current edge from the adjacency list
            if interval < length:
                sn.adjacencylist[e[0]].remove(e[1])
                sn.adjacencylist[e[1]].remove(e[0])
                sn.edge_lengths.pop(e, None)
                removeedges.add(e)
            else:
                # Edge shorter than the interval: keep it as-is.
                continue
            # Walk along the edge, cutting a new node every `interval`.
            while totallength < length:
                currentstop = current_node_id
                if totallength + interval > length:
                    # Final (possibly shorter) segment ends at the edge's
                    # original end node.
                    currentstop = endnode
                    interval = length - totallength
                    totallength = length
                else:
                    # Mint a fresh node id for an interior cut point.
                    current_node_id += 1
                    currentstop = current_node_id
                    totallength += interval
                #Compute the new node coordinate
                newx, newy = self._newpoint_coords(e, totallength)
                #Update node_list
                if currentstop not in sn.node_list:
                    sn.node_list.append(currentstop)
                #Update nodes and node_coords
                sn.node_coords[currentstop] = newx, newy
                sn.nodes[(newx, newy)] = currentstop
                #Update the adjacencylist
                sn.adjacencylist[currentstart].append(currentstop)
                sn.adjacencylist[currentstop].append(currentstart)
                #Add the new edge to the edge dict
                #Iterating over this, so we need to add after iterating
                newedges.add(tuple(sorted([currentstart, currentstop])))
                #Modify edge_lengths
                sn.edge_lengths[tuple(sorted([currentstart, currentstop]))] = interval
                #Increment the start to the stop
                currentstart = currentstop
        sn.edges.update(newedges)
        sn.edges.difference_update(removeedges)
        sn.edges = list(sn.edges)
        #Update the point pattern snapping to the segmented edges
        for instance in sn.pointpatterns.itervalues():
            sn._snap_to_edge(instance)
        return sn
def savenetwork(self, filename):
"""
Save a network to disk as a binary file
Parameters
----------
filename : str
The filename where the network should be saved.
This should be a full PATH or the file is saved
whereever this method is called from.
Example
--------
>>> ntw.savenetwork('mynetwork.pkl')
"""
with open(filename, 'wb') as networkout:
cPickle.dump(self, networkout, protocol=2)
@staticmethod
def loadnetwork(filename):
with open(filename, 'rb') as networkin:
self = cPickle.load(networkin)
return self
class PointPattern():
    """
    A stub point pattern class used to store a point pattern.

    This class is monkey patched with network specific attributes
    when the points are snapped to a network.  In the future this class
    may be replaced with a generic point pattern class.

    Parameters
    ----------
    shapefile : string
        input shapefile
    idvariable : string
        field in the shapefile to use as an idvariable
    attribute : boolean
        {False, True} A flag to indicate whether all attributes
        are tagged to this class.

    Attributes
    ----------
    points : dict
        key is the point id, value are the coordinates
    npoints : integer
        the number of points
    """
    def __init__(self, shapefile, idvariable=None, attribute=False):
        self.points = {}
        self.npoints = 0

        # Optional user-supplied ids; fall back to enumeration order.
        ids = get_ids(shapefile, idvariable) if idvariable else None

        pts = ps.open(shapefile)

        # Attach dbf attributes when requested.
        if attribute:
            db = ps.open(os.path.splitext(shapefile)[0] + '.dbf')
        else:
            db = None

        for i, pt in enumerate(pts):
            key = ids[i] if ids else i
            properties = db[i] if db else None
            self.points[key] = {'coordinates': pt, 'properties': properties}

        pts.close()
        if db:
            db.close()
        self.npoints = len(self.points)
class SimulatedPointPattern():
    """
    Struct style class to mirror the Point Pattern Class.

    If the PointPattern class has methods, it might make sense to
    make this a child of that class.  This class is not intended to
    be used by the external user.
    """
    def __init__(self):
        # Mirror of PointPattern's snapped attributes, initially empty.
        self.snapped_coordinates = {}
        self.dist_to_node = {}
        self.obs_to_edge = {}
        self.obs_to_node = defaultdict(list)
        self.npoints = 0
class SortedEdges(OrderedDict):
    """
    An OrderedDict that supports ordered-neighbor queries on its keys.
    """
    def next_key(self, key):
        """
        Return the key that follows ``key`` in insertion order.

        Raises
        ------
        KeyError
            If ``key`` is not present.
        ValueError
            If ``key`` is the last key.
        """
        # BUGFIX: the original reached into the name-mangled CPython-2
        # internal ``_OrderedDict__map``, which does not exist on the
        # C-implemented OrderedDict of Python 3.  Walk the public key
        # order instead, preserving the original error behavior.
        if key not in self:
            raise KeyError(key)
        keys = list(self)
        idx = keys.index(key) + 1
        if idx == len(keys):
            raise ValueError("{!r} is the last key.".format(key))
        return keys[idx]

    def first_key(self):
        """Return the first key, or raise ValueError if the dict is empty."""
        for key in self:
            return key
        raise ValueError("No sorted edges remain.")
| |
# from apps.bluebottle_salesforce.models import ProjectCountry
from django.db import models
from salesforce.models import SalesforceModel
from djchoices import DjangoChoices, ChoiceItem
from django.utils.translation import ugettext as _
# TODO: remove the DjangoChoices or add it if needed to a Helper file.
class SalesforceOrganization(SalesforceModel):
    """
    Default Salesforce Account model. For Onepercentclub the mapping is named Organization(s).
    There are also other Salesforce models related to Account: AccountContactRole, AccountFeed, AccountHistory,
    AccountPartner, AccountShare
    """
    class AccountType(DjangoChoices):
        # Picklist values for the Salesforce Account "Type" field.
        business = ChoiceItem('Business', label=_("Business"))
        fund = ChoiceItem('Fund', label=_("Fund"))
        international = ChoiceItem('International Cooperation', label=_("International Cooperation"))
        network = ChoiceItem('Network', label=_("Network"))
        supplier = ChoiceItem('Supplier', label=_("Supplier"))
        individual = ChoiceItem('Individual', label=_("Individual"))
        percent_idea = ChoiceItem('1%IDEA', label=_("1%IDEA"))
        # BUGFIX: the label previously read "Individual" (copy/paste error).
        government = ChoiceItem('Government & Politics', label=_("Government & Politics"))
        media_pr = ChoiceItem('Media / PR', label=_("Media / PR"))

    # SF Layout: Account details section.
    legal_status = models.CharField(max_length=10000, db_column='Legal_status__c')
    name = models.CharField(max_length=255, db_column='Name')
    # help_text wrapped in _() for consistency with the rest of the file.
    organization_type = models.CharField(max_length=40, db_column='Type',
                                         choices=AccountType.choices, help_text=_("Type"))
    # SF Layout: Address Information section.
    external_id = models.CharField(max_length=255, db_column='Organization_External_ID__c')
    billing_city = models.CharField(max_length=40, db_column='BillingCity')
    billing_street = models.CharField(max_length=255, db_column='BillingStreet')
    billing_postal_code = models.CharField(max_length=20, db_column='BillingPostalCode')
    billing_country = models.CharField(max_length=80, db_column='BillingCountry')
    email_address = models.EmailField(max_length=80, db_column='E_mail_address__c')
    phone = models.CharField(max_length=40, db_column='Phone')
    website = models.URLField(max_length=255, db_column='Website')
    # SF Layout: Bank Account section.
    address_bank = models.CharField(max_length=255, db_column='Address_bank__c')
    bank_account_name = models.CharField(max_length=255, db_column='Bank_account_name__c')
    bank_account_number = models.CharField(max_length=40, db_column='Bank_account_number__c')
    bank_name = models.CharField(max_length=255, db_column='Bankname__c')
    bic_swift = models.CharField(max_length=40, db_column='BIC_SWIFT__c')
    country_bank = models.CharField(max_length=60, db_column='Country_bank__c')
    iban_number = models.CharField(max_length=255, db_column='IBAN_number__c')
    # SF Layout: Description section.
    description = models.CharField(max_length=32000, db_column='Description')
    # SF Layout: System Information.
    created_date = models.DateField(db_column='Organization_created_date__c')

    class Meta:
        db_table = 'Account'
        managed = False
class SalesforceContact(SalesforceModel):
    """
    Default Salesforce Contact model.
    """
    # SF Layout: Subscription section.
    category1 = models.CharField(max_length=255, db_column='Category1__c')
    email = models.EmailField(max_length=80, db_column='Email')
    member_1_club = models.BooleanField(db_column='Member_1_club__c', default=True)
    user_name = models.CharField(max_length=255, db_column='Username__c')
    is_active = models.BooleanField(db_column='Active__c')
    has_activated = models.BooleanField(db_column='Has_Activated_Account__c')
    # NOTE(review): maps to the SF column 'Deleted__c' — confirm the intended
    # semantics (close vs. delete date).
    close_date = models.DateField(db_column='Deleted__c')
    # SF Layout: Profile section.
    first_name = models.CharField(max_length=40, db_column='FirstName')
    last_name = models.CharField(max_length=80, db_column='LastName', null=False, blank=False)
    member_since = models.DateField(db_column='Member_since__c')
    why_one_percent_member = models.CharField(max_length=32000, db_column='Why_onepercent_member__c')
    about_me_us = models.CharField(max_length=3200, db_column='About_me_us__c')
    location = models.CharField(max_length=100, db_column='Location__c')
    # The default: Organization(Account) will be 'Individual' as current.
    # - Future purpose deactivate and put the Organization website group value
    # organization_account = models.ForeignKey(SalesforceOrganization, db_column='AccountId')
    website = models.CharField(max_length=255, db_column='Website__c')
    last_login = models.DateTimeField(db_column='Date_Last_Login__c')
    date_joined = models.DateTimeField(db_column='Date_Joined__c')
    # Bank details
    bank_account_number = models.CharField(max_length=30, db_column='Account_number__c')
    bank_account_holder = models.CharField(max_length=60, db_column='Account_holder__c')
    bank_account_city = models.CharField(max_length=50, db_column='Account_city__c')
    # SF Layout: Contact Information section.
    activity_number = models.CharField(max_length=255, db_column='Activity_number__c')
    # SF Layout: Contact Activity section.
    amount_of_single_donations = models.CharField(max_length=255, db_column='Amount_of_single_donations__c')
    has_n_friends = models.CharField(max_length=255, db_column='Has_n_friends__c')
    has_given_n_vouchers = models.CharField(max_length=255, db_column='Has_given_n_1_VOUCHERS__c')
    is_doing_n_tasks = models.CharField(max_length=255, db_column='Is_doing_n_tasks__c')
    number_of_donations = models.CharField(max_length=255, db_column='Number_of_donations__c')
    support_n_projects = models.CharField(max_length=255, db_column='Support_n_projects__c')
    total_amount_of_donations = models.CharField(max_length=255, db_column='Total_amount_of_donations__c')
    total_number_of_received_messages = models.CharField(max_length=255, db_column='Total_number_of_received_messages__c')
    total_number_of_sent_messages = models.CharField(max_length=255, db_column='Total_number_of_sent_messages__c')
    # SF Layout: Administrative (private) section.
    birth_date = models.DateField(db_column='Birthdate')
    gender = models.CharField(max_length=20, db_column='Gender__c')
    mailing_city = models.CharField(max_length=40, db_column='MailingCity')
    mailing_country = models.CharField(max_length=40, db_column='MailingCountry')
    mailing_postal_code = models.CharField(max_length=20, db_column='MailingPostalCode')
    mailing_street = models.CharField(max_length=20, db_column='MailingStreet')
    mailing_state = models.CharField(max_length=80, db_column='MailingState')
    # SF Layout: My Skills section.
    # The field 'Which_1_would_you_like_to_contribute__c' has been replaced by 'available_to_share_knowledge' and
    # 'available_to_donate'
    # which_1_would_you_like_to_contribute = models.CharField(max_length=32000, db_column=
    # 'Which_1_would_you_like_to_contribute__c')
    available_time = models.CharField(max_length=255, db_column='Available_time__c')
    where = models.CharField(max_length=255, db_column='Where__c')
    available_to_donate = models.BooleanField(db_column='Available_to_donate__c')
    available_to_share_time_and_knowledge = models.BooleanField(db_column='Available_to_share_time_and_knowledge__c')
    availability = models.CharField(max_length=255, db_column='Availability__c')
    # SF Layout: My Settings section.
    receive_emails_for_friend_invitations = models.BooleanField(db_column='Receive_emails_for_friend_invitations__c')
    receive_newsletter = models.BooleanField(db_column='Receive_newsletter__c')
    email_after_a_new_message = models.BooleanField(db_column='Email_after_a_new_message__c')
    email_after_a_new_public_message = models.BooleanField(db_column='Email_after_a_new_public_message__c')
    primary_language = models.CharField(max_length=255, db_column='Primary_language__c')
    # SF Layout: All expertise section.
    administration_finance = models.BooleanField(db_column='Administration_Finance__c')
    agriculture_environment = models.BooleanField(db_column='Agriculture_Environment__c')
    architecture = models.BooleanField(db_column='Architecture__c')
    computer_ict = models.BooleanField(db_column='Computer_ICT__c')
    design = models.BooleanField(db_column='Design__c')
    economy_business = models.BooleanField(db_column='Economy_Business__c')
    education = models.BooleanField(db_column='Education__c')
    fund_raising = models.BooleanField(db_column='Fundraising__c')
    graphic_design = models.BooleanField(db_column='Graphic_Design__c')
    health = models.BooleanField(db_column='Health__c')
    internet_research = models.BooleanField(db_column='Internet_Research__c')
    law_and_politics = models.BooleanField(db_column='Law_and_Politics__c')
    marketing_pr = models.BooleanField(db_column='Marketing_PR__c')
    online_marketing = models.BooleanField(db_column='Online_Marketing__c')
    photo_video = models.BooleanField(db_column='Photo_Video__c')
    physics_technique = models.BooleanField(db_column='Physics_Technique__c')
    presentations = models.BooleanField(db_column='Presentations__c')
    project_management = models.BooleanField(db_column='Project_Management__c')
    psychology = models.BooleanField(db_column='Psychology__c')
    social_work = models.BooleanField(db_column='Social_Work__c')
    sport_and_development = models.BooleanField(db_column='Sport_and_Development__c')
    tourism = models.BooleanField(db_column='Tourism__c')
    trade_transport = models.BooleanField(db_column='Trade_Transport__c')
    translating_writing = models.BooleanField(db_column='Translating_Writing__c')
    web_development = models.BooleanField(db_column='Web_development__c')
    writing_proposals = models.BooleanField(db_column='Writing_proposals__c')
    # SF: Other.
    external_id = models.CharField(max_length=255, db_column='Contact_External_ID__c')
    tags = models.CharField(max_length=255, db_column='Tags__c')
    # SF: Additional requirement not implemented yet - SFDC - Sheet 1
    amount_of_available_time = models.CharField(max_length=255, db_column='Amount_of_available_time__c')
    industry_employed_in = models.CharField(max_length=255, db_column='Industry_employed_in__c')
    nationality = models.CharField(max_length=255, db_column='Nationality__c')
    follows_1_club_at_twitter = models.BooleanField(db_column='Follows_1_CLUB_at_Twitter__c')
    likes_1_club_at_facebook = models.BooleanField(db_column='Likes_1_CLUB_at_Facebook__c')
    interested_in_theme = models.CharField(max_length=255, db_column='Interested_in_theme__c')
    interested_in_target_group = models.CharField(max_length=255, db_column='Interested_in_target_group__c')
    preferred_channel_for_interaction = models.CharField(max_length=255, db_column='Preferred_channel_for_interaction__c')
    # SF: Additional requirement not implemented yet - SFDC - Sheet 2
    # BUGFIX: ``max_length`` is not a valid option for IntegerField-family
    # fields (Django ignores it and flags it as check W122); removed.
    date_of_last_donation = models.DateField(db_column='Date_of_last_donation__c')
    total_amount_of_one_off_donation = models.PositiveIntegerField(db_column='Total_amount_of_one_off_donation__c')
    number_of_one_off_donations = models.PositiveIntegerField(db_column='Number_of_one_off_donations__c')
    total_amount_of_recurring_donations = models.PositiveIntegerField(db_column='Total_amount_of_recurring_donations__c')
    number_of_recurring_donation = models.PositiveIntegerField(db_column='Number_of_recurring_donation__c')
    number_of_received_campaigns = models.PositiveIntegerField(db_column='Number_of_received_campaigns__c')

    class Meta:
        db_table = 'Contact'
        managed = False
class SalesforceProject(SalesforceModel):
"""
Custom Salesforce Project__c model. For Onepercentclub the mapping is named 1%CLUB Project(s).
"""
class ProjectStatus(DjangoChoices):
closed = ChoiceItem('Closed', label=_("Closed"))
created = ChoiceItem('Created', label=_("Created"))
done = ChoiceItem('Done', label=_("Done"))
validated = ChoiceItem('Validated', label=_("Validated"))
# SF Layout: 1%CLUB Project Detail section.
amount_at_the_moment = models.CharField(max_length=255, db_column='Amount_at_the_moment__c')
amount_requested = models.CharField(max_length=255, db_column='Amount_requested__c')
amount_still_needed = models.CharField(max_length=255, db_column='Amount_still_needed__c')
# Should it be 255 like the Project model on new Website
project_name = models.CharField(max_length=80, db_column='Project_name__c')
project_owner = models.ForeignKey(SalesforceContact, db_column='Project_Owner__c')
status_project = models.CharField(max_length=255,
db_column='Status_project__c',
choices=ProjectStatus.choices,
help_text=_("Status project"))
target_group_s_of_the_project = models.CharField(max_length=20000, db_column='Target_group_s_of_the_project__c')
# SF Layout: Summary Project Details section.
country_in_which_the_project_is_located = models.CharField(max_length=255,
db_column='Country_in_which_the_project_is_located__c')
describe_the_project_in_one_sentence = models.CharField(max_length=50000, db_column='Describe_the_project_in_one_sentence__c')
describe_where_the_money_is_needed_for = models.CharField(max_length=15000, db_column='Describe_where_the_money_is_needed_for__c')
project_url = models.URLField(max_length=255, db_column='Projecturl__c')
# SF Layout: Extensive project information section.
third_half_project = models.BooleanField(db_column='third_half_project__c')
organization_account = models.ForeignKey(SalesforceOrganization, db_column='Organization__c', null=True)
comments = models.CharField(max_length=32000, db_column='Comments__c')
contribution_project_in_reducing_poverty = models.CharField(max_length=32000,
db_column='Contribution_project_in_reducing_poverty__c')
earth_charther_project = models.BooleanField(db_column='Earth_Charther_project__c')
extensive_project_description = models.CharField(max_length=32000, db_column='Extensive_project_description__c')
project_goals = models.CharField(max_length=20000, db_column='Project_goals__c')
sustainability = models.CharField(max_length=20000, db_column='Sustainability__c')
# SF Layout: Project planning and budget section.
additional_explanation_of_budget = models.CharField(max_length=32000,
db_column='Additional_explanation_of_budget__c')
end_date_of_the_project = models.DateField(db_column='End_date_of_the_project__c')
expected_funding_through_other_resources = models.CharField(max_length=20000, db_column='Expected_funding_through_other_resources__c')
expected_project_results = models.CharField(max_length=32000, db_column='Expected_project_results__c')
funding_received_through_other_resources = models.CharField(max_length=20000, db_column='Funding_received_through_other_resources__c')
need_for_volunteers = models.CharField(max_length=32000, db_column='Need_for_volunteers__c')
other_way_people_can_contribute = models.CharField(max_length=32000, db_column='Other_way_people_can_contribute__c')
project_activities_and_timetable = models.CharField(max_length=32000, db_column='Project_activities_and_timetable__c')
starting_date_of_the_project = models.DateField(db_column='Starting_date_of_the_project__c')
# SF Layout: Millennium Goals section.
#Multipicklist: ?? - millennium_goals = models.CharField(max_length=255, db_column='MILLENNIUM_GOALS__C')
# SF Layout: Tags section.
tags = models.CharField(max_length=20000, db_column='Tags__c')
# SF Layout: Referrals section.
name_referral_1 = models.CharField(max_length=255, db_column='Name_referral_1__c')
name_referral_2 = models.CharField(max_length=255, db_column='Name_referral_2__c')
name_referral_3 = models.CharField(max_length=255, db_column='Name_referral_3__c')
description_referral_1 = models.CharField(max_length=32000, db_column='Description_referral_1__c')
description_referral_2 = models.CharField(max_length=32000, db_column='Description_referral_2__c')
description_referral_3 = models.CharField(max_length=32000, db_column='Description_referral_3__c')
email_address_referral_1 = models.EmailField(max_length=80, blank=True, null=True, db_column='E_mail_address_referral_1__c')
email_address_referral_2 = models.EmailField(max_length=80, blank=True, null=True, db_column='E_mail_address_referral_2__c')
email_address_referral_3 = models.EmailField(max_length=80, blank=True, null=True, db_column='E_mail_address_referral_3__c')
relation_referral_1_with_project_org = models.CharField(max_length=32000, db_column='Relation_referral_1_with_project_org__c')
relation_referral_2_with_project_org = models.CharField(max_length=32000, db_column='Relation_referral_2_with_project_org__c')
relation_referral_3_with_project_org = models.CharField(max_length=32000, db_column='Relation_referral_3_with_project_org__c')
# Phase dates
date_pitch_created = models.DateField(db_column='Date_pitch_created__c')
date_pitch_submitted = models.DateField(db_column='Date_pitch_submitted__c')
date_pitch_approved = models.DateField(db_column='Date_pitch_approved__c')
date_pitch_rejected = models.DateField(db_column='Date_pitch_rejected__c')
date_plan_submitted = models.DateField(db_column='Date_plan_submitted__c')
date_plan_approved = models.DateField(db_column='Date_plan_approved__c')
date_plan_rejected = models.DateField(db_column='Date_plan_rejected__c')
date_project_act = models.DateField(db_column='Date_project_act__c')
date_project_realized = models.DateField(db_column='Date_project_realized__c')
date_project_failed = models.DateField(db_column='Date_project_failed__c')
date_project_result = models.DateField(db_column='Date_project_result__c')
# SF Layout: Project Team Information section.
project_created_date = models.DateField(db_column='Project_created_date__c')
date_project_deadline = models.DateField(db_column='Date_project_deadline__c')
# SF Layout: Other section.
external_id = models.CharField(max_length=255, db_column='Project_External_ID__c')
# SF: Additional requirement not implemented yet - SFDC - Sheet 1
number_of_people_reached_direct = models.PositiveIntegerField(max_length=18, db_column='NumberOfPeopleReachedDirect__c')
number_of_people_reached_indirect = models.PositiveIntegerField(max_length=18, db_column='NumberOfPeopleReachedIndirect__c')
# theme = models.CharField(max_length=255, db_column='Theme__c')
target_group = models.CharField(max_length=255, db_column='Target_group__c')
class Meta:
db_table = 'Project__c'
managed = False
class SalesforceProjectBudget(SalesforceModel):
    """
    Custom Salesforce Project_Budget__c model. For Onepercentclub the mapping is named Project Budget.
    """
    class ProjectBudgetCategory(DjangoChoices):
        # NOTE(review): the stored values must match the Salesforce picklist
        # byte-for-byte, including the apparent typos "Communcation" and
        # "Adminstration" below -- do not "correct" them here without also
        # changing the picklist on the Salesforce side.
        construction = ChoiceItem('Construction materials', label=_("Construction materials"))
        agriculture = ChoiceItem('Agriculture materials', label=_("Agriculture materials"))
        school_supplies = ChoiceItem('School supplies', label=_("School supplies"))
        communication = ChoiceItem('Communication materials', label=_("Communication materials"))
        other_materials = ChoiceItem('Other materials', label=_("Other materials"))
        tools = ChoiceItem('Tools', label=_("Tools"))
        transport = ChoiceItem('Transport', label=_("Transport"))
        training = ChoiceItem('Training', label=_("Training"))
        labor = ChoiceItem('Labor', label=_("Labor"))
        marketing_communication = ChoiceItem('Marketing/Communcation', label=_("Marketing/Communcation"))
        administration_costs = ChoiceItem('Adminstration Costs', label=_("Adminstration Costs"))
        overhead = ChoiceItem('Overhead', label=_("Overhead"))
        other = ChoiceItem('Other', label=_("Other"))
    # SF Layout: Information section
    category = models.CharField(max_length=255, db_column='Category__c', choices=ProjectBudgetCategory.choices, help_text=_("Category"))
    # Costs are stored as text (not a numeric field) on the Salesforce side.
    costs = models.CharField(max_length=255, db_column='Costs__c')
    description = models.CharField(max_length=32000, db_column='Description__c')
    external_id = models.CharField(max_length=255, db_column='Project_Budget_External_ID__c')
    project = models.ForeignKey(SalesforceProject, db_column='Project__c')
    class Meta:
        db_table = 'Project_Budget__c'
        managed = False
# Maps raw payment-method identifiers (upstream gateway codes and legacy
# product slugs) onto the human-readable labels used on the Salesforce side.
# The empty-string key catches records with no payment method recorded.
payment_method_mapping = {
    'IDEAL': 'iDEAL',
    'MASTERCARD': 'Mastercard',
    'VISA': 'Visa',
    'DIRECT_DEBIT': 'Direct debit',
    'ideal-rabobank-1procentclub_nl': 'iDEAL',
    'paypal-1procentclub_nl': 'PayPal',
    'omnipay-ems-visa-1procentclub_nl': 'Visa',
    'banksys-mrcash-1procentclub_nl': 'Other',
    'ing-ideal-1procentclub_nl': 'iDEAL',
    'SOFORT_UEBERWEISUNG-SofortUeberweisung-1procentclub_nl': 'Other',
    'ideal-ing-1procentclub_nl': 'iDEAL',
    'system-banktransfer-nl': 'Bank transfer',
    'directdebitnc-online-nl': 'Direct debit',
    'directdebitnc2-online-nl': 'Direct debit',
    'omnipay-ems-maestro-1procentclub_nl': 'Other',
    '': 'Unknown',
    'omnipay-ems-mc-1procentclub_nl': 'Mastercard',
    'EBANKING': 'Other',
    'SOFORT_UEBERWEISUNG': 'Other',
    'MAESTRO': 'Other',
    'MISTERCASH': 'Other',
}
class SalesforceOpportunity(SalesforceModel):
    """
    Default abstract Salesforce Opportunity model. Used for Donation(s) / Voucher(s).
    """
    # SF Layout: Donation Information section.
    # Amount is stored as text here, not as a numeric/decimal field.
    amount = models.CharField(max_length=255, db_column='Amount')
    close_date = models.DateField(db_column='CloseDate')
    opportunity_type = models.CharField(max_length=40,
                                        db_column='Type')
    name = models.CharField(max_length=120, db_column='Name')
    # Expected to hold one of the payment_method_mapping target labels.
    payment_method = models.CharField(max_length=255,
                                      db_column='Payment_method__c',
                                      help_text=_("PaymentMethod"))
    project = models.ForeignKey(SalesforceProject, db_column='Project__c', null=True)
    stage_name = models.CharField(max_length=40,
                                  db_column='StageName')
    # Salesforce record type id; presumably distinguishes Donation vs Voucher
    # rows in the shared Opportunity table -- TODO confirm against SF setup.
    record_type = models.CharField(max_length=255, db_column='RecordTypeId')
    class Meta:
        abstract = True
        managed = False
class SalesforceDonation(SalesforceOpportunity):
    """
    Child of the Opportunity for Onepercentclub the mapping is named Donation(s).
    """
    # SF Layout: Donation Information section.
    # organization = models.ForeignKey(SalesforceOrganization, db_column='Project_Organization__c')
    # SF Layout: Additional Information section.
    # SF Layout: Description Information section.
    # SF Layout: System Information section.
    donation_created_date = models.DateField(db_column='Donation_created_date__c')
    # SF: Other.
    external_id_donation = models.CharField(max_length=255, db_column='Donation_External_ID__c')
    receiver = models.ForeignKey(SalesforceContact, db_column='Receiver__c', null=True)
    class Meta:
        managed = False
        # Shares the Opportunity table with SalesforceVoucher.
        db_table = 'Opportunity'
class SalesforceVoucher(SalesforceOpportunity):
    """
    Child of the Opportunity for Onepercentclub the mapping is named Voucher(s).
    """
    # SF Layout: Donation Information section.
    # Purchaser and receiver may differ: vouchers can be bought as a gift.
    purchaser = models.ForeignKey(SalesforceContact, db_column='Purchaser__c', related_name='contact_purchasers')
    # SF Layout: Additional Information section.
    description = models.CharField(max_length=32000, db_column='Description')
    # SF Layout: System Information section.
    receiver = models.ForeignKey(SalesforceContact, db_column='Receiver__c', related_name='contact_receivers', null=True)
    # SF Other.
    external_id_voucher = models.CharField(max_length=255, db_column='Voucher_External_ID__c')
    class Meta:
        managed = False
        # Shares the Opportunity table with SalesforceDonation.
        db_table = 'Opportunity'
class SalesforceTask(SalesforceModel):
    """
    Custom Salesforce onepercentclubTasks__c model. For Onepercentclub the mapping is named 1%CLUB Task(s).
    """
    class TaskStatus(DjangoChoices):
        open = ChoiceItem('Open', label=_("Open"))
        running = ChoiceItem('Running', label=_("Running"))
        closed = ChoiceItem('Closed', label=_("Closed"))
        realized = ChoiceItem('Realized', label=_("Realized"))
    # SF Layout: Information section.
    project = models.ForeignKey(SalesforceProject, db_column='Project__c')
    deadline = models.CharField(max_length=10000, db_column='Deadline__c')
    effort = models.CharField(max_length=10000, db_column='Effort__c')
    extended_task_description = models.CharField(max_length=32000, db_column='Extended_task_description__c')
    location_of_the_task = models.CharField(max_length=10000, db_column='Location_of_the_task__c')
    short_task_description = models.CharField(max_length=10000, db_column='Short_task_description__c')
    task_expertise = models.CharField(max_length=10000, db_column='Task_expertise__c')
    task_status = models.CharField(max_length=40, db_column='Task_status__c', choices=TaskStatus.choices, help_text=_("TaskStatus"))
    title = models.CharField(max_length=255, db_column='Title__c')
    # max_length is only meaningful on CharField; it was dropped from this
    # DateField (Django ignores it there and flags it in system checks).
    task_created_date = models.DateField(db_column='Task_created_date__c')
    tags = models.CharField(max_length=400, db_column='Tags__c')
    # SF Layout: System Information section.
    # SF: Additional requirement not implemented yet - SFDC - Sheet 1
    # max_length dropped: invalid/ignored on IntegerField (check fields.W122).
    effort_in_hours_del = models.PositiveIntegerField(db_column='EffortInHours_del__c')
    # SF: Other
    external_id = models.CharField(max_length=255, db_column='Task_External_ID__c')
    class Meta:
        db_table = 'onepercentclubTasks__c'
        managed = False
class SalesforceTaskMembers(SalesforceModel):
    """
    Custom Salesforce Task_Members__c model. For Onepercentclub the mapping is named Task Member(s).
    The table is used as a joined table which relates to Tasks to the Contacts.
    """
    # SF Layout: Information section.
    contacts = models.ForeignKey(SalesforceContact, db_column='Contacts__c')
    # "X1" prefix: Salesforce column names cannot start with a digit ("1%CLUB").
    x1_club_task = models.ForeignKey(SalesforceTask, db_column='X1_CLUB_Task__c')
    external_id = models.CharField(max_length=100, db_column='Task_Member_External_ID__c')
    class Meta:
        db_table = 'Task_Members__c'
        managed = False
class SalesforceLoginHistory(SalesforceModel):
    """
    Custom X1_CLUB_Login_History__c model. For Onepercentclub the mapping is named 1%CLUB Login History.
    New mapping to be added later on.
    """
    # SF: Additional requirement not implemented yet - Website (back office) - Sheet 3
    # NOTE: max_length kwargs were removed from the integer fields below; the
    # option is only meaningful on CharField and Django's system checks flag
    # it as ignored on IntegerField (fields.W122). No schema change: managed=False.
    bounce_rate_from_first_page = models.CharField(max_length=6, db_column='Bounce_rate_from_first_page__c')
    contacts = models.ForeignKey(SalesforceContact, db_column='Contacts__c')
    engagement_on_facebook = models.PositiveIntegerField(db_column='Engagement_on_Facebook__c')
    engagement_on_twitter = models.PositiveIntegerField(db_column='Engagement_on_Twitter__c')
    number_of_pageviews = models.PositiveIntegerField(db_column='Number_of_pageviews__c')
    online_engagement_blogs = models.PositiveIntegerField(db_column='Online_engagement_blogs__c')
    online_engagement_projects = models.PositiveIntegerField(db_column='Online_engagement_projects__c')
    online_engagement_reactions_to_members = models.PositiveIntegerField(db_column='Online_engagement_reactions_to_members__c')
    online_engagement_tasks = models.PositiveIntegerField(db_column='Online_engagement_tasks__c')
    preferred_navigation_path = models.PositiveIntegerField(db_column='Preferred_navigation_path__c')
    shares_via_social_media = models.PositiveIntegerField(db_column='Shares_via_social_media__c')
    size_of_basket = models.PositiveIntegerField(db_column='Size_of_basket__c')
    time_on_website = models.PositiveIntegerField(db_column='Time_on_website__c')
    class Meta:
        db_table = 'X1_CLUB_Login_History__c'
        managed = False
# Other Salesforce models available from Force.com IDE (Eclipse based)
# - ActivityHistory, AdditionalNumber, AggregateResult
# - ApexClass, ApexComponent, ApexLog, ApexTestQueueItem, ApexTestResult, ApexTrigger
# - Approval, Asset, AssetFeed, AssignmentRule, AsyncApexJob, Attachment, AuthProvider
# - BrandTemplate, Bug_Feed, Bug__c, BusinessHours, BusinessProcess
# - CallCenter, Campaign, CampaignFeed, CampaignMember, CampaignMemberStatus, CampaignShare
# - Case, CaseComment, CaseContactRole, CaseFeed, CaseHistory, CaseShare, CaseSolution, CaseTeamMember
# - CaseTeamRole, CaseTeamTemplate, CaseTeamTemplateMember, CaseTeamTemplateRecord
# - CategoryData, CategoryNode, CategoryNodeLocalization, ChatterActivity, ClientBrowser
# - CollaborationGroup, CollaborationGroupFeed, CollaborationGroupMember, CollaborationGroupMemberRequest
# And so on
| |
#!/usr/bin/python -Wall
# ================================================================
# Please see LICENSE.txt in the same directory as this file.
# John Kerl
# kerl.john.r@gmail.com
# 2007-05-31
# ================================================================
import sys
import re
import copy
# ================================================================
# Type module for complex group algebras CG, with sparse storage.
#
# Initial attempt at complex group algebras CG for small finite groups G. This
# could, conceivably, be generalized to FG (for arbitrary user-specified
# fields) or RG (for arbitrary user-specified rings).
#
# There are two possible storage representations for an element of CG:
# * Sparse: Keep a list of non-zero coefficients, with their
# corresponding group elements.
# * Dense: Keep a list of group elements in each algebra element, with
# another list of coefficients.
#
# For now I will attempt the former. A "pair" is a two-element list of
# coefficient and group element; an algebra element is a list of pairs.
#
# ================================================================
# John Kerl
# 2007-05-08
# ================================================================
class cgpalg_t:
    """
    Sparse element of the complex group algebra CG for a finite group G.

    Storage is a list of "pairs"; each pair is a two-element list
    [coefficient, group_element] and only non-zero terms are kept.
    Group elements must support ``*`` (group multiplication) and ``==``;
    inv()/division additionally require an ``inv()`` method on them.
    """

    def __init__(self, pairs_array):
        # Deep-copy so this element never shares mutable pair lists with
        # its source.
        self.pairs = copy.deepcopy(pairs_array)

    def index_of(self, g):
        """Return [1, k] if group element g occurs at pair index k, else [0, 0]."""
        for k in range(0, len(self.pairs)):
            if g == self.pairs[k][1]:
                return [1, k]
        return [0, 0]

    def zero_strip(self):
        """Drop pairs whose coefficient is exactly zero (in place)."""
        self.pairs = [pair for pair in self.pairs if pair[0] != 0]

    def to_coef_array(self, group_elements):
        """
        Dense extraction of coefficients: given a list of group elements,
        return the coefficients in the same order (0 for absent elements).
        This makes it possible to hand the results off to a linear-algebra
        routine.
        """
        coefs = []
        for g in group_elements:
            [found, k] = self.index_of(g)
            coefs.append(self.pairs[k][0] if found else 0)
        return coefs

    def __add__(a, b):
        """Sum: concatenate the pair lists, merging pairs with equal group elements."""
        c = cgpalg_t([])
        for pair in copy.deepcopy(a.pairs + b.pairs):
            [found, k] = c.index_of(pair[1])
            if found:
                c.pairs[k][0] += pair[0]  # update existing coefficient
            else:
                c.pairs.append(pair)      # insert new pair
        return c

    def __neg__(b):
        """Negate every coefficient (returns a new element)."""
        negb = cgpalg_t(b.pairs)  # constructor deep-copies
        for pair in negb.pairs:
            pair[0] = -pair[0]
        return negb

    def __sub__(a, b):
        return a + (-b)

    def __mul__(a, b):
        """Convolution product: field-multiply coefficients, group-multiply elements."""
        c = cgpalg_t([])
        for ap in a.pairs:
            for bp in b.pairs:
                ccoef = ap[0] * bp[0]   # Field multiplication
                cgpelt = ap[1] * bp[1]  # Group multiplication
                [found, k] = c.index_of(cgpelt)
                if found:
                    c.pairs[k][0] += ccoef
                else:
                    c.pairs.append([ccoef, cgpelt])
        c.zero_strip()
        return c

    # The group data type must support the inv() method.
    def inv(self):
        """
        Multiplicative inverse.  NOTE: stub implementation -- only correct
        for singletons c*g (a single non-zero pair); kept as-is pending a
        proper algorithm.
        """
        bi = cgpalg_t([])
        n = len(self.pairs)
        if n == 0:
            print("cgpalg_t.inv: division by zero.")
            sys.exit(1)
        recip_n = 1.0 / n
        for pair in self.pairs:
            bi.pairs.append([recip_n / pair[0], pair[1].inv()])
        return bi

    def __div__(a, b):
        return a * b.inv()

    # Python 3 dispatches the / operator to __truediv__; without this alias
    # the __div__ above was unreachable under Python 3.
    __truediv__ = __div__

    def __str__(self):
        """Render as space-separated '[coef]*[element]' terms; '0' when empty."""
        if not self.pairs:
            return "0"
        return " ".join("[%s]*[%s]" % (str(pair[0]), str(pair[1]))
                        for pair in self.pairs)

    def __repr__(self):
        return self.__str__()
def from_pmtns(pmtn_array):
    """Build an element of C S_n from a list of permutations, giving each
    permutation coefficient 1."""
    return cgpalg_t([[1, pmtn] for pmtn in pmtn_array])
def from_pmtns_with_parity(pmtn_array):
    """Build an element of C S_n from a list of permutations, using each
    permutation's parity as its coefficient.  The group class in use must
    support the sgn() method."""
    return cgpalg_t([[pmtn.sgn(), pmtn] for pmtn in pmtn_array])
#def params_from_string(params_string):
# if (len(params_string) == 0):
# print "Modadd requires non-empty parameter string"
# sys.exit(1)
# cgpalg_strings = re.split(',', params_string)
# n = len(cgpalg_strings)
# cgpalg_array = range(0, n)
# for i in range(0, n):
# cgpalg_array[i] = int(cgpalg_strings[i])
# return cgpalg_array
#def from_string(value_string, params_string):
# cgpalg_array = params_from_string(params_string)
# obj = cgpalg_t([1], [1])
# obj.scan(value_string, cgpalg_array)
# return obj
# ================================================================
import unittest
if __name__ == '__main__':
    # Regression-test scaffolding: one stub per public method of cgpalg_t
    # and the module-level constructors.  All bodies are placeholders.
    class test_cases(unittest.TestCase):
        def test___init__(self):
            pass # to be implemented
        def test_index_of(self):
            pass # to be implemented
        def test_zero_strip(self):
            pass # to be implemented
        def test_to_coef_array(self):
            pass # to be implemented
        def test___add__(self):
            pass # to be implemented
        def test___neg__(self):
            pass # to be implemented
        def test___sub__(self):
            pass # to be implemented
        def test___mul__(self):
            pass # to be implemented
        def test_inv(self):
            pass # to be implemented
        def test___div__(self):
            pass # to be implemented
        def test___str__(self):
            pass # to be implemented
        def test___repr__(self):
            pass # to be implemented
        def test_from_pmtns(self):
            pass # to be implemented
        def test_from_pmtns_with_parity(self):
            pass # to be implemented
    # ----------------------------------------------------------------
    unittest.main()
| |
#!/usr/bin/python3
"""Detect gamepads and show their state on Linux."""
import os
import datetime
import queue
import struct
import glob
import ctypes
import fcntl
import traceback
import array
import asyncio
import select
import urwid
import pyudev
import evdev
import sdl2
# Event type codes from the Linux joystick (js) API -- see linux/joystick.h.
JS_EVENT_BUTTON = 0x01 # button pressed/released
JS_EVENT_AXIS = 0x02 # joystick moved
JS_EVENT_INIT = 0x80 # initial state of device
# pylint: disable=no-member
# Key codes whose presence in a device's EV_KEY capabilities marks it as a
# gamepad/joystick (see scan_evdev_gamepads).
GAMEPAD_BUTTONS = (evdev.ecodes.BTN_A,
                   evdev.ecodes.BTN_B,
                   evdev.ecodes.BTN_X,
                   evdev.ecodes.BTN_Y,
                   evdev.ecodes.BTN_Z,
                   evdev.ecodes.BTN_BACK,
                   evdev.ecodes.BTN_SELECT,
                   evdev.ecodes.BTN_START,
                   evdev.ecodes.BTN_DPAD_DOWN,
                   evdev.ecodes.BTN_DPAD_LEFT,
                   evdev.ecodes.BTN_DPAD_RIGHT,
                   evdev.ecodes.BTN_DPAD_UP,
                   evdev.ecodes.BTN_GAMEPAD,
                   evdev.ecodes.BTN_JOYSTICK,
                   evdev.ecodes.BTN_NORTH,
                   evdev.ecodes.BTN_SOUTH,
                   evdev.ecodes.BTN_EAST,
                   evdev.ecodes.BTN_WEST,
                   evdev.ecodes.BTN_THUMB,
                   evdev.ecodes.BTN_THUMB2,
                   evdev.ecodes.BTN_THUMBL,
                   evdev.ecodes.BTN_THUMBR)
# Reverse map: evdev key code -> name with the 4-char 'BTN_'/'KEY_' prefix cut.
BUTTON_NAMES = {v: k[4:] for k, v in evdev.ecodes.ecodes.items()}
# Registry of detected devices keyed by device node path; each value is a dict
# holding per-backend data under the keys 'evdev', 'jsio', 'pygame', 'sdl2'.
INPUT_DEVICES = {}
def scan_evdev_gamepads():
    """Scan evdev input devices and register those that look like gamepads.

    A device qualifies when it reports both absolute axes (EV_ABS) and keys
    (EV_KEY), and at least one of its keys is in GAMEPAD_BUTTONS.  Matches are
    stored in INPUT_DEVICES under their /dev/input/event* path.
    """
    # Remove old evdev entries; still-present devices are re-added below.
    global INPUT_DEVICES  # pylint: disable=global-statement
    INPUT_DEVICES = {fn: INPUT_DEVICES[fn] for fn in INPUT_DEVICES if not fn.startswith('/dev/input/event')}
    devs = []
    for fn in evdev.list_devices():
        try:
            d = evdev.InputDevice(fn)
        except Exception:  # noqa: device may have vanished or be unreadable
            # TODO trace here what happened (was a bare except:, which also
            # swallowed KeyboardInterrupt/SystemExit).
            continue
        # Skip device nodes we have already collected in this pass.
        if any(dd.fn == d.fn for dd in devs):
            continue
        caps = d.capabilities()
        if evdev.ecodes.EV_ABS in caps and evdev.ecodes.EV_KEY in caps:
            keys = caps[evdev.ecodes.EV_KEY]
            if any(k in keys for k in GAMEPAD_BUTTONS):
                devs.append(d)
                if d.fn not in INPUT_DEVICES:
                    INPUT_DEVICES[d.fn] = {}
                INPUT_DEVICES[d.fn]['evdev'] = d
def present_evdev_gamepad(dev):
    """Generate description of evdev gamepads for urwid."""
    caps = dev.capabilities()
    text = [('emph', "EVDEV:",)]
    text.append(" name: '%s'" % dev.name)
    text.append(' file: %s' % dev.fn)
    text.append(' phys: %s' % dev.phys)
    if evdev.ecodes.EV_ABS in caps:
        axis_names = [evdev.ecodes.ABS[axis[0]][4:]
                      for axis in caps[evdev.ecodes.EV_ABS]]
        text.append(' axes: ' + ", ".join(axis_names))
    if evdev.ecodes.EV_KEY in caps:
        # Build urwid markup: gamepad buttons get the 'key' attribute,
        # other keys stay plain; entries are comma-separated.
        markup = [' buttons: ']
        for code in caps[evdev.ecodes.EV_KEY]:
            if code in GAMEPAD_BUTTONS:
                markup.append(('key', BUTTON_NAMES[code]))
            else:
                markup.append(BUTTON_NAMES[code])
            markup.append(', ')
        text.append(markup[:-1])  # drop the trailing separator
    text.append(' %s' % str(dev.info))
    # TODO: add SDL2 id
    return text
def scan_jsio_gamepads():
    """Scan /dev/input/js* devices via the legacy joystick API.

    For each readable device, queries axis/button counts, driver version and
    name through ioctls and stores the result in INPUT_DEVICES under the
    device path ('jsio' key).
    """
    # Remove old js entries; still-present devices are re-added below.
    global INPUT_DEVICES  # pylint: disable=global-statement
    INPUT_DEVICES = {fn: INPUT_DEVICES[fn] for fn in INPUT_DEVICES if not fn.startswith('/dev/input/js')}
    syspaths = glob.glob("/dev/input/js*")
    # joystick ioctl request numbers, pylint: disable=invalid-name
    JSIOCGVERSION = 0x80046a01
    JSIOCGAXES = 0x80016a11
    JSIOCGBUTTONS = 0x80016a12
    JSIOCGNAME = 0x81006a13
    for fn in syspaths:
        data = dict(path=fn)
        try:
            with open(fn, "r") as jsfile:
                fcntl.fcntl(jsfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
                val = ctypes.c_int()
                if fcntl.ioctl(jsfile.fileno(), JSIOCGAXES, val) != 0:
                    print("Failed to read number of axes")
                else:
                    data['axes'] = val.value
                if fcntl.ioctl(jsfile.fileno(), JSIOCGBUTTONS, val) != 0:
                    # Fixed copy-paste: this branch previously reported
                    # "number of axes" for the buttons ioctl.
                    print("Failed to read number of buttons")
                else:
                    data['buttons'] = val.value
                if fcntl.ioctl(jsfile.fileno(), JSIOCGVERSION, val) != 0:
                    print("Failed to read version")
                else:
                    data['version'] = '0x%x' % val.value
                # JSIOCGNAME encodes the buffer length in the request number.
                buf = array.array('b', [0] * 64)
                fcntl.ioctl(jsfile.fileno(), JSIOCGNAME + (0x10000 * len(buf)), buf)
                data['name'] = str(buf.tobytes(), 'utf-8').rstrip("\x00")
                if fn not in INPUT_DEVICES:
                    INPUT_DEVICES[fn] = {}
                INPUT_DEVICES[fn]['jsio'] = data
        except PermissionError:
            pass  # TODO: show errors on some status bar or logs panel
        except Exception:  # was a bare except: -- keep scanning other devices
            print(traceback.format_exc())
def present_jsio_gamepad(data):
    """Generate description of jsio gamepads for urwid."""
    lines = [('emph', "JSIO:",)]
    for key, value in data.items():
        label = key.lower()
        if label == 'name':
            value = "'%s'" % value  # quote the device name for readability
        lines.append(' %s: %s' % (label, value))
    return lines
def scan_pygame_gamepads():
    """Scan for pygame gamepads."""
    import pygame  # pylint: disable=import-error
    pygame.init()
    pygame.joystick.init()
    for idx in range(pygame.joystick.get_count()):
        joy = pygame.joystick.Joystick(idx)
        joy.init()
        joy_name = joy.get_name().strip()
        # Attach the pygame joystick to the matching jsio entry (matched by
        # name prefix, since pygame does not expose the device path).
        for entry in INPUT_DEVICES.values():
            if 'jsio' not in entry:
                continue
            if entry['jsio']['name'].strip().startswith(joy_name):
                entry['pygame'] = joy
def present_pygame_gamepad(data):
    """Generate description of pygame gamepads for urwid."""
    return [
        ('emph', "PyGame:",),
        ' name: %s' % data.get_name(),
        ' id: %s' % data.get_id(),
        ' numaxes: %s' % data.get_numaxes(),
        ' numballs: %s' % data.get_numballs(),
        ' numbuttons: %s' % data.get_numbuttons(),
    ]
def scan_sdl2_gamepads():
    """Scan for sdl2 gamepads."""
    sdl2.SDL_Init(sdl2.SDL_INIT_JOYSTICK | sdl2.SDL_INIT_GAMECONTROLLER)
    for idx in range(sdl2.joystick.SDL_NumJoysticks()):
        joy = sdl2.joystick.SDL_JoystickOpen(idx)
        joy_name = str(sdl2.SDL_JoystickName(joy).strip(), 'utf-8')
        # Attach the SDL2 joystick to the matching evdev entry (matched by
        # name prefix, since SDL2 does not expose the device path here).
        for entry in INPUT_DEVICES.values():
            if 'evdev' not in entry:
                continue
            if entry['evdev'].name.startswith(joy_name):
                entry['sdl2'] = joy
def sdl_joystickgetguidstring(guid):
    """Render an SDL2 joystick GUID (16 raw bytes) as a lowercase hex string."""
    return "".join("%02x" % byte for byte in guid.data)
def present_sdl2_gamepad(j):
"""Generate description of sdl2 gamepads for urwid."""
text = [('emph', "SDL2:",)]
text.append(' guid: %s' % sdl_joystickgetguidstring(sdl2.joystick.SDL_JoystickGetGUID(j)))
text.append(' id: %s' % sdl2.joystick.SDL_JoystickInstanceID(j))
text.append(' NumAxes: %s' % sdl2.joystick.SDL_JoystickNumAxes(j))
text.append(' NumBalls: %s' % sdl2.joystick.SDL_JoystickNumBalls(j))
text.append(' NumButtons: %s' % sdl2.joystick.SDL_JoystickNumButtons(j))
text.append(' NumHats: %s' % sdl2.joystick.SDL_JoystickNumHats(j))
return text
class DeviceTreeWidget(urwid.TreeWidget):
    """Display widget for leaf nodes."""
    def get_display_text(self):
        node_value = self.get_node().get_value()
        return node_value['name']
class DeviceNode(urwid.TreeNode):
    """Data storage object for leaf nodes."""
    def load_widget(self):
        widget = DeviceTreeWidget(self)
        return widget
class DeviceParentNode(urwid.ParentNode):
    """Data storage object for interior/parent nodes."""
    def load_widget(self):
        return DeviceTreeWidget(self)
    def load_child_keys(self):
        # Children are addressed by their list index.
        children = self.get_value()['children']
        return list(range(len(children)))
    def load_child_node(self, key):
        """Return either a DeviceNode or a DeviceParentNode for child `key`."""
        child = self.get_value()['children'][key]
        node_class = DeviceParentNode if 'children' in child else DeviceNode
        return node_class(child, parent=self, key=key, depth=self.get_depth() + 1)
class DevicesTree(urwid.TreeListBox):
    """Tree list box that invokes a callback with the device of the node that
    gains focus."""
    def __init__(self, *args, **kwargs):
        self.node_visited_cb = kwargs.pop('node_visited_cb')
        super(DevicesTree, self).__init__(*args, **kwargs)
    def change_focus(self, *args, **kwargs):
        super(DevicesTree, self).change_focus(*args, **kwargs)
        _widget, node = self.get_focus()
        self.node_visited_cb(node.get_value()['dev'])
class DeviceBox(urwid.LineBox):
    # Right-hand pane: a scrollable list of everything known about the
    # currently selected udev device.
    def __init__(self):
        self.lines = urwid.SimpleFocusListWalker([])
        self.lines_box = urwid.ListBox(self.lines)
        super(DeviceBox, self).__init__(self.lines_box, 'Dev Box: [select device]')
        self.device = None
    def show_device(self, device):
        """Render `device` (a udev device mapping, or None/falsy to clear)."""
        self.device = device
        text = []
        if device:
            # Merge in per-backend info gathered by the scan_* functions,
            # keyed by the device node path (DEVNAME).
            if 'DEVNAME' in device and device['DEVNAME'] in INPUT_DEVICES:
                data = INPUT_DEVICES[device['DEVNAME']]
                if 'sdl2' in data:
                    text += present_sdl2_gamepad(data['sdl2'])
                if 'evdev' in data:
                    text += present_evdev_gamepad(data['evdev'])
                if 'pygame' in data:
                    text += present_pygame_gamepad(data['pygame'])
                if 'jsio' in data:
                    text += present_jsio_gamepad(data['jsio'])
            # Raw udev properties always come last.
            text.append(('emph', "UDEV:"))
            for k in list(device.keys()):
                text.append(" %s: %s" % (k, device[k]))
            self.set_title('Dev Box: ' + device.sys_path)
        else:
            self.set_title('Dev Box: [select device]')
        elems = [urwid.Text(t) for t in text]
        self.lines[:] = elems
        if elems:
            # Reset scroll position so the new device's info starts at the top.
            self.lines_box.focus_position = 0
class Udev(object):
    """Enumerates udev devices and monitors hot-plug events.

    Events arriving on the pyudev observer thread are handed to the UI
    thread through ``ui_queue``, with a write to a wakeup pipe to rouse
    the urwid main loop.
    """
    def __init__(self, ui_queue):
        self.ui_queue = ui_queue
        self.ctx = pyudev.Context()
        self.ui_wakeup_fd = None
        self.monitor = None
        self.observer = None
    def send_event_to_ui_thread(self, action, device):
        """Queue a udev (action, device) event and poke the UI wakeup pipe."""
        self.ui_queue.put((action, device))
        os.write(self.ui_wakeup_fd, b'a')
    def _find_parents(self, dev):
        """Return [dev, dev.parent, ...] up to the root; [] when dev is None.

        The previous recursive version raised AttributeError when called
        with None (a device without a parent); now iterative with a guard.
        """
        chain = []
        while dev is not None:
            chain.append(dev)
            dev = dev.parent
        return chain
    def get_devs(self):
        """Return (all devices by sys_path, root ancestors of joystick devices,
        sys_paths belonging to some joystick's ancestor chain)."""
        devs = {}
        roots = set()
        in_joystick_chain = []
        for device in self.ctx.list_devices():
            devs[device.sys_path] = device
            # A device is "interesting" when udev tags it as a joystick or
            # when one of our backends already registered its device node.
            if ('ID_INPUT_JOYSTICK' in device and device['ID_INPUT_JOYSTICK']) or ('DEVNAME' in device and device['DEVNAME'] in INPUT_DEVICES):
                in_joystick_chain.append(device.sys_path)
                for anc in self._find_parents(device.parent):
                    in_joystick_chain.append(anc.sys_path)
                    if anc.parent is None:
                        roots.add(anc)
        return devs, roots, in_joystick_chain
    def get_subtree(self, dev, in_joystick_chain, parent):
        """Build a nested {'name', 'dev', 'children'} dict for `dev`, pruned to
        nodes on a joystick ancestor chain; None when `dev` is pruned."""
        if dev.sys_path not in in_joystick_chain:
            return None
        if parent:
            # Show only the path suffix relative to the parent node.
            name = dev.sys_path.replace(parent.sys_path, '')
        else:
            name = dev.sys_path
        result = {"name": name, "dev": dev, "children": []}
        for d in dev.children:
            # dev.children yields all descendants; keep direct children only.
            if d.parent.sys_path != dev.sys_path:
                continue
            st = self.get_subtree(d, in_joystick_chain, dev)
            if st:
                result['children'].append(st)
        return result
    def get_dev_tree(self):
        """Rescan all backends and return the pruned device tree for the UI."""
        scan_evdev_gamepads()
        scan_jsio_gamepads()
        # scan_pygame_gamepads() # TODO: missing pygame for python3
        scan_sdl2_gamepads()
        _, roots, in_joystick_chain = self.get_devs()
        result = {"name": "root", "dev": None, "children": []}
        for r in roots:
            st = self.get_subtree(r, in_joystick_chain, None)
            if st:
                result['children'].append(st)
        return result
    def setup_monitor(self, ui_wakeup_fd):
        """Start a background pyudev observer that forwards events to the UI."""
        self.ui_wakeup_fd = ui_wakeup_fd
        self.monitor = pyudev.Monitor.from_netlink(self.ctx)
        self.observer = pyudev.MonitorObserver(self.monitor, self.send_event_to_ui_thread)
        self.observer.start()
class GamePadStateBox(urwid.Text):
    # Bottom pane widget: renders the live button/axis state of the
    # currently selected gamepad.
    def __init__(self, *args, **kwargs):
        super(GamePadStateBox, self).__init__(*args, **kwargs)
        # Caches keyed by button/axis identifier, filled from events.
        self.buttons = {}
        self.axes = {}
    def update_state(self, source, device, event):
        """Dispatch an input event to the per-backend state renderer."""
        if source == 'evdev':
            self._update_evdev_state(device, event)
        elif source == 'jsio':
            self._update_jsio_state(device, event)
    def _update_evdev_state(self, device, event):
        # Active buttons come straight from the device; entries whose verbose
        # name is unknown ('?') are resolved through BUTTON_NAMES instead.
        buttons = [BUTTON_NAMES[k[1]] if k[0] == '?' else k[0] for k in device.active_keys(verbose=True)]
        text = "Buttons: %s\n" % ", ".join(buttons)
        if event.type == evdev.ecodes.EV_ABS:
            self.axes[event.code] = event.value
        caps = device.capabilities()
        axes_caps = {}
        for a, info in caps[evdev.ecodes.EV_ABS]:
            axes_caps[a] = info
        text += "Axes:\n"
        # Show each seen axis as value/maximum.
        for c, val in self.axes.items():
            text += " %s: %d/%d\n" % (evdev.ecodes.ABS[c][4:], val, axes_caps[c].max)
        self.set_text(text)
    def _update_jsio_state(self, device, event):
        # pylint: disable=unused-argument
        # js events are dicts with 'type', 'number' and 'value' keys.
        if event['type'] == JS_EVENT_BUTTON:
            if event['value'] == 1:
                self.buttons[event['number']] = event['value']
            else:
                if event['number'] in self.buttons:
                    del self.buttons[event['number']]
        elif event['type'] == JS_EVENT_AXIS:
            self.axes[event['number']] = event['value']
        buttons = [str(b) for b in self.buttons.keys()]
        text = "Buttons: %s\n" % ", ".join(buttons)
        text += "Axes:\n"
        for a, v in self.axes.items():
            text += " %s: %d\n" % (a, v)
        self.set_text(text)
class MyAsyncioEventLoop(urwid.AsyncioEventLoop):
    """Asyncio event loop wrapper that logs and re-raises callback exceptions."""
    def run(self):
        """
        Start the event loop. Exit the loop when any callback raises
        an exception. If ExitMainLoop is raised, exit cleanly.
        """
        self._loop.set_exception_handler(self._exception_handler)
        self._loop.run_forever()
        if self._exc_info:
            exc_info = self._exc_info
            self._exc_info = None
            # Keep the debug audit trail, but close the handle instead of
            # leaking it (was open(...).write(...)).
            with open('a.log', 'a') as log_file:
                log_file.write(str(exc_info) + '\n')
            if exc_info[1]:
                # Preserve the original traceback when re-raising.
                raise exc_info[1].with_traceback(exc_info[2])
class ConsoleUI(object):
    """Interactive urwid front-end showing a udev devices tree, a device
    detail pane, and a switchable bottom pane (log box / gamepad state).

    Wiring overview:
      * udev hot-plug events -> self.udev_queue plus one wake-up byte on a
        pipe -> handle_udev_event() runs on the urwid loop.
      * selecting a tree node -> node_visited() -> starts an asyncio task
        reading evdev or joystick-io events for that device.
    """
    # pylint: disable=too-many-instance-attributes
    # urwid display attributes: (name, foreground, background[, setting]).
    palette = [
        ('body', 'black', 'light gray'),
        ('normal', 'light gray', ''),
        ('focus', 'white', 'black'),
        ('head', 'yellow', 'black', 'standout'),
        ('foot', 'light gray', 'black'),
        ('key', 'light cyan', 'black', 'underline'),
        ('title', 'white', 'black', 'bold'),
        ('flag', 'dark gray', 'light gray'),
        ('error', 'dark red', 'light gray'),
        ('emph', 'yellow', ''),
        ('dim', 'light gray', 'black'),
    ]
    # Footer key-hint markup, indexed by self.focus_pane
    # (0 = devices tree, 1 = device box, 2 = log box).
    footer_texts = [[
        # focused devs tree
        ('key', "TAB"), ":Change focused pane ",
        ('key', "DOWN"), ",",
        ('key', "UP"), ",",
        ('key', "PAGE UP"), ",",
        ('key', "PAGE DOWN"), ",",
        ('key', "+"), ",",
        ('key', "-"), ",",
        ('key', "LEFT"), ",",
        ('key', "HOME"), ",",
        ('key', "END"), ":Navigate Devices Tree and select device ",
        ('key', "F1"), ":Help ",
        ('key', "F2"), ":Switch Log Box/GamePad State ",
        ('key', "ESC"), ",",
        ('key', "Q"), ":Quit"
    ], [
        # focused dev box
        ('key', "TAB"), ":Change focused pane ",
        ('key', "DOWN"), ",",
        ('key', "UP"), ",",
        ('key', "PAGE UP"), ",",
        ('key', "PAGE DOWN"), ":Scroll Dev Box content ",
        ('key', "F1"), ":Help ",
        ('key', "F2"), ":Switch Log Box/GamePad State ",
        ('key', "ESC"), ",",
        ('key', "Q"), ":Quit"
    ], [
        # focused log box
        ('key', "TAB"), ":Change focused pane ",
        ('key', "DOWN"), ",",
        ('key', "UP"), ",",
        ('key', "PAGE UP"), ",",
        ('key', "PAGE DOWN"), ":Scroll Log Box content ",
        ('key', "F1"), ":Help ",
        ('key', "F2"), ":Switch Log Box/GamePad State ",
        ('key', "ESC"), ",",
        ('key', "Q"), ":Quit"
    ]]
    def __init__(self):
        """Build all widgets, start udev monitoring, and create the
        asyncio-backed urwid main loop (does not start it; see main())."""
        self.udev_queue = queue.Queue()
        self.udev = Udev(self.udev_queue)
        # log box
        self.log_list = urwid.SimpleFocusListWalker([])
        self.log_list.append(urwid.Text(('dim', '%s: event monitoring started' % datetime.datetime.now())))
        self.log_box = urwid.ListBox(self.log_list)
        self.log_box_wrap = urwid.AttrMap(urwid.LineBox(self.log_box, 'Log Box'), 'normal', 'focus')
        # gampad state box
        self.gamepad_state_box = GamePadStateBox("-")
        self.gamepad_state_box_wrap = urwid.AttrMap(urwid.LineBox(urwid.Filler(self.gamepad_state_box, valign='top'), 'GamePad State Box'), 'normal', 'focus')
        # dev box
        self.dev_box = DeviceBox()
        self.dev_box_wrap = urwid.AttrMap(self.dev_box, 'normal', 'focus')
        # Left column starts as a placeholder; refresh_devs_tree() replaces it.
        self.cols = urwid.Columns([urwid.Filler(urwid.Text('placeholder')),
                                   self.dev_box_wrap])
        # dev tree
        self.refresh_devs_tree() # invoke after creating cols
        # The two widgets the bottom pane can show; F2 toggles between them.
        self.bottom_elems = [self.log_box_wrap, self.gamepad_state_box_wrap]
        self.bottom_elem_idx = 0
        self.pile = urwid.Pile([self.cols,
                                self.bottom_elems[self.bottom_elem_idx]])
        self.view = urwid.Frame(
            self.pile,
            header=urwid.AttrWrap(urwid.Text(" -= GamePad Info =-"), 'head'),
            footer=urwid.AttrWrap(urwid.Text(self.footer_texts[0]), 'foot'))
        self.aloop = asyncio.get_event_loop()
        evl = MyAsyncioEventLoop(loop=self.aloop)
        self.loop = urwid.MainLoop(self.view, self.palette, event_loop=evl,
                                   unhandled_input=self.unhandled_input)
        # 0 = devices tree, 1 = dev box, 2 = log box (see footer_texts).
        self.focus_pane = 0
        self.pile.focus_position = 0
        self.cols.focus_position = 0
        # Writing a byte to this pipe wakes the UI loop and invokes
        # handle_udev_event with the bytes written.
        self.ui_wakeup_fd = self.loop.watch_pipe(self.handle_udev_event)
        self.udev.setup_monitor(self.ui_wakeup_fd)
        # Currently running event-reader task and monitored device (if any).
        self.evdev_events_handler_task = None
        self.selected_evdev_device = None
        self.jsio_events_handler_task = None
        self.selected_jsio_device = None
    def main(self):
        """Run the program."""
        self.loop.run()
    def switch_bottom_elem(self):
        """Toggle the bottom pane between the log box and the gamepad-state box."""
        self.bottom_elem_idx = 1 - self.bottom_elem_idx
        self.pile.contents[1] = (self.bottom_elems[self.bottom_elem_idx], ('weight', 1))
    def unhandled_input(self, k):
        """Global key handler: quit, cycle the focused pane, toggle bottom pane."""
        if k in ('q', 'Q', 'esc'):
            raise urwid.ExitMainLoop()
        elif k == 'tab':
            if self.focus_pane == 0:
                # devs tree -> dev box
                self.cols.focus_position = 1
                self.focus_pane = 1
            elif self.focus_pane == 1:
                # dev box -> logs
                self.pile.focus_position = 1
                self.focus_pane = 2
            else:
                # logs -> devs tree
                self.pile.focus_position = 0
                self.cols.focus_position = 0
                self.focus_pane = 0
            # Footer hints follow the focused pane.
            self.view.footer = urwid.AttrWrap(urwid.Text(self.footer_texts[self.focus_pane]), 'foot')
        elif k == 'f2':
            self.switch_bottom_elem()
        # else:
        #     self.log(k)
    def log(self, text):
        """Append a timestamped line to the log box and scroll to it."""
        entry = '%s: %s' % (datetime.datetime.now(), text)
        self.log_list.append(urwid.Text(entry))
        self.log_box.focus_position = len(self.log_list) - 1
    def handle_udev_event(self, data):
        """Pipe callback: one queued udev (action, device) pair is drained per
        wake-up byte received; each is logged and the tree is rebuilt."""
        for _ in data:
            (action, device) = self.udev_queue.get(block=False)
            entry = '%8s - %s' % (action, device.sys_path)
            self.log(entry)
        self.refresh_devs_tree()
    def refresh_devs_tree(self):
        """Rebuild the devices-tree widget from the current udev tree and
        install it as the left column."""
        devtree = self.udev.get_dev_tree()
        self.topnode = DeviceParentNode(devtree)
        self.listbox = DevicesTree(urwid.TreeWalker(self.topnode), node_visited_cb=self.node_visited)
        self.listbox.offset_rows = 1
        self.devs_tree = urwid.LineBox(self.listbox, 'Devices Tree')
        self.devs_tree_wrap = urwid.AttrMap(self.devs_tree, 'normal', 'focus')
        self.cols.contents[0] = (self.devs_tree_wrap, ('weight', 1, False))
    def async_evdev_read(self, device):
        """Return a future that resolves with the next batch of evdev events;
        the reader is deregistered before the future is resolved."""
        future = asyncio.Future()
        def ready():
            self.aloop.remove_reader(device.fileno())
            future.set_result(device.read())
        self.aloop.add_reader(device.fileno(), ready)
        return future
    async def handle_evdev_events(self, device):
        """Task body: keep reading evdev events for *device*, logging each and
        updating the gamepad-state box, until the task slot is cleared."""
        while True:
            events = await self.async_evdev_read(device)
            for event in events:
                self.log(str(event))
                self.gamepad_state_box.update_state('evdev', device, event)
            # Guard against a stop requested while we were awaiting.
            if not self.evdev_events_handler_task:
                break
    def async_jsio_read(self, device):
        """Return a future that resolves with all joystick events currently
        readable from device['file'] (drained via non-blocking select)."""
        future = asyncio.Future()
        data_format = 'LhBB'
        def ready():
            self.aloop.remove_reader(device['file'].fileno())
            events = []
            while select.select([device['file'].fileno()], [], [], 0.0)[0]:
                data = device['file'].read(struct.calcsize(data_format))
                data = struct.unpack(data_format, data)
                # JS_EVENT_INIT is masked off so synthetic init events look
                # like regular ones.
                event = dict(time=data[0], value=data[1], type=data[2] & ~JS_EVENT_INIT, number=data[3])
                events.append(event)
            future.set_result(events)
        self.aloop.add_reader(device['file'].fileno(), ready)
        return future
    async def handle_jsio_events(self, device):
        """Task body: open the joystick node and keep reading its events,
        logging each and updating the gamepad-state box."""
        # NOTE(review): the third positional argument of open() is
        # *buffering*, not OS flags, so O_RDONLY|O_NDELAY does not make this
        # non-blocking — confirm and switch to os.open/os.fdopen if needed.
        device['file'] = open(device['path'], 'rb', os.O_RDONLY | os.O_NDELAY)
        while True:
            events = await self.async_jsio_read(device)
            for event in events:
                self.log('%s: %s ' % (device['path'], str(event)))
                self.gamepad_state_box.update_state('jsio', device, event)
            if not self.jsio_events_handler_task:
                break
    def node_visited(self, device):
        """Tree-selection callback: show device details, stop any previous
        monitor task, and start monitoring when the node is a known input
        device (evdev preferred over jsio)."""
        self.dev_box.show_device(device)
        if self.evdev_events_handler_task:
            self.log('stopped monitorig evdev %s' % self.selected_evdev_device)
            self.evdev_events_handler_task.cancel()
            self.aloop.remove_reader(self.selected_evdev_device.fileno())
            self.evdev_events_handler_task = None
        if self.jsio_events_handler_task:
            self.log('stopped monitorig jsio %s' % self.selected_jsio_device)
            self.jsio_events_handler_task.cancel()
            self.aloop.remove_reader(self.selected_jsio_device['file'].fileno())
            self.jsio_events_handler_task = None
        if device and 'DEVNAME' in device and device['DEVNAME'] in INPUT_DEVICES:
            data = INPUT_DEVICES[device['DEVNAME']]
            if 'evdev' in data:
                self.selected_evdev_device = data['evdev']
                self.log('started monitorig evdev %s' % self.selected_evdev_device)
                self.evdev_events_handler_task = asyncio.ensure_future(self.handle_evdev_events(data['evdev']), loop=self.aloop)
            elif 'jsio' in data:
                self.selected_jsio_device = data['jsio']
                self.log('started monitorig jsio %s' % self.selected_jsio_device['path'])
                self.jsio_events_handler_task = asyncio.ensure_future(self.handle_jsio_events(data['jsio']), loop=self.aloop)
def main():
    """Script entry point: build the console UI and run its main loop."""
    ui = ConsoleUI()
    ui.main()

if __name__ == "__main__":
    main()
| |
############################################################
# #
# hprose #
# #
# Official WebSite: http://www.hprose.com/ #
# http://www.hprose.org/ #
# #
############################################################
############################################################
# #
# hprose/io.py #
# #
# hprose io for python 2.3+ #
# #
# LastModified: Mar 8, 2015 #
# Author: Ma Bingyao <andot@hprose.com> #
# #
############################################################
from cStringIO import StringIO
import datetime
from fpconst import NaN, PosInf, NegInf, isInf, isNaN, isPosInf
from inspect import isclass
from sys import modules
from threading import RLock
from uuid import UUID
from hprose.common import HproseException
import decimal
# When True, readers decode utf-8 payloads to unicode objects; when False
# they return the raw byte strings (see HproseReader.__readString).
Unicode = False
# Zero offset shared by the UTC tzinfo below.
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
    """Fixed-offset tzinfo for UTC: zero offset, zero DST, name "UTC"."""
    def utcoffset(self, dt):
        return datetime.timedelta(0)
    def dst(self, dt):
        return datetime.timedelta(0)
    def tzname(self, dt):
        return "UTC"
utc = UTC()
class HproseTags:
    """Single-character tag constants of the hprose wire format.

    Each serialized value starts with one of the "Serialize Tags"; the
    "Serialize Marks" delimit payloads; the "Protocol Tags" frame RPC
    requests and responses.
    """
    # Serialize Tags #
    TagInteger = 'i'
    TagLong = 'l'
    TagDouble = 'd'
    TagNull = 'n'
    TagEmpty = 'e'
    TagTrue = 't'
    TagFalse = 'f'
    TagNaN = 'N'
    TagInfinity = 'I'
    TagDate = 'D'
    TagTime = 'T'
    TagUTC = 'Z'
    TagBytes = 'b'
    TagUTF8Char = 'u'
    TagString = 's'
    TagGuid = 'g'
    TagList = 'a'
    TagMap = 'm'
    TagClass = 'c'
    TagObject = 'o'
    TagRef = 'r'
    # Serialize Marks #
    TagPos = '+'
    TagNeg = '-'
    TagSemicolon = ';'
    TagOpenbrace = '{'
    TagClosebrace = '}'
    TagQuote = '"'
    TagPoint = '.'
    # Protocol Tags #
    TagFunctions = 'F'
    TagCall = 'C'
    TagResult = 'R'
    TagArgument = 'A'
    TagError = 'E'
    TagEnd = 'z'
# class -> alias and alias -> class caches shared by HproseClassManager;
# guarded by _classCacheLock so registration is thread-safe.
_classCache1 = {}
_classCache2 = {}
_classCacheLock = RLock()
def _get_class(name):
name = name.split('.')
if len(name) == 1:
return getattr(modules['__main__'], name[0], None)
clsname = name.pop()
modname = '.'.join(name)
if modname in modules:
return getattr(modules[modname], clsname, None)
return None
def _get_class2(name, ps, i, c):
    """Recursively resolve *name*, interpreting each underscore position in
    *ps* (from index *i* on) either as a package separator '.' or as a
    literal '_'.

    *c* is the character substituted at the current position ps[i].
    Returns the resolved class, or None when no interpretation matches.
    """
    if i < len(ps):
        p = ps[i]
        # Substitute the candidate separator at this underscore position,
        # then try the remaining positions as '.' first, '_' second.
        name = name[:p] + c + name[p + 1:]
        cls = _get_class2(name, ps, i + 1, '.')
        # Identity comparison with None is the correct idiom ('==' could
        # invoke arbitrary __eq__ on a class object).
        if (i + 1 < len(ps)) and (cls is None):
            cls = _get_class2(name, ps, i + 1, '_')
        return cls
    return _get_class(name)
def _get_class_by_alias(name):
    """Map a serialized alias back to a class.

    Lookup order: a class of that name on __main__; then every
    interpretation of the alias' underscores as package separators
    (via _get_class2); as a last resort a brand-new empty class is
    created and installed on __main__ so deserialization can still
    produce objects.
    """
    cls = getattr(modules['__main__'], name, None)
    if not isclass(cls):
        # Record the position of every underscore in the alias.
        ps = []
        p = name.find('_')
        while p > -1:
            ps.append(p)
            p = name.find('_', p + 1)
        cls = _get_class2(name, ps, 0, '.')
        if cls is None:
            cls = _get_class2(name, ps, 0, '_')
        if cls is None:
            # Nothing matched: synthesize an empty placeholder class.
            cls = type(name, (), {})
            cls.__module__ = '__main__'
            setattr(modules['__main__'], name, cls)
    return cls
class HproseClassManager:
    """Thread-safe bidirectional class <-> alias registry backed by the
    module-level caches (_classCache1 / _classCache2).

    staticmethod() is applied by assignment (not decorator) to preserve the
    file's declared Python 2.3 compatibility.
    """
    def register(_class, alias):
        # Update both directions under the lock so they never diverge.
        _classCacheLock.acquire()
        try:
            _classCache1[_class] = alias
            _classCache2[alias] = _class
        finally:
            _classCacheLock.release()
    register = staticmethod(register)
    def getClass(alias):
        # EAFP fast path: alias already registered.
        try:
            return _classCache2[alias]
        except KeyError:
            pass
        resolved = _get_class_by_alias(alias)
        HproseClassManager.register(resolved, alias)
        return resolved
    getClass = staticmethod(getClass)
    def getClassAlias(_class):
        cached = _classCache1.get(_class)
        if cached is not None:
            return cached
        # Build the alias as module path + class name joined by '_'.
        pieces = []
        if _class.__module__ != '__main__':
            pieces.extend(_class.__module__.split('.'))
        pieces.append(_class.__name__)
        alias = '_'.join(pieces)
        HproseClassManager.register(_class, alias)
        return alias
    getClassAlias = staticmethod(getClassAlias)
def _readuntil(stream, char):
a = []
while True:
c = stream.read(1)
if (c == char) or (c == ''): break
a.append(c)
return ''.join(a)
def _readint(stream, char):
    """Read the ASCII decimal integer terminated by *char*; an empty
    payload parses as 0."""
    digits = _readuntil(stream, char)
    if digits == '':
        return 0
    return int(digits, 10)
class HproseRawReader(object):
    """Tag-level scanner over an hprose byte stream.

    readRaw() copies one complete serialized value — uninterpreted, in wire
    format — from self.stream into an output buffer; the private
    __read*Raw helpers copy the payload of each tag kind.
    """
    def __init__(self, stream):
        # stream: any file-like object with read(n) returning str.
        self.stream = stream
    def unexpectedTag(self, tag, expectTags = None):
        """Raise a descriptive HproseException for a bad or missing tag
        ('' means the stream was exhausted)."""
        if tag == '':
            raise HproseException, "No byte found in stream"
        elif expectTags == None:
            raise HproseException, "Unexpected serialize tag '%s' in stream" % tag
        else:
            raise HproseException, "Tag '%s' expected, but '%s' found in stream" % (expectTags, tag)
    def readRaw(self, ostream = None, tag = None):
        """Copy the next serialized value (or the one starting with *tag*)
        into *ostream* and return that buffer; a StringIO is created when
        no buffer is supplied."""
        if ostream == None:
            ostream = StringIO()
        if tag == None:
            tag = self.stream.read(1)
        ostream.write(tag)
        # Single-byte values carry no payload.
        if ('0' <= tag <= '9' or
            tag == HproseTags.TagNull or
            tag == HproseTags.TagEmpty or
            tag == HproseTags.TagTrue or
            tag == HproseTags.TagFalse or
            tag == HproseTags.TagNaN):
            pass
        elif tag == HproseTags.TagInfinity:
            # One sign byte follows ('+' or '-').
            ostream.write(self.stream.read(1))
        elif (tag == HproseTags.TagInteger or
              tag == HproseTags.TagLong or
              tag == HproseTags.TagDouble or
              tag == HproseTags.TagRef):
            self.__readNumberRaw(ostream)
        elif (tag == HproseTags.TagDate or
              tag == HproseTags.TagTime):
            self.__readDateTimeRaw(ostream)
        elif tag == HproseTags.TagUTF8Char:
            self.__readUTF8CharRaw(ostream)
        elif tag == HproseTags.TagBytes:
            self.__readBytesRaw(ostream)
        elif tag == HproseTags.TagString:
            self.__readStringRaw(ostream)
        elif tag == HproseTags.TagGuid:
            self.__readGuidRaw(ostream)
        elif (tag == HproseTags.TagList or
              tag == HproseTags.TagMap or
              tag == HproseTags.TagObject):
            self.__readComplexRaw(ostream)
        elif tag == HproseTags.TagClass:
            # A class definition is followed by the object that uses it.
            self.__readComplexRaw(ostream)
            self.readRaw(ostream)
        elif tag == HproseTags.TagError:
            # Error payload is one nested serialized value.
            self.readRaw(ostream)
        else:
            self.unexpectedTag(tag)
        return ostream
    def __readNumberRaw(self, ostream):
        # Digits (and sign/point) up to the ';' terminator.
        ostream.write(_readuntil(self.stream, HproseTags.TagSemicolon))
        ostream.write(HproseTags.TagSemicolon)
    def __readDateTimeRaw(self, ostream):
        # Copy through the closing ';' (local time) or 'Z' (UTC).
        s = []
        while True:
            c = self.stream.read(1)
            s.append(c)
            if (c == HproseTags.TagSemicolon or
                c == HproseTags.TagUTC): break
        ostream.write(''.join(s))
    def __readUTF8CharRaw(self, ostream):
        # Copy exactly one utf-8 encoded code point (1-3 bytes) based on
        # the lead byte.
        s = []
        c = self.stream.read(1)
        s.append(c)
        a = ord(c)
        if (a & 0xE0) == 0xC0:
            s.append(self.stream.read(1))
        elif (a & 0xF0) == 0xE0:
            s.append(self.stream.read(2))
        elif a > 0x7F:
            raise HproseException, 'Bad utf-8 encoding'
        ostream.write(''.join(s))
    def __readBytesRaw(self, ostream):
        # Length, quote, then length raw bytes plus the closing quote.
        l = _readuntil(self.stream, HproseTags.TagQuote)
        ostream.write(l)
        ostream.write(HproseTags.TagQuote)
        if l == '':
            l = 0
        else:
            l = int(l, 10)
        ostream.write(self.stream.read(l + 1))
    def __readStringRaw(self, ostream):
        # The declared length counts UTF-16 units, so 4-byte utf-8
        # sequences (surrogate pairs) consume two units — hence the
        # second `i += 1` below.
        l = _readuntil(self.stream, HproseTags.TagQuote)
        ostream.write(l)
        ostream.write(HproseTags.TagQuote)
        if l == '':
            l = 0
        else:
            l = int(l, 10)
        s = []
        i = 0
        while i < l:
            c = self.stream.read(1)
            s.append(c)
            a = ord(c)
            if (a & 0xE0) == 0xC0:
                s.append(self.stream.read(1))
            elif (a & 0xF0) == 0xE0:
                s.append(self.stream.read(2))
            elif (a & 0xF8) == 0xF0:
                s.append(self.stream.read(3))
                i += 1
            i += 1
        # Closing quote.
        s.append(self.stream.read(1))
        ostream.write(''.join(s))
    def __readGuidRaw(self, ostream):
        # A GUID is always 38 bytes: '{' + 36-char uuid + '}'.
        ostream.write(self.stream.read(38))
    def __readComplexRaw(self, ostream):
        # Copy header up to '{', then nested values until the matching '}'.
        ostream.write(_readuntil(self.stream, HproseTags.TagOpenbrace))
        ostream.write(HproseTags.TagOpenbrace)
        tag = self.stream.read(1)
        while tag != HproseTags.TagClosebrace:
            self.readRaw(ostream, tag)
            tag = self.stream.read(1)
        ostream.write(tag)
class FakeReaderRefer:
    """Reference table used in 'simple' mode: stores nothing and treats any
    back-reference tag in the stream as an error."""
    def set(self, val):
        pass
    def read(self, index):
        raise HproseException, "Unexpected serialize tag '%s' in stream" % HproseTags.TagRef
    def reset(self):
        pass
class RealReaderRefer:
    """Reference table for full mode: every container/string read is
    appended so later TagRef indices resolve to the same object."""
    def __init__(self):
        self.ref = []
    def set(self, val):
        self.ref.append(val)
    def read(self, index):
        return self.ref[index]
    def reset(self):
        # Empty in place (Python 2 lists have no clear()).
        del self.ref[:]
class HproseReader(HproseRawReader):
    """Deserializer for the hprose wire format.

    unserialize() dispatches on the next tag byte; the read<Type>()
    methods additionally accept null/ref tags for typed reads. In
    'simple' mode no reference tracking is performed (cycles and
    back-references are then errors).
    """
    def __init__(self, stream, simple = False):
        super(HproseReader, self).__init__(stream)
        # Python-2.4-safe conditional: picks the fake or the real refer.
        self.refer = (simple and [FakeReaderRefer()] or [RealReaderRefer()])[0]
        # (class, field_count, field_names) tuples, indexed by TagObject payload.
        self.classref = []
    def unserialize(self):
        """Read and return the next value of any type from the stream."""
        tag = self.stream.read(1)
        if '0' <= tag <= '9':
            return int(tag, 10)
        if tag == HproseTags.TagInteger:
            return self.__readIntegerWithoutTag()
        if tag == HproseTags.TagLong:
            return self.__readLongWithoutTag()
        if tag == HproseTags.TagDouble:
            return self.__readDoubleWithoutTag()
        if tag == HproseTags.TagNull:
            return None
        if tag == HproseTags.TagEmpty:
            return (Unicode and [u''] or [''])[0]
        if tag == HproseTags.TagTrue:
            return True
        if tag == HproseTags.TagFalse:
            return False
        if tag == HproseTags.TagNaN:
            return NaN
        if tag == HproseTags.TagInfinity:
            return self.__readInfinityWithoutTag()
        if tag == HproseTags.TagDate:
            return self.readDateWithoutTag()
        if tag == HproseTags.TagTime:
            return self.readTimeWithoutTag()
        if tag == HproseTags.TagBytes:
            return self.readBytesWithoutTag()
        if tag == HproseTags.TagUTF8Char:
            return self.__readUTF8CharWithoutTag()
        if tag == HproseTags.TagString:
            return self.readStringWithoutTag()
        if tag == HproseTags.TagGuid:
            return self.readGuidWithoutTag()
        if tag == HproseTags.TagList:
            return self.readListWithoutTag()
        if tag == HproseTags.TagMap:
            return self.readMapWithoutTag()
        if tag == HproseTags.TagClass:
            # Class definition precedes the object that uses it.
            self.__readClass()
            return self.readObject()
        if tag == HproseTags.TagObject:
            return self.readObjectWithoutTag()
        if tag == HproseTags.TagRef:
            return self.__readRef()
        if tag == HproseTags.TagError:
            raise HproseException, self.readString()
        self.unexpectedTag(tag)
    def checkTag(self, expectTag):
        """Consume one tag byte and raise unless it equals *expectTag*."""
        tag = self.stream.read(1)
        if tag != expectTag:
            self.unexpectedTag(tag, expectTag)
    def checkTags(self, expectTags):
        """Consume one tag byte, raise unless it is in *expectTags*;
        return the tag."""
        tag = self.stream.read(1)
        if tag not in expectTags:
            self.unexpectedTag(tag, ''.join(expectTags))
        return tag
    def __readIntegerWithoutTag(self):
        return int(_readuntil(self.stream, HproseTags.TagSemicolon), 10)
    def readInteger(self):
        tag = self.stream.read(1)
        if '0' <= tag <= '9':
            return int(tag, 10)
        if tag == HproseTags.TagInteger:
            return self.__readIntegerWithoutTag()
        self.unexpectedTag(tag)
    def __readLongWithoutTag(self):
        return long(_readuntil(self.stream, HproseTags.TagSemicolon))
    def readLong(self):
        tag = self.stream.read(1)
        if '0' <= tag <= '9':
            return long(tag)
        if (tag == HproseTags.TagInteger or
            tag == HproseTags.TagLong):
            return self.__readLongWithoutTag()
        self.unexpectedTag(tag)
    def __readDoubleWithoutTag(self):
        return float(_readuntil(self.stream, HproseTags.TagSemicolon))
    def readDouble(self):
        tag = self.stream.read(1)
        if '0' <= tag <= '9':
            return float(tag)
        if (tag == HproseTags.TagInteger or
            tag == HproseTags.TagLong or
            tag == HproseTags.TagDouble):
            return self.__readDoubleWithoutTag()
        if tag == HproseTags.TagNaN:
            return NaN
        if tag == HproseTags.TagInfinity:
            return self.__readInfinityWithoutTag()
        self.unexpectedTag(tag)
    def __readInfinityWithoutTag(self):
        # One sign byte follows the 'I' tag.
        if self.stream.read(1) == HproseTags.TagNeg:
            return NegInf
        else:
            return PosInf
    def readBoolean(self):
        tag = self.checkTags((HproseTags.TagTrue, HproseTags.TagFalse))
        return tag == HproseTags.TagTrue
    def readDateWithoutTag(self):
        """Read a date (optionally with a time part) as datetime.date or
        datetime.datetime; a trailing 'Z' makes it timezone-aware (UTC)."""
        year = int(self.stream.read(4), 10)
        month = int(self.stream.read(2), 10)
        day = int(self.stream.read(2), 10)
        tag = self.stream.read(1)
        if tag == HproseTags.TagTime:
            hour = int(self.stream.read(2), 10)
            minute = int(self.stream.read(2), 10)
            second = int(self.stream.read(2), 10)
            (tag, microsecond) = self.__readMicrosecond()
            if tag == HproseTags.TagUTC:
                d = datetime.datetime(year, month, day, hour, minute, second, microsecond, utc)
            else:
                d = datetime.datetime(year, month, day, hour, minute, second, microsecond)
        elif tag == HproseTags.TagUTC:
            d = datetime.datetime(year, month, day, 0, 0, 0, 0, utc)
        else:
            d = datetime.date(year, month, day)
        self.refer.set(d)
        return d
    def readDate(self):
        tag = self.stream.read(1)
        if tag == HproseTags.TagNull: return None
        if tag == HproseTags.TagRef: return self.__readRef()
        if tag == HproseTags.TagDate: return self.readDateWithoutTag()
        self.unexpectedTag(tag)
    def readTimeWithoutTag(self):
        """Read a time-of-day as datetime.time (UTC-aware when 'Z'-terminated)."""
        hour = int(self.stream.read(2), 10)
        minute = int(self.stream.read(2), 10)
        second = int(self.stream.read(2), 10)
        (tag, microsecond) = self.__readMicrosecond()
        if tag == HproseTags.TagUTC:
            t = datetime.time(hour, minute, second, microsecond, utc)
        else:
            t = datetime.time(hour, minute, second, microsecond)
        self.refer.set(t)
        return t
    def readTime(self):
        tag = self.stream.read(1)
        if tag == HproseTags.TagNull: return None
        if tag == HproseTags.TagRef: return self.__readRef()
        if tag == HproseTags.TagTime: return self.readTimeWithoutTag()
        self.unexpectedTag(tag)
    def readBytesWithoutTag(self):
        b = self.stream.read(_readint(self.stream, HproseTags.TagQuote))
        # Discard the closing quote.
        self.stream.read(1)
        self.refer.set(b)
        return b
    def readBytes(self):
        tag = self.stream.read(1)
        if tag == HproseTags.TagNull: return None
        if tag == HproseTags.TagEmpty: return ''
        if tag == HproseTags.TagRef: return self.__readRef()
        if tag == HproseTags.TagBytes: return self.readBytesWithoutTag()
        self.unexpectedTag(tag)
    def __readUTF8CharWithoutTag(self):
        # One utf-8 encoded code point (1-3 bytes), sized by the lead byte.
        s = []
        c = self.stream.read(1)
        s.append(c)
        a = ord(c)
        if (a & 0xE0) == 0xC0:
            s.append(self.stream.read(1))
        elif (a & 0xF0) == 0xE0:
            s.append(self.stream.read(2))
        elif a > 0x7F:
            raise HproseException, 'Bad utf-8 encoding'
        s = ''.join(s)
        if Unicode:
            s = unicode(s, 'utf-8')
        return s
    def __readString(self):
        # Declared length counts UTF-16 units: a 4-byte utf-8 sequence
        # (surrogate pair) consumes two units, hence the extra `i += 1`.
        l = _readint(self.stream, HproseTags.TagQuote)
        s = []
        i = 0
        while i < l:
            c = self.stream.read(1)
            s.append(c)
            a = ord(c)
            if (a & 0xE0) == 0xC0:
                s.append(self.stream.read(1))
            elif (a & 0xF0) == 0xE0:
                s.append(self.stream.read(2))
            elif (a & 0xF8) == 0xF0:
                s.append(self.stream.read(3))
                i += 1
            i += 1
        # Discard the closing quote.
        self.stream.read(1)
        s = ''.join(s)
        if Unicode:
            s = unicode(s, 'utf-8')
        return s
    def readStringWithoutTag(self):
        s = self.__readString()
        self.refer.set(s)
        return s
    def readString(self):
        tag = self.stream.read(1)
        if tag == HproseTags.TagNull: return None
        if tag == HproseTags.TagEmpty: return (Unicode and [u''] or [''])[0]
        if tag == HproseTags.TagUTF8Char: return self.__readUTF8CharWithoutTag()
        if tag == HproseTags.TagRef: return self.__readRef()
        if tag == HproseTags.TagString: return self.readStringWithoutTag()
        self.unexpectedTag(tag)
    def readGuidWithoutTag(self):
        # 38 bytes: '{' + 36-char uuid text + '}' (UUID accepts the braces).
        u = UUID(self.stream.read(38))
        self.refer.set(u)
        return u
    def readGuid(self):
        tag = self.stream.read(1)
        if tag == HproseTags.TagNull: return None
        if tag == HproseTags.TagRef: return self.__readRef()
        if tag == HproseTags.TagGuid: return self.readGuidWithoutTag()
        self.unexpectedTag(tag)
    def readListWithoutTag(self):
        l = []
        # Register before filling so self-referencing lists resolve.
        self.refer.set(l)
        c = _readint(self.stream, HproseTags.TagOpenbrace)
        for _ in xrange(c): l.append(self.unserialize())
        self.stream.read(1)
        return l
    def readList(self):
        tag = self.stream.read(1)
        if tag == HproseTags.TagNull: return None
        if tag == HproseTags.TagRef: return self.__readRef()
        if tag == HproseTags.TagList: return self.readListWithoutTag()
        self.unexpectedTag(tag)
    def readMapWithoutTag(self):
        m = {}
        # Register before filling so self-referencing maps resolve.
        self.refer.set(m)
        c = _readint(self.stream, HproseTags.TagOpenbrace)
        for _ in xrange(c):
            k = self.unserialize()
            v = self.unserialize()
            m[k] = v
        self.stream.read(1)
        return m
    def readMap(self):
        tag = self.stream.read(1)
        if tag == HproseTags.TagNull: return None
        if tag == HproseTags.TagRef: return self.__readRef()
        if tag == HproseTags.TagMap: return self.readMapWithoutTag()
        self.unexpectedTag(tag)
    def readObjectWithoutTag(self):
        # The payload is an index into self.classref (filled by __readClass).
        (cls, count, fields) = self.classref[_readint(self.stream, HproseTags.TagOpenbrace)]
        obj = cls()
        self.refer.set(obj)
        for i in xrange(count): setattr(obj, fields[i], self.unserialize())
        self.stream.read(1)
        return obj
    def readObject(self):
        tag = self.stream.read(1)
        if tag == HproseTags.TagNull: return None
        if tag == HproseTags.TagRef: return self.__readRef()
        if tag == HproseTags.TagObject: return self.readObjectWithoutTag()
        if tag == HproseTags.TagClass:
            self.__readClass()
            return self.readObject()
        self.unexpectedTag(tag)
    def __readClass(self):
        # Class definition: alias string, field count, field-name strings.
        classname = self.__readString()
        count = _readint(self.stream, HproseTags.TagOpenbrace)
        fields = [self.readString() for _ in xrange(count)]
        self.stream.read(1)
        cls = HproseClassManager.getClass(classname)
        self.classref.append((cls, count, fields))
    def __readRef(self):
        return self.refer.read(_readint(self.stream, HproseTags.TagSemicolon))
    def __readMicrosecond(self):
        """Parse an optional fractional-seconds part ('.' then 3, 6 or 9
        digits); extra nanosecond digits are read and discarded. Returns
        (next tag byte, microseconds)."""
        microsecond = 0
        tag = self.stream.read(1)
        if tag == HproseTags.TagPoint:
            microsecond = int(self.stream.read(3), 10) * 1000
            tag = self.stream.read(1)
            if '0' <= tag <= '9':
                microsecond = microsecond + int(tag + self.stream.read(2), 10)
                tag = self.stream.read(1)
                if '0' <= tag <= '9':
                    self.stream.read(2)
                    tag = self.stream.read(1)
        return (tag, microsecond)
    def reset(self):
        """Drop all class definitions and reference-table state."""
        del self.classref[:]
        self.refer.reset()
class FakeWriterRefer:
    """No-op reference table used in 'simple' mode: nothing is tracked and
    no back-reference is ever emitted."""
    def set(self, val):
        pass
    def write(self, val):
        # Nothing is tracked, so no value can be a back-reference.
        return False
    def reset(self):
        pass
class RealWriterRefer:
    """Reference table for serialization: remembers each value's output
    position so repeated values are emitted as 'r<index>;' instead of
    being serialized again.

    Strings are keyed by value; everything else by id().
    """
    def __init__(self, stream):
        self.stream = stream
        self.ref = {}
        # Next reference index to assign.
        self.refcount = 0
    def set(self, val):
        # NOTE(review): id()-keyed entries assume the object stays alive
        # for the duration of serialization (ids can be reused after GC).
        if isinstance(val, str) or isinstance(val, unicode):
            self.ref[val] = self.refcount
        else:
            self.ref[id(val)] = self.refcount
        self.refcount += 1
    def write(self, val):
        """Emit a back-reference for *val* if already seen; return whether
        one was written (caller skips full serialization on True)."""
        if not (isinstance(val, str) or isinstance(val, unicode)):
            val = id(val)
        if (val in self.ref):
            self.stream.write('%c%d%c' % (HproseTags.TagRef,
                                          self.ref[val],
                                          HproseTags.TagSemicolon))
            return True
        return False
    def reset(self):
        self.ref.clear()
        self.refcount = 0
class HproseWriter(object):
    """Serializer for the hprose wire format.

    serialize() dispatches on the Python type of the value; the
    write<Type>WithRef variants first try to emit a back-reference via
    the reference table. In 'simple' mode nothing is tracked.
    """
    def __init__(self, stream, simple = False):
        self.stream = stream
        # alias -> index into self.fieldsref, for already-written classes.
        self.classref = {}
        self.fieldsref = []
        # Python-2.4-safe conditional: fake or real reference table.
        self.refer = (simple and [FakeWriterRefer()] or [RealWriterRefer(stream)])[0]
    def serialize(self, v):
        """Write *v* in wire format. Dispatch order matters: bool must be
        tested before int (bool is an int subclass)."""
        # NOTE(review): `v == None` invokes v.__eq__; `is None` would be
        # safer — confirm no caller relies on the current behavior.
        if v == None: self.writeNull()
        elif isinstance(v, bool): self.writeBoolean(v)
        elif isinstance(v, int): self.writeInteger(v)
        elif isinstance(v, float): self.writeDouble(v)
        elif isinstance(v, decimal.Decimal): self.writeDouble(v)
        elif isinstance(v, long): self.writeLong(v)
        elif isinstance(v, str):
            if v == '':
                self.writeEmpty()
            elif Unicode:
                # In Unicode mode a plain str is treated as raw bytes.
                self.writeBytesWithRef(v)
            else:
                # Otherwise try utf-8 text first, fall back to raw bytes.
                try:
                    self.writeStringWithRef(unicode(v, 'utf-8'))
                except ValueError:
                    self.writeBytesWithRef(v)
        elif isinstance(v, unicode):
            if v == u'':
                self.writeEmpty()
            elif len(v) == 1:
                self.writeUTF8Char(v)
            else:
                self.writeStringWithRef(v)
        elif isinstance(v, UUID): self.writeGuidWithRef(v)
        elif isinstance(v, (list, tuple)): self.writeListWithRef(v)
        elif isinstance(v, dict): self.writeMapWithRef(v)
        elif isinstance(v, (datetime.datetime, datetime.date)): self.writeDateWithRef(v)
        elif isinstance(v, datetime.time): self.writeTimeWithRef(v)
        # NOTE(review): every remaining value satisfies isinstance(v,
        # object), so the final raise below appears unreachable — confirm.
        elif isinstance(v, object): self.writeObjectWithRef(v)
        else: raise HproseException, 'Not support to serialize this data'
    def writeInteger(self, i):
        # Single digits 0-9 are their own one-byte encoding.
        if 0 <= i <= 9:
            self.stream.write('%d' % (i,))
        else:
            self.stream.write('%c%d%c' % (HproseTags.TagInteger,
                                          i,
                                          HproseTags.TagSemicolon))
    def writeLong(self, l):
        if 0 <= l <= 9:
            self.stream.write('%d' % (l,))
        else:
            self.stream.write('%c%d%c' % (HproseTags.TagLong,
                                          l,
                                          HproseTags.TagSemicolon))
    def writeDouble(self, d):
        # NaN and infinities have dedicated single-tag encodings.
        if isNaN(d): self.writeNaN()
        elif isInf(d): self.writeInfinity(isPosInf(d))
        else: self.stream.write('%c%s%c' % (HproseTags.TagDouble,
                                            d,
                                            HproseTags.TagSemicolon))
    def writeNaN(self):
        self.stream.write(HproseTags.TagNaN)
    def writeInfinity(self, positive = True):
        self.stream.write(HproseTags.TagInfinity)
        if positive:
            self.stream.write(HproseTags.TagPos)
        else:
            self.stream.write(HproseTags.TagNeg)
    def writeNull(self):
        self.stream.write(HproseTags.TagNull)
    def writeEmpty(self):
        self.stream.write(HproseTags.TagEmpty)
    def writeBoolean(self, b):
        if b:
            self.stream.write(HproseTags.TagTrue)
        else:
            self.stream.write(HproseTags.TagFalse)
    def writeDate(self, date):
        """Write a date/datetime, choosing the shortest form: date-only,
        time-only (for the epoch date), or full date+time; 'Z' marks UTC,
        ';' local time."""
        self.refer.set(date)
        if isinstance(date, datetime.datetime):
            # Normalize any non-UTC aware datetime to UTC first.
            if date.utcoffset() != ZERO and date.utcoffset() != None:
                date = date.astimezone(utc)
            if date.hour == 0 and date.minute == 0 and date.second == 0 and date.microsecond == 0:
                fmt = '%c%s' % (HproseTags.TagDate, '%Y%m%d')
            elif date.year == 1970 and date.month == 1 and date.day == 1:
                fmt = '%c%s' % (HproseTags.TagTime, '%H%M%S')
            else:
                fmt = '%c%s%c%s' % (HproseTags.TagDate, '%Y%m%d', HproseTags.TagTime, '%H%M%S')
            if date.microsecond > 0:
                fmt = '%s%c%s' % (fmt, HproseTags.TagPoint, '%f')
            if date.utcoffset() == ZERO:
                fmt = '%s%c' % (fmt, HproseTags.TagUTC)
            else:
                fmt = '%s%c' % (fmt, HproseTags.TagSemicolon)
        else:
            fmt = '%c%s%c' % (HproseTags.TagDate, '%Y%m%d', HproseTags.TagSemicolon)
        self.stream.write(date.strftime(fmt))
    def writeDateWithRef(self, date):
        if not self.refer.write(date): self.writeDate(date)
    def writeTime(self, time):
        self.refer.set(time)
        fmt = '%c%s' % (HproseTags.TagTime, '%H%M%S')
        if time.microsecond > 0:
            fmt = '%s%c%s' % (fmt, HproseTags.TagPoint, '%f')
        if time.utcoffset() == ZERO:
            fmt = '%s%c' % (fmt, HproseTags.TagUTC)
        else:
            fmt = '%s%c' % (fmt, HproseTags.TagSemicolon)
        self.stream.write(time.strftime(fmt))
    def writeTimeWithRef(self, time):
        if not self.refer.write(time): self.writeTime(time)
    def writeBytes(self, b):
        self.refer.set(b)
        length = len(b)
        if length == 0:
            self.stream.write('%c%c%c' % (HproseTags.TagBytes,
                                          HproseTags.TagQuote,
                                          HproseTags.TagQuote))
        else:
            self.stream.write('%c%d%c%s%c' % (HproseTags.TagBytes,
                                              length,
                                              HproseTags.TagQuote,
                                              b,
                                              HproseTags.TagQuote))
    def writeBytesWithRef(self, b):
        if not self.refer.write(b): self.writeBytes(b)
    def writeUTF8Char(self, u):
        self.stream.write('%c%s' % (HproseTags.TagUTF8Char, u.encode('utf-8')))
    def writeString(self, s):
        self.refer.set(s)
        # Length is in characters of the unicode string; the payload is
        # its utf-8 encoding.
        length = len(s)
        if length == 0:
            self.stream.write('%c%c%c' % (HproseTags.TagString,
                                          HproseTags.TagQuote,
                                          HproseTags.TagQuote))
        else:
            self.stream.write('%c%d%c%s%c' % (HproseTags.TagString,
                                              length,
                                              HproseTags.TagQuote,
                                              s.encode('utf-8'),
                                              HproseTags.TagQuote))
    def writeStringWithRef(self, s):
        if not self.refer.write(s): self.writeString(s)
    def writeGuid(self, guid):
        self.refer.set(guid)
        self.stream.write(HproseTags.TagGuid)
        self.stream.write(HproseTags.TagOpenbrace)
        self.stream.write(str(guid))
        self.stream.write(HproseTags.TagClosebrace)
    def writeGuidWithRef(self, guid):
        if not self.refer.write(guid): self.writeGuid(guid)
    def writeList(self, l):
        # Register before writing elements so self-references resolve.
        self.refer.set(l)
        count = len(l)
        if count == 0:
            self.stream.write('%c%c' % (HproseTags.TagList,
                                        HproseTags.TagOpenbrace))
        else:
            self.stream.write('%c%d%c' % (HproseTags.TagList,
                                          count,
                                          HproseTags.TagOpenbrace))
        for i in xrange(count): self.serialize(l[i])
        self.stream.write(HproseTags.TagClosebrace)
    def writeListWithRef(self, l):
        if not self.refer.write(l): self.writeList(l)
    def writeMap(self, m):
        self.refer.set(m)
        count = len(m)
        if count == 0:
            self.stream.write('%c%c' % (HproseTags.TagMap,
                                        HproseTags.TagOpenbrace))
        else:
            self.stream.write('%c%d%c' % (HproseTags.TagMap,
                                          count,
                                          HproseTags.TagOpenbrace))
        # Alternating key, value pairs.
        for key in m:
            self.serialize(key)
            self.serialize(m[key])
        self.stream.write(HproseTags.TagClosebrace)
    def writeMapWithRef(self, m):
        if not self.refer.write(m): self.writeMap(m)
    def writeObject(self, obj):
        """Write an object: the class definition is emitted once per alias
        and referenced by index afterwards; fields come from vars(obj)."""
        classname = HproseClassManager.getClassAlias(obj.__class__)
        if classname in self.classref:
            index = self.classref[classname]
            fields = self.fieldsref[index]
        else:
            data = vars(obj)
            fields = tuple(data.keys())
            index = self.__writeClass(classname, fields)
        self.stream.write('%c%d%c' % (HproseTags.TagObject,
                                      index,
                                      HproseTags.TagOpenbrace))
        self.refer.set(obj)
        data = vars(obj)
        count = len(fields)
        for i in xrange(count):
            self.serialize(data[fields[i]])
        self.stream.write(HproseTags.TagClosebrace)
    def writeObjectWithRef(self, obj):
        if not self.refer.write(obj): self.writeObject(obj)
    def __writeClass(self, classname, fields):
        """Emit a class definition and register it; returns its index."""
        # Length is counted in decoded unicode characters while the raw
        # (utf-8) classname bytes are written as payload.
        length = len(unicode(classname, 'utf-8'))
        count = len(fields)
        if count == 0:
            self.stream.write('%c%d%c%s%c%c' % (HproseTags.TagClass,
                                                length,
                                                HproseTags.TagQuote,
                                                classname,
                                                HproseTags.TagQuote,
                                                HproseTags.TagOpenbrace))
        else:
            self.stream.write('%c%d%c%s%c%d%c' % (HproseTags.TagClass,
                                                  length,
                                                  HproseTags.TagQuote,
                                                  classname,
                                                  HproseTags.TagQuote,
                                                  count,
                                                  HproseTags.TagOpenbrace))
        for i in xrange(count):
            field = unicode(fields[i], 'utf-8')
            self.writeString(field)
        self.stream.write(HproseTags.TagClosebrace)
        index = len(self.fieldsref)
        self.fieldsref.append(fields)
        self.classref[classname] = index
        return index
    def reset(self):
        """Drop all class definitions and reference-table state."""
        self.classref.clear()
        del self.fieldsref[:]
        self.refer.reset()
class HproseFormatter:
    """Convenience facade: one-shot serialize/unserialize through
    in-memory string buffers.

    staticmethod() is applied by assignment (not decorator) to preserve
    the file's declared Python 2.3 compatibility.
    """
    def serialize(v, simple = False):
        buf = StringIO()
        HproseWriter(buf, simple).serialize(v)
        return buf.getvalue()
    serialize = staticmethod(serialize)
    def unserialize(s, simple = False):
        return HproseReader(StringIO(s), simple).unserialize()
    unserialize = staticmethod(unserialize)
| |
# Copyright (c) 2015.
# Philipp Wagner <bytefish[at]gmx[dot]de> and
# Florian Lier <flier[at]techfak.uni-bielefeld.de> and
# Norman Koester <nkoester[at]techfak.uni-bielefeld.de>
#
#
# Released to public domain under terms of the BSD Simplified license.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the organization nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# See <http://www.opensource.org/licenses/bsd-license>
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
class AbstractFeature(object):
    """Interface for feature-extraction algorithms.

    Subclasses fit on training data with compute(X, y) and transform a
    single sample with extract(X); save/load are placeholders.
    """
    def compute(self, X, y):
        """Fit on training data X with labels y; must be overridden."""
        raise NotImplementedError("Every AbstractFeature must implement the compute method.")
    def extract(self, X):
        """Transform one sample X; must be overridden."""
        raise NotImplementedError("Every AbstractFeature must implement the extract method.")
    def __repr__(self):
        return "AbstractFeature"
    def save(self):
        # Persistence is not implemented for any feature yet.
        raise NotImplementedError("Not implemented yet (TODO).")
    def load(self):
        raise NotImplementedError("Not implemented yet (TODO).")
class Identity(AbstractFeature):
    """Pass-through feature: compute() and extract() return their input
    unchanged. Useful as a baseline, e.g. training a classifier directly
    on raw data."""
    def __init__(self):
        AbstractFeature.__init__(self)
    def compute(self, X, y):
        # Labels are ignored; the training data is forwarded as-is.
        return X
    def extract(self, X):
        return X
    def __repr__(self):
        return "Identity"
from ocvfacerec.facerec.util import as_column_matrix
from ocvfacerec.facerec.operators import ChainOperator, CombineOperator
class PCA(AbstractFeature):
    """Principal Component Analysis learned via an economy-size SVD.

    Learns an orthonormal basis from the training samples and projects
    observations onto the leading ``num_components`` axes.
    """
    def __init__(self, num_components=0):
        # num_components <= 0 means "use the maximum"; resolved in compute().
        AbstractFeature.__init__(self)
        self._num_components = num_components
    def compute(self, X, y):
        """Fit the PCA basis on samples X (labels y are unused) and return
        the list of projected samples, one column vector per sample."""
        # build the column matrix (one sample per column)
        XC = as_column_matrix(X)
        y = np.asarray(y)
        # set a valid number of components (at most n_samples - 1)
        if self._num_components <= 0 or (self._num_components > XC.shape[1] - 1):
            self._num_components = XC.shape[1] - 1
        # center dataset on the per-feature mean (kept for project/reconstruct)
        self._mean = XC.mean(axis=1).reshape(-1, 1)
        XC = XC - self._mean
        # perform an economy size decomposition (may still allocate too much memory for computation)
        self._eigenvectors, self._eigenvalues, variances = np.linalg.svd(XC, full_matrices=False)
        # sort eigenvectors by eigenvalues in descending order
        idx = np.argsort(-self._eigenvalues)
        self._eigenvalues, self._eigenvectors = self._eigenvalues[idx], self._eigenvectors[:, idx]
        # use only num_components
        self._eigenvectors = self._eigenvectors[0:, 0:self._num_components].copy()
        self._eigenvalues = self._eigenvalues[0:self._num_components].copy()
        # turn singular values into eigenvalues of the covariance matrix
        self._eigenvalues = np.power(self._eigenvalues, 2) / XC.shape[1]
        # project every training sample to get its feature representation
        features = []
        for x in X:
            xp = self.project(x.reshape(-1, 1))
            features.append(xp)
        return features
    def extract(self, X):
        """Project a single observation X onto the learned basis."""
        X = np.asarray(X).reshape(-1, 1)
        return self.project(X)
    def project(self, X):
        # Subtract the training mean, then change basis.
        X = X - self._mean
        return np.dot(self._eigenvectors.T, X)
    def reconstruct(self, X):
        # Map a projected vector back into the original input space.
        X = np.dot(self._eigenvectors, X)
        return X + self._mean
    @property
    def num_components(self):
        # Number of components actually kept (resolved during compute()).
        return self._num_components
    @property
    def eigenvalues(self):
        return self._eigenvalues
    @property
    def eigenvectors(self):
        return self._eigenvectors
    @property
    def mean(self):
        # Column vector holding the per-feature mean of the training data.
        return self._mean
    def __repr__(self):
        return "PCA (num_components=%d)" % (self._num_components)
class LDA(AbstractFeature):
    """Linear Discriminant Analysis (Fisher's LDA).

    Finds at most (c - 1) projection axes (c = number of classes) that
    maximize the ratio of between-class to within-class scatter.
    """
    def __init__(self, num_components=0):
        # num_components: number of discriminants to keep; values <= 0 or
        # larger than (c - 1) are clamped to (c - 1) during compute().
        AbstractFeature.__init__(self)
        self._num_components = num_components

    def compute(self, X, y):
        """Fit the discriminant axes on samples X with class labels y and
        return the list of projected samples (one column vector each)."""
        # Build the column matrix (one sample per column).
        XC = as_column_matrix(X)
        y = np.asarray(y)
        # Problem dimensions: feature dimensionality and number of classes.
        d = XC.shape[0]
        c = len(np.unique(y))
        # LDA yields at most (c - 1) meaningful components; clamp.
        if self._num_components <= 0 or self._num_components > (c - 1):
            self._num_components = c - 1
        # Total mean of the dataset.
        meanTotal = XC.mean(axis=1).reshape(-1, 1)
        # Within-class (Sw) and between-class (Sb) scatter matrices.
        # NOTE(review): the loop assumes labels are exactly 0..c-1; samples
        # with other label values would be silently skipped -- confirm callers.
        Sw = np.zeros((d, d), dtype=np.float32)
        Sb = np.zeros((d, d), dtype=np.float32)
        for i in range(0, c):
            Xi = XC[:, np.where(y == i)[0]]
            meanClass = np.mean(Xi, axis=1).reshape(-1, 1)
            Sw = Sw + np.dot((Xi - meanClass), (Xi - meanClass).T)
            Sb = Sb + Xi.shape[1] * np.dot((meanClass - meanTotal), (meanClass - meanTotal).T)
        # Solve the eigenvalue problem of inv(Sw) @ Sb.
        # FIX: the previous code used np.linalg.inv(Sw) * Sb, which is an
        # *elementwise* product for ndarrays; the discriminants require the
        # matrix product.
        self._eigenvalues, self._eigenvectors = np.linalg.eig(np.linalg.inv(Sw).dot(Sb))
        # Sort eigenvectors by their (real) eigenvalue in descending order.
        idx = np.argsort(-self._eigenvalues.real)
        self._eigenvalues, self._eigenvectors = self._eigenvalues[idx], self._eigenvectors[:, idx]
        # Only keep the num_components non-trivial components.
        # NOTE(review): np.matrix is deprecated in NumPy; kept here because
        # downstream code may rely on matrix semantics -- confirm before changing.
        self._eigenvalues = np.array(self._eigenvalues[0:self._num_components].real, dtype=np.float32, copy=True)
        self._eigenvectors = np.matrix(self._eigenvectors[0:, 0:self._num_components].real, dtype=np.float32, copy=True)
        # Project every training sample to get its feature representation.
        features = []
        for x in X:
            xp = self.project(x.reshape(-1, 1))
            features.append(xp)
        return features

    def extract(self, X):
        """Project a single observation into the discriminant subspace.

        Added for consistency with PCA/Fisherfaces; the inherited stub
        would otherwise raise NotImplementedError."""
        X = np.asarray(X).reshape(-1, 1)
        return self.project(X)

    def project(self, X):
        return np.dot(self._eigenvectors.T, X)

    def reconstruct(self, X):
        return np.dot(self._eigenvectors, X)

    @property
    def num_components(self):
        return self._num_components

    @property
    def eigenvectors(self):
        return self._eigenvectors

    @property
    def eigenvalues(self):
        return self._eigenvalues

    def __repr__(self):
        return "LDA (num_components=%d)" % (self._num_components)
class Fisherfaces(AbstractFeature):
    """
    Fisherfaces feature: a PCA projection to (n - c) dimensions chained
    with an LDA step, producing a class-discriminative subspace.
    """
    def __init__(self, num_components=0):
        AbstractFeature.__init__(self)
        self._num_components = num_components

    def compute(self, X, y):
        """Learn the combined PCA+LDA projection from samples X, labels y."""
        # Numpy view of the input plus dataset statistics.
        Xc = as_column_matrix(X)
        y = np.asarray(y)
        n = len(y)
        c = len(np.unique(y))
        # Chain an (n - c)-dimensional PCA with an LDA step; computing the
        # chain fits both decompositions in order.
        pca = PCA(num_components=(n - c))
        lda = LDA(num_components=self._num_components)
        model = ChainOperator(pca, lda)
        model.compute(X, y)
        # Keep the LDA results and fold both projections into one matrix.
        self._eigenvalues = lda.eigenvalues
        self._num_components = lda.num_components
        self._eigenvectors = np.dot(pca.eigenvectors, lda.eigenvectors)
        # The projected samples are the Fisherface features.
        return [self.project(sample.reshape(-1, 1)) for sample in X]

    def extract(self, X):
        """Project a single observation into the Fisherfaces subspace."""
        observation = np.asarray(X).reshape(-1, 1)
        return self.project(observation)

    def project(self, X):
        return np.dot(self._eigenvectors.T, X)

    def reconstruct(self, X):
        return np.dot(self._eigenvectors, X)

    @property
    def num_components(self):
        return self._num_components

    @property
    def eigenvalues(self):
        return self._eigenvalues

    @property
    def eigenvectors(self):
        return self._eigenvectors

    def __repr__(self):
        return "Fisherfaces (num_components=%s)" % (self.num_components)
from ocvfacerec.facerec.lbp import LocalDescriptor, ExtendedLBP
class SpatialHistogram(AbstractFeature):
    """Spatially enhanced LBP histogram.

    Divides the LBP image into a grid of ``sz = (rows, cols)`` cells and
    concatenates the normalized per-cell LBP histograms into one feature
    vector.
    """
    # NOTE(review): the default lbp_operator=ExtendedLBP() is a single shared
    # instance across all SpatialHistogram() calls (mutable default argument);
    # harmless only if ExtendedLBP is stateless -- confirm before relying on it.
    def __init__(self, lbp_operator=ExtendedLBP(), sz=(8, 8)):
        """
        :param lbp_operator: a facerec.lbp.LocalDescriptor that computes the LBP image.
        :param sz: grid geometry as (rows, cols).
        :raises TypeError: if lbp_operator is not a LocalDescriptor.
        """
        AbstractFeature.__init__(self)
        if not isinstance(lbp_operator, LocalDescriptor):
            raise TypeError("Only an operator of type facerec.lbp.LocalDescriptor is a valid lbp_operator.")
        self.lbp_operator = lbp_operator
        self.sz = sz

    def compute(self, X, y):
        """Compute the spatial histogram for every image in X (y is unused)."""
        features = []
        for x in X:
            x = np.asarray(x)
            h = self.spatially_enhanced_histogram(x)
            features.append(h)
        return features

    def extract(self, X):
        """Compute the spatial histogram of a single image X."""
        X = np.asarray(X)
        return self.spatially_enhanced_histogram(X)

    def spatially_enhanced_histogram(self, X):
        """Concatenate the normalized per-cell LBP histograms of image X."""
        # Calculate the LBP image.
        L = self.lbp_operator(X)
        # Grid geometry; remainder pixels at the right/bottom edges that do
        # not fill a whole cell are ignored.
        lbp_height, lbp_width = L.shape
        grid_rows, grid_cols = self.sz
        py = int(np.floor(lbp_height / grid_rows))
        px = int(np.floor(lbp_width / grid_cols))
        E = []
        for row in range(0, grid_rows):
            for col in range(0, grid_cols):
                C = L[row * py:(row + 1) * py, col * px:(col + 1) * px]
                # FIX: `normed` was deprecated and then removed from
                # np.histogram (NumPy >= 1.24). With unit-width bins
                # (2**neighbors bins over the range (0, 2**neighbors)),
                # density=True yields exactly the same normalized values.
                H = np.histogram(C, bins=2 ** self.lbp_operator.neighbors, range=(0, 2 ** self.lbp_operator.neighbors),
                                 density=True)[0]
                # probably useful to apply a mapping?
                E.extend(H)
        return np.asarray(E)

    def __repr__(self):
        return "SpatialHistogram (operator=%s, grid=%s)" % (repr(self.lbp_operator), str(self.sz))
| |
"""
A generic comment-moderation system which allows configuration of
moderation options on a per-model basis.
To use, do two things:
1. Create or import a subclass of ``CommentModerator`` defining the
options you want.
2. Import ``moderator`` from this module and register one or more
models, passing the models and the ``CommentModerator`` options
class you want to use.
Example
-------
First, we define a simple model class which might represent entries in
a Weblog::
from django.db import models
class Entry(models.Model):
title = models.CharField(maxlength=250)
body = models.TextField()
pub_date = models.DateField()
enable_comments = models.BooleanField()
Then we create a ``CommentModerator`` subclass specifying some
moderation options::
from django.contrib.comments.moderation import CommentModerator, moderator
class EntryModerator(CommentModerator):
email_notification = True
enable_field = 'enable_comments'
And finally register it for moderation::
moderator.register(Entry, EntryModerator)
This sample class would apply two moderation steps to each new
comment submitted on an Entry:
* If the entry's ``enable_comments`` field is set to ``False``, the
comment will be rejected (immediately deleted).
* If the comment is successfully posted, an email notification of the
comment will be sent to site staff.
For a full list of built-in moderation options and other
configurability, see the documentation for the ``CommentModerator``
class.
"""
import datetime
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.comments import signals
from django.db.models.base import ModelBase
from django.template import Context, loader
from django.contrib import comments
from django.contrib.sites.models import get_current_site
from django.utils import timezone
class AlreadyModerated(Exception):
    """
    Signals an attempt to register a model whose comments are already
    under moderation.
    """
class NotModerated(Exception):
    """
    Signals an attempt to unregister a model that is not currently
    registered for moderation.
    """
class CommentModerator(object):
    """
    Encapsulates the comment-moderation policy for one model.

    This class is not meant to be used directly: with its default
    attributes every option is disabled. Subclass it and override the
    attributes you need:

    ``enable_field``
        Name of a ``BooleanField`` on the moderated model; while that
        field is ``False`` on an object, new comments for it are
        disallowed (immediately deleted). Default ``None``.

    ``auto_close_field`` / ``close_after``
        Name of a ``DateField``/``DateTimeField`` plus a number of days;
        once ``close_after`` days have passed after the date stored in
        that field, new comments are disallowed (deleted). Both default
        to ``None`` and must be used together.

    ``auto_moderate_field`` / ``moderate_after``
        Like the pair above, but instead of deleting, new comments get
        ``is_public = False`` before saving. Both default to ``None``
        and must be used together.

    ``email_notification``
        When ``True``, each comment that survives moderation triggers an
        email to site staff. Default ``False``.

    For finer control override ``allow``, ``moderate`` or ``email``.
    Each receives the ``comment`` being submitted, the
    ``content_object`` it will attach to, and the current ``request``.
    The moderated model class is available to subclasses as
    ``self._model``.
    """
    auto_close_field = None
    auto_moderate_field = None
    close_after = None
    email_notification = False
    enable_field = None
    moderate_after = None

    def __init__(self, model):
        self._model = model

    def _get_delta(self, now, then):
        """
        Return ``now - then`` as a ``datetime.timedelta``.

        When the arguments mix ``datetime.date`` and
        ``datetime.datetime``, both are coerced to ``date`` before
        subtracting. Raises ``ValueError`` when ``then`` lies in the
        future relative to ``now``.
        """
        if now.__class__ is not then.__class__:
            now = datetime.date(now.year, now.month, now.day)
            then = datetime.date(then.year, then.month, then.day)
        if now < then:
            raise ValueError("Cannot determine moderation rules because date field is set to a value in the future")
        return now - then

    def allow(self, comment, content_object, request):
        """
        Return ``True`` when the comment may be posted on
        ``content_object``, ``False`` when it must be deleted.
        """
        # Per-object kill switch.
        if self.enable_field and not getattr(content_object, self.enable_field):
            return False
        # Age-based closing of the comment thread.
        if self.auto_close_field and self.close_after is not None:
            close_field_value = getattr(content_object, self.auto_close_field)
            if close_field_value is not None:
                age = self._get_delta(timezone.now(), close_field_value)
                if age.days >= self.close_after:
                    return False
        return True

    def moderate(self, comment, content_object, request):
        """
        Return ``True`` when the comment should be held for approval
        (marked non-public), ``False`` to publish it immediately.
        """
        if not (self.auto_moderate_field and self.moderate_after is not None):
            return False
        moderate_field_value = getattr(content_object, self.auto_moderate_field)
        if moderate_field_value is None:
            return False
        age = self._get_delta(timezone.now(), moderate_field_value)
        return age.days >= self.moderate_after

    def email(self, comment, content_object, request):
        """
        Email site staff about the new comment when notifications are
        enabled; otherwise do nothing.
        """
        if not self.email_notification:
            return
        recipient_list = [address for _name, address in settings.MANAGERS]
        template = loader.get_template('comments/comment_notification_email.txt')
        context = Context({'comment': comment,
                           'content_object': content_object})
        subject = '[%s] New comment posted on "%s"' % (get_current_site(request).name,
                                                       content_object)
        send_mail(subject, template.render(context), settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True)
class Moderator(object):
    """
    Registry that applies comment moderation to a set of models.

    Call ``register`` with a model class (or a list of them) and a
    ``CommentModerator`` subclass to moderate that model's comments; call
    ``unregister`` to stop. Both expect classes, not instances.

    Moderation happens in two phases driven by the comment signals this
    class connects to: a pre-save phase that may mark a comment
    non-public or disallow it outright, and a post-save phase that sends
    any notification email (a disallowed comment is still saved once
    before removal).
    """
    def __init__(self):
        # Maps model class -> CommentModerator instance for that model.
        self._registry = {}
        self.connect()

    def connect(self):
        """Attach the moderation steps to the comment pre/post-save signals."""
        signals.comment_will_be_posted.connect(self.pre_save_moderation, sender=comments.get_model())
        signals.comment_was_posted.connect(self.post_save_moderation, sender=comments.get_model())

    def register(self, model_or_iterable, moderation_class):
        """
        Register model(s) for moderation under ``moderation_class``.

        Raises ``AlreadyModerated`` for any model already registered.
        """
        if isinstance(model_or_iterable, ModelBase):
            models = [model_or_iterable]
        else:
            models = model_or_iterable
        for model in models:
            if model in self._registry:
                raise AlreadyModerated("The model '%s' is already being moderated" % model._meta.module_name)
            self._registry[model] = moderation_class(model)

    def unregister(self, model_or_iterable):
        """
        Stop moderating comments for the given model(s).

        Raises ``NotModerated`` for any model not currently registered.
        """
        if isinstance(model_or_iterable, ModelBase):
            models = [model_or_iterable]
        else:
            models = model_or_iterable
        for model in models:
            if model not in self._registry:
                raise NotModerated("The model '%s' is not currently being moderated" % model._meta.module_name)
            del self._registry[model]

    def pre_save_moderation(self, sender, comment, request, **kwargs):
        """
        Run the allow/moderate checks before a new comment is saved.
        """
        model = comment.content_type.model_class()
        moderation = self._registry.get(model)
        if moderation is None:
            return
        content_object = comment.content_object
        # Returning False aborts the post entirely (HTTP 403 response).
        if not moderation.allow(comment, content_object, request):
            return False
        if moderation.moderate(comment, content_object, request):
            comment.is_public = False

    def post_save_moderation(self, sender, comment, request, **kwargs):
        """
        Send any notification email after a new comment has been saved.
        """
        model = comment.content_type.model_class()
        moderation = self._registry.get(model)
        if moderation is None:
            return
        moderation.email(comment, comment.content_object, request)
# Module-level singleton: import this instance in your own code and call
# ``moderator.register`` / ``moderator.unregister`` to control comment
# moderation for your models. Instantiating it connects the signal handlers.
moderator = Moderator()
| |
from datetime import datetime
from sqlalchemy import Column, Integer, String, DateTime, JSON
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm.exc import NoResultFound
from flask_dance.utils import FakeCache, first
from flask_dance.consumer.storage import BaseStorage
try:
    from flask_login import AnonymousUserMixin
except ImportError:
    # Flask-Login is an optional dependency; without it there is no default
    # anonymous-user class to filter out when resolving users.
    AnonymousUserMixin = None
class OAuthConsumerMixin:
    """
    A :ref:`SQLAlchemy declarative mixin <sqlalchemy:declarative_mixins>` with
    some suggested columns for a model to store OAuth tokens:

    ``id``
        an integer primary key
    ``provider``
        a short name to indicate which OAuth provider issued
        this token
    ``created_at``
        an automatically generated datetime that indicates when
        the OAuth provider issued this token
    ``token``
        a :class:`JSON <sqlalchemy.types.JSON>` field to store
        the actual token received from the OAuth provider
    """
    @declared_attr
    def __tablename__(cls):
        # Derive the table name from the subclass name, e.g. a subclass named
        # "OAuth" maps to the table "flask_dance_oauth".
        return f"flask_dance_{cls.__name__.lower()}"

    id = Column(Integer, primary_key=True)
    provider = Column(String(50), nullable=False)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    # MutableDict wrapping makes in-place changes to the token dict mark the
    # row as dirty so SQLAlchemy persists them.
    token = Column(MutableDict.as_mutable(JSON), nullable=False)

    def __repr__(self):
        # Include only the attributes that are set,
        # e.g. <OAuth id=1 provider="github">.
        parts = []
        parts.append(self.__class__.__name__)
        if self.id:
            parts.append(f"id={self.id}")
        if self.provider:
            parts.append(f'provider="{self.provider}"')
        return "<{}>".format(" ".join(parts))
class SQLAlchemyStorage(BaseStorage):
    """
    Stores and retrieves OAuth tokens using a relational database through
    the `SQLAlchemy`_ ORM.

    .. _SQLAlchemy: http://www.sqlalchemy.org/
    """

    def __init__(
        self,
        model,
        session,
        user=None,
        user_id=None,
        user_required=None,
        anon_user=None,
        cache=None,
    ):
        """
        Args:
            model: The SQLAlchemy model class that represents the OAuth token
                table in the database. At a minimum, it must have a
                ``provider`` column and a ``token`` column. If tokens are to be
                associated with individual users in the application, it must
                also have a ``user`` relationship to your User model.
                It is recommended, though not required, that your model class
                inherit from
                :class:`~flask_dance.consumer.storage.sqla.OAuthConsumerMixin`.
            session:
                The :class:`SQLAlchemy session <sqlalchemy.orm.session.Session>`
                for the database. If you're using `Flask-SQLAlchemy`_, this is
                ``db.session``.
            user:
                If you want OAuth tokens to be associated with individual users
                in your application, this is a reference to the user that you
                want to use for the current request. It can be an actual User
                object, a function that returns a User object, or a proxy to the
                User object. If you're using `Flask-Login`_, this is
                :attr:`~flask.ext.login.current_user`.
            user_id:
                If you want to pass an identifier for a user instead of an actual
                User object, use this argument instead. Sometimes it can save
                a database query or two. If both ``user`` and ``user_id`` are
                provided, ``user_id`` will take precedence.
            user_required:
                If set to ``True``, an exception will be raised if you try to
                set or retrieve an OAuth token without an associated user.
                If set to ``False``, OAuth tokens can be set with or without
                an associated user. The default is auto-detection: it will
                be ``True`` if you pass a ``user`` or ``user_id`` parameter,
                ``False`` otherwise.
            anon_user:
                If anonymous users are represented by a class in your application,
                provide that class here. If you are using `Flask-Login`_,
                anonymous users are represented by the
                :class:`flask_login.AnonymousUserMixin` class, but you don't have
                to provide that -- Flask-Dance treats it as the default.
            cache:
                An instance of `Flask-Caching`_. Providing a caching system is
                highly recommended, but not required.

        .. _Flask-SQLAlchemy: http://pythonhosted.org/Flask-SQLAlchemy/
        .. _Flask-Login: https://flask-login.readthedocs.io/
        .. _Flask-Caching: https://flask-caching.readthedocs.io/en/latest/
        """
        self.model = model
        self.session = session
        self.user = user
        self.user_id = user_id
        if user_required is None:
            # Auto-detect: tokens are user-scoped iff a user source was given.
            self.user_required = user is not None or user_id is not None
        else:
            self.user_required = user_required
        self.anon_user = anon_user or AnonymousUserMixin
        self.cache = cache or FakeCache()

    def _resolve_user(self, blueprint, user=None, user_id=None):
        """Resolve the ``(user object, user id)`` pair for an operation.

        Precedence for each: the explicit argument, then the storage-level
        value, then the blueprint config. The user object is unwrapped via
        ``_get_real_user`` (proxies and callables are resolved, anonymous
        users are discarded). Extracted because get/set/delete previously
        triplicated this logic.
        """
        uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
        u = first(
            _get_real_user(ref, self.anon_user)
            for ref in (user, self.user, blueprint.config.get("user"))
        )
        return u, uid

    def _filter_query_by_user(self, query, u, uid):
        """Narrow ``query`` to the given user, mirroring the model's schema.

        Prefers a ``user_id`` column, falls back to a ``user`` relationship;
        a user-aware model with no user given matches unowned rows only.
        """
        if hasattr(self.model, "user_id") and uid:
            return query.filter_by(user_id=uid)
        if hasattr(self.model, "user") and u:
            return query.filter_by(user=u)
        if hasattr(self.model, "user_id"):
            return query.filter_by(user_id=None)
        return query

    def make_cache_key(self, blueprint, user=None, user_id=None):
        """Build the cache key for this blueprint/user combination.

        The user object is only resolved when no user id is available,
        to avoid touching user proxies/callables unnecessarily.
        """
        uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
        if not uid:
            u = first(
                _get_real_user(ref, self.anon_user)
                for ref in (user, self.user, blueprint.config.get("user"))
            )
            uid = getattr(u, "id", u)
        return "flask_dance_token|{name}|{user_id}".format(
            name=blueprint.name, user_id=uid
        )

    def get(self, blueprint, user=None, user_id=None):
        """Return the stored OAuth token for this blueprint/user, or None.

        Checks the cache first; on a miss, queries the database (scoped to
        the resolved user when the model is user-aware) and caches the
        result. The ``user``/``user_id`` parameters are usually unset and
        the user information comes from the blueprint config instead.

        :param blueprint:
        :param user:
        :param user_id:
        :return: the token dict, or None when no row matches.
        :raises ValueError: when ``user_required`` is set and no user can
            be resolved.
        """
        # Check the cache before touching the database.
        cache_key = self.make_cache_key(blueprint=blueprint, user=user, user_id=user_id)
        token = self.cache.get(cache_key)
        if token:
            return token
        # Cache miss: query the database.
        query = self.session.query(self.model).filter_by(provider=blueprint.name)
        u, uid = self._resolve_user(blueprint, user, user_id)
        if self.user_required and not u and not uid:
            raise ValueError("Cannot get OAuth token without an associated user")
        query = self._filter_query_by_user(query, u, uid)
        try:
            token = query.one().token
        except NoResultFound:
            token = None
        # Cache the result (including a miss, stored as None).
        self.cache.set(cache_key, token)
        return token

    def set(self, blueprint, token, user=None, user_id=None):
        """Store ``token`` for this blueprint/user, replacing any existing row.

        The delete of the old row and the insert of the new one are
        committed together; the cache entry is invalidated afterwards.

        :raises ValueError: when ``user_required`` is set and no user can
            be resolved.
        """
        u, uid = self._resolve_user(blueprint, user, user_id)
        if self.user_required and not u and not uid:
            raise ValueError("Cannot set OAuth token without an associated user")
        # Queue up a delete of any existing row -- not run until commit().
        existing_query = self.session.query(self.model).filter_by(
            provider=blueprint.name
        )
        has_user_id = hasattr(self.model, "user_id")
        if has_user_id and uid:
            existing_query = existing_query.filter_by(user_id=uid)
        has_user = hasattr(self.model, "user")
        if has_user and u:
            existing_query = existing_query.filter_by(user=u)
        existing_query.delete()
        # Create a new model instance for this token.
        kwargs = {"provider": blueprint.name, "token": token}
        if has_user_id and uid:
            kwargs["user_id"] = uid
        if has_user and u:
            kwargs["user"] = u
        self.session.add(self.model(**kwargs))
        # Commit so the delete and the add happen in one transaction.
        self.session.commit()
        # Invalidate the cache entry for this blueprint/user.
        self.cache.delete(
            self.make_cache_key(blueprint=blueprint, user=user, user_id=user_id)
        )

    def delete(self, blueprint, user=None, user_id=None):
        """Delete any stored token for this blueprint/user and drop its cache entry.

        :raises ValueError: when ``user_required`` is set and no user can
            be resolved.
        """
        query = self.session.query(self.model).filter_by(provider=blueprint.name)
        u, uid = self._resolve_user(blueprint, user, user_id)
        if self.user_required and not u and not uid:
            raise ValueError("Cannot delete OAuth token without an associated user")
        self._filter_query_by_user(query, u, uid).delete()
        self.session.commit()
        # Invalidate the cache entry for this blueprint/user.
        self.cache.delete(
            self.make_cache_key(blueprint=blueprint, user=user, user_id=user_id)
        )
def _get_real_user(user, anon_user=None):
    """
    Unwrap ``user`` to a concrete user object.

    Accepts a real user object, a zero-argument callable that returns
    one, or a proxy exposing ``_get_current_object`` (like Flask-Login's
    ``current_user``). Returns ``None`` when the resolved object is an
    instance of ``anon_user``.
    """
    resolved = user
    if hasattr(resolved, "_get_current_object"):
        # A proxy: unwrap it to the object it points at.
        resolved = resolved._get_current_object()
    if callable(resolved):
        # A factory function: call it to obtain the user.
        resolved = resolved()
    if anon_user and isinstance(resolved, anon_user):
        # Anonymous users count as "no user".
        return None
    return resolved
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
from abc import abstractmethod
from collections import OrderedDict
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.task.task import Task
from pants.util.dirutil import fast_relpath, safe_delete, safe_walk
logger = logging.getLogger(__name__)
class SimpleCodegenTask(Task):
"""A base-class for code generation for a single target language.
:API: public
"""
  def __init__(self, context, workdir):
    """
    Add pass-thru Task Constructor for public API visibility.

    :param context: the pants run Context, passed through to the Task base class.
    :param workdir: this task's working directory, passed through unchanged.
    :API: public
    """
    super(SimpleCodegenTask, self).__init__(context, workdir)
  @classmethod
  def product_types(cls):
    """Product types offered to the round manager so codegen runs before resolve/compile."""
    # NB(gmalmquist): This is a hack copied from the old CodeGen base class to get the round manager
    # to properly run codegen before resolve and compile. It would be more correct to just have each
    # individual codegen class declare what languages it generates, but would cause problems with
    # scala. See https://rbcommons.com/s/twitter/r/2540/.
    return ['java', 'scala', 'python']
  @classmethod
  def register_options(cls, register):
    """Register the --allow-empty and --allow-dups options shared by all codegen tasks."""
    super(SimpleCodegenTask, cls).register_options(register)
    # Both options are fingerprinted: changing them invalidates cached results.
    register('--allow-empty', type=bool, default=True, fingerprint=True,
             help='Skip targets with no sources defined.',
             advanced=True)
    register('--allow-dups', type=bool, fingerprint=True,
             help='Allow multiple targets specifying the same sources. If duplicates are '
                  'allowed, the logic of find_sources will associate generated sources with '
                  'the least-dependent targets that generate them.',
             advanced=True)
  @classmethod
  def get_fingerprint_strategy(cls):
    """Override this method to use a fingerprint strategy other than the default one.

    :API: public

    :return: a fingerprint strategy, or None to use the default strategy.
    """
    return None
  @property
  def cache_target_dirs(self):
    # Opt in to the per-target results_dir mechanism provided by the Task base class.
    return True
  @property
  def validate_sources_present(self):
    """A property indicating whether input targets require sources.

    If targets should have sources, the `--allow-empty` flag indicates whether it is a
    warning or an error for sources to be missing.

    :API: public
    :return: True when targets must declare sources, False to skip the check entirely.
    """
    return True
  def synthetic_target_extra_dependencies(self, target, target_workdir):
    """Gets any extra dependencies generated synthetic targets should have.

    This method is optional for subclasses to implement, because some code generators may have no
    extra dependencies. The default is no extra dependencies.

    :param Target target: the Target from which we are generating a synthetic Target. E.g., 'target'
    might be a JavaProtobufLibrary, whose corresponding synthetic Target would be a JavaLibrary.
    It may not be necessary to use this parameter depending on the details of the subclass.
    :API: public
    :return: a list of dependencies.
    """
    return []
  def synthetic_target_type_by_target(self, target):
    """The type of target this codegen task generates.

    For example, the target type for JaxbGen would simply be JavaLibrary.

    NOTE(review): this docstring duplicates ``synthetic_target_type``; presumably this
    variant allows the synthetic type to vary per input target -- confirm against callers.

    :API: public
    :return: a type (class) that inherits from Target.
    """
    raise NotImplementedError
  def synthetic_target_type(self, target):
    """The type of target this codegen task generates.

    For example, the target type for JaxbGen would simply be JavaLibrary.

    :API: public
    :return: a type (class) that inherits from Target.
    """
    raise NotImplementedError
  def is_gentarget(self, target):
    """Predicate which determines whether the target in question is relevant to this codegen task.

    E.g., the JaxbGen task considers JaxbLibrary targets to be relevant, and nothing else.
    Subclasses must implement this.

    :API: public
    :param Target target: The target to check.
    :return: True if this class can generate code for the given target, False otherwise.
    """
    raise NotImplementedError
  def codegen_targets(self):
    """Finds codegen targets in the dependency graph.

    Filters the context's targets through the subclass's ``is_gentarget`` predicate.

    :API: public
    :return: an iterable of dependency targets.
    """
    return self.context.targets(self.is_gentarget)
def _do_validate_sources_present(self, target):
    """Checks whether sources is empty, and either raises a TaskError or just returns False.

    The specifics of this behavior are defined by whether the user sets --allow-empty to
    True/False: --allow-empty=False will result in a TaskError being raised in the event
    of an empty source set. If --allow-empty=True, this method will just return False and
    log a warning. Shared for all SimpleCodegenTask subclasses to help keep errors
    consistent and descriptive.

    :param target: Target to validate.
    :return: True if sources is not empty, False otherwise.
    """
    # Subclasses may opt out of source validation entirely.
    if not self.validate_sources_present:
        return True
    sources = target.sources_relative_to_buildroot()
    if not sources:
        message = ('Target {} has no sources.'.format(target.address.spec))
        if not self.get_options().allow_empty:
            raise TaskError(message)
        else:
            # logging.warn is a deprecated alias; logging.warning is the
            # documented spelling with identical behavior.
            logging.warning(message)
            return False
    return True
def _get_synthetic_address(self, target, target_workdir):
    """Build the Address for the synthetic target derived from `target`.

    The synthetic target is named after the original target's id and rooted at
    the generated-code directory, expressed relative to the buildroot.
    """
    synthetic_name = target.id
    # Spec path is the workdir's buildroot-relative path.
    sources_rel_path = os.path.relpath(target_workdir, get_buildroot())
    synthetic_address = Address(sources_rel_path, synthetic_name)
    return synthetic_address
def execute(self):
    """Run codegen for invalid targets and inject a synthetic target for every codegen target."""
    with self.invalidated(self.codegen_targets(),
                          invalidate_dependents=True,
                          fingerprint_strategy=self.get_fingerprint_strategy()) as invalidation_check:
        with self.context.new_workunit(name='execute', labels=[WorkUnitLabel.MULTITOOL]):
            for vt in invalidation_check.all_vts:
                # Build the target and handle duplicate sources.
                if not vt.valid:
                    if self._do_validate_sources_present(vt.target):
                        self.execute_codegen(vt.target, vt.results_dir)
                        self._handle_duplicate_sources(vt.target, vt.results_dir)
                    vt.update()
                # And inject a synthetic target to represent it.
                # NOTE: runs for every vt (valid or not), so the graph always
                # sees the synthetic target.
                self._inject_synthetic_target(vt.target, vt.results_dir)
@property
def _copy_target_attributes(self):
    """Return a list of attributes to be copied from the target to derived synthetic targets.

    Default is none; subclasses override to forward attributes (see
    `_inject_synthetic_target`, which reads each named attribute off the
    original target).
    """
    return []
def synthetic_target_dir(self, target, target_workdir):
    """Hook allowing subclasses to relocate the synthetic target's source dir.

    The default simply keeps the synthetic target rooted at the workdir that
    the code was generated into.

    :API: public
    """
    chosen_dir = target_workdir
    return chosen_dir
def _inject_synthetic_target(self, target, target_workdir):
    """Create, inject, and return a synthetic target for the given target and workdir.

    :param target: The target to inject a synthetic target for.
    :param target_workdir: The work directory containing the generated code for the target.
    """
    # Forward subclass-selected attributes from the original target.
    copied_attributes = {}
    for attribute in self._copy_target_attributes:
        copied_attributes[attribute] = getattr(target, attribute)
    target_workdir = self.synthetic_target_dir(target, target_workdir)
    synthetic_target = self.context.add_new_target(
        address=self._get_synthetic_address(target, target_workdir),
        target_type=self.synthetic_target_type(target),
        dependencies=self.synthetic_target_extra_dependencies(target, target_workdir),
        sources=list(self.find_sources(target, target_workdir)),
        derived_from=target,
        **copied_attributes
    )
    build_graph = self.context.build_graph
    # NB(pl): This bypasses the convenience function (Target.inject_dependency) in order
    # to improve performance.  Note that we can walk the transitive dependee subgraph once
    # for transitive invalidation rather than walking a smaller subgraph for every single
    # dependency injected.
    # Every dependee of the original target now also depends on the synthetic one.
    for dependent_address in build_graph.dependents_of(target.address):
        build_graph.inject_dependency(
            dependent=dependent_address,
            dependency=synthetic_target.address,
        )
    # NB(pl): See the above comment.  The same note applies.
    # The synthetic target inherits all of the original's dependencies.
    for concrete_dependency_address in build_graph.dependencies_of(target.address):
        build_graph.inject_dependency(
            dependent=synthetic_target.address,
            dependency=concrete_dependency_address,
        )
    # NOTE(review): the dependee walk is rooted at dependencies_of(target) --
    # presumably to dirty target itself plus all its dependees in one pass;
    # confirm this root set is intended rather than dependents_of.
    build_graph.walk_transitive_dependee_graph(
        build_graph.dependencies_of(target.address),
        work=lambda t: t.mark_transitive_invalidation_hash_dirty(),
    )
    # Keep target_roots consistent: if the original was a root, the synthetic
    # counterpart becomes one too.
    if target in self.context.target_roots:
        self.context.target_roots.append(synthetic_target)
    return synthetic_target
def resolve_deps(self, unresolved_deps):
    """Resolve dependency spec strings into targets via the build context.

    :API: public
    :param unresolved_deps: iterable of dependency specs to resolve.
    :return: an OrderedSet of resolved targets.
    :raises AddressLookupError: re-raised with the offending spec appended for context.
    """
    deps = OrderedSet()
    for dep in unresolved_deps:
        try:
            deps.update(self.context.resolve(dep))
        except AddressLookupError as e:
            raise AddressLookupError('{message}\n  on dependency {dep}'.format(message=e, dep=dep))
    return deps
@abstractmethod
def execute_codegen(self, target, target_workdir):
    """Generate code for the given target.

    Abstract: every concrete subclass supplies the actual code generation.

    :param target: A target to generate code for
    :param target_workdir: A clean directory into which to generate code
    """
def find_sources(self, target, target_workdir):
    """Determines what sources were generated by the target after the fact.

    This is done by searching the directory where this target's code was generated.

    :param Target target: the target for which to find generated sources.
    :param path target_workdir: directory containing sources for the target.
    :return: A set of filepaths relative to the target_workdir.
    :rtype: OrderedSet
    """
    # `target` itself is unused by the default implementation; subclasses may need it.
    return OrderedSet(self._find_sources_in_workdir(target_workdir))
def _find_sources_in_workdir(self, target_workdir):
    """Yield workdir-relative paths of all files under the given target_workdir."""
    for root, _, files in safe_walk(target_workdir):
        # Re-root each walked directory relative to the workdir.
        rel_root = fast_relpath(root, target_workdir)
        for name in files:
            yield os.path.join(rel_root, name)
def _handle_duplicate_sources(self, target, target_workdir):
    """Handles duplicate sources generated by the given gen target by either failure or deletion.

    This method should be called after all dependencies have been injected into the graph, but
    before injecting the synthetic version of this target.

    NB(gm): Some code generators may re-generate code that their dependent libraries generate.
    This results in targets claiming to generate sources that they really don't, so we try to
    filter out sources that were actually generated by dependencies of the target. This causes
    the code generated by the dependencies to 'win' over the code generated by dependees. By
    default, this behavior is disabled, and duplication in generated sources will raise a
    TaskError. This is controlled by the --allow-dups flag.
    """
    # Compute the raw sources owned by this target.
    by_target = self.find_sources(target, target_workdir)

    # Walk dependency gentargets and record any sources owned by those targets that are also
    # owned by this target.
    duplicates_by_target = OrderedDict()

    def record_duplicates(dep):
        # Skip the target itself and anything not produced by this codegen task.
        if dep == target or not self.is_gentarget(dep.concrete_derived_from):
            return
        duped_sources = [s for s in dep.sources_relative_to_source_root() if s in by_target]
        if duped_sources:
            duplicates_by_target[dep] = duped_sources
    target.walk(record_duplicates)

    # If there were no dupes, we're done.
    if not duplicates_by_target:
        return

    # If there were duplicates warn or error.
    messages = ['{target} generated sources that had already been generated by dependencies.'
                .format(target=target.address.spec)]
    for dep, duped_sources in duplicates_by_target.items():
        messages.append('\t{} also generated:'.format(dep.concrete_derived_from.address.spec))
        messages.extend(['\t\t{}'.format(source) for source in duped_sources])
    message = '\n'.join(messages)
    if self.get_options().allow_dups:
        logger.warn(message)
    else:
        raise self.DuplicateSourceError(message)

    # Finally, remove duplicates from the workdir. This prevents us from having to worry
    # about them during future incremental compiles.
    # (Only reached when --allow-dups=True; the error path raised above.)
    for dep, duped_sources in duplicates_by_target.items():
        for duped_source in duped_sources:
            safe_delete(os.path.join(target_workdir, duped_source))
class DuplicateSourceError(TaskError):
    """A target generated the same code that was generated by one of its dependencies.

    This is only thrown when --allow-dups=False.
    """
| |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests dealing with HTTP rate-limiting.
"""
import httplib
import StringIO
from xml.dom import minidom
from lxml import etree
import webob
from cinder.api.v1 import limits
from cinder.api import views
from cinder.api import xmlutil
import cinder.context
from cinder.openstack.common import jsonutils
from cinder import test
# Canned rate limits shared by the limiter test suites below.
TEST_LIMITS = [
    limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
    limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
    limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE),
    limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
    limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE),
]

# XML namespace map used by xpath assertions on serialized limits documents.
NS = {
    'atom': 'http://www.w3.org/2005/Atom',
    'ns': 'http://docs.openstack.org/common/api/v1.0'
}
class BaseLimitTestSuite(test.TestCase):
    """Base test suite which provides relevant stubs and time abstraction."""

    def setUp(self):
        super(BaseLimitTestSuite, self).setUp()
        # Virtual clock: limits.Limit._get_time is stubbed to read this value,
        # so tests advance time by mutating self.time instead of sleeping.
        self.time = 0.0
        self.stubs.Set(limits.Limit, "_get_time", self._get_time)
        # Per-test absolute quota values returned by the stubbed quota lookup.
        self.absolute_limits = {}

        def stub_get_project_quotas(context, project_id, usages=True):
            return dict((k, dict(limit=v))
                        for k, v in self.absolute_limits.items())

        # NOTE(review): this references cinder.quota, but only cinder.context
        # is imported at the top of this file -- presumably cinder.quota is
        # imported transitively; confirm.
        self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas",
                       stub_get_project_quotas)

    def _get_time(self):
        """Return the "time" according to this test suite."""
        return self.time
class LimitsControllerTest(BaseLimitTestSuite):
    """
    Tests for `limits.LimitsController` class.
    """

    def setUp(self):
        """Run before each test."""
        super(LimitsControllerTest, self).setUp()
        self.controller = limits.create_resource()

    def _get_index_request(self, accept_header="application/json"):
        """Helper to set routing arguments."""
        request = webob.Request.blank("/")
        request.accept = accept_header
        # Simulate the routing layer having dispatched to the index action.
        request.environ["wsgiorg.routing_args"] = (None, {
            "action": "index",
            "controller": "",
        })
        context = cinder.context.RequestContext('testuser', 'testproject')
        request.environ["cinder.context"] = context
        return request

    def _populate_limits(self, request):
        """Put limit info into a request."""
        _limits = [
            limits.Limit("GET", "*", ".*", 10, 60).display(),
            limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
            limits.Limit("GET", "changes-since*", "changes-since",
                         5, 60).display(),
        ]
        request.environ["cinder.limits"] = _limits
        return request

    def test_empty_index_json(self):
        """Test getting empty limit details in JSON."""
        request = self._get_index_request()
        response = request.get_response(self.controller)
        expected = {
            "limits": {
                "rate": [],
                "absolute": {},
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)

    def test_index_json(self):
        """Test getting limit details in JSON."""
        request = self._get_index_request()
        request = self._populate_limits(request)
        self.absolute_limits = {
            'gigabytes': 512,
            'volumes': 5,
        }
        response = request.get_response(self.controller)
        # The two '*' limits are grouped under one uri/regex entry; the
        # absolute quota keys are rendered with their public "maxTotal*" names.
        expected = {
            "limits": {
                "rate": [
                    {
                        "regex": ".*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                            {
                                "verb": "POST",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "HOUR",
                                "value": 5,
                                "remaining": 5,
                            },
                        ],
                    },
                    {
                        "regex": "changes-since",
                        "uri": "changes-since*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 5,
                                "remaining": 5,
                            },
                        ],
                    },
                ],
                "absolute": {"maxTotalVolumeGigabytes": 512,
                             "maxTotalVolumes": 5, },
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)

    def _populate_limits_diff_regex(self, request):
        """Put limit info into a request."""
        _limits = [
            limits.Limit("GET", "*", ".*", 10, 60).display(),
            limits.Limit("GET", "*", "*.*", 10, 60).display(),
        ]
        request.environ["cinder.limits"] = _limits
        return request

    def test_index_diff_regex(self):
        """Test getting limit details in JSON."""
        request = self._get_index_request()
        request = self._populate_limits_diff_regex(request)
        response = request.get_response(self.controller)
        # Same uri but different regexes must NOT be merged into one entry.
        expected = {
            "limits": {
                "rate": [
                    {
                        "regex": ".*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                        ],
                    },
                    {
                        "regex": "*.*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                        ],
                    },
                ],
                "absolute": {},
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)

    def _test_index_absolute_limits_json(self, expected):
        """Fetch the index and compare only the 'absolute' section to expected."""
        request = self._get_index_request()
        response = request.get_response(self.controller)
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body['limits']['absolute'])

    def test_index_ignores_extra_absolute_limits_json(self):
        """Quota keys without a public name mapping are dropped from output."""
        self.absolute_limits = {'unknown_limit': 9001}
        self._test_index_absolute_limits_json({})
class TestLimiter(limits.Limiter):
    """Marker Limiter subclass; lets tests verify the middleware honors a custom limiter class."""
    pass
class LimitMiddlewareTest(BaseLimitTestSuite):
    """
    Tests for the `limits.RateLimitingMiddleware` class.
    """

    @webob.dec.wsgify
    def _empty_app(self, request):
        """Do-nothing WSGI app."""
        pass

    def setUp(self):
        """Prepare middleware for use through fake WSGI app."""
        super(LimitMiddlewareTest, self).setUp()
        _limits = '(GET, *, .*, 1, MINUTE)'
        # Pass our TestLimiter subclass by dotted name to exercise the
        # middleware's custom-limiter-class loading.
        self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
                                                 "%s.TestLimiter" %
                                                 self.__class__.__module__)

    def test_limit_class(self):
        """Test that middleware selected correct limiter class."""
        # A bare `assert` is stripped when tests run under python -O; use a
        # unittest assertion so the check always executes.
        self.assertTrue(isinstance(self.app._limiter, TestLimiter))

    def test_good_request(self):
        """Test successful GET request through middleware."""
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)

    def test_limited_request_json(self):
        """Test a rate-limited (413) GET request through middleware."""
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)

        # Second request within the same minute exceeds the 1/min limit.
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(response.status_int, 413)

        self.assertTrue('Retry-After' in response.headers)
        retry_after = int(response.headers['Retry-After'])
        self.assertAlmostEqual(retry_after, 60, 1)

        body = jsonutils.loads(response.body)
        expected = "Only 1 GET request(s) can be made to * every minute."
        value = body["overLimitFault"]["details"].strip()
        self.assertEqual(value, expected)

    def test_limited_request_xml(self):
        """Test a rate-limited (413) response as XML"""
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)

        request = webob.Request.blank("/")
        request.accept = "application/xml"
        response = request.get_response(self.app)
        self.assertEqual(response.status_int, 413)

        root = minidom.parseString(response.body).childNodes[0]
        expected = "Only 1 GET request(s) can be made to * every minute."

        details = root.getElementsByTagName("details")
        self.assertEqual(details.length, 1)

        value = details.item(0).firstChild.data.strip()
        self.assertEqual(value, expected)
class LimitTest(BaseLimitTestSuite):
    """
    Tests for the `limits.Limit` class.
    """

    def test_GET_no_delay(self):
        """Test a limit handles 1 GET per second."""
        limit = limits.Limit("GET", "*", ".*", 1, 1)
        delay = limit("GET", "/anything")
        self.assertEqual(None, delay)
        self.assertEqual(0, limit.next_request)
        self.assertEqual(0, limit.last_request)

    def test_GET_delay(self):
        """Test two calls to 1 GET per second limit."""
        limit = limits.Limit("GET", "*", ".*", 1, 1)
        delay = limit("GET", "/anything")
        self.assertEqual(None, delay)

        # Immediate second call must be delayed by the 1-second window.
        delay = limit("GET", "/anything")
        self.assertEqual(1, delay)
        self.assertEqual(1, limit.next_request)
        self.assertEqual(0, limit.last_request)

        # Advance the virtual clock past the window; the limit clears.
        self.time += 4

        delay = limit("GET", "/anything")
        self.assertEqual(None, delay)
        self.assertEqual(4, limit.next_request)
        self.assertEqual(4, limit.last_request)
class ParseLimitsTest(BaseLimitTestSuite):
    """
    Tests for the default limits parser in the in-memory
    `limits.Limiter` class.
    """

    def test_invalid(self):
        """Test that parse_limits() handles invalid input correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          ';;;;;')

    def test_bad_rule(self):
        """Test that parse_limits() handles bad rules correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          'GET, *, .*, 20, minute')

    def test_missing_arg(self):
        """Test that parse_limits() handles missing args correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, 20)')

    def test_bad_value(self):
        """Test that parse_limits() handles bad values correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, foo, minute)')

    def test_bad_unit(self):
        """Test that parse_limits() handles bad units correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, 20, lightyears)')

    def test_multiple_rules(self):
        """Test that parse_limits() handles multiple rules correctly."""
        try:
            parsed = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
                                                 '(PUT, /foo*, /foo.*, 10, hour);'
                                                 '(POST, /bar*, /bar.*, 5, second);'
                                                 '(Say, /derp*, /derp.*, 1, day)')
        except ValueError as e:
            # `assert False` is stripped under python -O; self.fail always runs.
            self.fail(str(e))

        # Make sure the number of returned limits are correct
        self.assertEqual(len(parsed), 4)

        # Check all the verbs (parser upper-cases them)...
        expected = ['GET', 'PUT', 'POST', 'SAY']
        self.assertEqual([t.verb for t in parsed], expected)

        # ...the URIs...
        expected = ['*', '/foo*', '/bar*', '/derp*']
        self.assertEqual([t.uri for t in parsed], expected)

        # ...the regexes...
        expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
        self.assertEqual([t.regex for t in parsed], expected)

        # ...the values...
        expected = [20, 10, 5, 1]
        self.assertEqual([t.value for t in parsed], expected)

        # ...and the units...
        expected = [limits.PER_MINUTE, limits.PER_HOUR,
                    limits.PER_SECOND, limits.PER_DAY]
        self.assertEqual([t.unit for t in parsed], expected)
class LimiterTest(BaseLimitTestSuite):
    """
    Tests for the in-memory `limits.Limiter` class.
    """

    def setUp(self):
        """Run before each test."""
        super(LimiterTest, self).setUp()
        # user3 is configured with an empty (unlimited) per-user limit set.
        userlimits = {'user:user3': ''}
        self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)

    def _check(self, num, verb, url, username=None):
        """Check and yield results from checks."""
        for x in xrange(num):
            yield self.limiter.check_for_delay(verb, url, username)[0]

    def _check_sum(self, num, verb, url, username=None):
        """Check and sum results from checks."""
        results = self._check(num, verb, url, username)
        return sum(item for item in results if item)

    def test_no_delay_GET(self):
        """
        Simple test to ensure no delay on a single call for a limit verb we
        didn't set.
        """
        delay = self.limiter.check_for_delay("GET", "/anything")
        self.assertEqual(delay, (None, None))

    def test_no_delay_PUT(self):
        """
        Simple test to ensure no delay on a single call for a known limit.
        """
        delay = self.limiter.check_for_delay("PUT", "/anything")
        self.assertEqual(delay, (None, None))

    def test_delay_PUT(self):
        """
        Ensure the 11th PUT will result in a delay of 6.0 seconds until
        the next request will be granted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_POST(self):
        """
        Ensure the 8th POST will result in a delay of 60/7 seconds until
        the next request will be granted.
        """
        expected = [None] * 7
        results = list(self._check(7, "POST", "/anything"))
        self.assertEqual(expected, results)

        expected = 60.0 / 7.0
        results = self._check_sum(1, "POST", "/anything")
        # failUnlessAlmostEqual is a deprecated unittest alias for
        # assertAlmostEqual; same semantics (8 decimal places).
        self.assertAlmostEqual(expected, results, 8)

    def test_delay_GET(self):
        """
        Ensure the 11th GET will result in NO delay.
        """
        expected = [None] * 11
        results = list(self._check(11, "GET", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_PUT_volumes(self):
        """
        Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere is still
        OK after 5 requests...but then after 11 total requests, PUT limiting
        kicks in.
        """
        # First 6 requests on PUT /volumes
        expected = [None] * 5 + [12.0]
        results = list(self._check(6, "PUT", "/volumes"))
        self.assertEqual(expected, results)

        # Next 5 request on PUT /anything
        expected = [None] * 4 + [6.0]
        results = list(self._check(5, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_PUT_wait(self):
        """
        Ensure after hitting the limit and then waiting for the correct
        amount of time, the limit will be lifted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)

        # Advance time
        self.time += 6.0

        expected = [None, 6.0]
        results = list(self._check(2, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_multiple_delays(self):
        """
        Ensure multiple requests still get a delay.
        """
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything"))
        self.assertEqual(expected, results)

        self.time += 1.0

        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_user_limit(self):
        """
        Test user-specific limits.
        """
        self.assertEqual(self.limiter.levels['user3'], [])

    def test_multiple_users(self):
        """
        Tests involving multiple users.
        """
        # User1
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)

        # User2
        expected = [None] * 10 + [6.0] * 5
        results = list(self._check(15, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)

        # User3 has an empty limit set, so is never throttled.
        expected = [None] * 20
        results = list(self._check(20, "PUT", "/anything", "user3"))
        self.assertEqual(expected, results)

        self.time += 1.0

        # User1 again
        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)

        self.time += 1.0

        # User2 again
        expected = [4.0] * 5
        results = list(self._check(5, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)
class WsgiLimiterTest(BaseLimitTestSuite):
    """
    Tests for `limits.WsgiLimiter` class.
    """

    def setUp(self):
        """Run before each test."""
        super(WsgiLimiterTest, self).setUp()
        self.app = limits.WsgiLimiter(TEST_LIMITS)

    def _request_data(self, verb, path):
        """Get data describing a limit request verb/path."""
        return jsonutils.dumps({"verb": verb, "path": path})

    def _request(self, verb, url, username=None):
        """Make sure that POSTing to the given url causes the given username
        to perform the given action.  Make the internal rate limiter return
        delay and make sure that the WSGI app returns the correct response.

        :return: the X-Wait-Seconds header value if throttled (403), else None (204).
        """
        if username:
            request = webob.Request.blank("/%s" % username)
        else:
            request = webob.Request.blank("/")

        request.method = "POST"
        request.body = self._request_data(verb, url)
        response = request.get_response(self.app)

        if "X-Wait-Seconds" in response.headers:
            self.assertEqual(response.status_int, 403)
            return response.headers["X-Wait-Seconds"]

        self.assertEqual(response.status_int, 204)

    def test_invalid_methods(self):
        """Only POSTs should work."""
        requests = []
        for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
            request = webob.Request.blank("/", method=method)
            response = request.get_response(self.app)
            self.assertEqual(response.status_int, 405)

    def test_good_url(self):
        delay = self._request("GET", "/something")
        self.assertEqual(delay, None)

    def test_escaping(self):
        delay = self._request("GET", "/something/jump%20up")
        self.assertEqual(delay, None)

    def test_response_to_delays(self):
        # /delayed allows 1 GET per minute; second call is throttled.
        delay = self._request("GET", "/delayed")
        self.assertEqual(delay, None)

        delay = self._request("GET", "/delayed")
        self.assertEqual(delay, '60.00')

    def test_response_to_delays_usernames(self):
        # Throttling is tracked per-user, so each user gets their own window.
        delay = self._request("GET", "/delayed", "user1")
        self.assertEqual(delay, None)

        delay = self._request("GET", "/delayed", "user2")
        self.assertEqual(delay, None)

        delay = self._request("GET", "/delayed", "user1")
        self.assertEqual(delay, '60.00')

        delay = self._request("GET", "/delayed", "user2")
        self.assertEqual(delay, '60.00')
class FakeHttplibSocket(object):
    """
    Fake `httplib.HTTPResponse` replacement.

    Wraps a canned response string so httplib can "read" it like a socket.
    """

    def __init__(self, response_string):
        """Initialize new `FakeHttplibSocket`."""
        self._buffer = StringIO.StringIO(response_string)

    def makefile(self, _mode, _other):
        """Returns the socket's internal buffer (mode arguments are ignored)."""
        return self._buffer
class FakeHttplibConnection(object):
    """
    Fake `httplib.HTTPConnection`.

    Routes requests into a WSGI app instead of a real socket.
    """

    def __init__(self, app, host):
        """
        Initialize `FakeHttplibConnection`.
        """
        self.app = app
        self.host = host

    def request(self, method, path, body="", headers=None):
        """
        Requests made via this connection actually get translated and routed
        into our WSGI app, we then wait for the response and turn it back into
        an `httplib.HTTPResponse`.
        """
        if not headers:
            headers = {}

        req = webob.Request.blank(path)
        req.method = method
        req.headers = headers
        req.host = self.host
        req.body = body

        # Serialize the webob response and re-parse it through httplib so
        # callers see a genuine httplib.HTTPResponse.
        resp = str(req.get_response(self.app))
        resp = "HTTP/1.0 %s" % resp
        sock = FakeHttplibSocket(resp)
        self.http_response = httplib.HTTPResponse(sock)
        self.http_response.begin()

    def getresponse(self):
        """Return our generated response from the request."""
        return self.http_response
def wire_HTTPConnection_to_WSGI(host, app):
    """Monkeypatches HTTPConnection so that if you try to connect to host, you
    are instead routed straight to the given WSGI app.

    After calling this method, when any code calls

    httplib.HTTPConnection(host)

    the connection object will be a fake.  Its requests will be sent directly
    to the given WSGI app rather than through a socket.

    Code connecting to hosts other than host will not be affected.

    This method may be called multiple times to map different hosts to
    different apps.

    This method returns the original HTTPConnection object, so that the caller
    can restore the default HTTPConnection interface (for all hosts).
    """
    class HTTPConnectionDecorator(object):
        """Wraps the real HTTPConnection class so that when you instantiate
        the class you might instead get a fake instance.
        """

        def __init__(self, wrapped):
            self.wrapped = wrapped

        def __call__(self, connection_host, *args, **kwargs):
            # Only intercept connections aimed at the wired host.
            if connection_host == host:
                return FakeHttplibConnection(app, host)
            else:
                return self.wrapped(connection_host, *args, **kwargs)

    # Swap in the decorator and hand back the original class for restoration.
    oldHTTPConnection = httplib.HTTPConnection
    httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
    return oldHTTPConnection
class WsgiLimiterProxyTest(BaseLimitTestSuite):
    """
    Tests for the `limits.WsgiLimiterProxy` class.
    """

    def setUp(self):
        """
        Do some nifty HTTP/WSGI magic which allows for WSGI to be called
        directly by something like the `httplib` library.
        """
        super(WsgiLimiterProxyTest, self).setUp()
        self.app = limits.WsgiLimiter(TEST_LIMITS)
        # Route httplib connections to 169.254.0.1:80 straight into self.app.
        self.oldHTTPConnection = (
            wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app))
        self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")

    def test_200(self):
        """Successful request test."""
        delay = self.proxy.check_for_delay("GET", "/anything")
        self.assertEqual(delay, (None, None))

    def test_403(self):
        """Forbidden request test."""
        delay = self.proxy.check_for_delay("GET", "/delayed")
        self.assertEqual(delay, (None, None))

        # Second hit inside the window: proxy reports the delay and error body.
        delay, error = self.proxy.check_for_delay("GET", "/delayed")
        error = error.strip()

        expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be "
                    "made to /delayed every minute.")

        self.assertEqual((delay, error), expected)

    def tearDown(self):
        # restore original HTTPConnection object
        httplib.HTTPConnection = self.oldHTTPConnection
        super(WsgiLimiterProxyTest, self).tearDown()
class LimitsViewBuilderTest(test.TestCase):
    """Tests for `views.limits.ViewBuilder` rate/absolute limit rendering."""

    def setUp(self):
        super(LimitsViewBuilderTest, self).setUp()
        self.view_builder = views.limits.ViewBuilder()
        self.rate_limits = [{"URI": "*",
                             "regex": ".*",
                             "value": 10,
                             "verb": "POST",
                             "remaining": 2,
                             "unit": "MINUTE",
                             "resetTime": 1311272226},
                            {"URI": "*/volumes",
                             "regex": "^/volumes",
                             "value": 50,
                             "verb": "POST",
                             "remaining": 10,
                             "unit": "DAY",
                             "resetTime": 1311272226}]
        self.absolute_limits = {"metadata_items": 1,
                                "injected_files": 5,
                                "injected_file_content_bytes": 5}

    def test_build_limits(self):
        # resetTime 1311272226 renders as this ISO-8601 'next-available'.
        tdate = "2011-07-21T18:17:06Z"
        # NOTE(review): the expected absolute keys (maxServerMeta etc.) are the
        # view builder's public renaming of the quota keys set in setUp --
        # confirm against the ViewBuilder mapping.
        expected_limits = \
            {"limits": {"rate": [{"uri": "*",
                                  "regex": ".*",
                                  "limit": [{"value": 10,
                                             "verb": "POST",
                                             "remaining": 2,
                                             "unit": "MINUTE",
                                             "next-available": tdate}]},
                                 {"uri": "*/volumes",
                                  "regex": "^/volumes",
                                  "limit": [{"value": 50,
                                             "verb": "POST",
                                             "remaining": 10,
                                             "unit": "DAY",
                                             "next-available": tdate}]}],
                        "absolute": {"maxServerMeta": 1,
                                     "maxImageMeta": 1,
                                     "maxPersonality": 5,
                                     "maxPersonalitySize": 5}}}

        output = self.view_builder.build(self.rate_limits,
                                         self.absolute_limits)
        self.assertDictMatch(output, expected_limits)

    def test_build_limits_empty_limits(self):
        expected_limits = {"limits": {"rate": [],
                                      "absolute": {}}}

        abs_limits = {}
        rate_limits = []
        output = self.view_builder.build(rate_limits, abs_limits)
        self.assertDictMatch(output, expected_limits)
class LimitsXMLSerializationTest(test.TestCase):
    """Tests for `limits.LimitsTemplate` XML serialization."""

    def test_xml_declaration(self):
        serializer = limits.LimitsTemplate()

        fixture = {"limits": {
                   "rate": [],
                   "absolute": {}}}

        output = serializer.serialize(fixture)
        has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
        self.assertTrue(has_dec)

    def test_index(self):
        serializer = limits.LimitsTemplate()
        fixture = {
            "limits": {
                "rate": [{
                    "uri": "*",
                    "regex": ".*",
                    "limit": [{
                        "value": 10,
                        "verb": "POST",
                        "remaining": 2,
                        "unit": "MINUTE",
                        "next-available": "2011-12-15T22:42:45Z"}]},
                    {"uri": "*/servers",
                     "regex": "^/servers",
                     "limit": [{
                         "value": 50,
                         "verb": "POST",
                         "remaining": 10,
                         "unit": "DAY",
                         "next-available": "2011-12-15T22:42:45Z"}]}],
                "absolute": {"maxServerMeta": 1,
                             "maxImageMeta": 1,
                             "maxPersonality": 5,
                             "maxPersonalitySize": 10240}}}

        output = serializer.serialize(fixture)
        root = etree.XML(output)
        # Validate the whole document against the published 'limits' schema.
        xmlutil.validate_schema(root, 'limits')

        #verify absolute limits
        absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
        self.assertEqual(len(absolutes), 4)
        for limit in absolutes:
            name = limit.get('name')
            value = limit.get('value')
            self.assertEqual(value, str(fixture['limits']['absolute'][name]))

        #verify rate limits
        rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
        self.assertEqual(len(rates), 2)
        for i, rate in enumerate(rates):
            # Each serialized <rate> element carries its uri/regex attributes
            # and nested <limit> children mirroring the fixture dict.
            for key in ['uri', 'regex']:
                self.assertEqual(rate.get(key),
                                 str(fixture['limits']['rate'][i][key]))
            rate_limits = rate.xpath('ns:limit', namespaces=NS)
            self.assertEqual(len(rate_limits), 1)
            for j, limit in enumerate(rate_limits):
                for key in ['verb', 'value', 'remaining', 'unit',
                            'next-available']:
                    self.assertEqual(
                        limit.get(key),
                        str(fixture['limits']['rate'][i]['limit'][j][key]))

    def test_index_no_limits(self):
        serializer = limits.LimitsTemplate()

        fixture = {"limits": {
                   "rate": [],
                   "absolute": {}}}

        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'limits')

        #verify absolute limits
        absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
        self.assertEqual(len(absolutes), 0)

        #verify rate limits
        rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
        self.assertEqual(len(rates), 0)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import AvroTester
from time import time
from random import randint
from avro.ipc import AvroRemoteException
import struct
def i64(i):
    """Pack *i* as a big-endian signed 64-bit integer (8 bytes)."""
    packer = struct.Struct('>q')
    return packer.pack(i)
def timestamp():
    """Current wall-clock time in microseconds, as a Python 2 long."""
    return long(time() * 1e6)
def new_column(suffix, stamp=None, ttl=0):
    """Build a column dict with name/value derived from *suffix*.

    :param suffix: appended to 'name-'/'value-' to form the column fields.
    :param stamp: explicit clock timestamp; when not an int/long, the current
      time (microseconds) is used.
    :param ttl: time-to-live for the column, default 0.
    :return: dict with 'name', 'value', 'clock' and 'ttl' keys.
    """
    # The old `cond and stamp or timestamp()` idiom silently discarded an
    # explicit stamp of 0 (falsy) and substituted the current time; a
    # conditional expression honors stamp=0.
    ts = stamp if isinstance(stamp, (long, int)) else timestamp()
    column = dict()
    column['name'] = 'name-%s' % suffix
    column['value'] = 'value-%s' % suffix
    column['clock'] = {'timestamp': ts}
    column['ttl'] = ttl
    return column
def assert_columns_match(colA, colB):
    """Assert that two column dicts agree on their 'name' and 'value' fields."""
    if colA['name'] != colB['name']:
        raise AssertionError(
            "column name mismatch: %s != %s" % (colA['name'], colB['name']))
    if colA['value'] != colB['value']:
        raise AssertionError(
            "column value mismatch: %s != %s" % (colA['value'], colB['value']))
def assert_cosc(thing, with_supercolumn=False):
    """Assert that *thing* is shaped like a ColumnOrSuperColumn record.

    Requires a dict with a 'column' member (or 'super_column' when
    *with_supercolumn* is true) that itself contains a 'name'.

    Fix: use the ``in`` operator instead of ``dict.has_key`` — has_key
    was deprecated in Python 2.x and removed in Python 3.
    """
    containing = with_supercolumn and 'super_column' or 'column'
    assert isinstance(thing, dict), "Expected dict, got %s" % type(thing)
    assert containing in thing and 'name' in thing[containing], \
        "Invalid or missing \"%s\" member" % containing
def assert_raises(excClass, func, *args, **kwargs):
    """Invoke func(*args, **kwargs) and demand that it raises excClass.

    If the call completes without raising, a generic Exception is raised
    naming the expected class and the value actually returned.
    """
    try:
        result = func(*args, **kwargs)
    except excClass:
        return
    raise Exception('expected %s; got %s' % (excClass.__name__, result))
class TestRpcOperations(AvroTester):
    """Integration tests for the Cassandra Avro RPC interface.

    Each test drives ``self.client.request(<operation>, <params dict>)``
    (the Avro client is supplied by AvroTester) against a running server,
    covering single-column reads/writes, super columns, batch mutations,
    slices, counts and cluster/schema introspection.
    """
    def test_insert_simple(self): # Also tests get
        "setting and getting a simple column"
        self.__set_keyspace('Keyspace1')
        params = dict()
        params['key'] = 'key1'
        params['column_parent'] = {'column_family': 'Standard1'}
        params['column'] = new_column(1)
        params['consistency_level'] = 'ONE'
        self.client.request('insert', params)
        # Read the column back and verify it round-tripped intact.
        read_params = dict()
        read_params['key'] = params['key']
        read_params['column_path'] = dict()
        read_params['column_path']['column_family'] = 'Standard1'
        read_params['column_path']['column'] = params['column']['name']
        read_params['consistency_level'] = 'ONE'
        cosc = self.client.request('get', read_params)
        assert_cosc(cosc)
        assert_columns_match(cosc['column'], params['column'])
    def test_insert_super(self):
        "setting and getting a super column"
        self.__set_keyspace('Keyspace1')
        params = dict()
        params['key'] = 'key1'
        params['column_parent'] = dict()
        params['column_parent']['column_family'] = 'Super1'
        params['column_parent']['super_column'] = 'sc1'
        params['column'] = dict()
        params['column']['name'] = i64(1)
        params['column']['value'] = 'v1'
        params['column']['clock'] = { 'timestamp' : 0 }
        params['consistency_level'] = 'ONE'
        self.client.request('insert', params)
        # Read back through the full path: family -> super column -> column.
        read_params = dict()
        read_params['key'] = params['key']
        read_params['column_path'] = dict()
        read_params['column_path']['column_family'] = 'Super1'
        read_params['column_path']['super_column'] = params['column_parent']['super_column']
        read_params['column_path']['column'] = params['column']['name']
        read_params['consistency_level'] = 'ONE'
        cosc = self.client.request('get', read_params)
        assert_cosc(cosc)
        assert_columns_match(cosc['column'], params['column'])
    def test_remove_simple(self):
        "removing a simple column"
        self.__set_keyspace('Keyspace1')
        params = dict()
        params['key'] = 'key1'
        params['column_parent'] = {'column_family': 'Standard1'}
        params['column'] = new_column(1)
        params['consistency_level'] = 'ONE'
        self.client.request('insert', params)
        read_params = dict()
        read_params['key'] = params['key']
        read_params['column_path'] = dict()
        read_params['column_path']['column_family'] = 'Standard1'
        read_params['column_path']['column'] = params['column']['name']
        read_params['consistency_level'] = 'ONE'
        cosc = self.client.request('get', read_params)
        assert_cosc(cosc)
        # Remove the column, then verify a subsequent get fails remotely.
        remove_params = read_params
        remove_params['clock'] = {'timestamp': timestamp()}
        self.client.request('remove', remove_params)
        assert_raises(AvroRemoteException,
                self.client.request, 'get', read_params)
    def test_batch_mutate(self):
        "batching addition/removal mutations"
        self.__set_keyspace('Keyspace1')
        mutations = list()
        # New column mutations
        for i in range(3):
            cosc = {'column': new_column(i)}
            mutation = {'column_or_supercolumn': cosc}
            mutations.append(mutation)
        map_entry = {'key': 'key1', 'mutations': {'Standard1': mutations}}
        params = dict()
        params['mutation_map'] = [map_entry]
        params['consistency_level'] = 'ONE'
        self.client.request('batch_mutate', params)
        # Verify that new columns were added
        for i in range(3):
            column = new_column(i)
            cosc = self.__get('key1', 'Standard1', None, column['name'])
            assert_cosc(cosc)
            assert_columns_match(cosc['column'], column)
        # Add one more column; remove one column
        extra_column = new_column(3); remove_column = new_column(0)
        mutations = [{'column_or_supercolumn': {'column': extra_column}}]
        deletion = dict()
        deletion['clock'] = {'timestamp': timestamp()}
        deletion['predicate'] = {'column_names': [remove_column['name']]}
        mutations.append({'deletion': deletion})
        map_entry = {'key': 'key1', 'mutations': {'Standard1': mutations}}
        params = dict()
        params['mutation_map'] = [map_entry]
        params['consistency_level'] = 'ONE'
        self.client.request('batch_mutate', params)
        # Ensure successful column removal
        assert_raises(AvroRemoteException,
                self.__get, 'key1', 'Standard1', None, remove_column['name'])
        # Ensure successful column addition
        cosc = self.__get('key1', 'Standard1', None, extra_column['name'])
        assert_cosc(cosc)
        assert_columns_match(cosc['column'], extra_column)
    def test_get_slice_simple(self):
        "performing a slice of simple columns"
        self.__set_keyspace('Keyspace1')
        columns = list(); mutations = list()
        for i in range(6):
            columns.append(new_column(i))
        for column in columns:
            mutation = {'column_or_supercolumn': {'column': column}}
            mutations.append(mutation)
        mutation_params = dict()
        map_entry = {'key': 'key1', 'mutations': {'Standard1': mutations}}
        mutation_params['mutation_map'] = [map_entry]
        mutation_params['consistency_level'] = 'ONE'
        self.client.request('batch_mutate', mutation_params)
        # Slicing on list of column names
        slice_params= dict()
        slice_params['key'] = 'key1'
        slice_params['column_parent'] = {'column_family': 'Standard1'}
        slice_params['predicate'] = {'column_names': list()}
        slice_params['predicate']['column_names'].append(columns[0]['name'])
        slice_params['predicate']['column_names'].append(columns[4]['name'])
        slice_params['consistency_level'] = 'ONE'
        coscs = self.client.request('get_slice', slice_params)
        for cosc in coscs: assert_cosc(cosc)
        assert_columns_match(coscs[0]['column'], columns[0])
        assert_columns_match(coscs[1]['column'], columns[4])
        # Slicing on a range of column names
        slice_range = dict()
        slice_range['start'] = columns[2]['name']
        slice_range['finish'] = columns[5]['name']
        slice_range['reversed'] = False
        slice_range['count'] = 1000
        slice_params['predicate'] = {'slice_range': slice_range}
        coscs = self.client.request('get_slice', slice_params)
        for cosc in coscs: assert_cosc(cosc)
        # The range slice is inclusive of both endpoints: columns 2..5.
        assert len(coscs) == 4, "expected 4 results, got %d" % len(coscs)
        assert_columns_match(coscs[0]['column'], columns[2])
        assert_columns_match(coscs[3]['column'], columns[5])
    def test_multiget_slice_simple(self):
        "performing a slice of simple columns, multiple keys"
        self.__set_keyspace('Keyspace1')
        columns = list(); mutation_params = dict()
        for i in range(12):
            columns.append(new_column(i))
        # key1, first 6 columns
        mutations_one = list()
        for column in columns[:6]:
            mutation = {'column_or_supercolumn': {'column': column}}
            mutations_one.append(mutation)
        map_entry = {'key': 'key1', 'mutations': {'Standard1': mutations_one}}
        mutation_params['mutation_map'] = [map_entry]
        # key2, last 6 columns
        mutations_two = list()
        for column in columns[6:]:
            mutation = {'column_or_supercolumn': {'column': column}}
            mutations_two.append(mutation)
        map_entry = {'key': 'key2', 'mutations': {'Standard1': mutations_two}}
        mutation_params['mutation_map'].append(map_entry)
        mutation_params['consistency_level'] = 'ONE'
        self.client.request('batch_mutate', mutation_params)
        # Slice all 6 columns on both keys
        slice_params= dict()
        slice_params['keys'] = ['key1', 'key2']
        slice_params['column_parent'] = {'column_family': 'Standard1'}
        sr = {'start': '', 'finish': '', 'reversed': False, 'count': 1000}
        slice_params['predicate'] = {'slice_range': sr}
        slice_params['consistency_level'] = 'ONE'
        coscs_map = self.client.request('multiget_slice', slice_params)
        for entry in coscs_map:
            assert(entry['key'] in ['key1', 'key2']), \
                    "expected one of [key1, key2]; got %s" % entry['key']
            assert(len(entry['columns']) == 6), \
                    "expected 6 results, got %d" % len(entry['columns'])
    def test_get_count(self):
        "counting columns"
        self.__set_keyspace('Keyspace1')
        mutations = list()
        for i in range(10):
            mutation = {'column_or_supercolumn': {'column': new_column(i)}}
            mutations.append(mutation)
        mutation_params = dict()
        map_entry = {'key': 'key1', 'mutations': {'Standard1': mutations}}
        mutation_params['mutation_map'] = [map_entry]
        mutation_params['consistency_level'] = 'ONE'
        self.client.request('batch_mutate', mutation_params)
        # Count every column under key1 via an unbounded slice predicate.
        count_params = dict()
        count_params['key'] = 'key1'
        count_params['column_parent'] = {'column_family': 'Standard1'}
        sr = {'start': '', 'finish': '', 'reversed': False, 'count': 1000}
        count_params['predicate'] = {'slice_range': sr}
        count_params['consistency_level'] = 'ONE'
        num_columns = self.client.request('get_count', count_params)
        assert(num_columns == 10), "expected 10 results, got %d" % num_columns
    def test_describe_keyspaces(self):
        "retrieving a list of all keyspaces"
        keyspaces = self.client.request('describe_keyspaces', {})
        assert 'Keyspace1' in keyspaces, "Keyspace1 not in " + keyspaces
    def test_describe_keyspace(self):
        "retrieving a keyspace metadata"
        ks1 = self.client.request('describe_keyspace',
                {'keyspace': "Keyspace1"})
        assert ks1['replication_factor'] == 1
        cf0 = ks1['cf_defs'][0]
        assert cf0['comparator_type'] == "org.apache.cassandra.db.marshal.BytesType"
    def test_describe_cluster_name(self):
        "retrieving the cluster name"
        name = self.client.request('describe_cluster_name', {})
        assert 'Test' in name, "'Test' not in '" + name + "'"
    def test_describe_version(self):
        "getting the remote api version string"
        vers = self.client.request('describe_version', {})
        assert isinstance(vers, (str,unicode)), "api version is not a string"
        # API versions use a three-segment, all-numeric major.minor.patch form.
        segs = vers.split('.')
        assert len(segs) == 3 and len([i for i in segs if i.isdigit()]) == 3, \
                "incorrect api version format: " + vers
    def test_describe_partitioner(self):
        "getting the partitioner"
        part = "org.apache.cassandra.dht.CollatingOrderPreservingPartitioner"
        result = self.client.request('describe_partitioner', {})
        assert result == part, "got %s, expected %s" % (result, part)
    def test_system_column_family_operations(self):
        "adding, renaming, and removing column families"
        self.__set_keyspace('Keyspace1')
        # create
        columnDef = dict()
        columnDef['name'] = b'ValidationColumn'
        columnDef['validation_class'] = 'BytesType'
        cfDef = dict()
        cfDef['keyspace'] = 'Keyspace1'
        cfDef['name'] = 'NewColumnFamily'
        cfDef['column_metadata'] = [columnDef]
        s = self.client.request('system_add_column_family', {'cf_def' : cfDef})
        assert isinstance(s, unicode), \
            'returned type is %s, (not \'unicode\')' % type(s)
        ks1 = self.client.request(
            'describe_keyspace', {'keyspace' : 'Keyspace1'})
        assert 'NewColumnFamily' in [x['name'] for x in ks1['cf_defs']]
        # rename
        self.client.request('system_rename_column_family',
            {'old_name' : 'NewColumnFamily', 'new_name': 'RenameColumnFamily'})
        ks1 = self.client.request(
            'describe_keyspace', {'keyspace' : 'Keyspace1'})
        assert 'RenameColumnFamily' in [x['name'] for x in ks1['cf_defs']]
        # drop
        self.client.request('system_drop_column_family',
            {'column_family' : 'RenameColumnFamily'})
        ks1 = self.client.request(
            'describe_keyspace', {'keyspace' : 'Keyspace1'})
        assert 'RenameColumnFamily' not in [x['name'] for x in ks1['cf_defs']]
        assert 'NewColumnFamily' not in [x['name'] for x in ks1['cf_defs']]
        assert 'Standard1' in [x['name'] for x in ks1['cf_defs']]
    def __get(self, key, cf, super_name, col_name, consistency_level='ONE'):
        """
        Given arguments for the key, column family, super column name,
        column name, and consistency level, returns a dictionary
        representing a ColumnOrSuperColumn record.
        Raises an AvroRemoteException if the column is not found.
        """
        params = dict()
        params['key'] = key
        params['column_path'] = dict()
        params['column_path']['column_family'] = cf
        params['column_path']['column'] = col_name
        params['consistency_level'] = consistency_level
        if (super_name):
            params['super_column'] = super_name
        return self.client.request('get', params)
    def __set_keyspace(self, keyspace_name):
        # Private helper: all subsequent requests operate on this keyspace.
        self.client.request('set_keyspace', {'keyspace': keyspace_name})
# vi:ai sw=4 ts=4 tw=0 et
| |
import asyncio
import logging
from gear import Database
from hailtop import aiotools
from hailtop.utils import (
AsyncWorkerPool,
WaitableSharedPool,
periodically_call,
retry_long_running,
run_if_changed,
time_msecs,
)
from ..utils import Box
from .instance_collection import InstanceCollectionManager
from .job import mark_job_complete, unschedule_job
log = logging.getLogger('canceller')
class Canceller:
    """Background component that tears down cancelled work.

    Three event-driven loops handle cancelled jobs in the Ready, Creating
    and Running states; a periodic sweep unschedules orphaned attempts.
    All per-job work is funneled through a shared async worker pool.
    """
    @staticmethod
    async def create(app):
        # Factory: construct the Canceller and start its background loops.
        # retry_long_running restarts a loop after transient failures;
        # run_if_changed re-runs the loop body whenever its asyncio.Event
        # fires or the body returns should_wait=False.
        c = Canceller(app)
        c.task_manager.ensure_future(
            retry_long_running(
                'cancel_cancelled_ready_jobs_loop',
                run_if_changed,
                c.cancel_ready_state_changed,
                c.cancel_cancelled_ready_jobs_loop_body,
            )
        )
        c.task_manager.ensure_future(
            retry_long_running(
                'cancel_cancelled_creating_jobs_loop',
                run_if_changed,
                c.cancel_creating_state_changed,
                c.cancel_cancelled_creating_jobs_loop_body,
            )
        )
        c.task_manager.ensure_future(
            retry_long_running(
                'cancel_cancelled_running_jobs_loop',
                run_if_changed,
                c.cancel_running_state_changed,
                c.cancel_cancelled_running_jobs_loop_body,
            )
        )
        # Orphaned attempts are swept on a fixed 60-second cadence.
        c.task_manager.ensure_future(periodically_call(60, c.cancel_orphaned_attempts_loop_body))
        return c
    def __init__(self, app):
        self.app = app
        # Events used by run_if_changed to wake the corresponding loops.
        self.cancel_ready_state_changed: asyncio.Event = app['cancel_ready_state_changed']
        self.cancel_creating_state_changed: asyncio.Event = app['cancel_creating_state_changed']
        self.cancel_running_state_changed: asyncio.Event = app['cancel_running_state_changed']
        self.db: Database = app['db']
        self.async_worker_pool: AsyncWorkerPool = self.app['async_worker_pool']
        self.inst_coll_manager: InstanceCollectionManager = app['driver'].inst_coll_manager
        self.task_manager = aiotools.BackgroundTaskManager()
    def shutdown(self):
        # Stop the background loops first, then the worker pool; try/finally
        # guarantees the pool shuts down even if the task manager raises.
        try:
            self.task_manager.shutdown()
        finally:
            self.async_worker_pool.shutdown()
    async def cancel_cancelled_ready_jobs_loop_body(self):
        """One pass cancelling Ready jobs that should be cancelled.

        Returns should_wait: True tells run_if_changed to sleep until the
        state-changed event fires again; False re-runs the body immediately
        (some user's share was exhausted, so more work may remain).
        """
        records = self.db.select_and_fetchall(
            '''
SELECT user, CAST(COALESCE(SUM(n_cancelled_ready_jobs), 0) AS SIGNED) AS n_cancelled_ready_jobs
FROM user_inst_coll_resources
GROUP BY user
HAVING n_cancelled_ready_jobs > 0;
''',
        )
        user_n_cancelled_ready_jobs = {record['user']: record['n_cancelled_ready_jobs'] async for record in records}
        total = sum(user_n_cancelled_ready_jobs.values())
        if total == 0:
            should_wait = True
            return should_wait
        # Split a budget of ~300 cancellations across users in proportion
        # to their backlog, with a floor of 20 per user.
        user_share = {
            user: max(int(300 * user_n_jobs / total + 0.5), 20)
            for user, user_n_jobs in user_n_cancelled_ready_jobs.items()
        }
        async def user_cancelled_ready_jobs(user, remaining):
            # Yield up to remaining.value cancellable Ready jobs for this
            # user.  In a cancelled batch every non-always_run Ready job
            # qualifies; otherwise only jobs individually marked cancelled.
            async for batch in self.db.select_and_fetchall(
                '''
SELECT batches.id, batches_cancelled.id IS NOT NULL AS cancelled
FROM batches
LEFT JOIN batches_cancelled
       ON batches.id = batches_cancelled.id
WHERE user = %s AND `state` = 'running';
''',
                (user,),
            ):
                if batch['cancelled']:
                    async for record in self.db.select_and_fetchall(
                        '''
SELECT jobs.job_id
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0
LIMIT %s;
''',
                        (batch['id'], remaining.value),
                    ):
                        record['batch_id'] = batch['id']
                        yield record
                else:
                    async for record in self.db.select_and_fetchall(
                        '''
SELECT jobs.job_id
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0 AND cancelled = 1
LIMIT %s;
''',
                        (batch['id'], remaining.value),
                    ):
                        record['batch_id'] = batch['id']
                        yield record
        waitable_pool = WaitableSharedPool(self.async_worker_pool)
        should_wait = True
        for user, share in user_share.items():
            remaining = Box(share)
            async for record in user_cancelled_ready_jobs(user, remaining):
                batch_id = record['batch_id']
                job_id = record['job_id']
                id = (batch_id, job_id)
                log.info(f'cancelling job {id}')
                async def cancel_with_error_handling(app, batch_id, job_id, id):
                    # Errors are logged and swallowed so one bad job cannot
                    # wedge the whole cancellation loop.
                    try:
                        resources = []
                        await mark_job_complete(
                            app, batch_id, job_id, None, None, 'Cancelled', None, None, None, 'cancelled', resources
                        )
                    except Exception:
                        log.info(f'error while cancelling job {id}', exc_info=True)
                await waitable_pool.call(cancel_with_error_handling, self.app, batch_id, job_id, id)
                remaining.value -= 1
                if remaining.value <= 0:
                    should_wait = False
                    break
        await waitable_pool.wait()
        return should_wait
    async def cancel_cancelled_creating_jobs_loop_body(self):
        """One pass cancelling Creating jobs of cancelled batches.

        Returns should_wait (same protocol as
        cancel_cancelled_ready_jobs_loop_body).
        """
        records = self.db.select_and_fetchall(
            '''
SELECT user, CAST(COALESCE(SUM(n_cancelled_creating_jobs), 0) AS SIGNED) AS n_cancelled_creating_jobs
FROM user_inst_coll_resources
GROUP BY user
HAVING n_cancelled_creating_jobs > 0;
''',
        )
        user_n_cancelled_creating_jobs = {
            record['user']: record['n_cancelled_creating_jobs'] async for record in records
        }
        total = sum(user_n_cancelled_creating_jobs.values())
        if total == 0:
            should_wait = True
            return should_wait
        # Proportional share with a floor of 20, as in the Ready-jobs loop.
        user_share = {
            user: max(int(300 * user_n_jobs / total + 0.5), 20)
            for user, user_n_jobs in user_n_cancelled_creating_jobs.items()
        }
        async def user_cancelled_creating_jobs(user, remaining):
            async for batch in self.db.select_and_fetchall(
                '''
SELECT batches.id
FROM batches
INNER JOIN batches_cancelled
        ON batches.id = batches_cancelled.id
WHERE user = %s AND `state` = 'running';
''',
                (user,),
            ):
                async for record in self.db.select_and_fetchall(
                    '''
SELECT jobs.job_id, attempts.attempt_id, attempts.instance_name
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
STRAIGHT_JOIN attempts
           ON attempts.batch_id = jobs.batch_id AND attempts.job_id = jobs.job_id
WHERE jobs.batch_id = %s AND state = 'Creating' AND always_run = 0 AND cancelled = 0
LIMIT %s;
''',
                    (batch['id'], remaining.value),
                ):
                    record['batch_id'] = batch['id']
                    yield record
        waitable_pool = WaitableSharedPool(self.async_worker_pool)
        should_wait = True
        for user, share in user_share.items():
            remaining = Box(share)
            async for record in user_cancelled_creating_jobs(user, remaining):
                batch_id = record['batch_id']
                job_id = record['job_id']
                attempt_id = record['attempt_id']
                instance_name = record['instance_name']
                id = (batch_id, job_id)
                async def cancel_with_error_handling(app, batch_id, job_id, attempt_id, instance_name, id):
                    # Mark the job Cancelled, then delete the instance being
                    # created for it; failures are logged and swallowed.
                    try:
                        resources = []
                        end_time = time_msecs()
                        await mark_job_complete(
                            app,
                            batch_id,
                            job_id,
                            attempt_id,
                            instance_name,
                            'Cancelled',
                            None,
                            None,
                            end_time,
                            'cancelled',
                            resources,
                        )
                        instance = self.inst_coll_manager.get_instance(instance_name)
                        if instance is None:
                            log.warning(f'in cancel_cancelled_creating_jobs: unknown instance {instance_name}')
                            return
                        await instance.inst_coll.call_delete_instance(instance, 'cancelled')
                    except Exception:
                        log.info(f'cancelling creating job {id} on instance {instance_name}', exc_info=True)
                await waitable_pool.call(
                    cancel_with_error_handling, self.app, batch_id, job_id, attempt_id, instance_name, id
                )
                remaining.value -= 1
                if remaining.value <= 0:
                    should_wait = False
                    break
        await waitable_pool.wait()
        return should_wait
    async def cancel_cancelled_running_jobs_loop_body(self):
        """One pass unscheduling Running jobs of cancelled batches.

        Returns should_wait (same protocol as
        cancel_cancelled_ready_jobs_loop_body).
        """
        records = self.db.select_and_fetchall(
            '''
SELECT user, CAST(COALESCE(SUM(n_cancelled_running_jobs), 0) AS SIGNED) AS n_cancelled_running_jobs
FROM user_inst_coll_resources
GROUP BY user
HAVING n_cancelled_running_jobs > 0;
''',
        )
        user_n_cancelled_running_jobs = {record['user']: record['n_cancelled_running_jobs'] async for record in records}
        total = sum(user_n_cancelled_running_jobs.values())
        if total == 0:
            should_wait = True
            return should_wait
        # Proportional share with a floor of 20, as in the other loops.
        user_share = {
            user: max(int(300 * user_n_jobs / total + 0.5), 20)
            for user, user_n_jobs in user_n_cancelled_running_jobs.items()
        }
        async def user_cancelled_running_jobs(user, remaining):
            async for batch in self.db.select_and_fetchall(
                '''
SELECT batches.id
FROM batches
INNER JOIN batches_cancelled
        ON batches.id = batches_cancelled.id
WHERE user = %s AND `state` = 'running';
''',
                (user,),
            ):
                async for record in self.db.select_and_fetchall(
                    '''
SELECT jobs.job_id, attempts.attempt_id, attempts.instance_name
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
STRAIGHT_JOIN attempts
           ON attempts.batch_id = jobs.batch_id AND attempts.job_id = jobs.job_id
WHERE jobs.batch_id = %s AND state = 'Running' AND always_run = 0 AND cancelled = 0
LIMIT %s;
''',
                    (batch['id'], remaining.value),
                ):
                    record['batch_id'] = batch['id']
                    yield record
        waitable_pool = WaitableSharedPool(self.async_worker_pool)
        should_wait = True
        for user, share in user_share.items():
            remaining = Box(share)
            async for record in user_cancelled_running_jobs(user, remaining):
                batch_id = record['batch_id']
                job_id = record['job_id']
                id = (batch_id, job_id)
                async def unschedule_with_error_handling(app, record, instance_name, id):
                    # Errors are logged and swallowed, as in the other loops.
                    try:
                        await unschedule_job(app, record)
                    except Exception:
                        log.info(f'unscheduling job {id} on instance {instance_name}', exc_info=True)
                await waitable_pool.call(unschedule_with_error_handling, self.app, record, record['instance_name'], id)
                remaining.value -= 1
                if remaining.value <= 0:
                    should_wait = False
                    break
        await waitable_pool.wait()
        return should_wait
    async def cancel_orphaned_attempts_loop_body(self):
        # Periodic sweep: unschedule attempts that are still open on an
        # active instance but whose job has moved on (no longer
        # Running/Creating, or the job's current attempt differs).
        log.info('cancelling orphaned attempts')
        waitable_pool = WaitableSharedPool(self.async_worker_pool)
        n_unscheduled = 0
        async for record in self.db.select_and_fetchall(
            '''
SELECT attempts.*
FROM attempts
INNER JOIN jobs ON attempts.batch_id = jobs.batch_id AND attempts.job_id = jobs.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE attempts.start_time IS NOT NULL
  AND attempts.end_time IS NULL
  AND ((jobs.state != 'Running' AND jobs.state != 'Creating') OR jobs.attempt_id != attempts.attempt_id)
  AND instances.`state` = 'active'
ORDER BY attempts.start_time ASC
LIMIT 300;
''',
        ):
            batch_id = record['batch_id']
            job_id = record['job_id']
            attempt_id = record['attempt_id']
            instance_name = record['instance_name']
            id = (batch_id, job_id)
            n_unscheduled += 1
            async def unschedule_with_error_handling(app, record, instance_name, id, attempt_id):
                # Errors are logged and swallowed so the sweep continues.
                try:
                    await unschedule_job(app, record)
                except Exception:
                    log.info(
                        f'unscheduling job {id} with orphaned attempt {attempt_id} on instance {instance_name}',
                        exc_info=True,
                    )
            await waitable_pool.call(unschedule_with_error_handling, self.app, record, instance_name, id, attempt_id)
        await waitable_pool.wait()
        log.info(f'cancelled {n_unscheduled} orphaned attempts')
| |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HERCopierProp_ConnectedLHS(HimesisPreConditionPatternLHS):
    # NOTE(review): this class appears to be machine-generated (compiled from
    # an AToM3 model, per the __init__ docstring); prefer regenerating it
    # from the source model over editing it by hand.
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HERCopierProp_ConnectedLHS.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HERCopierProp_ConnectedLHS, self).__init__(name='HERCopierProp_ConnectedLHS', num_nodes=0, edges=[])
        # Set the graph attributes
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'ERCopierProp')
        # Set the node attributes
        # match class Feature() node
        self.add_node()
        self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[0]["MT_label__"] = """1"""
        self.vs[0]["MT_dirty__"] = False
        self.vs[0]["mm__"] = """MT_pre__Feature"""
        self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
        # match class EntityType() node
        self.add_node()
        self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[1]["MT_label__"] = """2"""
        self.vs[1]["MT_dirty__"] = False
        self.vs[1]["mm__"] = """MT_pre__EntityType"""
        self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
        # match class ERModel() node
        self.add_node()
        self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[2]["MT_label__"] = """3"""
        self.vs[2]["MT_dirty__"] = False
        self.vs[2]["mm__"] = """MT_pre__ERModel"""
        self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
        # Nodes that represent the edges of the property.
        # match association ERModel--entities-->EntityType node
        self.add_node()
        self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "entities"
"""
        self.vs[3]["MT_label__"] = """4"""
        self.vs[3]["MT_subtypes__"] = []
        self.vs[3]["MT_dirty__"] = False
        self.vs[3]["mm__"] = """MT_pre__directLink_S"""
        self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc3')
        # match association EntityType--features-->Feature node
        self.add_node()
        self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "features"
"""
        self.vs[4]["MT_label__"] = """5"""
        self.vs[4]["MT_subtypes__"] = []
        self.vs[4]["MT_dirty__"] = False
        self.vs[4]["mm__"] = """MT_pre__directLink_S"""
        self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc4')
        # Add the edges
        self.add_edges([
                (2,3), # match_class ERModel() -> association entities
                (3,1), # association entities -> match_class EntityType()
                (1,4), # match_class EntityType() -> association features
                (4,0) # association features -> match_class Feature()
        ])
        # Add the attribute equations
        self["equations"] = []
    # The eval_attr1<N> methods below are the executable counterparts of the
    # MT_pre__attr1 constraint strings attached to the match node labelled N
    # (stored on self.vs[N-1] above).
    def eval_attr11(self, attr_value, this):
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True
    def eval_attr12(self, attr_value, this):
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True
    def eval_attr13(self, attr_value, this):
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True
    def eval_attr14(self, attr_value, this):
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return attr_value == "entities"
    def eval_attr15(self, attr_value, this):
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return attr_value == "features"
    def constraint(self, PreNode, graph):
        """
        Executable constraint code.
        @param PreNode: Function taking an integer as parameter
                        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the nodes in the LHS have been matched.
        # You can access a matched node labelled n by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression:
        # returning True enables the rule to be applied,
        # returning False forbids the rule from being applied.
        #===============================================================================
        return True
| |
import datetime
import sys
from cStringIO import StringIO
from urlparse import urlparse
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.sessions.models import Session
from django.contrib.sessions.middleware import SessionWrapper
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import got_request_exception
from django.dispatch import dispatcher
from django.http import urlencode, SimpleCookie, HttpRequest
from django.test import signals
from django.utils.functional import curry
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
class ClientHandler(BaseHandler):
    """
    An HTTP handler usable for testing purposes.

    Requests are composed through the WSGI interface, but the raw
    HttpResponse object is returned rather than a serialized stream.
    """
    def __call__(self, environ):
        from django.conf import settings
        from django.core import signals

        # Middleware setup is deferred to the first call because the
        # settings were not available when this handler was created.
        if self._request_middleware is None:
            self.load_middleware()

        dispatcher.send(signal=signals.request_started)
        try:
            wsgi_request = WSGIRequest(environ)
            wsgi_response = self.get_response(wsgi_request)
            # Run the response phase of the middleware chain.
            for apply_middleware in self._response_middleware:
                wsgi_response = apply_middleware(wsgi_request, wsgi_response)
        finally:
            # Always signal completion, even if the view blew up.
            dispatcher.send(signal=signals.request_finished)
        return wsgi_response
def store_rendered_templates(store, signal, sender, template, context):
    """Record a rendered template and its context in *store*.

    Appends to the 'template' and 'context' lists, creating each list on
    first use; intended to be connected to the template_rendered signal.
    """
    for key, value in (('template', template), ('context', context)):
        store.setdefault(key, []).append(value)
def encode_multipart(boundary, data):
    """
    Encode a dictionary of form values as multipart POST data.

    Each key is used as the form data name and its value is transmitted
    as content: file objects are sent as application/octet-stream,
    iterables contribute one part per item, and anything else is sent
    as str(value).
    """
    parts = []
    for (key, value) in data.items():
        if isinstance(value, file):
            # NOTE(review): an empty form-data section is emitted before
            # the actual file part; preserved as-is from the original.
            parts += [
                '--' + boundary,
                'Content-Disposition: form-data; name="%s"' % key,
                '',
                '--' + boundary,
                'Content-Disposition: form-data; name="%s_file"; filename="%s"' % (key, value.name),
                'Content-Type: application/octet-stream',
                '',
                value.read()
            ]
        elif hasattr(value, '__iter__'):
            # One part per item for iterable values (e.g. multi-select).
            for item in value:
                parts += [
                    '--' + boundary,
                    'Content-Disposition: form-data; name="%s"' % key,
                    '',
                    str(item)
                ]
        else:
            parts += [
                '--' + boundary,
                'Content-Disposition: form-data; name="%s"' % key,
                '',
                str(value)
            ]
    # Closing boundary plus trailing CRLF.
    parts += [
        '--' + boundary + '--',
        '',
    ]
    return '\r\n'.join(parts)
class Client:
    """
    A class that can act as a client for testing purposes.

    It allows the user to compose GET and POST requests, and
    obtain the response that the server gave to those requests.
    The server Response objects are annotated with the details
    of the contexts and templates that were rendered during the
    process of serving the request.

    Client objects are stateful - they will retain cookie (and
    thus session) details for the lifetime of the Client instance.

    This is not intended as a replacement for Twill/Selenium or
    the like - it is here to allow testing against the
    contexts and templates produced by a view, rather than the
    HTML rendered to the end-user.
    """
    def __init__(self, **defaults):
        # ``defaults`` are extra WSGI environ entries merged into every
        # request composed by this client.
        self.handler = ClientHandler()
        self.defaults = defaults
        self.cookies = SimpleCookie()
        self.exc_info = None

    def store_exc_info(self, *args, **kwargs):
        """
        Utility method that can be used to store exceptions when they are
        generated by a view.
        """
        # Connected to got_request_exception; the stored tuple is re-raised
        # by request() so view failures surface in the test run.
        self.exc_info = sys.exc_info()

    def _session(self):
        "Obtain the current session variables"
        if 'django.contrib.sessions' in settings.INSTALLED_APPS:
            cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
            if cookie:
                return SessionWrapper(cookie.value)
        # Session framework not installed, or no session cookie yet.
        return {}
    session = property(_session)

    def request(self, **request):
        """
        The master request method. Composes the environment dictionary
        and passes to the handler, returning the result of the handler.

        Assumes defaults for the query environment, which can be overridden
        using the arguments to the request.
        """
        environ = {
            'HTTP_COOKIE': self.cookies,
            'PATH_INFO': '/',
            'QUERY_STRING': '',
            'REQUEST_METHOD': 'GET',
            'SCRIPT_NAME': None,
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
            'SERVER_PROTOCOL': 'HTTP/1.1',
        }
        environ.update(self.defaults)
        environ.update(request)

        # Curry a data dictionary into an instance of
        # the template renderer callback function
        data = {}
        on_template_render = curry(store_rendered_templates, data)
        dispatcher.connect(on_template_render, signal=signals.template_rendered)

        # Capture exceptions created by the handler
        dispatcher.connect(self.store_exc_info, signal=got_request_exception)

        response = self.handler(environ)

        # Add any rendered template detail to the response
        # If there was only one template rendered (the most likely case),
        # flatten the list to a single element
        for detail in ('template', 'context'):
            if data.get(detail):
                if len(data[detail]) == 1:
                    setattr(response, detail, data[detail][0]);
                else:
                    setattr(response, detail, data[detail])
            else:
                setattr(response, detail, None)

        # Look for a signalled exception and reraise it
        if self.exc_info:
            # Python 2 three-argument raise: re-raise with the original
            # traceback so the failure points into the view code.
            raise self.exc_info[1], None, self.exc_info[2]

        # Update persistent cookie data
        if response.cookies:
            self.cookies.update(response.cookies)

        return response

    def get(self, path, data={}, **extra):
        "Request a response from the server using GET."
        # NOTE(review): ``data`` is a mutable default, but it is only read
        # (urlencoded) here, never mutated.
        r = {
            'CONTENT_LENGTH': None,
            'CONTENT_TYPE': 'text/html; charset=utf-8',
            'PATH_INFO': path,
            'QUERY_STRING': urlencode(data),
            'REQUEST_METHOD': 'GET',
        }
        r.update(extra)
        return self.request(**r)

    def post(self, path, data={}, content_type=MULTIPART_CONTENT, **extra):
        "Request a response from the server using POST."
        # Encode as multipart unless the caller supplied another content type.
        if content_type is MULTIPART_CONTENT:
            post_data = encode_multipart(BOUNDARY, data)
        else:
            post_data = data
        r = {
            'CONTENT_LENGTH': len(post_data),
            'CONTENT_TYPE': content_type,
            'PATH_INFO': path,
            'REQUEST_METHOD': 'POST',
            'wsgi.input': StringIO(post_data),
        }
        r.update(extra)
        return self.request(**r)

    def login(self, **credentials):
        """Set the Client to appear as if it has successfully logged into a site.

        Returns True if login is possible; False if the provided credentials
        are incorrect, or if the Sessions framework is not available.
        """
        user = authenticate(**credentials)
        if user and 'django.contrib.sessions' in settings.INSTALLED_APPS:
            obj = Session.objects.get_new_session_object()

            # Create a fake request to store login details
            request = HttpRequest()
            request.session = SessionWrapper(obj.session_key)
            # Module-level django.contrib.auth.login, not this method.
            login(request, user)

            # Set the cookie to represent the session
            self.cookies[settings.SESSION_COOKIE_NAME] = obj.session_key
            self.cookies[settings.SESSION_COOKIE_NAME]['max-age'] = None
            self.cookies[settings.SESSION_COOKIE_NAME]['path'] = '/'
            self.cookies[settings.SESSION_COOKIE_NAME]['domain'] = settings.SESSION_COOKIE_DOMAIN
            self.cookies[settings.SESSION_COOKIE_NAME]['secure'] = settings.SESSION_COOKIE_SECURE or None
            self.cookies[settings.SESSION_COOKIE_NAME]['expires'] = None

            # Set the session values
            Session.objects.save(obj.session_key, request.session._session,
                datetime.datetime.now() + datetime.timedelta(seconds=settings.SESSION_COOKIE_AGE))

            return True
        else:
            return False
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Standard library
import urllib.error
import warnings
# Third-party
import numpy as np
from astropy.utils.data import download_file, clear_download_cache
from astropy.utils import iers
from astropy.time import Time
import astropy.units as u
from astropy.utils.data import _get_download_cache_locs, CacheMissingWarning
from astropy.coordinates import EarthLocation
# Package
from .exceptions import OldEarthOrientationDataWarning
__all__ = ["get_IERS_A_or_workaround", "download_IERS_A",
"time_grid_from_range", "_set_mpl_style_sheet",
"stride_array"]
IERS_A_WARNING = ("For best precision (on the order of arcseconds), you must "
"download an up-to-date IERS Bulletin A table. To do so, run:"
"\n\n"
">>> from astroplan import download_IERS_A\n"
">>> download_IERS_A()\n")
BACKUP_Time_get_delta_ut1_utc = Time._get_delta_ut1_utc
def _low_precision_utc_to_ut1(self, jd1, jd2):
    """
    Fallback UT1-UTC conversion used when no IERS Bulletin A table is
    available (e.g. no internet connection): assume UT1-UTC = 0.

    This method mimics `~astropy.coordinates.builtin_frames.utils.get_dut1utc`
    """
    stale_warning = (IERS_A_WARNING, OldEarthOrientationDataWarning)
    try:
        # Warn when the requested time lies outside the cached table.
        covered = self.mjd * u.day in iers.IERS_Auto.open()['MJD']
        if not covered:
            warnings.warn(*stale_warning)
        return self.delta_ut1_utc
    except (AttributeError, ValueError):
        # No usable table at all: warn and fall back to UT1-UTC = 0.
        warnings.warn(*stale_warning)
        return np.zeros(self.shape)
def get_IERS_A_or_workaround():
    """
    Get the cached IERS Bulletin A table if one exists. If one does not
    exist, monkey patch `~astropy.time.Time._get_delta_ut1_utc` so that
    `~astropy.time.Time` objects don't raise errors by computing UT1-UTC
    off the end of the IERS table.
    """
    if not IERS_A_in_cache():
        # Nothing cached: degrade gracefully to UT1-UTC = 0.
        Time._get_delta_ut1_utc = _low_precision_utc_to_ut1
    else:
        iers.IERS.iers_table = _get_IERS_A_table()
def IERS_A_in_cache():
    """
    Return True if the IERS Bulletin A table is locally cached.
    """
    for url_key in (iers.conf.iers_auto_url, iers.conf.iers_auto_url_mirror):
        # The ``urlmapfn`` access below follows the pattern used by
        # astropy.utils.data.download_file().
        try:
            dldir, urlmapfn = _get_download_cache_locs()
        except (IOError, OSError) as e:
            prefix = 'Remote data cache could not be accessed due to '
            suffix = ': ' + str(e) if e.args else ''
            warnings.warn(CacheMissingWarning(prefix + e.__class__.__name__ + suffix))
        else:
            # TODO: try to figure out how to test this in the unicode case
            with _open_shelve(urlmapfn, True) as url2hash:
                if str(url_key) in url2hash:
                    return True
    return False
def _get_IERS_A_table(warn_update=14*u.day):
    """
    Grab the locally cached copy of the IERS Bulletin A table and warn
    the user if it appears to be out of date.

    Raises OSError if the table is not in the cache.
    """
    if not IERS_A_in_cache():
        raise OSError("No IERS A table has been downloaded.")

    table = iers.IERS_Auto.open()
    # The polar motion flag marks the boundary between observations and
    # predictions; fall back to the table's final entry when it is absent.
    if 'PolPMFlag_A' in table.colnames:
        last_obs_index = ''.join(table['PolPMFlag_A']).index('IP')
        last_observation = Time(table['MJD'][last_obs_index], format='mjd')
    else:
        last_observation = Time(table['MJD'].max(), format='mjd')

    staleness = Time.now() - last_observation
    if warn_update < staleness:
        warnings.warn(
            "Your version of the IERS Bulletin A is {:.1f} days "
            "old. ".format(staleness.to(u.day).value) + IERS_A_WARNING,
            OldEarthOrientationDataWarning)
    return table
def download_IERS_A(show_progress=True):
    """
    Download and cache the IERS Bulletin A table.

    If one is already cached, download a new one and overwrite the old.
    Store the table in the astropy cache, and undo the monkey patching
    done by `~astroplan.get_IERS_A_or_workaround`.

    Parameters
    ----------
    show_progress : bool
        `True` shows a progress bar during the download.
    """
    mirrors = (iers.conf.iers_auto_url, iers.conf.iers_auto_url_mirror)
    if IERS_A_in_cache():
        for cached_url in mirrors:
            clear_download_cache(cached_url)
    # Try every mirror; only re-raise a download failure for the last one.
    for index, url in enumerate(mirrors):
        try:
            local_iers_a_path = download_file(url, cache=True,
                                              show_progress=show_progress)
        except urllib.error.URLError:
            if index == len(mirrors) - 1:
                raise
    # Undo monkey patch set up by get_IERS_A_or_workaround.
    iers.IERS.iers_table = iers.IERS_A.open(local_iers_a_path)
    Time._get_delta_ut1_utc = BACKUP_Time_get_delta_ut1_utc
@u.quantity_input(time_resolution=u.hour)
def time_grid_from_range(time_range, time_resolution=0.5*u.hour):
    """
    Get linearly-spaced sequence of times.

    Parameters
    ----------
    time_range : `~astropy.time.Time` (length = 2)
        Lower and upper bounds on time sequence.

    time_resolution : `~astropy.units.quantity` (optional)
        Time-grid spacing

    Returns
    -------
    times : `~astropy.time.Time`
        Linearly-spaced sequence of times
    """
    try:
        start_time, end_time = time_range
    except ValueError:
        raise ValueError("time_range should have a length of 2: lower and "
                         "upper bounds on the time sequence.")
    # Build the grid in Julian Date space with the requested spacing.
    jd_grid = np.arange(start_time.jd, end_time.jd,
                        time_resolution.to(u.day).value)
    return Time(jd_grid, format='jd')
def _mock_remote_data():
    """
    Apply mocks (i.e. monkey-patches) to avoid the need for internet
    access for certain things.

    This is currently called in `astroplan/conftest.py` when the tests are
    run and the `--remote-data` option isn't used.

    The way this setup works is that for functionality that usually
    requires internet access, but has mocks in place, it is possible to
    write the test without adding a `@remote_data` decorator, and
    `py.test` will do the right thing when running the tests:

    1. Access the internet and use the normal code if `--remote-data` is used
    2. Not access the internet and use the mock code if `--remote-data` is not used

    Both of these cases are tested on travis-ci.
    """
    from .target import FixedTarget
    from astropy.coordinates import EarthLocation

    # The ``_real_*`` attributes double as "already patched" markers, so a
    # second call cannot clobber the saved originals.
    if not hasattr(FixedTarget, '_real_from_name'):
        FixedTarget._real_from_name = FixedTarget.from_name
        FixedTarget.from_name = FixedTarget._from_name_mock
    if not hasattr(EarthLocation, '_real_of_site'):
        EarthLocation._real_of_site = EarthLocation.of_site
        EarthLocation.of_site = EarthLocation_mock.of_site_mock
def _unmock_remote_data():
    """
    Undo _mock_remote_data.

    Currently unused.
    """
    from .target import FixedTarget

    # Restore each original only if the mock is actually in place;
    # otherwise assume the attribute is already correct.
    if hasattr(FixedTarget, '_real_from_name'):
        FixedTarget.from_name = FixedTarget._real_from_name
        del FixedTarget._real_from_name
    if hasattr(EarthLocation, '_real_of_site'):
        EarthLocation.of_site = EarthLocation._real_of_site
        del EarthLocation._real_of_site
def _set_mpl_style_sheet(style_sheet):
    """
    Import matplotlib and apply ``style_sheet`` on top of the defaults,
    using the most backward compatible import pattern.
    """
    import matplotlib

    # Reset to defaults first so the sheet applies to a clean state.
    matplotlib.rcdefaults()
    matplotlib.rcParams.update(style_sheet)
def stride_array(arr, window_width):
    """
    Computes all possible sequential subarrays of arr with length = window_width

    Parameters
    ----------
    arr : array-like (length = n)
        Linearly-spaced sequence

    window_width : int
        Number of elements in each new sub-array

    Returns
    -------
    strided_arr : array (shape = (n - window_width + 1, window_width))
        All length-``window_width`` sliding windows of ``arr``
    """
    # Each row advances by one element, so there are n - window_width + 1
    # windows (the previous docstring said n - window_width, which was off
    # by one relative to the shape computed below).
    as_strided = np.lib.stride_tricks.as_strided
    new_shape = (len(arr) - window_width + 1, window_width)
    # The rows are overlapping *views* into arr (no copy is made), so the
    # result should be treated as read-only.
    strided_arr = as_strided(arr, new_shape, (arr.strides[0], arr.strides[0]))
    return strided_arr
class EarthLocation_mock(EarthLocation):
    """
    Mock the EarthLocation class if no remote data for locations commonly
    used in the tests.
    """
    @classmethod
    def of_site_mock(cls, string):
        # (longitude [deg], latitude [deg], height [m]) per observatory key.
        geodetic_coords = {
            'subaru': (-155.4761111111111, 19.825555555555564, 4139),
            'lco': (-70.70166666666665, -29.003333333333327, 2282),
            'aao': (149.06608611111113, -31.277038888888896, 1164),
            'vbo': (78.8266, 12.576659999999999, 725),
            'apo': (-105.82, 32.78, 2798),
            'keck': (-155.47833333333332, 19.828333333333326, 4160),
            'kpno': (-111.6, 31.963333333333342, 2120),
            'lapalma': (-17.879999, 28.758333, 2327),
        }
        lon, lat, height = geodetic_coords[string.lower()]
        return EarthLocation.from_geodetic(lon * u.deg, lat * u.deg,
                                           height * u.m)
def _open_shelve(shelffn, withclosing=False):
"""
Opens a shelf file. If ``withclosing`` is True, it will be opened with
closing, allowing use like:
with _open_shelve('somefile',True) as s:
...
This workaround can be removed in favour of using shelve.open() directly
once support for Python <3.4 is dropped.
"""
import shelve
import contextlib
shelf = shelve.open(shelffn, protocol=2)
if withclosing:
return contextlib.closing(shelf)
else:
return shelf
| |
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ImageCatalogShortResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'url': 'str',
        'id': 'int',
        'public_in_account': 'bool',
        'used_as_default': 'bool'
    }

    attribute_map = {
        'name': 'name',
        'url': 'url',
        'id': 'id',
        'public_in_account': 'publicInAccount',
        'used_as_default': 'usedAsDefault'
    }

    def __init__(self, name=None, url=None, id=None, public_in_account=False, used_as_default=False):
        """
        ImageCatalogShortResponse - a model defined in Swagger
        """
        self._name = None
        self._url = None
        self._id = None
        self._public_in_account = None
        self._used_as_default = None

        self.name = name
        self.url = url
        self.id = id
        self.public_in_account = public_in_account
        self.used_as_default = used_as_default

    @property
    def name(self):
        """
        Gets the name of this ImageCatalogShortResponse.
        name of the resource

        :return: The name of this ImageCatalogShortResponse.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this ImageCatalogShortResponse.
        name of the resource

        :param name: The name of this ImageCatalogShortResponse.
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        if name is not None and len(name) > 100:
            raise ValueError("Invalid value for `name`, length must be less than or equal to `100`")
        if name is not None and len(name) < 5:
            raise ValueError("Invalid value for `name`, length must be greater than or equal to `5`")
        if name is not None and not re.search(r'(^[a-z][-a-z0-9]*[a-z0-9]$)', name):
            raise ValueError("Invalid value for `name`, must be a follow pattern or equal to `/(^[a-z][-a-z0-9]*[a-z0-9]$)/`")

        self._name = name

    @property
    def url(self):
        """
        Gets the url of this ImageCatalogShortResponse.
        custom image catalog's URL

        :return: The url of this ImageCatalogShortResponse.
        :rtype: str
        """
        return self._url

    @url.setter
    def url(self, url):
        """
        Sets the url of this ImageCatalogShortResponse.
        custom image catalog's URL

        :param url: The url of this ImageCatalogShortResponse.
        :type: str
        """
        if url is None:
            raise ValueError("Invalid value for `url`, must not be `None`")
        # Raw strings here: "\/" is not a valid Python escape sequence, so the
        # non-raw originals triggered DeprecationWarning (SyntaxWarning on
        # newer CPython). The regex itself is unchanged ("\/" matches "/").
        if url is not None and not re.search(r'^http[s]?:\/\/.*', url):
            raise ValueError(r"Invalid value for `url`, must be a follow pattern or equal to `/^http[s]?:\/\/.*/`")

        self._url = url

    @property
    def id(self):
        """
        Gets the id of this ImageCatalogShortResponse.
        id of the resource

        :return: The id of this ImageCatalogShortResponse.
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this ImageCatalogShortResponse.
        id of the resource

        :param id: The id of this ImageCatalogShortResponse.
        :type: int
        """
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")

        self._id = id

    @property
    def public_in_account(self):
        """
        Gets the public_in_account of this ImageCatalogShortResponse.
        resource is visible in account

        :return: The public_in_account of this ImageCatalogShortResponse.
        :rtype: bool
        """
        return self._public_in_account

    @public_in_account.setter
    def public_in_account(self, public_in_account):
        """
        Sets the public_in_account of this ImageCatalogShortResponse.
        resource is visible in account

        :param public_in_account: The public_in_account of this ImageCatalogShortResponse.
        :type: bool
        """
        if public_in_account is None:
            raise ValueError("Invalid value for `public_in_account`, must not be `None`")

        self._public_in_account = public_in_account

    @property
    def used_as_default(self):
        """
        Gets the used_as_default of this ImageCatalogShortResponse.
        true if image catalog is the default one

        :return: The used_as_default of this ImageCatalogShortResponse.
        :rtype: bool
        """
        return self._used_as_default

    @used_as_default.setter
    def used_as_default(self, used_as_default):
        """
        Sets the used_as_default of this ImageCatalogShortResponse.
        true if image catalog is the default one

        :param used_as_default: The used_as_default of this ImageCatalogShortResponse.
        :type: bool
        """
        if used_as_default is None:
            raise ValueError("Invalid value for `used_as_default`, must not be `None`")

        self._used_as_default = used_as_default

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, ImageCatalogShortResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| |
"""Utility methods for working with OpHandler and tf.Operation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow.compat.v1 as tf
OP_TYPES_WITH_MULTIPLE_OUTPUTS = ('SplitV',)
# Dictionary mapping op type to input index of weights.
WEIGHTS_INDEX_DICT = {
'Conv2D': 1,
'Conv2DBackpropInput': 1,
'DepthwiseConv2dNative': 1,
'MatMul': 1
}
def get_input_ops(op, op_reg_manager, whitelist_indices=None):
"""Returns input ops for a given op.
Filters constants and weight tensors.
Args:
op: tf.Operation to get inputs of.
op_reg_manager: OpRegularizerManager to keep track of the grouping.
whitelist_indices: Optional, indices of op.inputs that should be considered.
Returns:
List of tf.Operation that are the inputs to op.
"""
if 'GumbelPrefix' in op.type:
return []
# Ignore scalar or 1-D constant inputs.
def is_const(tensor):
return tensor.op.type == 'Const'
def is_weight_tensor(i, op_type):
return i == WEIGHTS_INDEX_DICT.get(op_type, -666) # If op_type not in dict.
# If op has a weight tensor as an input, remove it.
inputs = list(op.inputs)
whitelist_indices = whitelist_indices or range(len(inputs))
filted_input_ops = []
for i, tensor in enumerate(inputs):
if (i not in whitelist_indices
or is_weight_tensor(i, op.type)
or is_const(tensor)
or tensor.op not in op_reg_manager.ops):
continue
filted_input_ops.append(tensor.op)
return filted_input_ops
def get_output_ops(op, op_reg_manager):
    """Returns output ops for a given op.

    Args:
      op: tf.Operation to get outputs of.
      op_reg_manager: OpRegularizerManager to keep track of the grouping.

    Returns:
      List of tf.Operation that are the outputs of op.
    """
    consumers = []
    for out_tensor in op.outputs:
        for candidate in out_tensor.consumers():
            # Keep only ops tracked by the manager, without duplicates.
            if candidate in op_reg_manager.ops and candidate not in consumers:
                consumers.append(candidate)
    return consumers
def get_ops_without_groups(ops, op_reg_manager):
    """Returns ops without OpGroup.

    Args:
      ops: List of tf.Operation.
      op_reg_manager: OpRegularizerManager to keep track of the grouping.

    Returns:
      List of tf.Operation that do not have OpGroup assigned.
    """
    ungrouped = []
    for op in ops:
        slices = op_reg_manager.get_op_slices(op)
        # An op qualifies as soon as any one of its slices lacks a group.
        if any(op_reg_manager.get_op_group(op_slice) is None
               for op_slice in slices):
            ungrouped.append(op)
    return ungrouped
def remove_non_passthrough_ops(ops, op_reg_manager):
    """Removes non-passthrough ops from ops.

    Args:
      ops: List of tf.Operation.
      op_reg_manager: OpRegularizerManager to keep track of the grouping.

    Returns:
      List of tf.Operation of only passthrough ops in ops.
    """
    passthrough_ops = []
    for candidate in ops:
        if op_reg_manager.is_passthrough(candidate):
            passthrough_ops.append(candidate)
    return passthrough_ops
def group_op_with_inputs_and_outputs(op, input_op_slices, output_op_slices,
                                     aligned_op_slice_sizes, op_reg_manager):
    """Groups op with inputs and outputs if grouping is inconsistent.

    Args:
      op: tf.Operation.
      input_op_slices: List of list of OpSlice, with a list per input op.
      output_op_slices: List of list of OpSlice, with a list per output op.
      aligned_op_slice_sizes: List of integer OpSlice sizes.
      op_reg_manager: OpRegularizerManager to keep track of the grouping.

    Returns:
      Boolean indicating if grouping was inconsistent.
    """
    op_slices = op_reg_manager.get_op_slices(op)
    inconsistent_grouping = False
    # Group aligned OpSlice by iterating along each slice.  Each slice_index
    # addresses one aligned slice across the op, its inputs, and its outputs.
    for slice_index in range(len(aligned_op_slice_sizes)):
        op_group = op_reg_manager.get_op_group(op_slices[slice_index])
        output_op_slices_at_index = [output_op_slice[slice_index]
                                     for output_op_slice in output_op_slices]
        input_op_slices_at_index = [input_op_slice[slice_index]
                                    for input_op_slice in input_op_slices]
        if op_group is None:
            # The current op does not have a group. Group with inputs and outputs.
            op_reg_manager.group_op_slices(
                [op_slices[slice_index]] + input_op_slices_at_index
                + output_op_slices_at_index)
            continue
        if any([op_group != op_reg_manager.get_op_group(output_op_slice)
                for output_op_slice in output_op_slices_at_index]):
            # Some output OpSlice have different grouping.
            op_reg_manager.group_op_slices(
                [op_slices[slice_index]] + output_op_slices_at_index)
            # Refresh OpGroup before comparing with input groups.
            op_group = op_reg_manager.get_op_group(op_slices[slice_index])
            inconsistent_grouping = True
        if any([op_group != op_reg_manager.get_op_group(input_op_slice)
                for input_op_slice in input_op_slices_at_index]):
            # Some input OpSlice have different grouping.
            op_slice = op_slices[slice_index]
            # Check if inputs have source ops. The default behavior is to regularize
            # all sources in the group; however, depending on the local structure, it
            # may be unnecessary to regularize these input sources. Flag this as a
            # potential issue.
            source_op_slices = _get_source_op_slices([op_slice], op_reg_manager)
            input_source_op_slices = _get_source_op_slices(
                input_op_slices_at_index, op_reg_manager)
            input_source_op_slices_to_be_merged = [s for s in input_source_op_slices
                                                   if s not in source_op_slices]
            if source_op_slices and input_source_op_slices_to_be_merged:
                tf.logging.warn('Potential overregularization near {}.'.format(op.name))
                tf.logging.warn('Downstream source slices:')
                for ss in source_op_slices:
                    tf.logging.warn(' {}'.format(ss))
                tf.logging.warn('...to be merged with upstream source slices:')
                for ss in input_source_op_slices_to_be_merged:
                    tf.logging.warn(' {}'.format(ss))
                tf.logging.warn('')
            op_reg_manager.group_op_slices([op_slice] + input_op_slices_at_index)
            inconsistent_grouping = True
    return inconsistent_grouping
def get_concat_input_op_slices(concat_ops, op_reg_manager):
    """Returns OpSlice for concat input ops to concatenate.

    For concat, all input OpSlice should be stacked to align with the concat
    OpSlice. Also, the last input is the axis which should be omitted.

    Args:
      concat_ops: List of tf.Operation which provide inputs to the concat op.
      op_reg_manager: OpRegularizerManager that tracks the slicing.

    Returns:
      List of list of OpSlice, where the outer list only has 1 element, and the
      inner list is the concatenation of input OpSlice.
    """
    stacked_slices = []
    for input_op in concat_ops:
        stacked_slices += op_reg_manager.get_op_slices(input_op)
    # A single inner list: all input slices flattened in order.
    return [stacked_slices]
def get_op_slices(ops, op_reg_manager):
    """Returns list of OpSlice per op in a list of ops.

    Args:
      ops: List of tf.Operation.
      op_reg_manager: OpRegularizerManager that tracks the slicing.

    Returns:
      List of list of OpSlice, where the outer list has a list per op, and the
      inner list is a list of OpSlice that compose the op.
    """
    per_op_slices = [op_reg_manager.get_op_slices(op) for op in ops]
    # Drop ops that yielded no slices.
    return [slices for slices in per_op_slices if slices]
def get_op_slice_sizes(op_slices):
    """Returns OpSlice sizes for a list of list of OpSlice.

    The outer list has an element per op, while the inner list is the list of
    OpSlice that compose the op.

    Args:
      op_slices: List of list of OpSlice.

    Returns:
      List of list of OpSlice sizes where the outer list has an entry per op.
    """
    return [[op_slice.slice.size for op_slice in op] for op in op_slices]
def get_aligned_op_slice_sizes(op_slices, input_op_slices, output_op_slices):
    """Returns list of OpSlice sizes with aligned sizes.

    Given lists of OpSlice for an op and its inputs and outputs, returns the
    smallest list of slice sizes that aligns the slices. For example, given an
    input of [[1, 2], [3]] representing a first op with slice sizes [1, 2] and
    a second op with op slice size [3], then the aligned slice sizes is [1, 2]
    to be compatible. This means the second op would need to be sliced to match
    the aligned slice sizes. As another example, given an input of
    [[2, 5], [3, 4]], both ops would need to be resliced; the smallest aligning
    list is [2, 1, 4]. Finally, [[5, 6, 7], [9, 4, 5], [18]] returns
    [5, 4, 2, 2, 5]. Once the slice sizes are aligned, the corresponding slices
    are of matching size and can be grouped for regularization.

    Args:
      op_slices: List of OpSlice for an op.
      input_op_slices: List of list of OpSlice, with a list per input op.
      output_op_slices: List of list of OpSlice, with a list per output op.

    Returns:
      List of integer slice sizes which is the smallest list of aligned sizes.
    """
    # TODO(a1): Create a helper class to manage list of list of OpSlice.
    all_sizes = (get_op_slice_sizes(input_op_slices)
                 + get_op_slice_sizes(output_op_slices)
                 + get_op_slice_sizes([op_slices]))
    return get_aligned_sizes(all_sizes)
def get_aligned_sizes(op_slice_sizes):
  """Returns list of OpSlice sizes with aligned sizes.

  Given a list of OpSlice sizes, returns the smallest list of slice sizes
  that aligns the slices.

  Args:
    op_slice_sizes: List of list of slice sizes, where the outer list has a
      list per op and the inner list is the integer slice sizes of the op.

  Returns:
    List of integer slice sizes which is the smallest list of aligned sizes.

  Raises:
    ValueError: If op_slice_sizes is empty.
    ValueError: If slice size lists do not have the same total size.
  """
  # Check for empty list.
  if not op_slice_sizes:
    raise ValueError('Cannot align empty op slice lists.')
  # Check that all ops have the same total size.
  total_slice_sizes = [
      get_total_slice_size(op_slice_size, 0, len(op_slice_size))
      for op_slice_size in op_slice_sizes]
  if total_slice_sizes.count(total_slice_sizes[0]) != len(total_slice_sizes):
    raise ValueError(
        'Total size for op slices do not match: %s' % op_slice_sizes)
  # Make local copy of op_slice_sizes for destruction.
  aligned_op_slice_sizes = [list(op_slice_size)
                            for op_slice_size in op_slice_sizes]
  slice_index = 0
  num_slices = _get_num_slices(op_slice_sizes)
  # Iterate slice by slice to check if slice sizes match across ops, or if they
  # need to be split further.
  while slice_index < num_slices:
    op_slices_at_index = [slice_size[slice_index]
                          for slice_size in aligned_op_slice_sizes]
    min_slice_size = min(op_slices_at_index)
    for op_index in range(len(aligned_op_slice_sizes)):
      old_size = aligned_op_slice_sizes[op_index][slice_index]
      if old_size != min_slice_size:
        # This OpSlice is bigger than the minimum, meaning this op needs to be
        # sliced again to match sizes. Shrink the current slice to the minimum
        # and insert the remainder right after it.
        aligned_op_slice_sizes[op_index][slice_index] = min_slice_size
        aligned_op_slice_sizes[op_index].insert(
            slice_index + 1, old_size - min_slice_size)
    # Splitting may have lengthened the lists, so refresh the loop bound.
    num_slices = _get_num_slices(aligned_op_slice_sizes)
    slice_index += 1
  # All size lists are now identical, so any one of them is the answer.
  return aligned_op_slice_sizes[0]
def _get_num_slices(op_slice_sizes):
"""Returns the number of slices in a list of OpSlice sizes.
Args:
op_slice_sizes: List of list of slice sizes, where the outer list has a list
per op and the inner list is the slice sizes of the op.
Returns:
Integer max number of slices in the list of ops.
"""
return max([len(slices) for slices in op_slice_sizes])
def reslice_concat_ops(concat_ops, aligned_op_slice_sizes, op_reg_manager):
  """Reslices concat ops according to aligned sizes.

  For concat, the input ops are concatenated which means the input op slice
  sizes must be concatenated when comparing to aligned slice sizes. This is
  different from the output, where the output op slices can be directly
  compared to the aligned sizes.

  For example, consider a concatenation of OpA (size 3) and OpB (size 5)
  which is input into OpC (size 8, but slices of size [3, 3, 2] perhaps due
  to another downstream concat). To group these ops, the input op slices
  need to be concatenated before aligning with the output op slices, which
  requires aligning op slice sizes [[3, 5], [3, 3, 2]] resulting in
  [3, 3, 2]. Thus, OpB needs to be sliced into sizes [3, 2] in order to make
  slice sizes compatible for grouping.

  Args:
    concat_ops: List of tf.Operation to slice.
    aligned_op_slice_sizes: List of integer slice sizes.
    op_reg_manager: OpRegularizerManager to keep track of slicing.

  Raises:
    ValueError: If concat op slice sizes do not match aligned op slice sizes.
  """
  # Walk the aligned size list; each concat input consumes a contiguous run
  # starting at concat_slice_index.
  concat_slice_index = 0
  for concat_op in concat_ops:
    concat_op_slices = op_reg_manager.get_op_slices(concat_op)
    concat_op_slice_sizes = get_op_slice_sizes([concat_op_slices])[0]
    if concat_op_slice_sizes == aligned_op_slice_sizes[
        concat_slice_index:concat_slice_index + len(concat_op_slice_sizes)]:
      # Slice sizes match so move on to the next op.
      concat_slice_index += len(concat_op_slice_sizes)
      continue
    else:
      # Slice sizes do not match. The concat op needs to be resliced to match
      # the aligned sizes.
      slice_count = 1
      concat_op_size = sum(concat_op_slice_sizes)
      slice_size = get_total_slice_size(
          aligned_op_slice_sizes, concat_slice_index, slice_count)
      # Accumulate aligned slices until the sizes match the input op size.
      while concat_op_size > slice_size:
        slice_count += 1
        slice_size = get_total_slice_size(
            aligned_op_slice_sizes, concat_slice_index, slice_count)
      if concat_op_size != slice_size:
        # Overshot the op boundary: aligned sizes cannot tile this op.
        raise ValueError('Could not properly slice op: %s' % concat_op)
      else:
        # Now concat_slice_index and slice_count specify the sublist of aligned
        # op slice sizes that match the current concat op. Reslice the concat
        # op using the aligned sizes.
        op_reg_manager.slice_op(
            concat_op,
            aligned_op_slice_sizes[
                concat_slice_index:concat_slice_index + slice_count])
        concat_slice_index += slice_count
def get_total_slice_size(op_slice_sizes, index, slice_count):
  """Returns total size of a sublist of slices.

  Args:
    op_slice_sizes: List of integer slice sizes.
    index: Integer index specifying the start of the sublist.
    slice_count: Integer number of slices to include in the total size.

  Returns:
    Integer total size of the sublist of slices.
  """
  window = op_slice_sizes[index:index + slice_count]
  return sum(window)
def reslice_ops(ops, aligned_op_slice_sizes, op_reg_manager):
  """Reslices ops according to aligned sizes.

  Args:
    ops: List of tf.Operation to slice.
    aligned_op_slice_sizes: List of integer slice sizes.
    op_reg_manager: OpRegularizerManager to keep track of slicing.
  """
  for candidate in ops:
    current_sizes = [s.slice.size
                     for s in op_reg_manager.get_op_slices(candidate)]
    # Skip ops with no slices, and ops whose slicing already matches.
    if not current_sizes:
      continue
    if current_sizes == aligned_op_slice_sizes:
      continue
    op_reg_manager.slice_op(candidate, aligned_op_slice_sizes)
def _get_source_op_slices(op_slices, op_reg_manager):
"""Returns list of OpSlice that are sources.
Args:
op_slices: List of OpSlice.
op_reg_manager: OpRegularizerManager to keep track of slicing.
Returns:
List of OpSlice that are sources.
"""
op_groups = [op_reg_manager.get_op_group(op_slice)
for op_slice in op_slices
if op_reg_manager.get_op_group(op_slice) is not None]
# pylint: disable=g-complex-comprehension
return list(set([source_op_slice for op_group in op_groups
for source_op_slice in op_group.source_op_slices]))
# pylint: enable=g-complex-comprehension
def group_aligned_input_output_slices(
    op, input_ops_to_process, output_ops_to_process, input_op_slices,
    output_op_slices, aligned_op_slice_sizes, op_reg_manager):
  """Groups aligned OpSlice and reprocesses ungrouped ops.

  Assuming OpSlice of op have been aligned with input and output, groups the
  corresponding OpSlice based on whether all inputs or all outputs are
  grouped. Ungrouped ops are put into the queue for processing.

  1. If all inputs and outputs have groups, op is also grouped with them for
     consistency.
  2. If all inputs are grouped, op is grouped with inputs while ungrouped
     outputs are queued for processing.
  3. If all outputs are grouped and there is only 1 input, op is grouped with
     outputs while ungrouped inputs are queued for processing.
  4. If neither inputs nor outputs are grouped, then all ungrouped ops are
     queued for processing as grouping for op currently cannot be resolved.

  Args:
    op: tf.Operation to determine grouping for.
    input_ops_to_process: List of tf.Operation of ungrouped input ops.
    output_ops_to_process: List of tf.Operation of ungrouped output ops.
    input_op_slices: List of list of OpSlice, with a list per input op.
    output_op_slices: List of list of OpSlice, with a list per output op.
    aligned_op_slice_sizes: List of integer slice sizes.
    op_reg_manager: OpRegularizerManager to keep track of grouping.
  """
  # NOTE(review): the docstring lists 4 cases but only 3 branches appear
  # below; documented case 3 (all outputs grouped) falls into the final else.
  # Confirm whether a dedicated branch was intended.
  # If all inputs and outputs have groups, group slices with op for consistency.
  if not input_ops_to_process and not output_ops_to_process:
    group_op_with_inputs_and_outputs(
        op, input_op_slices, output_op_slices, aligned_op_slice_sizes,
        op_reg_manager)
  elif not input_ops_to_process:
    # All inputs are grouped, so group with inputs and process outputs.
    group_op_with_inputs_and_outputs(
        op, input_op_slices, [], aligned_op_slice_sizes, op_reg_manager)
    op_reg_manager.process_ops(output_ops_to_process)
  else:
    # Both inputs and outputs need to be grouped first; revisit op afterwards.
    op_reg_manager.process_ops(output_ops_to_process + input_ops_to_process)
    op_reg_manager.process_ops_last([op])
def get_op_size(op):
  """Returns output size of an op.

  The output size of an op is typically the last dimension of the output
  tensor. For example, this is the number of output channels of a
  convolution. If the op has no shape (i.e. a constant), then return 0.

  Args:
    op: A tf.Operation.

  Returns:
    Integer output size of the op.
  """
  if op.type in OP_TYPES_WITH_MULTIPLE_OUTPUTS:
    # Multi-output ops report the sum of last dimensions over all outputs.
    return sum(t.shape.as_list()[-1] for t in op.outputs)
  # For regular ops, use the first output tensor; an empty (scalar) shape
  # yields size 0.
  first_output_shape = op.outputs[0].shape.as_list()
  return first_output_shape[-1] if first_output_shape else 0
def separate_same_size_ops(reference_op, ops):
  """Separate ops by comparing to size of op.

  Ops of size 0 are dropped entirely.

  Args:
    reference_op: tf.Operation which is the reference size.
    ops: List of tf.Operation to compare to the reference op size.

  Returns:
    A 2-tuple of lists of tf.Operation. The first element is a list of
    tf.Operation which match the size of the reference op. The second
    element is a list of tf.Operation that do not match the size of the
    reference op.
  """
  target_size = get_op_size(reference_op)
  matching = []
  mismatched = []
  for candidate in ops:
    size = get_op_size(candidate)
    if size == target_size:
      matching.append(candidate)
    elif size > 0:
      mismatched.append(candidate)
  return (matching, mismatched)
def group_match(regex, op_slices):
  """Returns True if the regex is found in the op name of any OpSlice.

  Args:
    regex: A string regex.
    op_slices: List of OpRegularizerManager.OpSlice.

  Returns:
    True if the regex is found in the op name of any op in op_slices.
  """
  if not regex:
    # An empty/None pattern never matches a group.
    return False
  return any(re.search(regex, op_slice.op.name) for op_slice in op_slices)
| |
from PIL import Image, ImageTk
import math
# import Image
import Tkinter
# import turtle
from turtle import RawTurtle, TurtleScreen
import random
import json
import os
# import time
import numpy as np
# Global Tkinter/turtle drawing surface shared by Map and the particle code.
root = Tkinter.Tk()
# Canvas is DISABLED so user interaction cannot disturb the drawing.
canvas = Tkinter.Canvas(root, width=1200, height=264, state=Tkinter.DISABLED)
canvas.pack()
screen = TurtleScreen(canvas)
turtle = RawTurtle(screen)
# screen.setworldcoordinates(0, 399, 999, 0)
turtle.hideturtle()
turtle.up()
# Batch a huge number of drawing steps per refresh; screen.update() flushes.
turtle.tracer(50000, delay=0)
# turtle.register_shape("dot", ((-3,-3), (-3,3), (3,3), (3,-3)))
screen.register_shape("tri", ((-3, -2), (0, 3), (3, -2), (0, 0)))
turtle.speed(0)
# Redraw throttles: 0 means redraw on every update call.
UPDATE_EVERY = 0
DRAW_EVERY = 2
class Map(object):
    """2-D occupancy map loaded from a floor-plan image plus JSON georeference.

    The JSON file names an image and two reference points given both in GPS
    and in pixel coordinates; from those the map derives a rotation angle, a
    metres-per-pixel scale and a GPS origin, enabling conversion between
    pixel, metre and GPS coordinates. Any non-white image pixel is treated
    as an obstacle. The class also owns all Tkinter drawing of particles,
    the robot and the estimated mean position on the module-level canvas.
    """
    # Degree/radian conversion factors used by the GPS math below.
    DEG_TO_RAD = math.pi / 180
    RAD_TO_DEG = 180 / math.pi
    def __init__(self, json_file):
        # Load the image + georeference, then rasterise walls into a bitmap.
        self.load_from_json_file(json_file)
        self.width, self.height = screen.screensize()
        # self.im_resized = self.im.resize((self.width, self.height), Image.ANTIALIAS)
        self.photo = ImageTk.PhotoImage(self.im)
        # self.photo = self.photo.zoom(1000)
        canvas.create_image(0, 0, image=self.photo, anchor='nw')
        img_data = list(self.im.getdata())
        # img_bitmap: 1 = obstacle (any non-white pixel), 0 = free space.
        self.img_bitmap = np.zeros(len(img_data), dtype=np.int8)
        for i in np.arange(len(img_data)):
            if img_data[i] != (255, 255, 255):
                self.img_bitmap[i] = 1
        # Reshape to (height, width) then transpose so indexing is [x][y].
        self.img_bitmap = self.img_bitmap.reshape(self.im.size[::-1])
        self.img_bitmap = self.img_bitmap.transpose()
        # print(screen.screensize())
        # print(self.im_pixels[468, 231], self.img_bitmap[468][231])
        # print(self.im.size)
        # Flip world coordinates so (0, 0) is the top-left image corner.
        screen.setworldcoordinates(0, self.height - 1, self.width - 1, 0)
        turtle.home()
        # self.blocks = []
        self.update_cnt = 0
        self.one_px = float(turtle.window_width()) / float(self.width) / 2
        # self.beacons = []
        # for y, line in enumerate(self.maze):
        # for x, block in enumerate(line):
        # if block:
        # nb_y = self.height - y - 1
        # self.blocks.append((x, nb_y))
        # if block == 2:
        # self.beacons.extend(((x, nb_y), (x+1, nb_y),
        # (x, nb_y+1), (x+1, nb_y+1)))
    def load_from_json_file(self, json_file):
        """Loads the map image, reference points, beacons and derived
        transforms (rotation angle, scale, GPS origin) from a JSON file
        located next to this script."""
        json_data = open(os.path.join(os.path.dirname(
            os.path.abspath(__file__)), json_file))
        data = json.load(json_data)
        json_data.close()
        self.im = Image.open(os.path.join(os.path.dirname(
            os.path.abspath(__file__)), data["Filepath"]))
        self.im = self.im.convert('RGB')
        self.im_pixels = self.im.load()
        # Two calibration points, each with GPS and pixel coordinates.
        self.lats = [None] * 2
        self.lons = [None] * 2
        self.xs = [None] * 2
        self.ys = [None] * 2
        for i in range(0, 2):
            self.lats[i] = data["Points"][i]["GPS"]["latitude"]
            self.lons[i] = data["Points"][i]["GPS"]["longitude"]
            self.xs[i] = data["Points"][i]["Pixel world"]["x"]
            self.ys[i] = data["Points"][i]["Pixel world"]["y"]
        self.beacons = data["Beacons"]
        self.angle = self.getAngle()
        self.metersForPixel = self.calculateMetersForPixel()
        # Back out the GPS coordinates of pixel (0, 0) from point 1.
        (x, y) = self.PixelsToXYMeters(self.xs[1], self.ys[1])
        self.origin = self.XYMetersToGPS(
            self.lats[1], self.lons[1], -x, -y)
    def check(self, x0, y0):
        """Returns True if (x0, y0) is inside the map and free of obstacles.
        Side effect: paints the probed pixel blue for debugging."""
        if not self.is_in(x0, y0):
            return False
        self.im_pixels[int(x0), int(y0)] = (0, 0, 255)
        return self.img_bitmap[int(x0)][int(y0)] == 0
    def segment_is_intersecting_walls(self, x0, y0, x1, y1):
        """Walks the pixel segment (x0, y0)-(x1, y1) with Bresenham's line
        algorithm and returns True if any visited pixel is a wall or lies
        outside the map."""
        # print('*')
        # for i in range(100):
        # for j in range(100):
        # self.im_pixels[i, j] = (0, 255, 0)
        x0 = int(x0)
        y0 = int(y0)
        x1 = int(x1)
        y1 = int(y1)
        dx = abs(x1-x0)
        dy = abs(y1-y0)
        if (x0 < x1):
            sx = 1
        else:
            sx = -1
        if (y0 < y1):
            sy = 1
        else:
            sy = -1
        err = dx-dy
        while(True):
            # print x0, y0
            if(not self.check(x0,y0)):
                # print 0
                return True
            if (x0 == x1 and y0 == y1):
                break
            e2 = 2*err
            if (e2 > -dy):
                err = err - dy
                x0 = x0 + sx
                # Re-check immediately when the x step reaches the endpoint.
                if (x0 == x1 and y0 == y1):
                    if(not self.check(x0,y0)):
                        # print 0
                        return True
                    break
            if (e2 < dx):
                err = err + dx
                y0 = y0 + sy
        return False
    def GPSToXYMeters(self, latitude1, longitude1, latitude2, longitude2):
        """Projects the GPS delta point1-point2 into map-aligned metres
        (equirectangular approximation, then rotated by -self.angle)."""
        x = (longitude1 - longitude2) * self.DEG_TO_RAD * math.cos((latitude1 + latitude2) * self.DEG_TO_RAD/2) * 6371000
        y = (latitude1 - latitude2) * self.DEG_TO_RAD * 6371000
        xnew = x * math.cos(-self.angle) - y * math.sin(-self.angle)
        ynew = x * math.sin(-self.angle) + y * math.cos(-self.angle)
        return (xnew, -ynew)
    def GPSToAbsoluteXYMeters(self, latitude, longitude):
        """Metres of (latitude, longitude) relative to the map's GPS origin."""
        return self.GPSToXYMeters(latitude, longitude, self.origin[0], self.origin[1])
    def XYMetersToGPS(self, latitude1, longitude1, x, y):
        """Offsets GPS point (latitude1, longitude1) by (x, y) map metres and
        returns the resulting (latitude, longitude), using great-circle
        bearing/distance formulas on a 6371 km sphere."""
        xnew = x * math.cos(self.angle) + y * math.sin(self.angle)
        ynew = x * math.sin(self.angle) - y * math.cos(self.angle)
        bearing = math.atan2(xnew, ynew)
        d = math.sqrt(xnew * xnew + ynew * ynew)
        latitude = math.asin(math.sin(latitude1 * self.DEG_TO_RAD) * math.cos(d/6371000) + math.cos(latitude1 * self.DEG_TO_RAD) * math.sin(d/6371000) * math.cos(bearing)) * self.RAD_TO_DEG
        longitude = longitude1 + math.atan2(math.sin(bearing) * math.sin(d/6371000) * math.cos(latitude1 * self.DEG_TO_RAD), math.cos(d/6371000) - math.sin(latitude1 * self.DEG_TO_RAD) * math.sin(latitude * self.DEG_TO_RAD)) * self.RAD_TO_DEG
        return (latitude, longitude)
    def AbsoluteXYMetersToGPS(self, x, y):
        """GPS coordinates of the point (x, y) metres from the map origin."""
        return self.XYMetersToGPS(self.origin[0], self.origin[1], x, y)
    def XYMetersToPixels(self, x, y):
        """Converts map metres to pixel coordinates."""
        return (x / self.metersForPixel[0], y / self.metersForPixel[1])
    def PixelsToXYMeters(self, px, py):
        """Converts pixel coordinates to map metres."""
        return (px * self.metersForPixel[0], py * self.metersForPixel[1])
    def calculateMetersForPixel(self):
        """Returns the (x, y) metres-per-pixel scale derived from the two
        calibration points."""
        (x, y) = self.GPSToXYMeters(self.lats[0], self.lons[0], self.lats[1], self.lons[1])
        (px, py) = (self.xs[0] - self.xs[1], self.ys[0] - self.ys[1])
        return (math.fabs(x/px), math.fabs(y/py))
    def getAngle(self):
        """Returns the rotation between the GPS east-north frame and the
        image pixel frame, computed from the two calibration points (ordered
        so the x delta is positive)."""
        if(self.xs[1]>self.xs[0]):
            x = (self.lons[1] - self.lons[0]) * self.DEG_TO_RAD * math.cos((self.lats[1] + self.lats[0]) * self.DEG_TO_RAD/2) * 6371000
            y = (self.lats[1] - self.lats[0]) * self.DEG_TO_RAD * 6371000
            angle1 = math.atan2(y, x)
            angle2 = math.atan2(-self.ys[1] + self.ys[0], self.xs[1] - self.xs[0])
        else:
            x = (self.lons[0] - self.lons[1]) * self.DEG_TO_RAD * math.cos((self.lats[0] + self.lats[1]) * self.DEG_TO_RAD/2) * 6371000
            y = (self.lats[0] - self.lats[1]) * self.DEG_TO_RAD * 6371000
            angle1 = math.atan2(y, x)
            angle2 = math.atan2(-self.ys[0] + self.ys[1], self.xs[0] - self.xs[1])
        return angle1 - angle2
    def draw(self):
        """Flushes all pending turtle drawing to the screen."""
        screen.update()
    def weight_to_color(self, weight):
        """Maps a weight in [0, 1] to a red(high)-blue(low) hex color."""
        return "#%02x00%02x" % (int(weight * 255), int((1 - weight) * 255))
    def is_in(self, x, y):
        """Returns True if (x, y) lies within the map bounds."""
        if x < 0 or y < 0 or x >= self.width or y >= self.height:
            return False
        return True
    def is_free(self, x, y):
        """Returns True if (x, y) is inside the map and not an obstacle."""
        if not self.is_in(x, y):
            return False
        # print(x, y, self.img_bitmap[int(x)][int(y)])
        return self.img_bitmap[int(x)][int(y)] == 0
    def show_mean(self, x, y, confident=False):
        """Draws (or moves) the green dot marking the mean particle position."""
        if not hasattr(self, 'mean_pos_id'):
            self.mean_pos_id = canvas.create_oval(x-3, y-3, x+3, y+3,
                fill='green', outline='green')
        canvas.coords(self.mean_pos_id, x-3, y-3, x+3, y+3)
        canvas.update()
    def show_particles(self, particles):
        """Draws every particle as a 1px blue rectangle, creating canvas
        items lazily and throttling redraws via UPDATE_EVERY."""
        self.update_cnt += 1
        if UPDATE_EVERY > 0 and self.update_cnt % UPDATE_EVERY != 1:
            return
        for p in particles:
            if not hasattr(p, 'canvas_id'):
                p.canvas_id = canvas.create_rectangle(p.x, p.y,
                    p.x, p.y, fill="blue", outline="blue")
            canvas.coords(p.canvas_id, p.x, p.y, p.x, p.y)
        canvas.update()
    def show_robot(self, robot):
        """Draws (or moves) the red dot marking the robot position."""
        if not hasattr(robot, 'canvas_id'):
            print(robot.x, robot.y)
            robot.canvas_id = canvas.create_oval(robot.x-2, robot.y-2,
                robot.x+2, robot.y+2, fill="red", outline="red")
        canvas.coords(robot.canvas_id,
            robot.x-2, robot.y-2, robot.x+2, robot.y+2)
        # print(canvas.coords(robot.canvas_id))
        # print(canvas.bbox(robot.canvas_id))
        # canvas.coords(robot.canvas_id, (robot.x, robot.y))
        # print("Show robot", x, y)
        # turtle.color("green")
        # turtle.shape('turtle')
        # turtle.up()
        # turtle.setposition(x, y)
        # turtle.setheading(h)
        # turtle.down()
        # turtle.clearstamps()
        # turtle.stamp()
        # turtle.dot()
        canvas.update()
    def random_place(self):
        """Returns a uniformly random (x, y) anywhere on the map."""
        x = random.uniform(0, self.width - 1)
        y = random.uniform(0, self.height - 1)
        return x, y
    def random_free_place(self):
        """Returns a uniformly random (x, y) on free (non-obstacle) space,
        by rejection sampling."""
        while True:
            x, y = self.random_place()
            if self.is_free(x, y):
                return x, y
    def distance(self, x1, y1, x2, y2):
        """Euclidean distance between two points."""
        return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
    def distance_to_nearest_beacon(self, x, y):
        """Returns the distance from (x, y) to the closest beacon."""
        d = 99999
        for b in self.beacons:
            bx = b["Pixel world"]["x"]
            by = b["Pixel world"]["y"]
            distance = self.distance(bx, by, x, y)
            if distance < d:
                d = distance
        return d
        # return 0.1
    def distance_array_to_beacons(self, x, y):
        """Returns the list of distances from (x, y) to every beacon, in
        beacon order."""
        distance_array = []
        for b in self.beacons:
            bx = b["Pixel world"]["x"]
            by = b["Pixel world"]["y"]
            distance = self.distance(bx, by, x, y)
            distance_array.append(distance)
        return distance_array
def main():
    """Loads the DFKI floor map, prints a couple of sanity checks, draws it."""
    dfki_map = Map('dfki-2nd-floor.json')
    # canvas = screen.getcanvas()
    # rect = canvas.create_rectangle(0, 0, 999, 0, fill="blue")
    # print(screen.screensize())
    print(dfki_map.im.getpixel((1000, 350)))
    #
    print(dfki_map.img_bitmap.shape)
    dfki_map.draw()
    # for i in range(0, 100, 10):
    # dfki_map.show_robot(i, 10, i)
    # canvas.move(rect, i, 2*i)
    # screen.update()
    # canvas.update_idletasks()
    # time.sleep(1)
if __name__ == '__main__':
    main()
| |
from absl import logging
import argparse
import tensorflow as tf
import tensorflow_hub as hub
import os
from PIL import Image
import multiprocessing
from functools import partial
import time
import pyaudio as pya
import threading
import queue
import numpy as np
from moviepy import editor
import pygame
pygame.init()
# Show tfhub download progress when models are fetched for the first time.
os.environ["TFHUB_DOWNLOAD_PROGRESS"] = "True"
# Seconds of audio/video that must be queued before playback threads start.
BUFFER_SIZE = 8
class Player(object):
    """Video player that super-resolves downsampled frames while playing.

    Decodes a video with moviepy, downscales each frame 4x, upscales it back
    with either a TFLite interpreter or a TF-Hub SavedModel, and plays the
    result with pygame (video) and pyaudio (audio) on two worker threads fed
    from queues that must reach BUFFER_SIZE before playback starts.
    """

    def __init__(self, videofile, tflite="", saved_model=""):
        """
        Player Class for the Video
        Args:
            videofile: Path to the video file
            tflite: Path to the Super Resolution TFLite
            saved_model: path to Super Resolution SavedModel
        """
        self.video = editor.VideoFileClip(videofile)
        self.audio = self.video.audio
        self.tolerance = 2.25  # Higher Tolerance Faster Video
        self.running = False
        self.interpreter = None
        self.saved_model = None
        if saved_model:
            self.saved_model = hub.load(saved_model)
        if tflite:
            self.interpreter = tf.lite.Interpreter(model_path=tflite)
            self.interpreter.allocate_tensors()
            self.input_details = self.interpreter.get_input_details()
            self.output_details = self.interpreter.get_output_details()
        self.lock = threading.Lock()
        self.audio_thread = threading.Thread(target=self.write_audio_stream)
        self.video_thread = threading.Thread(target=self.write_video_stream)
        self.video_iterator = self.video.iter_frames()
        # One chunk per second of audio.
        self.audio_iterator = self.audio.iter_chunks(int(self.audio.fps))
        self.video_queue = queue.Queue()
        self.audio_queue = queue.Queue()
        pyaudio = pya.PyAudio()
        # Full 1280x720 window when super-resolving, quarter size otherwise.
        issmallscreen = 1 if saved_model or tflite else 0.25
        self.screen = pygame.display.set_mode(
            (int(1280 * issmallscreen),
             int(720 * issmallscreen)), 0, 32)
        self.stream = pyaudio.open(
            format=pya.paFloat32,
            channels=2,
            rate=44100,
            output=True,
            frames_per_buffer=1024)

    def tflite_super_resolve(self, frame):
        """
        Super Resolve bicubically downsampled image frames
        using the TFLite of the model.
        Args:
            frame: Image frame to scale up.
        Returns:
            uint8 numpy array of the upscaled frame.
        """
        self.interpreter.set_tensor(self.input_details[0]['index'], frame)
        self.interpreter.invoke()
        frame = self.interpreter.get_tensor(self.output_details[0]['index'])
        frame = tf.squeeze(tf.cast(tf.clip_by_value(frame, 0, 255), "uint8"))
        return frame.numpy()

    def saved_model_super_resolve(self, frame):
        """
        Super Resolve using exported SavedModel.
        Args:
            frame: Frame batch (1, H, W, C) to scale up.
        Returns:
            uint8 numpy array of the upscaled frame, or None when no
            SavedModel is loaded (callers select this fn only when one is).
        """
        if self.saved_model:
            start = time.time()
            frame = self.saved_model.call(frame)
            logging.debug("[SAVED_MODEL] Super Resolving Time: %f" % (time.time() - start))
            logging.debug("Returning Modified Frames")
            return np.squeeze(np.clip(frame.numpy(), 0, 255).astype("uint8"))

    def video_second(self):
        """
        Fetch Video Frames for each second
        and super resolve them accordingly.
        Returns:
            List of super-resolved frames covering one second of video.
        """
        frames = []
        logging.debug("Fetching Frames")
        start = time.time()
        loop_time = time.time()
        for _ in range(int(self.video.fps)):
            logging.debug("Fetching Video Frame. %f" % (time.time() - loop_time))
            loop_time = time.time()
            frame = next(self.video_iterator)
            # Downsample 4x so the model has something to super-resolve.
            frame = np.asarray(
                Image.fromarray(frame)
                .resize(
                    [1280 // 4, 720 // 4],
                    Image.BICUBIC), dtype="float32")
            frames.append(tf.expand_dims(frame, 0))
        logging.debug("Frame Fetching Time: %f" % (time.time() - start))
        if self.interpreter and not self.saved_model:
            resolution_fn = self.tflite_super_resolve
        else:
            resolution_fn = self.saved_model_super_resolve
        start = time.time()
        # Resolve a whole second of frames in parallel.
        with multiprocessing.pool.ThreadPool(30) as pool:
            frames = pool.map(resolution_fn, frames)
        logging.debug("Fetched Frames. Time: %f" % (time.time() - start))
        return frames

    def fetch_video(self):
        """
        Fetches audio and video frames from the file.
        And put them in player cache.
        """
        audio = next(self.audio_iterator)
        video = self.video_second()
        self.audio_queue.put(audio)
        self.video_queue.put(video)

    def write_audio_stream(self):
        """
        Write Audio Frames to default audio device.
        """
        try:
            # Busy-wait until enough seconds of audio are buffered.
            while self.audio_queue.qsize() < BUFFER_SIZE:
                continue
            while self.running:
                audio = self.audio_queue.get(timeout=10)
                # BUG FIX: ndarray.tostring() was removed in numpy 1.20;
                # tobytes() returns the identical byte payload.
                self.stream.write(audio.astype("float32").tobytes())
        except BaseException:
            raise

    def write_video_stream(self):
        """
        Write Video frames to the player display.
        """
        try:
            # Busy-wait until enough seconds of video are buffered.
            while self.video_queue.qsize() < BUFFER_SIZE:
                continue
            while self.running:
                logging.info("Displaying Frame")
                for video_frame in self.video_queue.get(timeout=10):
                    # pygame surfaces are column-major; rotate/flip to match.
                    video_frame = pygame.surfarray.make_surface(
                        np.rot90(np.fliplr(video_frame)))
                    self.screen.fill((0, 0, 2))
                    self.screen.blit(video_frame, (0, 0))
                    pygame.display.update()
                    # Pace playback to the video fps, minus the tolerance.
                    time.sleep((1000 / self.video.fps - self.tolerance) / 1000)
        except BaseException:
            raise

    def run(self):
        """
        Start the player threads and the frame streaming simulator,
        then signal shutdown and wait for the threads to finish.
        """
        with self.lock:
            if not self.running:
                self.running = True
                self.audio_thread.start()
                self.video_thread.start()
        for _ in range(int(self.video.duration)):
            logging.debug("Fetching Video")
            self.fetch_video()
            time.sleep(0.1)
        with self.lock:
            # BUG FIX: this previously re-set self.running = True, so the
            # worker loops never saw a stop signal and join() hung forever.
            self.running = False
        self.audio_thread.join()
        self.video_thread.join()
if __name__ == "__main__":
    # CLI entry point: parse flags, configure absl logging, run the player.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-v", "--verbose",
        action="count",
        default=0,
        help="Increases Verbosity of Logging")
    parser.add_argument(
        "--file",
        default=None,
        help="File to play")
    parser.add_argument(
        "--tflite",
        default="",
        help="Path to TFLite File")
    parser.add_argument(
        "--saved_model",
        default="",
        help="Path to Saved Model File")
    FLAGS, unknown_args = parser.parse_known_args()
    # Map repeated -v flags onto absl verbosity, clamping at DEBUG.
    log_levels = [logging.FATAL, logging.WARNING, logging.INFO, logging.DEBUG]
    current_log_level = log_levels[min(len(log_levels) - 1, FLAGS.verbose)]
    logging.set_verbosity(current_log_level)
    player = Player(
        videofile=FLAGS.file,
        saved_model=FLAGS.saved_model,
        tflite=FLAGS.tflite)
    player.run()
| |
import re
import skpy
import discord
import asyncio
import logging
from config import *
from collections import deque
from typing import Tuple, Deque
class Rex(dict):
    """Memoizing regex compiler keyed by (pattern, flags).

    Subscripting with a pattern string compiles it once (flags=0) and
    returns the cached compiled pattern thereafter; ``get(pattern, flags)``
    does the same with explicit flags. Direct item assignment is forbidden.
    """

    def get(self, k, flags=0):
        # The cache key includes the flags so identical patterns compiled
        # with different flags do not collide.
        return self.setdefault((k, flags), re.compile(k, flags))

    def __getitem__(self, item):
        # Subscription is shorthand for get() with default flags.
        return self.get(item)

    def __setitem__(self, key, value):
        raise AttributeError('Rex objects are not supposed to be set')


rex = Rex()
class ApplicationDiscord(discord.Client):
    def __init__(self, **kwargs):
        """Discord side of the skype<->discord bridge.

        Holds the forwarding queue filled by the skype client plus the
        caches used for mention translation and message tracking.
        """
        super().__init__(**kwargs)
        self.discord_forbidden = []  # user ids/names whose messages are ignored
        self.all_members = {}        # lowercase username -> user id
        self.all_members_nick = {}   # lowercase nickname -> user id
        self.message_dict = {}       # skype clientId -> forwarded discord message
        # Queue of (skype message, attachment tuple, work code) to forward.
        self.forward_q: Deque[Tuple[skpy.SkypeMsg, tuple, int]] = deque()
        self.skype = None        # paired skype client, attached in on_ready
        self.start_tuple = None  # tuple of forbidden message prefixes
        self.first_run = True    # guards one-time setup in on_ready
        self.loop_task = None    # asyncio task running main_loop
def enque(self, msg, file, work):
self.forward_q.append((msg, file, work))
    def run_loop(self):
        """Schedules main_loop on the event loop (also used to restart it)."""
        self.loop_task = asyncio.ensure_future(self.main_loop())
    async def main_loop(self):
        """Drains the forward queue, dispatching each item by work code.

        Work codes: 1 = send, 2 = edit, anything else = delete. On an
        unexpected exception the error is logged and the loop restarted so
        one bad message cannot stop forwarding.
        """
        try:
            while True:
                await asyncio.sleep(0.01)  # all the other things
                while self.forward_q:
                    msg, file, work = self.forward_q.popleft()
                    # Convert skype markup to discord markdown before sending.
                    msg.content = self.to_discord_format(msg.content)
                    if work == 1:
                        await self.discord_send_message(msg, file, work)
                    elif work == 2:
                        await self.discord_edit_message(msg, file, work)
                    else:
                        await self.discord_delete_message(msg, file, work)
        except asyncio.CancelledError:
            return
        except Exception as e:
            logging.exception("exception in discord main loop")
            self.run_loop()
    async def on_ready(self):
        """Sets presence/avatar and, on first connect only, initialises caches.

        First-run setup builds the forbidden/member caches, links the skype
        client back to this instance, replaces the channel ids in config.ch
        with live channel objects and starts the forwarding loop.
        """
        logging.info(f'Logged in \nUsername: {self.user.name}\nID: {self.user.id}\nAPI Version: {discord.__version__}')
        gameplayed = config.MAIN.get("gameplayed", "Yuri is Love")
        if gameplayed:
            activity = discord.Game(name=gameplayed)
            await self.change_presence(status=discord.Status.idle, activity=activity)
        avatar_file_name = config.MAIN.get("avatarfile")
        if avatar_file_name:
            with open(avatar_file_name, "rb") as f:
                avatar = f.read()
            await self.edit_profile(avatar=avatar)
        if self.first_run:
            # on_ready can fire again on reconnect; only initialise once.
            self.first_run = False
            self.get_forbidden_list()
            self.get_startswith()
            self.fill_member_list()
            self.skype.discord = self
            for k, v in list(config.ch.items()):
                # Replace configured channel ids with channel objects.
                config.ch[k] = self.get_channel(v)
            self.run_loop()
    # TODO Add embed support
    async def on_message(self, message):
        """Forwards a new discord message to skype (work code 1).

        Handles the temp_bridge admin command, then skips messages with a
        forbidden prefix and messages from forbidden authors.
        """
        content = message.content
        if content.startswith(f"{config.MAIN.command_prefix}temp_bridge"):
            await self.create_temp_bridge(message)
        if content.startswith(self.start_tuple):
            return
        if message.author.id in self.discord_forbidden or message.author.name in self.discord_forbidden:
            return
        # NOTE(review): this checks message.channel.id while the edit/delete
        # handlers check message.channel -- one of the two key forms is
        # likely wrong; verify which form config.ch is keyed on.
        if message.channel.id in config.ch:
            content = await self.to_skype_format(content, message)
            self.skype.enque(message, content=content, work=1, new_msg=None)
    async def on_message_edit(self, old_message, message):
        """Forwards a discord edit to skype (work code 2), same filters as send."""
        content = message.content
        if content.startswith(self.start_tuple):
            return
        if message.author.id in self.discord_forbidden or message.author.name in self.discord_forbidden:
            return
        # NOTE(review): on_message checks message.channel.id here instead of
        # the channel object -- confirm which key form config.ch uses.
        if message.channel in config.ch:
            content = await self.to_skype_format(content, message)
            self.skype.enque(old_message, content=content, work=2, new_msg=message)
    async def on_message_delete(self, message):
        """Forwards a discord deletion to skype (work code 3), same filters."""
        content = message.content
        if content.startswith(self.start_tuple):
            return
        if message.author.id in self.discord_forbidden or message.author.name in self.discord_forbidden:
            return
        # NOTE(review): on_message checks message.channel.id here instead of
        # the channel object -- confirm which key form config.ch uses.
        if message.channel in config.ch:
            self.skype.enque(message, content=None, work=3, new_msg=None)
async def discord_send_message(self, msg, file, work):
try:
if file:
discord_message = await self.send_file(config.ch[msg.chat.id], file[0], filename=file[1], content=msg.content)
else:
discord_message = await config.ch[msg.chat.id].send(msg.content)
self.update_internal_msg(msg, discord_message)
except KeyError:
logging.warning("Deleted a message from unkown chat.")
except Exception as e:
logging.exception("Exception while sending discord message")
#self.forward_q.append((msg, file, work))
    async def discord_edit_message(self, msg, file, work):
        """Applies a skype edit to the previously forwarded discord message.

        Unknown clientIds (expired or never forwarded) are ignored; on
        failure the work item is re-queued for another attempt.
        """
        if msg.clientId not in self.message_dict:
            return
        try:
            discord_message = await self.edit_message(self.message_dict[msg.clientId], new_content=msg.content)
            self.update_internal_msg(msg, discord_message)
        except Exception as e:
            logging.exception("Exception in discord_edit_message")
            # Retry later by putting the work item back on the queue.
            self.forward_q.append((msg, file, work))
    async def discord_delete_message(self, msg, file, work):
        """Deletes the discord message a skype message was forwarded to.

        Unknown clientIds are ignored; on failure the item is re-queued.
        """
        if msg.clientId not in self.message_dict:
            return
        try:
            await self.delete_message(self.message_dict[msg.clientId])
        except Exception as e:
            logging.exception("Exception in discord_delete_message")
            # Retry later by putting the work item back on the queue.
            self.forward_q.append((msg, file, work))
def update_internal_msg(self, skype_msg_obj: skpy.SkypeMsg, discord_msg_obj):
self.message_dict[skype_msg_obj.clientId] = discord_msg_obj
asyncio.get_event_loop().call_later(36000, lambda: self.message_dict.pop(skype_msg_obj.clientId, None))
    async def create_temp_bridge(self, msg: discord.Message):
        """Handles the admin-only temp_bridge command.

        Expected form:
        <prefix>temp_bridge <add|delete> <skype|discord>:<id> <skype|discord>:<id>
        Replies with a status message in the invoking channel; non-admin
        invocations are silently ignored.
        """
        if msg.author.id in config.admin_id:
            content = msg.content.split(" ")
            if len(content) != 4:
                return await self.send_message(msg.channel, content="Input is not correct, pls check it again")
            first_id = content[2].split(":", 1)
            second_id = content[3].split(":", 1)
            if len(first_id) != 2 or len(second_id) != 2:
                return await self.send_message(msg.channel, content="ID input is not correct, pls check it again")
            # Arguments may be given in either order; sort out which side
            # is skype and which is discord.
            if first_id[0] == "skype":
                skype_id = first_id[1]
                discord_id = second_id[1]
            elif first_id[0] == "discord":
                discord_id = first_id[1]
                skype_id = second_id[1]
            else:
                return await self.send_message(msg.channel, content="Input is not correct, pls check it again")
            if content[1] == "add":
                self.add_temp_bridge(skype_id, discord_id)
            elif content[1] == "delete":
                self.delete_temp_bridge(skype_id, discord_id)
            else:
                return await self.send_message(msg.channel, content="Method is not correct, pls check it again")
            await self.send_message(msg.channel, content="Done")
def add_temp_bridge(self, skype_id: str, discord_id: str):
config.ch[skype_id] = self.get_channel(discord_id)
def delete_temp_bridge(self, skype_id, discord_id):
config.ch.pop(skype_id, None)
config.ch.pop(self.get_channel(discord_id), None)
def get_forbidden_list(self):
self.discord_forbidden = [self.user.id]
for x in config["FORBIDDEN_DISCORD"].values():
self.discord_forbidden.append(str(x))
logging.info(f"Forbidden Discord:\n{self.discord_forbidden}")
def get_startswith(self):
start_list = []
for word in config.FORBIDDEN_START.values():
start_list.append(word)
self.start_tuple = tuple(start_list)
logging.info(f"Forbidden Start:\n{self.start_tuple}")
def fill_member_list(self):
for user in self.get_all_members():
self.all_members[user.name.lower()] = user.id
if hasattr('user', 'nick'):
self.all_members_nick[user.nick.lower()] = user.id
@staticmethod
def embeds_to_skype_format(embeds) -> str:
formated_embeds = "Embed:"
for embed in embeds:
title = embed.get("title", None)
if title:
formated_embeds += f"\n<b raw_pre=\"*\" raw_post=\"*\">Title: </b> {title}"
description = embed.get("description", None)
if description:
formated_embeds += f"\n<b raw_pre=\"*\" raw_post=\"*\"> Description: </b> {description}"
return formated_embeds
# TODO Code blocks fix?
async def to_skype_format(self, content, message) -> str:
    """Convert a Discord message into Skype-flavoured HTML markup.

    Rewrites bare links, emoji, user/role/channel mentions and attachments,
    and prefixes the author's display name in bold.
    """
    if message.author.bot and message.embeds:
        content = content.replace("[]()", "")
        content = f"{self.embeds_to_skype_format(message.embeds)}\n{content}"
    line_splits = content.split('\n')
    for li, line in enumerate(line_splits):
        word_splits = line.split(" ")
        for index, word in enumerate(word_splits):
            # Bare links become anchor tags.
            if "http" in word:
                word_splits[index] = f"<a href=\"{word}\">{word}</a>"
                continue
            # Unicode emoji -> Skype emoticon code (when known).
            if word in config.unicode_emoji:
                try:
                    word_splits[index] = config.emoji[config.unicode_emoji[word]][1:-1]
                except KeyError:
                    logging.warning(f"Missing emoji in emoji.json: {config.unicode_emoji[word]}")
                continue
            # Custom Discord emoji <:name:id>.
            emoji = re.match(rex["<:(\w+):(\d+)>"], word)
            if emoji:
                if emoji.group(1) in config.emoji:
                    word_splits[index] = config.emoji[emoji.group(1)][1:-1]
                else:
                    emo = f"<b raw_pre=\"*\" raw_post=\"*\">{emoji.group(1)}</b>"
                    word_splits[index] = emo
                continue
            # User mention <@id> / <@!id>.
            mention = re.match(rex["<@!?(\d+)>"], word)
            if mention:
                mention = await self.fetch_user(mention.group(1))
                mention = f"@{mention.name}"
                word_splits[index] = mention
                continue
            # Role mention <@&id>.
            mention_role = re.match(rex["<@&(\d+)>"], word)
            if mention_role:
                # BUG FIX: the original referenced mentioned_role even when
                # no server role matched, raising NameError; only substitute
                # when a matching role was found.
                mentioned_role = None
                for role in message.server.roles:
                    if role.id == mention_role.group(1):
                        mentioned_role = role
                if mentioned_role is not None:
                    word_splits[index] = f"@{mentioned_role.name} (Discord Role)"
                continue
            # Channel mention <#id>.
            mention_channel = re.match(rex["<#(\d+)>"], word)
            if mention_channel:
                mention = self.get_channel(mention_channel.group(1))
                mention = f"#{mention.name}"
                word_splits[index] = mention
        line_splits[li] = " ".join(word_splits)
    content = '\n'.join(line_splits)
    # BUG FIX: the original tested hasattr('message.author', 'nick') on a
    # string literal (always False), so the nickname was never shown; also
    # fall back to the username when nick exists but is None.
    author_nick = getattr(message.author, "nick", None)
    display_name = author_nick if author_nick else message.author.name
    content = f"<b raw_pre=\"*\" raw_post=\"*\">{display_name}: </b> {content}"
    if message.attachments:
        for word in message.attachments:
            content += f"\n<a href=\"{word['url']}\">{word['filename']}</a>"
    return content.replace("{code}", "```")
def get_user_id(self, username):
    """Resolve a lower-cased username or nickname to a Discord user id.

    On a cache miss the member cache is rebuilt once and the lookup retried.
    """
    def lookup():
        return self.all_members.get(username, self.all_members_nick.get(username))

    user_id = lookup()
    if not user_id:
        self.fill_member_list()
        user_id = lookup()
    return user_id
# TODO Fix usernames with space
# TODO Use re.finditer
def to_discord_format(self, msg_content) -> str:
    """Translate a Skype message into Discord markup.

    Restores code fences and converts @username tokens into Discord
    mention syntax when the user can be resolved.
    """
    msg_content = msg_content.replace("{code}", "```").replace("Edited previous message:", "")
    # Fast path: nothing that could be a mention.
    if "@" not in msg_content:
        return msg_content
    out_lines = []
    for line in msg_content.split('\n'):
        words = line.split(" ")
        for pos, token in enumerate(words):
            username = re.match(rex["@(\w+)"], token)
            if not username:
                continue
            user_id = self.get_user_id(username.group(1).lower())
            if user_id:
                words[pos] = f"<@{user_id}> "
        out_lines.append(" ".join(words))
    return '\n'.join(out_lines)
| |
from PyQt5.QtCore import QModelIndex, QRect
from PyQt5.QtWidgets import QAbstractItemView
from inselect.lib.inselect_error import InselectError
from inselect.lib.utils import debug_print
from inselect.gui.roles import PixmapRole, RectRole, MetadataValidRole
from inselect.gui.utils import update_selection_model
from .boxes_scene import BoxesScene
class GraphicsItemView(QAbstractItemView):
    """Qt have used 'view' in two different contexts: the model-view
    architecture and the graphics/view framework, henceforth MV and GV
    respectively.
    This class is a MV view that acts as an interface between MV and GV.
    A limited number of events are passed between the two systems:
    * changes in selection
    * changes in boxes' position and size (RectRole)
    * addition of boxes
    * deletion of boxes
    * metadata valid status (MetadataValidRole)
    * TODO box verified status
    """
    # Based on idea in:
    # http://stackoverflow.com/questions/3188584/how-to-use-qt-model-view-framework-with-the-graphics-view-framework

    def __init__(self, parent=None):
        super(GraphicsItemView, self).__init__(parent)
        self.scene = BoxesScene(self, parent)
        # List of QGraphicsRectItem; self._rows[n] is the item for model row n.
        self._rows = []
        # Re-entrancy guard: True while this view is itself changing the
        # selection, so resulting change notifications can be ignored.
        self.handling_selection_update = False
        self.scene.selectionChanged.connect(self.scene_selection_changed)

    def reset(self):
        """QAbstractItemView virtual

        Rebuilds the scene and the row -> item mapping from the model.
        """
        debug_print('GraphicsItemView.reset')
        super(GraphicsItemView, self).reset()
        model = self.model()
        self.scene.new_document(model.data(QModelIndex(), PixmapRole))
        # Build up new mapping
        rows = [None] * model.rowCount()
        for row in range(model.rowCount()):
            index = model.index(row, 0)
            rows[row] = self.scene.add_box(index.data(RectRole),
                                           index.data(MetadataValidRole))
        self._rows = rows

    def show_alternative_pixmap(self, pixmap):
        """Show or clear an alternative pixmap in place of the document's usual
        pixmap. pixmaps should either be a QPixmap of the same dimensions as the
        documents pixmap (which is shown) or None (which clears any existing
        alternative pixmap)
        """
        debug_print('show_alternative_pixmap', pixmap)
        model = self.model()
        pixmap = pixmap if pixmap else model.data(QModelIndex(), PixmapRole)
        self.scene.set_pixmap(pixmap)

    def rowsInserted(self, parent, start, end):
        """QAbstractItemView slot

        start and end are inclusive row numbers (Qt convention).
        """
        debug_print('GraphicsItemView.rowsInserted', start, end)
        # New boxes but are coming but their rects are not yet known.
        # Create new items with zero height and zero width rects - actual rects
        # will be set in dataChanged()
        n = 1 + end - start
        new = [None] * n
        rect = QRect(0, 0, 0, 0)
        for row in range(n):
            new[row] = self.scene.add_box(rect, False)
        self._rows[start:start] = new

    def dataChanged(self, topLeft, bottomRight, roles=[]):
        """QAbstractItemView virtual

        Pushes updated rects and metadata-valid flags to the scene items.
        (roles is accepted to match Qt's signature but is not used.)
        """
        debug_print('GraphicsItemView.dataChanged', topLeft.row(), bottomRight.row())
        for row in range(topLeft.row(), 1 + bottomRight.row()):
            item = self._rows[row]
            # new is a QRect - integer coordinates
            index = self.model().index(row, 0)
            item.set_rect(index.data(RectRole))
            item.set_isvalid(index.data(MetadataValidRole))

    def rowsAboutToBeRemoved(self, parent, start, end):
        """QAbstractItemView slot

        start and end are inclusive row numbers (Qt convention).
        """
        debug_print('GraphicsItemView.rowsAboutToBeRemoved', start, end)
        if self.handling_selection_update:
            debug_print('Unexpected handling_selection_update in '
                        'GraphicsItemView.rowsAboutToBeRemoved')
        # Ignore the selectionChanged() notifications that the scene will send
        # for every item that is about to be removed.
        self.handling_selection_update = True
        try:
            # TODO Context for this
            # BUG FIX: 'end' is inclusive (cf. rowsInserted's 1 + end - start),
            # so the slice must run to 1 + end; the original [start:end] left
            # the last removed row's item in the scene and in self._rows.
            for item in self._rows[start:1 + end]:
                self.scene.removeItem(item)
        finally:
            self.handling_selection_update = False
        # Remove items
        self._rows[start:1 + end] = []

    def selectionChanged(self, selected, deselected):
        """QAbstractItemView virtual
        """
        # Tell the scene about the new selection
        # TODO LH Use a timer to implement a delayed refresh
        if not self.handling_selection_update:
            # TODO Context for this
            debug_print('GraphicsItemView.selectionChanged')
            self.handling_selection_update = True
            try:
                current = set(self.scene.selectedItems())
                new = set(self._rows[i.row()] for i in self.selectionModel().selectedIndexes())
                for item in new.difference(current):
                    item.setSelected(True)
                    item.update()
                for item in current.difference(new):
                    item.setSelected(False)
                    item.update()
                if new:
                    for view in self.scene.views():
                        view.zoom_to_items(new)
            finally:
                self.handling_selection_update = False

    def rows_of_items(self, items):
        """Returns a generator of row numbers of the list of QGraphicsItems
        """
        # TODO LH This is horrible
        # TODO LH Use a view to support changes to self._rows during iteration?
        return (self._rows.index(i) for i in items)

    def indexes_of_items(self, items):
        """Returns a generator of indexes of the list of QGraphicsItems
        """
        # TODO LH Use a view to support changes to self._rows during iteration?
        return (self.model().index(row, 0) for row in self.rows_of_items(items))

    def items_of_rows(self, rows):
        """Returns an iterable of QGraphicsItems for the given rows
        """
        return (self._rows[r] for r in rows)

    def items_of_indexes(self, indexes):
        """Returns an iterable of QGraphicsItems for the given indexes
        """
        return (self._rows[i.row()] for i in indexes)

    def scene_selection_changed(self):
        """scene.selectionChanged slot
        """
        # TODO LH Fix dreadful performance when selection changing as a result
        # of mouse drag
        if not self.handling_selection_update:
            debug_print('GraphicsItemView.scene_selection_changed')
            # TODO Context for this
            self.handling_selection_update = True
            try:
                new_selection = set(self.rows_of_items(self.scene.selectedItems()))
                update_selection_model(self.model(), self.selectionModel(),
                                       new_selection)
            finally:
                self.handling_selection_update = False

    def scene_item_rects_updated(self, items):
        """The user moved or resized items in the scene
        """
        debug_print('GraphicsItemView.item_rects_updated')
        for index, item in zip(self.indexes_of_items(items), items):
            # item.sceneBoundingRect() is the items rects in the correct
            # coordinates system
            debug_print('Row [{0}] updated'.format(index.row()))
            rect = item.sceneBoundingRect()
            # Cumbersome conversion to ints
            rect = QRect(rect.left(), rect.top(), rect.width(), rect.height())
            self.model().setData(index, rect, RectRole)

    def scene_box_added(self, rect):
        """The user added a box
        """
        m = self.model()
        row = len(self._rows)
        if not m.insertRow(row):
            raise InselectError('Could not insert row')
        else:
            # Cumbersome conversion to ints
            rect = QRect(rect.left(), rect.top(), rect.width(), rect.height())
            if not m.setData(m.index(row, 0), rect, RectRole):
                raise InselectError('Could not set rect')
            else:
                # Select the new box
                self.scene.clearSelection()
                item = next(self.items_of_rows([row]))
                item.setSelected(True)
                item.update()
| |
from __future__ import absolute_import, print_function, division
import unittest
import numpy as np
import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt
import theano.gpuarray.fft
import numpy.fft
from .config import mode_with_gpu
# Skip tests if pygpu is not available.
from nose.plugins.skip import SkipTest
from theano.gpuarray.fft import pygpu_available, scikits_cuda_available, pycuda_available
if not pygpu_available: # noqa
raise SkipTest('Optional package pygpu not available')
if not scikits_cuda_available: # noqa
raise SkipTest('Optional package scikits.cuda not available')
if not pycuda_available: # noqa
raise SkipTest('Optional package pycuda not available')
# Transform sizes
N = 32
class TestFFT(unittest.TestCase):
    """Tests for the GPU real FFT ops, checked against numpy.fft references."""

    def test_1Dfft(self):
        """Round-trip a 1D rfft/irfft and verify gradients."""
        inputs_val = np.random.random((1, N)).astype('float32')
        x = T.matrix('x', dtype='float32')
        rfft = theano.gpuarray.fft.curfft(x)
        f_rfft = theano.function([x], rfft, mode=mode_with_gpu)
        res_rfft = f_rfft(inputs_val)
        # The op packs real/imag parts in the trailing axis; rebuild complex.
        res_rfft_comp = (np.asarray(res_rfft[:, :, 0]) +
                         1j * np.asarray(res_rfft[:, :, 1]))
        rfft_ref = numpy.fft.rfft(inputs_val, axis=1)
        utt.assert_allclose(rfft_ref, res_rfft_comp)
        m = rfft.type()
        irfft = theano.gpuarray.fft.cuirfft(m)
        f_irfft = theano.function([m], irfft, mode=mode_with_gpu)
        res_irfft = f_irfft(res_rfft)
        utt.assert_allclose(inputs_val, np.asarray(res_irfft))
        # The numerical gradient of the FFT is sensitive, must set large
        # enough epsilon to get good accuracy.
        eps = 1e-1

        def f_rfft(inp):
            return theano.gpuarray.fft.curfft(inp)
        inputs_val = np.random.random((1, N)).astype('float32')
        utt.verify_grad(f_rfft, [inputs_val], eps=eps)

        def f_irfft(inp):
            return theano.gpuarray.fft.cuirfft(inp)
        inputs_val = np.random.random((1, N // 2 + 1, 2)).astype('float32')
        utt.verify_grad(f_irfft, [inputs_val], eps=eps)

    def test_rfft(self):
        """Forward 2D rfft against numpy.fft.rfftn."""
        inputs_val = np.random.random((1, N, N)).astype('float32')
        inputs = theano.shared(inputs_val)
        rfft = theano.gpuarray.fft.curfft(inputs)
        f_rfft = theano.function([], rfft, mode=mode_with_gpu)
        res_rfft = f_rfft()
        res_rfft_comp = (np.asarray(res_rfft[:, :, :, 0]) +
                         1j * np.asarray(res_rfft[:, :, :, 1]))
        rfft_ref = numpy.fft.rfftn(inputs_val, axes=(1, 2))
        utt.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4)

    def test_irfft(self):
        """Inverse 2D irfft, both as a round-trip and against numpy."""
        inputs_val = np.random.random((1, N, N)).astype('float32')
        inputs = theano.shared(inputs_val)
        fft = theano.gpuarray.fft.curfft(inputs)
        f_fft = theano.function([], fft, mode=mode_with_gpu)
        res_fft = f_fft()
        m = fft.type()
        ifft = theano.gpuarray.fft.cuirfft(m)
        f_ifft = theano.function([m], ifft, mode=mode_with_gpu)
        res_ifft = f_ifft(res_fft)
        utt.assert_allclose(inputs_val, np.asarray(res_ifft))
        # CONSISTENCY FIX: use np.random like the rest of the class.
        inputs_val = np.random.random((1, N, N, 2)).astype('float32')
        inputs = theano.shared(inputs_val)
        irfft = theano.gpuarray.fft.cuirfft(inputs)
        # BUG FIX: every other compile in this class requests mode_with_gpu;
        # without it this function would not exercise the GPU op under test.
        f_irfft = theano.function([], irfft, mode=mode_with_gpu)
        res_irfft = f_irfft()
        inputs_ref = inputs_val[..., 0] + inputs_val[..., 1] * 1j
        irfft_ref = np.fft.irfftn(inputs_ref, axes=(1, 2))
        utt.assert_allclose(irfft_ref, res_irfft, atol=1e-4, rtol=1e-4)

    def test_type(self):
        """float64 input must be rejected by both ops."""
        inputs_val = np.random.random((1, N)).astype('float64')
        inputs = theano.shared(inputs_val)
        with self.assertRaises(AssertionError):
            theano.gpuarray.fft.curfft(inputs)
        with self.assertRaises(AssertionError):
            theano.gpuarray.fft.cuirfft(inputs)

    def test_norm(self):
        """'ortho' and 'no_norm' scalings for forward and inverse FFT."""
        inputs_val = np.random.random((1, N, N)).astype('float32')
        inputs = theano.shared(inputs_val)
        # Unitary normalization
        rfft = theano.gpuarray.fft.curfft(inputs, norm='ortho')
        f_rfft = theano.function([], rfft, mode=mode_with_gpu)
        res_rfft = f_rfft()
        res_rfft_comp = (np.asarray(res_rfft[:, :, :, 0]) +
                         1j * np.asarray(res_rfft[:, :, :, 1]))
        rfft_ref = numpy.fft.rfftn(inputs_val, axes=(1, 2))
        utt.assert_allclose(rfft_ref / N, res_rfft_comp, atol=1e-4, rtol=1e-4)
        # No normalization
        rfft = theano.gpuarray.fft.curfft(inputs, norm='no_norm')
        f_rfft = theano.function([], rfft, mode=mode_with_gpu)
        res_rfft = f_rfft()
        res_rfft_comp = (np.asarray(res_rfft[:, :, :, 0]) +
                         1j * np.asarray(res_rfft[:, :, :, 1]))
        utt.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4)
        # Inverse FFT inputs
        inputs_val = np.random.random((1, N, N // 2 + 1, 2)).astype('float32')
        inputs = theano.shared(inputs_val)
        inputs_ref = inputs_val[:, :, :, 0] + 1j * inputs_val[:, :, :, 1]
        # Unitary normalization inverse FFT
        irfft = theano.gpuarray.fft.cuirfft(inputs, norm='ortho')
        f_irfft = theano.function([], irfft, mode=mode_with_gpu)
        res_irfft = f_irfft()
        irfft_ref = numpy.fft.irfftn(inputs_ref, axes=(1, 2))
        utt.assert_allclose(irfft_ref * N, res_irfft, atol=1e-4, rtol=1e-4)
        # No normalization inverse FFT
        irfft = theano.gpuarray.fft.cuirfft(inputs, norm='no_norm')
        f_irfft = theano.function([], irfft, mode=mode_with_gpu)
        res_irfft = f_irfft()
        utt.assert_allclose(irfft_ref * N**2, res_irfft, atol=1e-4, rtol=1e-4)

    def test_grad(self):
        """verify_grad for both ops under default and explicit norms."""
        # The numerical gradient of the FFT is sensitive, must set large
        # enough epsilon to get good accuracy.
        eps = 1e-1

        def f_rfft(inp):
            return theano.gpuarray.fft.curfft(inp)
        inputs_val = np.random.random((1, N, N)).astype('float32')
        utt.verify_grad(f_rfft, [inputs_val], eps=eps)

        def f_irfft(inp):
            return theano.gpuarray.fft.cuirfft(inp)
        inputs_val = np.random.random((1, N, N // 2 + 1, 2)).astype('float32')
        utt.verify_grad(f_irfft, [inputs_val], eps=eps)

        def f_rfft(inp):
            return theano.gpuarray.fft.curfft(inp, norm='ortho')
        inputs_val = np.random.random((1, N, N)).astype('float32')
        utt.verify_grad(f_rfft, [inputs_val], eps=eps)

        def f_irfft(inp):
            return theano.gpuarray.fft.cuirfft(inp, norm='no_norm')
        inputs_val = np.random.random((1, N, N // 2 + 1, 2)).astype('float32')
        utt.verify_grad(f_irfft, [inputs_val], eps=eps)

    def test_odd(self):
        """Odd-sized transforms need is_odd=True on the inverse op."""
        M = N - 1
        inputs_val = np.random.random((1, M, M)).astype('float32')
        inputs = theano.shared(inputs_val)
        rfft = theano.gpuarray.fft.curfft(inputs)
        f_rfft = theano.function([], rfft, mode=mode_with_gpu)
        res_rfft = f_rfft()
        res_rfft_comp = (np.asarray(res_rfft[:, :, :, 0]) +
                         1j * np.asarray(res_rfft[:, :, :, 1]))
        rfft_ref = numpy.fft.rfftn(inputs_val, s=(M, M), axes=(1, 2))
        utt.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4)
        m = rfft.type()
        ifft = theano.gpuarray.fft.cuirfft(m, is_odd=True)
        f_ifft = theano.function([m], ifft, mode=mode_with_gpu)
        res_ifft = f_ifft(res_rfft)
        utt.assert_allclose(inputs_val, np.asarray(res_ifft))
        inputs_val = np.random.random((1, M, M // 2 + 1, 2)).astype('float32')
        inputs = theano.shared(inputs_val)
        irfft = theano.gpuarray.fft.cuirfft(inputs, norm='ortho', is_odd=True)
        f_irfft = theano.function([], irfft, mode=mode_with_gpu)
        res_irfft = f_irfft()
        inputs_ref = inputs_val[:, :, :, 0] + 1j * inputs_val[:, :, :, 1]
        irfft_ref = numpy.fft.irfftn(
            inputs_ref, s=(M, M), axes=(1, 2), norm='ortho')
        utt.assert_allclose(irfft_ref, res_irfft, atol=1e-4, rtol=1e-4)
        # The numerical gradient of the FFT is sensitive, must set large
        # enough epsilon to get good accuracy.
        eps = 1e-1

        def f_rfft(inp):
            return theano.gpuarray.fft.curfft(inp)
        inputs_val = np.random.random((1, M, M)).astype('float32')
        utt.verify_grad(f_rfft, [inputs_val], eps=eps)

        def f_irfft(inp):
            return theano.gpuarray.fft.cuirfft(inp, is_odd=True)
        inputs_val = np.random.random((1, M, M // 2 + 1, 2)).astype('float32')
        utt.verify_grad(f_irfft, [inputs_val], eps=eps)

        def f_rfft(inp):
            return theano.gpuarray.fft.curfft(inp, norm='ortho')
        inputs_val = np.random.random((1, M, M)).astype('float32')
        utt.verify_grad(f_rfft, [inputs_val], eps=eps)

        def f_irfft(inp):
            return theano.gpuarray.fft.cuirfft(inp, norm='no_norm', is_odd=True)
        inputs_val = np.random.random((1, M, M // 2 + 1, 2)).astype('float32')
        utt.verify_grad(f_irfft, [inputs_val], eps=eps)

    def test_params(self):
        """Invalid norm / is_odd arguments raise ValueError."""
        # CONSISTENCY FIX: use np.random like the rest of the class.
        inputs_val = np.random.random((1, N)).astype('float32')
        inputs = theano.shared(inputs_val)
        self.assertRaises(ValueError, theano.gpuarray.fft.curfft, inputs, norm=123)
        inputs_val = np.random.random((1, N // 2 + 1, 2)).astype('float32')
        inputs = theano.shared(inputs_val)
        self.assertRaises(ValueError, theano.gpuarray.fft.cuirfft, inputs, norm=123)
        self.assertRaises(ValueError, theano.gpuarray.fft.cuirfft, inputs, is_odd=123)
| |
import numpy as np
import random
import math
import datetime
class CrossEntropyCost:
    """
    Cross-entropy cost: static helpers for the cost value and the
    output-layer error term.
    """

    @staticmethod
    def fn(a, y):
        """Return the total cross-entropy cost between activations a and targets y.

        np.nan_to_num keeps 0*log(0)-style terms from propagating NaNs.
        """
        per_unit = -y * np.log(a) - (1 - y) * np.log(1 - a)
        return np.sum(np.nan_to_num(per_unit))

    @staticmethod
    def delta(a, y):
        """Return the output error for a sigmoid layer under cross-entropy."""
        return a - y
class MultiLayerPerceptron:
    """
    A fully connected neural network with stochastic gradient descent and
    various diagnostic visualizations.
    """

    def __init__(self, sizes, cost=CrossEntropyCost):
        """Initializes the network parameters

        Parameters
        ----------
        sizes : List
            A list with number of neurons per each layer
        cost : object
            Cost object to use for cost calculation
        """
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.initialize_weights()
        self.cost = cost

    def initialize_weights(self):
        """Initializing weights as Gaussian random variables with mean
        0 and standard deviation 1/sqrt(n) where n is the number
        of weights connecting to the same neuron.
        """
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x) / np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def feed_forward(self, a):
        """Carry out a forward pass through the network and return
        the activation value of the last layer
        """
        for b, w in zip(self.biases, self.weights):
            a = self.sigmoid(np.dot(w, a) + b)
        return a

    def backprop(self, x, y):
        """Perform backward pass using backpropagation on a single
        item of dataset and return the gradients w.r.t. biases and weights
        """
        # biases and weights gradients calculated by backprop
        b = [np.zeros(bias.shape) for bias in self.biases]
        w = [np.zeros(weight.shape) for weight in self.weights]
        # forward pass, recording weighted inputs (zs) and activations
        activation = x
        activations = [x]
        zs = []
        for bias, weight in zip(self.biases, self.weights):
            z = np.dot(weight, activation) + bias
            zs.append(z)
            activation = self.sigmoid(z)
            activations.append(activation)
        # output error
        delta = (self.cost).delta(activations[-1], y)
        b[-1] = delta
        w[-1] = np.dot(delta, activations[-2].transpose())
        # backpropagate
        # BUG FIX: xrange is Python 2 only (NameError on Python 3); range
        # behaves identically here and works on both.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = self.sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            # store the derivative terms in the bias and weight list
            b[-l] = delta
            w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (b, w)

    def gd_mini_batch(self, mini_batch, alpha, lmbda, n):
        """Update the weights and biases of the network by applying
        gradient descent on each mini batch. Mini batch is a list
        of tuple (x,y)
        """
        biases = [np.zeros(b.shape) for b in self.biases]
        weights = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            # get derivative terms using backprop
            delta_b, delta_w = self.backprop(x, y)
            # accumulate the gradients
            biases = [nb + db for nb, db in zip(biases, delta_b)]
            weights = [nw + dw for nw, dw in zip(weights, delta_w)]
        # update network using the L2-regularized gradient descent rule
        self.biases = [b - (alpha / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, biases)]
        self.weights = [(1 - (alpha * lmbda / n)) * w - (alpha / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, weights)]

    def SGD(self, training_data, epochs, mini_batch_size, alpha, lmbda, evaluation_data):
        """Train the network using mini-batch stochastic gradient descent

        Parameters
        ----------
        training_data : ndarray
            Numpy array of training data
        epochs : int
            Number of epochs to train the network
        mini_batch_size : int
            The size of each mini batch to use for SGD
        alpha : float
            Learning Rate
        lmbda : float
            Regularization parameter
        evaluation_data : ndarray
            Validation or test dataset similar to training_data

        Returns
        -------
        tuple of four lists: per-epoch evaluation cost, evaluation accuracy,
        training cost and training accuracy.
        """
        n = len(training_data)
        n_data = len(evaluation_data)
        evaluation_cost = []
        evaluation_accuracy = []
        training_cost = []
        training_accuracy = []
        # BUG FIX: xrange is Python 2 only; use range (works on 2 and 3).
        for i in range(epochs):
            random.shuffle(training_data)
            mini_batches = [training_data[k:k + mini_batch_size]
                            for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.gd_mini_batch(mini_batch, alpha, lmbda, n)
            print("Epoch " + str(i) + " training complete")
            # training cost and accuracy
            cost = self.total_cost(training_data, lmbda)
            training_cost.append(cost)
            print("Cost on training data: " + str(cost))
            accuracy = self.accuracy(training_data)
            training_accuracy.append(accuracy)
            print("Accuracy on training data: " + str(accuracy) + "/" + str(n))
            # evaluation cost and accuracy
            cost = self.total_cost(evaluation_data, lmbda)
            print("Cost on evaluation data: " + str(cost))
            evaluation_cost.append(cost)
            accuracy = self.accuracy(evaluation_data)
            evaluation_accuracy.append(accuracy)
            print("Accuracy on evaluation data: " + str(accuracy) + "/" + str(n_data))
        return evaluation_cost, evaluation_accuracy, training_cost, training_accuracy

    def accuracy(self, data):
        """Returns the number of input in data for which neural network
        outputs the correct result (argmax of the output layer matches
        the argmax of the one-hot target).
        """
        results = [(np.argmax(self.feed_forward(x)), np.argmax(y)) for (x, y) in data]
        return sum(int(x == y) for (x, y) in results)

    def total_cost(self, data, lmbda):
        """Return the total (L2-regularized) cost of the network for dataset
        """
        cost = 0.0
        for x, y in data:
            a = self.feed_forward(x)
            cost += self.cost.fn(a, y) / len(data)
        # add regularization
        cost += 0.5 * (lmbda / len(data)) * sum(np.linalg.norm(w)**2 for w in self.weights)
        return cost

    def one_hot_encoded_result(self, j):
        """Convert output value into one hot encoded output vector
        """
        vec = np.zeros((self.sizes[-1], 1))
        vec[j] = 1.0
        return vec

    def sigmoid(self, z):
        """Logistic sigmoid activation."""
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoid_prime(self, z):
        """Derivative of the logistic sigmoid."""
        return self.sigmoid(z) * (1 - self.sigmoid(z))

    def plot(self, evaluation_cost, evaluation_accuracy, training_cost, training_accuracy):
        """Visualize the cost and accuracy on training and evaluation data

        Parameters
        ----------
        evaluation_cost : list
            List of cost on evaluation data for each epoch
        evaluation_accuracy : list
            List of accuracy on evaluation data for each epoch
        training_cost : list
            List of cost on training data for each epoch
        training_accuracy : list
            List of accuracy on training data for each epoch
        """
        # Imported lazily so the network can be used without matplotlib.
        import matplotlib.pyplot as plt
        from matplotlib.ticker import MaxNLocator
        train_cost, eval_cost = [], []
        train_acc, eval_acc = [], []
        for i, cost in enumerate(training_cost):
            train_cost.append((cost, i))
        for i, cost in enumerate(evaluation_cost):
            eval_cost.append((cost, i))
        for i, acc in enumerate(training_accuracy):
            train_acc.append((acc, i))
        for i, acc in enumerate(evaluation_accuracy):
            eval_acc.append((acc, i))
        np_train_cost = np.asarray(train_cost)
        np_eval_cost = np.asarray(eval_cost)
        np_train_acc = np.asarray(train_acc)
        np_eval_acc = np.asarray(eval_acc)
        plt.subplot(221)
        plt.plot(np_train_cost[:, 1], np_train_cost[:, 0], linewidth=2)
        ax = plt.gca()
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        plt.title("Cost on training data")
        plt.xlabel("No of epochs")
        plt.ylabel("Cost")
        plt.subplot(222)
        plt.plot(np_eval_cost[:, 1], np_eval_cost[:, 0], linewidth=2)
        ax = plt.gca()
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        plt.title("Cost on evaluation data")
        plt.xlabel("No of epochs")
        plt.ylabel("Cost")
        plt.subplot(223)
        plt.plot(np_train_acc[:, 1], np_train_acc[:, 0], linewidth=2)
        plt.title("Accuracy on training data")
        ax = plt.gca()
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        ax.set_ylim([80, 100])
        plt.xlabel("No of epochs")
        plt.ylabel("Accuracy")
        plt.subplot(224)
        plt.plot(np_eval_acc[:, 1], np_eval_acc[:, 0], linewidth=2)
        plt.title("Accuracy on evaluation data")
        ax = plt.gca()
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        ax.set_ylim([80, 100])
        plt.xlabel("No of epochs")
        plt.ylabel("Accuracy")
        plt.tight_layout()
        plt.show()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command-line interface to inspect and execute a graph in a SavedModel.
For detailed usages and examples, please refer to:
https://www.tensorflow.org/programmers_guide/saved_model_cli
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import warnings
import numpy as np
from six import integer_types
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import meta_graph as meta_graph_lib
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import app # pylint: disable=unused-import
from tensorflow.python.saved_model import loader
from tensorflow.python.tools import saved_model_utils
# Set of ops to blacklist.
_OP_BLACKLIST = set(['WriteFile', 'ReadFile'])
def _show_tag_sets(saved_model_dir):
    """Prints the tag-sets stored in SavedModel directory.

    Prints all the tag-sets for MetaGraphs stored in SavedModel directory.

    Args:
      saved_model_dir: Directory containing the SavedModel to inspect.
    """
    tag_sets = reader.get_saved_model_tag_sets(saved_model_dir)
    print('The given SavedModel contains the following tag-sets:')
    for tags in sorted(tag_sets):
        print(', '.join(sorted(tags)))
def _show_signature_def_map_keys(saved_model_dir, tag_set):
    """Prints the keys for each SignatureDef in the SignatureDef map.

    Prints the list of SignatureDef keys from the SignatureDef map specified by
    the given tag-set and SavedModel directory.

    Args:
      saved_model_dir: Directory containing the SavedModel to inspect.
      tag_set: Group of tag(s) of the MetaGraphDef to get SignatureDef map from,
          in string format, separated by ','. For tag-set contains multiple tags,
          all tags must be passed in.
    """
    signature_def_map = get_signature_def_map(saved_model_dir, tag_set)
    print('The given SavedModel MetaGraphDef contains SignatureDefs with the '
          'following keys:')
    for key in sorted(signature_def_map):
        print('SignatureDef key: \"%s\"' % key)
def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def,
                                                signature_def_key):
    """Gets TensorInfo for all inputs of the SignatureDef.

    Args:
      meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
          look up SignatureDef key.
      signature_def_key: A SignatureDef key string.

    Returns:
      A dictionary that maps input tensor keys to TensorInfos.
    """
    signature_def = signature_def_utils.get_signature_def_by_key(
        meta_graph_def, signature_def_key)
    return signature_def.inputs
def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,
                                                 signature_def_key):
    """Gets TensorInfos for all outputs of the SignatureDef.

    Args:
      meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
          look up signature_def_key.
      signature_def_key: A SignatureDef key string.

    Returns:
      A dictionary that maps output tensor keys to TensorInfos.
    """
    signature_def = signature_def_utils.get_signature_def_by_key(
        meta_graph_def, signature_def_key)
    return signature_def.outputs
def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key, indent=0):
    """Prints input and output TensorInfos.

    Prints the details of input and output TensorInfos for the SignatureDef
    mapped by the given signature_def_key.

    Args:
      saved_model_dir: Directory containing the SavedModel to inspect.
      tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated
          by ','. For tag-set contains multiple tags, all tags must be passed in.
      signature_def_key: A SignatureDef key string.
      indent: How far (in increments of 2 spaces) to indent each line of output.
    """
    meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,
                                                          tag_set)
    inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
        meta_graph_def, signature_def_key)
    outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
        meta_graph_def, signature_def_key)
    indent_str = " " * indent

    def in_print(s):
        print(indent_str + s)

    in_print('The given SavedModel SignatureDef contains the following input(s):')
    for key, tensor in sorted(inputs_tensor_info.items()):
        in_print(' inputs[\'%s\'] tensor_info:' % key)
        _print_tensor_info(tensor, indent + 1)
    in_print('The given SavedModel SignatureDef contains the following '
             'output(s):')
    for key, tensor in sorted(outputs_tensor_info.items()):
        in_print(' outputs[\'%s\'] tensor_info:' % key)
        _print_tensor_info(tensor, indent + 1)
    in_print('Method name is: %s' %
             meta_graph_def.signature_def[signature_def_key].method_name)
def _print_tensor_info(tensor_info, indent=0):
    """Prints details of the given tensor_info.

    Args:
      tensor_info: TensorInfo object to be printed.
      indent: How far (in increments of 2 spaces) to indent each line output
    """
    indent_str = " " * indent

    def in_print(s):
        print(indent_str + s)

    # Map the numeric dtype enum value back to its name.
    dtype_names = {value: key for (key, value) in types_pb2.DataType.items()}
    in_print(' dtype: ' + dtype_names[tensor_info.dtype])
    # Display shape as tuple.
    if tensor_info.tensor_shape.unknown_rank:
        shape = 'unknown_rank'
    else:
        dims = ', '.join(str(dim.size) for dim in tensor_info.tensor_shape.dim)
        shape = '(' + dims + ')'
    in_print(' shape: ' + shape)
    in_print(' name: ' + tensor_info.name)
def _show_all(saved_model_dir):
    """Prints tag-set, SignatureDef and Inputs/Outputs information in SavedModel.

    Prints all tag-set, SignatureDef and Inputs/Outputs information stored in
    SavedModel directory.

    Args:
      saved_model_dir: Directory containing the SavedModel to inspect.
    """
    tag_sets = reader.get_saved_model_tag_sets(saved_model_dir)
    for tags in sorted(tag_sets):
        tag_set = ', '.join(tags)
        print("\nMetaGraphDef with tag-set: '%s' "
              "contains the following SignatureDefs:" % tag_set)
        signature_def_map = get_signature_def_map(saved_model_dir, tag_set)
        for signature_def_key in sorted(signature_def_map.keys()):
            print("\nsignature_def['%s']:" % signature_def_key)
            _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key,
                                 indent=1)
def get_meta_graph_def(saved_model_dir, tag_set):
  """DEPRECATED: Use saved_model_utils.get_meta_graph_def instead.

  Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given
  tag-set and SavedModel directory.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect or execute.
    tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
        separated by ','. For a tag-set containing multiple tags, all tags
        must be passed in.

  Raises:
    RuntimeError: An error when the given tag-set does not exist in the
        SavedModel.

  Returns:
    A MetaGraphDef corresponding to the tag-set.
  """
  # Backward-compatibility shim: the implementation moved to saved_model_utils.
  meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,
                                                        tag_set)
  return meta_graph_def
def get_signature_def_map(saved_model_dir, tag_set):
  """Gets SignatureDef map from a MetaGraphDef in a SavedModel.

  Returns the SignatureDef map for the given tag-set in the SavedModel
  directory.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect or execute.
    tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
        string format, separated by ','. For a tag-set containing multiple
        tags, all tags must be passed in.

  Returns:
    A SignatureDef map that maps from string keys to SignatureDefs.
  """
  meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,
                                                        tag_set)
  return meta_graph_def.signature_def
def scan_meta_graph_def(meta_graph_def):
  """Scans meta_graph_def and reports if there are ops on blacklist.

  Print ops if they are on the black list, or print success if no blacklisted
  ops are found.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer.
  """
  ops_in_graph = set(
      meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))
  # Intersect with the module-level blacklist to find offending ops.
  offending_ops = _OP_BLACKLIST & ops_in_graph
  tags = meta_graph_def.meta_info_def.tags
  if offending_ops:
    # TODO(yifeif): print more warnings
    print('MetaGraph with tag set %s contains the following blacklisted ops:' %
          tags, offending_ops)
  else:
    print('MetaGraph with tag set %s does not contain blacklisted ops.' % tags)
def run_saved_model_with_feed_dict(saved_model_dir, tag_set, signature_def_key,
                                   input_tensor_key_feed_dict, outdir,
                                   overwrite_flag, tf_debug=False):
  """Runs SavedModel and fetch all outputs.

  Runs the input dictionary through the MetaGraphDef within a SavedModel
  specified by the given tag_set and SignatureDef. Also save the outputs to file
  if outdir is not None.

  Args:
    saved_model_dir: Directory containing the SavedModel to execute.
    tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
        string format, separated by ','. For tag-set contains multiple tags, all
        tags must be passed in.
    signature_def_key: A SignatureDef key string.
    input_tensor_key_feed_dict: A dictionary maps input keys to numpy ndarrays.
    outdir: A directory to save the outputs to. If the directory doesn't exist,
        it will be created.
    overwrite_flag: A boolean flag to allow overwrite output file if file with
        the same name exists.
    tf_debug: A boolean flag to use TensorFlow Debugger (TFDBG) to observe the
        intermediate Tensor values and runtime GraphDefs while running the
        SavedModel.

  Raises:
    ValueError: When any of the input tensor keys is not valid.
    RuntimeError: An error when output file already exists and overwrite is not
        enabled.
  """
  # Get a list of output tensor names.
  meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,
                                                        tag_set)
  # Re-create feed_dict based on input tensor name instead of key as session.run
  # uses tensor name.
  inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
      meta_graph_def, signature_def_key)
  # Check if input tensor keys are valid.
  for input_key_name in input_tensor_key_feed_dict.keys():
    if input_key_name not in inputs_tensor_info.keys():
      raise ValueError(
          '"%s" is not a valid input key. Please choose from %s, or use '
          '--show option.' %
          (input_key_name, '"' + '", "'.join(inputs_tensor_info.keys()) + '"'))
  # Key the feed dict by tensor name (what session.run expects), not by
  # SignatureDef input key.
  inputs_feed_dict = {
      inputs_tensor_info[key].name: tensor
      for key, tensor in input_tensor_key_feed_dict.items()
  }
  # Get outputs
  outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
      meta_graph_def, signature_def_key)
  # Sort to preserve order because we need to go from value to key later.
  output_tensor_keys_sorted = sorted(outputs_tensor_info.keys())
  output_tensor_names_sorted = [
      outputs_tensor_info[tensor_key].name
      for tensor_key in output_tensor_keys_sorted
  ]
  # Load and run the SavedModel in a fresh graph so it cannot collide with any
  # state already present in the default graph.
  with session.Session(graph=ops_lib.Graph()) as sess:
    loader.load(sess, tag_set.split(','), saved_model_dir)
    if tf_debug:
      # Wrap the session so every run() call drops into the TFDBG CLI.
      sess = local_cli_wrapper.LocalCLIDebugWrapperSession(sess)
    outputs = sess.run(output_tensor_names_sorted, feed_dict=inputs_feed_dict)
    # outputs come back in the same order as output_tensor_names_sorted, so
    # index i maps back to output_tensor_keys_sorted[i].
    for i, output in enumerate(outputs):
      output_tensor_key = output_tensor_keys_sorted[i]
      print('Result for output key %s:\n%s' % (output_tensor_key, output))
      # Only save if outdir is specified.
      if outdir:
        # Create directory if outdir does not exist
        if not os.path.isdir(outdir):
          os.makedirs(outdir)
        output_full_path = os.path.join(outdir, output_tensor_key + '.npy')
        # If overwrite not enabled and file already exist, error out
        if not overwrite_flag and os.path.exists(output_full_path):
          raise RuntimeError(
              'Output file %s already exists. Add \"--overwrite\" to overwrite'
              ' the existing output files.' % output_full_path)
        np.save(output_full_path, output)
        print('Output %s is saved to %s' % (output_tensor_key,
                                            output_full_path))
def preprocess_inputs_arg_string(inputs_str):
  """Parses input arg into dictionary that maps input to file/variable tuple.

  Parses input string in the format of, for example,
  "input1=filename1[variable_name1],input2=filename2" into a
  dictionary looks like
  {'input_key1': (filename1, variable_name1),
   'input_key2': (file2, None)}
  , which maps input keys to a tuple of file name and variable name (None if
  empty).

  Args:
    inputs_str: A string that specified where to load inputs. Inputs are
      separated by semicolons.
        * For each input key:
            '<input_key>=<filename>' or
            '<input_key>=<filename>[<variable_name>]'
        * The optional 'variable_name' key will be set to None if not
          specified.

  Returns:
    A dictionary that maps input keys to a tuple of file name and variable
    name.

  Raises:
    RuntimeError: An error when the given input string is in a bad format.
  """
  input_dict = {}
  inputs_raw = inputs_str.split(';')
  for input_raw in filter(bool, inputs_raw):  # skip empty strings
    # Format of input=filename[variable_name]
    match = re.match(r'([^=]+)=([^\[\]]+)\[([^\[\]]+)\]$', input_raw)
    if match:
      input_dict[match.group(1)] = match.group(2), match.group(3)
    else:
      # Format of input=filename
      match = re.match(r'([^=]+)=([^\[\]]+)$', input_raw)
      if match:
        input_dict[match.group(1)] = match.group(2), None
      else:
        # Fix: the original adjacent string literals concatenated without
        # separating spaces, rendering as 'follow"<input_key>...", or"...'.
        raise RuntimeError(
            '--inputs "%s" format is incorrect. Please follow '
            '"<input_key>=<filename>", or '
            '"<input_key>=<filename>[<variable_name>]"' % input_raw)
  return input_dict
def preprocess_input_exprs_arg_string(input_exprs_str):
  """Parses input arg into dictionary that maps input key to python expression.

  Parses input string in the format of 'input_key=<python expression>' into a
  dictionary that maps each input_key to its python expression.

  Args:
    input_exprs_str: A string that specifies python expression for input keys.
      Each input is separated by semicolon. For each input key:
        'input_key=<python expression>'

  Returns:
    A dictionary that maps input keys to their values.

  Raises:
    RuntimeError: An error when the given input string is in a bad format.
  """
  input_dict = {}
  for input_raw in filter(bool, input_exprs_str.split(';')):
    # Fix: validate each individual entry. The original tested the whole
    # input_exprs_str, so a malformed entry such as 'a=1;bad' slipped past the
    # check and crashed with an unpacking ValueError below. Also report the
    # offending entry (not the whole string) and separate the message words.
    if '=' not in input_raw:
      raise RuntimeError('--input_exprs "%s" format is incorrect. Please '
                         'follow "<input_key>=<python expression>"' % input_raw)
    input_key, expr = input_raw.split('=', 1)
    # ast.literal_eval does not work with numpy expressions.
    # SECURITY NOTE: eval executes arbitrary Python; this assumes the
    # expression comes from a trusted CLI user, never untrusted input.
    input_dict[input_key] = eval(expr)  # pylint: disable=eval-used
  return input_dict
def preprocess_input_examples_arg_string(input_examples_str):
  """Parses input into dict that maps input keys to lists of tf.Example.

  Parses input string in the format of 'input_key1=[{feature_name:
  feature_list}];input_key2=[{feature_name:feature_list}];' into a dictionary
  that maps each input_key to its list of serialized tf.Example.

  Args:
    input_examples_str: A string that specifies a list of dictionaries of
      feature_names and their feature_lists for each input.
      Each input is separated by semicolon. For each input key:
        'input=[{feature_name1: feature_list1, feature_name2:feature_list2}]'
      items in feature_list can be the type of float, int, long or str.

  Returns:
    A dictionary that maps input keys to lists of serialized tf.Example.

  Raises:
    ValueError: An error when the given tf.Example is not a list.
  """
  # Reuse the expression parser: each value must evaluate to a Python list of
  # feature dictionaries.
  parsed = preprocess_input_exprs_arg_string(input_examples_str)
  for key, example_list in parsed.items():
    if not isinstance(example_list, list):
      raise ValueError(
          'tf.Example input must be a list of dictionaries, but "%s" is %s' %
          (example_list, type(example_list)))
    # Replace each feature dictionary with its serialized tf.Example string.
    parsed[key] = [_create_example_string(d) for d in example_list]
  return parsed
def _create_example_string(example_dict):
  """Create a serialized tf.Example from a feature dictionary.

  Args:
    example_dict: A dictionary mapping feature names to lists of values. The
        type of the first element of each list (float, int/long or str) decides
        which tf.train.Feature field the values are stored in.

  Returns:
    A serialized tf.Example proto string.

  Raises:
    ValueError: When a feature value is not a list, is an empty list, or holds
        an unsupported element type.
  """
  example = example_pb2.Example()
  for feature_name, feature_list in example_dict.items():
    if not isinstance(feature_list, list):
      raise ValueError('feature value must be a list, but %s: "%s" is %s' %
                       (feature_name, feature_list, type(feature_list)))
    if not feature_list:
      # Fix: the original indexed feature_list[0] unconditionally, so an empty
      # list crashed with an unhelpful IndexError instead of a clear error.
      raise ValueError('feature value list for "%s" must not be empty' %
                       feature_name)
    # Dispatch on the first element's type; tf.train.Feature fields are
    # homogeneous (float_list / bytes_list / int64_list).
    if isinstance(feature_list[0], float):
      example.features.feature[feature_name].float_list.value.extend(
          feature_list)
    elif isinstance(feature_list[0], str):
      example.features.feature[feature_name].bytes_list.value.extend(
          feature_list)
    elif isinstance(feature_list[0], integer_types):
      example.features.feature[feature_name].int64_list.value.extend(
          feature_list)
    else:
      raise ValueError(
          'Type %s for value %s is not supported for tf.train.Feature.' %
          (type(feature_list[0]), feature_list[0]))
  return example.SerializeToString()
def load_inputs_from_input_arg_string(inputs_str, input_exprs_str,
                                      input_examples_str):
  """Parses input arg strings and create inputs feed_dict.

  Parses '--inputs' string for inputs to be loaded from file, and parses
  '--input_exprs' string for inputs to be evaluated from python expression.
  '--input_examples' string for inputs to be created from tf.example feature
  dictionary list.

  Args:
    inputs_str: A string that specified where to load inputs. Each input is
        separated by semicolon.
        * For each input key:
            '<input_key>=<filename>' or
            '<input_key>=<filename>[<variable_name>]'
        * The optional 'variable_name' key will be set to None if not specified.
        * File specified by 'filename' will be loaded using numpy.load. Inputs
            can be loaded from only .npy, .npz or pickle files.
        * The "[variable_name]" key is optional depending on the input file type
            as descripted in more details below.
        When loading from a npy file, which always contains a numpy ndarray, the
        content will be directly assigned to the specified input tensor. If a
        variable_name is specified, it will be ignored and a warning will be
        issued.
        When loading from a npz zip file, user can specify which variable within
        the zip file to load for the input tensor inside the square brackets. If
        nothing is specified, this function will check that only one file is
        included in the zip and load it for the specified input tensor.
        When loading from a pickle file, if no variable_name is specified in the
        square brackets, whatever that is inside the pickle file will be passed
        to the specified input tensor, else SavedModel CLI will assume a
        dictionary is stored in the pickle file and the value corresponding to
        the variable_name will be used.
    input_exprs_str: A string that specifies python expressions for inputs.
        * In the format of: '<input_key>=<python expression>'.
        * numpy module is available as np.
    input_examples_str: A string that specifies tf.Example with dictionary.
        * In the format of: '<input_key>=<[{feature:value list}]>'

  Returns:
    A dictionary that maps input tensor keys to numpy ndarrays.

  Raises:
    RuntimeError: An error when a key is specified, but the input file contains
        multiple numpy ndarrays, none of which matches the given key.
    RuntimeError: An error when no key is specified, but the input file contains
        more than one numpy ndarrays.
  """
  tensor_key_feed_dict = {}
  # Parse all three input sources up front; later sources override earlier
  # ones per key (--inputs < --input_exprs < --input_examples).
  inputs = preprocess_inputs_arg_string(inputs_str)
  input_exprs = preprocess_input_exprs_arg_string(input_exprs_str)
  input_examples = preprocess_input_examples_arg_string(input_examples_str)
  for input_tensor_key, (filename, variable_name) in inputs.items():
    # NOTE(review): np.load returns an ndarray for .npy, an NpzFile for .npz,
    # and unpickles other files — newer numpy needs allow_pickle=True for the
    # pickle case; TODO confirm against the numpy version in use.
    data = np.load(filename)
    # When a variable_name key is specified for the input file
    if variable_name:
      # if file contains a single ndarray, ignore the input name
      if isinstance(data, np.ndarray):
        warnings.warn(
            'Input file %s contains a single ndarray. Name key \"%s\" ignored.'
            % (filename, variable_name))
        tensor_key_feed_dict[input_tensor_key] = data
      else:
        # NpzFile and dict-like pickle payloads both support 'in' / indexing.
        if variable_name in data:
          tensor_key_feed_dict[input_tensor_key] = data[variable_name]
        else:
          raise RuntimeError(
              'Input file %s does not contain variable with name \"%s\".' %
              (filename, variable_name))
    # When no key is specified for the input file.
    else:
      # Check if npz file only contains a single numpy ndarray.
      if isinstance(data, np.lib.npyio.NpzFile):
        variable_name_list = data.files
        if len(variable_name_list) != 1:
          raise RuntimeError(
              'Input file %s contains more than one ndarrays. Please specify '
              'the name of ndarray to use.' % filename)
        tensor_key_feed_dict[input_tensor_key] = data[variable_name_list[0]]
      else:
        tensor_key_feed_dict[input_tensor_key] = data
  # When input is a python expression:
  for input_tensor_key, py_expr_evaluated in input_exprs.items():
    if input_tensor_key in tensor_key_feed_dict:
      warnings.warn(
          'input_key %s has been specified with both --inputs and --input_exprs'
          ' options. Value in --input_exprs will be used.' % input_tensor_key)
    tensor_key_feed_dict[input_tensor_key] = py_expr_evaluated
  # When input is a tf.Example:
  for input_tensor_key, example in input_examples.items():
    if input_tensor_key in tensor_key_feed_dict:
      warnings.warn(
          'input_key %s has been specified in multiple options. Value in '
          '--input_examples will be used.' % input_tensor_key)
    tensor_key_feed_dict[input_tensor_key] = example
  return tensor_key_feed_dict
def show(args):
  """Function triggered by show command.

  Args:
    args: A namespace parsed from command line.
  """
  if args.all:
    # --all: dump every tag-set, SignatureDef and tensor info.
    _show_all(args.dir)
  elif args.tag_set is None:
    # No tag-set given: only list the available tag-sets.
    _show_tag_sets(args.dir)
  elif args.signature_def is None:
    # Tag-set but no SignatureDef key: list the SignatureDef keys.
    _show_signature_def_map_keys(args.dir, args.tag_set)
  else:
    # Fully specified: show input/output tensor info for one SignatureDef.
    _show_inputs_outputs(args.dir, args.tag_set, args.signature_def)
def run(args):
  """Function triggered by run command.

  Args:
    args: A namespace parsed from command line.

  Raises:
    AttributeError: An error when neither --inputs nor --input_exprs is passed
        to run command.
  """
  if not (args.inputs or args.input_exprs or args.input_examples):
    raise AttributeError(
        'At least one of --inputs, --input_exprs or --input_examples must be '
        'required')
  feed_dict = load_inputs_from_input_arg_string(
      args.inputs, args.input_exprs, args.input_examples)
  run_saved_model_with_feed_dict(args.dir, args.tag_set, args.signature_def,
                                 feed_dict, args.outdir,
                                 args.overwrite, tf_debug=args.tf_debug)
def scan(args):
  """Function triggered by scan command.

  Args:
    args: A namespace parsed from command line.
  """
  if not args.tag_set:
    # No tag-set given: scan every MetaGraph in the SavedModel.
    saved_model = reader.read_saved_model(args.dir)
    for meta_graph_def in saved_model.meta_graphs:
      scan_meta_graph_def(meta_graph_def)
  else:
    # Scan only the MetaGraph identified by the given tag-set.
    scan_meta_graph_def(
        saved_model_utils.get_meta_graph_def(args.dir, args.tag_set))
def create_parser():
  """Creates a parser that parse the command line arguments.

  Returns:
    An argparse.ArgumentParser configured with 'show', 'run' and 'scan'
    subcommands (note: the parser itself is returned, not a parsed namespace).
  """
  parser = argparse.ArgumentParser(
      description='saved_model_cli: Command-line interface for SavedModel')
  parser.add_argument('-v', '--version', action='version', version='0.1.0')
  # Each subcommand registers its handler via set_defaults(func=...); main()
  # dispatches on args.func after parsing.
  subparsers = parser.add_subparsers(
      title='commands', description='valid commands', help='additional help')
  # show command
  show_msg = (
      'Usage examples:\n'
      'To show all tag-sets in a SavedModel:\n'
      '$saved_model_cli show --dir /tmp/saved_model\n\n'
      'To show all available SignatureDef keys in a '
      'MetaGraphDef specified by its tag-set:\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve\n\n'
      'For a MetaGraphDef with multiple tags in the tag-set, all tags must be '
      'passed in, separated by \';\':\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu\n\n'
      'To show all inputs and outputs TensorInfo for a specific'
      ' SignatureDef specified by the SignatureDef key in a'
      ' MetaGraph.\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve'
      ' --signature_def serving_default\n\n'
      'To show all available information in the SavedModel:\n'
      '$saved_model_cli show --dir /tmp/saved_model --all')
  parser_show = subparsers.add_parser(
      'show',
      description=show_msg,
      formatter_class=argparse.RawTextHelpFormatter)
  parser_show.add_argument(
      '--dir',
      type=str,
      required=True,
      help='directory containing the SavedModel to inspect')
  parser_show.add_argument(
      '--all',
      action='store_true',
      help='if set, will output all information in given SavedModel')
  parser_show.add_argument(
      '--tag_set',
      type=str,
      default=None,
      help='tag-set of graph in SavedModel to show, separated by \',\'')
  parser_show.add_argument(
      '--signature_def',
      type=str,
      default=None,
      metavar='SIGNATURE_DEF_KEY',
      help='key of SignatureDef to display input(s) and output(s) for')
  parser_show.set_defaults(func=show)
  # run command
  run_msg = ('Usage example:\n'
             'To run input tensors from files through a MetaGraphDef and save'
             ' the output tensors to files:\n'
             '$saved_model_cli show --dir /tmp/saved_model --tag_set serve \\\n'
             '   --signature_def serving_default \\\n'
             '   --inputs input1_key=/tmp/124.npz[x],input2_key=/tmp/123.npy '
             '\\\n'
             '   --input_exprs \'input3_key=np.ones(2)\' \\\n'
             '   --input_examples '
             '\'input4_key=[{"id":[26],"weights":[0.5, 0.5]}]\' \\\n'
             '   --outdir=/out\n\n'
             'For more information about input file format, please see:\n'
             'https://www.tensorflow.org/programmers_guide/saved_model_cli\n')
  parser_run = subparsers.add_parser(
      'run', description=run_msg, formatter_class=argparse.RawTextHelpFormatter)
  parser_run.add_argument(
      '--dir',
      type=str,
      required=True,
      help='directory containing the SavedModel to execute')
  parser_run.add_argument(
      '--tag_set',
      type=str,
      required=True,
      help='tag-set of graph in SavedModel to load, separated by \',\'')
  parser_run.add_argument(
      '--signature_def',
      type=str,
      required=True,
      metavar='SIGNATURE_DEF_KEY',
      help='key of SignatureDef to run')
  msg = ('Loading inputs from files, in the format of \'<input_key>=<filename>,'
         ' or \'<input_key>=<filename>[<variable_name>]\', separated by \';\'.'
         ' The file format can only be from .npy, .npz or pickle.')
  parser_run.add_argument('--inputs', type=str, default='', help=msg)
  msg = ('Specifying inputs by python expressions, in the format of'
         ' "<input_key>=\'<python expression>\'", separated by \';\'. '
         'numpy module is available as \'np\'. '
         'Will override duplicate input keys from --inputs option.')
  parser_run.add_argument('--input_exprs', type=str, default='', help=msg)
  msg = (
      'Specifying tf.Example inputs as list of dictionaries. For example: '
      '<input_key>=[{feature0:value_list,feature1:value_list}]. Use ";" to '
      'separate input keys. Will override duplicate input keys from --inputs '
      'and --input_exprs option.')
  parser_run.add_argument('--input_examples', type=str, default='', help=msg)
  parser_run.add_argument(
      '--outdir',
      type=str,
      default=None,
      help='if specified, output tensor(s) will be saved to given directory')
  parser_run.add_argument(
      '--overwrite',
      action='store_true',
      help='if set, output file will be overwritten if it already exists.')
  parser_run.add_argument(
      '--tf_debug',
      action='store_true',
      help='if set, will use TensorFlow Debugger (tfdbg) to watch the '
      'intermediate Tensors and runtime GraphDefs while running the '
      'SavedModel.')
  parser_run.set_defaults(func=run)
  # scan command
  scan_msg = ('Usage example:\n'
              'To scan for blacklisted ops in SavedModel:\n'
              '$saved_model_cli scan --dir /tmp/saved_model\n'
              'To scan a specific MetaGraph, pass in --tag_set\n')
  parser_scan = subparsers.add_parser(
      'scan',
      description=scan_msg,
      formatter_class=argparse.RawTextHelpFormatter)
  parser_scan.add_argument(
      '--dir',
      type=str,
      required=True,
      help='directory containing the SavedModel to execute')
  parser_scan.add_argument(
      '--tag_set',
      type=str,
      help='tag-set of graph in SavedModel to scan, separated by \',\'')
  parser_scan.set_defaults(func=scan)
  return parser
def main():
  """Parses command-line args and dispatches to the selected subcommand."""
  parser = create_parser()
  parsed_args = parser.parse_args()
  # Python 3 argparse no longer errors out by itself when no subcommand is
  # given, so check for the handler explicitly.
  if not hasattr(parsed_args, 'func'):
    parser.error('too few arguments')
  parsed_args.func(parsed_args)
# Script entry point; sys.exit(main()) exits with status 0 since main()
# returns None (argparse errors exit with status 2 on their own).
if __name__ == '__main__':
  sys.exit(main())
# NOTE(review): removed trailing non-Python text ("| Subsets and Splits / No
# community queries yet / ...") — dataset-viewer scrape residue that made the
# module unparseable.